diff --git a/go.mod b/go.mod index a63ba7294bf..e2b595ea16e 100644 --- a/go.mod +++ b/go.mod @@ -8,21 +8,21 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/container-orchestrated-devices/container-device-interface v0.4.0 github.com/containerd/cgroups v1.0.4 - github.com/containerd/containerd v1.5.8 + github.com/containerd/containerd v1.6.4 github.com/containerd/cri-containerd v1.19.0 github.com/containerd/fifo v1.0.0 github.com/containerd/ttrpc v1.1.0 github.com/containerd/typeurl v1.0.2 github.com/containernetworking/cni v1.1.1 github.com/containernetworking/plugins v1.1.1 - github.com/containers/buildah v1.23.1 - github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f + github.com/containers/buildah v1.26.1 + github.com/containers/common v0.48.0 github.com/containers/conmon v2.0.20+incompatible github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c - github.com/containers/image/v5 v5.17.0 + github.com/containers/image/v5 v5.21.1 github.com/containers/ocicrypt v1.1.5 github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d - github.com/containers/storage v1.37.0 + github.com/containers/storage v1.40.2 github.com/coreos/go-systemd/v22 v22.3.2 github.com/cpuguy83/go-md2man v1.0.10 github.com/creack/pty v1.1.18 @@ -86,7 +86,7 @@ require ( capnproto.org/go/capnp/v3 v3.0.0-20220504220541-7329fad9c27b // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/hcsshim v0.9.2 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/VividCortex/ewma v1.2.0 // indirect @@ -104,22 +104,23 @@ require ( github.com/cilium/ebpf v0.7.0 // indirect github.com/containerd/console v1.0.3 // indirect github.com/containerd/go-runc v1.0.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.10.1 // indirect - github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b // indirect + github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect + github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect github.com/containers/psgo v1.7.1 // indirect github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/disiqueira/gotree/v3 v3.0.2 // indirect - github.com/docker/docker v20.10.12+incompatible // indirect + github.com/docker/docker v20.10.14+incompatible // indirect github.com/docker/docker-credential-helpers v0.6.4 // indirect - github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect + github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc // indirect github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 // indirect github.com/emirpasic/gods v1.12.0 // indirect github.com/fatih/color v1.13.0 // indirect - github.com/fsouza/go-dockerclient v1.7.4 // indirect + github.com/fsouza/go-dockerclient v1.7.11 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-git/gcfg v1.5.0 // indirect github.com/go-git/go-billy/v5 v5.3.1 // indirect @@ -154,11 
+155,11 @@ require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/jinzhu/copier v0.3.2 // indirect + github.com/jinzhu/copier v0.3.5 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kevinburke/ssh_config v1.1.0 // indirect - github.com/klauspost/compress v1.14.2 // indirect + github.com/klauspost/compress v1.15.2 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/lithammer/dedent v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -175,22 +176,21 @@ require ( github.com/mmarkdown/mmark v2.0.40+incompatible // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/mount v0.2.0 // indirect - github.com/moby/sys/mountinfo v0.6.0 // indirect + github.com/moby/sys/mountinfo v0.6.1 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/mtrmac/gpgme v0.1.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 // indirect - github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 // indirect + github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805 // indirect + github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/proglottis/gpgme v0.1.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect @@ -207,10 +207,11 @@ require ( github.com/spiegel-im-spiegel/errs v1.0.2 // indirect github.com/spiegel-im-spiegel/go-cvss v0.4.0 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect + github.com/sylabs/sif/v2 v2.7.0 // indirect github.com/tchap/go-patricia v2.3.0+incompatible // indirect github.com/ulikunitz/xz v0.5.10 // indirect github.com/vbatts/tar-split v0.11.2 // indirect - github.com/vbauerster/mpb/v7 v7.1.5 // indirect + github.com/vbauerster/mpb/v7 v7.4.1 // indirect github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect github.com/xanzy/go-gitlab v0.68.0 // indirect github.com/xanzy/ssh-agent v0.3.0 // indirect diff --git a/go.sum b/go.sum index af323ecd782..67d949ace83 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ 4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= 
bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= capnproto.org/go/capnp/v3 v3.0.0-20220504220541-7329fad9c27b h1:Dyvj8g9LBROQ6vC1GyfOPrH7GHvZAjMCIGwclTTsFaE= @@ -64,6 +65,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo= github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -124,6 +126,7 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= @@ -146,8 +149,9 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5 h1:cSHEbLj0GZeHM1mWG84qEnGFojNEQ83W7cwaPRjcwXU= +github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= @@ -312,6 +316,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/cgroups v1.0.4 
h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -339,16 +344,21 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.8 h1:NmkCC1/QxyZFBny8JogwLpOy2f+VEbO/f6bV2Mqtwuw= github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= +github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig= +github.com/containerd/containerd v1.6.4 h1:SEDZBp10mhCp+hkO3Njz/YhGrI7ah3edNcUlRdUPOgg= +github.com/containerd/containerd v1.6.4/go.mod h1:oWOqbuJUZmOVafhA0lj2NAXbiO1u7F0K5l1bUgdyo94= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA= +github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= github.com/containerd/cri-containerd v1.19.0 h1:PcTvvl+SHaekCMQZFQkYjn1RKlYrK6khYbuhOeF68k0= github.com/containerd/cri-containerd v1.19.0/go.mod h1:wxbGdReWGCalzGOEpifoHeYCK4xAgnj4o/4bVB+9voU= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= @@ -360,6 +370,10 @@ github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.5/go.mod h1:Rf2ZrMycr1El589IyuRzn7RkfdRZVKaFGaxSDHVAjj0= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod 
h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= @@ -370,6 +384,8 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= +github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= @@ -377,8 +393,8 @@ github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201217071531-2b97b5837 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU= github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0= -github.com/containerd/stargz-snapshotter/estargz v0.10.1 h1:hd1EoVjI2Ax8Cr64tdYqnJ4i4pZU49FkEf5kU8KxQng= -github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0= +github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk= +github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -400,6 +416,7 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/cni v1.1.0/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k= github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= @@ -407,26 +424,31 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE= 
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= -github.com/containers/buildah v1.23.1 h1:Tpc9DsRuU+0Oofewpxb6OJVNQjCu7yloN/obUqzfDTY= github.com/containers/buildah v1.23.1/go.mod h1:4WnrN0yrA7ab0ppgunixu2WM1rlD2rG8QLJAKbEkZlQ= +github.com/containers/buildah v1.26.1 h1:D65Vuo+orsI14WWtJhSX6KrpgBBa7+hveVWevzG8p8E= +github.com/containers/buildah v1.26.1/go.mod h1:CsWSG8OpJd8v3mlLREJzVAOBgC93DjRNALUVHoi8QsY= github.com/containers/common v0.44.2/go.mod h1:7sdP4vmI5Bm6FPFxb3lvAh1Iktb6tiO1MzjUzhxdoGo= -github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f h1:vVmx51AzWvB4/ao2zyR6s053a1leLTOh+zsOPVWQRgA= github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f/go.mod h1:aml/OO4FmYfPbfT87rvWiCgkLzTdqO6PuZ/xXq6bPbk= +github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSsNlw= +github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c h1:x6gYpFEXrLN+4+NgGg8QkmkGrtujHKgdUgoGY9oUtjU= github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c/go.mod h1:YogXfQJ+PfB6Dsc/tZDoTazDBdWiAUYnyElblkEtALo= github.com/containers/image/v5 v5.10.4/go.mod h1:SgIbWEedCNBbn2FI5cH0/jed1Ecy2s8XK5zTxvJTzII= github.com/containers/image/v5 v5.16.0/go.mod h1:XgTpfAPLRGOd1XYyCU5cISFr777bLmOerCSpt/v7+Q4= -github.com/containers/image/v5 v5.17.0 h1:KS5pro80CCsSp5qDBTMmSAWQo+xcBX19zUPExmYX2OQ= -github.com/containers/image/v5 v5.17.0/go.mod h1:GnYVusVRFPMMTAAUkrcS8NNSpBp8oyrjOUe04AAmRr4= -github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= +github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q= +github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU= +github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= +github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ= github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc= github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d h1:cztBXICnZeM0Cz/7IzbLbMs8jYCnJmil94NJszFYNXA= @@ -437,8 +459,10 @@ github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3E github.com/containers/storage 
v1.24.8/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ= github.com/containers/storage v1.35.0/go.mod h1:qzYhasQP2/V9D9XdO+vRwkHBhsBO0oznMLzzRDQ8s20= github.com/containers/storage v1.36.0/go.mod h1:vbd3SKVQNHdmU5qQI6hTEcKPxnZkGqydG4f6uwrI5a8= -github.com/containers/storage v1.37.0 h1:HVhDsur6sx889ZIZ1d1kEiOzv3gsr5q0diX2VZmOdSg= github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4= +github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs= +github.com/containers/storage v1.40.2 h1:GUlHaGnrs1JOEwv6YEvkQdgYXOXZdU1Angy4wgWNgF8= +github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs= github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -510,17 +534,20 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.14+incompatible h1:+T9/PRYWNDo5SZl5qS1r9Mo/0Q8AwxKKPtu9S1yxM0w= +github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= +github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= 
@@ -585,8 +612,10 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsouza/go-dockerclient v1.7.4 h1:daYb0km2a91aNt2KTc4AEcTwgExYtQXHhkt5mjdRD1o= github.com/fsouza/go-dockerclient v1.7.4/go.mod h1:het+LPt7NaTEVGgwXJAKxPn77RZrQKb2EXJb4e+BHv0= +github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y= +github.com/fsouza/go-dockerclient v1.7.11 h1:pRmGMANAl+tmr+IYNYq8IWWcSbiKQMSRumYLv8H5sfk= +github.com/fsouza/go-dockerclient v1.7.11/go.mod h1:zvYxutUNOK853i1s7VywZxQgxSHbm7A6en/q9MHBN6k= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= @@ -978,8 +1007,9 @@ github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7H github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= -github.com/jinzhu/copier v0.3.2 h1:QdBOCbaouLDYaIPFfi1bKv5F5tPpeTwXe4sD0jqtz5w= github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -1042,8 +1072,9 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= -github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw= +github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -1200,9 +1231,12 @@ github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+S github.com/moby/sys/mountinfo v0.4.0/go.mod 
h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo= github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.1 h1:+H/KnGEAGRpTrEAqNVQ2AM3SiwMgJUt/TXj+Z8cmCIc= +github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= @@ -1225,7 +1259,6 @@ github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1: github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s= github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -1241,6 +1274,7 @@ github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4N github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= @@ -1301,7 +1335,10 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20200206005212-79b036d80240/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod 
h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4= github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg= github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1312,6 +1349,7 @@ github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT5 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw= @@ -1335,17 +1373,18 @@ github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3 github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= -github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 h1:WaxyNFpmIDu4i6so9r6LVFIbSaXqsj8oitMitt86ae4= github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= +github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805 h1:bfLqBGqF04GAoqbMkSrd5VPk/t66OnP0bTTisMaF4ro= +github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/orangecms/go-framebuffer v0.0.0-20200613202404-a0700d90c330/go.mod h1:3Myb/UszJY32F2G7yGkUtcW/ejHpjlGfYLim7cv2uKA= -github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/otiai10/copy v1.2.0/go.mod 
h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= @@ -1384,6 +1423,8 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/proglottis/gpgme v0.1.1 h1:72xI0pt/hy7pqsRxk32KExITkXp+RZErRizsA+up/lQ= +github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -1391,6 +1432,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= @@ -1478,6 +1520,8 @@ github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= @@ -1576,9 +1620,12 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= 
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk= +github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ= github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1605,6 +1652,7 @@ github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2Ogf github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/twitchtv/twirp v5.8.0+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= github.com/u-root/gobusybox/src v0.0.0-20220120221406-4e2fbb8f41bf/go.mod h1:n6S68wL67GT3KLZWmLzrJCr1rMLQ6xzA9rv2k46kF2Q= github.com/u-root/iscsinl v0.1.1-0.20210528121423-84c32645822a/go.mod h1:RWIgJWqm9/0gjBZ0Hl8iR6MVGzZ+yAda2uqqLmetE2I= @@ -1647,8 +1695,8 @@ github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8J github.com/vbauerster/mpb/v6 v6.0.4/go.mod h1:a/+JT57gqh6Du0Ay5jSR+uBMfXGdlR7VQlGP52fJxLM= github.com/vbauerster/mpb/v7 v7.1.3/go.mod h1:X5GlohZw2fIpypMXWaKart+HGSAjpz49skxkDk+ZL7c= github.com/vbauerster/mpb/v7 v7.1.4/go.mod h1:4zulrZfvshMOnd2APiHgWS9Yrw08AzZVRr9G11tkpcQ= -github.com/vbauerster/mpb/v7 v7.1.5 h1:vtUEUfQHmNeJETyF4AcRCOV6RC4wqFwNORy52UMXPbQ= -github.com/vbauerster/mpb/v7 v7.1.5/go.mod h1:4M8+qAoQqV60WDNktBM5k05i1iTrXE7rjKOHEVkVlec= +github.com/vbauerster/mpb/v7 v7.4.1 h1:NhLMWQ3gNg2KJR8oeA9lO8Xvq+eNPmixDmB6JEQOUdA= +github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo= github.com/vdemeester/k8s-pkg-credentialprovider v1.18.1-0.20201019120933-f1d16962a4db/go.mod h1:grWy0bkr1XO6hqbaaCKaPXqkBVlMGHYG6PGykktwbJc= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= @@ -1747,6 +1795,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdm go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 h1:VQbUHoJqytHHSJ1OZodPH9tvZZSVzUHjPHpkO85sT6k= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= @@ -1807,6 +1856,7 @@ golang.org/x/crypto 
v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1929,10 +1979,12 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2099,12 +2151,10 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210925032602-92d5a993a665/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2114,7 +2164,9 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2139,6 +2191,7 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2356,7 +2409,6 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= @@ -2453,6 +2505,7 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= @@ -2500,6 +2553,7 @@ k8s.io/system-validators v1.7.0/go.mod 
h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0V k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210305010621-2afb4311ab10/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2539,6 +2593,7 @@ sigs.k8s.io/release-utils v0.2.0/go.mod h1:9O5livl2h3Q56jUkoZ7UnV22XVRB6MuD4l/51 sigs.k8s.io/release-utils v0.7.0 h1:W8+0glcVHbBUpeQ9V31zKyzSh5V/06XRlWBHlyMcpkw= sigs.k8s.io/release-utils v0.7.0/go.mod h1:SK+/kkc2i7ZO0CFXDCvXpzIZyt13cPlscaApaZD7VmU= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go index 0d2eb45b8e1..0952d8c7392 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -74,6 +74,15 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) } + // We set CompressionNone as the preferred compression algorithm because + // of compression side channel attacks, then append the configured + // DefaultCompressionAlgo if any is set (to signal support for cases + // where the application knows that using compression is safe). + selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)} + if config.Compression() != packet.CompressionNone { + selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) + } + // And for DefaultMode. selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())} if config.AEAD().Mode() != packet.AEADModeEAX { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index c3fedf7a250..44c6ebf0ddd 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -37,15 +37,17 @@ type Identity struct { Name string // by convention, has the form "Full Name (comment) " UserId *packet.UserId SelfSignature *packet.Signature - Signatures []*packet.Signature + Revocations []*packet.Signature + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures } // A Subkey is an additional public key in an Entity. Subkeys can be used for // encryption. 
type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature + Revocations []*packet.Signature } // A Key identifies a specific public key in an Entity. This is either the @@ -55,6 +57,7 @@ type Key struct { PublicKey *packet.PublicKey PrivateKey *packet.PrivateKey SelfSignature *packet.Signature + Revocations []*packet.Signature } // A KeyRing provides access to public and private keys. @@ -71,28 +74,58 @@ type KeyRing interface { DecryptionKeys() []Key } -// PrimaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. +// PrimaryIdentity returns an Identity, preferring non-revoked identities, +// identities marked as primary, or the latest-created identity, in that order. func (e *Entity) PrimaryIdentity() *Identity { - var firstIdentity *Identity + var primaryIdentity *Identity for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident + if shouldPreferIdentity(primaryIdentity, ident) { + primaryIdentity = ident } } - return firstIdentity + return primaryIdentity +} + +func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { + if (existingId == nil) { + return true + } + + if (len(existingId.Revocations) > len(potentialNewId.Revocations)) { + return true + } + + if (len(existingId.Revocations) < len(potentialNewId.Revocations)) { + return false + } + + if existingId.SelfSignature == nil { + return true + } + + if (existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && + !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId)) { + return false + } + + if (!(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) && + potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) { + return true + } + + return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime) } // EncryptionKey returns the best candidate Key for encrypting a message to the // given Entity. func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { - // Fail to find any encryption key if the primary key has expired. + // Fail to find any encryption key if the... 
i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked return Key{}, false } @@ -104,6 +137,8 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { subkey.Sig.FlagEncryptCommunications && subkey.PublicKey.PubKeyAlgo.CanEncrypt() && !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { candidateSubkey = i maxTime = subkey.Sig.CreationTime @@ -112,17 +147,16 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { if candidateSubkey != -1 { subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } // If we don't have any candidate subkeys for encryption and // the primary key doesn't have any usage metadata then we // assume that the primary key is ok. Or, if the primary key is // marked as ok to encrypt with, then we can obviously use it. - // Also, check expiry again just to be safe. if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !primaryKeyExpired { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + e.PrimaryKey.PubKeyAlgo.CanEncrypt() { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true } return Key{}, false @@ -137,10 +171,13 @@ func (e *Entity) SigningKey(now time.Time) (Key, bool) { // SigningKeyById return the Key for signing a message with this // Entity and keyID. func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { - // Fail to find any signing key if the primary key has expired. + // Fail to find any signing key if the... i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked return Key{}, false } @@ -152,8 +189,10 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { subkey.Sig.FlagSign && subkey.PublicKey.PubKeyAlgo.CanSign() && !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) && - (id == 0 || subkey.PrivateKey.KeyId == id) { + (id == 0 || subkey.PublicKey.KeyId == id) { candidateSubkey = idx maxTime = subkey.Sig.CreationTime } @@ -161,22 +200,63 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { if candidateSubkey != -1 { subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } // If we have no candidate subkey then we assume that it's ok to sign // with the primary key. 
Or, if the primary key is marked as ok to - // sign with, then we can use it. Also, check expiry again just to be safe. + // sign with, then we can use it. if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - e.PrimaryKey.PubKeyAlgo.CanSign() && !primaryKeyExpired && - (id == 0 || e.PrivateKey.KeyId == id) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + e.PrimaryKey.PubKeyAlgo.CanSign() && + (id == 0 || e.PrimaryKey.KeyId == id) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true } // No keys with a valid Signing Flag or no keys matched the id passed in return Key{}, false } +func revoked(revocations []*packet.Signature, now time.Time) bool { + for _, revocation := range revocations { + if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised { + // If the key is compromised, the key is considered revoked even before the revocation date. + return true + } + if !revocation.SigExpired(now) { + return true + } + } + return false +} + +// Revoked returns whether the entity has any direct key revocation signatures. +// Note that third-party revocation signatures are not supported. +// Note also that Identity and Subkey revocation should be checked separately. +func (e *Entity) Revoked(now time.Time) bool { + return revoked(e.Revocations, now) +} + +// Revoked returns whether the identity has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (i *Identity) Revoked(now time.Time) bool { + return revoked(i.Revocations, now) +} + +// Revoked returns whether the subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (s *Subkey) Revoked(now time.Time) bool { + return revoked(s.Revocations, now) +} + +// Revoked returns whether the key or subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +// Note also that Identity revocation should be checked separately. +// Normally, it's not necessary to call this function, except on keys returned by +// KeysById or KeysByIdUsage. +func (key *Key) Revoked(now time.Time) bool { + return revoked(key.Revocations, now) +} + // An EntityList contains one or more Entities. type EntityList []*Entity @@ -184,21 +264,14 @@ type EntityList []*Entity func (el EntityList) KeysById(id uint64) (keys []Key) { for _, e := range el { if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + ident := e.PrimaryIdentity() + selfSig := ident.SelfSignature + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) } for _, subKey := range e.Subkeys { if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) } } } @@ -210,15 +283,7 @@ func (el EntityList) KeysById(id uint64) (keys []Key) { // the bitwise-OR of packet.KeyFlag* values. 
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { + if key.SelfSignature != nil && key.SelfSignature.FlagsValid && requiredUsage != 0 { var usage byte if key.SelfSignature.FlagCertify { usage |= packet.KeyFlagCertify @@ -247,7 +312,7 @@ func (el EntityList) DecryptionKeys() (keys []Key) { for _, e := range el { for _, subKey := range e.Subkeys { if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) } } } @@ -442,11 +507,22 @@ func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { break } - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if sig.SigType != packet.SigTypeGenericCert && + sig.SigType != packet.SigTypePersonaCert && + sig.SigType != packet.SigTypeCasualCert && + sig.SigType != packet.SigTypePositiveCert && + sig.SigType != packet.SigTypeCertificationRevocation { + return errors.StructuralError("user ID signature with wrong type") + } + + + if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { return errors.StructuralError("user ID self-signature invalid: " + err.Error()) } - if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + if sig.SigType == packet.SigTypeCertificationRevocation { + identity.Revocations = append(identity.Revocations, sig) + } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { identity.SelfSignature = sig } identity.Signatures = append(identity.Signatures, sig) @@ -488,9 +564,9 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p switch sig.SigType { case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig + subKey.Revocations = append(subKey.Revocations, sig) case packet.SigTypeSubkeyBinding: - if shouldReplaceSubkeySig(subKey.Sig, sig) { + if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) { subKey.Sig = sig } } @@ -505,22 +581,6 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p return nil } -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - // SerializePrivate serializes an Entity, including private key material, but // excluding signatures from other entities, to the given Writer. // Identities and subkeys are re-signed in case they changed since NewEntry. 
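Taken together, the keys.go changes above make revocation a first-class part of key selection: revocation signatures are collected on the entity, its identities, and its subkeys, and the lookup paths reject revoked or expired material up front. A minimal sketch of how a caller might exercise the new accessors (the vendored import path is real; the key.asc filename is illustrative):

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	f, err := os.Open("key.asc") // hypothetical armored public key file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	entities, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}

	now := time.Now()
	for _, e := range entities {
		// Entity.Revoked reports direct key revocations only; identity
		// and subkey revocations are tracked on their own structs.
		if e.Revoked(now) || e.PrimaryIdentity().Revoked(now) {
			fmt.Printf("key %X is revoked\n", e.PrimaryKey.KeyId)
			continue
		}
		// EncryptionKey now skips revoked subkeys and expired
		// self-signatures, so a returned key should be usable as-is.
		if key, ok := e.EncryptionKey(now); ok {
			fmt.Printf("encrypt to key %X\n", key.PublicKey.KeyId)
		}
	}
}
```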
@@ -549,20 +609,37 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo if err != nil { return } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { return } if reSign { + if ident.SelfSignature == nil { + return goerrors.New("openpgp: can't re-sign identity without valid self-signature") + } err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) if err != nil { return } } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return + for _, revocation := range ident.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + if ident.SelfSignature != nil { + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } } } for _, subkey := range e.Subkeys { @@ -583,6 +660,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo } } } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } err = subkey.Sig.Serialize(w) if err != nil { return @@ -598,6 +681,12 @@ func (e *Entity) Serialize(w io.Writer) error { if err != nil { return err } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -615,6 +704,12 @@ func (e *Entity) Serialize(w io.Writer) error { if err != nil { return err } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } err = subkey.Sig.Serialize(w) if err != nil { return err @@ -659,14 +754,13 @@ func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Co // specified reason code and text (RFC4880 section-5.2.3.23). // If config is nil, sensible defaults will be used. 
func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { - reasonCode := uint8(reason) revSig := &packet.Signature{ Version: e.PrimaryKey.Version, CreationTime: config.Now(), SigType: packet.SigTypeKeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, Hash: config.Hash(), - RevocationReason: &reasonCode, + RevocationReason: &reason, RevocationReasonText: reasonText, IssuerKeyId: &e.PrimaryKey.KeyId, } @@ -686,22 +780,21 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea return errors.InvalidArgumentError("given subkey is not associated with this key") } - reasonCode := uint8(reason) revSig := &packet.Signature{ Version: e.PrimaryKey.Version, CreationTime: config.Now(), SigType: packet.SigTypeSubkeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, Hash: config.Hash(), - RevocationReason: &reasonCode, + RevocationReason: &reason, RevocationReasonText: reasonText, IssuerKeyId: &e.PrimaryKey.KeyId, } - if err := revSig.RevokeKey(sk.PublicKey, e.PrivateKey, config); err != nil { + if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil { return err } - sk.Sig = revSig + sk.Revocations = append(sk.Revocations, revSig) return nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go index 21d17c2f8be..a2219e64d1c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -138,6 +138,69 @@ heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB =IKnw -----END PGP PUBLIC KEY BLOCK-----` +const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: OpenPGP.js v4.10.10 +Comment: https://openpgpjs.org + +xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q +lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN +91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO +XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb +naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX +8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB +AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK +ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS +gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT +6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ +q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy ++I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY +bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j +SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk +kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu +Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC +GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW +IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1 +mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG +LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK +zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/ +BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ +aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD +rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm +yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2 +UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb 
+YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt +amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM +u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/ +Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS +gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB +cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N +nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN +kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF +I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb +hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z +FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw= +=+2T8 +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll +JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+ +iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0 +yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e +/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI +q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V +xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw +ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB +B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh +BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u +8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA +mcG3/TwG5GSQUUPkrDsGDA== +=mFWy +-----END PGP PUBLIC KEY BLOCK----- +` + const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR @@ -304,33 +367,34 @@ X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w -bGU+wsEUBBMBCgBIBYJfiWKKBYkBX+/UBQsJCAcCCRD7/MgqAV5zMAYVCgkICwIE -FgIDAQIXgAIbAwIeARYhBNGmbhojsYLJmA94jPv8yCoBXnMwAAAf0wv/a++a3DkL -CttbK4LRIiSry2wb97mxYQoWYzvkKYD1IP/KiamRwzjBKuRSE0qZ2uonQDRGc+zl -1H9dwkDLL4T9uJngCCPCBgFW/hOFPvF+WYEKHtOzunqx6KwDHkpdH+hpzfFzIhDo -aXiVnDGvJ3H/bVTKq1m2KPXO2ckkXPQXJX9Fx6kPHdvcq+3ZI75IzbD/ue5lBcsy -OKLzVu+KLxzlzGui6v1V0fTvU/uqvHlvUxcDAqMnIsDUPakjK2RDeQ38qtN6+PQ5 -/dT7vx2Wtzesqn2eDbDf5uRfSgmp2hJLJniAKjMCBVAJiOgPb0LXUIcwCGxaiWOA -g5ZvNWjX5bZ/FxqpLpOE9OReI5YY7ns7zqP4thLYYWe0Qdp9a2ezVrgzgrh/SLla -d73x/S9TrmLtYGGlbVUByJW+GXjW2Tt6iaa/WDFzx8NvZ/wzIAdGSEfLcvS+JBSP -2ppdY5Ac/2dK3PzYABkHvB/rhXIwlXnrFDU9efRHZfFqQqGauA64wfdIzsDNBF2l -nPIBDADWML9cbGMrp12CtF9b2P6z9TTT74S8iyBOzaSvdGDQY/sUtZXRg21HWamX -nn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3ofneYXoG+zeKc4dC86wa1TR2q9vW+RMX -SO4uImA+Uzula/6k1DogDf28qhCxMwG/i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6 -rrd5y2AObaifV7wIhEJnvqgFXDN2RXGjLeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA -0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/ -wGlQ01rh827KVZW4lXvqsge+wtnWlszcselGATyzqOK9LdHPdZGzROZYI2e8c+pa -LNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5nTU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV -8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwz -j8sxH48AEQEAAcLA9gQYAQoAIBYhBNGmbhojsYLJmA94jPv8yCoBXnMwBQJdpZzy -AhsMAAoJEPv8yCoBXnMw6f8L/26C34dkjBffTzMj5Bdzm8MtF67OYneJ4TQMw7+4 -1IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F66h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZ -QanYmtSxcVV2PL9+QEiNN3tzluhaWO//rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zp -f3u0k14itcv6alKY8+rLZvO1wIIeRZLmU0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn 
-3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzRLV9EXkbhIMez0deCVdeo+wFFklh8/5VK -2b0vk/+wqMJxfpa1lHvJLobzOP9fvrswsr92MA2+k901WeISR7qEzcI0Fdg8AyFA -ExaEK6VyjP7SXGLwvfisw34OxuZr3qmx1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWi -f9RSK4xjzRTe56iPeiSJJOIciMP9i2ldI+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj -5KjhX2PVNEJd3XZRzaXZE2aAMQ== -=522n +bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe +ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY +495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ +mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t +yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG +Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN +/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm +pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR +eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH +DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05 +ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ +UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT +74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3 +ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/ +i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj +LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/ +iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc +selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n +TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+ +Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk +jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6 +6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO// +rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm +U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR +LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw +sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx +1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld +I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=AmgT -----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go index fe0da9d3685..65203813778 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -395,6 +395,7 @@ const ( SigTypeDirectSignature = 0x1F SigTypeKeyRevocation = 0x20 SigTypeSubkeyRevocation = 0x28 + SigTypeCertificationRevocation = 0x30 ) // PublicKeyAlgorithm represents the different public key system specified for diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go index 28971c1ada2..297dc40ba8c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -735,13 +735,13 @@ func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { } // VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, -// made by the passed in signingKey. 
-func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signingKey *PublicKey) (err error) {
-	h, err := keyRevocationHash(pk, sig.Hash)
+// made by this public key, of the signed (sub)key.
+func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) {
+	h, err := keySignatureHash(pk, signed, sig.Hash)
 	if err != nil {
 		return err
 	}
-	return signingKey.VerifySignature(h, sig)
+	return pk.VerifySignature(h, sig)
 }
 
 // userIdSignatureHash returns a Hash of the message that needs to be signed
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
index 6b1704e4292..86602d02904 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
@@ -28,6 +28,10 @@ const (
 	KeyFlagSign
 	KeyFlagEncryptCommunications
 	KeyFlagEncryptStorage
+	KeyFlagSplitKey
+	KeyFlagAuthenticate
+	_
+	KeyFlagGroupKey
 )
 
 // Signature represents a signature. See RFC 4880, section 5.2.
@@ -68,14 +72,19 @@ type Signature struct {
 	IssuerFingerprint []byte
 	IsPrimaryId       *bool
 
+	// PolicyURI can be set to the URI of a document that describes the
+	// policy under which the signature was issued. See RFC 4880, section
+	// 5.2.3.20 for details.
+	PolicyURI string
+
 	// FlagsValid is set if any flags were given. See RFC 4880, section
 	// 5.2.3.21 for details.
-	FlagsValid                                                           bool
-	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool
+	FlagsValid                                                                                                         bool
+	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool
 
 	// RevocationReason is set if this signature has been revoked.
 	// See RFC 4880, section 5.2.3.23 for details.
- RevocationReason *uint8 + RevocationReason *ReasonForRevocation RevocationReasonText string // In a self-signature, these flags are set there is a features subpacket @@ -218,6 +227,7 @@ const ( prefHashAlgosSubpacket signatureSubpacketType = 21 prefCompressionSubpacket signatureSubpacketType = 22 primaryUserIdSubpacket signatureSubpacketType = 25 + policyUriSubpacket signatureSubpacketType = 26 keyFlagsSubpacket signatureSubpacketType = 27 reasonForRevocationSubpacket signatureSubpacketType = 29 featuresSubpacket signatureSubpacketType = 30 @@ -368,6 +378,15 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r if subpacket[0]&KeyFlagEncryptStorage != 0 { sig.FlagEncryptStorage = true } + if subpacket[0]&KeyFlagSplitKey != 0 { + sig.FlagSplitKey = true + } + if subpacket[0]&KeyFlagAuthenticate != 0 { + sig.FlagAuthenticate = true + } + if subpacket[0]&KeyFlagGroupKey != 0 { + sig.FlagGroupKey = true + } case reasonForRevocationSubpacket: // Reason For Revocation, section 5.2.3.23 if !isHashed { @@ -377,8 +396,8 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r err = errors.StructuralError("empty revocation reason subpacket") return } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] + sig.RevocationReason = new(ReasonForRevocation) + *sig.RevocationReason = ReasonForRevocation(subpacket[0]) sig.RevocationReasonText = string(subpacket[1:]) case featuresSubpacket: // Features subpacket, section 5.2.3.24 specifies a very general @@ -416,6 +435,12 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) } + case policyUriSubpacket: + // Policy URI, section 5.2.3.20 + if !isHashed { + return + } + sig.PolicyURI = string(subpacket) case issuerFingerprintSubpacket: v, l := subpacket[0], len(subpacket[1:]) if v == 5 && l != 32 || v != 5 && l != 20 { @@ -507,6 +532,9 @@ func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { if subpacket.hashed == hashed { n := serializeSubpacketLength(to, len(subpacket.contents)+1) to[n] = byte(subpacket.subpacketType) + if subpacket.isCritical { + to[n] |= 0x80 + } to = to[1+n:] n = copy(to, subpacket.contents) to = to[n:] @@ -714,6 +742,14 @@ func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config return sig.Sign(h, priv, config) } +// RevokeSubkey computes a subkey revocation signature of pub using priv. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error { + // Identical to a subkey binding signature + return sig.SignKey(pub, priv, config) +} + // Serialize marshals sig to w. Sign, SignUserId or SignKey must have been // called first. func (sig *Signature) Serialize(w io.Writer) (err error) { @@ -826,7 +862,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp } if sig.IssuerFingerprint != nil { contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) 
- subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, true, contents}) + subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents}) } if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { sigLifetime := make([]byte, 4) @@ -850,6 +886,15 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.FlagEncryptStorage { flags |= KeyFlagEncryptStorage } + if sig.FlagSplitKey { + flags |= KeyFlagSplitKey + } + if sig.FlagAuthenticate { + flags |= KeyFlagAuthenticate + } + if sig.FlagGroupKey { + flags |= KeyFlagGroupKey + } subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) } @@ -892,6 +937,10 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) } + if len(sig.PolicyURI) > 0 { + subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) + } + if len(sig.PreferredAEAD) > 0 { subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD}) } @@ -899,7 +948,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. if sig.RevocationReason != nil { subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, - append([]uint8{*sig.RevocationReason}, []uint8(sig.RevocationReasonText)...)}) + append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) } // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. 
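With RevocationReason now carried as a typed packet.ReasonForRevocation, generating a revocation goes through Entity.RevokeKey, which signs with the primary key's own algorithm instead of the previously hard-coded RSA. A short sketch under the assumption that the entity holds a decrypted private key and that a nil config falls back to defaults (the package name and reason text are illustrative):

```go
package revoke

import (
	"bytes"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// revokeAndExport revokes the primary key and returns the serialized
// key packets, revocation signature included.
func revokeAndExport(entity *openpgp.Entity) ([]byte, error) {
	// KeyCompromised is treated as retroactive by the revoked() helper
	// in keys.go, so the key counts as revoked at any reference time.
	err := entity.RevokeKey(packet.KeyCompromised, "primary key compromised", nil)
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer
	// Per the serialization hunks above, Serialize writes the entity's
	// Revocations directly after the primary key, ahead of user IDs
	// and subkeys.
	if err := entity.Serialize(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```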
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go index d6bea7d4acc..614fbafd5e1 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -156,5 +156,12 @@ func parseUserId(id string) (name, comment, email string) { name = strings.TrimSpace(id[n.start:n.end]) comment = strings.TrimSpace(id[c.start:c.end]) email = strings.TrimSpace(id[e.start:e.end]) + + // RFC 2822 3.4: alternate simple form of a mailbox + if email == "" && strings.ContainsRune(name, '@') { + email = name + name = "" + } + return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index a649ebbab39..acb7cc6e29f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -370,11 +370,25 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // If signature KeyID matches if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { - scr.md.Signature = sig - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - if scr.md.SignatureError == nil && scr.md.Signature.SigExpired(scr.config.Now()) { - scr.md.SignatureError = errors.ErrSignatureExpired + key := scr.md.SignedBy + signatureError := key.PublicKey.VerifySignature(scr.h, sig) + if signatureError == nil { + now := scr.config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + signatureError = errors.ErrKeyRevoked + } + if sig.SigExpired(now) { + signatureError = errors.ErrSignatureExpired + } + if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { + signatureError = errors.ErrKeyExpired + } } + scr.md.Signature = sig + scr.md.SignatureError = signatureError } else { scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) } @@ -483,10 +497,16 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, err = key.PublicKey.VerifySignature(h, sig) if err == nil { now := config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + return key.Entity, errors.ErrKeyRevoked + } if sig.SigExpired(now) { return key.Entity, errors.ErrSignatureExpired } - if key.PublicKey.KeyExpired(key.SelfSignature, now) { + if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { return key.Entity, errors.ErrKeyExpired } return key.Entity, nil diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index bb74b13872a..23a24f20305 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -208,6 +208,15 @@ func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signe return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) } +// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. 
The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) +} + // writeAndSign writes the data as a payload package and, optionally, signs // it. hints contains optional information, that is also encrypted, // that aids the recipients in processing the message. The resulting @@ -358,7 +367,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En var ok bool encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") } sig := to[i].PrimaryIdentity().SelfSignature @@ -416,7 +425,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } var payload io.WriteCloser - if aeadSupported { + if config.AEAD() != nil && aeadSupported { payload, err = packet.SerializeAEADEncrypted(dataWriter, symKey, cipher, mode, config) if err != nil { return diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go index b7f86da8695..d3d9839d639 100644 --- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go @@ -14,5 +14,5 @@ limitations under the License. */ -// Package events defines the event pushing and subscription service. +// Package events defines the ttrpc event service. package events diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s b/vendor/github.com/containerd/containerd/api/types/task/doc.go similarity index 91% rename from vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s rename to vendor/github.com/containerd/containerd/api/types/task/doc.go index c073fa4ad7c..e10c7a46993 100644 --- a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s +++ b/vendor/github.com/containerd/containerd/api/types/task/doc.go @@ -13,3 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ + +// Package task defines the task service. 
+package task diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index 8b600673fe3..5606cc88a94 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,15 +21,14 @@ package cio import ( "context" + "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" "syscall" "github.com/containerd/fifo" - "github.com/pkg/errors" ) // NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root @@ -38,7 +38,7 @@ func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) { return nil, err } } - dir, err := ioutil.TempDir(root, "") + dir, err := os.MkdirTemp(root, "") if err != nil { return nil, err } @@ -112,7 +112,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { if fifos.Stdin != "" { if f.Stdin, retErr = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stdin fifo") + return f, fmt.Errorf("failed to open stdin fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdin != nil { @@ -122,7 +122,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if fifos.Stdout != "" { if f.Stdout, retErr = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stdout fifo") + return f, fmt.Errorf("failed to open stdout fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdout != nil { @@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if !fifos.Terminal && fifos.Stderr != "" { if f.Stderr, retErr = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stderr fifo") + return f, fmt.Errorf("failed to open stderr fifo: %w", retErr) } } return f, nil diff --git a/vendor/github.com/containerd/containerd/cio/io_windows.go b/vendor/github.com/containerd/containerd/cio/io_windows.go index ded475788f6..f3d736a6d42 100644 --- a/vendor/github.com/containerd/containerd/cio/io_windows.go +++ b/vendor/github.com/containerd/containerd/cio/io_windows.go @@ -23,7 +23,6 @@ import ( winio "github.com/Microsoft/go-winio" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) const pipeRoot = `\\.\pipe` @@ -54,7 +53,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdin != "" { l, err := winio.ListenPipe(fifos.Stdin, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin) + return nil, fmt.Errorf("failed to create stdin pipe %s: %w", fifos.Stdin, err) } cios.closers = append(cios.closers, l) @@ -77,7 +76,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdout != "" { l, err := winio.ListenPipe(fifos.Stdout, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout) + return nil, fmt.Errorf("failed to create stdout pipe %s: %w", fifos.Stdout, err) } cios.closers = append(cios.closers, l) @@ -100,7 +99,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stderr != "" { l, err := winio.ListenPipe(fifos.Stderr, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to 
create stderr pipe %s", fifos.Stderr) + return nil, fmt.Errorf("failed to create stderr pipe %s: %w", fifos.Stderr, err) } cios.closers = append(cios.closers, l) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go b/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go new file mode 100644 index 00000000000..1391884cde7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package defaults + +const ( + // DefaultRootDir is the default location used by containerd to store + // persistent data + DefaultRootDir = "/var/lib/containerd" + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = "/var/run/containerd" + // DefaultAddress is the default unix socket address + DefaultAddress = "/var/run/containerd/containerd.sock" + // DefaultDebugAddress is the default unix socket address for pprof data + DefaultDebugAddress = "/var/run/containerd/debug.sock" + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. + DefaultFIFODir = "/var/run/containerd/fifo" + // DefaultRuntime would be a multiple of choices, thus empty + DefaultRuntime = "" + // DefaultConfigDir is the default location for config files. + DefaultConfigDir = "/etc/containerd" +) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 6b69cd06b9b..8e2619a381a 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index a80700075f9..9f4bed8b077 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go index 05a35228ca4..87622559708 100644 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -17,7 +17,7 @@ // Package errdefs defines the common errors used throughout containerd // packages. // -// Use with errors.Wrap and error.Wrapf to add context to an error. +// Use with fmt.Errorf to add context to an error. // // To detect an error class, use the IsXXX functions to tell whether an error // is of a certain type. 
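The containerd hunks in this area replace github.com/pkg/errors with stdlib wrapping throughout; an error class like errdefs.ErrNotFound survives any number of %w wraps and is still recoverable with the IsXXX helpers. A small sketch (the lookup function is illustrative, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

// lookup stands in for any API that fails with a classed error.
func lookup(id string) error {
	return fmt.Errorf("container %q: %w", id, errdefs.ErrNotFound)
}

func main() {
	err := lookup("redis")
	// IsNotFound unwraps through the fmt.Errorf chain via errors.Is.
	fmt.Println(errdefs.IsNotFound(err)) // prints: true
}
```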
@@ -28,8 +28,7 @@ package errdefs import ( "context" - - "github.com/pkg/errors" + "errors" ) // Definitions of common error types used throughout containerd. All containerd diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index 209f63bd0fc..7a9b33e05af 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -18,9 +18,9 @@ package errdefs import ( "context" + "fmt" "strings" - "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -68,9 +68,9 @@ func ToGRPC(err error) error { // ToGRPCf maps the error to grpc error codes, assembling the formatting string // and combining it with the target error string. // -// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(errors.Wrapf(err, format, args...)) + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) } // FromGRPC returns the underlying error from a grpc service based on the grpc error code @@ -104,9 +104,9 @@ func FromGRPC(err error) error { msg := rebaseMessage(cls, err) if msg != "" { - err = errors.Wrap(cls, msg) + err = fmt.Errorf("%s: %w", msg, cls) } else { - err = errors.WithStack(cls) + err = cls } return err diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go new file mode 100644 index 00000000000..a1f385d7abd --- /dev/null +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -0,0 +1,251 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package exchange + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/typeurl" + goevents "github.com/docker/go-events" + "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" +) + +// Exchange broadcasts events +type Exchange struct { + broadcaster *goevents.Broadcaster +} + +// NewExchange returns a new event Exchange +func NewExchange() *Exchange { + return &Exchange{ + broadcaster: goevents.NewBroadcaster(), + } +} + +var _ events.Publisher = &Exchange{} +var _ events.Forwarder = &Exchange{} +var _ events.Subscriber = &Exchange{} + +// Forward accepts an envelope to be directly distributed on the exchange. +// +// This is useful when an event is forwarded on behalf of another namespace or +// when the event is propagated on behalf of another publisher. 
+func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) { + if err := validateEnvelope(envelope); err != nil { + return err + } + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error forwarding event") + } else { + logger.Debug("event forwarded") + } + }() + + return e.broadcaster.Write(envelope) +} + +// Publish packages and sends an event. The caller will be considered the +// initial publisher of the event. This means the timestamp will be calculated +// at this point and this method may read from the calling context. +func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) { + var ( + namespace string + encoded *types.Any + envelope events.Envelope + ) + + namespace, err = namespaces.NamespaceRequired(ctx) + if err != nil { + return fmt.Errorf("failed publishing event: %w", err) + } + if err := validateTopic(topic); err != nil { + return fmt.Errorf("envelope topic %q: %w", topic, err) + } + + encoded, err = typeurl.MarshalAny(event) + if err != nil { + return err + } + + envelope.Timestamp = time.Now().UTC() + envelope.Namespace = namespace + envelope.Topic = topic + envelope.Event = encoded + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error publishing event") + } else { + logger.Debug("event published") + } + }() + + return e.broadcaster.Write(&envelope) +} + +// Subscribe to events on the exchange. Events are sent through the returned +// channel ch. If an error is encountered, it will be sent on channel errs and +// errs will be closed. To end the subscription, cancel the provided context. +// +// Zero or more filters may be provided as strings. Only events that match +// *any* of the provided filters will be sent on the channel. The filters use +// the standard containerd filters package syntax. +func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) { + var ( + evch = make(chan *events.Envelope) + errq = make(chan error, 1) + channel = goevents.NewChannel(0) + queue = goevents.NewQueue(channel) + dst goevents.Sink = queue + ) + + closeAll := func() { + channel.Close() + queue.Close() + e.broadcaster.Remove(dst) + close(errq) + } + + ch = evch + errs = errq + + if len(fs) > 0 { + filter, err := filters.ParseAll(fs...) + if err != nil { + errq <- fmt.Errorf("failed parsing subscription filters: %w", err) + closeAll() + return + } + + dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool { + return filter.Match(adapt(gev)) + })) + } + + e.broadcaster.Add(dst) + + go func() { + defer closeAll() + + var err error + loop: + for { + select { + case ev := <-channel.C: + env, ok := ev.(*events.Envelope) + if !ok { + // TODO(stevvooe): For the most part, we are well protected + // from this condition. Both Forward and Publish protect + // from this. 
+ err = fmt.Errorf("invalid envelope encountered %#v; please file a bug", ev) + break + } + + select { + case evch <- env: + case <-ctx.Done(): + break loop + } + case <-ctx.Done(): + break loop + } + } + + if err == nil { + if cerr := ctx.Err(); cerr != context.Canceled { + err = cerr + } + } + + errq <- err + }() + + return +} + +func validateTopic(topic string) error { + if topic == "" { + return fmt.Errorf("must not be empty: %w", errdefs.ErrInvalidArgument) + } + + if topic[0] != '/' { + return fmt.Errorf("must start with '/': %w", errdefs.ErrInvalidArgument) + } + + if len(topic) == 1 { + return fmt.Errorf("must have at least one component: %w", errdefs.ErrInvalidArgument) + } + + components := strings.Split(topic[1:], "/") + for _, component := range components { + if err := identifiers.Validate(component); err != nil { + return fmt.Errorf("failed validation on component %q: %w", component, err) + } + } + + return nil +} + +func validateEnvelope(envelope *events.Envelope) error { + if err := identifiers.Validate(envelope.Namespace); err != nil { + return fmt.Errorf("event envelope has invalid namespace: %w", err) + } + + if err := validateTopic(envelope.Topic); err != nil { + return fmt.Errorf("envelope topic %q: %w", envelope.Topic, err) + } + + if envelope.Timestamp.IsZero() { + return fmt.Errorf("timestamp must be set on forwarded event: %w", errdefs.ErrInvalidArgument) + } + + return nil +} + +func adapt(ev interface{}) filters.Adaptor { + if adaptor, ok := ev.(filters.Adaptor); ok { + return adaptor + } + + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + return "", false + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go new file mode 100644 index 00000000000..5a9c559c1e0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/adaptor.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +// Adaptor specifies the mapping of fieldpaths to a type. For the given field +// path, the value and whether it is present should be returned. The mapping of +// the fieldpath to a field is deferred to the adaptor implementation, but +// should generally follow protobuf field path/mask semantics. +type Adaptor interface { + Field(fieldpath []string) (value string, present bool) +} + +// AdapterFunc allows implementation specific matching of fieldpaths +type AdapterFunc func(fieldpath []string) (string, bool) + +// Field returns the field name and true if it exists +func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { + return fn(fieldpath) +} diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go new file mode 100644 index 00000000000..cf09d8d9e4f --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -0,0 +1,179 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package filters defines a syntax and parser that can be used for the
+// filtration of items across the containerd API. The core is built on the
+// concept of protobuf field paths, with quoting. Several operators allow the
+// user to flexibly select items based on field presence, equality, inequality
+// and regular expressions. Flexible adaptors support working with any type.
+//
+// The syntax is fairly familiar, if you've used container ecosystem
+// projects. At the core, we base it on the concept of protobuf field
+// paths, augmenting with the ability to quote portions of the field path
+// to match arbitrary labels. These "selectors" come in the following
+// syntax:
+//
+// ```
+// <fieldpath>[<operator><value>]
+// ```
+//
+// A basic example is as follows:
+//
+// ```
+// name==foo
+// ```
+//
+// This would match all objects that have a field `name` with the value
+// `foo`. If we only want to test if the field is present, we can omit the
+// operator. This is most useful for matching labels in containerd. The
+// following will match objects that have the field "labels" and have the
+// label "foo" defined:
+//
+// ```
+// labels.foo
+// ```
+//
+// We also allow for quoting of parts of the field path to allow matching
+// of arbitrary items:
+//
+// ```
+// labels."very complex label"==something
+// ```
+//
+// We also define `!=` and `~=` as operators. The `!=` will match all
+// objects that don't match the value for a field and `~=` will compile the
+// target value as a regular expression and match the field value against that.
+//
+// Selectors can be combined using a comma, such that the resulting
+// selector will require all selectors are matched for the object to match.
+// The following example will match objects that are named `foo` and have
+// the label `bar`:
+//
+// ```
+// name==foo,labels.bar
+// ```
+//
+package filters
+
+import (
+	"regexp"
+
+	"github.com/containerd/containerd/log"
+)
+
+// Filter matches specific resources based on the provided filter
+type Filter interface {
+	Match(adaptor Adaptor) bool
+}
+
+// FilterFunc is a function that handles matching with an adaptor
+type FilterFunc func(Adaptor) bool
+
+// Match matches the FilterFunc returning true if the object matches the filter
+func (fn FilterFunc) Match(adaptor Adaptor) bool {
+	return fn(adaptor)
+}
+
+// Always is a filter that always returns true for any type of object
+var Always FilterFunc = func(adaptor Adaptor) bool {
+	return true
+}
+
+// Any allows multiple filters to be matched against the object
+type Any []Filter
+
+// Match returns true if any of the provided filters are true
+func (m Any) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if m.Match(adaptor) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// All allows multiple filters to be matched against the object
+type All []Filter
+
+// Match only returns true if all filters match the object
+func (m All) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if !m.Match(adaptor) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type operator int
+
+const (
+	operatorPresent = iota
+	operatorEqual
+	operatorNotEqual
+	operatorMatches
+)
+
+func (op operator) String() string {
+	switch op {
+	case operatorPresent:
+		return "?"
+	case operatorEqual:
+		return "=="
+	case operatorNotEqual:
+		return "!="
+	case operatorMatches:
+		return "~="
+	}
+
+	return "unknown"
+}
+
+type selector struct {
+	fieldpath []string
+	operator  operator
+	value     string
+	re        *regexp.Regexp
+}
+
+func (m selector) Match(adaptor Adaptor) bool {
+	value, present := adaptor.Field(m.fieldpath)
+
+	switch m.operator {
+	case operatorPresent:
+		return present
+	case operatorEqual:
+		return present && value == m.value
+	case operatorNotEqual:
+		return value != m.value
+	case operatorMatches:
+		if m.re == nil {
+			r, err := regexp.Compile(m.value)
+			if err != nil {
+				log.L.Errorf("error compiling regexp %q", m.value)
+				return false
+			}
+
+			m.re = r
+		}
+
+		return m.re.MatchString(value)
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go
new file mode 100644
index 00000000000..49182d7b7bd
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/parser.go
@@ -0,0 +1,291 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package filters
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/containerd/containerd/errdefs"
+)
+
+/*
+Parse the strings into a filter that may be used with an adaptor.
+
+The filter is made up of zero or more selectors.
+
+The format is a comma separated list of expressions, in the form of
+`<fieldpath><op><value>`, known as selectors. All selectors must match the
+target object for the filter to be true.
+
+We define the operators "==" for equality, "!=" for not equal and "~=" for a
+regular expression. If the operator and value are not present, the matcher will
+test for the presence of a value, as defined by the target object.
+
+The formal grammar is as follows:
+
+selectors := selector ("," selector)*
+selector := fieldpath (operator value)
+fieldpath := field ('.' field)*
+field := quoted | [A-Za-z] [A-Za-z0-9_]+
+operator := "==" | "!=" | "~="
+value := quoted | [^\s,]+
+quoted := <go string syntax>
+
+*/
+func Parse(s string) (Filter, error) {
+	// special case empty to match all
+	if s == "" {
+		return Always, nil
+	}
+
+	p := parser{input: s}
+	return p.parse()
+}
+
+// ParseAll parses each filter in ss and returns a filter that will return true
+// if any filter matches the expression.
+//
+// If no filters are provided, the filter will match anything.
+func ParseAll(ss ...string) (Filter, error) {
+	if len(ss) == 0 {
+		return Always, nil
+	}
+
+	var fs []Filter
+	for _, s := range ss {
+		f, err := Parse(s)
+		if err != nil {
+			return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument)
+		}
+
+		fs = append(fs, f)
+	}
+
+	return Any(fs), nil
+}
+
+type parser struct {
+	input   string
+	scanner scanner
+}
+
+func (p *parser) parse() (Filter, error) {
+	p.scanner.init(p.input)
+
+	ss, err := p.selectors()
+	if err != nil {
+		return nil, fmt.Errorf("filters: %w", err)
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selectors() (Filter, error) {
+	s, err := p.selector()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := All{s}
+
+loop:
+	for {
+		tok := p.scanner.peek()
+		switch tok {
+		case ',':
+			pos, tok, _ := p.scanner.scan()
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a separator")
+			}
+
+			s, err := p.selector()
+			if err != nil {
+				return nil, err
+			}
+
+			ss = append(ss, s)
+		case tokenEOF:
+			break loop
+		default:
+			return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
+		}
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selector() (selector, error) {
+	fieldpath, err := p.fieldpath()
+	if err != nil {
+		return selector{}, err
+	}
+
+	switch p.scanner.peek() {
+	case ',', tokenSeparator, tokenEOF:
+		return selector{
+			fieldpath: fieldpath,
+			operator:  operatorPresent,
+		}, nil
+	}
+
+	op, err := p.operator()
+	if err != nil {
+		return selector{}, err
+	}
+
+	var allowAltQuotes bool
+	if op == operatorMatches {
+		allowAltQuotes = true
+	}
+
+	value, err := p.value(allowAltQuotes)
+	if err != nil {
+		if err == io.EOF {
+			return selector{}, io.ErrUnexpectedEOF
+		}
+		return selector{}, err
+	}
+
+	return selector{
+		fieldpath: fieldpath,
+		value:     value,
+		operator:  op,
+	}, nil
+}
+
+func (p *parser) fieldpath() ([]string, error) {
+	f, err := p.field()
+	if err != nil {
+		return nil, err
+	}
+
+	fs := []string{f}
+loop:
+	for {
+		tok := p.scanner.peek() // lookahead to consume field separator
+
+		switch tok {
+		case '.':
+			pos, tok, _ := p.scanner.scan() // consume separator
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a field separator (`.`)")
+			}
+
+			f, err := p.field()
+			if err != nil {
+				return nil, err
+			}
+
+			fs = append(fs, f)
+		default:
+			// let the layer above handle the other bad cases.
+ break loop + } + } + + return fs, nil +} + +func (p *parser) field() (string, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, false) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected field or quoted") +} + +func (p *parser) operator() (operator, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenOperator: + switch s { + case "==": + return operatorEqual, nil + case "!=": + return operatorNotEqual, nil + case "~=": + return operatorMatches, nil + default: + return 0, p.mkerr(pos, "unsupported operator %q", s) + } + case tokenIllegal: + return 0, p.mkerr(pos, p.scanner.err) + } + + return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) +} + +func (p *parser) value(allowAltQuotes bool) (string, error) { + pos, tok, s := p.scanner.scan() + + switch tok { + case tokenValue, tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, allowAltQuotes) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected value or quoted") +} + +func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { + if !allowAlts && s[0] != '\'' && s[0] != '"' { + return "", p.mkerr(pos, "invalid quote encountered") + } + + uq, err := unquote(s) + if err != nil { + return "", p.mkerr(pos, "unquoting failed: %v", err) + } + + return uq, nil +} + +type parseError struct { + input string + pos int + msg string +} + +func (pe parseError) Error() string { + if pe.pos < len(pe.input) { + before := pe.input[:pe.pos] + location := pe.input[pe.pos : pe.pos+1] // need to handle end + after := pe.input[pe.pos+1:] + + return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) + } + + return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) +} + +func (p *parser) mkerr(pos int, format string, args ...interface{}) error { + return fmt.Errorf("parse error: %w", parseError{ + input: p.input, + pos: pos, + msg: fmt.Sprintf(format, args...), + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go new file mode 100644 index 00000000000..b76aab9b4a7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -0,0 +1,252 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "errors" + "unicode/utf8" +) + +// NOTE(stevvooe): Most of this code in this file is copied from the stdlib +// strconv package and modified to be able to handle quoting with `/` and `|` +// as delimiters. The copyright is held by the Go authors. + +var errQuoteSyntax = errors.New("quote syntax error") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s. 
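The alternate quote characters handled in quote.go below exist so that `~=` patterns can carry commas, spaces, and quotes without escaping; parser.value enables them only for that operator. A hedged sketch of both the regex quoting and the positional error rendering produced by parseError above (the expected error text in the comment is an assumption about this exact version):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// Regex values for "~=" may be wrapped in / or | (allowAltQuotes),
	// so the pattern below needs no escaping.
	f, err := filters.Parse(`name~=/^foo-[0-9]+$/`)
	if err != nil {
		panic(err)
	}

	// AdapterFunc is assumed from the package's adaptor.go (not in this diff).
	adaptor := filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 1 && fieldpath[0] == "name" {
			return "foo-42", true
		}
		return "", false
	})
	fmt.Println(f.Match(adaptor)) // true

	// parseError points at the offending position, e.g.:
	// filters: parse error: [name >|=|< foo]: unsupported operator "="
	_, err = filters.Parse("name=foo")
	fmt.Println(err)
}
```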
+// It returns four values: +// +// 1) value, the decoded Unicode code point or byte value; +// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3) tail, the remainder of the string after the character; and +// 4) an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// +// This is from Go strconv package, modified to support `|` and `/` as double +// quotes for use with regular expressions. +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): + err = errQuoteSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = errQuoteSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errQuoteSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errQuoteSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errQuoteSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = errQuoteSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errQuoteSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errQuoteSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"', '|', '/': + if c != quote { + err = errQuoteSyntax + return + } + value = rune(c) + default: + err = errQuoteSyntax + return + } + tail = s + return +} + +// unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +// +// This is modified from the standard library to support `|` and `/` as quote +// characters for use with regular expressions. +func unquote(s string) (string, error) { + n := len(s) + if n < 2 { + return "", errQuoteSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", errQuoteSyntax + } + s = s[1 : n-1] + + if quote == '`' { + if contains(s, '`') { + return "", errQuoteSyntax + } + if contains(s, '\r') { + // -1 because we know there is at least one \r to remove. 
+ buf := make([]byte, 0, len(s)-1) + for i := 0; i < len(s); i++ { + if s[i] != '\r' { + buf = append(buf, s[i]) + } + } + return string(buf), nil + } + return s, nil + } + if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { + return "", errQuoteSyntax + } + if contains(s, '\n') { + return "", errQuoteSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + switch quote { + case '"', '/', '|': // pipe and slash are treated like double quote + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", errQuoteSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go new file mode 100644 index 00000000000..6a485467b8a --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/scanner.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package filters + +import ( + "unicode" + "unicode/utf8" +) + +const ( + tokenEOF = -(iota + 1) + tokenQuoted + tokenValue + tokenField + tokenSeparator + tokenOperator + tokenIllegal +) + +type token rune + +func (t token) String() string { + switch t { + case tokenEOF: + return "EOF" + case tokenQuoted: + return "Quoted" + case tokenValue: + return "Value" + case tokenField: + return "Field" + case tokenSeparator: + return "Separator" + case tokenOperator: + return "Operator" + case tokenIllegal: + return "Illegal" + } + + return string(t) +} + +func (t token) GoString() string { + return "token" + t.String() +} + +type scanner struct { + input string + pos int + ppos int // bounds the current rune in the string + value bool + err string +} + +func (s *scanner) init(input string) { + s.input = input + s.pos = 0 + s.ppos = 0 +} + +func (s *scanner) next() rune { + if s.pos >= len(s.input) { + return tokenEOF + } + s.pos = s.ppos + + r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) + s.ppos += w + if r == utf8.RuneError { + if w > 0 { + s.error("rune error") + return tokenIllegal + } + return tokenEOF + } + + if r == 0 { + s.error("unexpected null") + return tokenIllegal + } + + return r +} + +func (s *scanner) peek() rune { + pos := s.pos + ppos := s.ppos + ch := s.next() + s.pos = pos + s.ppos = ppos + return ch +} + +func (s *scanner) scan() (nextp int, tk token, text string) { + var ( + ch = s.next() + pos = s.pos + ) + +chomp: + switch { + case ch == tokenEOF: + case ch == tokenIllegal: + case isQuoteRune(ch): + if !s.scanQuoted(ch) { + return pos, tokenIllegal, s.input[pos:s.ppos] + } + return pos, tokenQuoted, s.input[pos:s.ppos] + case isSeparatorRune(ch): + s.value = false + return pos, tokenSeparator, s.input[pos:s.ppos] + case isOperatorRune(ch): + s.scanOperator() + s.value = true + return pos, tokenOperator, s.input[pos:s.ppos] + case unicode.IsSpace(ch): + // chomp + ch = s.next() + pos = s.pos + goto chomp + case s.value: + s.scanValue() + s.value = false + return pos, tokenValue, s.input[pos:s.ppos] + case isFieldRune(ch): + s.scanField() + return pos, tokenField, s.input[pos:s.ppos] + } + + return s.pos, token(ch), "" +} + +func (s *scanner) scanField() { + for { + ch := s.peek() + if !isFieldRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanOperator() { + for { + ch := s.peek() + switch ch { + case '=', '!', '~': + s.next() + default: + return + } + } +} + +func (s *scanner) scanValue() { + for { + ch := s.peek() + if !isValueRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanQuoted(quote rune) bool { + var illegal bool + ch := s.next() // read character after quote + for ch != quote { + if ch == '\n' || ch < 0 { + s.error("quoted literal not terminated") + return false + } + if ch == '\\' { + var legal bool + ch, legal = s.scanEscape(quote) + if !legal { + illegal = true + } + } else { + ch = s.next() + } + } + return !illegal +} + +func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { + ch = s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + // nothing to do + ch = s.next() + legal = true + case '0', '1', '2', '3', '4', '5', '6', '7': + ch, legal = s.scanDigits(ch, 8, 3) + case 'x': + ch, legal = s.scanDigits(s.next(), 16, 2) + case 'u': + ch, legal = s.scanDigits(s.next(), 16, 4) + case 'U': + ch, legal = s.scanDigits(s.next(), 16, 8) + default: + s.error("illegal escape sequence") + } + return +} + +func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.error("illegal numeric escape sequence") + return ch, false + } + return ch, true +} + +func (s *scanner) error(msg string) { + if s.err == "" { + s.err = msg + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func isFieldRune(r rune) bool { + return (r == '_' || isAlphaRune(r) || isDigitRune(r)) +} + +func isAlphaRune(r rune) bool { + return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' +} + +func isDigitRune(r rune) bool { + return r >= '0' && r <= '9' +} + +func isOperatorRune(r rune) bool { + switch r { + case '=', '!', '~': + return true + } + + return false +} + +func isQuoteRune(r rune) bool { + switch r { + case '/', '|', '"': // maybe add single quoting? + return true + } + + return false +} + +func isSeparatorRune(r rune) bool { + switch r { + case ',', '.': + return true + } + + return false +} + +func isValueRune(r rune) bool { + return r != ',' && !unicode.IsSpace(r) && + (unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsNumber(r) || + unicode.IsGraphic(r) || + unicode.IsPunct(r)) +} diff --git a/vendor/github.com/containerd/containerd/identifiers/validate.go b/vendor/github.com/containerd/containerd/identifiers/validate.go index f52317b491b..cbd3a52ba9e 100644 --- a/vendor/github.com/containerd/containerd/identifiers/validate.go +++ b/vendor/github.com/containerd/containerd/identifiers/validate.go @@ -25,10 +25,10 @@ package identifiers import ( + "fmt" "regexp" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" ) const ( @@ -51,15 +51,15 @@ var ( // In general identifiers that pass this validation should be safe for use as filesystem path components. func Validate(s string) error { if len(s) == 0 { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty") + return fmt.Errorf("identifier must not be empty: %w", errdefs.ErrInvalidArgument) } if len(s) > maxLength { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength) + return fmt.Errorf("identifier %q greater than maximum length (%d characters): %w", s, maxLength, errdefs.ErrInvalidArgument) } if !identifierRe.MatchString(s) { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe) + return fmt.Errorf("identifier %q must match %v: %w", s, identifierRe, errdefs.ErrInvalidArgument) } return nil } diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 37b6a7d1c8a..0db9562b82b 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -52,7 +52,8 @@ const ( // WithLogger returns a new context with the provided logger. Use in // combination with logger.WithField(s) for great effect. func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) + e := logger.WithContext(ctx) + return context.WithValue(ctx, loggerKey{}, e) } // GetLogger retrieves the current logger from the context. 
If no logger is @@ -61,7 +62,7 @@ func GetLogger(ctx context.Context) *logrus.Entry { logger := ctx.Value(loggerKey{}) if logger == nil { - return L + return L.WithContext(ctx) } return logger.(*logrus.Entry) diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go index b53c9012c1b..e5e23fe4301 100644 --- a/vendor/github.com/containerd/containerd/namespaces/context.go +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -18,11 +18,11 @@ package namespaces import ( "context" + "fmt" "os" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/identifiers" - "github.com/pkg/errors" ) const ( @@ -69,10 +69,10 @@ func Namespace(ctx context.Context) (string, bool) { func NamespaceRequired(ctx context.Context) (string, error) { namespace, ok := Namespace(ctx) if !ok || namespace == "" { - return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") + return "", fmt.Errorf("namespace is required: %w", errdefs.ErrFailedPrecondition) } if err := identifiers.Validate(namespace); err != nil { - return "", errors.Wrap(err, "namespace validation") + return "", fmt.Errorf("namespace validation: %w", err) } return namespace, nil } diff --git a/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go b/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go index b382215025d..176a0e6673a 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go @@ -16,11 +16,11 @@ package constants -// TODO(random-liu): Merge annotations package into this package. - const ( // K8sContainerdNamespace is the namespace we use to connect containerd. K8sContainerdNamespace = "k8s.io" - // CRIVersion is the CRI version supported by the CRI plugin. - CRIVersion = "v1alpha2" + // CRIVersion is the latest CRI version supported by the CRI plugin. + CRIVersion = "v1" + // CRIVersionAlpha is the alpha version of CRI supported by the CRI plugin. + CRIVersionAlpha = "v1alpha2" ) diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go index 59d41411f23..2cdc97e1de5 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go @@ -25,7 +25,7 @@ import ( "github.com/containerd/containerd/cio" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // AttachOptions specifies how to attach to a container. diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go index 2780b958ab5..f0f90840160 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go index dcc9fe6eb94..b6c9c12028f 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -19,13 +17,13 @@ package io import ( + "fmt" "io" "net" "os" "sync" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -62,7 +60,7 @@ func openPipe(ctx context.Context, fn string, flag int, perm os.FileMode) (io.Re func (p *pipe) Write(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Write(b) } @@ -70,7 +68,7 @@ func (p *pipe) Write(b []byte) (int, error) { func (p *pipe) Read(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Read(b) } diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go b/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go index 27721e71724..674a89fdff8 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go @@ -21,11 +21,10 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "time" "github.com/sirupsen/logrus" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cioutil "github.com/containerd/containerd/pkg/ioutil" ) @@ -43,7 +42,7 @@ const ( // NewDiscardLogger creates logger which discards all the input. func NewDiscardLogger() io.WriteCloser { - return cioutil.NewNopWriteCloser(ioutil.Discard) + return cioutil.NewNopWriteCloser(io.Discard) } // NewCRILogger returns a write closer which redirect container log into diff --git a/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go b/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go index d0e0bf37efa..cf027eaf8c0 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go @@ -18,8 +18,8 @@ package util import ( "encoding/json" - - "github.com/pkg/errors" + "errors" + "fmt" ) // DeepCopy makes a deep copy from src into dst. 
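Since the contract is easy to misuse, a usage sketch for DeepCopy, whose body follows in the next hunk: the copy is a JSON round-trip, so dst must be a non-nil pointer and only exported, JSON-serializable fields are copied. The config type here is illustrative:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/pkg/cri/util"
)

type config struct {
	Root   string            `json:"root"`
	Labels map[string]string `json:"labels"`
}

func main() {
	src := config{Root: "/var/lib/containerd", Labels: map[string]string{"env": "dev"}}

	var dst config
	if err := util.DeepCopy(&dst, &src); err != nil {
		panic(err)
	}

	// dst owns its own map; mutating it does not touch src.
	dst.Labels["env"] = "prod"
	fmt.Println(src.Labels["env"], dst.Labels["env"]) // dev prod
}
```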
@@ -32,11 +32,11 @@ func DeepCopy(dst interface{}, src interface{}) error {
 	}
 	bytes, err := json.Marshal(src)
 	if err != nil {
-		return errors.Wrap(err, "unable to marshal src")
+		return fmt.Errorf("unable to marshal src: %w", err)
 	}
 	err = json.Unmarshal(bytes, dst)
 	if err != nil {
-		return errors.Wrap(err, "unable to unmarshal into dst")
+		return fmt.Errorf("unable to unmarshal into dst: %w", err)
 	}
 	return nil
 }
diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go
index aa604baab92..74c303b944f 100644
--- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go
+++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go
@@ -18,10 +18,9 @@ package dialer
 
 import (
 	"context"
+	"fmt"
 	"net"
 	"time"
-
-	"github.com/pkg/errors"
 )
 
 type dialResult struct {
@@ -74,6 +73,6 @@ func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) {
 				dr.c.Close()
 			}
 		}()
-		return nil, errors.Errorf("dial %s: timeout", address)
+		return nil, fmt.Errorf("dial %s: timeout", address)
 	}
 }
diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go
index e7d19583395..b4304ffbf1f 100644
--- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go
+++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 /*
diff --git a/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go
new file mode 100644
index 00000000000..bc1af75abb7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go
@@ -0,0 +1,109 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package shutdown
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// ErrShutdown is the error condition when a context has been fully shutdown
+var ErrShutdown = errors.New("shutdown")
+
+// Service is used to facilitate shutdown through callback
+// registration and shutdown initiation
+type Service interface {
+	// Shutdown initiates shutdown
+	Shutdown()
+	// RegisterCallback registers functions to be called on shutdown and before
+	// the shutdown channel is closed. A callback error will propagate to the
+	// context error
+	RegisterCallback(func(context.Context) error)
+}
+
+// WithShutdown returns a context which is similar to a cancel context, but
+// with callbacks which can propagate to the context error. Unlike a cancel
+// context, the shutdown context cannot be canceled from the parent context.
+// However, future child contexts will be canceled upon shutdown.
+func WithShutdown(ctx context.Context) (context.Context, Service) { + ss := &shutdownService{ + Context: ctx, + doneC: make(chan struct{}), + timeout: 30 * time.Second, + } + return ss, ss +} + +type shutdownService struct { + context.Context + + mu sync.Mutex + isShutdown bool + callbacks []func(context.Context) error + doneC chan struct{} + err error + timeout time.Duration +} + +func (s *shutdownService) Shutdown() { + s.mu.Lock() + defer s.mu.Unlock() + if s.isShutdown { + return + } + s.isShutdown = true + + go func(callbacks []func(context.Context) error) { + ctx, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + grp, ctx := errgroup.WithContext(ctx) + for i := range callbacks { + fn := callbacks[i] + grp.Go(func() error { return fn(ctx) }) + } + err := grp.Wait() + if err == nil { + err = ErrShutdown + } + s.mu.Lock() + s.err = err + close(s.doneC) + s.mu.Unlock() + }(s.callbacks) +} + +func (s *shutdownService) Done() <-chan struct{} { + return s.doneC +} + +func (s *shutdownService) Err() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.err +} +func (s *shutdownService) RegisterCallback(fn func(context.Context) error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.callbacks == nil { + s.callbacks = []func(context.Context) error{} + } + s.callbacks = append(s.callbacks, fn) +} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go index 8b4d925d28e..f05ab7aa9f9 100644 --- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go @@ -17,13 +17,14 @@ package ttrpcutil import ( + "errors" + "fmt" "sync" "time" v1 "github.com/containerd/containerd/api/services/ttrpc/events/v1" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/ttrpc" - "github.com/pkg/errors" ) const ttrpcDialTimeout = 5 * time.Second @@ -43,7 +44,7 @@ func NewClient(address string, opts ...ttrpc.ClientOpts) (*Client, error) { connector := func() (*ttrpc.Client, error) { conn, err := dialer.Dialer(address, ttrpcDialTimeout) if err != nil { - return nil, errors.Wrap(err, "failed to connect") + return nil, fmt.Errorf("failed to connect: %w", err) } client := ttrpc.NewClient(conn, opts...) 
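Returning to the shutdown package introduced just above, a sketch of the intended call pattern: callbacks registered before Shutdown run concurrently under a 30-second timeout, and their first error (or ErrShutdown when all succeed) becomes the context error:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/containerd/containerd/pkg/shutdown"
)

func main() {
	ctx, sd := shutdown.WithShutdown(context.Background())

	// Callbacks run concurrently on Shutdown via an errgroup.
	sd.RegisterCallback(func(ctx context.Context) error {
		fmt.Println("flushing state")
		return nil
	})

	sd.Shutdown()
	<-ctx.Done()

	// All callbacks succeeded, so the context error is ErrShutdown.
	fmt.Println(errors.Is(ctx.Err(), shutdown.ErrShutdown)) // true
}
```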
diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go index aab756fd2a6..4f8d7dd2d59 100644 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go index c7657e1869b..3913ef66373 100644 --- a/vendor/github.com/containerd/containerd/platforms/compare.go +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -38,12 +38,22 @@ func platformVector(platform specs.Platform) []specs.Platform { switch platform.Architecture { case "amd64": + if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { + for amd64Version--; amd64Version >= 1; amd64Version-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(amd64Version), + }) + } + } vector = append(vector, specs.Platform{ Architecture: "386", OS: platform.OS, OSVersion: platform.OSVersion, OSFeatures: platform.OSFeatures, - Variant: platform.Variant, }) case "arm": if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index 4a7177e3138..046e0356d19 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -18,6 +18,7 @@ package platforms import ( "bufio" + "fmt" "os" "runtime" "strings" @@ -25,7 +26,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) // Present the ARM instruction set architecture, eg: v7, v8 @@ -48,7 +48,7 @@ func cpuVariant() string { // by ourselves. We can just parse these information from /proc/cpuinfo func getCPUInfo(pattern string) (info string, err error) { if !isLinuxOS(runtime.GOOS) { - return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) + return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented) } cpuinfo, err := os.Open("/proc/cpuinfo") @@ -75,7 +75,7 @@ func getCPUInfo(pattern string) (info string, err error) { return "", err } - return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) + return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound) } func getCPUVariant() string { diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go index 6ede94061eb..dbe9957ca9d 100644 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -38,7 +38,7 @@ func isLinuxOS(os string) bool { // The OS value should be normalized before calling this function. 
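The compare.go change above teaches platform matchers about x86-64 micro-architecture levels: a specifier such as linux/amd64/v3 now also matches v2 and the v1 baseline (plus 386), in decreasing order of preference. A sketch of the observable behavior through the exported Parse and Only helpers; the expectations in the comments follow from platformVector above and are assumptions about this exact version:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	p, err := platforms.Parse("linux/amd64/v3")
	if err != nil {
		panic(err)
	}

	m := platforms.Only(p)

	// v2 is in the compatibility vector built by platformVector.
	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "amd64", Variant: "v2"})) // true

	// Plain linux/amd64 is the v1 baseline ("v1" normalizes to ""),
	// so it matches as well.
	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "amd64"})) // true

	// A higher level than requested does not match.
	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "amd64", Variant: "v4"})) // false
}
```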
func isKnownOS(os string) bool { switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": return true } return false @@ -60,7 +60,7 @@ func isArmArch(arch string) bool { // The arch value should be normalized before being passed to this function. func isKnownArch(arch string) bool { switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": return true } return false @@ -86,9 +86,11 @@ func normalizeArch(arch, variant string) (string, string) { case "i386": arch = "386" variant = "" - case "x86_64", "x86-64": + case "x86_64", "x86-64", "amd64": arch = "amd64" - variant = "" + if variant == "v1" { + variant = "" + } case "aarch64", "arm64": arch = "arm64" switch variant { diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go index cb77fbc9f7b..cfa3ff34a19 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -16,27 +16,11 @@ package platforms -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - // DefaultString returns the default string specifier for the platform. func DefaultString() string { return Format(DefaultSpec()) } -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - // DefaultStrict returns strict form of Default. func DefaultStrict() MatchComparer { return OnlyStrict(DefaultSpec()) diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go new file mode 100644 index 00000000000..e249fe48d38 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go @@ -0,0 +1,45 @@ +//go:build darwin +// +build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. 
+func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + // darwin runtime also supports Linux binary via runu/LKL + OS: "linux", + Architecture: runtime.GOARCH, + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go index e8a7d5ffa0d..49690f1b3e7 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. @@ -18,6 +19,22 @@ package platforms +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + // Default returns the default matcher for the platform. func Default() MatchComparer { return Only(DefaultSpec()) diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go index 0c380e3b7c1..c1aaf72ca8e 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -29,6 +27,18 @@ import ( "golang.org/x/sys/windows" ) +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + major, minor, build := windows.RtlGetNtVersionNumbers() + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + // The Variant field will be empty if arch != ARM. 
+ Variant: cpuVariant(), + } +} + type matchComparer struct { defaults Matcher osVersionPrefix string diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 088bdea0508..8f955d036df 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -107,6 +107,8 @@ package platforms import ( + "fmt" + "path" "regexp" "runtime" "strconv" @@ -114,7 +116,6 @@ import ( "github.com/containerd/containerd/errdefs" specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) var ( @@ -166,14 +167,14 @@ func (m *matcher) String() string { func Parse(specifier string) (specs.Platform, error) { if strings.Contains(specifier, "*") { // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) + return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument) } parts := strings.Split(specifier, "/") for _, part := range parts { if !specifierRe.MatchString(part) { - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) + return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument) } } @@ -205,7 +206,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) + return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument) case 2: // In this case, we treat as a regular os/arch pair. We don't care // about whether or not we know of the platform. @@ -227,7 +228,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) + return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument) } // MustParse is like Parses but panics if the specifier cannot be parsed. @@ -246,20 +247,7 @@ func Format(platform specs.Platform) string { return "unknown" } - return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) -} - -func joinNotEmpty(s ...string) string { - var ss []string - for _, s := range s { - if s == "" { - continue - } - - ss = append(ss, s) - } - - return strings.Join(ss, "/") + return path.Join(platform.OS, platform.Architecture, platform.Variant) } // Normalize validates and translate the platform to the canonical value. 
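A pattern that runs through this whole diff is the migration from github.com/pkg/errors wrapping to fmt.Errorf with %w, as in the Parse hunks above. The practical payoff is that callers can classify failures against the errdefs sentinels with plain errors.Is; a minimal sketch (the bogus specifier is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/platforms"
)

func main() {
	// "frobnix" is neither a known OS nor a known architecture.
	_, err := platforms.Parse("frobnix")
	fmt.Println(err)

	// Because the message wraps the sentinel with %w, errors.Is can
	// classify it without pkg/errors-style Cause unwrapping.
	fmt.Println(errors.Is(err, errdefs.ErrInvalidArgument)) // true
}
```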
@@ -269,10 +257,5 @@ func joinNotEmpty(s ...string) string {
 func Normalize(platform specs.Platform) specs.Platform {
 	platform.OS = normalizeOS(platform.OS)
 	platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
-
-	// these fields are deprecated, remove them
-	platform.OSFeatures = nil
-	platform.OSVersion = ""
-
 	return platform
 }
diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go
new file mode 100644
index 00000000000..dcb533c8a74
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/plugin/context.go
@@ -0,0 +1,171 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package plugin
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events/exchange"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// InitContext is used for plugin initialization
+type InitContext struct {
+	Context      context.Context
+	Root         string
+	State        string
+	Config       interface{}
+	Address      string
+	TTRPCAddress string
+
+	// deprecated: will be removed in 2.0, use plugin.EventType
+	Events *exchange.Exchange
+
+	Meta *Meta // plugins can fill in metadata at init.
+
+	plugins *Set
+}
+
+// NewContext returns a new plugin InitContext
+func NewContext(ctx context.Context, r *Registration, plugins *Set, root, state string) *InitContext {
+	return &InitContext{
+		Context: ctx,
+		Root:    filepath.Join(root, r.URI()),
+		State:   filepath.Join(state, r.URI()),
+		Meta: &Meta{
+			Exports: map[string]string{},
+		},
+		plugins: plugins,
+	}
+}
+
+// Get returns the first plugin by its type
+func (i *InitContext) Get(t Type) (interface{}, error) {
+	return i.plugins.Get(t)
+}
+
+// Meta contains information gathered from the registration and initialization
+// process.
+type Meta struct {
+	Platforms    []ocispec.Platform // platforms supported by plugin
+	Exports      map[string]string  // values exported by plugin
+	Capabilities []string           // feature switches for plugin
+}
+
+// Plugin represents an initialized plugin, used with an init context.
+type Plugin struct {
+	Registration *Registration // registration, as initialized
+	Config       interface{}   // config, as initialized
+	Meta         *Meta
+
+	instance interface{}
+	err      error // will be set if there was an error initializing the plugin
+}
+
+// Err returns the errors during initialization.
+// returns nil if no error was encountered
+func (p *Plugin) Err() error {
+	return p.err
+}
+
+// Instance returns the instance and any initialization error of the plugin
+func (p *Plugin) Instance() (interface{}, error) {
+	return p.instance, p.err
+}
+
+// Set defines a plugin collection, used with InitContext.
+//
+// This maintains ordering and unique indexing over the set.
+//
+// After iteratively instantiating plugins, this set should represent the
+// ordered initialization set of plugins for a containerd instance.
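The Set type declared immediately below is populated through the registration machinery defined further down in plugin.go; a sketch of that flow using only identifiers introduced in this diff (the "demo" plugin is illustrative):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/plugin"
)

func main() {
	// Registration normally happens from a package init() function.
	plugin.Register(&plugin.Registration{
		Type: plugin.InternalPlugin,
		ID:   "demo",
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			// Returning ErrSkipPlugin marks the plugin as deliberately
			// not loaded, which IsSkipPlugin distinguishes from a real
			// initialization failure.
			return nil, plugin.ErrSkipPlugin
		},
	})

	// Graph resolves Requires edges into an initialization order.
	for _, r := range plugin.Graph(func(*plugin.Registration) bool { return false }) {
		fmt.Println(r.URI()) // io.containerd.internal.v1.demo
	}
}
```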
+type Set struct {
+	ordered     []*Plugin // order of initialization
+	byTypeAndID map[Type]map[string]*Plugin
+}
+
+// NewPluginSet returns an initialized plugin set
+func NewPluginSet() *Set {
+	return &Set{
+		byTypeAndID: make(map[Type]map[string]*Plugin),
+	}
+}
+
+// Add a plugin to the set
+func (ps *Set) Add(p *Plugin) error {
+	if byID, typeok := ps.byTypeAndID[p.Registration.Type]; !typeok {
+		ps.byTypeAndID[p.Registration.Type] = map[string]*Plugin{
+			p.Registration.ID: p,
+		}
+	} else if _, idok := byID[p.Registration.ID]; !idok {
+		byID[p.Registration.ID] = p
+	} else {
+		return fmt.Errorf("plugin %v already initialized: %w", p.Registration.URI(), errdefs.ErrAlreadyExists)
+	}
+
+	ps.ordered = append(ps.ordered, p)
+	return nil
+}
+
+// Get returns the first plugin by its type
+func (ps *Set) Get(t Type) (interface{}, error) {
+	for _, v := range ps.byTypeAndID[t] {
+		return v.Instance()
+	}
+	return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound)
+}
+
+// GetAll returns all initialized plugins
+func (ps *Set) GetAll() []*Plugin {
+	return ps.ordered
+}
+
+// Plugins returns the plugin set
+func (i *InitContext) Plugins() *Set {
+	return i.plugins
+}
+
+// GetAll plugins in the set
+func (i *InitContext) GetAll() []*Plugin {
+	return i.plugins.GetAll()
+}
+
+// GetByID returns the plugin of the given type and ID
+func (i *InitContext) GetByID(t Type, id string) (interface{}, error) {
+	ps, err := i.GetByType(t)
+	if err != nil {
+		return nil, err
+	}
+	p, ok := ps[id]
+	if !ok {
+		return nil, fmt.Errorf("no %s plugins with id %s: %w", t, id, errdefs.ErrNotFound)
+	}
+	return p.Instance()
+}
+
+// GetByType returns all plugins with the specific type.
+func (i *InitContext) GetByType(t Type) (map[string]*Plugin, error) {
+	p, ok := i.plugins.byTypeAndID[t]
+	if !ok {
+		return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound)
+	}
+
+	return p, nil
+}
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go
new file mode 100644
index 00000000000..eb38c127157
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/plugin/plugin.go
@@ -0,0 +1,223 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package plugin
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+)
+
+var (
+	// ErrNoType is returned when no type is specified
+	ErrNoType = errors.New("plugin: no type")
+	// ErrNoPluginID is returned when no id is specified
+	ErrNoPluginID = errors.New("plugin: no id")
+	// ErrIDRegistered is returned when a duplicate id is already registered
+	ErrIDRegistered = errors.New("plugin: id already registered")
+	// ErrSkipPlugin is used when a plugin is not initialized and should not be loaded,
+	// this allows the plugin loader to differentiate between a plugin which is configured
+	// not to load and one that fails to load.
+	ErrSkipPlugin = errors.New("skip plugin")
+
+	// ErrInvalidRequires will be thrown if the requirements for a plugin are
+	// defined in an invalid manner.
+	ErrInvalidRequires = errors.New("invalid requires")
+)
+
+// IsSkipPlugin returns true if the error is skipping the plugin
+func IsSkipPlugin(err error) bool {
+	return errors.Is(err, ErrSkipPlugin)
+}
+
+// Type is the type of the plugin
+type Type string
+
+func (t Type) String() string { return string(t) }
+
+const (
+	// InternalPlugin implements an internal plugin to containerd
+	InternalPlugin Type = "io.containerd.internal.v1"
+	// RuntimePlugin implements a runtime
+	RuntimePlugin Type = "io.containerd.runtime.v1"
+	// RuntimePluginV2 implements a runtime v2
+	RuntimePluginV2 Type = "io.containerd.runtime.v2"
+	// ServicePlugin implements an internal service
+	ServicePlugin Type = "io.containerd.service.v1"
+	// GRPCPlugin implements a grpc service
+	GRPCPlugin Type = "io.containerd.grpc.v1"
+	// TTRPCPlugin implements a ttrpc shim service
+	TTRPCPlugin Type = "io.containerd.ttrpc.v1"
+	// SnapshotPlugin implements a snapshotter
+	SnapshotPlugin Type = "io.containerd.snapshotter.v1"
+	// TaskMonitorPlugin implements a task monitor
+	TaskMonitorPlugin Type = "io.containerd.monitor.v1"
+	// DiffPlugin implements a differ
+	DiffPlugin Type = "io.containerd.differ.v1"
+	// MetadataPlugin implements a metadata store
+	MetadataPlugin Type = "io.containerd.metadata.v1"
+	// ContentPlugin implements a content store
+	ContentPlugin Type = "io.containerd.content.v1"
+	// GCPlugin implements garbage collection policy
+	GCPlugin Type = "io.containerd.gc.v1"
+	// EventPlugin implements event handling
+	EventPlugin Type = "io.containerd.event.v1"
+	// TracingProcessorPlugin implements an open telemetry span processor
+	TracingProcessorPlugin Type = "io.containerd.tracing.processor.v1"
+)
+
+const (
+	// RuntimeLinuxV1 is the legacy linux runtime
+	RuntimeLinuxV1 = "io.containerd.runtime.v1.linux"
+	// RuntimeRuncV1 is the runc runtime that supports a single container
+	RuntimeRuncV1 = "io.containerd.runc.v1"
+	// RuntimeRuncV2 is the runc runtime that supports multiple containers per shim
+	RuntimeRuncV2 = "io.containerd.runc.v2"
+)
+
+// Registration contains information for registering a plugin
+type Registration struct {
+	// Type of the plugin
+	Type Type
+	// ID of the plugin
+	ID string
+	// Config specific to the plugin
+	Config interface{}
+	// Requires is a list of plugins that the registered plugin requires to be available
+	Requires []Type
+
+	// InitFn is called when initializing a plugin. The registration and
+	// context are passed in. The init function may modify the registration to
+	// add exports, capabilities and platform support declarations.
+ InitFn func(*InitContext) (interface{}, error) + // Disable the plugin from loading + Disable bool +} + +// Init the registered plugin +func (r *Registration) Init(ic *InitContext) *Plugin { + p, err := r.InitFn(ic) + return &Plugin{ + Registration: r, + Config: ic.Config, + Meta: ic.Meta, + instance: p, + err: err, + } +} + +// URI returns the full plugin URI +func (r *Registration) URI() string { + return fmt.Sprintf("%s.%s", r.Type, r.ID) +} + +var register = struct { + sync.RWMutex + r []*Registration +}{} + +// Load loads all plugins at the provided path into containerd +func Load(path string) (err error) { + defer func() { + if v := recover(); v != nil { + rerr, ok := v.(error) + if !ok { + rerr = fmt.Errorf("%s", v) + } + err = rerr + } + }() + return loadPlugins(path) +} + +// Register allows plugins to register +func Register(r *Registration) { + register.Lock() + defer register.Unlock() + + if r.Type == "" { + panic(ErrNoType) + } + if r.ID == "" { + panic(ErrNoPluginID) + } + if err := checkUnique(r); err != nil { + panic(err) + } + + for _, requires := range r.Requires { + if requires == "*" && len(r.Requires) != 1 { + panic(ErrInvalidRequires) + } + } + + register.r = append(register.r, r) +} + +func checkUnique(r *Registration) error { + for _, registered := range register.r { + if r.URI() == registered.URI() { + return fmt.Errorf("%s: %w", r.URI(), ErrIDRegistered) + } + } + return nil +} + +// DisableFilter filters out disabled plugins +type DisableFilter func(r *Registration) bool + +// Graph returns an ordered list of registered plugins for initialization. +// Plugins in disableList specified by id will be disabled. +func Graph(filter DisableFilter) (ordered []*Registration) { + register.RLock() + defer register.RUnlock() + + for _, r := range register.r { + if filter(r) { + r.Disable = true + } + } + + added := map[*Registration]bool{} + for _, r := range register.r { + if r.Disable { + continue + } + children(r, added, &ordered) + if !added[r] { + ordered = append(ordered, r) + added[r] = true + } + } + return ordered +} + +func children(reg *Registration, added map[*Registration]bool, ordered *[]*Registration) { + for _, t := range reg.Requires { + for _, r := range register.r { + if !r.Disable && + r.URI() != reg.URI() && + (t == "*" || r.Type == t) { + children(r, added, ordered) + if !added[r] { + *ordered = append(*ordered, r) + added[r] = true + } + } + } + } +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go new file mode 100644 index 00000000000..0df0669d29a --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go @@ -0,0 +1,63 @@ +//go:build go1.8 && !windows && amd64 && !static_build && !gccgo +// +build go1.8,!windows,amd64,!static_build,!gccgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package plugin + +import ( + "fmt" + "path/filepath" + "plugin" + "runtime" +) + +// loadPlugins loads all plugins for the OS and Arch +// that containerd is built for inside the provided path +func loadPlugins(path string) error { + abs, err := filepath.Abs(path) + if err != nil { + return err + } + pattern := filepath.Join(abs, fmt.Sprintf( + "*-%s-%s.%s", + runtime.GOOS, + runtime.GOARCH, + getLibExt(), + )) + libs, err := filepath.Glob(pattern) + if err != nil { + return err + } + for _, lib := range libs { + if _, err := plugin.Open(lib); err != nil { + return err + } + } + return nil +} + +// getLibExt returns a platform specific lib extension for +// the platform that containerd is running on +func getLibExt() string { + switch runtime.GOOS { + case "windows": + return "dll" + default: + return "so" + } +} diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go similarity index 67% rename from vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go rename to vendor/github.com/containerd/containerd/plugin/plugin_other.go index 6e40a9c7d7f..a2883bbbadf 100644 --- a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go @@ -1,3 +1,6 @@ +//go:build !go1.8 || windows || !amd64 || static_build || gccgo +// +build !go1.8 windows !amd64 static_build gccgo + /* Copyright The containerd Authors. @@ -14,17 +17,9 @@ limitations under the License. */ -package sys - -import ( - _ "unsafe" // required for go:linkname. -) - -//go:linkname beforeFork syscall.runtime_BeforeFork -func beforeFork() - -//go:linkname afterFork syscall.runtime_AfterFork -func afterFork() +package plugin -//go:linkname afterForkInChild syscall.runtime_AfterForkInChild -func afterForkInChild() +func loadPlugins(path string) error { + // plugins not supported until 1.8 + return nil +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/reference.go b/vendor/github.com/containerd/containerd/reference/docker/reference.go index 0998639b030..6fa97dfdca9 100644 --- a/vendor/github.com/containerd/containerd/reference/docker/reference.go +++ b/vendor/github.com/containerd/containerd/reference/docker/reference.go @@ -338,11 +338,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { // TrimNamed removes any tag or digest from the named reference. 
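The hunk below reworks TrimNamed so the registry domain survives the trim (via the namedRepository fast path) instead of being re-derived by the legacy SplitHostname heuristics. An illustrative round-trip, assuming ParseNormalizedNamed from this package's normalize.go, which is not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/reference/docker"
)

func main() {
	named, err := docker.ParseNormalizedNamed("ghcr.io/containerd/busybox:1.36")
	if err != nil {
		panic(err)
	}

	// The tag is dropped while the domain is preserved intact.
	fmt.Println(docker.TrimNamed(named).Name()) // ghcr.io/containerd/busybox
}
```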
func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, + repo := repository{} + if r, ok := ref.(namedRepository); ok { + repo.domain, repo.path = r.Domain(), r.Path() + } else { + repo.domain, repo.path = splitDomain(ref.Name()) } + return repo } func getBestReferenceType(ref reference) Reference { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go index c14aacca99b..9d3a904237e 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go @@ -18,6 +18,7 @@ package shim import ( "context" + "errors" "flag" "fmt" "io" @@ -30,21 +31,15 @@ import ( "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/shutdown" + "github.com/containerd/containerd/plugin" shimapi "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/containerd/version" "github.com/containerd/ttrpc" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// Client for a shim server -type Client struct { - service shimapi.TaskService - context context.Context - signals chan os.Signal -} - // Publisher for events type Publisher interface { events.Publisher @@ -53,22 +48,37 @@ type Publisher interface { // StartOpts describes shim start configuration received from containerd type StartOpts struct { - ID string + ID string // TODO(2.0): Remove ID, passed directly to start for call symmetry ContainerdBinary string Address string TTRPCAddress string } +type StopStatus struct { + Pid int + ExitStatus int + ExitedAt time.Time +} + // Init func for the creation of a shim server +// TODO(2.0): Remove init function type Init func(context.Context, string, Publisher, func()) (Shim, error) // Shim server interface +// TODO(2.0): Remove unified shim interface type Shim interface { shimapi.TaskService Cleanup(ctx context.Context) (*shimapi.DeleteResponse, error) StartShim(ctx context.Context, opts StartOpts) (string, error) } +// Manager is the interface which manages the shim process +type Manager interface { + Name() string + Start(ctx context.Context, id string, opts StartOpts) (string, error) + Stop(ctx context.Context, id string) (StopStatus, error) +} + // OptsKey is the context key for the Opts value. 
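The Manager interface above is the new, narrower replacement for the unified Shim interface, and it is driven by RunManager, which appears further below. A compile-oriented sketch of a no-op manager; the runtime name and socket path are illustrative:

```go
package main

import (
	"context"
	"time"

	"github.com/containerd/containerd/runtime/v2/shim"
)

type manager struct{}

func (manager) Name() string { return "io.containerd.example.v1" }

func (manager) Start(ctx context.Context, id string, opts shim.StartOpts) (string, error) {
	// A real manager would fork the shim process here and return the
	// address it serves the task API on.
	return "unix:///run/example/" + id + ".sock", nil
}

func (manager) Stop(ctx context.Context, id string) (shim.StopStatus, error) {
	return shim.StopStatus{Pid: 0, ExitStatus: 0, ExitedAt: time.Now()}, nil
}

func main() {
	// RunManager parses the shim CLI flags (-namespace, -id, ...) and
	// dispatches the start/delete actions through the Manager.
	shim.RunManager(context.Background(), manager{})
}
```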
type OptsKey struct{} @@ -91,10 +101,23 @@ type Config struct { NoSetupLogger bool } +type ttrpcService interface { + RegisterTTRPC(*ttrpc.Server) error +} + +type taskService struct { + shimapi.TaskService +} + +func (t taskService) RegisterTTRPC(server *ttrpc.Server) error { + shimapi.RegisterTaskService(server, t.TaskService) + return nil +} + var ( debugFlag bool versionFlag bool - idFlag string + id string namespaceFlag string socketFlag string bundlePath string @@ -111,7 +134,7 @@ func parseFlags() { flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") flag.BoolVar(&versionFlag, "v", false, "show the shim version and exit") flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") - flag.StringVar(&idFlag, "id", "", "id of the task") + flag.StringVar(&id, "id", "", "id of the task") flag.StringVar(&socketFlag, "socket", "", "socket path to serve") flag.StringVar(&bundlePath, "bundle", "", "path to the bundle if not workdir") @@ -136,35 +159,85 @@ func setRuntime() { } } -func setLogger(ctx context.Context, id string) error { - logrus.SetFormatter(&logrus.TextFormatter{ +func setLogger(ctx context.Context, id string) (context.Context, error) { + l := log.G(ctx) + l.Logger.SetFormatter(&logrus.TextFormatter{ TimestampFormat: log.RFC3339NanoFixed, FullTimestamp: true, }) if debugFlag { - logrus.SetLevel(logrus.DebugLevel) + l.Logger.SetLevel(logrus.DebugLevel) } f, err := openLog(ctx, id) - if err != nil { - return err + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + return ctx, err } - logrus.SetOutput(f) - return nil + l.Logger.SetOutput(f) + return log.WithLogger(ctx, l), nil } // Run initializes and runs a shim server -func Run(id string, initFunc Init, opts ...BinaryOpts) { +// TODO(2.0): Remove function +func Run(name string, initFunc Init, opts ...BinaryOpts) { + var config Config + for _, o := range opts { + o(&config) + } + + ctx := context.Background() + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", name)) + + if err := run(ctx, nil, initFunc, name, config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", name, err) + os.Exit(1) + } +} + +// TODO(2.0): Remove this type +type shimToManager struct { + shim Shim + name string +} + +func (stm shimToManager) Name() string { + return stm.name +} + +func (stm shimToManager) Start(ctx context.Context, id string, opts StartOpts) (string, error) { + opts.ID = id + return stm.shim.StartShim(ctx, opts) +} + +func (stm shimToManager) Stop(ctx context.Context, id string) (StopStatus, error) { + // shim must already have id + dr, err := stm.shim.Cleanup(ctx) + if err != nil { + return StopStatus{}, err + } + return StopStatus{ + Pid: int(dr.Pid), + ExitStatus: int(dr.ExitStatus), + ExitedAt: dr.ExitedAt, + }, nil +} + +// RunManager initializes and runs a shim server +// TODO(2.0): Rename to Run +func RunManager(ctx context.Context, manager Manager, opts ...BinaryOpts) { var config Config for _, o := range opts { o(&config) } - if err := run(id, initFunc, config); err != nil { - fmt.Fprintf(os.Stderr, "%s: %s\n", id, err) + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", manager.Name())) + + if err := run(ctx, manager, nil, "", config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", manager.Name(), err) + os.Exit(1) } } -func run(id string, initFunc Init, config Config) error { +func run(ctx context.Context, manager Manager, initFunc Init, name string, config Config) error { parseFlags() if versionFlag { fmt.Printf("%s:\n",
os.Args[0]) @@ -182,12 +255,12 @@ func run(id string, initFunc Init, config Config) error { setRuntime() signals, err := setupSignals(config) - if err != nil { + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error return err } if !config.NoSubreaper { - if err := subreaper(); err != nil { + if err := subreaper(); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error return err } } @@ -199,27 +272,49 @@ func run(id string, initFunc Init, config Config) error { } defer publisher.Close() - ctx := namespaces.WithNamespace(context.Background(), namespaceFlag) + ctx = namespaces.WithNamespace(ctx, namespaceFlag) ctx = context.WithValue(ctx, OptsKey{}, Opts{BundlePath: bundlePath, Debug: debugFlag}) - ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", id)) - ctx, cancel := context.WithCancel(ctx) - service, err := initFunc(ctx, idFlag, publisher, cancel) - if err != nil { - return err + ctx, sd := shutdown.WithShutdown(ctx) + defer sd.Shutdown() + + if manager == nil { + service, err := initFunc(ctx, id, publisher, sd.Shutdown) + if err != nil { + return err + } + plugin.Register(&plugin.Registration{ + Type: plugin.TTRPCPlugin, + ID: "task", + Requires: []plugin.Type{ + plugin.EventPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return taskService{service}, nil + }, + }) + manager = shimToManager{ + shim: service, + name: name, + } } + // Handle explicit actions switch action { case "delete": - logger := logrus.WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(logrus.Fields{ "pid": os.Getpid(), "namespace": namespaceFlag, }) - go handleSignals(ctx, logger, signals) - response, err := service.Cleanup(ctx) + go reap(ctx, logger, signals) + ss, err := manager.Stop(ctx, id) if err != nil { return err } - data, err := proto.Marshal(response) + data, err := proto.Marshal(&shimapi.DeleteResponse{ + Pid: uint32(ss.Pid), + ExitStatus: uint32(ss.ExitStatus), + ExitedAt: ss.ExitedAt, + }) if err != nil { return err } @@ -229,12 +324,12 @@ func run(id string, initFunc Init, config Config) error { return nil case "start": opts := StartOpts{ - ID: idFlag, ContainerdBinary: containerdBinaryFlag, Address: addressFlag, TTRPCAddress: ttrpcAddress, } - address, err := service.StartShim(ctx, opts) + + address, err := manager.Start(ctx, id, opts) if err != nil { return err } @@ -242,46 +337,120 @@ func run(id string, initFunc Init, config Config) error { return err } return nil - default: - if !config.NoSetupLogger { - if err := setLogger(ctx, idFlag); err != nil { - return err - } + } + + if !config.NoSetupLogger { + ctx, err = setLogger(ctx, id) + if err != nil { + return err } - client := NewShimClient(ctx, service, signals) - if err := client.Serve(); err != nil { - if err != context.Canceled { - return err + } + + plugin.Register(&plugin.Registration{ + Type: plugin.InternalPlugin, + ID: "shutdown", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return sd, nil + }, + }) + + // Register event plugin + plugin.Register(&plugin.Registration{ + Type: plugin.EventPlugin, + ID: "publisher", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return publisher, nil + }, + }) + + var ( + initialized = plugin.NewPluginSet() + ttrpcServices = []ttrpcService{} + ) + plugins := plugin.Graph(func(*plugin.Registration) bool { return false }) + for _, p := range plugins { + id := p.URI() + log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id) + + initContext := 
plugin.NewContext( + ctx, + p, + initialized, + // NOTE: Root is empty since the shim does not support persistent storage, + // shim plugins should make use of the state directory for writing files to disk. + // The state directory will be destroyed when the shim is cleaned up or + // on reboot + "", + bundlePath, + ) + initContext.Address = addressFlag + initContext.TTRPCAddress = ttrpcAddress + + // load the plugin-specific configuration if it is provided + //TODO: Read configuration passed into shim, or from state directory? + //if p.Config != nil { + // pc, err := config.Decode(p) + // if err != nil { + // return nil, err + // } + // initContext.Config = pc + //} + + result := p.Init(initContext) + if err := initialized.Add(result); err != nil { + return fmt.Errorf("could not add plugin result to plugin set: %w", err) + } + + instance, err := result.Instance() + if err != nil { + if plugin.IsSkipPlugin(err) { + log.G(ctx).WithError(err).WithField("type", p.Type).Infof("skip loading plugin %q...", id) + } else { + log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id) } + continue } - // NOTE: If the shim server is down(like oom killer), the address - // socket might be leaking. - if address, err := ReadAddress("address"); err == nil { - _ = RemoveSocket(address) + if src, ok := instance.(ttrpcService); ok { + logrus.WithField("id", id).Debug("registering ttrpc service") + ttrpcServices = append(ttrpcServices, src) + } + } + + server, err := newServer() + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + return fmt.Errorf("failed creating server: %w", err) + } + + for _, srv := range ttrpcServices { + if err := srv.RegisterTTRPC(server); err != nil { + return fmt.Errorf("failed to register service: %w", err) } + } - select { - case <-publisher.Done(): - return nil - case <-time.After(5 * time.Second): - return errors.New("publisher not closed") + if err := serve(ctx, server, signals, sd.Shutdown); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err != shutdown.ErrShutdown { + return err } } -} -// NewShimClient creates a new shim server client -func NewShimClient(ctx context.Context, svc shimapi.TaskService, signals chan os.Signal) *Client { - s := &Client{ - service: svc, - context: ctx, - signals: signals, + // NOTE: If the shim server is down (like oom killer), the address + // socket might be leaking.
+ if address, err := ReadAddress("address"); err == nil { + _ = RemoveSocket(address) + } + + select { + case <-publisher.Done(): + return nil + case <-time.After(5 * time.Second): + return errors.New("publisher not closed") } - return s } -// Serve the shim server -func (s *Client) Serve() error { +// serve serves the ttrpc API over a unix socket in the current working directory +// and blocks until the context is canceled +func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, shutdown func()) error { dump := make(chan os.Signal, 32) setupDumpStacks(dump) @@ -289,18 +458,19 @@ func (s *Client) Serve() error { if err != nil { return err } - server, err := newServer() - if err != nil { - return errors.Wrap(err, "failed creating server") - } - logrus.Debug("registering ttrpc server") - shimapi.RegisterTaskService(server, s.service) - - if err := serve(s.context, server, socketFlag); err != nil { + l, err := serveListener(socketFlag) + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error return err } - logger := logrus.WithFields(logrus.Fields{ + go func() { + defer l.Close() + if err := server.Serve(ctx, l); err != nil && + !strings.Contains(err.Error(), "use of closed network connection") { + log.G(ctx).WithError(err).Fatal("containerd-shim: ttrpc server failure") + } + }() + logger := log.G(ctx).WithFields(logrus.Fields{ "pid": os.Getpid(), "path": path, "namespace": namespaceFlag, @@ -310,24 +480,9 @@ func (s *Client) Serve() error { dumpStacks(logger) } }() - return handleSignals(s.context, logger, s.signals) -} -// serve serves the ttrpc API over a unix socket at the provided path -// this function does not block -func serve(ctx context.Context, server *ttrpc.Server, path string) error { - l, err := serveListener(path) - if err != nil { - return err - } - go func() { - defer l.Close() - if err := server.Serve(ctx, l); err != nil && - !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Fatal("containerd-shim: ttrpc server failure") - } - }() - return nil + go handleExitSignals(ctx, logger, shutdown) + return reap(ctx, logger, signals) } func dumpStacks(logger *logrus.Entry) { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go index 314b45cb548..fe833df01e9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go @@ -1,5 +1,3 @@ -// +build darwin - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go index 4bc636280d0..fe833df01e9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. 
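The shim.go changes above replace the old Client/Serve pair with a Manager interface and a RunManager entry point. A minimal sketch of what a runtime author's main package might look like against this new API — the exampleManager type, runtime name, and socket address below are illustrative assumptions, not part of this change:

```go
package main

import (
	"context"
	"time"

	"github.com/containerd/containerd/runtime/v2/shim"
)

// exampleManager is a hypothetical Manager implementation; a real manager
// would fork the shim process in Start and reap/clean it up in Stop.
type exampleManager struct{}

func (exampleManager) Name() string { return "io.containerd.example.v1" }

func (exampleManager) Start(ctx context.Context, id string, opts shim.StartOpts) (string, error) {
	// Spawn the shim for task `id` and return the address it serves on.
	return "unix:///run/containerd/s/example.sock", nil
}

func (exampleManager) Stop(ctx context.Context, id string) (shim.StopStatus, error) {
	// Tear down the task and report how its process exited.
	return shim.StopStatus{Pid: 0, ExitStatus: 0, ExitedAt: time.Now()}, nil
}

func main() {
	// RunManager parses the shim flags, handles the "start" and "delete"
	// actions itself, and otherwise serves the registered ttrpc plugins
	// until the shutdown service fires.
	shim.RunManager(context.Background(), exampleManager{})
}
```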
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go index a61b6420892..e2dab0931ea 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,6 +21,7 @@ package shim import ( "context" + "fmt" "io" "net" "os" @@ -28,7 +30,6 @@ import ( "github.com/containerd/containerd/sys/reaper" "github.com/containerd/fifo" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -59,7 +60,7 @@ func serveListener(path string) (net.Listener, error) { path = "[inherited from parent]" } else { if len(path) > socketPathLimit { - return nil, errors.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) + return nil, fmt.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) } l, err = net.Listen("unix", path) } @@ -70,7 +71,7 @@ func serveListener(path string) (net.Listener, error) { return l, nil } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { logger.Info("starting signal loop") for { @@ -78,6 +79,8 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si case <-ctx.Done(): return ctx.Err() case s := <-signals: + // Exit signals are handled separately from this loop + // They get registered with this channel so that we can ignore such signals for short-running actions (e.g. `delete`) switch s { case unix.SIGCHLD: if err := reaper.Reap(); err != nil { @@ -89,6 +92,22 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si } } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { + ch := make(chan os.Signal, 32) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + + for { + select { + case s := <-ch: + logger.WithField("signal", s).Debugf("Caught exit signal") + cancel() + return + case <-ctx.Done(): + return + } + } +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return fifo.OpenFifoDup2(ctx, "log", unix.O_WRONLY, 0700, int(os.Stderr.Fd())) } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go index 7339eb2a2e4..4b098ab1630 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -20,12 +18,12 @@ package shim import ( "context" + "errors" "io" "net" "os" "github.com/containerd/ttrpc" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -48,10 +46,13 @@ func serveListener(path string) (net.Listener, error) { return nil, errors.New("not supported") } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { return errors.New("not supported") } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return nil, errors.New("not supported") } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go index 52bfaa9ce71..28ac9d1e799 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go @@ -19,26 +19,32 @@ package shim import ( "bytes" "context" + "errors" "fmt" - "io/ioutil" "net" "os" - "os/exec" "path/filepath" "strings" - "sync" "time" "github.com/containerd/containerd/namespaces" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) -var runtimePaths sync.Map +type CommandConfig struct { + Runtime string + Address string + TTRPCAddress string + Path string + SchedCore bool + Args []string + Opts *types.Any +} // Command returns the shim command with the provided args and configuration -func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAddress, path string, opts *types.Any, cmdArgs ...string) (*exec.Cmd, error) { +func Command(ctx context.Context, config *CommandConfig) (*exec.Cmd, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -49,67 +55,23 @@ func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAdd } args := []string{ "-namespace", ns, - "-address", containerdAddress, + "-address", config.Address, "-publish-binary", self, } - args = append(args, cmdArgs...) - name := BinaryName(runtime) - if name == "" { - return nil, fmt.Errorf("invalid runtime name %s, correct runtime name should format like io.containerd.runc.v1", runtime) - } - - var cmdPath string - cmdPathI, cmdPathFound := runtimePaths.Load(name) - if cmdPathFound { - cmdPath = cmdPathI.(string) - } else { - var lerr error - binaryPath := BinaryPath(runtime) - if _, serr := os.Stat(binaryPath); serr == nil { - cmdPath = binaryPath - } - - if cmdPath == "" { - if cmdPath, lerr = exec.LookPath(name); lerr != nil { - if eerr, ok := lerr.(*exec.Error); ok { - if eerr.Err == exec.ErrNotFound { - // LookPath only finds current directory matches based on - // the callers current directory but the caller is not - // likely in the same directory as the containerd - // executables. Instead match the calling binaries path - // (containerd) and see if they are side by side. If so - // execute the shim found there. 
- testPath := filepath.Join(filepath.Dir(self), name) - if _, serr := os.Stat(testPath); serr == nil { - cmdPath = testPath - } - if cmdPath == "" { - return nil, errors.Wrapf(os.ErrNotExist, "runtime %q binary not installed %q", runtime, name) - } - } - } - } - } - cmdPath, err = filepath.Abs(cmdPath) - if err != nil { - return nil, err - } - if cmdPathI, cmdPathFound = runtimePaths.LoadOrStore(name, cmdPath); cmdPathFound { - // We didn't store cmdPath we loaded an already cached value. Use it. - cmdPath = cmdPathI.(string) - } - } - - cmd := exec.Command(cmdPath, args...) - cmd.Dir = path + args = append(args, config.Args...) + cmd := exec.CommandContext(ctx, config.Runtime, args...) + cmd.Dir = config.Path cmd.Env = append( os.Environ(), "GOMAXPROCS=2", - fmt.Sprintf("%s=%s", ttrpcAddressEnv, containerdTTRPCAddress), + fmt.Sprintf("%s=%s", ttrpcAddressEnv, config.TTRPCAddress), ) + if config.SchedCore { + cmd.Env = append(cmd.Env, "SCHED_CORE=1") + } cmd.SysProcAttr = getSysProcAttr() - if opts != nil { - d, err := proto.Marshal(opts) + if config.Opts != nil { + d, err := proto.Marshal(config.Opts) if err != nil { return nil, err } @@ -196,7 +158,7 @@ func ReadAddress(path string) (string, error) { if err != nil { return "", err } - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go index f956b0986bd..4e2309a8069 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -29,10 +30,9 @@ import ( "syscall" "time" + "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/sys" - "github.com/pkg/errors" ) const ( @@ -53,16 +53,16 @@ func AdjustOOMScore(pid int) error { parent := os.Getppid() score, err := sys.GetOOMScoreAdj(parent) if err != nil { - return errors.Wrap(err, "get parent OOM score") + return fmt.Errorf("get parent OOM score: %w", err) } shimScore := score + 1 if err := sys.AdjustOOMScore(pid, shimScore); err != nil { - return errors.Wrap(err, "set shim OOM score") + return fmt.Errorf("set shim OOM score: %w", err) } return nil } -const socketRoot = "/run/containerd" +const socketRoot = defaults.DefaultStateDir // SocketAddress returns a socket address func SocketAddress(ctx context.Context, socketPath, id string) (string, error) { @@ -76,7 +76,7 @@ func SocketAddress(ctx context.Context, socketPath, id string) (string, error) { // AnonDialer returns a dialer for a socket func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { - return dialer.Dialer(socket(address).path(), timeout) + return net.DialTimeout("unix", socket(address).path(), timeout) } // AnonReconnectDialer returns a dialer for an existing socket on reconnection @@ -90,19 +90,25 @@ func NewSocket(address string) (*net.UnixListener, error) { sock = socket(address) path = sock.path() ) - if !sock.isAbstract() { + + isAbstract := sock.isAbstract() + + if !isAbstract { if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil { - return nil, errors.Wrapf(err, "%s", path) + return nil, fmt.Errorf("%s: %w", path, err) } } l, err := net.Listen("unix", path) if err != nil { return nil, err } - if err := os.Chmod(path, 0600); err 
!= nil { - os.Remove(sock.path()) - l.Close() - return nil, err + + if !isAbstract { + if err := os.Chmod(path, 0600); err != nil { + os.Remove(sock.path()) + l.Close() + return nil, err + } } return l.(*net.UnixListener), nil } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go index 325c290043f..b9042841b1a 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go @@ -18,13 +18,13 @@ package shim import ( "context" + "fmt" "net" "os" "syscall" "time" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" ) const shimBinaryFormat = "containerd-shim-%s-%s.exe" @@ -40,9 +40,9 @@ func AnonReconnectDialer(address string, timeout time.Duration) (net.Conn, error c, err := winio.DialPipeContext(ctx, address) if os.IsNotExist(err) { - return nil, errors.Wrap(os.ErrNotExist, "npipe not found on reconnect") + return nil, fmt.Errorf("npipe not found on reconnect: %w", os.ErrNotExist) } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } else if err != nil { return nil, err } @@ -65,14 +65,14 @@ func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { if os.IsNotExist(err) { select { case <-serveTimer.C: - return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout") + return nil, fmt.Errorf("pipe not found before timeout: %w", os.ErrNotExist) default: // Wait 10ms for the shim to serve and try again. time.Sleep(10 * time.Millisecond) continue } } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } return nil, err } diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/sys/epoll.go index 28d6c2cabc6..73a57013ff1 100644 --- a/vendor/github.com/containerd/containerd/sys/epoll.go +++ b/vendor/github.com/containerd/containerd/sys/epoll.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/sys/fds.go index db3cf702f49..a71a9cd7e93 100644 --- a/vendor/github.com/containerd/containerd/sys/fds.go +++ b/vendor/github.com/containerd/containerd/sys/fds.go @@ -1,3 +1,4 @@ +//go:build !windows && !darwin // +build !windows,!darwin /* @@ -19,14 +20,14 @@ package sys import ( - "io/ioutil" + "os" "path/filepath" "strconv" ) // GetOpenFds returns the number of open fds for the process provided by pid func GetOpenFds(pid int) (int, error) { - dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) + dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) if err != nil { return -1, err } diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go index d8329af9fbc..805a7a736fa 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go index 
a9198ef399a..87ebacc2006 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -19,6 +17,7 @@ package sys import ( + "fmt" "os" "path/filepath" "regexp" @@ -29,7 +28,6 @@ import ( "unsafe" "github.com/Microsoft/hcsshim" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -270,7 +268,7 @@ func ForceRemoveAll(path string) error { snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { if err := cleanupWCOWLayers(snapshotDir); err != nil { - return errors.Wrapf(err, "failed to cleanup WCOW layers in %s", snapshotDir) + return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err) } } @@ -280,12 +278,22 @@ func ForceRemoveAll(path string) error { func cleanupWCOWLayers(root string) error { // See snapshots/windows/windows.go getSnapshotDir() var layerNums []int + var rmLayerNums []int if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if path != root && info.IsDir() { - if layerNum, err := strconv.Atoi(filepath.Base(path)); err == nil { - layerNums = append(layerNums, layerNum) + name := filepath.Base(path) + if strings.HasPrefix(name, "rm-") { + layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-")) + if err != nil { + return err + } + rmLayerNums = append(rmLayerNums, layerNum) } else { - return err + layerNum, err := strconv.Atoi(name) + if err != nil { + return err + } + layerNums = append(layerNums, layerNum) } return filepath.SkipDir } @@ -295,8 +303,14 @@ func cleanupWCOWLayers(root string) error { return err } - sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) + sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums))) + for _, rmLayerNum := range rmLayerNums { + if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil { + return err + } + } + sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) for _, layerNum := range layerNums { if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { return err @@ -311,19 +325,20 @@ func cleanupWCOWLayer(layerPath string) error { HomeDir: filepath.Dir(layerPath), } - // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared. + // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated. + // ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared. 
if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { - if hcserror, ok := err.(*hcsshim.HcsError); !ok || hcserror.Err != windows.ERROR_DEV_NOT_EXIST { - return errors.Wrapf(err, "failed to unprepare %s", layerPath) + if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) { + return fmt.Errorf("failed to unprepare %s: %w", layerPath, err) } } if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to deactivate %s", layerPath) + return fmt.Errorf("failed to deactivate %s: %w", layerPath, err) } if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to destroy %s", layerPath) + return fmt.Errorf("failed to destroy %s: %w", layerPath, err) } return nil diff --git a/vendor/github.com/containerd/containerd/sys/mount_linux.go b/vendor/github.com/containerd/containerd/sys/mount_linux.go deleted file mode 100644 index a21045529ae..00000000000 --- a/vendor/github.com/containerd/containerd/sys/mount_linux.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "runtime" - "syscall" - "unsafe" - - "github.com/containerd/containerd/log" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -// FMountat performs mount from the provided directory. 
-func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error { - var ( - sourceP, targetP, fstypeP, dataP *byte - pid uintptr - err error - errno, status syscall.Errno - ) - - sourceP, err = syscall.BytePtrFromString(source) - if err != nil { - return err - } - - targetP, err = syscall.BytePtrFromString(target) - if err != nil { - return err - } - - fstypeP, err = syscall.BytePtrFromString(fstype) - if err != nil { - return err - } - - if data != "" { - dataP, err = syscall.BytePtrFromString(data) - if err != nil { - return err - } - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - var pipefds [2]int - if err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil { - return errors.Wrap(err, "failed to open pipe") - } - - defer func() { - // close both ends of the pipe in a deferred function, since open file - // descriptor table is shared with child - syscall.Close(pipefds[0]) - syscall.Close(pipefds[1]) - }() - - pid, errno = forkAndMountat(dirfd, - uintptr(unsafe.Pointer(sourceP)), - uintptr(unsafe.Pointer(targetP)), - uintptr(unsafe.Pointer(fstypeP)), - flags, - uintptr(unsafe.Pointer(dataP)), - pipefds[1], - ) - - if errno != 0 { - return errors.Wrap(errno, "failed to fork thread") - } - - defer func() { - _, err := unix.Wait4(int(pid), nil, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), nil, 0, nil) - } - - if err != nil { - log.L.WithError(err).Debugf("failed to find pid=%d process", pid) - } - }() - - _, _, errno = syscall.RawSyscall(syscall.SYS_READ, - uintptr(pipefds[0]), - uintptr(unsafe.Pointer(&status)), - unsafe.Sizeof(status)) - if errno != 0 { - return errors.Wrap(errno, "failed to read pipe") - } - - if status != 0 { - return errors.Wrap(status, "failed to mount") - } - - return nil -} - -// forkAndMountat will fork thread, change working dir and mount. -// -// precondition: the runtime OS thread must be locked. -func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr, pipefd int) (pid uintptr, errno syscall.Errno) { - - // block signal during clone - beforeFork() - - // the cloned thread shares the open file descriptor, but the thread - // never be reused by runtime. - pid, _, errno = syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|syscall.CLONE_FILES, 0, 0, 0, 0, 0) - if errno != 0 || pid != 0 { - // restore all signals - afterFork() - return - } - - // restore all signals - afterForkInChild() - - // change working dir - _, _, errno = syscall.RawSyscall(syscall.SYS_FCHDIR, dirfd, 0, 0) - if errno != 0 { - goto childerr - } - _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, source, target, fstype, flags, data, 0) - -childerr: - _, _, errno = syscall.RawSyscall(syscall.SYS_WRITE, uintptr(pipefd), uintptr(unsafe.Pointer(&errno)), unsafe.Sizeof(errno)) - syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) - panic("unreachable") -} diff --git a/vendor/github.com/containerd/containerd/sys/oom_linux.go b/vendor/github.com/containerd/containerd/sys/oom_linux.go index 82a347c6f72..bb2a3eafb4f 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_linux.go +++ b/vendor/github.com/containerd/containerd/sys/oom_linux.go @@ -18,7 +18,6 @@ package sys import ( "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -69,7 +68,7 @@ func SetOOMScore(pid, score int) error { // no oom score is set, or a sore is set to 0. 
func GetOOMScoreAdj(pid int) (int, error) { path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go index f5d7e9786b8..fa0db5a10e0 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go +++ b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go index 0033178dfa7..6c4f13b9088 100644 --- a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go +++ b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,12 +20,14 @@ package reaper import ( - "os/exec" + "errors" + "fmt" "sync" + "syscall" "time" runc "github.com/containerd/go-runc" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -115,6 +118,38 @@ func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) { return -1, ErrNoSuchProcess } +// WaitTimeout is used to skip the blocked command and kill the leftover process. +func (m *Monitor) WaitTimeout(c *exec.Cmd, ec chan runc.Exit, timeout time.Duration) (int, error) { + type exitStatusWrapper struct { + status int + err error + } + + // the capacity makes sure that the following goroutine will not be + // blocked if there is no receiver when the timeout fires. + waitCh := make(chan *exitStatusWrapper, 1) + go func() { + defer close(waitCh) + + status, err := m.Wait(c, ec) + waitCh <- &exitStatusWrapper{ + status: status, + err: err, + } + }() + + timer := time.NewTimer(timeout) + defer timer.Stop() + + select { + case <-timer.C: + syscall.Kill(c.Process.Pid, syscall.SIGKILL) + return 0, fmt.Errorf("timeout %v for cmd(pid=%d): %s, %s", timeout, c.Process.Pid, c.Path, c.Args) + case res := <-waitCh: + return res.status, res.err + } +} + // Subscribe to process exit changes func (m *Monitor) Subscribe() chan runc.Exit { c := make(chan runc.Exit, bufferSize) diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go index b67cc1fa3a7..367e19cad84 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,11 +20,11 @@ package sys import ( + "fmt" "net" "os" "path/filepath" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -31,7 +32,7 @@ import ( func CreateUnixSocket(path string) (net.Listener, error) { // BSDs have a 104 limit if len(path) > 104 { - return nil, errors.Errorf("%q: unix socket path too long (> 104)", path) + return nil, fmt.Errorf("%q: unix socket path too long (> 104)", path) } if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil { return nil, err diff --git a/vendor/github.com/containerd/containerd/sys/socket_windows.go b/vendor/github.com/containerd/containerd/sys/socket_windows.go index 3ee7679b49b..1ae12bc5114 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_windows.go +++ b/vendor/github.com/containerd/containerd/sys/socket_windows.go @@ -1,5 +3 @@ -// +build windows - /* Copyright The containerd Authors.
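The new Monitor.WaitTimeout above bounds how long a caller waits for a reaped command before sending it SIGKILL. A short usage sketch, assuming the reaper package's existing Default monitor and Monitor.Start (neither is shown in this hunk) and an illustrative command:

```go
package main

import (
	"log"
	"time"

	"github.com/containerd/containerd/sys/reaper"
	exec "golang.org/x/sys/execabs"
)

func main() {
	cmd := exec.Command("runc", "--version") // illustrative command

	// Start registers the command with the monitor so that its exit
	// status is delivered on ec once the SIGCHLD reaper collects it.
	ec, err := reaper.Default.Start(cmd)
	if err != nil {
		log.Fatal(err)
	}

	// WaitTimeout returns the exit status, or kills the process and
	// returns an error if it has not exited within five seconds.
	status, err := reaper.Default.WaitTimeout(cmd, ec, 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("exit status: %d", status)
}
```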
diff --git a/vendor/github.com/containerd/containerd/sys/stat_bsd.go b/vendor/github.com/containerd/containerd/sys/stat_bsd.go deleted file mode 100644 index 4f03cd6cb0f..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_bsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build darwin freebsd netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the access time from a stat struct -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atimespec -} - -// StatCtime returns the created time from a stat struct -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctimespec -} - -// StatMtime returns the modified time from a stat struct -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtimespec -} - -// StatATimeAsTime returns the access time as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/sys/stat_openbsd.go b/vendor/github.com/containerd/containerd/sys/stat_openbsd.go deleted file mode 100644 index ec3b9df69aa..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_openbsd.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - // The int64 conversions ensure the line compiles for 32-bit systems as well. - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/sys/stat_unix.go b/vendor/github.com/containerd/containerd/sys/stat_unix.go deleted file mode 100644 index 21a666dff86..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_unix.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build linux solaris - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index b8200307f3a..b5e00c5d820 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.5.8+unknown" + Version = "1.6.4+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 708b2668990..0da3efe4c21 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -26,9 +26,10 @@ import ( "archive/tar" "bytes" "compress/gzip" + "context" + "errors" "fmt" "io" - "io/ioutil" "os" "path" "runtime" @@ -38,7 +39,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -48,6 +48,7 @@ type options struct { prioritizedFiles []string missedPrioritizedFiles *[]string compression Compression + ctx context.Context } type Option func(o *options) error @@ -104,6 +105,14 @@ func WithCompression(compression Compression) Option { } } +// WithContext specifies a context that can be used for clean cancellation. +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.ctx = ctx + return nil + } +} + // Blob is an eStargz blob.
type Blob struct { io.ReadCloser @@ -139,12 +148,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) } layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() defer func() { if rErr != nil { if err := layerFiles.CleanupAll(); err != nil { - rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err) + rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) } } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } }() tarBlob, err := decompressBlob(tarBlob, layerFiles) if err != nil { @@ -307,7 +333,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri // Import tar file. intar, err := importTar(in) if err != nil { - return nil, errors.Wrap(err, "failed to sort") + return nil, fmt.Errorf("failed to sort: %w", err) } // Sort the tar file respecting to the prioritized files list. @@ -318,7 +344,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri *missedPrioritized = append(*missedPrioritized, l) continue // allow not found } - return nil, errors.Wrap(err, "failed to sort tar entries") + return nil, fmt.Errorf("failed to sort tar entries: %w", err) } } if len(prioritized) == 0 { @@ -371,7 +397,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) { tf := &tarFile{} pw, err := newCountReader(in) if err != nil { - return nil, errors.Wrap(err, "failed to make position watcher") + return nil, fmt.Errorf("failed to make position watcher: %w", err) } tr := tar.NewReader(pw) @@ -383,7 +409,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) { if err == io.EOF { break } else { - return nil, errors.Wrap(err, "failed to parse tar file") + return nil, fmt.Errorf("failed to parse tar file, %w", err) } } switch cleanEntryName(h.Name) { @@ -420,7 +446,7 @@ func moveRec(name string, in *tarFile, out *tarFile) error { _, okIn := in.get(name) _, okOut := out.get(name) if !okIn && !okOut { - return errors.Wrapf(errNotFound, "file: %q", name) + return fmt.Errorf("file: %q: %w", name, errNotFound) } parent, _ := path.Split(strings.TrimSuffix(name, "/")) @@ -506,12 +532,13 @@ func newTempFiles() *tempFiles { } type tempFiles struct { - files []*os.File - filesMu sync.Mutex + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once } func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { - f, err := ioutil.TempFile(dir, pattern) + f, err := os.CreateTemp(dir, pattern) if err != nil { return nil, err } @@ -521,7 +548,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { return f, nil } -func (tf *tempFiles) CleanupAll() error { +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { tf.filesMu.Lock() defer tf.filesMu.Unlock() var allErr []error diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index e56319545e6..921e59ec6ef 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -27,10 +27,10 @@ import ( "bytes" "compress/gzip" "crypto/sha256" + 
"errors" "fmt" "hash" "io" - "io/ioutil" "os" "path" "sort" @@ -40,7 +40,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/vbatts/tar-split/archive/tar" ) @@ -107,7 +106,7 @@ type Telemetry struct { } // Open opens a stargz file for reading. -// The behaviour is configurable using options. +// The behavior is configurable using options. // // Note that each entry name is normalized as the path that is relative to root. func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { @@ -385,8 +384,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) { if e.Digest != "" { d, err := digest.Parse(e.Digest) if err != nil { - return nil, errors.Wrapf(err, - "failed to parse regular file digest %q", e.Digest) + return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err) } regDigestMap[e.Offset] = d } else { @@ -401,8 +399,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) { if e.ChunkDigest != "" { d, err := digest.Parse(e.ChunkDigest) if err != nil { - return nil, errors.Wrapf(err, - "failed to parse chunk digest %q", e.ChunkDigest) + return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err) } chunkDigestMap[e.Offset] = d } else { @@ -581,7 +578,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) } return io.ReadFull(dr, p) @@ -647,7 +644,7 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { } blobPayloadSize, _, _, err := c.ParseFooter(footer) if err != nil { - return nil, errors.Wrapf(err, "failed to parse footer") + return nil, fmt.Errorf("failed to parse footer: %w", err) } return c.Reader(io.LimitReader(sr, blobPayloadSize)) } @@ -935,7 +932,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } } } - remainDest := ioutil.Discard + remainDest := io.Discard if lossless { remainDest = dst // Preserve the remaining bytes in lossless mode } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index 7330849cb89..591d7a62e11 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -34,7 +34,6 @@ import ( "strconv" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type gzipCompression struct { @@ -150,7 +149,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t } tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset") + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) } return tocOffset, tocOffset, 0, nil } @@ -179,7 +178,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff } zr, err := gzip.NewReader(bytes.NewReader(p)) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader") + return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) } defer zr.Close() extra := zr.Header.Extra @@ -191,7 +190,7 @@ func (gz 
*LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff } tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset") + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) } return tocOffset, tocOffset, 0, nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 9224e456dde..8f27dfb3ea2 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -28,9 +28,9 @@ import ( "compress/gzip" "crypto/sha256" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "os" "reflect" "sort" @@ -41,7 +41,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // TestingController is Compression with some helper methods necessary for testing. @@ -287,11 +286,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return false } - aFile, err := ioutil.ReadAll(aTar) + aFile, err := io.ReadAll(aTar) if err != nil { t.Fatal("failed to read tar payload of A") } - bFile, err := ioutil.ReadAll(bTar) + bFile, err := io.ReadAll(bTar) if err != nil { t.Fatal("failed to read tar payload of B") } @@ -1062,18 +1061,18 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT fSize := controller.FooterSize() footer := make([]byte, fSize) if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil { - return nil, 0, errors.Wrap(err, "error reading footer") + return nil, 0, fmt.Errorf("error reading footer: %w", err) } _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) if err != nil { - return nil, 0, errors.Wrapf(err, "failed to parse footer") + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) } // Decode the TOC JSON tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) decodedJTOC, _, err = controller.ParseTOC(tocReader) if err != nil { - return nil, 0, errors.Wrap(err, "failed to parse TOC") + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) } return decodedJTOC, tocOffset, nil } diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index 8bad5b1115e..ccf7be53a89 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -6,7 +6,7 @@ env: #### Global variables used for all tasks #### # Name of the ultimate destination branch for this CI run, PR or post-merge. 
- DEST_BRANCH: "release-1.23" + DEST_BRANCH: "main" GOPATH: "/var/tmp/go" GOSRC: "${GOPATH}/src/github.com/containers/buildah" # Overrides default location (/tmp/cirrus) for repo clone @@ -19,22 +19,22 @@ env: CIRRUS_CLONE_DEPTH: 50 # Unless set by in_podman.sh, default to operating outside of a podman container IN_PODMAN: 'false' + # root or rootless + PRIV_NAME: root #### #### Cache-image names to test with #### # GCE project where images live IMAGE_PROJECT: "libpod-218412" - FEDORA_NAME: "fedora-34" - PRIOR_FEDORA_NAME: "fedora-33" - UBUNTU_NAME: "ubuntu-2104" - PRIOR_UBUNTU_NAME: "ubuntu-2010" + FEDORA_NAME: "fedora-36" + PRIOR_FEDORA_NAME: "fedora-35" + UBUNTU_NAME: "ubuntu-2110" - IMAGE_SUFFIX: "c6248193773010944" + IMAGE_SUFFIX: "c4955393725038592" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" - PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX}" IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}" @@ -76,7 +76,6 @@ meta_task: ${FEDORA_CACHE_IMAGE_NAME} ${PRIOR_FEDORA_CACHE_IMAGE_NAME} ${UBUNTU_CACHE_IMAGE_NAME} - ${PRIOR_UBUNTU_CACHE_IMAGE_NAME} BUILDID: "${CIRRUS_BUILD_ID}" REPOREF: "${CIRRUS_CHANGE_IN_REPO}" GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14] @@ -131,13 +130,35 @@ vendor_task: - './hack/tree_status.sh' +# Confirm cross-compile ALL architectures on a Mac OS-X VM. +cross_build_task: + name: "Cross Compile" + alias: cross_build + only_if: ¬_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' + + osx_instance: + image: 'big-sur-base' + + script: + - brew update + - brew install go + - brew install go-md2man + - brew install gpgme + - go version + - make cross CGO_ENABLED=0 + + binary_artifacts: + path: ./bin/* + + unit_task: name: 'Unit tests w/ $STORAGE_DRIVER' alias: unit - only_if: ¬_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' - depends_on: + only_if: *not_docs + depends_on: &smoke_vendor_cross - smoke - vendor + - cross_build timeout_in: 1h @@ -159,8 +180,7 @@ conformance_task: name: 'Build Conformance w/ $STORAGE_DRIVER' alias: conformance only_if: *not_docs - depends_on: - - unit + depends_on: *smoke_vendor_cross gce_instance: image_name: "${UBUNTU_CACHE_IMAGE_NAME}" @@ -177,85 +197,11 @@ conformance_task: conformance_test_script: '${SCRIPT_BASE}/test.sh conformance |& ${_TIMESTAMP}' -# Confirm cross-compile ALL architectures on a Mac OS-X VM. 
-cross_build_task: - name: "Cross Compile" - alias: cross_build - only_if: *not_docs - depends_on: - - unit - - osx_instance: - image: 'big-sur-base' - - script: - - brew update - - brew install go - - brew install go-md2man - - brew install gpgme - - go version - - make cross CGO_ENABLED=0 - - binary_artifacts: - path: ./bin/* - - -static_build_task: - name: "Static Build" - alias: static_build - only_if: *not_docs - depends_on: - - unit - - gce_instance: - image_name: "${FEDORA_CACHE_IMAGE_NAME}" - cpu: 8 - memory: 12 - disk: 200 - - env: - NIX_FQIN: "docker.io/nixos/nix:latest" - - init_script: | - set -ex - setenforce 0 - growpart /dev/sda 1 || true - resize2fs /dev/sda1 || true - yum -y install podman - - nix_cache: - folder: '.cache' - fingerprint_script: cat nix/* - - build_script: | - set -ex - mkdir -p .cache - mv .cache /nix - if [[ -z $(ls -A /nix) ]]; then - podman run --rm --privileged -i -v /:/mnt \ - $NIX_FQIN \ - cp -rfT /nix /mnt/nix - fi - podman run --rm --privileged -i -v /nix:/nix \ - -v ${PWD}:${PWD} -w ${PWD} \ - $NIX_FQIN \ - nix --print-build-logs --option cores 8 \ - --option max-jobs 8 build --file nix/ - - binaries_artifacts: - path: "result/bin/buildah" - - save_cache_script: | - mv /nix .cache - chown -Rf $(whoami) .cache - - integration_task: name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER" alias: integration only_if: *not_docs - depends_on: - - unit + depends_on: *smoke_vendor_cross matrix: # VFS @@ -271,10 +217,6 @@ integration_task: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'vfs' - - env: - DISTRO_NV: "${PRIOR_UBUNTU_NAME}" - IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'vfs' # OVERLAY - env: DISTRO_NV: "${FEDORA_NAME}" @@ -288,10 +230,6 @@ integration_task: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' - - env: - DISTRO_NV: "${PRIOR_UBUNTU_NAME}" - IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'overlay' gce_instance: image_name: "$IMAGE_NAME" @@ -314,13 +252,50 @@ integration_task: package_versions_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh packages' golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang' +integration_rootless_task: + name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER" + alias: integration_rootless + only_if: *not_docs + depends_on: *smoke_vendor_cross + + matrix: + # Running rootless tests on overlay + # OVERLAY + - env: + DISTRO_NV: "${FEDORA_NAME}" + IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + PRIV_NAME: rootless + - env: + DISTRO_NV: "${PRIOR_FEDORA_NAME}" + IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + PRIV_NAME: rootless + - env: + DISTRO_NV: "${UBUNTU_NAME}" + IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + PRIV_NAME: rootless + + gce_instance: + image_name: "$IMAGE_NAME" + + # Separate scripts for separate outputs, makes debugging easier. + setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' + build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}' + integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}' + + binary_artifacts: + path: ./bin/* + + always: + <<: *standardlogs in_podman_task: name: "Containerized Integration" alias: in_podman only_if: *not_docs - depends_on: - - unit + depends_on: *smoke_vendor_cross env: # This is key, cause the scripts to re-execute themselves inside a container. 
@@ -356,7 +331,6 @@ success_task: - cross_build - integration - in_podman - - static_build container: image: "quay.io/libpod/alpine:latest" diff --git a/vendor/github.com/containers/buildah/.gitignore b/vendor/github.com/containers/buildah/.gitignore index d98205316c2..939ce6ef52d 100644 --- a/vendor/github.com/containers/buildah/.gitignore +++ b/vendor/github.com/containers/buildah/.gitignore @@ -1,11 +1,12 @@ docs/buildah*.1 +docs/*.5 /bin /buildah /imgtype /build/ -tests/tools/build +/tests/tools/build Dockerfile* !/tests/bud/*/Dockerfile* !/tests/conformance/**/Dockerfile* *.swp -result +/result/ diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml index 0c7e3100793..af0b10c76b2 100644 --- a/vendor/github.com/containers/buildah/.golangci.yml +++ b/vendor/github.com/containers/buildah/.golangci.yml @@ -7,18 +7,7 @@ run: # Don't exceed number of threads available when running under CI concurrency: 4 linters: - enable-all: true - disable: - # All these break for one reason or another - - dupl - - funlen - - gochecknoglobals - - gochecknoinits - - goconst - - gocritic - - gocyclo - - gosec - - lll - - maligned - - prealloc - - scopelint + enable: + - revive + - unconvert + - unparam diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 685c4f4d36a..36fa66893bb 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,10 +2,318 @@ # Changelog -## v1.23.1 (2021-09-27) +## v1.26.1 (2022-05-04) - Vendor containers/common v0.44.2 - post-1.23 branch fixups + Make `buildah build --label foo` create an empty "foo" label again + Bump to v1.27.0-dev + +## v1.26.0 (2022-05-04) + + imagebuildah,build: move deepcopy of args before we spawn goroutine + Vendor in containers/storage v1.40.2 + buildah.BuilderOptions.DefaultEnv is ignored, so mark it as deprecated + help output: get more consistent about option usage text + Handle OS version and features flags + buildah build: --annotation and --label should remove values + buildah build: add a --env + buildah: deep copy options.Args before performing concurrent build/stage + test: inline platform and builtinargs behaviour + vendor: bump imagebuilder to master/009dbc6 + build: automatically set correct TARGETPLATFORM where expected + build(deps): bump github.com/fsouza/go-dockerclient + Vendor in containers/(common, storage, image) + imagebuildah, executor: process arg variables while populating baseMap + buildkit: add support for custom build output with --output + Cirrus: Update CI VMs to F36 + fix staticcheck linter warning for deprecated function + Fix docs build on FreeBSD + build(deps): bump github.com/containernetworking/cni from 1.0.1 to 1.1.0 + copier.unwrapError(): update for Go 1.16 + copier.PutOptions: add StripSetuidBit/StripSetgidBit/StripStickyBit + copier.Put(): write to read-only directories + build(deps): bump github.com/cpuguy83/go-md2man/v2 in /tests/tools + Rename $TESTSDIR (the plural one), step 4 of 3 + Rename $TESTSDIR (the plural one), step 3 of 3 + Rename $TESTSDIR (the plural one), step 2 of 3 + Rename $TESTSDIR (the plural one), step 1 of 3 + build(deps): bump github.com/containerd/containerd from 1.6.2 to 1.6.3 + Ed's periodic test cleanup + using consistent lowercase 'invalid' word in returned err msg + Update vendor of containers/(common,storage,image) + use etchosts package from c/common + run: set actual hostname in /etc/hostname 
to match docker parity + update c/common to latest main + Update vendor of containers/(common,storage,image) + Stop littering + manifest-create: allow creating manifest list from local image + Update vendor of storage,common,image + Bump golang.org/x/crypto to 7b82a4e + Initialize network backend before first pull + oci spec: change special mount points for namespaces + tests/helpers.bash: assert handle corner cases correctly + buildah: actually use containers.conf settings + integration tests: learn to start a dummy registry + Fix error check to work on Podman + buildah build should accept at most one arg + tests: reduce concurrency for flaky bud-multiple-platform-no-run + vendor in latest containers/common,image,storage + manifest-add: allow override arch,variant while adding image + Remove a stray `\` from .containerenv + Vendor in latest opencontainers/selinux v1.10.1 + build, commit: allow removing default identity labels + Create shorter names for containers based on image IDs + test: skip rootless on cgroupv2 in root env + fix hang when oci runtime fails + Set permissions for GitHub actions + copier test: use correct UID/GID in test archives + run: set parent-death signals and forward SIGHUP/SIGINT/SIGTERM + Bump back to v1.26.0-dev + build(deps): bump github.com/opencontainers/runc from 1.1.0 to 1.1.1 + Included the URL to check the SHA + +## v1.25.1 (2022-03-30) + + buildah: create WORKDIR with USER permissions + vendor: update github.com/openshift/imagebuilder + copier: attempt to open the dir before adding it + Updated dependabot to get updates for GitHub actions. + Switch most calls to filepath.Walk to filepath.WalkDir + build: allow --no-cache and --layers so build cache can be overrided + build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0 + Bump to v1.26.0-dev + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + +## v1.25.0 (2022-03-25) + + install: drop RHEL/CentOS 7 doc + build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5 + Bump c/storage to v1.39.0 in main + Add a test for CVE-2022-27651 + build(deps): bump github.com/docker/docker + Bump github.com/prometheus/client_golang to v1.11.1 + [CI:DOCS] man pages: sort flags, and keep them that way + build(deps): bump github.com/containerd/containerd from 1.6.1 to 1.6.2 + Don't pollute + network setup: increase timeout to 4 minutes + do not set the inheritable capabilities + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + build(deps): bump github.com/containers/ocicrypt from 1.1.2 to 1.1.3 + parse: convert exposed GetVolumes to internal only + buildkit: mount=type=cache support locking external cache store + .in support: improve error message when cpp is not installed + buildah image: install cpp + build(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1 + build(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0 + build(deps): bump github.com/docker/docker + Add --no-hosts flag to eliminate use of /etc/hosts within containers + test: remove skips for rootless users + test: unshare mount/umount if test is_rootless + tests/copy: read correct containers.conf + build(deps): bump github.com/docker/distribution + cirrus: add seperate task and matrix for rootless + tests: skip tests for rootless which need unshare + buildah: test rootless integration + vendor: bump c/storage to main/93ce26691863 + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.9 to 1.7.10 + tests/copy: initialize the network, too + [CI:DOCS] remove references to Kubic for 
CentOS and Ubuntu + build(deps): bump github.com/containerd/containerd from 1.6.0 to 1.6.1 + use c/image/pkg/blobcache + vendor c/image/v5@v5.20.0 + add: ensure the context directory is an absolute path + executor: docker builds must inherit healthconfig from base if any + docs: Remove Containerfile and containeringore + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.8 to 1.7.9 + helpers.bash: Use correct syntax + speed up combination-namespaces test + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + Bump back to 1.25.0-dev + build(deps): bump github.com/containerd/containerd from 1.5.9 to 1.6.0 + +## v1.24.2 (2022-02-16) + + Increase subuid/subgid to 65535 + history: only add proxy vars to history if specified + run_linux: use --systemd-cgroup + buildah: new global option --cgroup-manager + Makefile: build with systemd when available + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8 + Bump c/common to v0.47.4 + Cirrus: Use updated VM images + conformance: add a few "replace-directory-with-symlink" tests + Bump back to v1.25.0-dev + +## v1.24.1 (2022-02-03) + + executor: Add support for inline --platform within Dockerfile + caps: fix buildah run --cap-add=all + Update vendor of openshift/imagebuilder + Bump version of containers/image and containers/common + Update vendor of containers/common + System tests: fix accidental vandalism of source dir + build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2 + imagebuildah.BuildDockerfiles(): create the jobs semaphore + build(deps): bump github.com/onsi/gomega from 1.18.0 to 1.18.1 + overlay: always honor mountProgram + overlay: move mount program invocation to separate function + overlay: move mount program lookup to separate function + Bump to v1.25.0-dev [NO TESTS NEEDED] + +## v1.24.0 (2022-01-26) + + Update vendor of containers/common + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + Github-workflow: Report both failures and errors. 
+ build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0 + Update docs/buildah-build.1.md + [CI:DOCS] Fix typos and improve language + buildah bud --network add support for custom networks + Make pull commands be consistent + docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing + build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0 + Vendor in latest containers/image + Run codespell on code + .github/dependabot.yml: add tests/tools go.mod + CI: rm git-validation, add GHA job to validate PRs + tests/tools: bump go-md2man to v2.0.1 + tests/tools/Makefile: simplify + tests/tools: bump onsi/ginkgo to v1.16.5 + vendor: bump c/common and others + mount: add support for custom upper and workdir with overlay mounts + linux: fix lookup for runtime + overlay: add MountWithOptions to API which extends support for advanced overlay + Allow processing of SystemContext from FlagSet + .golangci.yml: enable unparam linter + util/resolveName: rm bool return + tests/tools: bump golangci-lint + .gitignore: fixups + all: fix capabilities.NewPid deprecation warnings + bind/mount.go: fix linter comment + all: fix gosimple warning S1039 + tests/e2e/buildah_suite_test.go: fix gosimple warnings + imagebuildah/executor.go: fix gosimple warning + util.go: fix gosimple warning + build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0 + Enable git-daemon tests + Allow processing of id options from FlagSet + Cirrus: Re-order tasks for more parallelism + Cirrus: Freshen VM images + Fix platform handling for empty os/arch values + Allow processing of network options from FlagSet + Fix permissions on secrets directory + Update containers/image and containers/common + bud.bats: use a local git daemon for the git protocol test + Allow processing of common options from FlagSet + Cirrus: Run int. tests in parallel with unit + vendor c/common + Fix default CNI paths + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7 + multi-stage: enable mounting stages across each other with selinux enabled + executor: Share selinux label of first stage with other stages in a build + buildkit: add from field to bind and cache mounts so images can be used as source + Use config.ProxyEnv from containers/common + use libnetwork from c/common for networking + setup the netns in the buildah parent process + build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9 + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6 + build: fix libsubid test + Allow callers to replace the ContainerSuffix + parse: allow parsing anomaly non-human value for memory control group + .cirrus: remove static_build from ci + stage_executor: re-use all possible layers from cache for squashed builds + build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0 + Allow rootless buildah to set resource limits on cgroup V2 + build(deps): bump github.com/docker/docker + tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification + build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3 + Wire logger through to config + copier.Put: check for is-not-a-directory using lstat, not stat + Turn on rootless cgroupv2 tests + Grab all of the containers.conf settings for namespaces. 
+ image: set MediaType in OCI manifests + copier: RemoveAll possibly-directories + Simple README fix + images: accept multiple filter with logical AND + build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1 + UPdate vendor of container/storage + build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0 + build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0 + Make LocalIP public function so Podman can use it + Fix UnsetEnv for buildah bud + Tests should rely only on static/unchanging images + run: ensure that stdio pipes are labeled correctly + build(deps): bump github.com/docker/docker + Cirrus: Bump up to Fedora 35 & Ubuntu 21.10 + chroot: don't use the generate default seccomp filter for unit tests + build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8 + ssh-agent: Increase timeout before we explicitly close connection + docs/tutorials: update + Clarify that manifest defaults to localhost as the registry name + "config": remove a stray bit of debug output + "commit": fix a flag typo + Fix an error message: unlocking vs locking + Expand the godoc for CommonBuildOptions.Secrets + chroot: accept an "rw" option + Add --unsetenv option to buildah commit and build + define.TempDirForURL(): show CombinedOutput when a command fails + config: support the variant field + rootless: do not bind mount /sys if not needed + Fix tutorial to specify command on buildah run line + build: history should not contain ARG values + docs: Use guaranteed path for go-md2man + run: honor --network=none from builder if nothing specified + networkpolicy: Should be enabled instead of default when explictly set + Add support for env var secret sources + build(deps): bump github.com/docker/docker + fix: another non-portable shebang + Rootless containers users should use additional groups + Support overlayfs path contains colon + Report ignorefile location when no content added + Add support for host.containers.internal in the /etc/hosts + build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5 + imagebuildah: fix nil deref + buildkit: add support for mount=type=cache + Default secret mode to 400 + [CI:DOCS] Include manifest example usage + docs: update buildah-from, buildah-pull 'platform' option compatibility notes + docs: update buildah-build 'platform' option compatibility notes + De-dockerize the man page as much as possible + [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st + docs: Fix and Update Containerfile man page with supported mount types + mount: add tmpcopyup to tmpfs mount option + buildkit: Add support for --mount=type=tmpfs + build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1 + Fix command doc links in README.md + build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1 + build: Add support for buildkit like --mount=type=bind + Bump containerd to v1.5.7 + build(deps): bump github.com/docker/docker + tests: stop pulling php, composer + Fix .containerignore link file + Cirrus: Fix defunct package metadata breaking cache + build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0 + buildah build: add --all-platforms + Add man page for Containerfile and .containerignore + Plumb the remote logger throughut Buildah + Replace fmt.Sprintf("%d", x) with strconv.Itoa(x) + Run: Cleanup run directory after every RUN step + build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0 + Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation + Makefile: 
check for `-race` using `-mod=vendor` + imagebuildah: fix an attempt to write to a nil map + push: support to specify the compression format + conformance: allow test cases to specify dockerUseBuildKit + build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0 + build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1 + unmarshalConvertedConfig(): handle zstd compression + tests/copy/copy: wire up compression options + Update to github.com/vbauerster/mpb v7.1.5 + Add flouthoc to OWNERS + build: Add additional step nodes when labels are modified + Makefile: turn on race detection whenever it's available + conformance: add more tests for exclusion short-circuiting + Update VM Images + Drop prior-ubuntu testing + Bump to v1.24.0-dev ## v1.23.0 (2021-09-13) diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index 27e4ade6d67..95ce322b81a 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -3,7 +3,7 @@ export GOPROXY=https://proxy.golang.org APPARMORTAG := $(shell hack/apparmor_tag.sh) STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) SECURITYTAGS ?= seccomp $(APPARMORTAG) -TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) +TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) BUILDTAGS += $(TAGS) PREFIX := /usr/local BINDIR := $(PREFIX)/bin @@ -12,6 +12,8 @@ BUILDFLAGS := -tags "$(BUILDTAGS)" BUILDAH := buildah GO := go +GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi) +GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi) GO110 := 1.10 GOVERSION := $(findstring $(GO110),$(shell go version)) # test for go module support @@ -22,6 +24,7 @@ else export GO_BUILD=$(GO) build export GO_TEST=$(GO) test endif +RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race) GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) @@ -33,8 +36,8 @@ RUNC_COMMIT := v1.0.0-rc8 LIBSECCOMP_COMMIT := release-2.3 EXTRA_LDFLAGS ?= -BUILDAH_LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' -SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go docker/*.go manifests/*.go pkg/blobcache/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go +BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go LINTFLAGS ?= @@ -65,14 +68,15 @@ static: cp -rfp ./result/bin/* ./bin/ bin/buildah: $(SOURCES) cmd/buildah/*.go - $(GO_BUILD) $(BUILDAH_LDFLAGS) -gcflags "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah + $(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ 
$(BUILDFLAGS) ./cmd/buildah .PHONY: buildah buildah: bin/buildah -LINUX_CROSS_TARGETS = $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^linux/))) -DARWIN_CROSS_TARGETS = $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^darwin/))) -WINDOWS_CROSS_TARGETS = $(addsuffix .exe,$(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^windows/)))) +ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list))) +LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS)) +DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS)) +WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS))) .PHONY: cross cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) @@ -107,7 +111,6 @@ codespell: .PHONY: validate validate: install.tools ./tests/validate/whitespace.sh - ./tests/validate/git-validation.sh ./hack/xref-helpmsgs-manpages ./tests/validate/pr-should-include-tests ./tests/validate/buildahimages-are-sane @@ -164,14 +167,14 @@ test-integration: install.tools cd tests; ./test_runner.sh tests/testreport/testreport: tests/testreport/testreport.go - $(GO_BUILD) -ldflags "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go + $(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go .PHONY: test-unit test-unit: tests/testreport/testreport - $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... 
| grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m tmp=$(shell mktemp -d) ; \ mkdir -p $$tmp/root $$tmp/runroot; \ - $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf vendor-in-container: podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.16 make vendor diff --git a/vendor/github.com/containers/buildah/OWNERS b/vendor/github.com/containers/buildah/OWNERS index 1e070e396e6..2eb9ea84c3e 100644 --- a/vendor/github.com/containers/buildah/OWNERS +++ b/vendor/github.com/containers/buildah/OWNERS @@ -2,6 +2,7 @@ approvers: - TomSweeneyRedHat - ashley-cui - cevich + - flouthoc - giuseppe - lsm5 - nalind diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md index 95c8a9a7b35..512d3f87368 100644 --- a/vendor/github.com/containers/buildah/README.md +++ b/vendor/github.com/containers/buildah/README.md @@ -77,7 +77,9 @@ From [`./examples/lighttpd.sh`](examples/lighttpd.sh): ```bash $ cat > lighttpd.sh <<"EOF" -#!/usr/bin/env bash -x +#!/usr/bin/env bash + +set -x ctr1=$(buildah from "${1:-fedora}") @@ -103,27 +105,27 @@ $ sudo ./lighttpd.sh ## Commands | Command | Description | | ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | -| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. | -| [buildah-build(1)](/docs/buildah-build.md) | Build an image using instructions from Containerfiles or Dockerfiles. | -| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. | -| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. | -| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. | -| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. | -| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. | -| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. | -| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. | -| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. | -| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. | -| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. | -| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. | -| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. | -| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. | -| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. | -| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. 
| -| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. | -| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. | -| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. | -| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah Version Information | +| [buildah-add(1)](/docs/buildah-add.1.md) | Add the contents of a file, URL, or a directory to the container. | +| [buildah-build(1)](/docs/buildah-build.1.md) | Build an image using instructions from Containerfiles or Dockerfiles. | +| [buildah-commit(1)](/docs/buildah-commit.1.md) | Create an image from a working container. | +| [buildah-config(1)](/docs/buildah-config.1.md) | Update image configuration settings. | +| [buildah-containers(1)](/docs/buildah-containers.1.md) | List the working containers and their base images. | +| [buildah-copy(1)](/docs/buildah-copy.1.md) | Copies the contents of a file, URL, or directory into a container's working directory. | +| [buildah-from(1)](/docs/buildah-from.1.md) | Creates a new working container, either from scratch or using a specified image as a starting point. | +| [buildah-images(1)](/docs/buildah-images.1.md) | List images in local storage. | +| [buildah-info(1)](/docs/buildah-info.1.md) | Display Buildah system information. | +| [buildah-inspect(1)](/docs/buildah-inspect.1.md) | Inspects the configuration of a container or image. | +| [buildah-mount(1)](/docs/buildah-mount.1.md) | Mount the working container's root filesystem. | +| [buildah-pull(1)](/docs/buildah-pull.1.md) | Pull an image from the specified location. | +| [buildah-push(1)](/docs/buildah-push.1.md) | Push an image from local storage to elsewhere. | +| [buildah-rename(1)](/docs/buildah-rename.1.md) | Rename a local container. | +| [buildah-rm(1)](/docs/buildah-rm.1.md) | Removes one or more working containers. | +| [buildah-rmi(1)](/docs/buildah-rmi.1.md) | Removes one or more images. | +| [buildah-run(1)](/docs/buildah-run.1.md) | Run a command inside of the container. | +| [buildah-tag(1)](/docs/buildah-tag.1.md) | Add an additional name to a local image. | +| [buildah-umount(1)](/docs/buildah-umount.1.md) | Unmount a working container's root file system. | +| [buildah-unshare(1)](/docs/buildah-unshare.1.md) | Launch a command in a user namespace with modified ID mappings. | +| [buildah-version(1)](/docs/buildah-version.1.md) | Display the Buildah Version Information | **Future goals include:** * more CI tests diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index f17e3a9c943..8aa53a2920f 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -47,8 +47,10 @@ type AddAndCopyOptions struct { // If the sources include directory trees, Hasher will be passed // tar-format archives of the directory trees. Hasher io.Writer - // Excludes is the contents of the .dockerignore file. + // Excludes is the contents of the .containerignore file. Excludes []string + // IgnoreFile is the path to the .containerignore file. + IgnoreFile string // ContextDir is the base directory for content being copied and // Excludes patterns. 
ContextDir string @@ -199,6 +201,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if err != nil { return errors.Wrapf(err, "error determining current working directory") } + } else { + if !filepath.IsAbs(options.ContextDir) { + contextDir, err = filepath.Abs(options.ContextDir) + if err != nil { + return errors.Wrapf(err, "error converting context directory path %q to an absolute path", options.ContextDir) + } + } } // Figure out what sorts of sources we have. @@ -564,7 +573,11 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption } } if itemsCopied == 0 { - return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out)", localSourceStat.Glob, len(localSourceStat.Globbed)) + excludesFile := "" + if options.IgnoreFile != "" { + excludesFile = " using " + options.IgnoreFile + } + return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out%s)", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile) } } return nil @@ -642,3 +655,37 @@ func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint3 } return owner.UID, owner.GID, nil } + +// EnsureContainerPathAs creates the specified directory owned by USER +// with the file mode set to MODE. +func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + mountPoint, err := b.Mount(b.MountLabel) + if err != nil { + return err + } + defer func() { + if err2 := b.Unmount(); err2 != nil { + logrus.Errorf("error unmounting container: %v", err2) + } + }() + + uid, gid := uint32(0), uint32(0) + if user != "" { + if uidForCopy, gidForCopy, err := b.userForCopy(mountPoint, user); err == nil { + uid = uidForCopy + gid = gidForCopy + } + } + + destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + + idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} + opts := copier.MkdirOptions{ + ChmodNew: mode, + ChownNew: idPair, + UIDMap: destUIDMap, + GIDMap: destGIDMap, + } + return copier.Mkdir(mountPoint, filepath.Join(mountPoint, path), opts) + +} diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go index 789233405e2..0e45d12c2c8 100644 --- a/vendor/github.com/containers/buildah/bind/mount.go +++ b/vendor/github.com/containers/buildah/bind/mount.go @@ -270,7 +270,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { } return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint) } - if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { // nolint:unconvert (required for some OS/arch combinations) + if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations) logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint) continue } diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index f760d252767..2d81a8c5e35 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -13,6 +13,7 @@ import ( "github.com/containers/buildah/define" "github.com/containers/buildah/docker" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage" @@ -154,6 +155,10 @@ type Builder 
struct { // CNIConfigDir is the location of CNI configuration files, if the files in // the default configuration directory shouldn't be used. CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + // ID mapping options to use when running processes in the container with non-host user namespaces. IDMappingOptions define.IDMappingOptions // Capabilities is a list of capabilities to use when running commands in the container. @@ -257,6 +262,8 @@ type BuilderOptions struct { // or "scratch" to indicate that the container should not be based on // an image. FromImage string + // ContainerSuffix is the suffix to add for generated container names + ContainerSuffix string // Container is a desired name for the build container. Container string // PullPolicy decides whether or not we should pull the image that @@ -271,6 +278,8 @@ type BuilderOptions struct { // to store copies of layer blobs that we pull down, if any. It should // already exist. BlobDirectory string + // Logger is the logrus logger to write log messages with + Logger *logrus.Logger `json:"-"` // Mount signals to NewBuilder() that the container should be mounted // immediately. Mount bool @@ -307,6 +316,10 @@ type BuilderOptions struct { // CNIConfigDir is the location of CNI configuration files, if the files in // the default configuration directory shouldn't be used. CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + // ID mapping options to use if we're setting up our own user namespace. IDMappingOptions *define.IDMappingOptions // Capabilities is a list of capabilities to use when @@ -317,7 +330,7 @@ type BuilderOptions struct { Format string // Devices are the additional devices to add to the containers Devices define.ContainerDevices - //DefaultEnv for containers + // DefaultEnv is deprecated and ignored. DefaultEnv []string // MaxPullRetries is the maximum number of attempts we'll make to pull // any one image from the external registry if the first attempt fails. @@ -327,6 +340,10 @@ type BuilderOptions struct { // OciDecryptConfig contains the config that can be used to decrypt an image if it is // encrypted if non-nil. If nil, it does not attempt to decrypt an image. 
OciDecryptConfig *encconfig.DecryptConfig + // ProcessLabel is the SELinux process label associated with the container + ProcessLabel string + // MountLabel is the SELinux mount label associated with the container + MountLabel string } // ImportOptions are used to initialize a Builder from an existing container @@ -396,6 +413,12 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) { if b.Type != containerType { return nil, errors.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type) } + + netInt, err := getNetworkInterface(store, b.CNIConfigDir, b.CNIPluginPath) + if err != nil { + return nil, err + } + b.NetworkInterface = netInt b.store = store b.fixupConfig(nil) b.setupLogger() diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 8926b2e6f37..127c674bfff 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,6 +1,308 @@ -- Changelog for v1.23.1 (2021-09-27) - * Vendor containers/common v0.44.2 - * post-1.23 branch fixups +- Changelog for v1.26.1 (2022-05-04) + * Make `buildah build --label foo` create an empty "foo" label again + * Bump to v1.27.0-dev + +- Changelog for v1.26.0 (2022-05-04) + * imagebuildah,build: move deepcopy of args before we spawn goroutine + * Vendor in containers/storage v1.40.2 + * buildah.BuilderOptions.DefaultEnv is ignored, so mark it as deprecated + * help output: get more consistent about option usage text + * Handle OS version and features flags + * buildah build: --annotation and --label should remove values + * buildah build: add a --env + * buildah: deep copy options.Args before performing concurrent build/stage + * test: inline platform and builtinargs behaviour + * vendor: bump imagebuilder to master/009dbc6 + * build: automatically set correct TARGETPLATFORM where expected + * build(deps): bump github.com/fsouza/go-dockerclient + * Vendor in containers/(common, storage, image) + * imagebuildah, executor: process arg variables while populating baseMap + * buildkit: add support for custom build output with --output + * Cirrus: Update CI VMs to F36 + * fix staticcheck linter warning for deprecated function + * Fix docs build on FreeBSD + * build(deps): bump github.com/containernetworking/cni from 1.0.1 to 1.1.0 + * copier.unwrapError(): update for Go 1.16 + * copier.PutOptions: add StripSetuidBit/StripSetgidBit/StripStickyBit + * copier.Put(): write to read-only directories + * build(deps): bump github.com/cpuguy83/go-md2man/v2 in /tests/tools + * Rename $TESTSDIR (the plural one), step 4 of 3 + * Rename $TESTSDIR (the plural one), step 3 of 3 + * Rename $TESTSDIR (the plural one), step 2 of 3 + * Rename $TESTSDIR (the plural one), step 1 of 3 + * build(deps): bump github.com/containerd/containerd from 1.6.2 to 1.6.3 + * Ed's periodic test cleanup + * using consistent lowercase 'invalid' word in returned err msg + * Update vendor of containers/(common,storage,image) + * use etchosts package from c/common + * run: set actual hostname in /etc/hostname to match docker parity + * update c/common to latest main + * Update vendor of containers/(common,storage,image) + * Stop littering + * manifest-create: allow creating manifest list from local image + * Update vendor of storage,common,image + * Bump golang.org/x/crypto to 7b82a4e + * Initialize network backend before first pull + * oci spec: change special mount points for namespaces + * 
tests/helpers.bash: assert handle corner cases correctly + * buildah: actually use containers.conf settings + * integration tests: learn to start a dummy registry + * Fix error check to work on Podman + * buildah build should accept at most one arg + * tests: reduce concurrency for flaky bud-multiple-platform-no-run + * vendor in latest containers/common,image,storage + * manifest-add: allow override arch,variant while adding image + * Remove a stray `\` from .containerenv + * Vendor in latest opencontainers/selinux v1.10.1 + * build, commit: allow removing default identity labels + * Create shorter names for containers based on image IDs + * test: skip rootless on cgroupv2 in root env + * fix hang when oci runtime fails + * Set permissions for GitHub actions + * copier test: use correct UID/GID in test archives + * run: set parent-death signals and forward SIGHUP/SIGINT/SIGTERM + * Bump back to v1.26.0-dev + * build(deps): bump github.com/opencontainers/runc from 1.1.0 to 1.1.1 + * Included the URL to check the SHA + +- Changelog for v1.25.1 (2022-03-30) + * buildah: create WORKDIR with USER permissions + * vendor: update github.com/openshift/imagebuilder + * copier: attempt to open the dir before adding it + * Updated dependabot to get updates for GitHub actions. + * Switch most calls to filepath.Walk to filepath.WalkDir + * build: allow --no-cache and --layers so build cache can be overrided + * build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0 + * Bump to v1.26.0-dev + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + +- Changelog for v1.25.0 (2022-03-25) + * install: drop RHEL/CentOS 7 doc + * build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5 + * Bump c/storage to v1.39.0 in main + * Add a test for CVE-2022-27651 + * build(deps): bump github.com/docker/docker + * Bump github.com/prometheus/client_golang to v1.11.1 + * [CI:DOCS] man pages: sort flags, and keep them that way + * build(deps): bump github.com/containerd/containerd from 1.6.1 to 1.6.2 + * Don't pollute + * network setup: increase timeout to 4 minutes + * do not set the inheritable capabilities + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * build(deps): bump github.com/containers/ocicrypt from 1.1.2 to 1.1.3 + * parse: convert exposed GetVolumes to internal only + * buildkit: mount=type=cache support locking external cache store + * .in support: improve error message when cpp is not installed + * buildah image: install cpp + * build(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1 + * build(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0 + * build(deps): bump github.com/docker/docker + * Add --no-hosts flag to eliminate use of /etc/hosts within containers + * test: remove skips for rootless users + * test: unshare mount/umount if test is_rootless + * tests/copy: read correct containers.conf + * build(deps): bump github.com/docker/distribution + * cirrus: add seperate task and matrix for rootless + * tests: skip tests for rootless which need unshare + * buildah: test rootless integration + * vendor: bump c/storage to main/93ce26691863 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.9 to 1.7.10 + * tests/copy: initialize the network, too + * [CI:DOCS] remove references to Kubic for CentOS and Ubuntu + * build(deps): bump github.com/containerd/containerd from 1.6.0 to 1.6.1 + * use c/image/pkg/blobcache + * vendor c/image/v5@v5.20.0 + * add: ensure the context directory is an absolute path + * executor: docker 
builds must inherit healthconfig from base if any + * docs: Remove Containerfile and containeringore + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.8 to 1.7.9 + * helpers.bash: Use correct syntax + * speed up combination-namespaces test + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * Bump back to 1.25.0-dev + * build(deps): bump github.com/containerd/containerd from 1.5.9 to 1.6.0 + +- Changelog for v1.24.2 (2022-02-16) + * Increase subuid/subgid to 65535 + * history: only add proxy vars to history if specified + * run_linux: use --systemd-cgroup + * buildah: new global option --cgroup-manager + * Makefile: build with systemd when available + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8 + * Bump c/common to v0.47.4 + * Cirrus: Use updated VM images + * conformance: add a few "replace-directory-with-symlink" tests + * Bump back to v1.25.0-dev + +- Changelog for v1.24.1 (2022-02-03) + * executor: Add support for inline --platform within Dockerfile + * caps: fix buildah run --cap-add=all + * Update vendor of openshift/imagebuilder + * Bump version of containers/image and containers/common + * Update vendor of containers/common + * System tests: fix accidental vandalism of source dir + * build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2 + * imagebuildah.BuildDockerfiles(): create the jobs semaphore + * build(deps): bump github.com/onsi/gomega from 1.18.0 to 1.18.1 + * overlay: always honor mountProgram + * overlay: move mount program invocation to separate function + * overlay: move mount program lookup to separate function + * Bump to v1.25.0-dev [NO TESTS NEEDED] + +- Changelog for v1.24.0 (2022-01-26) + * Update vendor of containers/common + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * Github-workflow: Report both failures and errors. 
+ * build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0 + * Update docs/buildah-build.1.md + * [CI:DOCS] Fix typos and improve language + * buildah bud --network add support for custom networks + * Make pull commands be consistent + * docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing + * build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0 + * Vendor in latest containers/image + * Run codespell on code + * .github/dependabot.yml: add tests/tools go.mod + * CI: rm git-validation, add GHA job to validate PRs + * tests/tools: bump go-md2man to v2.0.1 + * tests/tools/Makefile: simplify + * tests/tools: bump onsi/ginkgo to v1.16.5 + * vendor: bump c/common and others + * mount: add support for custom upper and workdir with overlay mounts + * linux: fix lookup for runtime + * overlay: add MountWithOptions to API which extends support for advanced overlay + * Allow processing of SystemContext from FlagSet + * .golangci.yml: enable unparam linter + * util/resolveName: rm bool return + * tests/tools: bump golangci-lint + * .gitignore: fixups + * all: fix capabilities.NewPid deprecation warnings + * bind/mount.go: fix linter comment + * all: fix gosimple warning S1039 + * tests/e2e/buildah_suite_test.go: fix gosimple warnings + * imagebuildah/executor.go: fix gosimple warning + * util.go: fix gosimple warning + * build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0 + * Enable git-daemon tests + * Allow processing of id options from FlagSet + * Cirrus: Re-order tasks for more parallelism + * Cirrus: Freshen VM images + * Fix platform handling for empty os/arch values + * Allow processing of network options from FlagSet + * Fix permissions on secrets directory + * Update containers/image and containers/common + * bud.bats: use a local git daemon for the git protocol test + * Allow processing of common options from FlagSet + * Cirrus: Run int. tests in parallel with unit + * vendor c/common + * Fix default CNI paths + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7 + * multi-stage: enable mounting stages across each other with selinux enabled + * executor: Share selinux label of first stage with other stages in a build + * buildkit: add from field to bind and cache mounts so images can be used as source + * Use config.ProxyEnv from containers/common + * use libnetwork from c/common for networking + * setup the netns in the buildah parent process + * build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6 + * build: fix libsubid test + * Allow callers to replace the ContainerSuffix + * parse: allow parsing anomaly non-human value for memory control group + * .cirrus: remove static_build from ci + * stage_executor: re-use all possible layers from cache for squashed builds + * build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0 + * Allow rootless buildah to set resource limits on cgroup V2 + * build(deps): bump github.com/docker/docker + * tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification + * build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3 + * Wire logger through to config + * copier.Put: check for is-not-a-directory using lstat, not stat + * Turn on rootless cgroupv2 tests + * Grab all of the containers.conf settings for namespaces. 
+ * image: set MediaType in OCI manifests + * copier: RemoveAll possibly-directories + * Simple README fix + * images: accept multiple filter with logical AND + * build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1 + * UPdate vendor of container/storage + * build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0 + * build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0 + * Make LocalIP public function so Podman can use it + * Fix UnsetEnv for buildah bud + * Tests should rely only on static/unchanging images + * run: ensure that stdio pipes are labeled correctly + * build(deps): bump github.com/docker/docker + * Cirrus: Bump up to Fedora 35 & Ubuntu 21.10 + * chroot: don't use the generate default seccomp filter for unit tests + * build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8 + * ssh-agent: Increase timeout before we explicitly close connection + * docs/tutorials: update + * Clarify that manifest defaults to localhost as the registry name + * "config": remove a stray bit of debug output + * "commit": fix a flag typo + * Fix an error message: unlocking vs locking + * Expand the godoc for CommonBuildOptions.Secrets + * chroot: accept an "rw" option + * Add --unsetenv option to buildah commit and build + * define.TempDirForURL(): show CombinedOutput when a command fails + * config: support the variant field + * rootless: do not bind mount /sys if not needed + * Fix tutorial to specify command on buildah run line + * build: history should not contain ARG values + * docs: Use guaranteed path for go-md2man + * run: honor --network=none from builder if nothing specified + * networkpolicy: Should be enabled instead of default when explictly set + * Add support for env var secret sources + * build(deps): bump github.com/docker/docker + * fix: another non-portable shebang + * Rootless containers users should use additional groups + * Support overlayfs path contains colon + * Report ignorefile location when no content added + * Add support for host.containers.internal in the /etc/hosts + * build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5 + * imagebuildah: fix nil deref + * buildkit: add support for mount=type=cache + * Default secret mode to 400 + * [CI:DOCS] Include manifest example usage + * docs: update buildah-from, buildah-pull 'platform' option compatibility notes + * docs: update buildah-build 'platform' option compatibility notes + * De-dockerize the man page as much as possible + * [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st + * docs: Fix and Update Containerfile man page with supported mount types + * mount: add tmpcopyup to tmpfs mount option + * buildkit: Add support for --mount=type=tmpfs + * build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1 + * Fix command doc links in README.md + * build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1 + * build: Add support for buildkit like --mount=type=bind + * Bump containerd to v1.5.7 + * build(deps): bump github.com/docker/docker + * tests: stop pulling php, composer + * Fix .containerignore link file + * Cirrus: Fix defunct package metadata breaking cache + * build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0 + * buildah build: add --all-platforms + * Add man page for Containerfile and .containerignore + * Plumb the remote logger throughut Buildah + * Replace fmt.Sprintf("%d", x) with strconv.Itoa(x) + * Run: Cleanup run directory after every RUN step + * build(deps): bump 
github.com/containers/common from 0.45.0 to 0.46.0 + * Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation + * Makefile: check for `-race` using `-mod=vendor` + * imagebuildah: fix an attempt to write to a nil map + * push: support to specify the compression format + * conformance: allow test cases to specify dockerUseBuildKit + * build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0 + * build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1 + * unmarshalConvertedConfig(): handle zstd compression + * tests/copy/copy: wire up compression options + * Update to github.com/vbauerster/mpb v7.1.5 + * Add flouthoc to OWNERS + * build: Add additional step nodes when labels are modified + * Makefile: turn on race detection whenever it's available + * conformance: add more tests for exclusion short-circuiting + * Update VM Images + Drop prior-ubuntu testing + * Bump to v1.24.0-dev - Changelog for v1.23.0 (2021-09-13) * Vendor in containers/common v0.44.0 diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go index e6f28e81a1c..9ff7c933d08 100644 --- a/vendor/github.com/containers/buildah/chroot/run.go +++ b/vendor/github.com/containers/buildah/chroot/run.go @@ -10,6 +10,7 @@ import ( "io/ioutil" "os" "os/exec" + "os/signal" "path/filepath" "runtime" "strconv" @@ -159,10 +160,24 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade // Start the grandparent subprocess. cmd := unshare.Command(runUsingChrootCommand) + setPdeathsig(cmd.Cmd) cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr cmd.Dir = "/" cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} + interrupted := make(chan os.Signal, 100) + cmd.Hook = func(int) error { + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + go func() { + for receivedSignal := range interrupted { + if err := cmd.Process.Signal(receivedSignal); err != nil { + logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal) + } + } + }() + return nil + } + logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd) confwg.Add(1) go func() { @@ -173,6 +188,8 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) err = cmd.Run() confwg.Wait() + signal.Stop(interrupted) + close(interrupted) if err == nil { return conferr } @@ -238,7 +255,7 @@ func runUsingChrootMain() { // Set the kernel's lock to "unlocked". locked := 0 if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 { - logrus.Errorf("error locking PTY descriptor: %v", err) + logrus.Errorf("error unlocking PTY descriptor: %v", err) os.Exit(1) } // Get a handle for the other end. @@ -571,6 +588,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io // Start the parent subprocess. cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...) + setPdeathsig(cmd.Cmd) cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr cmd.Dir = "/" cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} @@ -593,10 +611,19 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io } cmd.OOMScoreAdj = spec.Process.OOMScoreAdj cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) 
+ interrupted := make(chan os.Signal, 100) cmd.Hook = func(int) error { for _, f := range closeOnceRunning { f.Close() } + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + go func() { + for receivedSignal := range interrupted { + if err := cmd.Process.Signal(receivedSignal); err != nil { + logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal) + } + } + }() return nil } @@ -609,6 +636,8 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io }() err = cmd.Run() confwg.Wait() + signal.Stop(interrupted) + close(interrupted) if err != nil { if exitError, ok := err.(*exec.ExitError); ok { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { @@ -792,11 +821,27 @@ func runUsingChrootExecMain() { // Actually run the specified command. cmd := exec.Command(args[0], args[1:]...) + setPdeathsig(cmd) cmd.Env = options.Spec.Process.Env cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr cmd.Dir = cwd logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH")) - if err = cmd.Run(); err != nil { + interrupted := make(chan os.Signal, 100) + if err = cmd.Start(); err != nil { + fmt.Fprintf(os.Stderr, "process failed to start with error: %v", err) + } + go func() { + for range interrupted { + if err := cmd.Process.Signal(syscall.SIGKILL); err != nil { + logrus.Infof("%v while attempting to send SIGKILL to child process", err) + } + } + }() + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + err = cmd.Wait() + signal.Stop(interrupted) + close(interrupted) + if err != nil { if exitError, ok := err.(*exec.ExitError); ok { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { if waitStatus.Exited() { @@ -883,46 +928,49 @@ func setApparmorProfile(spec *specs.Spec) error { // setCapabilities sets capabilities for ourselves, to be more or less inherited by any processes that we'll start. 
func setCapabilities(spec *specs.Spec, keepCaps ...string) error { - currentCaps, err := capability.NewPid(0) + currentCaps, err := capability.NewPid2(0) if err != nil { return errors.Wrapf(err, "error reading capabilities of current process") } - caps, err := capability.NewPid(0) + if err := currentCaps.Load(); err != nil { + return errors.Wrapf(err, "error loading capabilities") + } + caps, err := capability.NewPid2(0) if err != nil { return errors.Wrapf(err, "error reading capabilities of current process") } capMap := map[capability.CapType][]string{ capability.BOUNDING: spec.Process.Capabilities.Bounding, capability.EFFECTIVE: spec.Process.Capabilities.Effective, - capability.INHERITABLE: spec.Process.Capabilities.Inheritable, + capability.INHERITABLE: []string{}, capability.PERMITTED: spec.Process.Capabilities.Permitted, capability.AMBIENT: spec.Process.Capabilities.Ambient, } knownCaps := capability.List() - caps.Clear(capability.CAPS | capability.BOUNDS | capability.AMBS) + noCap := capability.Cap(-1) for capType, capList := range capMap { for _, capToSet := range capList { - cap := capability.CAP_LAST_CAP + cap := noCap for _, c := range knownCaps { if strings.EqualFold("CAP_"+c.String(), capToSet) { cap = c break } } - if cap == capability.CAP_LAST_CAP { + if cap == noCap { return errors.Errorf("error mapping capability %q to a number", capToSet) } caps.Set(capType, cap) } for _, capToSet := range keepCaps { - cap := capability.CAP_LAST_CAP + cap := noCap for _, c := range knownCaps { if strings.EqualFold("CAP_"+c.String(), capToSet) { cap = c break } } - if cap == capability.CAP_LAST_CAP { + if cap == noCap { return errors.Errorf("error mapping capability %q to a number", capToSet) } if currentCaps.Get(capType, cap) { @@ -1191,21 +1239,33 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } requestFlags := bindFlags expectedFlags := uintptr(0) - if util.StringInSlice("nodev", m.Options) { - requestFlags |= unix.MS_NODEV - expectedFlags |= unix.ST_NODEV - } - if util.StringInSlice("noexec", m.Options) { - requestFlags |= unix.MS_NOEXEC - expectedFlags |= unix.ST_NOEXEC - } - if util.StringInSlice("nosuid", m.Options) { - requestFlags |= unix.MS_NOSUID - expectedFlags |= unix.ST_NOSUID - } - if util.StringInSlice("ro", m.Options) { - requestFlags |= unix.MS_RDONLY - expectedFlags |= unix.ST_RDONLY + for _, option := range m.Options { + switch option { + case "nodev": + requestFlags |= unix.MS_NODEV + expectedFlags |= unix.ST_NODEV + case "dev": + requestFlags &= ^uintptr(unix.MS_NODEV) + expectedFlags &= ^uintptr(unix.ST_NODEV) + case "noexec": + requestFlags |= unix.MS_NOEXEC + expectedFlags |= unix.ST_NOEXEC + case "exec": + requestFlags &= ^uintptr(unix.MS_NOEXEC) + expectedFlags &= ^uintptr(unix.ST_NOEXEC) + case "nosuid": + requestFlags |= unix.MS_NOSUID + expectedFlags |= unix.ST_NOSUID + case "suid": + requestFlags &= ^uintptr(unix.MS_NOSUID) + expectedFlags &= ^uintptr(unix.ST_NOSUID) + case "ro": + requestFlags |= unix.MS_RDONLY + expectedFlags |= unix.ST_RDONLY + case "rw": + requestFlags &= ^uintptr(unix.MS_RDONLY) + expectedFlags &= ^uintptr(unix.ST_RDONLY) + } } switch m.Type { case "bind": @@ -1404,3 +1464,11 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } return undoBinds, nil } + +// setPdeathsig sets a parent-death signal for the process +func setPdeathsig(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL 
+} diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go index 1ca0a159ebc..f130f7a22fd 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp.go @@ -3,6 +3,9 @@ package chroot import ( + "io/ioutil" + + "github.com/containers/common/pkg/seccomp" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" libseccomp "github.com/seccomp/libseccomp-golang" @@ -171,3 +174,27 @@ func setSeccomp(spec *specs.Spec) error { } return nil } + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + switch seccompProfilePath { + case "unconfined": + spec.Linux.Seccomp = nil + case "": + seccompConfig, err := seccomp.GetDefaultProfile(spec) + if err != nil { + return errors.Wrapf(err, "loading default seccomp profile failed") + } + spec.Linux.Seccomp = seccompConfig + default: + seccompProfile, err := ioutil.ReadFile(seccompProfilePath) + if err != nil { + return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath) + } + seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec) + if err != nil { + return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath) + } + spec.Linux.Seccomp = seccompConfig + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go index a5b74bf092c..f33dd254a35 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go @@ -13,3 +13,11 @@ func setSeccomp(spec *specs.Spec) error { } return nil } + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + if spec.Linux != nil { + // runtime-tools may have supplied us with a default filter + spec.Linux.Seccomp = nil + } + return nil +} diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index bbf1727fbd5..ca597e22230 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -101,6 +101,9 @@ type CommitOptions struct { // integers in the slice represent 0-indexed layer indices, with support for negative // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. OciEncryptLayers *[]int + // UnsetEnvs is a list of environments to not add to final image. + // Deprecated: use UnsetEnv() before committing instead. + UnsetEnvs []string } var ( @@ -227,6 +230,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) { var ( imgID string + src types.ImageReference ) // If we weren't given a name, build a destination reference using a @@ -298,7 +302,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest)) // Build an image reference from which we can copy the finished image. 
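Reviewer note: the new setupSeccomp() above is a three-way dispatch on the profile path: the literal "unconfined" drops the filter entirely, the empty string loads the built-in default, and anything else is read as a JSON profile file. The sketch below mirrors just that dispatch shape using only the stdlib; parseProfile and defaultProfile are hypothetical stand-ins for seccomp.LoadProfile and seccomp.GetDefaultProfile, not the real API.

package main

import (
	"fmt"
	"io/ioutil"
)

// Stand-ins for the containers/common seccomp helpers; assumptions only.
func defaultProfile() (string, error)          { return `{"defaultAction":"SCMP_ACT_ALLOW"}`, nil }
func parseProfile(body string) (string, error) { return body, nil }

func selectSeccomp(profilePath string) (string, error) {
	switch profilePath {
	case "unconfined":
		return "", nil // no seccomp filter at all
	case "":
		return defaultProfile() // built-in default profile
	default:
		body, err := ioutil.ReadFile(profilePath)
		if err != nil {
			return "", fmt.Errorf("opening seccomp profile (%s) failed: %w", profilePath, err)
		}
		return parseProfile(string(body))
	}
}

func main() {
	cfg, err := selectSeccomp("") // try "unconfined" or a file path
	fmt.Printf("profile=%q err=%v\n", cfg, err)
}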
- src, err := b.makeImageRef(options) + src, err = b.makeContainerImageRef(options) if err != nil { return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID) } diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index a3da64e6bed..e009ed7631d 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -8,9 +8,12 @@ import ( "strings" "time" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" "github.com/containers/buildah/docker" + "github.com/containers/buildah/util" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/stringid" @@ -28,18 +31,24 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference())) } if wantedManifestMIMEType != actualManifestMIMEType { + layerInfos := img.LayerInfos() + for i := range layerInfos { // force the "compression" to gzip, which is supported by all of the formats we care about + layerInfos[i].CompressionOperation = types.Compress + layerInfos[i].CompressionAlgorithm = &compression.Gzip + } updatedImg, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{ + LayerInfos: layerInfos, + }) + if err != nil { + return errors.Wrapf(err, "resetting recorded compression for %q", transports.ImageName(img.Reference())) + } + secondUpdatedImg, err := updatedImg.UpdatedImage(ctx, types.ManifestUpdateOptions{ ManifestMIMEType: wantedManifestMIMEType, - InformationOnly: types.ManifestUpdateInformation{ // Strictly speaking, every value in here is invalid. But… - Destination: nil, // Destination is technically required, but actually necessary only for conversion _to_ v2s1. Leave it nil, we will crash if that ever changes. - LayerInfos: nil, // LayerInfos is necessary for size information in v2s2/OCI manifests, but the code can work with nil, and we are not reading the converted manifest at all. - LayerDiffIDs: nil, // LayerDiffIDs are actually embedded in the converted manifest, but the code can work with nil, and the values are not needed until pushing the finished image, at which time containerImageRef.NewImageSource builds the values from scratch. 
- }, }) if err != nil { return errors.Wrapf(err, "error converting image %q from %q to %q", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType) } - img = updatedImg + img = secondUpdatedImg } config, err := img.ConfigBlob(ctx) if err != nil { @@ -126,6 +135,10 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) { } else { b.SetArchitecture(runtime.GOARCH) } + // in case the arch string we started with was shorthand for a known arch+variant pair, normalize it + ps := platforms.Normalize(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()}) + b.SetArchitecture(ps.Architecture) + b.SetVariant(ps.Variant) } if b.Format == define.Dockerv2ImageManifest && b.Hostname() == "" { b.SetHostname(stringid.TruncateID(stringid.GenerateRandomID())) @@ -190,6 +203,69 @@ func (b *Builder) SetOS(os string) { b.Docker.OS = os } +// OSVersion returns a version of the OS on which the container, or a container +// built using an image built from this container, is intended to be run. +func (b *Builder) OSVersion() string { + return b.OCIv1.OSVersion +} + +// SetOSVersion sets the version of the OS on which the container, or a +// container built using an image built from this container, is intended to be +// run. +func (b *Builder) SetOSVersion(version string) { + b.OCIv1.OSVersion = version + b.Docker.OSVersion = version +} + +// OSFeatures returns a list of OS features which the container, or a container +// built using an image built from this container, depends on the OS supplying. +func (b *Builder) OSFeatures() []string { + return copyStringSlice(b.OCIv1.OSFeatures) +} + +// SetOSFeature adds a feature of the OS which the container, or a container +// built using an image built from this container, depends on the OS supplying. +func (b *Builder) SetOSFeature(feature string) { + if !util.StringInSlice(feature, b.OCIv1.OSFeatures) { + b.OCIv1.OSFeatures = append(b.OCIv1.OSFeatures, feature) + } + if !util.StringInSlice(feature, b.Docker.OSFeatures) { + b.Docker.OSFeatures = append(b.Docker.OSFeatures, feature) + } +} + +// UnsetOSFeature removes a feature of the OS which the container, or a +// container built using an image built from this container, depends on the OS +// supplying. +func (b *Builder) UnsetOSFeature(feature string) { + if util.StringInSlice(feature, b.OCIv1.OSFeatures) { + features := make([]string, 0, len(b.OCIv1.OSFeatures)) + for _, f := range b.OCIv1.OSFeatures { + if f != feature { + features = append(features, f) + } + } + b.OCIv1.OSFeatures = features + } + if util.StringInSlice(feature, b.Docker.OSFeatures) { + features := make([]string, 0, len(b.Docker.OSFeatures)) + for _, f := range b.Docker.OSFeatures { + if f != feature { + features = append(features, f) + } + } + b.Docker.OSFeatures = features + } +} + +// ClearOSFeatures clears the list of features of the OS which the container, +// or a container built using an image built from this container, depends on +// the OS supplying. +func (b *Builder) ClearOSFeatures() { + b.OCIv1.OSFeatures = []string{} + b.Docker.OSFeatures = []string{} +} + // Architecture returns a name of the architecture on which the container, or a // container built using an image built from this container, is intended to be // run. 
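Reviewer note: fixupConfig() above now round-trips the recorded OS/architecture/variant through containerd's normalizer, so a shorthand arch is expanded into a canonical arch+variant pair before being written back. A small demonstration of what that call does; the outputs in the comments reflect containerd's alias table as I understand it, so treat them as illustrative:

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Shorthand "arm" picks up its default variant.
	p := platforms.Normalize(v1.Platform{OS: "linux", Architecture: "arm"})
	fmt.Println(platforms.Format(p)) // e.g. linux/arm/v7

	// Aliases are canonicalized.
	p = platforms.Normalize(v1.Platform{OS: "linux", Architecture: "x86_64"})
	fmt.Println(platforms.Format(p)) // e.g. linux/amd64
}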
@@ -205,6 +281,21 @@ func (b *Builder) SetArchitecture(arch string) { b.Docker.Architecture = arch } +// Variant returns a name of the architecture variant on which the container, +// or a container built using an image built from this container, is intended +// to be run. +func (b *Builder) Variant() string { + return b.OCIv1.Variant +} + +// SetVariant sets the name of the architecture variant on which the container, +// or a container built using an image built from this container, is intended +// to be run. +func (b *Builder) SetVariant(variant string) { + b.Docker.Variant = variant + b.OCIv1.Variant = variant +} + // Maintainer returns contact information for the person who built the image. func (b *Builder) Maintainer() string { return b.OCIv1.Author @@ -247,7 +338,7 @@ func (b *Builder) ClearOnBuild() { // discarded when writing images using OCIv1 formats. func (b *Builder) SetOnBuild(onBuild string) { if onBuild != "" && b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild) + b.Logger.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild) } b.Docker.Config.OnBuild = append(b.Docker.Config.OnBuild, onBuild) } @@ -279,7 +370,7 @@ func (b *Builder) Shell() []string { // discarded when writing images using OCIv1 formats. func (b *Builder) SetShell(shell []string) { if len(shell) > 0 && b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell) + b.Logger.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell) } b.Docker.Config.Shell = copyStringSlice(shell) @@ -516,7 +607,7 @@ func (b *Builder) Domainname() string { // discarded when writing images using OCIv1 formats. func (b *Builder) SetDomainname(name string) { if name != "" && b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name) + b.Logger.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name) } b.Docker.Config.Domainname = name } @@ -593,7 +684,7 @@ func (b *Builder) SetHealthcheck(config *docker.HealthConfig) { b.Docker.Config.Healthcheck = nil if config != nil { if b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("Healthcheck is not supported for OCI image format and will be ignored. Must use `docker` format") + b.Logger.Warnf("HEALTHCHECK is not supported for OCI image format and will be ignored. 
Must use `docker` format") } b.Docker.Config.Healthcheck = &docker.HealthConfig{ Test: copyStringSlice(config.Test), diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go index 1823e523840..00aa29ccc44 100644 --- a/vendor/github.com/containers/buildah/copier/copier.go +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -4,8 +4,10 @@ import ( "archive/tar" "bytes" "encoding/json" + stderrors "errors" "fmt" "io" + "io/fs" "io/ioutil" "net" "os" @@ -343,6 +345,9 @@ type PutOptions struct { ChmodDirs *os.FileMode // set permissions on newly-created directories ChownFiles *idtools.IDPair // set ownership of newly-created files ChmodFiles *os.FileMode // set permissions on newly-created files + StripSetuidBit bool // strip the setuid bit off of items being written + StripSetgidBit bool // strip the setgid bit off of items being written + StripStickyBit bool // strip the sticky bit off of items being written StripXattrs bool // don't bother trying to set extended attributes of items being copied IgnoreXattrErrors bool // ignore any errors encountered when attempting to set extended attributes IgnoreDevices bool // ignore items which are character or block devices @@ -1179,10 +1184,10 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa // we don't expand any of the contents that are archives options := req.GetOptions options.ExpandArchives = false - walkfn := func(path string, info os.FileInfo, err error) error { + walkfn := func(path string, d fs.DirEntry, err error) error { if err != nil { if options.IgnoreUnreadable && errorIsPermission(err) { - if info != nil && info.IsDir() { + if info != nil && d.IsDir() { return filepath.SkipDir } return nil @@ -1192,8 +1197,8 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa } return errors.Wrapf(err, "copier: get: error reading %q", path) } - if info.Mode()&os.ModeType == os.ModeSocket { - logrus.Warningf("copier: skipping socket %q", info.Name()) + if d.Type() == os.ModeSocket { + logrus.Warningf("copier: skipping socket %q", d.Name()) return nil } // compute the path of this item @@ -1216,7 +1221,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa return err } if skip { - if info.IsDir() { + if d.IsDir() { // if there are no "include // this anyway" patterns at // all, we don't need to @@ -1254,17 +1259,21 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa } // if it's a symlink, read its target symlinkTarget := "" - if info.Mode()&os.ModeType == os.ModeSymlink { + if d.Type() == os.ModeSymlink { target, err := os.Readlink(path) if err != nil { return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path) } symlinkTarget = target } + info, err := d.Info() + if err != nil { + return err + } // if it's a directory and we're staying on one device, and it's on a // different device than the one we started from, skip its contents var ok error - if info.Mode().IsDir() && req.GetOptions.NoCrossDevice { + if d.IsDir() && req.GetOptions.NoCrossDevice { if !sameDevice(topInfo, info) { ok = filepath.SkipDir } @@ -1282,7 +1291,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa return ok } // walk the directory tree, checking/adding items individually - if err := filepath.Walk(item, walkfn); err != nil { + if err := filepath.WalkDir(item, walkfn); err != nil { return errors.Wrapf(err, "copier: get: %q(%q)", 
queue[i], item) } itemsCopied++ @@ -1461,6 +1470,13 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str return errors.Wrapf(err, "error opening file for adding its contents to archive") } defer f.Close() + } else if hdr.Typeflag == tar.TypeDir { + // open the directory file first to make sure we can access it. + f, err = os.Open(contentPath) + if err != nil { + return errors.Wrapf(err, "error opening directory for adding its contents to archive") + } + defer f.Close() } // output the header if err = tw.WriteHeader(hdr); err != nil { @@ -1520,6 +1536,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM fileUID, fileGID = &hostFilePair.UID, &hostFilePair.GID } } + directoryModes := make(map[string]os.FileMode) ensureDirectoryUnderRoot := func(directory string) error { rel, err := convertToRelSubdirectory(req.Root, directory) if err != nil { @@ -1533,8 +1550,10 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM if err = lchown(path, defaultDirUID, defaultDirGID); err != nil { return errors.Wrapf(err, "copier: put: error setting owner of %q to %d:%d", path, defaultDirUID, defaultDirGID) } - if err = os.Chmod(path, defaultDirMode); err != nil { - return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, defaultDirMode) + // make a conditional note to set this directory's permissions + // later, but not if we already had an explictly-provided mode + if _, ok := directoryModes[path]; !ok { + directoryModes[path] = defaultDirMode } } else { if !os.IsExist(err) { @@ -1544,6 +1563,20 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM } return nil } + makeDirectoryWriteable := func(directory string) error { + st, err := os.Lstat(directory) + if err != nil { + return errors.Wrapf(err, "copier: put: error reading permissions of directory %q", directory) + } + mode := st.Mode() & os.ModePerm + if _, ok := directoryModes[directory]; !ok { + directoryModes[directory] = mode + } + if err = os.Chmod(directory, 0o700); err != nil { + return errors.Wrapf(err, "copier: put: error making directory %q writable", directory) + } + return nil + } createFile := func(path string, tr *tar.Reader) (int64, error) { f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) if err != nil && os.IsExist(err) { @@ -1553,7 +1586,21 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM } } if err = os.RemoveAll(path); err != nil { - return 0, errors.Wrapf(err, "copier: put: error removing item to be overwritten %q", path) + if os.IsPermission(err) { + if err := makeDirectoryWriteable(filepath.Dir(path)); err != nil { + return 0, err + } + err = os.RemoveAll(path) + } + if err != nil { + return 0, errors.Wrapf(err, "copier: put: error removing item to be overwritten %q", path) + } + } + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) + } + if err != nil && os.IsPermission(err) { + if err = makeDirectoryWriteable(filepath.Dir(path)); err != nil { + return 0, err } f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) } @@ -1597,6 +1644,11 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM logrus.Debugf("error setting access and modify timestamps on %q to %s and %s: %v", directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime, err) } } + for directory, mode := range directoryModes { + if err := os.Chmod(directory, 
mode); err != nil { + logrus.Debugf("error setting permissions of %q to 0%o: %v", directory, uint32(mode), err) + } + } }() ignoredItems := make(map[string]struct{}) tr := tar.NewReader(bulkReader) @@ -1637,6 +1689,15 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM return err } // figure out what the permissions should be + if req.PutOptions.StripSetuidBit && hdr.Mode&cISUID == cISUID { + hdr.Mode &^= cISUID + } + if req.PutOptions.StripSetgidBit && hdr.Mode&cISGID == cISGID { + hdr.Mode &^= cISGID + } + if req.PutOptions.StripStickyBit && hdr.Mode&cISVTX == cISVTX { + hdr.Mode &^= cISVTX + } if hdr.Typeflag == tar.TypeDir { if req.PutOptions.ChmodDirs != nil { hdr.Mode = int64(*req.PutOptions.ChmodDirs) @@ -1660,7 +1721,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM // only check the length if there wasn't an error, which we'll // check along with errors for other types of entries if err == nil && written != hdr.Size { - return errors.Errorf("copier: put: error creating %q: incorrect length (%d != %d)", path, written, hdr.Size) + return errors.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size) } case tar.TypeLink: var linkTarget string @@ -1681,7 +1742,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = os.Link(linkTarget, path) } } @@ -1696,7 +1757,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)) } } @@ -1711,7 +1772,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))) } } @@ -1726,14 +1787,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))) } } case tar.TypeDir: if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) { var st os.FileInfo - if st, err = os.Stat(path); err == nil && !st.IsDir() { + if st, err = os.Lstat(path); err == nil && !st.IsDir() { // it's not a directory, so remove it and mkdir if err = os.Remove(path); err == nil { err = os.Mkdir(path, 0700) @@ -1751,6 +1812,9 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM atime: hdr.AccessTime, mtime: hdr.ModTime, }) + // set the mode here unconditionally, in case the directory is in + // the archive more than once for whatever reason + directoryModes[path] = mode case tar.TypeFifo: if err = mkfifo(path, 0600); err != nil && os.IsExist(err) { if req.PutOptions.NoOverwriteDirNonDir { @@ -1758,7 +1822,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = mkfifo(path, 0600) } } @@ -1778,8 +1842,12 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM if err = lchown(path, hdr.Uid, hdr.Gid); err != nil { return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", 
path, hdr.Uid, hdr.Gid) } - // set permissions, except for symlinks, since we don't have lchmod - if hdr.Typeflag != tar.TypeSymlink { + // set permissions, except for symlinks, since we don't + // have an lchmod, and directories, which we'll fix up + // on our way out so that we don't get tripped up by + // directories which we're not supposed to be able to + // write to, but which we'll need to create content in + if hdr.Typeflag != tar.TypeSymlink && hdr.Typeflag != tar.TypeDir { if err = os.Chmod(path, mode); err != nil { return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode) } @@ -1895,3 +1963,15 @@ func copierHandlerRemove(req request) *response { } return &response{Error: "", Remove: removeResponse{}} } + +func unwrapError(err error) error { + e := errors.Cause(err) + for e != nil { + err = e + e = stderrors.Unwrap(err) + if e == err { + break + } + } + return err +} diff --git a/vendor/github.com/containers/buildah/copier/unwrap_112.go b/vendor/github.com/containers/buildah/copier/unwrap_112.go deleted file mode 100644 index ebbad08b991..00000000000 --- a/vendor/github.com/containers/buildah/copier/unwrap_112.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !go113 - -package copier - -import ( - "github.com/pkg/errors" -) - -func unwrapError(err error) error { - return errors.Cause(err) -} diff --git a/vendor/github.com/containers/buildah/copier/unwrap_113.go b/vendor/github.com/containers/buildah/copier/unwrap_113.go deleted file mode 100644 index cd0d0fb6871..00000000000 --- a/vendor/github.com/containers/buildah/copier/unwrap_113.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build go113 - -package copier - -import ( - stderror "errors" - - "github.com/pkg/errors" -) - -func unwrapError(err error) error { - e := errors.Cause(err) - for e != nil { - err = e - e = errors.Unwrap(err) - } - return err -} diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go index c757adcc887..aef0f440381 100644 --- a/vendor/github.com/containers/buildah/copier/xattrs.go +++ b/vendor/github.com/containers/buildah/copier/xattrs.go @@ -16,7 +16,9 @@ const ( ) var ( - relevantAttributes = []string{"security.capability", "security.ima", "user.*"} // the attributes that we preserve - we discard others + relevantAttributes = []string{"security.capability", "security.ima", "user.*"} // the attributes that we preserve - we discard others + initialXattrListSize = 64 * 1024 + initialXattrValueSize = 64 * 1024 ) // isRelevantXattr checks if "attribute" matches one of the attribute patterns @@ -35,7 +37,7 @@ func isRelevantXattr(attribute string) bool { // Lgetxattrs returns a map of the relevant extended attributes set on the given file. 
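Reviewer note: the unwrapError() helper added above folds the two deleted build-tag variants into one: errors.Cause() strips github.com/pkg/errors wrappers first, then the loop walks the Go 1.13 Unwrap() chain, and the e == err comparison is what keeps a self-unwrapping error from looping forever (the old go113 variant had no such guard). A standalone illustration of the same walk, using only the stdlib half:

package main

import (
	"errors"
	"fmt"
)

// loopy unwraps to itself, the pathological case the e == err check guards against.
type loopy struct{}

func (l loopy) Error() string { return "loopy" }
func (l loopy) Unwrap() error { return l }

// unwrap walks the Unwrap chain to the innermost error, stopping if an
// error claims to unwrap to itself.
func unwrap(err error) error {
	e := errors.Unwrap(err)
	for e != nil {
		if e == err {
			break
		}
		err = e
		e = errors.Unwrap(err)
	}
	return err
}

func main() {
	root := errors.New("root cause")
	wrapped := fmt.Errorf("outer: %w", fmt.Errorf("inner: %w", root))
	fmt.Println(unwrap(wrapped)) // root cause
	fmt.Println(unwrap(loopy{})) // loopy (terminates instead of spinning)
}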
func Lgetxattrs(path string) (map[string]string, error) { maxSize := 64 * 1024 * 1024 - listSize := 64 * 1024 + listSize := initialXattrListSize var list []byte for listSize < maxSize { list = make([]byte, listSize) @@ -61,7 +63,7 @@ func Lgetxattrs(path string) (map[string]string, error) { m := make(map[string]string) for _, attribute := range strings.Split(string(list), string('\000')) { if isRelevantXattr(attribute) { - attributeSize := 64 * 1024 + attributeSize := initialXattrValueSize var attributeValue []byte for attributeSize < maxSize { attributeValue = make([]byte, attributeSize) diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index cff9a3d830b..568be203cf6 100644 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -4,6 +4,7 @@ import ( "io" "time" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage/pkg/archive" @@ -28,6 +29,8 @@ type CommonBuildOptions struct { CPUSetMems string // HTTPProxy determines whether *_proxy env vars from the build host are passed into the container. HTTPProxy bool + // IdentityLabel if set ensures that default `io.buildah.version` label is not applied to build image. + IdentityLabel types.OptionalBool // Memory is the upper limit (in bytes) on how much memory running containers can use. Memory int64 // DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf @@ -36,11 +39,14 @@ type CommonBuildOptions struct { DNSServers []string // DNSOptions is the list of DNS DNSOptions []string - // MemorySwap limits the amount of memory and swap together. - MemorySwap int64 // LabelOpts is the a slice of fields of an SELinux context, given in "field:pair" format, or "disable". // Recognized field names are "role", "type", and "level". LabelOpts []string + // MemorySwap limits the amount of memory and swap together. + MemorySwap int64 + // NoHosts tells the builder not to create /etc/hosts content when running + // containers. + NoHosts bool // OmitTimestamp forces epoch 0 as created timestamp to allow for // deterministic, content-addressable builds. OmitTimestamp bool @@ -70,7 +76,9 @@ type CommonBuildOptions struct { Ulimit []string // Volumes to bind mount into the container Volumes []string - // Secrets are the available secrets to use in a build + // Secrets are the available secrets to use in a build. Each item in the + // slice takes the form "id=foo,src=bar", where both "id" and "src" are + // required, in that order, and "bar" is the name of a file. Secrets []string // SSHSources is the available ssh agent connections to forward in the build SSHSources []string @@ -78,6 +86,8 @@ type CommonBuildOptions struct { // BuildOptions can be used to alter how an image is built. type BuildOptions struct { + // ContainerSuffix it the name to suffix containers with + ContainerSuffix string // ContextDirectory is the default source location for COPY and ADD // commands. ContextDirectory string @@ -113,6 +123,10 @@ type BuildOptions struct { Args map[string]string // Name of the image to write to. Output string + // BuildOutput specifies if any custom build output is selected for following build. + // It allows end user to export recently built rootfs into a directory or tar. + // See the documentation of 'buildah build --output' for the details of the format. 
+ BuildOutput string // Additional tags to add to the image that we write, if we know of a // way to add them. AdditionalTags []string @@ -157,6 +171,10 @@ type BuildOptions struct { // CNIConfigDir is the location of CNI configuration files, if the files in // the default configuration directory shouldn't be used. CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + // ID mapping options to use if we're setting up our own user namespace // when handling RUN instructions. IDMappingOptions *IDMappingOptions @@ -227,6 +245,8 @@ type BuildOptions struct { RusageLogFile string // Excludes is a list of excludes to be used instead of the .dockerignore file. Excludes []string + // IgnoreFile is a name of the .containerignore file + IgnoreFile string // From is the image name to use to replace the value specified in the first // FROM instruction in the Containerfile From string @@ -234,4 +254,20 @@ type BuildOptions struct { // to build the image for. If this slice has items in it, the OS and // Architecture fields above are ignored. Platforms []struct{ OS, Arch, Variant string } + // AllPlatforms tells the builder to set the list of target platforms + // to match the set of platforms for which all of the build's base + // images are available. If this field is set, Platforms is ignored. + AllPlatforms bool + // UnsetEnvs is a list of environments to not add to final image. + UnsetEnvs []string + // Envs is a list of environment variables to set in the final image. + Envs []string + // OSFeatures specifies operating system features the image requires. + // It is typically only set when the OS is "windows". + OSFeatures []string + // OSVersion specifies the exact operating system version the image + // requires. It is typically only set when the OS is "windows". Any + // value set in a base image will be preserved, so this does not + // frequently need to be set. + OSVersion string } diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index 4f3ebf01a49..459a161cdd6 100644 --- a/vendor/github.com/containers/buildah/define/types.go +++ b/vendor/github.com/containers/buildah/define/types.go @@ -29,15 +29,11 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.23.1" + Version = "1.26.1" // DefaultRuntime if containers.conf fails. DefaultRuntime = "runc" - DefaultCNIPluginPath = "/usr/libexec/cni:/opt/cni/bin" - // DefaultCNIConfigDir is the default location of CNI configuration files. - DefaultCNIConfigDir = "/etc/cni/net.d" - // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest, // suitable for specifying as a value of the PreferredManifestType // member of a CommitOptions structure. It is also the default. @@ -93,6 +89,20 @@ type IDMappingOptions struct { GIDMap []specs.LinuxIDMapping } +// Secret is a secret source that can be used in a RUN +type Secret struct { + ID string + Source string + SourceType string +} + +// BuildOutputOptions contains the the outcome of parsing the value of a build --output flag +type BuildOutputOption struct { + Path string // Only valid if !IsStdout + IsDir bool + IsStdout bool +} + // TempDirForURL checks if the passed-in string looks like a URL or -. 
If it is, // TempDirForURL creates a temporary directory, arranges for its contents to be // the contents of that URL, and returns the temporary directory's path, along @@ -117,12 +127,12 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err return "", "", errors.Wrapf(err, "error parsing url %q", url) } if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") { - err = cloneToDirectory(url, name) + combinedOutput, err := cloneToDirectory(url, name) if err != nil { if err2 := os.RemoveAll(name); err2 != nil { logrus.Debugf("error removing temporary directory %q: %v", name, err2) } - return "", "", err + return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput)) } return name, "", nil } @@ -160,7 +170,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err return "", "", errors.Errorf("unreachable code reached") } -func cloneToDirectory(url, dir string) error { +func cloneToDirectory(url, dir string) ([]byte, error) { gitBranch := strings.Split(url, "#") var cmd *exec.Cmd if len(gitBranch) < 2 { @@ -170,7 +180,7 @@ func cloneToDirectory(url, dir string) error { logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir) cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch[1], gitBranch[0], dir) } - return cmd.Run() + return cmd.CombinedOutput() } func downloadToDirectory(url, dir string) error { diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go index 561287ac276..b0ed2e4c021 100644 --- a/vendor/github.com/containers/buildah/docker/types.go +++ b/vendor/github.com/containers/buildah/docker/types.go @@ -151,6 +151,8 @@ type V1Image struct { Config *Config `json:"config,omitempty"` // Architecture is the hardware that the image is build and runs on Architecture string `json:"architecture,omitempty"` + // Variant is a variant of the CPU that the image is built and runs on + Variant string `json:"variant,omitempty"` // OS is the operating system used to build and run the image OS string `json:"os,omitempty"` // Size is the total size of the image including all layers it is composed of diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index ef05f37d26e..e3668bd0ddf 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -43,6 +43,15 @@ const ( Dockerv2ImageManifest = define.Dockerv2ImageManifest ) +// ExtractRootfsOptions is consumed by ExtractRootfs() which allows +// users to preserve nature of various modes like setuid, setgid and xattrs +// over the extracted file system objects. +type ExtractRootfsOptions struct { + StripSetuidBit bool // strip the setuid bit off of items being extracted. + StripSetgidBit bool // strip the setgid bit off of items being extracted. + StripXattrs bool // don't record extended attributes of items being extracted. +} + type containerImageRef struct { fromImageName string fromImageID string @@ -150,7 +159,10 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om } // Extract the container's whole filesystem as if it were a single layer. -func (i *containerImageRef) extractRootfs() (io.ReadCloser, chan error, error) { +// Takes ExtractRootfsOptions as argument which allows caller to configure +// preserve nature of setuid,setgid,sticky and extended attributes +// on extracted rootfs. 
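Reviewer note: the StripSetuidBit/StripSetgidBit knobs introduced in these hunks (PutOptions earlier, GetOptions via ExtractRootfsOptions here) boil down to clearing mode bits on tar headers as entries stream through. A self-contained sketch of that operation; cISUID/cISGID are the conventional tar mode bits (04000/02000), matching the constants the copier itself tests against:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

const (
	cISUID = 0o4000 // setuid bit in a tar header Mode
	cISGID = 0o2000 // setgid bit in a tar header Mode
)

// stripBits copies a tar stream, clearing setuid/setgid on every entry.
func stripBits(dst io.Writer, src io.Reader) error {
	tr := tar.NewReader(src)
	tw := tar.NewWriter(dst)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return tw.Close()
		}
		if err != nil {
			return err
		}
		hdr.Mode &^= cISUID | cISGID
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, tr); err != nil {
			return err
		}
	}
}

func main() {
	// One-entry archive whose file is setuid.
	var in bytes.Buffer
	tw := tar.NewWriter(&in)
	tw.WriteHeader(&tar.Header{Name: "bin/tool", Mode: 0o4755, Size: 2})
	tw.Write([]byte("hi"))
	tw.Close()

	var out bytes.Buffer
	if err := stripBits(&out, &in); err != nil {
		panic(err)
	}
	hdr, _ := tar.NewReader(&out).Next()
	fmt.Printf("mode after strip: %#o\n", hdr.Mode) // 0755
}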
+func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) { var uidMap, gidMap []idtools.IDMap mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) if err != nil { @@ -164,8 +176,11 @@ func (i *containerImageRef) extractRootfs() (io.ReadCloser, chan error, error) { uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap) } copierOptions := copier.GetOptions{ - UIDMap: uidMap, - GIDMap: gidMap, + UIDMap: uidMap, + GIDMap: gidMap, + StripSetuidBit: opts.StripSetuidBit, + StripSetgidBit: opts.StripSetgidBit, + StripXattrs: opts.StripXattrs, } err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter) errChan <- err @@ -239,6 +254,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, Versioned: specs.Versioned{ SchemaVersion: 2, }, + MediaType: v1.MediaTypeImageManifest, Config: v1.Descriptor{ MediaType: v1.MediaTypeImageConfig, }, @@ -375,7 +391,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System var errChan chan error if i.squash { // Extract the root filesystem as a single layer. - rc, errChan, err = i.extractRootfs() + rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{}) if err != nil { return nil, err } @@ -737,7 +753,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil } -func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, error) { +func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) { var name reference.Named container, err := b.store.Container(b.ContainerID) if err != nil { @@ -752,6 +768,10 @@ func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, err if manifestType == "" { manifestType = define.OCIv1ImageManifest } + + for _, u := range options.UnsetEnvs { + b.UnsetEnv(u) + } oconfig, err := json.Marshal(&b.OCIv1) if err != nil { return nil, errors.Wrapf(err, "error encoding OCI-format image configuration %#v", b.OCIv1) @@ -808,3 +828,12 @@ func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, err } return ref, nil } + +// Extract the container's whole filesystem as if it were a single layer from current builder instance +func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) { + src, err := b.makeContainerImageRef(options) + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating image reference for container %q to extract its contents", b.ContainerID) + } + return src.extractRootfs(opts) +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index bdb407885b2..cf0a7cfbacf 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -10,15 +10,19 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "sync" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" "github.com/containers/buildah/util" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/shortnames" istorage "github.com/containers/image/v5/storage" 
"github.com/containers/image/v5/types" "github.com/containers/storage" @@ -58,6 +62,10 @@ type BuildOptions = define.BuildOptions // returns the ID of the built image, and if a name was assigned to it, a // canonical reference for that image. func BuildDockerfiles(ctx context.Context, store storage.Store, options define.BuildOptions, paths ...string) (id string, ref reference.Canonical, err error) { + if options.CommonBuildOpts == nil { + options.CommonBuildOpts = &define.CommonBuildOptions{} + } + if len(paths) == 0 { return "", nil, errors.Errorf("error building: no dockerfiles specified") } @@ -168,8 +176,17 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B files = append(files, b.Bytes()) } - if options.Jobs != nil && *options.Jobs != 0 { - options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs)) + if options.JobSemaphore == nil { + if options.Jobs != nil { + if *options.Jobs < 0 { + return "", nil, errors.New("error building: invalid value for jobs. It must be a positive integer") + } + if *options.Jobs > 0 { + options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs)) + } + } else { + options.JobSemaphore = semaphore.NewWeighted(1) + } } manifestList := options.Manifest @@ -193,22 +210,44 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B }) } + if options.AllPlatforms { + options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.SystemContext) + if err != nil { + return "", nil, err + } + } + systemContext := options.SystemContext for _, platform := range options.Platforms { platformContext := *systemContext - platformContext.OSChoice = platform.OS - platformContext.ArchitectureChoice = platform.Arch - platformContext.VariantChoice = platform.Variant + platformSpec := platforms.Normalize(v1.Platform{ + OS: platform.OS, + Architecture: platform.Arch, + Variant: platform.Variant, + }) + // platforms.Normalize converts an empty os value to GOOS + // so we have to check the original value here to not overwrite the default for no reason + if platform.OS != "" { + platformContext.OSChoice = platformSpec.OS + } + if platform.Arch != "" { + platformContext.ArchitectureChoice = platformSpec.Architecture + platformContext.VariantChoice = platformSpec.Variant + } platformOptions := options platformOptions.SystemContext = &platformContext + platformOptions.OS = platformContext.OSChoice + platformOptions.Architecture = platformContext.ArchitectureChoice logPrefix := "" if len(options.Platforms) > 1 { - logPrefix = "[" + platform.OS + "/" + platform.Arch - if platform.Variant != "" { - logPrefix += "/" + platform.Variant - } - logPrefix += "] " + logPrefix = "[" + platforms.Format(platformSpec) + "] " + } + // Deep copy args to prevent concurrent read/writes over Args. 
+ argsCopy := make(map[string]string) + for key, value := range options.Args { + argsCopy[key] = value } + platformOptions.Args = argsCopy builds.Go(func() error { thisID, thisRef, err := buildDockerfilesOnce(ctx, store, logger, logPrefix, platformOptions, paths, files) if err != nil { @@ -217,12 +256,8 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B id, ref = thisID, thisRef instancesLock.Lock() instances = append(instances, instance{ - ID: thisID, - Platform: v1.Platform{ - OS: platformContext.OSChoice, - Architecture: platformContext.ArchitectureChoice, - Variant: platformContext.VariantChoice, - }, + ID: thisID, + Platform: platformSpec, }) instancesLock.Unlock() return nil @@ -311,6 +346,40 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr warnOnUnsetBuildArgs(logger, mainNode, options.Args) + // --platform was explicitly selected for this build + // so set correct TARGETPLATFORM in args if it is not + // already selected by the user. + if options.SystemContext.OSChoice != "" && options.SystemContext.ArchitectureChoice != "" { + // os component from --platform string populates TARGETOS + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETOS"]; !ok { + options.Args["TARGETOS"] = options.SystemContext.OSChoice + } + // arch component from --platform string populates TARGETARCH + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETARCH"]; !ok { + options.Args["TARGETARCH"] = options.SystemContext.ArchitectureChoice + } + // variant component from --platform string populates TARGETVARIANT + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETVARIANT"]; !ok { + if options.SystemContext.VariantChoice != "" { + options.Args["TARGETVARIANT"] = options.SystemContext.VariantChoice + } + } + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETPLATFORM"]; !ok { + // buildkit parity: TARGETPLATFORM should be always created + // from SystemContext and not `TARGETOS` and `TARGETARCH` because + // users can always override values of `TARGETOS` and `TARGETARCH` + // but `TARGETPLATFORM` should be set independent of those values. + options.Args["TARGETPLATFORM"] = options.SystemContext.OSChoice + "/" + options.SystemContext.ArchitectureChoice + if options.SystemContext.VariantChoice != "" { + options.Args["TARGETPLATFORM"] = options.Args["TARGETPLATFORM"] + "/" + options.SystemContext.VariantChoice + } + } + } + for i, d := range dockerfilecontents[1:] { additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d)) if err != nil { @@ -318,6 +387,35 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr } mainNode.Children = append(mainNode.Children, additionalNode.Children...) } + + // Check if any modifications done to labels + // add them to node-layer so it becomes regular + // layer. + // Reason: Docker adds label modification as + // last step which can be processed as regular + // steps and if no modification is done to layers + // its easier to re-use cached layers. 
+ if len(options.Labels) > 0 { + for _, labelSpec := range options.Labels { + label := strings.SplitN(labelSpec, "=", 2) + labelLine := "" + key := label[0] + value := "" + if len(label) > 1 { + value = label[1] + } + // check from only empty key since docker supports empty value + if key != "" { + labelLine = fmt.Sprintf("LABEL %q=%q\n", key, value) + additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine)) + if err != nil { + return "", nil, errors.Wrapf(err, "error while adding additional LABEL steps") + } + mainNode.Children = append(mainNode.Children, additionalNode.Children...) + } + } + } + exec, err := newExecutor(logger, logPrefix, store, options, mainNode) if err != nil { return "", nil, errors.Wrapf(err, "error creating build executor") @@ -373,8 +471,8 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string cppCommand := "cpp" cppPath, err := exec.LookPath(cppCommand) if err != nil { - if os.IsNotExist(err) { - err = errors.Errorf("error: %s support requires %s to be installed", containerfile, cppPath) + if errors.Is(err, exec.ErrNotFound) { + err = fmt.Errorf("error: %v: .in support requires %s to be installed", err, cppCommand) } return nil, err } @@ -400,3 +498,194 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string } return &stdoutBuffer, nil } + +// platformsForBaseImages resolves the names of base images from the +// dockerfiles, and if they are all valid references to manifest lists, returns +// the list of platforms that are supported by all of the base images. +func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) { + baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args) + if err != nil { + return nil, errors.Wrapf(err, "determining list of base images") + } + logrus.Debugf("unresolved base images: %v", baseImages) + if len(baseImages) == 0 { + return nil, errors.Wrapf(err, "build uses no non-scratch base images") + } + targetPlatforms := make(map[string]struct{}) + var platformList []struct{ OS, Arch, Variant string } + for baseImageIndex, baseImage := range baseImages { + resolved, err := shortnames.Resolve(systemContext, baseImage) + if err != nil { + return nil, errors.Wrapf(err, "resolving image name %q", baseImage) + } + var manifestBytes []byte + var manifestType string + for _, candidate := range resolved.PullCandidates { + ref, err := docker.NewReference(candidate.Value) + if err != nil { + logrus.Debugf("parsing image reference %q: %v", candidate.Value.String(), err) + continue + } + src, err := ref.NewImageSource(ctx, systemContext) + if err != nil { + logrus.Debugf("preparing to read image manifest for %q: %v", baseImage, err) + continue + } + candidateBytes, candidateType, err := src.GetManifest(ctx, nil) + _ = src.Close() + if err != nil { + logrus.Debugf("reading image manifest for %q: %v", baseImage, err) + continue + } + if !manifest.MIMETypeIsMultiImage(candidateType) { + logrus.Debugf("base image %q is not a reference to a manifest list: %v", baseImage, err) + continue + } + if err := candidate.Record(); err != nil { + logrus.Debugf("error recording name %q for base image %q: %v", candidate.Value.String(), baseImage, err) + continue + } + baseImage = candidate.Value.String() + manifestBytes, manifestType = candidateBytes, candidateType + break + } + if 
len(manifestBytes) == 0 { + if len(resolved.PullCandidates) > 0 { + return nil, errors.Errorf("base image name %q didn't resolve to a manifest list", baseImage) + } + return nil, errors.Errorf("base image name %q didn't resolve to anything", baseImage) + } + if manifestType != v1.MediaTypeImageIndex { + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "parsing manifest list from base image %q", baseImage) + } + list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex) + if err != nil { + return nil, errors.Wrapf(err, "converting manifest list from base image %q to v2s2 list", baseImage) + } + manifestBytes, err = list.Serialize() + if err != nil { + return nil, errors.Wrapf(err, "encoding converted v2s2 manifest list for base image %q", baseImage) + } + } + index, err := manifest.OCI1IndexFromManifest(manifestBytes) + if err != nil { + return nil, errors.Wrapf(err, "decoding manifest list for base image %q", baseImage) + } + if baseImageIndex == 0 { + // populate the list with the first image's normalized platforms + for _, instance := range index.Manifests { + if instance.Platform == nil { + continue + } + platform := platforms.Normalize(*instance.Platform) + targetPlatforms[platforms.Format(platform)] = struct{}{} + logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform)) + } + } else { + // prune the list of any normalized platforms this base image doesn't support + imagePlatforms := make(map[string]struct{}) + for _, instance := range index.Manifests { + if instance.Platform == nil { + continue + } + platform := platforms.Normalize(*instance.Platform) + imagePlatforms[platforms.Format(platform)] = struct{}{} + logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform)) + } + var removed []string + for platform := range targetPlatforms { + if _, present := imagePlatforms[platform]; !present { + removed = append(removed, platform) + logger.Debugf("image %q does not support %q", baseImage, platform) + } + } + for _, remove := range removed { + delete(targetPlatforms, remove) + } + } + if baseImageIndex == len(baseImages)-1 && len(targetPlatforms) > 0 { + // extract the list + for platform := range targetPlatforms { + platform, err := platforms.Parse(platform) + if err != nil { + return nil, errors.Wrapf(err, "parsing platform double/triple %q", platform) + } + platformList = append(platformList, struct{ OS, Arch, Variant string }{ + OS: platform.OS, + Arch: platform.Architecture, + Variant: platform.Variant, + }) + logger.Debugf("base images all support %q", platform) + } + } + } + if len(platformList) == 0 { + return nil, errors.New("base images have no platforms in common") + } + return platformList, nil +} + +// baseImages parses the dockerfilecontents, possibly replacing the first +// stage's base image with FROM, and returns the list of base images as +// provided. Each entry in the dockerfilenames slice corresponds to a slice in +// dockerfilecontents. 
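Reviewer note: platformsForBaseImages() above is, at its core, a set intersection: seed a set with the normalized platforms of the first base image, then for every further image delete whatever that image doesn't also offer; whatever survives the last image is the answer. Reduced to plain string sets, the bookkeeping looks like this:

package main

import "fmt"

// intersect mimics how platformsForBaseImages() prunes targetPlatforms.
func intersect(perImage [][]string) map[string]struct{} {
	target := make(map[string]struct{})
	for i, supported := range perImage {
		set := make(map[string]struct{}, len(supported))
		for _, p := range supported {
			set[p] = struct{}{}
		}
		if i == 0 {
			target = set // first image seeds the candidate set
			continue
		}
		for p := range target {
			if _, ok := set[p]; !ok {
				delete(target, p) // not supported by this image: prune
			}
		}
	}
	return target
}

func main() {
	common := intersect([][]string{
		{"linux/amd64", "linux/arm64", "linux/s390x"},
		{"linux/amd64", "linux/arm64"},
		{"linux/arm64", "linux/ppc64le"},
	})
	fmt.Println(common) // map[linux/arm64:{}]
}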
+func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string) ([]string, error) { + mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0])) + if err != nil { + return nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfilenames[0]) + } + + for i, d := range dockerfilecontents[1:] { + additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d)) + if err != nil { + return nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfilenames[i]) + } + mainNode.Children = append(mainNode.Children, additionalNode.Children...) + } + + b := imagebuilder.NewBuilder(args) + defaultContainerConfig, err := config.Default() + if err != nil { + return nil, errors.Wrapf(err, "failed to get container config") + } + b.Env = defaultContainerConfig.GetDefaultEnv() + stages, err := imagebuilder.NewStages(mainNode, b) + if err != nil { + return nil, errors.Wrap(err, "error reading multiple stages") + } + var baseImages []string + nicknames := make(map[string]bool) + for stageIndex, stage := range stages { + node := stage.Node // first line + for node != nil { // each line + for _, child := range node.Children { // tokens on this line, though we only care about the first + switch strings.ToUpper(child.Value) { // first token - instruction + case "FROM": + if child.Next != nil { // second token on this line + // If we have a fromOverride, replace the value of + // image name for the first FROM in the Containerfile. + if from != "" { + child.Next.Value = from + from = "" + } + base := child.Next.Value + if base != "scratch" && !nicknames[base] { + // TODO: this didn't undergo variable and arg + // expansion, so if the AS clause in another + // FROM instruction uses argument values, + // we might not record the right value here. + baseImages = append(baseImages, base) + } + } + } + } + node = node.Next // next line + } + if stage.Name != strconv.Itoa(stageIndex) { + nicknames[stage.Name] = true + } + } + return baseImages, nil +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 78606d2b4ec..6b63b5162bd 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -18,6 +18,7 @@ import ( "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" "github.com/containers/common/libimage" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" @@ -57,6 +58,7 @@ var builtinAllowedBuildArgs = map[string]bool{ // interface. It coordinates the entire build by using one or more // StageExecutors to handle each stage of the build. type Executor struct { + containerSuffix string logger *logrus.Logger stages map[string]*StageExecutor store storage.Store @@ -84,47 +86,57 @@ type Executor struct { configureNetwork define.NetworkConfigurationPolicy cniPluginPath string cniConfigDir string - idmappingOptions *define.IDMappingOptions - commonBuildOptions *define.CommonBuildOptions - defaultMountsFilePath string - iidfile string - squash bool - labels []string - annotations []string - layers bool - useCache bool - removeIntermediateCtrs bool - forceRmIntermediateCtrs bool - imageMap map[string]string // Used to map images that we create to handle the AS construct. 
- containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. - baseMap map[string]bool // Holds the names of every base image, as given. - rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. - blobDirectory string - excludes []string - unusedArgs map[string]struct{} - capabilities []string - devices define.ContainerDevices - signBy string - architecture string - timestamp *time.Time - os string - maxPullPushRetries int - retryPullPushDelay time.Duration - ociDecryptConfig *encconfig.DecryptConfig - lastError error - terminatedStage map[string]error - stagesLock sync.Mutex - stagesSemaphore *semaphore.Weighted - jobs int - logRusage bool - rusageLogFile io.Writer - imageInfoLock sync.Mutex - imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs - fromOverride string - manifest string - secrets map[string]string - sshsources map[string]*sshagent.Source - logPrefix string + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + networkInterface nettypes.ContainerNetwork + idmappingOptions *define.IDMappingOptions + commonBuildOptions *define.CommonBuildOptions + defaultMountsFilePath string + iidfile string + squash bool + labels []string + annotations []string + layers bool + noHosts bool + useCache bool + removeIntermediateCtrs bool + forceRmIntermediateCtrs bool + imageMap map[string]string // Used to map images that we create to handle the AS construct. + containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. + baseMap map[string]bool // Holds the names of every base image, as given. + rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. 
+ blobDirectory string + excludes []string + ignoreFile string + unusedArgs map[string]struct{} + capabilities []string + devices define.ContainerDevices + signBy string + architecture string + timestamp *time.Time + os string + maxPullPushRetries int + retryPullPushDelay time.Duration + ociDecryptConfig *encconfig.DecryptConfig + lastError error + terminatedStage map[string]error + stagesLock sync.Mutex + stagesSemaphore *semaphore.Weighted + logRusage bool + rusageLogFile io.Writer + imageInfoLock sync.Mutex + imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs + fromOverride string + manifest string + secrets map[string]define.Secret + sshsources map[string]*sshagent.Source + logPrefix string + unsetEnvs []string + processLabel string // Shares processLabel of first stage container with containers of other stages in same build + mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build + buildOutput string // Specifies instructions for any custom build output + osVersion string + osFeatures []string + envs []string } type imageTypeAndHistoryAndDiffIDs struct { @@ -143,7 +155,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o excludes := options.Excludes if len(excludes) == 0 { - excludes, err = imagebuilder.ParseDockerignore(options.ContextDirectory) + excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile) if err != nil { return nil, err } @@ -179,10 +191,6 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o if err != nil { return nil, err } - jobs := 1 - if options.Jobs != nil { - jobs = *options.Jobs - } writer := options.ReportWriter if options.Quiet { @@ -203,11 +211,13 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o } exec := Executor{ + containerSuffix: options.ContainerSuffix, logger: logger, stages: make(map[string]*StageExecutor), store: store, contextDir: options.ContextDirectory, excludes: excludes, + ignoreFile: options.IgnoreFile, pullPolicy: options.PullPolicy, registry: options.Registry, ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions, @@ -231,6 +241,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o configureNetwork: options.ConfigureNetwork, cniPluginPath: options.CNIPluginPath, cniConfigDir: options.CNIConfigDir, + networkInterface: options.NetworkInterface, idmappingOptions: options.IDMappingOptions, commonBuildOptions: options.CommonBuildOpts, defaultMountsFilePath: options.DefaultMountsFilePath, @@ -239,6 +250,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o labels: append([]string{}, options.Labels...), annotations: append([]string{}, options.Annotations...), layers: options.Layers, + noHosts: options.CommonBuildOpts.NoHosts, useCache: !options.NoCache, removeIntermediateCtrs: options.RemoveIntermediateCtrs, forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, @@ -259,7 +271,6 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o ociDecryptConfig: options.OciDecryptConfig, terminatedStage: make(map[string]error), stagesSemaphore: options.JobSemaphore, - jobs: jobs, logRusage: options.LogRusage, rusageLogFile: rusageLogFile, imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs), @@ -268,6 +279,11 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o secrets: secrets, sshsources: sshsources, 
logPrefix: logPrefix, + unsetEnvs: append([]string{}, options.UnsetEnvs...), + buildOutput: options.BuildOutput, + osVersion: options.OSVersion, + osFeatures: append([]string{}, options.OSFeatures...), + envs: append([]string{}, options.Envs...), } if exec.err == nil { exec.err = os.Stderr @@ -292,9 +308,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o // and value, or just an argument, since they can be // separated by either "=" or whitespace. list := strings.SplitN(arg.Value, "=", 2) - if _, stillUnused := exec.unusedArgs[list[0]]; stillUnused { - delete(exec.unusedArgs, list[0]) - } + delete(exec.unusedArgs, list[0]) } } break @@ -353,7 +367,7 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebuilder.Stages) (bool, error) { found := false for _, otherStage := range stages { - if otherStage.Name == name || fmt.Sprintf("%d", otherStage.Position) == name { + if otherStage.Name == name || strconv.Itoa(otherStage.Position) == name { found = true break } @@ -521,9 +535,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image lastErr = err } } + cleanupStages = nil b.stagesLock.Unlock() - cleanupStages = nil // Clean up any builders that we used to get data from images. for _, builder := range b.containerMap { if err := builder.Delete(); err != nil { @@ -595,11 +609,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image } base := child.Next.Value if base != "scratch" { - // TODO: this didn't undergo variable and arg - // expansion, so if the AS clause in another - // FROM instruction uses argument values, - // we might not record the right value here. - b.baseMap[base] = true + userArgs := argsMapToSlice(stage.Builder.Args) + baseWithArg, err := imagebuilder.ProcessWord(base, userArgs) + if err != nil { + return "", nil, errors.Wrapf(err, "while replacing arg variables with values for format %q", base) + } + b.baseMap[baseWithArg] = true logrus.Debugf("base for stage %d: %q", stageIndex, base) } } @@ -628,36 +643,53 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image Error error } - ch := make(chan Result) + ch := make(chan Result, len(stages)) if b.stagesSemaphore == nil { - jobs := int64(b.jobs) - if jobs < 0 { - return "", nil, errors.New("error building: invalid value for jobs. It must be a positive integer") - } else if jobs == 0 { - jobs = int64(len(stages)) - } - - b.stagesSemaphore = semaphore.NewWeighted(jobs) + b.stagesSemaphore = semaphore.NewWeighted(int64(len(stages))) } var wg sync.WaitGroup wg.Add(len(stages)) go func() { + cancel := false for stageIndex := range stages { index := stageIndex // Acquire the semaphore before creating the goroutine so we are sure they // run in the specified order. 
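An aside on the concurrency pattern used here (the diff resumes below with the Acquire call the comment refers to): taking a weighted semaphore before spawning each goroutine guarantees the workers are launched in stage order even though they may finish out of order, and the buffered result channel lets every worker deliver its result without blocking even if the consumer has already given up. A minimal, self-contained sketch of the same idea, not buildah's code; `buildStage` here is a hypothetical stand-in for the real per-stage work:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func buildStage(i int, name string) string { // hypothetical stand-in for the real work
	return fmt.Sprintf("stage %d (%s): built", i, name)
}

func main() {
	ctx := context.Background()
	stages := []string{"base", "compile", "final"}

	// Buffered to len(stages): every worker can deliver exactly one result
	// without blocking, mirroring `make(chan Result, len(stages))` above.
	results := make(chan string, len(stages))
	sem := semaphore.NewWeighted(2) // at most two stages run concurrently

	var wg sync.WaitGroup
	wg.Add(len(stages))
	go func() {
		for i, name := range stages {
			// Acquire BEFORE starting the goroutine: workers are launched
			// strictly in stage order, even though they may finish out of order.
			if err := sem.Acquire(ctx, 1); err != nil {
				results <- fmt.Sprintf("stage %d (%s): not started: %v", i, name, err)
				wg.Done()
				continue
			}
			i, name := i, name
			go func() {
				defer sem.Release(1)
				defer wg.Done()
				results <- buildStage(i, name)
			}()
		}
	}()
	wg.Wait()
	close(results)
	for r := range results {
		fmt.Println(r)
	}
}
```

The buffering is what makes the error paths in the diff safe: a worker that fails can always send its Result and return without a receiver being present.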
if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil { + cancel = true b.lastError = err - return + ch <- Result{ + Index: index, + Error: err, + } + wg.Done() + continue } + b.stagesLock.Lock() + cleanupStages := cleanupStages + b.stagesLock.Unlock() go func() { defer b.stagesSemaphore.Release(1) defer wg.Done() + if cancel || cleanupStages == nil { + var err error + if stages[index].Name != strconv.Itoa(index) { + err = errors.Errorf("not building stage %d: build canceled", index) + } else { + err = errors.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name) + } + ch <- Result{ + Index: index, + Error: err, + } + return + } stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index) if stageErr != nil { + cancel = true ch <- Result{ Index: index, Error: stageErr, @@ -684,7 +716,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image b.stagesLock.Lock() b.terminatedStage[stage.Name] = r.Error - b.terminatedStage[fmt.Sprintf("%d", stage.Position)] = r.Error + b.terminatedStage[strconv.Itoa(stage.Position)] = r.Error if r.Error != nil { b.stagesLock.Unlock() diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index ad0caed2877..01b70369bb9 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -15,8 +15,12 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" buildahdocker "github.com/containers/buildah/docker" + "github.com/containers/buildah/internal" + internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/rusage" "github.com/containers/buildah/util" + config "github.com/containers/common/pkg/config" cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" @@ -25,6 +29,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/unshare" docker "github.com/fsouza/go-dockerclient" digest "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -47,21 +52,22 @@ import ( // If we're naming the result of the build, only the last stage will apply that // name to the image that it produces. 
 type StageExecutor struct {
-	ctx             context.Context
-	executor        *Executor
-	log             func(format string, args ...interface{})
-	index           int
-	stages          imagebuilder.Stages
-	name            string
-	builder         *buildah.Builder
-	preserved       int
-	volumes         imagebuilder.VolumeSet
-	volumeCache     map[string]string
-	volumeCacheInfo map[string]os.FileInfo
-	mountPoint      string
-	output          string
-	containerIDs    []string
-	stage           *imagebuilder.Stage
+	ctx                   context.Context
+	executor              *Executor
+	log                   func(format string, args ...interface{})
+	index                 int
+	stages                imagebuilder.Stages
+	name                  string
+	builder               *buildah.Builder
+	preserved             int
+	volumes               imagebuilder.VolumeSet
+	volumeCache           map[string]string
+	volumeCacheInfo       map[string]os.FileInfo
+	mountPoint            string
+	output                string
+	containerIDs          []string
+	stage                 *imagebuilder.Stage
+	argsFromContainerfile []string
 }
 
 // Preserve informs the stage executor that from this point on, it needs to
@@ -401,6 +407,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 			PreserveOwnership: preserveOwnership,
 			ContextDir:        contextDir,
 			Excludes:          copyExcludes,
+			IgnoreFile:        s.executor.ignoreFile,
 			IDMappingOptions:  idMappingOptions,
 			StripSetuidBit:    stripSetuid,
 			StripSetgidBit:    stripSetgid,
@@ -412,10 +419,67 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 	return nil
 }
 
+// runStageMountPoints returns a map of StageName/ImageName -> internal.StageMountDetails
+// for RunOpts when any --mount flag carries a from= option. The stage executor can
+// automatically clean up these mounts when the stage is removed. If a RUN instruction
+// contains `--mount` with `from`, the referenced images or stages are pre-mounted for Run().
+func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) {
+	stageMountPoints := make(map[string]internal.StageMountDetails)
+	for _, flag := range mountList {
+		if strings.Contains(flag, "from") {
+			arr := strings.SplitN(flag, ",", 2)
+			if len(arr) < 2 {
+				return nil, errors.Errorf("Invalid --mount command: %s", flag)
+			}
+			tokens := strings.Split(arr[1], ",")
+			for _, val := range tokens {
+				kv := strings.SplitN(val, "=", 2)
+				switch kv[0] {
+				case "from":
+					if len(kv) == 1 {
+						return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument")
+					}
+					if kv[1] == "" {
+						return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value")
+					}
+					from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
+					if fromErr != nil {
+						return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1])
+					}
+					// If the source's name corresponds to the
+					// result of an earlier stage, wait for that
+					// stage to finish being built.
+					if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
+						return nil, err
+					}
+					if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
+						stageMountPoints[from] = internal.StageMountDetails{IsStage: true, MountPoint: otherStage.mountPoint}
+						break
+					} else {
+						mountPoint, err := s.getImageRootfs(s.ctx, from)
+						if err != nil {
+							return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from)
+						}
+						stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint}
+						break
+					}
+				default:
+					continue
+				}
+			}
+		}
+	}
+	return stageMountPoints, nil
+}
+
 // Run executes a RUN instruction using the stage's current working container
 // as a root directory.
 func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
 	logrus.Debugf("RUN %#v, %#v", run, config)
+	stageMountPoints, err := s.runStageMountPoints(run.Mounts)
+	if err != nil {
+		return err
+	}
 	if s.builder == nil {
 		return errors.Errorf("no build container available")
 	}
@@ -433,12 +497,14 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
 		Hostname:         config.Hostname,
 		Runtime:          s.executor.runtime,
 		Args:             s.executor.runtimeArgs,
+		NoHosts:          s.executor.noHosts,
 		NoPivot:          os.Getenv("BUILDAH_NOPIVOT") != "",
 		Mounts:           append([]Mount{}, s.executor.transientMounts...),
 		Env:              config.Env,
 		User:             config.User,
 		WorkingDir:       config.WorkingDir,
 		Entrypoint:       config.Entrypoint,
+		ContextDir:       s.executor.contextDir,
 		Cmd:              config.Cmd,
 		Stdin:            stdin,
 		Stdout:           s.executor.out,
@@ -449,6 +515,8 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
 		Secrets:          s.executor.secrets,
 		SSHSources:       s.executor.sshsources,
 		RunMounts:        run.Mounts,
+		StageMountPoints: stageMountPoints,
+		SystemContext:    s.executor.systemContext,
 	}
 	if config.NetworkDisabled {
 		options.ConfigureNetwork = buildah.NetworkDisabled
@@ -533,20 +601,38 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 		}
 	}
 
+	builderSystemContext := s.executor.systemContext
+	// get the platform string from the stage
+	if stage.Builder.Platform != "" {
+		os, arch, variant, err := parse.Platform(stage.Builder.Platform)
+		if err != nil {
+			return nil, errors.Wrapf(err, "unable to parse platform %q", stage.Builder.Platform)
+		}
+		if arch != "" || variant != "" {
+			builderSystemContext.ArchitectureChoice = arch
+			builderSystemContext.VariantChoice = variant
+		}
+		if os != "" {
+			builderSystemContext.OSChoice = os
+		}
+	}
+
 	builderOptions := buildah.BuilderOptions{
 		Args:                  ib.Args,
 		FromImage:             from,
 		PullPolicy:            pullPolicy,
+		ContainerSuffix:       s.executor.containerSuffix,
 		Registry:              s.executor.registry,
 		BlobDirectory:         s.executor.blobDirectory,
 		SignaturePolicyPath:   s.executor.signaturePolicyPath,
 		ReportWriter:          s.executor.reportWriter,
-		SystemContext:         s.executor.systemContext,
+		SystemContext:         builderSystemContext,
 		Isolation:             s.executor.isolation,
 		NamespaceOptions:      s.executor.namespaceOptions,
 		ConfigureNetwork:      s.executor.configureNetwork,
 		CNIPluginPath:         s.executor.cniPluginPath,
 		CNIConfigDir:          s.executor.cniConfigDir,
+		NetworkInterface:      s.executor.networkInterface,
 		IDMappingOptions:      s.executor.idmappingOptions,
 		CommonBuildOpts:       s.executor.commonBuildOptions,
 		DefaultMountsFilePath: s.executor.defaultMountsFilePath,
@@ -556,6 +642,9 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 		MaxPullRetries:        s.executor.maxPullPushRetries,
 		PullRetryDelay:        s.executor.retryPullPushDelay,
 		OciDecryptConfig:      s.executor.ociDecryptConfig,
+		Logger:                s.executor.logger,
+		ProcessLabel:          s.executor.processLabel,
+		MountLabel:            s.executor.mountLabel,
 	}
 
 	builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
@@ -563,6 +652,16 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 		return nil, errors.Wrapf(err, "error creating build container")
 	}
 
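Worth pausing on the per-stage platform override that prepare() gains above: a `FROM --platform=...` specifier has to be decomposed into OS, architecture, and variant before it can be copied into the builder's SystemContext. A hedged illustration of that decomposition using the containerd platforms package this tree already vendors (buildah's own `parse.Platform` helper, used above, behaves similarly but is not shown in this hunk):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// "linux/arm/v7" -> OS, architecture and variant: the three values a
	// FROM --platform=... override feeds into OSChoice, ArchitectureChoice
	// and VariantChoice on the builder's SystemContext.
	p, err := platforms.Parse("linux/arm/v7")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.OS, p.Architecture, p.Variant) // linux arm v7

	// Normalize fills in defaults, the same call hostInfo() in info.go uses.
	n := platforms.Normalize(p)
	fmt.Printf("%s/%s/%s\n", n.OS, n.Architecture, n.Variant)
}
```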
+	// If the executor's ProcessLabel and MountLabel are empty, this is the first stage.
+	// Make sure we share the first stage's ProcessLabel and MountLabel with all other
+	// subsequent stages: doing this ensures that one stage in the same build can mount
+	// another stage even when `selinux` is enabled.
+
+	if s.executor.mountLabel == "" && s.executor.processLabel == "" {
+		s.executor.mountLabel = builder.MountLabel
+		s.executor.processLabel = builder.ProcessLabel
+	}
+
 	if initializeIBConfig {
 		volumes := map[string]struct{}{}
 		for _, v := range builder.Volumes() {
@@ -582,6 +681,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 			Volumes:      volumes,
 			WorkingDir:   builder.WorkDir(),
 			Entrypoint:   builder.Entrypoint(),
+			Healthcheck:  (*docker.HealthConfig)(builder.Healthcheck()),
 			Labels:       builder.Labels(),
 			Shell:        builder.Shell(),
 			StopSignal:   builder.StopSignal(),
@@ -673,8 +773,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 	checkForLayers := s.executor.layers && s.executor.useCache
 	moreStages := s.index < len(s.stages)-1
 	lastStage := !moreStages
-	imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)])
-	rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)])
+	imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)])
+	rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)])
 
 	// If the base image's name corresponds to the result of an earlier
 	// stage, make sure that stage has finished building an image, and
@@ -971,7 +1071,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			}
 		}
 
-		if cacheID != "" && !(s.executor.squash && lastInstruction) {
+		// We want to preserve the history of the other layers during a squashed build.
+		// The toggle flag below lets the executor treat the other instructions and layers
+		// as a regular build and perform the squash only at the end.
+		squashToggle := false
+		// Note: if the build squashes, we must try to reuse as many cached layers as possible,
+		// so only perform the final commit on the last instruction of the last stage.
+		if cacheID != "" {
 			logCacheHit(cacheID)
 			// A suitable cached image was found, so we can just
 			// reuse it.  If we need to add a name to the resulting
@@ -985,6 +1091,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				}
 			}
 		} else {
+			if s.executor.squash {
+				// We want to preserve the history of the other layers during a squashed build.
+				// The squashToggle flag lets the executor treat the other instructions and
+				// layers as a regular build and perform the squash only at the end.
+				s.executor.squash = false
+				squashToggle = true
+			}
 			// We're not going to find any more cache hits, so we
 			// can stop looking for them.
 			checkForLayers = false
@@ -996,6 +1109,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
 			}
 		}
+
+		// Perform the final squash for this build, as we are on the
+		// last instruction of the last stage.
+		if (s.executor.squash || squashToggle) && lastInstruction && lastStage {
+			s.executor.squash = true
+			imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
+			if err != nil {
+				return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step)
+			}
+		}
+
 		logImageID(imgID)
 
 		// Update our working container to be based off of the cached
@@ -1110,10 +1234,15 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
 	}
 	switch strings.ToUpper(node.Value) {
 	case "ARG":
-		buildArgs := s.getBuildArgs()
+		for _, variable := range strings.Fields(node.Original) {
+			if variable != "ARG" {
+				s.argsFromContainerfile = append(s.argsFromContainerfile, variable)
+			}
+		}
+		buildArgs := s.getBuildArgsKey()
 		return "/bin/sh -c #(nop) ARG " + buildArgs
 	case "RUN":
-		buildArgs := s.getBuildArgs()
+		buildArgs := s.getBuildArgsResolvedForRun()
 		if buildArgs != "" {
 			return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
 		}
@@ -1131,10 +1260,71 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
 
 // getBuildArgs returns a string of the build-args specified during the build process
 // it excludes any build-args that were not used in the build process
-func (s *StageExecutor) getBuildArgs() string {
-	buildArgs := s.stage.Builder.Arguments()
-	sort.Strings(buildArgs)
-	return strings.Join(buildArgs, " ")
+// values for args are overridden by the values specified using ENV,
+// because values from ENV always override values specified using ARG.
+func (s *StageExecutor) getBuildArgsResolvedForRun() string {
+	var envs []string
+	configuredEnvs := make(map[string]string)
+	dockerConfig := s.stage.Builder.Config()
+
+	for _, env := range dockerConfig.Env {
+		splitv := strings.SplitN(env, "=", 2)
+		if len(splitv) == 2 {
+			configuredEnvs[splitv[0]] = splitv[1]
+		}
+	}
+
+	for key, value := range s.stage.Builder.Args {
+		if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
+			// if the value was in the image it is given higher priority,
+			// so embed that value into the build history
+			_, inImage := configuredEnvs[key]
+			if inImage {
+				envs = append(envs, fmt.Sprintf("%s=%s", key, configuredEnvs[key]))
+			} else {
+				// By default everything must be added to history.
+				// The following variable is set to false only for special cases.
+				addToHistory := true
+
+				// The following value comes from the build-args. Check whether
+				// this key belongs to any of the predefined allowlisted args
+				// (e.g. proxy variables); if that arg was not set manually in
+				// the Containerfile/Dockerfile, don't write its value to history.
+				// This ensures parity with docker/buildkit (see the sketch below).
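Restating the proxy-variable rule as a standalone sketch before the loop that implements it: a build-arg value is written to image history unless it is a predefined proxy variable that the Containerfile never declared with an explicit ARG. The allowlist contents below are an assumption; the real list is `config.ProxyEnv` from containers/common, used in the loop that follows.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// proxyEnv stands in for config.ProxyEnv; the real allowlist lives in
// containers/common/pkg/config.
var proxyEnv = []string{"http_proxy", "https_proxy", "ftp_proxy", "no_proxy",
	"HTTP_PROXY", "HTTPS_PROXY", "FTP_PROXY", "NO_PROXY"}

// historyArgs applies the rule described above: record a build-arg in image
// history unless it is a predefined proxy variable that the Containerfile
// never declared with an explicit ARG.
func historyArgs(args map[string]string, declaredARGs []string) string {
	declared := map[string]bool{}
	for _, d := range declaredARGs {
		declared[d] = true
	}
	var envs []string
	for key, value := range args {
		addToHistory := true
		for _, proxy := range proxyEnv {
			if key == proxy && !declared[key] {
				addToHistory = false // implicit proxy arg: keep it out of history
			}
		}
		if addToHistory {
			envs = append(envs, fmt.Sprintf("%s=%s", key, value))
		}
	}
	sort.Strings(envs)
	return strings.Join(envs, " ")
}

func main() {
	args := map[string]string{"VERSION": "1.2", "http_proxy": "http://proxy:3128"}
	fmt.Println(historyArgs(args, nil))                    // VERSION=1.2
	fmt.Println(historyArgs(args, []string{"http_proxy"})) // VERSION=1.2 http_proxy=http://proxy:3128
}
```

Keeping implicit proxy settings out of history avoids both cache-busting on every proxy change and leaking site-local proxy endpoints into published images.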
+				for _, variable := range config.ProxyEnv {
+					if key == variable {
+						// found in predefined args,
+						// so don't add to history
+						// unless the user gave an explicit `ARG`
+						addToHistory = false
+						for _, processedArg := range s.argsFromContainerfile {
+							if key == processedArg {
+								addToHistory = true
+							}
+						}
+					}
+				}
+				if addToHistory {
+					envs = append(envs, fmt.Sprintf("%s=%s", key, value))
+				}
+			}
+		}
+	}
+	sort.Strings(envs)
+	return strings.Join(envs, " ")
+}
+
+// getBuildArgsKey returns the build-arg keys that were specified during the
+// build process; it is used exclusively for the build history.
+func (s *StageExecutor) getBuildArgsKey() string {
+	var envs []string
+	for key := range s.stage.Builder.Args {
+		if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
+			envs = append(envs, key)
+		}
+	}
+	sort.Strings(envs)
+	return strings.Join(envs, " ")
 }
 
 // tagExistingImage adds names to an image already in the store
@@ -1259,8 +1449,18 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
 
 // commit writes the container's contents to an image, using a passed-in tag as
 // the name if there is one, generating a unique ID-based one otherwise.
+// Alternatively, it commits via a custom exporter if one is specified.
 func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) {
 	ib := s.stage.Builder
+	var buildOutputOption define.BuildOutputOption
+	if s.executor.buildOutput != "" {
+		var err error
+		logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
+		buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput)
+		if err != nil {
+			return "", nil, errors.Wrapf(err, "failed to parse build output")
+		}
+	}
 	var imageRef types.ImageReference
 	if output != "" {
 		imageRef2, err := s.executor.resolveNameToImageRef(output)
@@ -1285,6 +1485,17 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 	if s.executor.os != "" {
 		s.builder.SetOS(s.executor.os)
 	}
+	if s.executor.osVersion != "" {
+		s.builder.SetOSVersion(s.executor.osVersion)
+	}
+	for _, osFeatureSpec := range s.executor.osFeatures {
+		switch {
+		case strings.HasSuffix(osFeatureSpec, "-"):
+			s.builder.UnsetOSFeature(strings.TrimSuffix(osFeatureSpec, "-"))
+		default:
+			s.builder.SetOSFeature(osFeatureSpec)
+		}
+	}
 	s.builder.SetUser(config.User)
 	s.builder.ClearPorts()
 	for p := range config.ExposedPorts {
@@ -1294,6 +1505,28 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 		spec := strings.SplitN(envSpec, "=", 2)
 		s.builder.SetEnv(spec[0], spec[1])
 	}
+	for _, envSpec := range s.executor.envs {
+		env := strings.SplitN(envSpec, "=", 2)
+		if len(env) > 1 {
+			getenv := func(name string) string {
+				for _, envvar := range s.builder.Env() {
+					val := strings.SplitN(envvar, "=", 2)
+					if len(val) == 2 && val[0] == name {
+						return val[1]
+					}
+				}
+				logrus.Errorf("error expanding variable %q: no value set in image", name)
+				return name
+			}
+			env[1] = os.Expand(env[1], getenv)
+			s.builder.SetEnv(env[0], env[1])
+		} else {
+			s.builder.SetEnv(env[0], os.Getenv(env[0]))
+		}
+	}
+	for _, envSpec := range s.executor.unsetEnvs {
+		s.builder.UnsetEnv(envSpec)
+	}
 	s.builder.SetCmd(config.Cmd)
 	s.builder.ClearVolumes()
 	for v := range config.Volumes {
@@ -1323,6 +1556,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 	for k, v := range config.Labels {
 		s.builder.SetLabel(k, v)
 	}
+	if s.executor.commonBuildOptions.IdentityLabel ==
types.OptionalBoolUndefined || s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolTrue { + s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version) + } for _, labelSpec := range s.executor.labels { label := strings.SplitN(labelSpec, "=", 2) if len(label) > 1 { @@ -1331,7 +1567,6 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer s.builder.SetLabel(label[0], "") } } - s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version) for _, annotationSpec := range s.executor.annotations { annotation := strings.SplitN(annotationSpec, "=", 2) if len(annotation) > 1 { @@ -1365,6 +1600,39 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer HistoryTimestamp: s.executor.timestamp, Manifest: s.executor.manifest, } + // generate build output + if s.executor.buildOutput != "" { + extractRootfsOpts := buildah.ExtractRootfsOptions{} + if unshare.IsRootless() { + // In order to maintain as much parity as possible + // with buildkit's version of --output and to avoid + // unsafe invocation of exported executables it was + // decided to strip setuid,setgid and extended attributes. + // Since modes like setuid,setgid leaves room for executable + // to get invoked with different file-system permission its safer + // to strip them off for unpriviledged invocation. + // See: https://github.com/containers/buildah/pull/3823#discussion_r829376633 + extractRootfsOpts.StripSetuidBit = true + extractRootfsOpts.StripSetgidBit = true + extractRootfsOpts.StripXattrs = true + } + rc, errChan, err := s.builder.ExtractRootfs(options, extractRootfsOpts) + if err != nil { + return "", nil, errors.Wrapf(err, "failed to extract rootfs from given container image") + } + defer rc.Close() + err = internalUtil.ExportFromReader(rc, buildOutputOption) + if err != nil { + return "", nil, errors.Wrapf(err, "failed to export build output") + } + if errChan != nil { + err = <-errChan + if err != nil { + return "", nil, err + } + } + + } imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) if err != nil { return "", nil, err @@ -1381,5 +1649,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer } func (s *StageExecutor) EnsureContainerPath(path string) error { - return copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{}) + return s.builder.EnsureContainerPathAs(path, "", nil) +} + +func (s *StageExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + return s.builder.EnsureContainerPathAs(path, user, mode) } diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go index 598e407a86d..90c018fa497 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/util.go +++ b/vendor/github.com/containers/buildah/imagebuildah/util.go @@ -10,3 +10,13 @@ import ( func InitReexec() bool { return buildah.InitReexec() } + +// argsMapToSlice returns the contents of a map[string]string as a slice of keys +// and values joined with "=". 
+func argsMapToSlice(m map[string]string) []string { + s := make([]string, 0, len(m)) + for k, v := range m { + s = append(s, k+"="+v) + } + return s +} diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index 0029a95e27b..a4fcee47686 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -83,6 +83,11 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system return nil, err } + netInt, err := getNetworkInterface(store, "", "") + if err != nil { + return nil, err + } + builder := &Builder{ store: store, Type: containerType, @@ -100,6 +105,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system UIDMap: uidmap, GIDMap: gidmap, }, + NetworkInterface: netInt, } if err := builder.initConfig(ctx, image, systemContext); err != nil { diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index f0bf92ddf33..12b69c9ba60 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -11,10 +11,12 @@ import ( "strings" "time" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/util" "github.com/containers/storage" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -43,8 +45,10 @@ func Info(store storage.Store) ([]InfoData, error) { func hostInfo() map[string]interface{} { info := map[string]interface{}{} - info["os"] = runtime.GOOS - info["arch"] = runtime.GOARCH + ps := platforms.Normalize(v1.Platform{OS: runtime.GOOS, Architecture: runtime.GOARCH}) + info["os"] = ps.OS + info["arch"] = ps.Architecture + info["variant"] = ps.Variant info["cpus"] = runtime.NumCPU() info["rootless"] = unshare.IsRootless() diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index 2a09821f3c1..02a81be6fb8 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -20,32 +20,6 @@ lags the upstream release. sudo yum -y install buildah ``` -The [Kubic project](https://build.opensuse.org/project/show/devel:kubic:libcontainers:stable) -provides updated packages for CentOS 8 and CentOS 8 Stream. - -```bash -# CentOS 8 -sudo dnf -y module disable container-tools -sudo dnf -y install 'dnf-command(copr)' -sudo dnf -y copr enable rhcontainerbot/container-selinux -sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8/devel:kubic:libcontainers:stable.repo -# OPTIONAL FOR RUNC USERS: crun will be installed by default. Install runc first if you prefer runc -sudo dnf -y --refresh install runc -# Install Buildah -sudo dnf -y --refresh install buildah - -# CentOS 8 Stream -sudo dnf -y module disable container-tools -sudo dnf -y install 'dnf-command(copr)' -sudo dnf -y copr enable rhcontainerbot/container-selinux -sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo -# OPTIONAL FOR RUNC USERS: crun will be installed by default. 
Install runc first if you prefer runc -sudo dnf -y --refresh install runc -# Install Buildah -sudo dnf -y --refresh install buildah -``` - - #### [Debian](https://debian.org) The buildah package is available in @@ -127,26 +101,6 @@ sudo apt-get -y update sudo apt-get -y install buildah ``` -If you would prefer newer (though not as well-tested) packages, -the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/buildah) -provides packages for active Ubuntu releases 20.04 and newer (it should also work with direct derivatives like Pop!\_OS). -The packages in Kubic project repos are more frequently updated than the one in Ubuntu's official repositories, due to how Debian/Ubuntu works. -Checkout the Kubic project page for a list of supported Ubuntu version and architecture combinations. -The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/buildah/-/tree/debian/debian). - -CAUTION: On Ubuntu 20.10 and newer, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo -OR the official Ubuntu repos. Mixing and matching may lead to unpredictable situations including installation conflicts. - - -```bash -. /etc/os-release -sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${ID^}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" -wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${ID^}_${VERSION_ID}/Release.key -O Release.key -sudo apt-key add - < Release.key -sudo apt-get update -qq -sudo apt-get -qq -y install buildah -``` - # Building from scratch ## System Requirements @@ -254,9 +208,7 @@ Then to install Buildah on Fedora follow the steps in this example: ### RHEL, CentOS -In RHEL and CentOS 7, ensure that you are subscribed to the `rhel-7-server-rpms`, -`rhel-7-server-extras-rpms`, `rhel-7-server-optional-rpms` and `EPEL` repositories, then -run this command: +In RHEL and CentOS, run this command to install the build dependencies: ``` yum -y install \ @@ -278,11 +230,6 @@ run this command: The build steps for Buildah on RHEL or CentOS are the same as for Fedora, above. -*NOTE:* Buildah on RHEL or CentOS version 7.* is not supported running as non-root due to -these systems not having newuidmap or newgidmap installed. It is possible to pull -the shadow-utils source RPM from Fedora 29 and build and install from that in order to -run Buildah as non-root on these systems. 
-
 ### openSUSE
 
 On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go
new file mode 100644
index 00000000000..ec463821547
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/parse/parse.go
@@ -0,0 +1,608 @@
+package parse
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/containers/buildah/internal"
+	internalUtil "github.com/containers/buildah/internal/util"
+	"github.com/containers/common/pkg/parse"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/lockfile"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+const (
+	// TypeBind is the type for mounting a host directory
+	TypeBind = "bind"
+	// TypeTmpfs is the type for mounting tmpfs
+	TypeTmpfs = "tmpfs"
+	// TypeCache is the type for mounting a common persistent cache from the host
+	TypeCache = "cache"
+	// mount=type=cache must create a persistent directory on the host so it is available for all consecutive builds.
+	// The lifecycle of this directory is inherited from how the host machine treats temporary directories
+	BuildahCacheDir = "buildah-cache"
+	// mount=type=cache allows users to lock a cache store while it is being used by another build
+	BuildahCacheLockfile = "buildah-cache-lockfile"
+)
+
+var (
+	errBadMntOption  = errors.New("invalid mount option")
+	errBadOptionArg  = errors.New("must provide an argument for option")
+	errBadVolDest    = errors.New("must set volume destination")
+	errBadVolSrc     = errors.New("must set volume source")
+	errDuplicateDest = errors.New("duplicate mount destination")
+)
+
+// GetBindMount parses a single bind mount entry from the --mount flag.
+// It returns the specified mount and the name of the mounted image, if any (empty otherwise).
+// The caller is expected to unmount any mounted images.
+func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, string, error) {
+	newMount := specs.Mount{
+		Type: TypeBind,
+	}
+
+	mountReadability := false
+	setDest := false
+	bindNonRecursive := false
+	fromImage := ""
+
+	for _, val := range args {
+		kv := strings.SplitN(val, "=", 2)
+		switch kv[0] {
+		case "bind-nonrecursive":
+			newMount.Options = append(newMount.Options, "bind")
+			bindNonRecursive = true
+		case "ro", "nosuid", "nodev", "noexec":
+			// TODO: detect duplication of these options.
+			// (Is this necessary?)
+ newMount.Options = append(newMount.Options, kv[0]) + mountReadability = true + case "rw", "readwrite": + newMount.Options = append(newMount.Options, "rw") + mountReadability = true + case "readonly": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + mountReadability = true + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": + newMount.Options = append(newMount.Options, kv[0]) + case "from": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + fromImage = kv[1] + case "bind-propagation": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Options = append(newMount.Options, kv[1]) + case "src", "source": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Source = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, "", err + } + newMount.Destination = kv[1] + setDest = true + case "consistency": + // Option for OS X only, has no meaning on other platforms + // and can thus be safely ignored. + // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts + default: + return newMount, "", errors.Wrapf(errBadMntOption, kv[0]) + } + } + + // default mount readability is always readonly + if !mountReadability { + newMount.Options = append(newMount.Options, "ro") + } + + // Following variable ensures that we return imagename only if we did additional mount + isImageMounted := false + if fromImage != "" { + mountPoint := "" + if additionalMountPoints != nil { + if val, ok := additionalMountPoints[fromImage]; ok { + mountPoint = val.MountPoint + } + } + // if mountPoint of image was not found in additionalMap + // or additionalMap was nil, try mounting image + if mountPoint == "" { + image, err := internalUtil.LookupImage(ctx, store, fromImage) + if err != nil { + return newMount, "", err + } + + mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel) + if err != nil { + return newMount, "", err + } + isImageMounted = true + } + contextDir = mountPoint + } + + // buildkit parity: default bind option must be `rbind` + // unless specified + if !bindNonRecursive { + newMount.Options = append(newMount.Options, "rbind") + } + + if !setDest { + return newMount, fromImage, errBadVolDest + } + + // buildkit parity: support absolute path for sources from current build context + if contextDir != "" { + // path should be /contextDir/specified path + newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source)) + } else { + // looks like its coming from `build run --mount=type=bind` allow using absolute path + // error out if no source is set + if newMount.Source == "" { + return newMount, "", errBadVolSrc + } + if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil { + return newMount, "", err + } + } + + opts, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, fromImage, err + } + newMount.Options = opts + + if !isImageMounted { + // we don't want any cleanups if image was not mounted explicitly + // so dont return anything + fromImage = "" + } + + return newMount, fromImage, nil +} + +// GetCacheMount parses a single cache mount entry from the --mount flag. 
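An aside before GetCacheMount's definition below: GetBindMount above and GetCacheMount walk the comma-separated `--mount` tokens the same way, `strings.SplitN` on the first `=` followed by a switch on the key. A stripped-down sketch of that walk (illustrative only; the real functions also validate paths, mount `from=` images or stages, and apply buildkit-parity defaults):

```go
package main

import (
	"fmt"
	"strings"
)

// parseMountTokens mimics the token walk performed on one --mount entry.
func parseMountTokens(entry string) (map[string]string, error) {
	opts := map[string]string{}
	for _, val := range strings.Split(entry, ",") {
		kv := strings.SplitN(val, "=", 2)
		switch kv[0] {
		case "ro", "rw", "nosuid", "nodev", "noexec", "bind-nonrecursive":
			opts[kv[0]] = "" // flag-style option, no argument
		case "from", "src", "source", "target", "dst", "destination", "bind-propagation":
			if len(kv) == 1 {
				return nil, fmt.Errorf("must provide an argument for option %q", kv[0])
			}
			opts[kv[0]] = kv[1]
		default:
			return nil, fmt.Errorf("invalid mount option %q", kv[0])
		}
	}
	return opts, nil
}

func main() {
	opts, err := parseMountTokens("from=builder,src=/var/cache,target=/cache,ro")
	if err != nil {
		panic(err)
	}
	fmt.Println(opts["from"], opts["target"]) // builder /cache
}
```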
+func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, []string, error) { + var err error + var mode uint64 + lockedTargets := make([]string, 0) + var ( + setDest bool + setShared bool + setReadOnly bool + ) + fromStage := "" + newMount := specs.Mount{ + Type: TypeBind, + } + // if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id + id := "" + //buidkit parity: cache directory defaults to 755 + mode = 0o755 + //buidkit parity: cache directory defaults to uid 0 if not specified + uid := 0 + //buidkit parity: cache directory defaults to gid 0 if not specified + gid := 0 + // sharing mode + sharing := "shared" + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "nosuid", "nodev", "noexec": + // TODO: detect duplication of these options. + // (Is this necessary?) + newMount.Options = append(newMount.Options, kv[0]) + case "rw", "readwrite": + newMount.Options = append(newMount.Options, "rw") + case "readonly", "ro": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + setReadOnly = true + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": + newMount.Options = append(newMount.Options, kv[0]) + setShared = true + case "sharing": + sharing = kv[1] + case "bind-propagation": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Options = append(newMount.Options, kv[1]) + case "id": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + id = kv[1] + case "from": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + fromStage = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, lockedTargets, err + } + newMount.Destination = kv[1] + setDest = true + case "src", "source": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Source = kv[1] + case "mode": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + mode, err = strconv.ParseUint(kv[1], 8, 32) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache mode") + } + case "uid": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + uid, err = strconv.Atoi(kv[1]) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache uid") + } + case "gid": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + gid, err = strconv.Atoi(kv[1]) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache gid") + } + default: + return newMount, lockedTargets, errors.Wrapf(errBadMntOption, kv[0]) + } + } + + if !setDest { + return newMount, lockedTargets, errBadVolDest + } + + if fromStage != "" { + // do not create cache on host + // instead use read-only mounted stage as cache + mountPoint := "" + if additionalMountPoints != nil { + if val, ok := additionalMountPoints[fromStage]; ok { + if val.IsStage { + mountPoint = val.MountPoint + } + } + } + // Cache does not supports using image so if not stage found + // return with error + if mountPoint == "" { + 
return newMount, lockedTargets, fmt.Errorf("no stage found with name %s", fromStage) + } + // path should be /contextDir/specified path + newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source)) + } else { + // we need to create cache on host if no image is being used + + // since type is cache and cache can be reused by consecutive builds + // create a common cache directory, which persists on hosts within temp lifecycle + // add subdirectory if specified + + // cache parent directory + cacheParent := filepath.Join(getTempDir(), BuildahCacheDir) + // create cache on host if not present + err = os.MkdirAll(cacheParent, os.FileMode(0755)) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to create build cache directory") + } + + if id != "" { + newMount.Source = filepath.Join(cacheParent, filepath.Clean(id)) + } else { + newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination)) + } + idPair := idtools.IDPair{ + UID: uid, + GID: gid, + } + //buildkit parity: change uid and gid if specified otheriwise keep `0` + err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to change uid,gid of cache directory") + } + } + + switch sharing { + case "locked": + // lock parent cache + lockfile, err := lockfile.GetLockfile(filepath.Join(newMount.Source, BuildahCacheLockfile)) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to acquire lock when sharing mode is locked") + } + // Will be unlocked after the RUN step is executed. + lockfile.Lock() + lockedTargets = append(lockedTargets, filepath.Join(newMount.Source, BuildahCacheLockfile)) + case "shared": + // do nothing since default is `shared` + break + default: + // error out for unknown values + return newMount, lockedTargets, errors.Wrapf(err, "Unrecognized value %q for field `sharing`", sharing) + } + + // buildkit parity: default sharing should be shared + // unless specified + if !setShared { + newMount.Options = append(newMount.Options, "shared") + } + + // buildkit parity: cache must writable unless `ro` or `readonly` is configured explicitly + if !setReadOnly { + newMount.Options = append(newMount.Options, "rw") + } + + newMount.Options = append(newMount.Options, "bind") + + opts, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, lockedTargets, err + } + newMount.Options = opts + + return newMount, lockedTargets, nil +} + +// ValidateVolumeMountHostDir validates the host path of buildah --volume +func ValidateVolumeMountHostDir(hostDir string) error { + if !filepath.IsAbs(hostDir) { + return errors.Errorf("invalid host path, must be an absolute path %q", hostDir) + } + if _, err := os.Stat(hostDir); err != nil { + return errors.WithStack(err) + } + return nil +} + +// RevertEscapedColon converts "\:" to ":" +func RevertEscapedColon(source string) string { + return strings.ReplaceAll(source, "\\:", ":") +} + +// SplitStringWithColonEscape splits string into slice by colon. Backslash-escaped colon (i.e. 
"\:") will not be regarded as separator +func SplitStringWithColonEscape(str string) []string { + result := make([]string, 0, 3) + sb := &strings.Builder{} + for idx, r := range str { + if r == ':' { + // the colon is backslash-escaped + if idx-1 > 0 && str[idx-1] == '\\' { + sb.WriteRune(r) + } else { + // os.Stat will fail if path contains escaped colon + result = append(result, RevertEscapedColon(sb.String())) + sb.Reset() + } + } else { + sb.WriteRune(r) + } + } + if sb.Len() > 0 { + result = append(result, RevertEscapedColon(sb.String())) + } + return result +} + +func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { + finalVolumeMounts := make(map[string]specs.Mount) + + for _, volume := range volumes { + volumeMount, err := Volume(volume) + if err != nil { + return nil, err + } + if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { + return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination) + } + finalVolumeMounts[volumeMount.Destination] = volumeMount + } + return finalVolumeMounts, nil +} + +// Volume parses the input of --volume +func Volume(volume string) (specs.Mount, error) { + mount := specs.Mount{} + arr := SplitStringWithColonEscape(volume) + if len(arr) < 2 { + return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) + } + if err := ValidateVolumeMountHostDir(arr[0]); err != nil { + return mount, err + } + if err := parse.ValidateVolumeCtrDir(arr[1]); err != nil { + return mount, err + } + mountOptions := "" + if len(arr) > 2 { + mountOptions = arr[2] + if _, err := parse.ValidateVolumeOpts(strings.Split(arr[2], ",")); err != nil { + return mount, err + } + } + mountOpts := strings.Split(mountOptions, ",") + mount.Source = arr[0] + mount.Destination = arr[1] + mount.Type = "rbind" + mount.Options = mountOpts + return mount, nil +} + +// GetVolumes gets the volumes from --volume and --mount +func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string) ([]specs.Mount, []string, []string, error) { + unifiedMounts, mountedImages, lockedTargets, err := getMounts(ctx, store, mounts, contextDir) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + volumeMounts, err := getVolumeMounts(volumes) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + for dest, mount := range volumeMounts { + if _, ok := unifiedMounts[dest]; ok { + return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, dest) + } + unifiedMounts[dest] = mount + } + + finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) + for _, mount := range unifiedMounts { + finalMounts = append(finalMounts, mount) + } + return finalMounts, mountedImages, lockedTargets, nil +} + +// getMounts takes user-provided input from the --mount flag and creates OCI +// spec mounts. +// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... +// buildah run --mount type=tmpfs,target=/dev/shm ... 
+func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []string, error) { + finalMounts := make(map[string]specs.Mount) + mountedImages := make([]string, 0) + lockedTargets := make([]string, 0) + + errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=,[src=,]target=[,options]") + + // TODO(vrothberg): the manual parsing can be replaced with a regular expression + // to allow a more robust parsing of the mount format and to give + // precise errors regarding supported format versus supported options. + for _, mount := range mounts { + arr := strings.SplitN(mount, ",", 2) + if len(arr) < 2 { + return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount) + } + kv := strings.Split(arr[0], "=") + // TODO: type is not explicitly required in Docker. + // If not specified, it defaults to "volume". + if len(kv) != 2 || kv[0] != "type" { + return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount) + } + + tokens := strings.Split(arr[1], ",") + switch kv[1] { + case TypeBind: + mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + mountedImages = append(mountedImages, image) + case TypeCache: + mount, lockedPaths, err := GetCacheMount(tokens, store, "", nil) + lockedTargets = lockedPaths + if err != nil { + return nil, mountedImages, lockedTargets, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + case TypeTmpfs: + mount, err := GetTmpfsMount(tokens) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + default: + return nil, mountedImages, lockedTargets, errors.Errorf("invalid filesystem type %q", kv[1]) + } + } + + return finalMounts, mountedImages, lockedTargets, nil +} + +// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag +func GetTmpfsMount(args []string) (specs.Mount, error) { + newMount := specs.Mount{ + Type: TypeTmpfs, + Source: TypeTmpfs, + } + + setDest := false + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "ro", "nosuid", "nodev", "noexec": + newMount.Options = append(newMount.Options, kv[0]) + case "readonly": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + case "tmpcopyup": + //the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself. 
+			newMount.Options = append(newMount.Options, kv[0])
+		case "tmpfs-mode":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
+		case "tmpfs-size":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
+		case "src", "source":
+			return newMount, errors.Errorf("source is not supported with tmpfs mounts")
+		case "target", "dst", "destination":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+				return newMount, err
+			}
+			newMount.Destination = kv[1]
+			setDest = true
+		default:
+			return newMount, errors.Wrapf(errBadMntOption, kv[0])
+		}
+	}
+
+	if !setDest {
+		return newMount, errBadVolDest
+	}
+
+	return newMount, nil
+}
+
+/* This is an internal function and could change at any time. */
+/* For external use, please refer to buildah/pkg/parse.GetTempDir(). */
+func getTempDir() string {
+	if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
+		return tmpdir
+	}
+	return "/var/tmp"
+}
diff --git a/vendor/github.com/containers/buildah/internal/types.go b/vendor/github.com/containers/buildah/internal/types.go
new file mode 100644
index 00000000000..8ddff99fb75
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/types.go
@@ -0,0 +1,11 @@
+package internal
+
+// Types in internal packages are subject to change between releases; avoid using them outside of buildah.
+
+// StageMountDetails holds the stage/image mountpoint returned by a StageExecutor.
+// The StageExecutor can mount stages/images in the current context and
+// automatically clean them up.
+type StageMountDetails struct {
+	IsStage    bool   // tells whether the mountpoint returned from the stage executor is a stage or an image
+	MountPoint string // mountpoint of the stage/image
+}
diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go
new file mode 100644
index 00000000000..691d89d65f7
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/util/util.go
@@ -0,0 +1,81 @@
+package util
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/buildah/define"
+	"github.com/containers/common/libimage"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chrootarchive"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/pkg/errors"
+)
+
+// LookupImage returns the *Image corresponding to the given image name or ID.
+func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (*libimage.Image, error) {
+	systemContext := ctx
+	if systemContext == nil {
+		systemContext = &types.SystemContext{}
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return nil, err
+	}
+	localImage, _, err := runtime.LookupImage(image, nil)
+	if err != nil {
+		return nil, err
+	}
+	return localImage, nil
+}
+
+// ExportFromReader reads bytes from the given reader and exports them to an external tar archive, directory, or stdout.
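The function body follows; its rootless branch, untarring with lchown disabled so extraction does not fail on UIDs and GIDs the unprivileged user cannot map, can be exercised on its own. A small sketch using the same containers/storage helpers the file imports (the `rootfs.tar` input path and `out` destination are hypothetical):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/unshare"
)

func main() {
	f, err := os.Open("rootfs.tar") // hypothetical exported-rootfs input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := os.MkdirAll("out", 0o700); err != nil {
		panic(err)
	}
	// When rootless, skip lchown so the untar survives unmappable owners;
	// when root, preserve ownership as-is.
	opts := &archive.TarOptions{NoLchown: unshare.IsRootless()}
	if err := chrootarchive.Untar(f, "out", opts); err != nil {
		panic(err)
	}
	fmt.Println("extracted to ./out")
}
```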
+func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { + var err error + if !filepath.IsAbs(opts.Path) { + opts.Path, err = filepath.Abs(opts.Path) + if err != nil { + return err + } + } + if opts.IsDir { + // In order to keep this feature as close as possible to + // buildkit it was decided to preserve ownership when + // invoked as root since caller already has access to artifacts + // therefore we can preserve ownership as is, however for rootless users + // ownership has to be changed so exported artifacts can still + // be accessible by unpriviledged users. + // See: https://github.com/containers/buildah/pull/3823#discussion_r829376633 + noLChown := false + if unshare.IsRootless() { + noLChown = true + } + + err = os.MkdirAll(opts.Path, 0700) + if err != nil { + return errors.Wrapf(err, "failed while creating the destination path %q", opts.Path) + } + + err = chrootarchive.Untar(input, opts.Path, &archive.TarOptions{NoLchown: noLChown}) + if err != nil { + return errors.Wrapf(err, "failed while performing untar at %q", opts.Path) + } + } else { + outFile := os.Stdout + if !opts.IsStdout { + outFile, err = os.Create(opts.Path) + if err != nil { + return errors.Wrapf(err, "failed while creating destination tar at %q", opts.Path) + } + defer outFile.Close() + } + _, err = io.Copy(outFile, input) + if err != nil { + return errors.Wrapf(err, "failed while performing copy to %q", opts.Path) + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index 85a0f0b31b0..03b2de662d2 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/containers/buildah/define" - "github.com/containers/buildah/pkg/blobcache" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/image" @@ -16,6 +15,7 @@ import ( "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage" + "github.com/containers/storage/pkg/stringid" digest "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/openshift/imagebuilder" @@ -49,6 +49,15 @@ func getImageName(name string, img *storage.Image) string { func imageNamePrefix(imageName string) string { prefix := imageName + if d, err := digest.Parse(imageName); err == nil { + prefix = d.Encoded() + if len(prefix) > 12 { + prefix = prefix[:12] + } + } + if stringid.ValidateID(prefix) == nil { + prefix = stringid.TruncateID(prefix) + } s := strings.Split(prefix, ":") if len(s) > 0 { prefix = s[0] @@ -113,6 +122,17 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions options.FromImage = "" } + if options.NetworkInterface == nil { + // create the network interface + // Note: It is important to do this before we pull any images/create containers. + // The default backend detection logic needs an empty store to correctly detect + // that we can use netavark, if the store was not empty it will use CNI to not break existing installs. 
+ options.NetworkInterface, err = getNetworkInterface(store, options.CNIConfigDir, options.CNIPluginPath) + if err != nil { + return nil, err + } + } + systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath) if options.FromImage != "" && options.FromImage != "scratch" { @@ -134,14 +154,11 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions pullOptions.OciDecryptConfig = options.OciDecryptConfig pullOptions.SignaturePolicyPath = options.SignaturePolicyPath pullOptions.Writer = options.ReportWriter + pullOptions.DestinationLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) maxRetries := uint(options.MaxPullRetries) pullOptions.MaxRetries = &maxRetries - if options.BlobDirectory != "" { - pullOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) - } - pulledImages, err := imageRuntime.Pull(ctx, options.FromImage, pullPolicy, &pullOptions) if err != nil { return nil, err @@ -197,6 +214,9 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions } name := "working-container" + if options.ContainerSuffix != "" { + name = options.ContainerSuffix + } if options.Container != "" { name = options.Container } else { @@ -216,9 +236,20 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions conflict := 100 for { + + var flags map[string]interface{} + // check if we have predefined ProcessLabel and MountLabel + // this could be true if this is another stage in a build + if options.ProcessLabel != "" && options.MountLabel != "" { + flags = map[string]interface{}{ + "ProcessLabel": options.ProcessLabel, + "MountLabel": options.MountLabel, + } + } coptions := storage.ContainerOptions{ LabelOpts: options.CommonBuildOpts.LabelOpts, IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions), + Flags: flags, Volatile: true, } container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions) @@ -283,13 +314,15 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions UIDMap: uidmap, GIDMap: gidmap, }, - Capabilities: copyStringSlice(options.Capabilities), - CommonBuildOpts: options.CommonBuildOpts, - TopLayer: topLayer, - Args: options.Args, - Format: options.Format, - TempVolumes: map[string]bool{}, - Devices: options.Devices, + Capabilities: copyStringSlice(options.Capabilities), + CommonBuildOpts: options.CommonBuildOpts, + TopLayer: topLayer, + Args: copyStringStringMap(options.Args), + Format: options.Format, + TempVolumes: map[string]bool{}, + Devices: options.Devices, + Logger: options.Logger, + NetworkInterface: options.NetworkInterface, } if options.Mount { diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go index 8dadec13084..fa60619ac62 100644 --- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go @@ -1,39 +1,8 @@ package blobcache import ( - "bytes" - "context" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/containers/buildah/docker" - "github.com/containers/common/libimage" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/compression" - "github.com/containers/image/v5/transports" + 
imageBlobCache "github.com/containers/image/v5/pkg/blobcache" "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/ioutils" - digest "github.com/opencontainers/go-digest" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - _ types.ImageReference = &blobCacheReference{} - _ types.ImageSource = &blobCacheSource{} - _ types.ImageDestination = &blobCacheDestination{} -) - -const ( - compressedNote = ".compressed" - decompressedNote = ".decompressed" ) // BlobCache is an object which saves copies of blobs that are written to it while passing them @@ -52,517 +21,11 @@ type BlobCache interface { ClearCache() error } -type blobCacheReference struct { - reference types.ImageReference - // WARNING: The contents of this directory may be accessed concurrently, - // both within this process and by multiple different processes - directory string - compress types.LayerCompression -} - -type blobCacheSource struct { - reference *blobCacheReference - source types.ImageSource - sys types.SystemContext - // this mutex synchronizes the counters below - mu sync.Mutex - cacheHits int64 - cacheMisses int64 - cacheErrors int64 -} - -type blobCacheDestination struct { - reference *blobCacheReference - destination types.ImageDestination -} - -func makeFilename(blobSum digest.Digest, isConfig bool) string { - if isConfig { - return blobSum.String() + ".config" - } - return blobSum.String() -} - -// CacheLookupReferenceFunc wraps a BlobCache into a -// libimage.LookupReferenceFunc to allow for using a BlobCache during -// image-copy operations. -func CacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc { - // NOTE: this prevents us from moving BlobCache around and generalizes - // the libimage API. - return func(ref types.ImageReference) (types.ImageReference, error) { - ref, err := NewBlobCache(ref, directory, compress) - if err != nil { - return nil, errors.Wrapf(err, "error using blobcache %q", directory) - } - return ref, nil - } -} - // NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are // written to the destination image created from the resulting reference will also be stored -// as-is to the specified directory or a temporary directory. The cache directory's contents -// can be cleared by calling the returned BlobCache()'s ClearCache() method. +// as-is to the specified directory or a temporary directory. // The compress argument controls whether or not the cache will try to substitute a compressed // or different version of a blob when preparing the list of layers when reading an image. 
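For illustration, a minimal sketch of wrapping a reference with the constructor below (which now simply delegates to the containers/image implementation); the dir: paths here are placeholders:

package main

import (
	"github.com/containers/buildah/pkg/blobcache"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

func main() {
	// Parse a destination reference; blobs written through the wrapper are
	// also stored in the cache directory for reuse by later copies.
	ref, err := alltransports.ParseImageName("dir:/var/tmp/pushed-image")
	if err != nil {
		panic(err)
	}
	cached, err := blobcache.NewBlobCache(ref, "/var/tmp/blob-cache", types.PreserveOriginal)
	if err != nil {
		panic(err)
	}
	_ = cached // use in place of ref when copying the image
}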
func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) { - if directory == "" { - return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref)) - } - switch compress { - case types.Compress, types.Decompress, types.PreserveOriginal: - // valid value, accept it - default: - return nil, errors.Errorf("unhandled LayerCompression value %v", compress) - } - return &blobCacheReference{ - reference: ref, - directory: directory, - compress: compress, - }, nil -} - -func (r *blobCacheReference) Transport() types.ImageTransport { - return r.reference.Transport() -} - -func (r *blobCacheReference) StringWithinTransport() string { - return r.reference.StringWithinTransport() -} - -func (r *blobCacheReference) DockerReference() reference.Named { - return r.reference.DockerReference() -} - -func (r *blobCacheReference) PolicyConfigurationIdentity() string { - return r.reference.PolicyConfigurationIdentity() -} - -func (r *blobCacheReference) PolicyConfigurationNamespaces() []string { - return r.reference.PolicyConfigurationNamespaces() -} - -func (r *blobCacheReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return r.reference.DeleteImage(ctx, sys) -} - -func (r *blobCacheReference) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { - if blobinfo.Digest == "" { - return false, -1, nil - } - - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(r.directory, makeFilename(blobinfo.Digest, isConfig)) - fileInfo, err := os.Stat(filename) - if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) { - return true, fileInfo.Size(), nil - } - if !os.IsNotExist(err) { - return false, -1, errors.Wrap(err, "checking size") - } - } - - return false, -1, nil -} - -func (r *blobCacheReference) Directory() string { - return r.directory -} - -func (r *blobCacheReference) ClearCache() error { - f, err := os.Open(r.directory) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - names, err := f.Readdirnames(-1) - if err != nil { - return errors.Wrapf(err, "error reading directory %q", r.directory) - } - for _, name := range names { - pathname := filepath.Join(r.directory, name) - if err = os.RemoveAll(pathname); err != nil { - return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(r)) - } - } - return nil -} - -func (r *blobCacheReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := r.NewImageSource(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(r.reference)) - } - return image.FromSource(ctx, sys, src) -} - -func (r *blobCacheReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - src, err := r.reference.NewImageSource(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(r.reference)) - } - logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(r.reference), r.directory, r.compress) - return &blobCacheSource{reference: r, source: src, sys: *sys}, nil -} - -func (r *blobCacheReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - dest, err := r.reference.NewImageDestination(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image 
destination %q", transports.ImageName(r.reference)) - } - logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(r.reference), r.directory) - return &blobCacheDestination{reference: r, destination: dest}, nil -} - -func (s *blobCacheSource) Reference() types.ImageReference { - return s.reference -} - -func (s *blobCacheSource) Close() error { - logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors) - return s.source.Close() -} - -func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false)) - manifestBytes, err := ioutil.ReadFile(filename) - if err == nil { - s.cacheHits++ - return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil - } - if !os.IsNotExist(err) { - s.cacheErrors++ - return nil, "", errors.Wrap(err, "checking for manifest file") - } - } - s.cacheMisses++ - return s.source.GetManifest(ctx, instanceDigest) -} - -func (s *blobCacheSource) HasThreadSafeGetBlob() bool { - return s.source.HasThreadSafeGetBlob() -} - -func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - present, size, err := s.reference.HasBlob(blobinfo) - if err != nil { - return nil, -1, err - } - if present { - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig)) - f, err := os.Open(filename) - if err == nil { - s.mu.Lock() - s.cacheHits++ - s.mu.Unlock() - return f, size, nil - } - if !os.IsNotExist(err) { - s.mu.Lock() - s.cacheErrors++ - s.mu.Unlock() - return nil, -1, errors.Wrap(err, "checking for cache") - } - } - } - s.mu.Lock() - s.cacheMisses++ - s.mu.Unlock() - rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) - if err != nil { - return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference)) - } - return rc, size, nil -} - -func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return s.source.GetSignatures(ctx, instanceDigest) -} - -func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { - signatures, err := s.source.GetSignatures(ctx, instanceDigest) - if err != nil { - return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) - } - canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) - - infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) - if err != nil { - return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) - } - if infos == nil { - img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest)) - if err != nil { - return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference)) - } - infos = img.LayerInfos() - } - - if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { - replacedInfos := make([]types.BlobInfo, 0, len(infos)) - for _, info := range infos { - var replaceDigest []byte - var err error - blobFile := 
filepath.Join(s.reference.directory, makeFilename(info.Digest, false)) - alternate := "" - switch s.reference.compress { - case types.Compress: - alternate = blobFile + compressedNote - replaceDigest, err = ioutil.ReadFile(alternate) - case types.Decompress: - alternate = blobFile + decompressedNote - replaceDigest, err = ioutil.ReadFile(alternate) - } - if err == nil && digest.Digest(replaceDigest).Validate() == nil { - alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false)) - fileInfo, err := os.Stat(alternate) - if err == nil { - switch info.MediaType { - case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: - switch s.reference.compress { - case types.Compress: - info.MediaType = v1.MediaTypeImageLayerGzip - info.CompressionAlgorithm = &compression.Gzip - case types.Decompress: - info.MediaType = v1.MediaTypeImageLayer - info.CompressionAlgorithm = nil - } - case docker.V2S2MediaTypeUncompressedLayer, manifest.DockerV2Schema2LayerMediaType: - switch s.reference.compress { - case types.Compress: - info.MediaType = manifest.DockerV2Schema2LayerMediaType - info.CompressionAlgorithm = &compression.Gzip - case types.Decompress: - // nope, not going to suggest anything, it's not allowed by the spec - replacedInfos = append(replacedInfos, info) - continue - } - } - logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String()) - info.CompressionOperation = s.reference.compress - info.Digest = digest.Digest(replaceDigest) - info.Size = fileInfo.Size() - logrus.Debugf("info = %#v", info) - } - } - replacedInfos = append(replacedInfos, info) - } - infos = replacedInfos - } - - return infos, nil -} - -func (d *blobCacheDestination) Reference() types.ImageReference { - return d.reference -} - -func (d *blobCacheDestination) Close() error { - logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) - return d.destination.Close() -} - -func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { - return d.destination.SupportedManifestMIMETypes() -} - -func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { - return d.destination.SupportsSignatures(ctx) -} - -func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { - return d.destination.DesiredLayerCompression() -} - -func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { - return d.destination.AcceptsForeignLayerURLs() -} - -func (d *blobCacheDestination) MustMatchRuntimeOS() bool { - return d.destination.MustMatchRuntimeOS() -} - -func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { - return d.destination.IgnoresEmbeddedDockerReference() -} - -// Decompress and save the contents of the decompressReader stream into the passed-in temporary -// file. If we successfully save all of the data, rename the file to match the digest of the data, -// and make notes about the relationship between the file that holds a copy of the compressed data -// and this new file. -func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { - defer wg.Done() - // Decompress from and digest the reading end of that pipe. 
- decompressed, err3 := archive.DecompressStream(decompressReader) - digester := digest.Canonical.Digester() - if err3 == nil { - // Read the decompressed data through the filter over the pipe, blocking until the - // writing end is closed. - _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) - } else { - // Drain the pipe to keep from stalling the PutBlob() thread. - if _, err := io.Copy(ioutil.Discard, decompressReader); err != nil { - logrus.Debugf("error draining the pipe: %v", err) - } - } - decompressReader.Close() - decompressed.Close() - tempFile.Close() - // Determine the name that we should give to the uncompressed copy of the blob. - decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig)) - if err3 == nil { - // Rename the temporary file. - if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { - logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) - // Remove the temporary file. - if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } - } else { - *alternateDigest = digester.Digest() - // Note the relationship between the two files. - if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { - logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) - } - if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { - logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) - } - } - } else { - // Remove the temporary file. 
- if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } - } -} - -func (d *blobCacheDestination) HasThreadSafePutBlob() bool { - return d.destination.HasThreadSafePutBlob() -} - -func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - var tempfile *os.File - var err error - var n int - var alternateDigest digest.Digest - var closer io.Closer - wg := new(sync.WaitGroup) - needToWait := false - compression := archive.Uncompressed - if inputInfo.Digest != "" { - filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - if err == nil { - stream = io.TeeReader(stream, tempfile) - defer func() { - if err == nil { - if err = os.Rename(tempfile.Name(), filename); err != nil { - if err2 := os.Remove(tempfile.Name()); err2 != nil { - logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) - } - err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename) - } - } else { - if err2 := os.Remove(tempfile.Name()); err2 != nil { - logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) - } - } - tempfile.Close() - }() - } else { - logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err) - } - if !isConfig { - initial := make([]byte, 8) - n, err = stream.Read(initial) - if n > 0 { - // Build a Reader that will still return the bytes that we just - // read, for PutBlob()'s sake. - stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) - if n >= len(initial) { - compression = archive.DetectCompression(initial[:n]) - } - if compression == archive.Gzip { - // The stream is compressed, so create a file which we'll - // use to store a decompressed copy. - decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - if err2 != nil { - logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2) - decompressedTemp.Close() - } else { - // Write a copy of the compressed data to a pipe, - // closing the writing end of the pipe after - // PutBlob() returns. - decompressReader, decompressWriter := io.Pipe() - closer = decompressWriter - stream = io.TeeReader(stream, decompressWriter) - // Let saveStream() close the reading end and handle the temporary file. 
-				wg.Add(1)
-				needToWait = true
-				go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest)
-			}
-			}
-		}
-	}
-	newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig)
-	if closer != nil {
-		closer.Close()
-	}
-	if needToWait {
-		wg.Wait()
-	}
-	if err != nil {
-		return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference))
-	}
-	if alternateDigest.Validate() == nil {
-		logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory)
-	} else {
-		logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory)
-	}
-	return newBlobInfo, nil
-}
-
-func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
-	present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute)
-	if err != nil || present {
-		return present, reusedInfo, err
-	}
-
-	for _, isConfig := range []bool{false, true} {
-		filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig))
-		f, err := os.Open(filename)
-		if err == nil {
-			defer f.Close()
-			uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig)
-			if err != nil {
-				return false, types.BlobInfo{}, err
-			}
-			return true, uploadedInfo, nil
-		}
-	}
-
-	return false, types.BlobInfo{}, nil
-}
-
-func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error {
-	manifestDigest, err := manifest.Digest(manifestBytes)
-	if err != nil {
-		logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
-	} else {
-		filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false))
-		if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
-			logrus.Warnf("error saving manifest as %q: %v", filename, err)
-		}
-	}
-	return d.destination.PutManifest(ctx, manifestBytes, instanceDigest)
-}
-
-func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
-	return d.destination.PutSignatures(ctx, signatures, instanceDigest)
-}
-
-func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
-	return d.destination.Commit(ctx, unparsedToplevel)
+	return imageBlobCache.NewBlobCache(ref, directory, compress)
 }
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
index 8ee4ab6d1f6..c325bc5cfb3 100644
--- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
+++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
@@ -18,6 +18,40 @@ import (
 	"golang.org/x/sys/unix"
 )
 
+// Options holds various configuration options for an overlay mount.
+// MountWithOptions accepts the following type so that it is easier to specify
+// more verbose configuration for an overlay mount.
+type Options struct {
+	// The Upper directory is normally the writable layer in an overlay mount.
+	// Note: the following API does not handle escaping or validate the correctness of the values
+	// passed in UpperDirOptionFragment; instead, the API passes the values as-is
+	// to the `mount` command. It is the user's responsibility to pre-validate
+	// these values. Invalid inputs may lead to undefined behaviour.
+	// This is provided as-is, use it if it works for you, we can/will change/break that in the future.
+	// See the discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
+	// TODO: Should we address the above comment and handle escaping of metacharacters like
+	// `comma`, `backslash`, `colon`, and any other special characters?
+	UpperDirOptionFragment string
+	// The Workdir is used to prepare files as they are switched between the layers.
+	// Note: the following API does not handle escaping or validate the correctness of the values
+	// passed in WorkDirOptionFragment; instead, the API passes the values as-is
+	// to the `mount` command. It is the user's responsibility to pre-validate
+	// these values. Invalid inputs may lead to undefined behaviour.
+	// This is provided as-is, use it if it works for you, we can/will change/break that in the future.
+	// See the discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
+	// TODO: Should we address the above comment and handle escaping of metacharacters like
+	// `comma`, `backslash`, `colon`, and any other special characters?
+	WorkDirOptionFragment string
+	// Graph options relayed from Podman; responsible for choosing the mount program
+	GraphOpts []string
+	// Marks whether the following overlay is read-only
+	ReadOnly bool
+	// RootUID is not used yet but is kept here for legacy reasons.
+	RootUID int
+	// RootGID is not used yet but is kept here for legacy reasons.
+	RootGID int
+}
+
 // TempDir generates an overlay Temp directory in the container content
 func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
 	contentDir := filepath.Join(containerDir, "overlay")
@@ -65,7 +99,8 @@ func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string
 // from the source system. It then mounts up the source directory on to the
 // generated mount point and returns the mount point to the caller.
 func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
-	return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, false)
+	overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: false, RootUID: rootUID, RootGID: rootGID}
+	return MountWithOptions(contentDir, source, dest, &overlayOpts)
 }
 
 // MountReadOnly creates a subdir of the contentDir based on the source directory
@@ -73,26 +108,71 @@ func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphOptions [
 // generated mount point and returns the mount point to the caller. Note that no
 // upper layer will be created rendering it a read-only mount
 func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
-	return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, true)
+	overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: true, RootUID: rootUID, RootGID: rootGID}
+	return MountWithOptions(contentDir, source, dest, &overlayOpts)
 }
 
-// NOTE: rootUID and rootUID are not yet used.
-func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []string, readOnly bool) (mount specs.Mount, Err error) {
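For illustration, a hypothetical in-package snippet (the helper defined next is unexported) showing the graph-option keys it recognizes; the option values are placeholders:

package overlay

import "fmt"

func exampleFindMountProgram() {
	graphOpts := []string{
		"overlay.mountopt=nodev",                        // not a mount_program key, skipped
		"overlay.mount_program=/usr/bin/fuse-overlayfs", // matched
	}
	fmt.Println(findMountProgram(graphOpts)) // prints: /usr/bin/fuse-overlayfs
}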
+// findMountProgram finds the mount program, if any, specified in the graph options.
+func findMountProgram(graphOptions []string) string {
+	mountMap := map[string]bool{
+		".mount_program":         true,
+		"overlay.mount_program":  true,
+		"overlay2.mount_program": true,
+	}
+
+	for _, i := range graphOptions {
+		s := strings.SplitN(i, "=", 2)
+		if len(s) != 2 {
+			continue
+		}
+		key := s[0]
+		val := s[1]
+		if mountMap[key] {
+			return val
+		}
+	}
+
+	return ""
+}
+
+// mountWithMountProgram mounts an overlay at mergeDir using the specified mount program
+// and overlay options.
+func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error {
+	cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir)
+
+	if err := cmd.Run(); err != nil {
+		return errors.Wrapf(err, "exec %s", mountProgram)
+	}
+	return nil
+}
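For illustration, the command shape that mountWithMountProgram above ends up executing for a typical rootless fuse-overlayfs configuration (all paths here are placeholders):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Equivalent to: fuse-overlayfs -o <overlay options> <merge dir>
	opts := "lowerdir=/ctr/lower,upperdir=/ctr/upper,workdir=/ctr/work,private"
	cmd := exec.Command("/usr/bin/fuse-overlayfs", "-o", opts, "/ctr/merge")
	fmt.Println(cmd.String()) // /usr/bin/fuse-overlayfs -o lowerdir=...,private /ctr/merge
}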
+
+// MountWithOptions creates a subdir of the contentDir based on the source directory
+// from the source system. It then mounts up the source directory on to the
+// generated mount point and returns the mount point to the caller,
+// but allows the caller to set a custom workdir, upperdir, and other overlay options.
+// This API is currently used by Podman.
+func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
 	mergeDir := filepath.Join(contentDir, "merge")
 
 	// Create overlay mount options for rw/ro.
 	var overlayOptions string
-	if readOnly {
+	if opts.ReadOnly {
 		// Read-only overlay mounts require two lower layer.
 		lowerTwo := filepath.Join(contentDir, "lower")
 		if err := os.Mkdir(lowerTwo, 0755); err != nil {
 			return mount, err
 		}
-		overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", source, lowerTwo)
+		overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo)
 	} else {
 		// Read-write overlay mounts want a lower, upper and a work layer.
 		workDir := filepath.Join(contentDir, "work")
 		upperDir := filepath.Join(contentDir, "upper")
+
+		if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" {
+			workDir = opts.WorkDirOptionFragment
+			upperDir = opts.UpperDirOptionFragment
+		}
+
 		st, err := os.Stat(source)
 		if err != nil {
 			return mount, err
@@ -105,45 +185,24 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin
 			return mount, err
 		}
 	}
-
-	overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir)
+	overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir)
 	}
 
-	if unshare.IsRootless() {
-		mountProgram := ""
-
-		mountMap := map[string]bool{
-			".mount_program":         true,
-			"overlay.mount_program":  true,
-			"overlay2.mount_program": true,
-		}
-
-		for _, i := range graphOptions {
-			s := strings.SplitN(i, "=", 2)
-			if len(s) != 2 {
-				continue
-			}
-			key := s[0]
-			val := s[1]
-			if mountMap[key] {
-				mountProgram = val
-				break
-			}
+	mountProgram := findMountProgram(opts.GraphOpts)
+	if mountProgram != "" {
+		if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil {
+			return mount, err
 		}
-		if mountProgram != "" {
-			cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir)
-			if err := cmd.Run(); err != nil {
-				return mount, errors.Wrapf(err, "exec %s", mountProgram)
-			}
+		mount.Source = mergeDir
+		mount.Destination = dest
+		mount.Type = "bind"
+		mount.Options = []string{"bind", "slave"}
+		return mount, nil
+	}
 
-			mount.Source = mergeDir
-			mount.Destination = dest
-			mount.Type = "bind"
-			mount.Options = []string{"bind", "slave"}
-			return mount, nil
-		}
-		/* If a mount_program is not specified, fallback to try mount native overlay. */
+	if unshare.IsRootless() {
+		/* If a mount_program is not specified, fall back to trying a native overlay mount. */
 		overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions)
 	}
 
@@ -155,6 +214,11 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin
 	return mount, nil
 }
 
+// Convert ":" to "\:"; the path that will be overlay-mounted needs to have its colons escaped
+func escapeColon(source string) string {
+	return strings.ReplaceAll(source, ":", "\\:")
+}
+
 // RemoveTemp removes temporary mountpoint and all content from its parent
 // directory
 func RemoveTemp(contentDir string) error {
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 685d63d31f9..07986384518 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -9,12 +9,13 @@ import (
 	"net"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strconv"
 	"strings"
 	"unicode"
 
+	"github.com/containerd/containerd/platforms"
 	"github.com/containers/buildah/define"
+	internalParse "github.com/containers/buildah/internal/parse"
 	"github.com/containers/buildah/pkg/sshagent"
 	"github.com/containers/common/pkg/parse"
 	"github.com/containers/image/v5/types"
@@ -22,9 +23,11 @@ import (
 	"github.com/containers/storage/pkg/unshare"
 	units "github.com/docker/go-units"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/openshift/imagebuilder"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
 	"golang.org/x/term"
 )
 
@@ -37,17 +40,20 @@ const (
 	TypeBind = "bind"
 	// TypeTmpfs is the type for mounting tmpfs
 	TypeTmpfs = "tmpfs"
-)
-
-var (
-	errBadMntOption  = errors.Errorf("invalid mount option")
-	errDuplicateDest = errors.Errorf("duplicate mount destination")
-	optionArgError   = errors.Errorf("must provide an argument for option")
-	noDestError      = errors.Errorf("must set volume destination")
+	// TypeCache is the type for mounting a common persistent cache from the host
+	// mount=type=cache must create a persistent directory on the host so it is available for all consecutive builds.
+	// The lifecycle of this directory is inherited from how the host machine treats its temporary directory.
+	BuildahCacheDir = "buildah-cache"
 )
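For illustration, a sketch of where the type=cache backing directory would land, assuming it is rooted at buildah's temporary directory (TMPDIR, falling back to /var/tmp as in getTempDir() above) joined with BuildahCacheDir; the exact layout is an implementation detail:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	tmp := os.Getenv("TMPDIR")
	if tmp == "" {
		tmp = "/var/tmp" // same fallback as buildah's getTempDir()
	}
	// "buildah-cache" mirrors the BuildahCacheDir constant defined above.
	fmt.Println(filepath.Join(tmp, "buildah-cache")) // e.g. /var/tmp/buildah-cache
}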
 
 // CommonBuildOptions parses the build options from the bud cli
 func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
+	return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag)
+}
+
+// CommonBuildOptionsFromFlagSet parses the build options from the bud cli
+func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.CommonBuildOptions, error) {
 	var (
 		memoryLimit int64
 		memorySwap  int64
@@ -55,7 +61,7 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
 		err         error
 	)
 
-	memVal, _ := c.Flags().GetString("memory")
+	memVal, _ := flags.GetString("memory")
 	if memVal != "" {
 		memoryLimit, err = units.RAMInBytes(memVal)
 		if err != nil {
@@ -63,16 +69,25 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
 		}
 	}
 
-	memSwapValue, _ := c.Flags().GetString("memory-swap")
+	memSwapValue, _ := flags.GetString("memory-swap")
 	if memSwapValue != "" {
-		memorySwap, err = units.RAMInBytes(memSwapValue)
-		if err != nil {
-			return nil, errors.Wrapf(err, "invalid value for memory-swap")
+		if memSwapValue == "-1" {
+			memorySwap = -1
+		} else {
+			memorySwap, err = units.RAMInBytes(memSwapValue)
+			if err != nil {
+				return nil, errors.Wrapf(err, "invalid value for memory-swap")
+			}
 		}
 	}
 
-	addHost, _ := c.Flags().GetStringSlice("add-host")
+	noHosts, _ := flags.GetBool("no-hosts")
+
+	addHost, _ := flags.GetStringSlice("add-host")
 	if len(addHost) > 0 {
+		if noHosts {
+			return nil, errors.Errorf("--no-hosts and --add-host conflict, can not be used together")
+		}
 		for _, host := range addHost {
 			if err := validateExtraHost(host); err != nil {
 				return nil, errors.Wrapf(err, "invalid value for add-host")
@@ -82,8 +97,8 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
 	noDNS = false
 	dnsServers := []string{}
-	if c.Flag("dns").Changed {
-		dnsServers, _ = c.Flags().GetStringSlice("dns")
+	if flags.Changed("dns") {
+		dnsServers, _ = flags.GetStringSlice("dns")
 		for _, server := range dnsServers {
 			if strings.ToLower(server) == "none" {
 				noDNS = true
@@ -95,62 +110,65 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
 	}
 
 	dnsSearch := []string{}
-	if c.Flag("dns-search").Changed {
-		dnsSearch, _ = c.Flags().GetStringSlice("dns-search")
+	if flags.Changed("dns-search") {
+		dnsSearch, _ = flags.GetStringSlice("dns-search")
 		if noDNS && len(dnsSearch) > 0 {
 			return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none")
 		}
 	}
 
 	dnsOptions := []string{}
-	if c.Flag("dns-option").Changed {
-		dnsOptions, _ = c.Flags().GetStringSlice("dns-option")
+	if flags.Changed("dns-option") {
+		dnsOptions, _ = flags.GetStringSlice("dns-option")
 		if noDNS && len(dnsOptions) > 0 {
 			return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none")
 		}
 	}
 
-	if _, err := units.FromHumanSize(c.Flag("shm-size").Value.String()); err != nil {
+	if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil {
 		return nil, errors.Wrapf(err, "invalid --shm-size")
 	}
-	volumes, _ := c.Flags().GetStringArray("volume")
+	volumes, _ := flags.GetStringArray("volume")
 	if err := Volumes(volumes); err != nil {
 		return nil, err
 	}
-	cpuPeriod, _ := c.Flags().GetUint64("cpu-period")
-	cpuQuota, _ := c.Flags().GetInt64("cpu-quota")
-	cpuShares, _ := 
c.Flags().GetUint64("cpu-shares") - httpProxy, _ := c.Flags().GetBool("http-proxy") + cpuPeriod, _ := flags.GetUint64("cpu-period") + cpuQuota, _ := flags.GetInt64("cpu-quota") + cpuShares, _ := flags.GetUint64("cpu-shares") + httpProxy, _ := flags.GetBool("http-proxy") + identityLabel, _ := flags.GetBool("identity-label") ulimit := []string{} - if c.Flag("ulimit").Changed { - ulimit, _ = c.Flags().GetStringSlice("ulimit") + if flags.Changed("ulimit") { + ulimit, _ = flags.GetStringSlice("ulimit") } - secrets, _ := c.Flags().GetStringArray("secret") - sshsources, _ := c.Flags().GetStringArray("ssh") + secrets, _ := flags.GetStringArray("secret") + sshsources, _ := flags.GetStringArray("ssh") commonOpts := &define.CommonBuildOptions{ - AddHost: addHost, - CPUPeriod: cpuPeriod, - CPUQuota: cpuQuota, - CPUSetCPUs: c.Flag("cpuset-cpus").Value.String(), - CPUSetMems: c.Flag("cpuset-mems").Value.String(), - CPUShares: cpuShares, - CgroupParent: c.Flag("cgroup-parent").Value.String(), - DNSOptions: dnsOptions, - DNSSearch: dnsSearch, - DNSServers: dnsServers, - HTTPProxy: httpProxy, - Memory: memoryLimit, - MemorySwap: memorySwap, - ShmSize: c.Flag("shm-size").Value.String(), - Ulimit: ulimit, - Volumes: volumes, - Secrets: secrets, - SSHSources: sshsources, - } - securityOpts, _ := c.Flags().GetStringArray("security-opt") + AddHost: addHost, + CPUPeriod: cpuPeriod, + CPUQuota: cpuQuota, + CPUSetCPUs: findFlagFunc("cpuset-cpus").Value.String(), + CPUSetMems: findFlagFunc("cpuset-mems").Value.String(), + CPUShares: cpuShares, + CgroupParent: findFlagFunc("cgroup-parent").Value.String(), + DNSOptions: dnsOptions, + DNSSearch: dnsSearch, + DNSServers: dnsServers, + HTTPProxy: httpProxy, + IdentityLabel: types.NewOptionalBool(identityLabel), + Memory: memoryLimit, + MemorySwap: memorySwap, + NoHosts: noHosts, + ShmSize: findFlagFunc("shm-size").Value.String(), + Ulimit: ulimit, + Volumes: volumes, + Secrets: secrets, + SSHSources: sshsources, + } + securityOpts, _ := flags.GetStringArray("security-opt") if err := parseSecurityOpts(securityOpts, commonOpts); err != nil { return nil, err } @@ -164,7 +182,7 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti } con := strings.SplitN(opt, "=", 2) if len(con) != 2 { - return errors.Errorf("Invalid --security-opt name=value pair: %q", opt) + return errors.Errorf("invalid --security-opt name=value pair: %q", opt) } switch con[0] { @@ -175,7 +193,7 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti case "seccomp": commonOpts.SeccompProfilePath = con[1] default: - return errors.Errorf("Invalid --security-opt 2: %q", opt) + return errors.Errorf("invalid --security-opt 2: %q", opt) } } @@ -199,32 +217,14 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti return nil } +// Split string into slice by colon. Backslash-escaped colon (i.e. 
"\:") will not be regarded as separator +func SplitStringWithColonEscape(str string) []string { + return internalParse.SplitStringWithColonEscape(str) +} + // Volume parses the input of --volume func Volume(volume string) (specs.Mount, error) { - mount := specs.Mount{} - arr := strings.SplitN(volume, ":", 3) - if len(arr) < 2 { - return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) - } - if err := validateVolumeMountHostDir(arr[0]); err != nil { - return mount, err - } - if err := parse.ValidateVolumeCtrDir(arr[1]); err != nil { - return mount, err - } - mountOptions := "" - if len(arr) > 2 { - mountOptions = arr[2] - if _, err := parse.ValidateVolumeOpts(strings.Split(arr[2], ",")); err != nil { - return mount, err - } - } - mountOpts := strings.Split(mountOptions, ",") - mount.Source = arr[0] - mount.Destination = arr[1] - mount.Type = "rbind" - mount.Options = mountOpts - return mount, nil + return internalParse.Volume(volume) } // Volumes validates the host and container paths passed in to the --volume flag @@ -240,236 +240,11 @@ func Volumes(volumes []string) error { return nil } -func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { - finalVolumeMounts := make(map[string]specs.Mount) - - for _, volume := range volumes { - volumeMount, err := Volume(volume) - if err != nil { - return nil, err - } - if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination) - } - finalVolumeMounts[volumeMount.Destination] = volumeMount - } - return finalVolumeMounts, nil -} - -// GetVolumes gets the volumes from --volume and --mount -func GetVolumes(volumes []string, mounts []string) ([]specs.Mount, error) { - unifiedMounts, err := getMounts(mounts) - if err != nil { - return nil, err - } - volumeMounts, err := getVolumeMounts(volumes) - if err != nil { - return nil, err - } - for dest, mount := range volumeMounts { - if _, ok := unifiedMounts[dest]; ok { - return nil, errors.Wrapf(errDuplicateDest, dest) - } - unifiedMounts[dest] = mount - } - - finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) - for _, mount := range unifiedMounts { - finalMounts = append(finalMounts, mount) - } - return finalMounts, nil -} - -// getMounts takes user-provided input from the --mount flag and creates OCI -// spec mounts. -// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... -// buildah run --mount type=tmpfs,target=/dev/shm ... -func getMounts(mounts []string) (map[string]specs.Mount, error) { - finalMounts := make(map[string]specs.Mount) - - errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=,[src=,]target=[,options]") - - // TODO(vrothberg): the manual parsing can be replaced with a regular expression - // to allow a more robust parsing of the mount format and to give - // precise errors regarding supported format versus supported options. - for _, mount := range mounts { - arr := strings.SplitN(mount, ",", 2) - if len(arr) < 2 { - return nil, errors.Wrapf(errInvalidSyntax, "%q", mount) - } - kv := strings.Split(arr[0], "=") - // TODO: type is not explicitly required in Docker. - // If not specified, it defaults to "volume". 
- if len(kv) != 2 || kv[0] != "type" { - return nil, errors.Wrapf(errInvalidSyntax, "%q", mount) - } - - tokens := strings.Split(arr[1], ",") - switch kv[1] { - case TypeBind: - mount, err := GetBindMount(tokens) - if err != nil { - return nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, mount.Destination) - } - finalMounts[mount.Destination] = mount - case TypeTmpfs: - mount, err := GetTmpfsMount(tokens) - if err != nil { - return nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, mount.Destination) - } - finalMounts[mount.Destination] = mount - default: - return nil, errors.Errorf("invalid filesystem type %q", kv[1]) - } - } - - return finalMounts, nil -} - -// GetBindMount parses a single bind mount entry from the --mount flag. -func GetBindMount(args []string) (specs.Mount, error) { - newMount := specs.Mount{ - Type: TypeBind, - } - - setSource := false - setDest := false - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "bind-nonrecursive": - newMount.Options = append(newMount.Options, "bind") - case "ro", "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options. - // (Is this necessary?) - newMount.Options = append(newMount.Options, kv[0]) - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z": - newMount.Options = append(newMount.Options, kv[0]) - case "bind-propagation": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - newMount.Options = append(newMount.Options, kv[1]) - case "src", "source": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - if err := parse.ValidateVolumeHostDir(kv[1]); err != nil { - return newMount, err - } - newMount.Source = kv[1] - setSource = true - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { - return newMount, err - } - newMount.Destination = kv[1] - setDest = true - case "consistency": - // Option for OS X only, has no meaning on other platforms - // and can thus be safely ignored. 
- // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts - default: - return newMount, errors.Wrapf(errBadMntOption, kv[0]) - } - } - - if !setDest { - return newMount, noDestError - } - - if !setSource { - newMount.Source = newMount.Destination - } - - opts, err := parse.ValidateVolumeOpts(newMount.Options) - if err != nil { - return newMount, err - } - newMount.Options = opts - - return newMount, nil -} - -// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag -func GetTmpfsMount(args []string) (specs.Mount, error) { - newMount := specs.Mount{ - Type: TypeTmpfs, - Source: TypeTmpfs, - } - - setDest := false - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "ro", "nosuid", "nodev", "noexec": - newMount.Options = append(newMount.Options, kv[0]) - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - case "tmpfs-mode": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) - case "tmpfs-size": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) - case "src", "source": - return newMount, errors.Errorf("source is not supported with tmpfs mounts") - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { - return newMount, err - } - newMount.Destination = kv[1] - setDest = true - default: - return newMount, errors.Wrapf(errBadMntOption, kv[0]) - } - } - - if !setDest { - return newMount, noDestError - } - - return newMount, nil -} - // ValidateVolumeHostDir validates a volume mount's source directory func ValidateVolumeHostDir(hostDir string) error { return parse.ValidateVolumeHostDir(hostDir) } -// validates the host path of buildah --volume -func validateVolumeMountHostDir(hostDir string) error { - if !filepath.IsAbs(hostDir) { - return errors.Errorf("invalid host path, must be an absolute path %q", hostDir) - } - if _, err := os.Stat(hostDir); err != nil { - return errors.WithStack(err) - } - return nil -} - // ValidateVolumeCtrDir validates a volume mount's destination directory. func ValidateVolumeCtrDir(ctrDir string) error { return parse.ValidateVolumeCtrDir(ctrDir) @@ -508,20 +283,26 @@ func validateIPAddress(val string) (string, error) { // SystemContextFromOptions returns a SystemContext populated with values // per the input parameters provided by the caller for the use in authentication. func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { - certDir, err := c.Flags().GetString("cert-dir") + return SystemContextFromFlagSet(c.Flags(), c.Flag) +} + +// SystemContextFromFlagSet returns a SystemContext populated with values +// per the input parameters provided by the caller for the use in authentication. 
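For illustration, a minimal sketch of calling the cobra-free variant defined next; pflag.FlagSet.Lookup already has the func(string) *pflag.Flag signature expected for findFlagFunc, and only a subset of the recognized flags is registered here:

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/parse"
	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	flags.String("cert-dir", "", "certificates directory")
	flags.Bool("tls-verify", true, "verify TLS")
	flags.Bool("disable-compression", true, "do not compress layers")
	flags.String("creds", "", "registry credentials")
	flags.String("signature-policy", "", "signature policy file")
	flags.String("authfile", "", "auth file path")
	flags.String("registries-conf", "", "registries.conf path")
	flags.String("registries-conf-dir", "", "registries.d directory")
	flags.String("short-name-alias-conf", "", "short-name alias cache")
	if err := flags.Parse([]string{"--tls-verify=false"}); err != nil {
		panic(err)
	}

	sc, err := parse.SystemContextFromFlagSet(flags, flags.Lookup)
	if err != nil {
		panic(err)
	}
	fmt.Println(sc.OCIInsecureSkipTLSVerify) // true
}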
+func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*types.SystemContext, error) { + certDir, err := flags.GetString("cert-dir") if err != nil { certDir = "" } ctx := &types.SystemContext{ DockerCertPath: certDir, } - tlsVerify, err := c.Flags().GetBool("tls-verify") - if err == nil && c.Flag("tls-verify").Changed { + tlsVerify, err := flags.GetBool("tls-verify") + if err == nil && findFlagFunc("tls-verify").Changed { ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify) ctx.OCIInsecureSkipTLSVerify = !tlsVerify ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify } - disableCompression, err := c.Flags().GetBool("disable-compression") + disableCompression, err := flags.GetBool("disable-compression") if err == nil { if disableCompression { ctx.OCIAcceptUncompressedLayers = true @@ -529,59 +310,59 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { ctx.DirForceCompress = true } } - creds, err := c.Flags().GetString("creds") - if err == nil && c.Flag("creds").Changed { + creds, err := flags.GetString("creds") + if err == nil && findFlagFunc("creds").Changed { var err error ctx.DockerAuthConfig, err = AuthConfig(creds) if err != nil { return nil, err } } - sigPolicy, err := c.Flags().GetString("signature-policy") - if err == nil && c.Flag("signature-policy").Changed { + sigPolicy, err := flags.GetString("signature-policy") + if err == nil && findFlagFunc("signature-policy").Changed { ctx.SignaturePolicyPath = sigPolicy } - authfile, err := c.Flags().GetString("authfile") + authfile, err := flags.GetString("authfile") if err == nil { ctx.AuthFilePath = getAuthFile(authfile) } - regConf, err := c.Flags().GetString("registries-conf") - if err == nil && c.Flag("registries-conf").Changed { + regConf, err := flags.GetString("registries-conf") + if err == nil && findFlagFunc("registries-conf").Changed { ctx.SystemRegistriesConfPath = regConf } - regConfDir, err := c.Flags().GetString("registries-conf-dir") - if err == nil && c.Flag("registries-conf-dir").Changed { + regConfDir, err := flags.GetString("registries-conf-dir") + if err == nil && findFlagFunc("registries-conf-dir").Changed { ctx.RegistriesDirPath = regConfDir } - shortNameAliasConf, err := c.Flags().GetString("short-name-alias-conf") - if err == nil && c.Flag("short-name-alias-conf").Changed { + shortNameAliasConf, err := flags.GetString("short-name-alias-conf") + if err == nil && findFlagFunc("short-name-alias-conf").Changed { ctx.UserShortNameAliasConfPath = shortNameAliasConf } ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", define.Version) - if c.Flag("os") != nil && c.Flag("os").Changed { + if findFlagFunc("os") != nil && findFlagFunc("os").Changed { var os string - if os, err = c.Flags().GetString("os"); err != nil { + if os, err = flags.GetString("os"); err != nil { return nil, err } ctx.OSChoice = os } - if c.Flag("arch") != nil && c.Flag("arch").Changed { + if findFlagFunc("arch") != nil && findFlagFunc("arch").Changed { var arch string - if arch, err = c.Flags().GetString("arch"); err != nil { + if arch, err = flags.GetString("arch"); err != nil { return nil, err } ctx.ArchitectureChoice = arch } - if c.Flag("variant") != nil && c.Flag("variant").Changed { + if findFlagFunc("variant") != nil && findFlagFunc("variant").Changed { var variant string - if variant, err = c.Flags().GetString("variant"); err != nil { + if variant, err = flags.GetString("variant"); err != nil { return nil, err } ctx.VariantChoice = variant } - if 
c.Flag("platform") != nil && c.Flag("platform").Changed { + if findFlagFunc("platform") != nil && findFlagFunc("platform").Changed { var specs []string - if specs, err = c.Flags().GetStringSlice("platform"); err != nil { + if specs, err = flags.GetStringSlice("platform"); err != nil { return nil, err } if len(specs) == 0 || specs[0] == "" { @@ -669,7 +450,7 @@ const platformSep = "/" // DefaultPlatform returns the standard platform for the current system func DefaultPlatform() string { - return runtime.GOOS + platformSep + runtime.GOARCH + return platforms.DefaultString() } // Platform separates the platform string into os, arch and variant, @@ -729,10 +510,82 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) { }, nil } +// GetBuildOutput is responsible for parsing custom build output argument i.e `build --output` flag. +// Takes `buildOutput` as string and returns BuildOutputOption +func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) { + if len(buildOutput) == 1 && buildOutput == "-" { + // Feature parity with buildkit, output tar to stdout + // Read more here: https://docs.docker.com/engine/reference/commandline/build/#custom-build-outputs + return define.BuildOutputOption{Path: "", + IsDir: false, + IsStdout: true}, nil + } + if !strings.Contains(buildOutput, ",") { + // expect default --output + return define.BuildOutputOption{Path: buildOutput, + IsDir: true, + IsStdout: false}, nil + } + isDir := true + isStdout := false + typeSelected := false + pathSelected := false + path := "" + tokens := strings.Split(buildOutput, ",") + for _, option := range tokens { + arr := strings.SplitN(option, "=", 2) + if len(arr) != 2 { + return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput) + } + switch arr[0] { + case "type": + if typeSelected { + return define.BuildOutputOption{}, fmt.Errorf("Duplicate %q not supported", arr[0]) + } + typeSelected = true + if arr[1] == "local" { + isDir = true + } else if arr[1] == "tar" { + isDir = false + } else { + return define.BuildOutputOption{}, fmt.Errorf("invalid type %q selected for build output options %q", arr[1], buildOutput) + } + case "dest": + if pathSelected { + return define.BuildOutputOption{}, fmt.Errorf("Duplicate %q not supported", arr[0]) + } + pathSelected = true + path = arr[1] + default: + return define.BuildOutputOption{}, fmt.Errorf("Unrecognized key %q in build output option: %q", arr[0], buildOutput) + } + } + + if !typeSelected || !pathSelected { + return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, accepted keys are type and dest must be present", buildOutput) + } + + if path == "-" { + if isDir { + return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, type=local and dest=- is not supported", buildOutput) + } + return define.BuildOutputOption{Path: "", + IsDir: false, + IsStdout: true}, nil + } + + return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil +} + // IDMappingOptions parses the build options related to user namespaces and ID mapping. 
func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { - user := c.Flag("userns-uid-map-user").Value.String() - group := c.Flag("userns-gid-map-group").Value.String() + return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag) +} + +// IDMappingOptionsFromFlagSet parses the build options related to user namespaces and ID mapping. +func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { + user := findFlagFunc("userns-uid-map-user").Value.String() + group := findFlagFunc("userns-gid-map-group").Value.String() // If only the user or group was specified, use the same value for the // other, since we need both in order to initialize the maps using the // names. @@ -751,7 +604,7 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio } mappings = submappings } - globalOptions := c.PersistentFlags() + globalOptions := persistentFlags // We'll parse the UID and GID mapping options the same way. buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) { outmap := make([]specs.LinuxIDMapping, 0, len(basemap)) @@ -769,8 +622,8 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio if globalOptions.Lookup(option) != nil && globalOptions.Lookup(option).Changed { spec, _ = globalOptions.GetStringSlice(option) } - if c.Flag(option).Changed { - spec, _ = c.Flags().GetStringSlice(option) + if findFlagFunc(option).Changed { + spec, _ = flags.GetStringSlice(option) } idmap, err := parseIDMap(spec) if err != nil { @@ -811,8 +664,8 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio } // If the user specifically requested that we either use or don't use // user namespaces, override that default. - if c.Flag("userns").Changed { - how := c.Flag("userns").Value.String() + if findFlagFunc("userns").Changed { + how := findFlagFunc("userns").Value.String() switch how { case "", "container", "private": usernsOption.Host = false @@ -829,13 +682,6 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio } usernsOptions = define.NamespaceOptions{usernsOption} - usernetwork := c.Flags().Lookup("network") - if usernetwork != nil && !usernetwork.Changed { - usernsOptions = append(usernsOptions, define.NamespaceOption{ - Name: string(specs.NetworkNamespace), - Host: usernsOption.Host, - }) - } // If the user requested that we use the host namespace, but also that // we use mappings, that's not going to work. if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host { @@ -877,23 +723,30 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) { // NamespaceOptions parses the build options for all namespaces except for user namespace. func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) { + return NamespaceOptionsFromFlagSet(c.Flags(), c.Flag) +} + +// NamespaceOptionsFromFlagSet parses the build options for all namespaces except for user namespace. 
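For illustration, the LinuxIDMapping triple that an ID-map option such as buildah's --userns-uid-map=0:100000:65536 is parsed into by the helpers above (a sketch; the values are placeholders):

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// "0:100000:65536": container ID 0 maps to host ID 100000 for 65536 IDs.
	m := specs.LinuxIDMapping{ContainerID: 0, HostID: 100000, Size: 65536}
	fmt.Printf("container %d-%d -> host %d-%d\n",
		m.ContainerID, m.ContainerID+m.Size-1, m.HostID, m.HostID+m.Size-1)
}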
+func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) { options := make(define.NamespaceOptions, 0, 7) policy := define.NetworkDefault - for _, what := range []string{string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} { - if c.Flags().Lookup(what) != nil && c.Flag(what).Changed { - how := c.Flag(what).Value.String() + for _, what := range []string{"cgroupns", string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} { + if flags.Lookup(what) != nil && findFlagFunc(what).Changed { + how := findFlagFunc(what).Value.String() switch what { - case "network": - what = string(specs.NetworkNamespace) + case "cgroupns": + what = string(specs.CgroupNamespace) } switch how { case "", "container", "private": logrus.Debugf("setting %q namespace to %q", what, "") + policy = define.NetworkEnabled options.AddOrReplace(define.NamespaceOption{ Name: what, }) case "host": logrus.Debugf("setting %q namespace to host", what) + policy = define.NetworkEnabled options.AddOrReplace(define.NamespaceOption{ Name: what, Host: true, @@ -910,8 +763,11 @@ func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOption } } how = strings.TrimPrefix(how, "ns:") - if _, err := os.Stat(how); err != nil { - return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what) + // if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go + if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) { + if _, err := os.Stat(how); err != nil { + return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what) + } } policy = define.NetworkEnabled logrus.Debugf("setting %q namespace to %q", what, how) @@ -1034,35 +890,60 @@ func GetTempDir() string { } // Secrets parses the --secret flag -func Secrets(secrets []string) (map[string]string, error) { - parsed := make(map[string]string) - invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar") +func Secrets(secrets []string) (map[string]define.Secret, error) { + invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]") + parsed := make(map[string]define.Secret) for _, secret := range secrets { - split := strings.Split(secret, ",") - if len(split) > 2 { + tokens := strings.Split(secret, ",") + var id, src, typ string + for _, val := range tokens { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "id": + id = kv[1] + case "src": + src = kv[1] + case "env": + src = kv[1] + typ = "env" + case "type": + if kv[1] != "file" && kv[1] != "env" { + return nil, errors.New("invalid secret type, must be file or env") + } + typ = kv[1] + } + } + if id == "" { return nil, invalidSyntax } - if len(split) == 2 { - id := strings.Split(split[0], "=") - src := strings.Split(split[1], "=") - if len(split) == 2 && strings.ToLower(id[0]) == "id" && strings.ToLower(src[0]) == "src" { - fullPath, err := filepath.Abs(src[1]) - if err != nil { - return nil, err - } - _, err = os.Stat(fullPath) - if err == nil { - parsed[id[1]] = fullPath - } - if err != nil { - return nil, errors.Wrap(err, "could not parse secrets") - } + if src == "" { + src = id + } + if typ == "" { + if _, ok := os.LookupEnv(id); ok { + typ = "env" } else { - return nil, invalidSyntax + typ = "file" } - } 
else { - return nil, invalidSyntax } + + if typ == "file" { + fullPath, err := filepath.Abs(src) + if err != nil { + return nil, errors.Wrap(err, "could not parse secrets") + } + _, err = os.Stat(fullPath) + if err != nil { + return nil, errors.Wrap(err, "could not parse secrets") + } + src = fullPath + } + newSecret := define.Secret{ + Source: src, + SourceType: typ, + } + parsed[id] = newSecret + } return parsed, nil } @@ -1085,3 +966,20 @@ func SSH(sshSources []string) (map[string]*sshagent.Source, error) { } return parsed, nil } + +// ContainerIgnoreFile returns the ignore patterns from the given path or, when no path is specified, from .containerignore (falling back to .dockerignore) in the context directory, along with the path that was actually read. +func ContainerIgnoreFile(contextDir, path string) ([]string, string, error) { + if path != "" { + excludes, err := imagebuilder.ParseIgnore(path) + return excludes, path, err + } + path = filepath.Join(contextDir, ".containerignore") + excludes, err := imagebuilder.ParseIgnore(path) + if os.IsNotExist(err) { + path = filepath.Join(contextDir, ".dockerignore") + excludes, err = imagebuilder.ParseIgnore(path) + } + if os.IsNotExist(err) { + return excludes, "", nil + } + return excludes, path, err +} diff --git a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go index d02ea2a4d8d..b985cd2b01a 100644 --- a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go +++ b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go @@ -111,8 +111,9 @@ func (a *AgentServer) Serve(processLabel string) (string, error) { a.wg.Done() }() // the only way to get agent.ServeAgent is to close the connection it's serving on + // TODO: ideally we should use some sort of forwarding mechanism for output instead of manually closing the connection. go func() { - time.Sleep(500 * time.Millisecond) + time.Sleep(2000 * time.Millisecond) c.Close() }() } diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index 7149ac98631..d203e60651d 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -6,7 +6,6 @@ import ( "time" "github.com/containers/buildah/define" - "github.com/containers/buildah/pkg/blobcache" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/types" @@ -63,21 +62,27 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s libimageOptions.OciDecryptConfig = options.OciDecryptConfig libimageOptions.AllTags = options.AllTags libimageOptions.RetryDelay = &options.RetryDelay + libimageOptions.DestinationLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) if options.MaxRetries > 0 { retries := uint(options.MaxRetries) libimageOptions.MaxRetries = &retries } - if options.BlobDirectory != "" { - libimageOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) - } pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String()) if err != nil { return "", err } + // Note: It is important to do this before we pull any images/create containers. + // The default backend detection logic needs an empty store to correctly detect + // that we can use netavark; if the store is not empty, it will use CNI so as not to break existing installs.
+ _, err = getNetworkInterface(options.Store, "", "") + if err != nil { + return "", err + } + runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext}) if err != nil { return "", err diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go index 78adb69396c..65bda9ca011 100644 --- a/vendor/github.com/containers/buildah/push.go +++ b/vendor/github.com/containers/buildah/push.go @@ -10,6 +10,7 @@ import ( "github.com/containers/common/libimage" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" @@ -20,11 +21,30 @@ import ( "github.com/sirupsen/logrus" ) +// cacheLookupReferenceFunc wraps a BlobCache into a +// libimage.LookupReferenceFunc to allow for using a BlobCache during +// image-copy operations. +func cacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc { + // Using a closure here allows us to reference a BlobCache without + // having to explicitly maintain it in the libimage API. + return func(ref types.ImageReference) (types.ImageReference, error) { + if directory == "" { + return ref, nil + } + ref, err := blobcache.NewBlobCache(ref, directory, compress) + if err != nil { + return nil, errors.Wrapf(err, "error using blobcache %q", directory) + } + return ref, nil + } +} + // PushOptions can be used to alter how an image is copied somewhere. type PushOptions struct { // Compression specifies the type of compression which is applied to // layer blobs. The default is to not use compression, but // archive.Gzip is recommended. + // OBSOLETE: Use CompressionFormat instead. Compression archive.Compression // SignaturePolicyPath specifies an override location for the signature // policy which should be used for verifying the new image as it is @@ -71,6 +91,11 @@ type PushOptions struct { // integers in the slice represent 0-indexed layer indices, with support for negative // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. OciEncryptLayers *[]int + + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat *compression.Algorithm + // CompressionLevel specifies what compression level is used + CompressionLevel *int } // Push copies the contents of the image to a new location. 
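Editorial note on the push.go hunk above: cacheLookupReferenceFunc configures the blob cache through a closure instead of threading a cache object through libimage's API, and because the empty-directory check moved inside the closure, callers no longer need the `if options.BlobDirectory != ""` guard. Below is a minimal, self-contained sketch of that pattern; the Ref, LookupFunc, and wrapWithCache names are illustrative stand-ins, not buildah's or containers/image's actual API.

package main

import "fmt"

// Ref stands in for types.ImageReference; LookupFunc mirrors the shape of
// libimage.LookupReferenceFunc: rewrite a reference, or pass it through.
type Ref struct{ Name string }

type LookupFunc func(Ref) (Ref, error)

// wrapWithCache returns a LookupFunc that redirects references into a cache
// directory when one is configured, and is a no-op otherwise. Keeping the
// empty-directory check inside the closure lets call sites stay unconditional.
func wrapWithCache(directory string) LookupFunc {
	return func(r Ref) (Ref, error) {
		if directory == "" {
			return r, nil // no cache configured; hand the reference back unchanged
		}
		return Ref{Name: directory + "/" + r.Name}, nil
	}
}

func main() {
	lookup := wrapWithCache("/var/cache/blobs")
	ref, _ := lookup(Ref{Name: "library/alpine"})
	fmt.Println(ref.Name) // /var/cache/blobs/library/alpine
}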
@@ -84,19 +109,19 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options libimageOptions.RetryDelay = &options.RetryDelay libimageOptions.OciEncryptConfig = options.OciEncryptConfig libimageOptions.OciEncryptLayers = options.OciEncryptLayers + libimageOptions.CompressionFormat = options.CompressionFormat + libimageOptions.CompressionLevel = options.CompressionLevel libimageOptions.PolicyAllowStorage = true if options.Quiet { libimageOptions.Writer = nil } - if options.BlobDirectory != "" { - compress := types.PreserveOriginal - if options.Compression == archive.Gzip { - compress = types.Compress - } - libimageOptions.SourceLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, compress) + compress := types.PreserveOriginal + if options.Compression == archive.Gzip { + compress = types.Compress } + libimageOptions.SourceLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, compress) runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext}) if err != nil { diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index 935630cae34..e56aac8c9dc 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -5,7 +5,9 @@ import ( "io" "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" "github.com/containers/buildah/pkg/sshagent" + "github.com/containers/image/v5/types" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -83,6 +85,8 @@ type RunOptions struct { Runtime string // Args adds global arguments for the runtime. Args []string + // NoHosts tells Buildah not to generate /etc/hosts for the container, leaving the image's own file in place + NoHosts bool // NoPivot adds the --no-pivot runtime flag. NoPivot bool // Mounts are additional mount points which we want to provide. @@ -93,6 +97,8 @@ type RunOptions struct { User string // WorkingDir is an override for the working directory. WorkingDir string + // ContextDir is used as the root directory for the source location for mounts that are of type "bind". + ContextDir string // Shell is default shell to run in a container. Shell string // Cmd is an override for the configured default command. @@ -139,20 +145,36 @@ type RunOptions struct { // Devices are the additional devices to add to the containers Devices define.ContainerDevices // Secrets are the available secrets to use in a RUN - Secrets map[string]string + Secrets map[string]define.Secret // SSHSources is the available ssh agents to use in a RUN SSHSources map[string]*sshagent.Source `json:"-"` // RunMounts are mounts for this run. RunMounts for this run // will not show up in subsequent runs. RunMounts []string + // StageMountPoints is a map of stage names to their container mount points (if any), as provided by the stage executor + StageMountPoints map[string]internal.StageMountDetails + // ExternalImageMounts are external image mounts to be cleaned up. + // buildah run --mount can mount images before RUN is called, and RUN can clean + // them up as well + ExternalImageMounts []string + // SystemContext is the system context of the current build + SystemContext *types.SystemContext + // CgroupManager is the cgroup manager to use for running OCI containers + CgroupManager string } // RunMountArtifacts are the artifacts created when using a run mount.
type runMountArtifacts struct { // RunMountTargets are the run mount targets inside the container RunMountTargets []string + // TmpFiles are artifacts that need to be removed outside the container + TmpFiles []string + // MountedImages are any external images which were mounted inside the container + MountedImages []string // Agents are the ssh agents started Agents []*sshagent.AgentServer // SSHAuthSock is the path to the ssh auth sock inside the container SSHAuthSock string + // LockedTargets are targets to be unlocked, if there are any + LockedTargets []string } diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 113c83ef9a2..f52754c544e 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -1,10 +1,10 @@ +//go:build linux // +build linux package buildah import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -12,32 +12,43 @@ import ( "net" "os" "os/exec" + "os/signal" "path/filepath" "runtime" "strconv" "strings" "sync" + "sync/atomic" "syscall" "time" - "github.com/containernetworking/cni/libcni" "github.com/containers/buildah/bind" "github.com/containers/buildah/chroot" "github.com/containers/buildah/copier" "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" + internalParse "github.com/containers/buildah/internal/parse" + internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/overlay" + "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" + "github.com/containers/common/libnetwork/etchosts" + "github.com/containers/common/libnetwork/network" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/capabilities" "github.com/containers/common/pkg/chown" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/defaultnet" "github.com/containers/common/pkg/subscriptions" + imagetypes "github.com/containers/image/v5/types" + "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/unshare" + storagetypes "github.com/containers/storage/types" "github.com/docker/go-units" "github.com/docker/libnetwork/resolvconf" "github.com/docker/libnetwork/types" @@ -152,11 +163,16 @@ func (b *Builder) Run(command []string, options RunOptions) error { setupTerminal(g, options.Terminal, options.TerminalSize) - configureNetwork, configureNetworks, err := b.configureNamespaces(g, options) + configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options) if err != nil { return err } + // custom networks are not supported with rootless isolation + if len(configureNetworks) > 0 && isolation == IsolationOCIRootless { + return errors.New("cannot use custom networks when running rootless") + } + homeDir, err := b.configureUIDGID(g, mountPoint, options) if err != nil { return err @@ -176,16 +192,19 @@ func (b *Builder) Run(command []string, options RunOptions) error { return err } - // Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
- rootUID, rootGID, err := util.GetHostRootIDs(spec) - if err != nil { - return err + uid, gid := spec.Process.User.UID, spec.Process.User.GID + if spec.Linux != nil { + uid, gid, err = util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, uid, gid) + if err != nil { + return err + } } - rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} mode := os.FileMode(0755) coptions := copier.MkdirOptions{ - ChownNew: rootIDPair, + ChownNew: idPair, ChmodNew: &mode, } if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil { @@ -196,14 +215,31 @@ func (b *Builder) Run(command []string, options RunOptions) error { namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...) volumes := b.Volumes() - if !contains(volumes, "/etc/hosts") { - hostFile, err := b.generateHosts(path, spec.Hostname, b.CommonBuildOpts.AddHost, rootIDPair) + // Figure out who owns files that will appear to be owned by UID/GID 0 in the container. + rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return err + } + rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + hostFile := "" + if !options.NoHosts && !contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled { + hostFile, err = b.generateHosts(path, rootIDPair, mountPoint) if err != nil { return err } - // Only bind /etc/hosts if there's a network - if options.ConfigureNetwork != define.NetworkDisabled { - bindFiles["/etc/hosts"] = hostFile + bindFiles[config.DefaultHostsFile] = hostFile + } + + // generate /etc/hostname if the user did not intentionally override it + if !(contains(volumes, "/etc/hostname")) { + if _, ok := bindFiles["/etc/hostname"]; !ok { + hostFile, err := b.generateHostname(path, spec.Hostname, rootIDPair) + if err != nil { + return err + } + // Bind /etc/hostname + bindFiles["/etc/hostname"] = hostFile } } @@ -229,7 +265,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { rootless = 1 } // Populate the .containerenv with container information - containerenv := fmt.Sprintf(`\ + containerenv := fmt.Sprintf(` engine="buildah-%s" name=%q id=%q @@ -247,7 +283,7 @@ rootless=%d bindFiles["/run/.containerenv"] = containerenvPath } - runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions, options.Secrets, options.SSHSources, options.RunMounts) + runArtifacts, err := b.setupMounts(options.SystemContext, mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.Secrets, options.SSHSources, options.RunMounts, options.ContextDir, options.StageMountPoints) if err != nil { return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID) } @@ -256,36 +292,29 @@ rootless=%d spec.Process.Env = append(spec.Process.Env, sshenv) } + // if the following run was invoked from `buildah run` + // and some images were mounted for this run, + // add them to the cleanup artifacts + if len(options.ExternalImageMounts) > 0 { + runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...)
+ } + defer func() { - if err := cleanupRunMounts(mountPoint, runArtifacts); err != nil { - options.Logger.Errorf("unabe to cleanup run mounts %v", err) + if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil { + options.Logger.Errorf("unable to cleanup run mounts %v", err) } }() defer b.cleanupTempVolumes() - if options.CNIConfigDir == "" { - options.CNIConfigDir = b.CNIConfigDir - if b.CNIConfigDir == "" { - options.CNIConfigDir = define.DefaultCNIConfigDir - } - } - if options.CNIPluginPath == "" { - options.CNIPluginPath = b.CNIPluginPath - if b.CNIPluginPath == "" { - options.CNIPluginPath = define.DefaultCNIPluginPath - } - } - switch isolation { case define.IsolationOCI: var moreCreateArgs []string if options.NoPivot { - moreCreateArgs = []string{"--no-pivot"} - } else { - moreCreateArgs = nil + moreCreateArgs = append(moreCreateArgs, "--no-pivot") } - err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path)) + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, + mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile) case IsolationChroot: err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr) case IsolationOCIRootless: @@ -293,10 +322,8 @@ rootless=%d if options.NoPivot { moreCreateArgs = append(moreCreateArgs, "--no-pivot") } - if err := setupRootlessSpecChanges(spec, path, b.CommonBuildOpts.ShmSize); err != nil { - return err - } - err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path)) + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, + mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile) default: err = errors.Errorf("don't know how to run this command") } @@ -413,7 +440,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin return mounts, nil } -func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions define.NamespaceOptions, secrets map[string]string, sshSources map[string]*sshagent.Source, runFileMounts []string) (*runMountArtifacts, error) { +func (b *Builder) setupMounts(context *imagetypes.SystemContext, mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, secrets map[string]define.Secret, sshSources map[string]*sshagent.Source, runFileMounts []string, contextDir string, stageMountPoints map[string]internal.StageMountDetails) (*runMountArtifacts, error) { // Start building a new list of mounts. 
var mounts []specs.Mount haveMount := func(destination string) bool { @@ -426,79 +453,9 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st return false } - ipc := namespaceOptions.Find(string(specs.IPCNamespace)) - hostIPC := ipc == nil || ipc.Host - net := namespaceOptions.Find(string(specs.NetworkNamespace)) - hostNetwork := net == nil || net.Host - user := namespaceOptions.Find(string(specs.UserNamespace)) - hostUser := (user == nil || user.Host) && !unshare.IsRootless() - - // Copy mounts from the generated list. - mountCgroups := true - specMounts := []specs.Mount{} - for _, specMount := range spec.Mounts { - // Override some of the mounts from the generated list if we're doing different things with namespaces. - if specMount.Destination == "/dev/shm" { - specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777"} - if shmSize != "" { - specMount.Options = append(specMount.Options, "size="+shmSize) - } - if hostIPC && !hostUser { - if _, err := os.Stat("/dev/shm"); err != nil && os.IsNotExist(err) { - logrus.Debugf("/dev/shm is not present, not binding into container") - continue - } - specMount = specs.Mount{ - Source: "/dev/shm", - Type: "bind", - Destination: "/dev/shm", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, - } - } - } - if specMount.Destination == "/dev/mqueue" { - if hostIPC && !hostUser { - if _, err := os.Stat("/dev/mqueue"); err != nil && os.IsNotExist(err) { - logrus.Debugf("/dev/mqueue is not present, not binding into container") - continue - } - specMount = specs.Mount{ - Source: "/dev/mqueue", - Type: "bind", - Destination: "/dev/mqueue", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, - } - } - } - if specMount.Destination == "/sys" { - if hostNetwork && !hostUser { - mountCgroups = false - if _, err := os.Stat("/sys"); err != nil && os.IsNotExist(err) { - logrus.Debugf("/sys is not present, not binding into container") - continue - } - specMount = specs.Mount{ - Source: "/sys", - Type: "bind", - Destination: "/sys", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev", "ro"}, - } - } - } - specMounts = append(specMounts, specMount) - } - - // Add a mount for the cgroups filesystem, unless we're already - // recursively bind mounting all of /sys, in which case we shouldn't - // bother with it. - sysfsMount := []specs.Mount{} - if mountCgroups { - sysfsMount = []specs.Mount{{ - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{bind.NoBindOption, "nosuid", "noexec", "nodev", "relatime", "ro"}, - }} + specMounts, err := setupSpecialMountSpecChanges(spec, b.CommonBuildOpts.ShmSize) + if err != nil { + return nil, err } // Get the list of files we need to bind into the container. @@ -517,12 +474,18 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st return nil, err } + // Get host UID and GID of the container process. + processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID) + if err != nil { + return nil, err + } + // Get the list of subscriptions mounts. subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false) // Get the list of mounts that are just for this Run() call. 
// TODO: acui: de-spaghettify run mounts - runMounts, mountArtifacts, err := runSetupRunMounts(runFileMounts, secrets, sshSources, b.MountLabel, cdir, spec.Linux.UIDMappings, spec.Linux.GIDMappings, b.ProcessLabel) + runMounts, mountArtifacts, err := b.runSetupRunMounts(context, runFileMounts, secrets, stageMountPoints, sshSources, cdir, contextDir, spec.Linux.UIDMappings, spec.Linux.GIDMappings, int(rootUID), int(rootGID), int(processUID), int(processGID)) if err != nil { return nil, err } @@ -532,11 +495,6 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st if err != nil { return nil, err } - // Get host UID and GID of the container process. - processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID) - if err != nil { - return nil, err - } // Get the list of explicitly-specified volume mounts. volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, int(rootUID), int(rootGID), int(processUID), int(processGID)) @@ -544,7 +502,12 @@ return nil, err } - allMounts := util.SortMounts(append(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...), sysfsMount...)) + // prepare the list of mount destinations which can be cleaned up safely: + // we can clean bindFiles, subscriptionMounts, and specMounts; + // everything other than these might contain user content + mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...) + + allMounts := util.SortMounts(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...)) // Add them all, in the preferred order, except where they conflict with something that was previously added. for _, mount := range allMounts { if haveMount(mount.Destination) { @@ -560,6 +523,23 @@ return mountArtifacts, nil } +// cleanableDestinationListFromMounts returns the destinations which can be cleaned up after every RUN +func cleanableDestinationListFromMounts(mounts []spec.Mount) []string { + mountDest := []string{} + for _, mount := range mounts { + // Add all destinations to mountArtifacts so that they can be cleaned up later + if mount.Destination != "" { + // we don't want to remove destinations under /etc, /dev, /sys, or /proc, as the rootfs already contains these files + // and unionfs will create `whiteout` (i.e. `.wh`) files on removal of overlapping files from these directories.
+ // everything other than these will be cleaned up + if !strings.HasPrefix(mount.Destination, "/etc") && !strings.HasPrefix(mount.Destination, "/dev") && !strings.HasPrefix(mount.Destination, "/sys") && !strings.HasPrefix(mount.Destination, "/proc") { + mountDest = append(mountDest, mount.Destination) + } + } + } + return mountDest +} + // addResolvConf copies files from host and sets them up to bind mount into container func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaceOptions define.NamespaceOptions) (string, error) { resolvConf := "/etc/resolv.conf" @@ -664,44 +644,58 @@ func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServe } // generateHosts creates a containers hosts file -func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownOpts *idtools.IDPair) (string, error) { - hostPath := "/etc/hosts" - stat, err := os.Stat(hostPath) +func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string) (string, error) { + conf, err := config.Default() if err != nil { return "", err } - hosts := bytes.NewBufferString("# Generated by Buildah\n") - orig, err := ioutil.ReadFile(hostPath) + path, err := etchosts.GetBaseHostFile(conf.Containers.BaseHostsFile, imageRoot) if err != nil { return "", err } - hosts.Write(orig) - for _, host := range addHosts { - // verify the host format - values := strings.SplitN(host, ":", 2) - if len(values) != 2 { - return "", errors.Errorf("unable to parse host entry %q: incorrect format", host) - } - if values[0] == "" { - return "", errors.Errorf("hostname in host entry %q is empty", host) - } - if values[1] == "" { - return "", errors.Errorf("IP address in host entry %q is empty", host) - } - hosts.Write([]byte(fmt.Sprintf("%s\t%s\n", values[1], values[0]))) + + targetfile := filepath.Join(rdir, "hosts") + if err := etchosts.New(&etchosts.Params{ + BaseFile: path, + ExtraHosts: b.CommonBuildOpts.AddHost, + HostContainersInternalIP: etchosts.GetHostContainersInternalIP(conf, nil, nil), + TargetFile: targetfile, + }); err != nil { + return "", err } - if hostname != "" { - hosts.Write([]byte(fmt.Sprintf("127.0.0.1 %s\n", hostname))) - hosts.Write([]byte(fmt.Sprintf("::1 %s\n", hostname))) + uid := 0 + gid := 0 + if chownOpts != nil { + uid = chownOpts.UID + gid = chownOpts.GID } - cfile := filepath.Join(rdir, filepath.Base(hostPath)) - if err = ioutils.AtomicWriteFile(cfile, hosts.Bytes(), stat.Mode().Perm()); err != nil { - return "", errors.Wrapf(err, "error writing /etc/hosts into the container") + if err = os.Chown(targetfile, uid, gid); err != nil { + return "", err } - uid := int(stat.Sys().(*syscall.Stat_t).Uid) - gid := int(stat.Sys().(*syscall.Stat_t).Gid) + if err := label.Relabel(targetfile, b.MountLabel, false); err != nil { + return "", err + } + + return targetfile, nil +} + +// generateHostname creates a container's /etc/hostname file +func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDPair) (string, error) { + var err error + hostnamePath := "/etc/hostname" + + var hostnameBuffer bytes.Buffer + hostnameBuffer.Write([]byte(fmt.Sprintf("%s\n", hostname))) + + cfile := filepath.Join(rdir, filepath.Base(hostnamePath)) + if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0644); err != nil { + return "", errors.Wrapf(err, "error writing /etc/hostname into the container") + } + + uid := 0 + gid := 0 if chownOpts != nil { uid = chownOpts.UID gid = chownOpts.GID } @@ -736,7
+730,8 @@ func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, termina } } -func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) { +func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string, + containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) { if options.Logger == nil { options.Logger = logrus.StandardLogger() } @@ -772,11 +767,10 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe runtime := options.Runtime if runtime == "" { runtime = util.Runtime() - - localRuntime := util.FindLocalRuntime(runtime) - if localRuntime != "" { - runtime = localRuntime - } + } + localRuntime := util.FindLocalRuntime(runtime) + if localRuntime != "" { + runtime = localRuntime } // Default to just passing down our stdio. @@ -795,7 +789,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe if err = unix.Pipe(finishCopy); err != nil { return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio") } - finishedCopy := make(chan struct{}) + finishedCopy := make(chan struct{}, 1) var pargs []string if spec.Process != nil { pargs = spec.Process.Args @@ -820,6 +814,9 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { return 1, err } + if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil { + return 1, err + } errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} // Set stdio to our pipes. @@ -839,26 +836,36 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe } } + runtimeArgs := options.Args[:] + if options.CgroupManager == config.SystemdCgroupsManager { + runtimeArgs = append(runtimeArgs, "--systemd-cgroup") + } + // Build the commands that we'll execute. pidFile := filepath.Join(bundlePath, "pid") - args := append(append(append(options.Args, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) + args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) create := exec.Command(runtime, args...) + setPdeathsig(create) create.Dir = bundlePath stdin, stdout, stderr := getCreateStdio() create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr - if create.SysProcAttr == nil { - create.SysProcAttr = &syscall.SysProcAttr{} - } args = append(options.Args, "start", containerName) start := exec.Command(runtime, args...) + setPdeathsig(start) start.Dir = bundlePath start.Stderr = os.Stderr - args = append(options.Args, "kill", containerName) - kill := exec.Command(runtime, args...) - kill.Dir = bundlePath - kill.Stderr = os.Stderr + kill := func(signal string) *exec.Cmd { + args := append(options.Args, "kill", containerName) + if signal != "" { + args = append(args, signal) + } + kill := exec.Command(runtime, args...) + kill.Dir = bundlePath + kill.Stderr = os.Stderr + return kill + } args = append(options.Args, "delete", containerName) del := exec.Command(runtime, args...) 
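Editorial note on the hunk above: the single pre-built `kill` command becomes a `kill(signal string)` factory because an os/exec.Cmd can only be started once, while the runtime now needs to issue `kill` more than once and with different signals (the default stop in the deferred cleanup, and SIGKILL from the interrupt handler added below). A standalone sketch of that design choice, with illustrative names and values:

package main

import "os/exec"

// killCmd builds a fresh *exec.Cmd on every call; reusing a Cmd after
// Run/Start fails with "exec: already started". An optional signal name
// is appended to the runtime's kill arguments, mirroring the diff above.
func killCmd(runtime, container, signal string) *exec.Cmd {
	args := []string{"kill", container}
	if signal != "" {
		args = append(args, signal)
	}
	return exec.Command(runtime, args...)
}

func main() {
	// Each invocation constructs a new command, so both calls are safe.
	_ = killCmd("runc", "builder-ctr", "")        // default termination
	_ = killCmd("runc", "builder-ctr", "SIGKILL") // forced kill, repeatable
}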
@@ -891,7 +898,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe if err != nil { return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) } - stopped := false + var stopped uint32 var reaping sync.WaitGroup reaping.Add(1) go func() { @@ -902,17 +909,20 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe wstatus = 0 options.Logger.Errorf("error waiting for container child process %d: %v\n", pid, err) } - stopped = true + atomic.StoreUint32(&stopped, 1) }() if configureNetwork { - teardown, err := runConfigureNetwork(isolation, options, configureNetworks, pid, containerName, pargs) - if teardown != nil { - defer teardown() - } - if err != nil { + if _, err := containerCreateW.Write([]byte{1}); err != nil { return 1, err } + containerCreateW.Close() + logrus.Debug("waiting for parent start message") + b := make([]byte, 1) + if _, err := containerStartR.Read(b); err != nil { + return 1, errors.Wrap(err, "did not get container start message from parent") + } + containerStartR.Close() } if copyPipes { @@ -935,14 +945,24 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe return 1, errors.Wrapf(err, "error from %s starting container", runtime) } defer func() { - if !stopped { - if err2 := kill.Run(); err2 != nil { - options.Logger.Infof("error from %s stopping container: %v", runtime, err2) + if atomic.LoadUint32(&stopped) == 0 { + if err := kill("").Run(); err != nil { + options.Logger.Infof("error from %s stopping container: %v", runtime, err) } + atomic.StoreUint32(&stopped, 1) } }() // Wait for the container to exit. + interrupted := make(chan os.Signal, 100) + go func() { + for range interrupted { + if err := kill("SIGKILL").Run(); err != nil { + logrus.Errorf("%v sending SIGKILL", err) + } + } + }() + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) for { now := time.Now() var state specs.State @@ -952,7 +972,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe stat.Stderr = os.Stderr stateOutput, err := stat.Output() if err != nil { - if stopped { + if atomic.LoadUint32(&stopped) != 0 { // container exited break } @@ -964,23 +984,25 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe switch state.Status { case "running": case "stopped": - stopped = true + atomic.StoreUint32(&stopped, 1) default: return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status) } - if stopped { + if atomic.LoadUint32(&stopped) != 0 { break } select { case <-finishedCopy: - stopped = true + atomic.StoreUint32(&stopped, 1) case <-time.After(time.Until(now.Add(100 * time.Millisecond))): continue } - if stopped { + if atomic.LoadUint32(&stopped) != 0 { break } } + signal.Stop(interrupted) + close(interrupted) // Close the writing end of the stop-handling-stdio notification pipe. 
unix.Close(finishCopy[1]) @@ -1066,7 +1088,8 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) { unix.CloseOnExec(fd) } - cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0") + cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", strconv.Itoa(pid), "tap0") + setPdeathsig(cmd) cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} @@ -1107,76 +1130,16 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) { }, nil } -func runConfigureNetwork(isolation define.Isolation, options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) { - var netconf, undo []*libcni.NetworkConfigList - +func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) { if isolation == IsolationOCIRootless { - if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" { - return setupRootlessNetwork(pid) - } + teardown, err = setupRootlessNetwork(pid) + return teardown, nil, err } - confdir := options.CNIConfigDir - // Create a default configuration if one is not present. - // Need to pull containers.conf settings for this one. - containersConf, err := config.Default() - if err != nil { - return nil, errors.Wrapf(err, "failed to get container config") - } - if err := defaultnet.Create(containersConf.Network.DefaultNetwork, containersConf.Network.DefaultSubnet, confdir, confdir, containersConf.Engine.MachineEnabled); err != nil { - logrus.Errorf("Failed to created default CNI network: %v", err) + if len(configureNetworks) == 0 { + configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()} } - // Scan for CNI configuration files. - files, err := libcni.ConfFiles(confdir, []string{".conf"}) - if err != nil { - return nil, errors.Wrapf(err, "error finding CNI networking configuration files named *.conf in directory %q", confdir) - } - lists, err := libcni.ConfFiles(confdir, []string{".conflist"}) - if err != nil { - return nil, errors.Wrapf(err, "error finding CNI networking configuration list files named *.conflist in directory %q", confdir) - } - logrus.Debugf("CNI network configuration file list: %#v", append(files, lists...)) - // Read the CNI configuration files. 
- for _, file := range files { - nc, err := libcni.ConfFromFile(file) - if err != nil { - return nil, errors.Wrapf(err, "error loading networking configuration from file %q for %v", file, command) - } - if len(configureNetworks) > 0 && nc.Network != nil && (nc.Network.Name == "" || !util.StringInSlice(nc.Network.Name, configureNetworks)) { - if nc.Network.Name == "" { - logrus.Debugf("configuration in %q has no name, skipping it", file) - } else { - logrus.Debugf("configuration in %q has name %q, skipping it", file, nc.Network.Name) - } - continue - } - if nc.Network == nil { - continue - } - cl, err := libcni.ConfListFromConf(nc) - if err != nil { - return nil, errors.Wrapf(err, "error converting networking configuration from file %q for %v", file, command) - } - logrus.Debugf("using network configuration from %q", file) - netconf = append(netconf, cl) - } - for _, list := range lists { - cl, err := libcni.ConfListFromFile(list) - if err != nil { - return nil, errors.Wrapf(err, "error loading networking configuration list from file %q for %v", list, command) - } - if len(configureNetworks) > 0 && (cl.Name == "" || !util.StringInSlice(cl.Name, configureNetworks)) { - if cl.Name == "" { - logrus.Debugf("configuration list in %q has no name, skipping it", list) - } else { - logrus.Debugf("configuration list in %q has name %q, skipping it", list, cl.Name) - } - continue - } - logrus.Debugf("using network configuration list from %q", list) - netconf = append(netconf, cl) - } // Make sure we can access the container's network namespace, // even after it exits, to successfully tear down the // interfaces. Ensure this by opening a handle to the network @@ -1185,40 +1148,35 @@ func runConfigureNetwork(isolation define.Isolation, options RunOptions, configu netns := fmt.Sprintf("/proc/%d/ns/net", pid) netFD, err := unix.Open(netns, unix.O_RDONLY, 0) if err != nil { - return nil, errors.Wrapf(err, "error opening network namespace for %v", command) + return nil, nil, errors.Wrapf(err, "error opening network namespace") } mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD) - // Build our search path for the plugins. - pluginPaths := strings.Split(options.CNIPluginPath, string(os.PathListSeparator)) - cni := libcni.CNIConfig{Path: pluginPaths} - // Configure the interfaces. - rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf) - teardown = func() { - for _, nc := range undo { - if err = cni.DelNetworkList(context.Background(), nc, rtconf[nc]); err != nil { - options.Logger.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err) - } + + networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks)) + for i, network := range configureNetworks { + networks[network] = nettypes.PerNetworkOptions{ + InterfaceName: fmt.Sprintf("eth%d", i), } - unix.Close(netFD) } - for i, nc := range netconf { - // Build the runtime config for use with this network configuration. - rtconf[nc] = &libcni.RuntimeConf{ - ContainerID: containerName, - NetNS: mynetns, - IfName: fmt.Sprintf("if%d", i), - Args: [][2]string{}, - CapabilityArgs: map[string]interface{}{}, - } - // Bring it up. 
- _, err := cni.AddNetworkList(context.Background(), nc, rtconf[nc]) + + opts := nettypes.NetworkOptions{ + ContainerID: containerName, + ContainerName: containerName, + Networks: networks, + } + netStatus, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts}) + if err != nil { + return nil, nil, err + } + + teardown = func() { + err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts}) if err != nil { - return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command) + options.Logger.Errorf("failed to cleanup network: %v", err) } - // Add it to the list of networks to take down when the container process exits. - undo = append([]*libcni.NetworkConfigList{nc}, undo...) } - return teardown, nil + + return teardown, netStatus, nil } func setNonblock(logger *logrus.Logger, fd int, description string, nonblocking bool) (bool, error) { //nolint:interfacer @@ -1248,6 +1206,7 @@ func runCopyStdio(logger *logrus.Logger, stdio *sync.WaitGroup, copyPipes bool, } stdio.Done() finishedCopy <- struct{}{} + close(finishedCopy) }() // Map describing where data on an incoming descriptor should go. relayMap := make(map[int]int) @@ -1575,8 +1534,24 @@ func runUsingRuntimeMain() { os.Exit(1) } + // open the pipes used to communicate with the parent process + var containerCreateW *os.File + var containerStartR *os.File + if options.ConfigureNetwork { + containerCreateW = os.NewFile(4, "containercreatepipe") + if containerCreateW == nil { + fmt.Fprintf(os.Stderr, "could not open fd 4\n") + os.Exit(1) + } + containerStartR = os.NewFile(5, "containerstartpipe") + if containerStartR == nil { + fmt.Fprintf(os.Stderr, "could not open fd 5\n") + os.Exit(1) + } + } + // Run the container, start to finish. - status, err := runUsingRuntime(options.Isolation, options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName) + status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR) if err != nil { fmt.Fprintf(os.Stderr, "error running container: %v\n", err) os.Exit(1) @@ -1690,7 +1665,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti return configureNetwork, configureNetworks, configureUTS, nil } -func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) (bool, []string, error) { +func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) { defaultNamespaceOptions, err := DefaultNamespaceOptions() if err != nil { return false, nil, err } @@ -1701,8 +1676,17 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) namespaceOptions.AddOrReplace(options.NamespaceOptions...) networkPolicy := options.ConfigureNetwork + // Nothing was specified explicitly, so the network policy should be inherited from the builder if networkPolicy == NetworkDefault { networkPolicy = b.ConfigureNetwork + + // If the builder policy was NetworkDisabled and + // we want to disable the network for this run, + // reset options.ConfigureNetwork to NetworkDisabled, + // since it will be treated as the source of truth later.
+ if networkPolicy == NetworkDisabled { + options.ConfigureNetwork = networkPolicy + } } configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy) @@ -1793,7 +1777,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { var foundrw, foundro, foundz, foundZ, foundO, foundU bool - var rootProp string + var rootProp, upperDir, workDir string for _, opt := range options { switch opt { case "rw": @@ -1811,6 +1795,19 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, case "private", "rprivate", "slave", "rslave", "shared", "rshared": rootProp = opt } + + if strings.HasPrefix(opt, "upperdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + upperDir = splitOpt[1] + } + } + if strings.HasPrefix(opt, "workdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + workDir = splitOpt[1] + } + } } if !foundrw && !foundro { options = append(options, "rw") @@ -1831,6 +1828,10 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, } } if foundO { + if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") { + return specs.Mount{}, errors.New("if specifying upperdir then workdir must be specified or vice versa") + } + containerDir, err := b.store.ContainerDirectory(b.ContainerID) if err != nil { return specs.Mount{}, err @@ -1841,7 +1842,14 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, return specs.Mount{}, errors.Wrapf(err, "failed to create TempDir in the %s directory", containerDir) } - overlayMount, err := overlay.Mount(contentDir, host, container, rootUID, rootGID, b.store.GraphOptions()) + overlayOpts := overlay.Options{RootUID: rootUID, + RootGID: rootGID, + UpperDirOptionFragment: upperDir, + WorkDirOptionFragment: workDir, + GraphOpts: b.store.GraphOptions(), + } + + overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts) if err == nil { b.TempVolumes[contentDir] = true } @@ -1882,7 +1890,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, // Bind mount volumes given by the user when the container was created for _, i := range volumeMounts { var options []string - spliti := strings.Split(i, ":") + spliti := parse.SplitStringWithColonEscape(i) if len(spliti) > 2 { options = strings.Split(spliti[2], ",") } @@ -1935,9 +1943,6 @@ func setupCapAdd(g *generate.Generator, caps ...string) error { if err := g.AddProcessCapabilityEffective(cap); err != nil { return errors.Wrapf(err, "error adding %q to the effective capability set", cap) } - if err := g.AddProcessCapabilityInheritable(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the inheritable capability set", cap) - } if err := g.AddProcessCapabilityPermitted(cap); err != nil { return errors.Wrapf(err, "error adding %q to the permitted capability set", cap) } @@ -1956,9 +1961,6 @@ func setupCapDrop(g *generate.Generator, caps ...string) error { if err := g.DropProcessCapabilityEffective(cap); err != nil { return errors.Wrapf(err, "error removing %q from the effective capability set", cap) } - if err := g.DropProcessCapabilityInheritable(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the inheritable capability set", cap) - } if err := g.DropProcessCapabilityPermitted(cap); err != nil 
{ return errors.Wrapf(err, "error removing %q from the permitted capability set", cap) } @@ -2079,18 +2081,8 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions g.ClearProcessEnv() if b.CommonBuildOpts.HTTPProxy { - for _, envSpec := range []string{ - "http_proxy", - "HTTP_PROXY", - "https_proxy", - "HTTPS_PROXY", - "ftp_proxy", - "FTP_PROXY", - "no_proxy", - "NO_PROXY", - } { - envVal := os.Getenv(envSpec) - if envVal != "" { + for _, envSpec := range config.ProxyEnv { + if envVal, ok := os.LookupEnv(envSpec); ok { g.AddProcessEnv(envSpec, envVal) } } @@ -2104,88 +2096,162 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions } } -func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, shmSize string) error { - spec.Process.User.AdditionalGids = nil - spec.Linux.Resources = nil +func addOrReplaceMount(moutns []specs.Mount, mount specs.Mount) []spec.Mount { + for i := range moutns { + if moutns[i].Destination == mount.Destination { + moutns[i] = mount + return moutns + } + } + return append(moutns, mount) +} - emptyDir := filepath.Join(bundleDir, "empty") - if err := os.Mkdir(emptyDir, 0); err != nil { - return err +// setupSpecialMountSpecChanges creates special mounts for depending on the namespaces +// logic taken from podman and adapted for buildah +// https://github.com/containers/podman/blob/4ba71f955a944790edda6e007e6d074009d437a7/pkg/specgen/generate/oci.go#L178 +func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Mount, error) { + mounts := spec.Mounts + isRootless := unshare.IsRootless() + isNewUserns := false + isNetns := false + isPidns := false + isIpcns := false + + for _, namespace := range spec.Linux.Namespaces { + switch namespace.Type { + case specs.NetworkNamespace: + isNetns = true + case specs.UserNamespace: + isNewUserns = true + case specs.PIDNamespace: + isPidns = true + case specs.IPCNamespace: + isIpcns = true + } + } + + addCgroup := true + // mount sys when root and no userns or when both netns and userns are private + canMountSys := (!isRootless && !isNewUserns) || (isNetns && isNewUserns) + if !canMountSys { + addCgroup = false + sys := "/sys" + sysMnt := specs.Mount{ + Destination: sys, + Type: "bind", + Source: sys, + Options: []string{bind.NoBindOption, "rprivate", "nosuid", "noexec", "nodev", "ro", "rbind"}, + } + mounts = addOrReplaceMount(mounts, sysMnt) } - // Replace /sys with a read-only bind mount. - mounts := []specs.Mount{ - { - Source: "/dev", - Destination: "/dev", - Type: "tmpfs", - Options: []string{"private", "strictatime", "noexec", "nosuid", "mode=755", "size=65536k"}, - }, - { - Source: "mqueue", - Destination: "/dev/mqueue", - Type: "mqueue", - Options: []string{"private", "nodev", "noexec", "nosuid"}, - }, - { - Source: "pts", + gid5Available := true + if isRootless { + _, gids, err := unshare.GetHostIDMappings("") + if err != nil { + return nil, err + } + gid5Available = checkIdsGreaterThan5(gids) + } + if gid5Available && len(spec.Linux.GIDMappings) > 0 { + gid5Available = checkIdsGreaterThan5(spec.Linux.GIDMappings) + } + if !gid5Available { + // If we have no GID mappings, the gid=5 default option would fail, so drop it. 
+ devPts := specs.Mount{ Destination: "/dev/pts", Type: "devpts", - Options: []string{"private", "noexec", "nosuid", "newinstance", "ptmxmode=0666", "mode=0620"}, - }, - { + Source: "devpts", + Options: []string{"rprivate", "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"}, + } + mounts = addOrReplaceMount(mounts, devPts) + } + + isUserns := isNewUserns || isRootless + + if isUserns && !isIpcns { + devMqueue := "/dev/mqueue" + devMqueueMnt := specs.Mount{ + Destination: devMqueue, + Type: "bind", + Source: devMqueue, + Options: []string{bind.NoBindOption, "bind", "nosuid", "noexec", "nodev"}, + } + mounts = addOrReplaceMount(mounts, devMqueueMnt) + } + if isUserns && !isPidns { + proc := "/proc" + procMount := specs.Mount{ + Destination: proc, + Type: "bind", + Source: proc, + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, + } + mounts = addOrReplaceMount(mounts, procMount) + } + + if addCgroup { + cgroupMnt := specs.Mount{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"rprivate", "nosuid", "noexec", "nodev", "relatime", "rw"}, + } + mounts = addOrReplaceMount(mounts, cgroupMnt) + } + + // if userns and host ipc bind mount shm + if isUserns && !isIpcns { + // bind mount /dev/shm when it exists + if _, err := os.Stat("/dev/shm"); err == nil { + shmMount := specs.Mount{ + Source: "/dev/shm", + Type: "bind", + Destination: "/dev/shm", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, + } + mounts = addOrReplaceMount(mounts, shmMount) + } + } else if shmSize != "" { + shmMount := specs.Mount{ Source: "shm", Destination: "/dev/shm", Type: "tmpfs", - Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", fmt.Sprintf("size=%s", shmSize)}, - }, - { - Source: "/proc", - Destination: "/proc", - Type: "proc", - Options: []string{"private", "nodev", "noexec", "nosuid"}, - }, - { - Source: "/sys", - Destination: "/sys", - Type: "bind", - Options: []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"}, - }, - } - // Cover up /sys/fs/cgroup, if it exist in our source for /sys. - if _, err := os.Stat("/sys/fs/cgroup"); err == nil { - spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") - } - // Keep anything that isn't under /dev, /proc, or /sys. 
- for i := range spec.Mounts { - if spec.Mounts[i].Destination == "/dev" || strings.HasPrefix(spec.Mounts[i].Destination, "/dev/") || - spec.Mounts[i].Destination == "/proc" || strings.HasPrefix(spec.Mounts[i].Destination, "/proc/") || - spec.Mounts[i].Destination == "/sys" || strings.HasPrefix(spec.Mounts[i].Destination, "/sys/") { - continue + Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=" + shmSize}, } - mounts = append(mounts, spec.Mounts[i]) + mounts = addOrReplaceMount(mounts, shmMount) } - spec.Mounts = mounts - return nil + + return mounts, nil +} + +func checkIdsGreaterThan5(ids []spec.LinuxIDMapping) bool { + for _, r := range ids { + if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size { + return true + } + } + return false } -func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) { +func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, + moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile string) (err error) { var confwg sync.WaitGroup config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{ - Options: options, - Spec: spec, - RootPath: rootPath, - BundlePath: bundlePath, - ConfigureNetwork: configureNetwork, - ConfigureNetworks: configureNetworks, - MoreCreateArgs: moreCreateArgs, - ContainerName: containerName, - Isolation: isolation, + Options: options, + Spec: spec, + RootPath: rootPath, + BundlePath: bundlePath, + ConfigureNetwork: configureNetwork, + MoreCreateArgs: moreCreateArgs, + ContainerName: containerName, + Isolation: isolation, }) if conferr != nil { return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand) } cmd := reexec.Command(runUsingRuntimeCommand) + setPdeathsig(cmd) cmd.Dir = bundlePath cmd.Stdin = options.Stdin if cmd.Stdin == nil { @@ -2212,14 +2278,102 @@ } confwg.Done() }() + + // create network configuration pipes + var containerCreateR, containerCreateW fileCloser + var containerStartR, containerStartW fileCloser + if configureNetwork { + containerCreateR.file, containerCreateW.file, err = os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating container create pipe") + } + defer containerCreateR.Close() + defer containerCreateW.Close() + + containerStartR.file, containerStartW.file, err = os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating container start pipe") + } + defer containerStartR.Close() + defer containerStartW.Close() + cmd.ExtraFiles = []*os.File{containerCreateW.file, containerStartR.file} + } + cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
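// Editorial note: os/exec assigns ExtraFiles to descriptors 3, 4, 5, ... in
// the child process, after stdin/stdout/stderr. Prepending preader here makes
// it fd 3, so the two network-synchronization pipes become fds 4 and 5 — which
// is exactly where runUsingRuntimeMain opens them above, with os.NewFile(4,
// "containercreatepipe") and os.NewFile(5, "containerstartpipe").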
 	defer preader.Close()
 	defer pwriter.Close()
-	err = cmd.Run()
-	if err != nil {
-		err = errors.Wrapf(err, "error while running runtime")
+	if err := cmd.Start(); err != nil {
+		return errors.Wrapf(err, "error while starting runtime")
+	}
+
+	interrupted := make(chan os.Signal, 100)
+	go func() {
+		for receivedSignal := range interrupted {
+			if err := cmd.Process.Signal(receivedSignal); err != nil {
+				logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
+			}
+		}
+	}()
+	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+
+	if configureNetwork {
+		// we already passed the fd to the child, now close the writer so we do not hang if the child closes it
+		containerCreateW.Close()
+		if err := waitForSync(containerCreateR.file); err != nil {
+			// we do not want to return here since we want to capture the exit code from the child via cmd.Wait()
+			// close the pipes here so that the child will not hang forever
+			containerCreateR.Close()
+			containerStartW.Close()
+			logrus.Errorf("did not get container create message from subprocess: %v", err)
+		} else {
+			pidFile := filepath.Join(bundlePath, "pid")
+			pidValue, err := ioutil.ReadFile(pidFile)
+			if err != nil {
+				return err
+			}
+			pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
+			if err != nil {
+				return errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue))
+			}
+
+			teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName)
+			if teardown != nil {
+				defer teardown()
+			}
+			if err != nil {
+				return err
+			}
+
+			// only add hosts if we manage the hosts file
+			if hostsFile != "" {
+				var entries etchosts.HostEntries
+				if netstatus != nil {
+					entries = etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName)
+				} else {
+					// we have slirp4netns, default to the slirp4netns ip since this is not configurable in buildah
+					entries = etchosts.HostEntries{{IP: "10.0.2.100", Names: []string{spec.Hostname, buildContainerName}}}
+				}
+				// make sure to sync this with (b *Builder) generateHosts()
+				err = etchosts.Add(hostsFile, entries)
+				if err != nil {
+					return err
+				}
+			}
+
+			logrus.Debug("network namespace successfully set up, sending start message to child")
+			_, err = containerStartW.file.Write([]byte{1})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if err := cmd.Wait(); err != nil {
+		return errors.Wrapf(err, "error while running runtime")
 	}
 	confwg.Wait()
+	signal.Stop(interrupted)
+	close(interrupted)
 	if err == nil {
 		return conferr
 	}
@@ -2229,37 +2383,43 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
 	return err
 }
 
+// fileCloser is a helper struct to prevent closing the file twice in the code.
+// Users must call (fileCloser).Close() and not fileCloser.file.Close()
+type fileCloser struct {
+	file   *os.File
+	closed bool
+}
+
+func (f *fileCloser) Close() {
+	if !f.closed {
+		if err := f.file.Close(); err != nil {
+			logrus.Errorf("failed to close file: %v", err)
+		}
+		f.closed = true
+	}
+}
+
+// waitForSync waits for a maximum of 4 minutes to read something from the file
+func waitForSync(pipeR *os.File) error {
+	if err := pipeR.SetDeadline(time.Now().Add(4 * time.Minute)); err != nil {
+		return err
+	}
+	b := make([]byte, 16)
+	_, err := pipeR.Read(b)
+	return err
+}
+
 func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOptions) error {
 	switch isolation {
 	case IsolationOCIRootless:
-		if ns := options.NamespaceOptions.Find(string(specs.IPCNamespace)); ns == nil || ns.Host {
-			logrus.Debugf("Forcing use of an IPC namespace.")
-		}
-		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.IPCNamespace)})
-		_, err := exec.LookPath("slirp4netns")
-		hostNetworking := err != nil
-		networkNamespacePath := ""
-		if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
-			hostNetworking = ns.Host
-			networkNamespacePath = ns.Path
-			if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) {
-				logrus.Debugf("Disabling network namespace configuration.")
-				networkNamespacePath = ""
-			}
-		}
-		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{
-			Name: string(specs.NetworkNamespace),
-			Host: hostNetworking,
-			Path: networkNamespacePath,
-		})
-		if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host {
-			logrus.Debugf("Forcing use of a PID namespace.")
-		}
-		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.PIDNamespace), Host: false})
-		if ns := options.NamespaceOptions.Find(string(specs.UserNamespace)); ns == nil || ns.Host {
-			logrus.Debugf("Forcing use of a user namespace.")
+		// only change the netns if the caller did not set it
+		if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns == nil {
+			if _, err := exec.LookPath("slirp4netns"); err != nil {
+				// if slirp4netns is not installed we have to use the host's net namespace
+				options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+			}
 		}
-		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.UserNamespace)})
+		fallthrough
 	case IsolationOCI:
 		pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace))
 		userns := options.NamespaceOptions.Find(string(specs.UserNamespace))
@@ -2273,27 +2433,18 @@ func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOp
 
 // DefaultNamespaceOptions returns the default namespace settings from the
 // runtime-tools generator library.
func DefaultNamespaceOptions() (define.NamespaceOptions, error) { - options := define.NamespaceOptions{ - {Name: string(specs.CgroupNamespace), Host: true}, - {Name: string(specs.IPCNamespace), Host: true}, - {Name: string(specs.MountNamespace), Host: true}, - {Name: string(specs.NetworkNamespace), Host: true}, - {Name: string(specs.PIDNamespace), Host: true}, - {Name: string(specs.UserNamespace), Host: true}, - {Name: string(specs.UTSNamespace), Host: true}, - } - g, err := generate.New("linux") + cfg, err := config.Default() if err != nil { - return options, errors.Wrapf(err, "error generating new 'linux' runtime spec") + return nil, errors.Wrapf(err, "failed to get container config") } - spec := g.Config - if spec.Linux != nil { - for _, ns := range spec.Linux.Namespaces { - options.AddOrReplace(define.NamespaceOption{ - Name: string(ns.Type), - Path: ns.Path, - }) - } + options := define.NamespaceOptions{ + {Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"}, + {Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"}, + {Name: string(specs.MountNamespace), Host: false}, + {Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host"}, + {Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"}, + {Name: string(specs.UserNamespace), Host: cfg.Containers.UserNS == "host"}, + {Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"}, } return options, nil } @@ -2308,15 +2459,14 @@ func contains(volumes []string, v string) bool { } type runUsingRuntimeSubprocOptions struct { - Options RunOptions - Spec *specs.Spec - RootPath string - BundlePath string - ConfigureNetwork bool - ConfigureNetworks []string - MoreCreateArgs []string - ContainerName string - Isolation define.Isolation + Options RunOptions + Spec *specs.Spec + RootPath string + BundlePath string + ConfigureNetwork bool + MoreCreateArgs []string + ContainerName string + Isolation define.Isolation } func init() { @@ -2324,13 +2474,16 @@ func init() { } // runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs -func runSetupRunMounts(mounts []string, secrets map[string]string, sshSources map[string]*sshagent.Source, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) ([]spec.Mount, *runMountArtifacts, error) { +func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []string, secrets map[string]define.Secret, stageMountPoints map[string]internal.StageMountDetails, sshSources map[string]*sshagent.Source, containerWorkingDir string, contextDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, rootUID int, rootGID int, processUID int, processGID int) ([]spec.Mount, *runMountArtifacts, error) { mountTargets := make([]string, 0, 10) + tmpFiles := make([]string, 0, len(mounts)) + mountImages := make([]string, 0, 10) finalMounts := make([]specs.Mount, 0, len(mounts)) agents := make([]*sshagent.AgentServer, 0, len(mounts)) sshCount := 0 defaultSSHSock := "" tokens := []string{} + lockedTargets := []string{} for _, mount := range mounts { arr := strings.SplitN(mount, ",", 2) @@ -2344,17 +2497,19 @@ func runSetupRunMounts(mounts []string, secrets map[string]string, sshSources ma // For now, we only support type secret. 
switch kv[1] { case "secret": - mount, err := getSecretMount(tokens, secrets, mountlabel, containerWorkingDir, uidmap, gidmap) + mount, envFile, err := getSecretMount(tokens, secrets, b.MountLabel, containerWorkingDir, uidmap, gidmap) if err != nil { return nil, nil, err } if mount != nil { finalMounts = append(finalMounts, *mount) mountTargets = append(mountTargets, mount.Destination) - + if envFile != "" { + tmpFiles = append(tmpFiles, envFile) + } } case "ssh": - mount, agent, err := getSSHMount(tokens, sshCount, sshSources, mountlabel, uidmap, gidmap, processLabel) + mount, agent, err := b.getSSHMount(tokens, sshCount, sshSources, b.MountLabel, uidmap, gidmap, b.ProcessLabel) if err != nil { return nil, nil, err } @@ -2368,28 +2523,102 @@ func runSetupRunMounts(mounts []string, secrets map[string]string, sshSources ma // Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i} sshCount++ } + case "bind": + mount, image, err := b.getBindMount(context, tokens, contextDir, rootUID, rootGID, processUID, processGID, stageMountPoints) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + // only perform cleanup if image was mounted ignore everything else + if image != "" { + mountImages = append(mountImages, image) + } + case "tmpfs": + mount, err := b.getTmpfsMount(tokens, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + case "cache": + mount, lockedPaths, err := b.getCacheMount(tokens, rootUID, rootGID, processUID, processGID, stageMountPoints) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + lockedTargets = lockedPaths default: return nil, nil, errors.Errorf("invalid mount type %q", kv[1]) } } artifacts := &runMountArtifacts{ RunMountTargets: mountTargets, + TmpFiles: tmpFiles, Agents: agents, + MountedImages: mountImages, SSHAuthSock: defaultSSHSock, + LockedTargets: lockedTargets, } return finalMounts, artifacts, nil } -func getSecretMount(tokens []string, secrets map[string]string, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, error) { +func (b *Builder) getBindMount(context *imagetypes.SystemContext, tokens []string, contextDir string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, string, error) { + if contextDir == "" { + return nil, "", errors.New("Context Directory for current run invocation is not configured") + } + var optionMounts []specs.Mount + mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints) + if err != nil { + return nil, image, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, image, err + } + return &volumes[0], image, nil +} + +func (b *Builder) getTmpfsMount(tokens []string, rootUID, rootGID, processUID, processGID int) (*spec.Mount, error) { + var optionMounts []specs.Mount + mount, err := internalParse.GetTmpfsMount(tokens) + if err != nil { + return nil, err + } + optionMounts = append(optionMounts, mount) + volumes, err := 
b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, err + } + return &volumes[0], nil +} + +func (b *Builder) getCacheMount(tokens []string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, []string, error) { + var optionMounts []specs.Mount + mount, lockedTargets, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints) + if err != nil { + return nil, lockedTargets, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, lockedTargets, err + } + return &volumes[0], lockedTargets, nil +} + +func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, string, error) { errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") if len(tokens) == 0 { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } var err error var id, target string var required bool var uid, gid uint32 - var mode uint32 = 400 + var mode uint32 = 0400 for _, val := range tokens { kv := strings.SplitN(val, "=", 2) switch kv[0] { @@ -2400,76 +2629,94 @@ func getSecretMount(tokens []string, secrets map[string]string, mountlabel strin case "required": required, err = strconv.ParseBool(kv[1]) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } case "mode": mode64, err := strconv.ParseUint(kv[1], 8, 32) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } mode = uint32(mode64) case "uid": uid64, err := strconv.ParseUint(kv[1], 10, 32) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } uid = uint32(uid64) case "gid": gid64, err := strconv.ParseUint(kv[1], 10, 32) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } gid = uint32(gid64) default: - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } } if id == "" { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } // Default location for secretis is /run/secrets/id if target == "" { target = "/run/secrets/" + id } - src, ok := secrets[id] + secr, ok := secrets[id] if !ok { if required { - return nil, errors.Errorf("secret required but no secret with id %s found", id) + return nil, "", errors.Errorf("secret required but no secret with id %s found", id) } - return nil, nil + return nil, "", nil } + var data []byte + var envFile string + var ctrFileOnHost string - // Copy secrets to container working dir, since we need to chmod, chown and relabel it - // for the container user and we don't want to mess with the original file - ctrFileOnHost := filepath.Join(containerWorkingDir, "secrets", id) - _, err = os.Stat(ctrFileOnHost) - if os.IsNotExist(err) { - data, err := ioutil.ReadFile(src) + switch secr.SourceType { + case "env": + data = []byte(os.Getenv(secr.Source)) + tmpFile, err := ioutil.TempFile("/dev/shm", "buildah*") if err != nil { - return nil, err + return nil, "", err } - if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0644); err != nil { - return nil, err + envFile = tmpFile.Name() + ctrFileOnHost = tmpFile.Name() + case "file": + data, err = ioutil.ReadFile(secr.Source) + if err != nil { + return 
nil, "", err } - if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil { - return nil, err + ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id) + _, err = os.Stat(ctrFileOnHost) + if !os.IsNotExist(err) { + return nil, "", err } + default: + return nil, "", errors.New("invalid source secret type") + } + + // Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod, + // chown and relabel it for the container user and we don't want to mess with the original file + if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil { + return nil, "", err + } + if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil { + return nil, "", err } if err := label.Relabel(ctrFileOnHost, mountlabel, false); err != nil { - return nil, err + return nil, "", err } hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid) if err != nil { - return nil, err + return nil, "", err } if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil { - return nil, err + return nil, "", err } if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil { - return nil, err + return nil, "", err } newMount := specs.Mount{ Destination: target, @@ -2477,11 +2724,11 @@ func getSecretMount(tokens []string, secrets map[string]string, mountlabel strin Source: ctrFileOnHost, Options: []string{"bind", "rprivate", "ro"}, } - return &newMount, nil + return &newMount, envFile, nil } // getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container -func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, mountlabel string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) (*spec.Mount, *sshagent.AgentServer, error) { +func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, mountlabel string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) (*spec.Mount, *sshagent.AgentServer, error) { errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") var err error @@ -2524,7 +2771,6 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou gid = uint32(gid64) default: return nil, nil, errInvalidSyntax - } } @@ -2556,13 +2802,13 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou if err := label.Relabel(filepath.Dir(hostSock), mountlabel, false); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } if err := label.Relabel(hostSock, mountlabel, false); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } @@ -2570,19 +2816,19 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid) if err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } if err := os.Lchown(hostSock, int(hostUID), 
int(hostGID)); err != nil {
 		if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
-			logrus.Errorf("error shutting down agent: %v", shutdownErr)
+			b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
 		}
 		return nil, nil, err
 	}
 	if err := os.Chmod(hostSock, os.FileMode(mode)); err != nil {
 		if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
-			logrus.Errorf("error shutting down agent: %v", shutdownErr)
+			b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
 		}
 		return nil, nil, err
 	}
@@ -2596,7 +2842,7 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou
 }
 
 // cleanupRunMounts cleans up run mounts so they only appear in this run.
-func cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error {
+func (b *Builder) cleanupRunMounts(context *imagetypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error {
 	for _, agent := range artifacts.Agents {
 		err := agent.Shutdown()
 		if err != nil {
@@ -2604,6 +2850,25 @@ func cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error {
 		}
 	}
 
+	// cleanup any mounted images for this run
+	for _, image := range artifacts.MountedImages {
+		if image != "" {
+			// if we get here, some image was mounted for this run
+			i, err := internalUtil.LookupImage(context, b.store, image)
+			if err == nil {
+				// silently try to unmount and do nothing
+				// if image is being used by something else
+				_ = i.Unmount(false)
+			}
+			if errors.Cause(err) == storagetypes.ErrImageUnknown {
+				// Ignore only if ErrImageUnknown.
+				// Reason: the image is already unmounted, so do nothing
+				continue
+			}
+			return err
+		}
+	}
+
 	opts := copier.RemoveOptions{
 		All: true,
 	}
@@ -2613,5 +2878,72 @@ func cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error {
 			return err
 		}
 	}
-	return nil
+	var prevErr error
+	for _, path := range artifacts.TmpFiles {
+		err := os.Remove(path)
+		if !os.IsNotExist(err) {
+			if prevErr != nil {
+				logrus.Error(prevErr)
+			}
+			prevErr = err
+		}
+	}
+	// unlock any locked files from this RUN statement
+	for _, path := range artifacts.LockedTargets {
+		_, err := os.Stat(path)
+		if err != nil {
+			// Lockfile not found, which might be a problem,
+			// since LockedTargets must contain the list of all locked files.
+			// Don't break here since we need to unlock the other files, but
+			// log it so the user can take a look.
+			logrus.Warnf("Lockfile %q was expected here, stat failed with %v", path, err)
+			continue
+		}
+		lockfile, err := lockfile.GetLockfile(path)
+		if err != nil {
+			// unable to get the lockfile;
+			// log the error and continue unlocking the other files
+			logrus.Warn(err)
+			continue
+		}
+		if lockfile.Locked() {
+			lockfile.Unlock()
+		} else {
+			logrus.Warnf("Lockfile %q was expected to be locked, this is unexpected", path)
+			continue
+		}
+	}
+	return prevErr
+}
+
+// getNetworkInterface creates the network interface
+func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) {
+	conf, err := config.Default()
+	if err != nil {
+		return nil, err
+	}
+	// copy the config to not modify the default by accident
+	newconf := *conf
+	if len(cniConfDir) > 0 {
+		newconf.Network.NetworkConfigDir = cniConfDir
+	}
+	if len(cniPluginPath) > 0 {
+		plugins := strings.Split(cniPluginPath, string(os.PathListSeparator))
+		newconf.Network.CNIPluginDirs = plugins
+	}
+
+	_, netInt, err := network.NetworkBackend(store, &newconf, false)
+	if err != nil {
+		return nil, err
+	}
+	return netInt, nil
+}
+
+// setPdeathsig sets a parent-death signal for the
process +func setPdeathsig(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL } diff --git a/vendor/github.com/containers/buildah/run_unix.go b/vendor/github.com/containers/buildah/run_unix.go index 86f7c748292..9e62691e854 100644 --- a/vendor/github.com/containers/buildah/run_unix.go +++ b/vendor/github.com/containers/buildah/run_unix.go @@ -4,6 +4,8 @@ package buildah import ( "github.com/containers/buildah/define" + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/storage" "github.com/pkg/errors" ) @@ -22,3 +24,8 @@ func (b *Builder) Run(command []string, options RunOptions) error { func DefaultNamespaceOptions() (NamespaceOptions, error) { return NamespaceOptions{}, errors.New("function not supported on non-linux systems") } + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + return nil, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/run_unsupported.go b/vendor/github.com/containers/buildah/run_unsupported.go index 2a9029e5e1c..f0640ffe2ab 100644 --- a/vendor/github.com/containers/buildah/run_unsupported.go +++ b/vendor/github.com/containers/buildah/run_unsupported.go @@ -3,6 +3,8 @@ package buildah import ( + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/storage" "github.com/pkg/errors" ) @@ -18,3 +20,8 @@ func (b *Builder) Run(command []string, options RunOptions) error { func DefaultNamespaceOptions() (NamespaceOptions, error) { return NamespaceOptions{}, errors.New("function not supported on non-linux systems") } + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + return nil, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go index 00903203e0a..83fc867a2fe 100644 --- a/vendor/github.com/containers/buildah/selinux.go +++ b/vendor/github.com/containers/buildah/selinux.go @@ -1,10 +1,15 @@ +//go:build linux // +build linux package buildah import ( + "fmt" + "os" + "github.com/opencontainers/runtime-tools/generate" selinux "github.com/opencontainers/selinux/go-selinux" + "github.com/pkg/errors" ) func selinuxGetEnabled() bool { @@ -17,3 +22,21 @@ func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { g.SetLinuxMountLabel(mountLabel) } } + +func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error { + if !selinuxGetEnabled() || processLabel == "" || mountLabel == "" { + // SELinux is completely disabled, or we're not doing anything at all with labeling + return nil + } + pipeContext, err := selinux.ComputeCreateContext(processLabel, mountLabel, "fifo_file") + if err != nil { + return errors.Wrapf(err, "computing file creation context for pipes") + } + for i := range stdioPipe { + pipeFdName := fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0]) + if err := selinux.SetFileLabel(pipeFdName, pipeContext); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "setting file label on %q", pipeFdName) + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/selinux_unsupported.go b/vendor/github.com/containers/buildah/selinux_unsupported.go index 
2646148376b..5b1948315a4 100644 --- a/vendor/github.com/containers/buildah/selinux_unsupported.go +++ b/vendor/github.com/containers/buildah/selinux_unsupported.go @@ -12,3 +12,7 @@ func selinuxGetEnabled() bool { func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { } + +func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error { + return nil +} diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 47c9ac5cd96..9bfa9d268ea 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -123,8 +123,8 @@ func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) { // isReferenceSomething checks if the registry part of a reference is insecure or blocked func isReferenceSomething(ref types.ImageReference, sc *types.SystemContext, what func(string, *types.SystemContext) (bool, error)) (bool, error) { - if ref != nil && ref.DockerReference() != nil { - if named, ok := ref.DockerReference().(reference.Named); ok { + if ref != nil { + if named := ref.DockerReference(); named != nil { if domain := reference.Domain(named); domain != "" { return what(domain, sc) } diff --git a/vendor/github.com/containers/buildah/util/types.go b/vendor/github.com/containers/buildah/util/types.go index 3a23fd69038..12546dbd5ce 100644 --- a/vendor/github.com/containers/buildah/util/types.go +++ b/vendor/github.com/containers/buildah/util/types.go @@ -7,10 +7,6 @@ import ( const ( // DefaultRuntime if containers.conf fails. DefaultRuntime = define.DefaultRuntime - // DefaultCNIPluginPath is the default location of CNI plugin helpers. - DefaultCNIPluginPath = define.DefaultCNIPluginPath - // DefaultCNIConfigDir is the default location of CNI configuration files. - DefaultCNIConfigDir = define.DefaultCNIConfigDir ) var ( diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index c6f022d4ed7..33a8c5657c5 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -16,7 +16,6 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/shortnames" - "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" @@ -47,9 +46,8 @@ var ( // resolveName checks if name is a valid image name, and if that name doesn't // include a domain portion, returns a list of the names which it might -// correspond to in the set of configured registries, the transport used to -// pull the image, and a boolean which is true iff -// 1) the list of search registries was used, and 2) it was empty. +// correspond to in the set of configured registries, and the transport used to +// pull the image. // // The returned image names never include a transport: prefix, and if transport != "", // (transport, image) should be a valid input to alltransports.ParseImageName. 
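Editor's note: with the boolean "search registries were used and empty" result dropped from resolveName, call sites shrink to two return values plus the error. A sketch of an updated caller inside the same util package, mirroring the ExpandNames change shown below (the surrounding variables are hypothetical):

    // resolveName now returns only the candidate names and the transport
    nameList, transport, err := resolveName(n, systemContext, store)
    if err != nil {
        return nil, errors.Wrapf(err, "error parsing name %q", n)
    }
    _ = transport // empty for names resolved within local storage
    _ = nameList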
@@ -58,9 +56,9 @@ var ( // // NOTE: The "list of search registries is empty" check does not count blocked registries, // and neither the implied "localhost" nor a possible firstRegistry are counted -func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) { +func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, error) { if name == "" { - return nil, "", false, nil + return nil, "", nil } // Maybe it's a truncated image ID. Don't prepend a registry name, then. @@ -68,7 +66,7 @@ func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]s if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) { // It's a truncated version of the ID of an image that's present in local storage; // we need only expand the ID. - return []string{img.ID}, "", false, nil + return []string{img.ID}, "", nil } } // If we're referring to an image by digest, it *must* be local and we @@ -76,51 +74,32 @@ func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]s if strings.HasPrefix(name, "sha256:") { d, err := digest.Parse(name) if err != nil { - return nil, "", false, err + return nil, "", err } img, err := store.Image(d.Encoded()) if err != nil { - return nil, "", false, err + return nil, "", err } - return []string{img.ID}, "", false, nil + return []string{img.ID}, "", nil } // Transports are not supported for local image look ups. srcRef, err := alltransports.ParseImageName(name) if err == nil { - return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), false, nil + return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), nil } - // Figure out the list of registries. - var registries []string - searchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(sc) - if err != nil { - logrus.Debugf("unable to read configured registries to complete %q: %v", name, err) - searchRegistries = nil - } - for _, registry := range searchRegistries { - reg, err := sysregistriesv2.FindRegistry(sc, registry) - if err != nil { - logrus.Debugf("unable to read registry configuration for %#v: %v", registry, err) - continue - } - if reg == nil || !reg.Blocked { - registries = append(registries, registry) - } - } - searchRegistriesAreEmpty := len(registries) == 0 - var candidates []string // Local short-name resolution. 
namedCandidates, err := shortnames.ResolveLocally(sc, name) if err != nil { - return nil, "", false, err + return nil, "", err } for _, named := range namedCandidates { candidates = append(candidates, named.String()) } - return candidates, DefaultTransport, searchRegistriesAreEmpty, nil + return candidates, DefaultTransport, nil } // ExpandNames takes unqualified names, parses them as image names, and returns @@ -131,7 +110,7 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora expanded := make([]string, 0, len(names)) for _, n := range names { var name reference.Named - nameList, _, _, err := resolveName(n, systemContext, store) + nameList, _, err := resolveName(n, systemContext, store) if err != nil { return nil, errors.Wrapf(err, "error parsing name %q", n) } @@ -182,7 +161,7 @@ func ResolveNameToReferences( systemContext *types.SystemContext, image string, ) (refs []types.ImageReference, err error) { - names, transport, _, err := resolveName(image, systemContext, store) + names, transport, err := resolveName(image, systemContext, store) if err != nil { return nil, errors.Wrapf(err, "error parsing name %q", image) } diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index 4f5c7d0a16d..01cedc7ed49 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/containers/common/libimage/manifests" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/retry" "github.com/containers/image/v5/copy" @@ -26,8 +27,10 @@ const ( ) // LookupReferenceFunc return an image reference based on the specified one. -// This can be used to pass custom blob caches to the copy operation. -type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error) +// The returned reference can return custom ImageSource or ImageDestination +// objects which intercept or filter blobs, manifests, and signatures as +// they are read and written. +type LookupReferenceFunc = manifests.LookupReferenceFunc // CopyOptions allow for customizing image-copy operations. type CopyOptions struct { @@ -144,15 +147,13 @@ type copier struct { destinationLookup LookupReferenceFunc } -var ( - // storageAllowedPolicyScopes overrides the policy for local storage - // to ensure that we can read images from it. - storageAllowedPolicyScopes = signature.PolicyTransportScopes{ - "": []signature.PolicyRequirement{ - signature.NewPRInsecureAcceptAnything(), - }, - } -) +// storageAllowedPolicyScopes overrides the policy for local storage +// to ensure that we can read images from it. +var storageAllowedPolicyScopes = signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{ + signature.NewPRInsecureAcceptAnything(), + }, +} // getDockerAuthConfig extracts a docker auth config from the CopyOptions. Returns // nil if no credentials are set. 
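Editor's note: turning LookupReferenceFunc into a type alias for manifests.LookupReferenceFunc keeps existing callers compiling while both packages share one definition. A minimal conforming value, assuming the usual libimage and image/v5 import paths:

    package sketch

    import (
        "github.com/containers/common/libimage"
        "github.com/containers/image/v5/types"
    )

    // passthrough returns the reference unchanged; a real implementation would
    // wrap ref in a type whose ImageSource/ImageDestination intercepts or
    // filters blobs, manifests, and signatures as they are read and written.
    var passthrough libimage.LookupReferenceFunc = func(ref types.ImageReference) (types.ImageReference, error) {
        return ref, nil
    }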
@@ -218,15 +219,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.systemContext.DockerArchiveAdditionalTags = options.dockerArchiveAdditionalTags - if options.Architecture != "" { - c.systemContext.ArchitectureChoice = options.Architecture - } - if options.OS != "" { - c.systemContext.OSChoice = options.OS - } - if options.Variant != "" { - c.systemContext.VariantChoice = options.Variant - } + c.systemContext.OSChoice, c.systemContext.ArchitectureChoice, c.systemContext.VariantChoice = NormalizePlatform(options.OS, options.Architecture, options.Variant) if options.SignaturePolicyPath != "" { c.systemContext.SignaturePolicyPath = options.SignaturePolicyPath diff --git a/vendor/github.com/containers/common/libimage/download.go b/vendor/github.com/containers/common/libimage/download.go deleted file mode 100644 index 54edf1b9a4f..00000000000 --- a/vendor/github.com/containers/common/libimage/download.go +++ /dev/null @@ -1,46 +0,0 @@ -package libimage - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - - "github.com/pkg/errors" -) - -// tmpdir returns a path to a temporary directory. -func tmpdir() string { - tmpdir := os.Getenv("TMPDIR") - if tmpdir == "" { - tmpdir = "/var/tmp" - } - - return tmpdir -} - -// downloadFromURL downloads an image in the format "https:/example.com/myimage.tar" -// and temporarily saves in it $TMPDIR/importxyz, which is deleted after the image is imported -func (r *Runtime) downloadFromURL(source string) (string, error) { - fmt.Printf("Downloading from %q\n", source) - - outFile, err := ioutil.TempFile(r.systemContext.BigFilesTemporaryDir, "import") - if err != nil { - return "", errors.Wrap(err, "error creating file") - } - defer outFile.Close() - - response, err := http.Get(source) // nolint:noctx - if err != nil { - return "", errors.Wrapf(err, "error downloading %q", source) - } - defer response.Body.Close() - - _, err = io.Copy(outFile, response.Body) - if err != nil { - return "", errors.Wrapf(err, "error saving %s to %s", source, outFile.Name()) - } - - return outFile.Name(), nil -} diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index 833f940cca6..f9f73f527ef 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -3,13 +3,14 @@ package libimage import ( "context" "fmt" - "path/filepath" + "path" "strconv" "strings" "time" filtersPkg "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/timetype" + "github.com/containers/image/v5/docker/reference" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -18,26 +19,53 @@ import ( // indicates that the image matches the criteria. type filterFunc func(*Image) (bool, error) +// Apply the specified filters. At least one filter of each key must apply. +func (i *Image) applyFilters(filters map[string][]filterFunc) (bool, error) { + matches := false + for key := range filters { // and + matches = false + for _, filter := range filters[key] { // or + var err error + matches, err = filter(i) + if err != nil { + // Some images may have been corrupted in the + // meantime, so do an extra check and make the + // error non-fatal (see containers/podman/issues/12582). 
+ if errCorrupted := i.isCorrupted(""); errCorrupted != nil { + logrus.Errorf(errCorrupted.Error()) + return false, nil + } + return false, err + } + if matches { + break + } + } + if !matches { + return false, nil + } + } + return matches, nil +} + // filterImages returns a slice of images which are passing all specified // filters. -func filterImages(images []*Image, filters []filterFunc) ([]*Image, error) { - if len(filters) == 0 { +func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *ListImagesOptions) ([]*Image, error) { + if len(options.Filters) == 0 || len(images) == 0 { return images, nil } + + filters, err := r.compileImageFilters(ctx, options) + if err != nil { + return nil, err + } result := []*Image{} for i := range images { - include := true - var err error - for _, filter := range filters { - include, err = filter(images[i]) - if err != nil { - return nil, err - } - if !include { - break - } + match, err := images[i].applyFilters(filters) + if err != nil { + return nil, err } - if include { + if match { result = append(result, images[i]) } } @@ -47,15 +75,35 @@ func filterImages(images []*Image, filters []filterFunc) ([]*Image, error) { // compileImageFilters creates `filterFunc`s for the specified filters. The // required format is `key=value` with the following supported keys: // after, since, before, containers, dangling, id, label, readonly, reference, intermediate -func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) ([]filterFunc, error) { +func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) (map[string][]filterFunc, error) { logrus.Tracef("Parsing image filters %s", options.Filters) - filterFuncs := []filterFunc{} - for _, filter := range options.Filters { + var tree *layerTree + getTree := func() (*layerTree, error) { + if tree == nil { + t, err := r.layerTree() + if err != nil { + return nil, err + } + tree = t + } + return tree, nil + } + + filters := map[string][]filterFunc{} + duplicate := map[string]string{} + for _, f := range options.Filters { var key, value string - split := strings.SplitN(filter, "=", 2) - if len(split) != 2 { - return nil, errors.Errorf("invalid image filter %q: must be in the format %q", filter, "filter=value") + var filter filterFunc + negate := false + split := strings.SplitN(f, "!=", 2) + if len(split) == 2 { + negate = true + } else { + split = strings.SplitN(f, "=", 2) + if len(split) != 2 { + return nil, errors.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value") + } } key = split[0] @@ -63,99 +111,197 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp switch key { case "after", "since": - img, _, err := r.LookupImage(value, nil) + img, err := r.time(key, value) if err != nil { - return nil, errors.Wrapf(err, "could not find local image for filter %q", filter) + return nil, err } - filterFuncs = append(filterFuncs, filterAfter(img.Created())) + key = "since" + filter = filterAfter(img.Created()) case "before": - img, _, err := r.LookupImage(value, nil) + img, err := r.time(key, value) if err != nil { - return nil, errors.Wrapf(err, "could not find local image for filter %q", filter) + return nil, err } - filterFuncs = append(filterFuncs, filterBefore(img.Created())) + filter = filterBefore(img.Created()) case "containers": - switch value { - case "false", "true": - case "external": - if options.IsExternalContainerFunc == nil { - return nil, fmt.Errorf("libimage error: 
external containers filter without callback") - } - default: - return nil, fmt.Errorf("unsupported value %q for containers filter", value) + if err := r.containers(duplicate, key, value, options.IsExternalContainerFunc); err != nil { + return nil, err } - filterFuncs = append(filterFuncs, filterContainers(value, options.IsExternalContainerFunc)) + filter = filterContainers(value, options.IsExternalContainerFunc) case "dangling": - dangling, err := strconv.ParseBool(value) + dangling, err := r.bool(duplicate, key, value) if err != nil { - return nil, errors.Wrapf(err, "non-boolean value %q for dangling filter", value) + return nil, err } - filterFuncs = append(filterFuncs, filterDangling(ctx, dangling)) + t, err := getTree() + if err != nil { + return nil, err + } + + filter = filterDangling(ctx, dangling, t) case "id": - filterFuncs = append(filterFuncs, filterID(value)) + filter = filterID(value) case "intermediate": - intermediate, err := strconv.ParseBool(value) + intermediate, err := r.bool(duplicate, key, value) + if err != nil { + return nil, err + } + t, err := getTree() if err != nil { - return nil, errors.Wrapf(err, "non-boolean value %q for intermediate filter", value) + return nil, err } - filterFuncs = append(filterFuncs, filterIntermediate(ctx, intermediate)) + + filter = filterIntermediate(ctx, intermediate, t) case "label": - filterFuncs = append(filterFuncs, filterLabel(ctx, value)) + filter = filterLabel(ctx, value) case "readonly": - readOnly, err := strconv.ParseBool(value) + readOnly, err := r.bool(duplicate, key, value) if err != nil { - return nil, errors.Wrapf(err, "non-boolean value %q for readonly filter", value) + return nil, err } - filterFuncs = append(filterFuncs, filterReadOnly(readOnly)) - - case "reference": - filterFuncs = append(filterFuncs, filterReference(value)) + filter = filterReadOnly(readOnly) - case "until": - ts, err := timetype.GetTimestamp(value, time.Now()) + case "manifest": + manifest, err := r.bool(duplicate, key, value) if err != nil { return nil, err } - seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0) + filter = filterManifest(ctx, manifest) + + case "reference": + filter = filterReferences(value) + + case "until": + until, err := r.until(value) if err != nil { return nil, err } - until := time.Unix(seconds, nanoseconds) - filterFuncs = append(filterFuncs, filterBefore(until)) + filter = filterBefore(until) default: return nil, errors.Errorf("unsupported image filter %q", key) } + if negate { + filter = negateFilter(filter) + } + filters[key] = append(filters[key], filter) + } + + return filters, nil +} + +func negateFilter(f filterFunc) filterFunc { + return func(img *Image) (bool, error) { + b, err := f(img) + return !b, err + } +} + +func (r *Runtime) containers(duplicate map[string]string, key, value string, externalFunc IsExternalContainerFunc) error { + if exists, ok := duplicate[key]; ok && exists != value { + return errors.Errorf("specifying %q filter more than once with different values is not supported", key) + } + duplicate[key] = value + switch value { + case "false", "true": + case "external": + if externalFunc == nil { + return fmt.Errorf("libimage error: external containers filter without callback") + } + default: + return fmt.Errorf("unsupported value %q for containers filter", value) + } + return nil +} + +func (r *Runtime) until(value string) (time.Time, error) { + var until time.Time + ts, err := timetype.GetTimestamp(value, time.Now()) + if err != nil { + return until, err + } + seconds, nanoseconds, err := 
timetype.ParseTimestamps(ts, 0)
+	if err != nil {
+		return until, err
+	}
+	return time.Unix(seconds, nanoseconds), nil
+}
+
+func (r *Runtime) time(key, value string) (*Image, error) {
+	img, _, err := r.LookupImage(value, nil)
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not find local image for filter %q=%q", key, value)
 	}
+	return img, nil
+}
 
-	return filterFuncs, nil
+func (r *Runtime) bool(duplicate map[string]string, key, value string) (bool, error) {
+	if exists, ok := duplicate[key]; ok && exists != value {
+		return false, errors.Errorf("specifying %q filter more than once with different values is not supported", key)
+	}
+	duplicate[key] = value
+	set, err := strconv.ParseBool(value)
+	if err != nil {
+		return false, errors.Wrapf(err, "non-boolean value %q for %s filter", value, key)
+	}
+	return set, nil
 }
 
-// filterReference creates a reference filter for matching the specified value.
-func filterReference(value string) filterFunc {
-	// Replacing all '/' with '|' so that filepath.Match() can work '|'
-	// character is not valid in image name, so this is safe.
-	//
-	// TODO: this has been copied from Podman and requires some more review
-	// and especially tests.
-	filter := fmt.Sprintf("*%s*", value)
-	filter = strings.ReplaceAll(filter, "/", "|")
+// filterManifest filters whether or not the image is a manifest list
+func filterManifest(ctx context.Context, value bool) filterFunc {
 	return func(img *Image) (bool, error) {
-		if len(value) < 1 {
-			return true, nil
+		isManifestList, err := img.IsManifestList(ctx)
+		if err != nil {
+			return false, err
 		}
-		for _, name := range img.Names() {
-			newName := strings.ReplaceAll(name, "/", "|")
-			match, _ := filepath.Match(filter, newName)
-			if match {
-				return true, nil
+		return isManifestList == value, nil
+	}
+}
+
+// filterReferences creates a reference filter for matching the specified value.
+func filterReferences(value string) filterFunc {
+	return func(img *Image) (bool, error) {
+		refs, err := img.NamesReferences()
+		if err != nil {
+			return false, err
+		}
+
+		for _, ref := range refs {
+			refString := ref.String() // FQN with tag/digest
+			candidates := []string{refString}
+
+			// Split the reference into 3 components (twice if digested/tagged):
+			// 1) Fully-qualified reference
+			// 2) Without domain
+			// 3) Without domain and path
+			if named, isNamed := ref.(reference.Named); isNamed {
+				candidates = append(candidates,
+					reference.Path(named), // path/name without tag/digest (Path() removes it)
+					refString[strings.LastIndex(refString, "/")+1:]) // name with tag/digest
+
+				trimmedString := reference.TrimNamed(named).String()
+				if refString != trimmedString {
+					tagOrDigest := refString[len(trimmedString):]
+					candidates = append(candidates,
+						trimmedString, // FQN without tag/digest
+						reference.Path(named)+tagOrDigest, // path/name with tag/digest
+						trimmedString[strings.LastIndex(trimmedString, "/")+1:]) // name without tag/digest
+				}
+			}
+
+			for _, candidate := range candidates {
+				// path.Match() is also used by Docker's reference.FamiliarMatch().
+				matched, _ := path.Match(value, candidate)
+				if matched {
+					return true, nil
+				}
 			}
 		}
 		return false, nil
@@ -221,9 +367,9 @@ func filterContainers(value string, fn IsExternalContainerFunc) filterFunc {
 	}
 }
 
 // filterDangling creates a dangling filter for matching the specified value.
-func filterDangling(ctx context.Context, value bool) filterFunc {
+func filterDangling(ctx context.Context, value bool, tree *layerTree) filterFunc {
 	return func(img *Image) (bool, error) {
-		isDangling, err := img.IsDangling(ctx)
+		isDangling, err := img.isDangling(ctx, tree)
 		if err != nil {
 			return false, err
 		}
@@ -241,9 +387,9 @@ func filterID(value string) filterFunc {
 // filterIntermediate creates an intermediate filter for images. An image is
 // considered to be an intermediate image if it is dangling (i.e., no tags) and
 // has no children (i.e., no other image depends on it).
-func filterIntermediate(ctx context.Context, value bool) filterFunc {
+func filterIntermediate(ctx context.Context, value bool, tree *layerTree) filterFunc {
 	return func(img *Image) (bool, error) {
-		isIntermediate, err := img.IsIntermediate(ctx)
+		isIntermediate, err := img.isIntermediate(ctx, tree)
 		if err != nil {
 			return false, err
 		}
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
index 00a2d620ef4..661ca159b48 100644
--- a/vendor/github.com/containers/common/libimage/image.go
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -44,6 +44,8 @@ type Image struct {
 		completeInspectData *ImageData
 		// Corresponding OCI image.
 		ociv1Image *ociv1.Image
+		// Names() parsed into references.
+		namesReferences []reference.Reference
 	}
 }
 
@@ -59,6 +61,7 @@ func (i *Image) reload() error {
 	i.cached.partialInspectData = nil
 	i.cached.completeInspectData = nil
 	i.cached.ociv1Image = nil
+	i.cached.namesReferences = nil
 	return nil
 }
 
@@ -89,6 +92,23 @@ func (i *Image) Names() []string {
 	return i.storageImage.Names
 }
 
+// NamesReferences returns Names() as references.
+func (i *Image) NamesReferences() ([]reference.Reference, error) {
+	if i.cached.namesReferences != nil {
+		return i.cached.namesReferences, nil
+	}
+	refs := make([]reference.Reference, 0, len(i.Names()))
+	for _, name := range i.Names() {
+		ref, err := reference.Parse(name)
+		if err != nil {
+			return nil, err
+		}
+		refs = append(refs, ref)
+	}
+	i.cached.namesReferences = refs
+	return refs, nil
+}
+
 // StorageImage returns the underlying storage.Image.
 func (i *Image) StorageImage() *storage.Image {
 	return i.storageImage
@@ -128,10 +148,16 @@ func (i *Image) IsReadOnly() bool {
 // IsDangling returns true if the image is dangling, that is an untagged image
 // without children.
 func (i *Image) IsDangling(ctx context.Context) (bool, error) {
+	return i.isDangling(ctx, nil)
+}
+
+// isDangling returns true if the image is dangling, that is an untagged image
+// without children. If tree is nil, it will be created for this invocation only.
+func (i *Image) isDangling(ctx context.Context, tree *layerTree) (bool, error) {
 	if len(i.Names()) > 0 {
 		return false, nil
 	}
-	children, err := i.getChildren(ctx, false)
+	children, err := i.getChildren(ctx, false, tree)
 	if err != nil {
 		return false, err
 	}
@@ -141,10 +167,17 @@ func (i *Image) IsDangling(ctx context.Context) (bool, error) {
 // IsIntermediate returns true if the image is an intermediate image, that is
 // an untagged image with children.
 func (i *Image) IsIntermediate(ctx context.Context) (bool, error) {
+	return i.isIntermediate(ctx, nil)
+}
+
+// isIntermediate returns true if the image is an intermediate image, that is
+// an untagged image with children. If tree is nil, it will be created for this
+// invocation only.
+func (i *Image) isIntermediate(ctx context.Context, tree *layerTree) (bool, error) { if len(i.Names()) > 0 { return false, nil } - children, err := i.getChildren(ctx, false) + children, err := i.getChildren(ctx, false, tree) if err != nil { return false, err } @@ -189,7 +222,7 @@ func (i *Image) Parent(ctx context.Context) (*Image, error) { // HasChildren returns indicates if the image has children. func (i *Image) HasChildren(ctx context.Context) (bool, error) { - children, err := i.getChildren(ctx, false) + children, err := i.getChildren(ctx, false, nil) if err != nil { return false, err } @@ -198,7 +231,7 @@ func (i *Image) HasChildren(ctx context.Context) (bool, error) { // Children returns the image's children. func (i *Image) Children(ctx context.Context) ([]*Image, error) { - children, err := i.getChildren(ctx, true) + children, err := i.getChildren(ctx, true, nil) if err != nil { return nil, err } @@ -206,13 +239,16 @@ func (i *Image) Children(ctx context.Context) ([]*Image, error) { } // getChildren returns a list of imageIDs that depend on the image. If all is -// false, only the first child image is returned. -func (i *Image) getChildren(ctx context.Context, all bool) ([]*Image, error) { - tree, err := i.runtime.layerTree() - if err != nil { - return nil, err +// false, only the first child image is returned. If tree is nil, it will be +// created for this invocation only. +func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) { + if tree == nil { + t, err := i.runtime.layerTree() + if err != nil { + return nil, err + } + tree = t } - return tree.children(ctx, i, all) } @@ -608,8 +644,7 @@ func (i *Image) NamedRepoTags() ([]reference.Named, error) { } // inRepoTags looks for the specified name/tag pair in the image's repo tags. -// Note that tag may be empty. -func (i *Image) inRepoTags(name, tag string) (reference.Named, error) { +func (i *Image) inRepoTags(namedTagged reference.NamedTagged) (reference.Named, error) { repoTags, err := i.NamedRepoTags() if err != nil { return nil, err @@ -620,8 +655,10 @@ func (i *Image) inRepoTags(name, tag string) (reference.Named, error) { return nil, err } + name := namedTagged.Name() + tag := namedTagged.Tag() for _, pair := range pairs { - if tag != "" && tag != pair.Tag { + if tag != pair.Tag { continue } if !strings.HasSuffix(pair.Name, name) { diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go index 14020244050..683a2dc9806 100644 --- a/vendor/github.com/containers/common/libimage/image_config.go +++ b/vendor/github.com/containers/common/libimage/image_config.go @@ -95,9 +95,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: // For now: we only support key=value // We will attempt to strip quotation marks if present. 
- var ( - key, val string - ) + var key, val string splitEnv := strings.SplitN(value, "=", 2) key = splitEnv[0] diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index bcfb4e1295f..3db392784e9 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -2,9 +2,11 @@ package libimage import ( "context" + "fmt" "net/url" "os" + "github.com/containers/common/pkg/download" storageTransport "github.com/containers/image/v5/storage" tarballTransport "github.com/containers/image/v5/tarball" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -47,21 +49,23 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption ic = config.ImageConfig } - hist := []v1.History{ + history := []v1.History{ {Comment: options.CommitMessage}, } config := v1.Image{ Config: ic, - History: hist, + History: history, OS: options.OS, Architecture: options.Arch, + Variant: options.Variant, } u, err := url.ParseRequestURI(path) if err == nil && u.Scheme != "" { // If source is a URL, download the file. - file, err := r.downloadFromURL(path) + fmt.Printf("Downloading from %q\n", path) + file, err := download.FromURL(r.systemContext.BigFilesTemporaryDir, path) if err != nil { return "", err } diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go index a872e5cf9d1..05d60edfc1e 100644 --- a/vendor/github.com/containers/common/libimage/inspect.go +++ b/vendor/github.com/containers/common/libimage/inspect.go @@ -50,19 +50,39 @@ type RootFS struct { Layers []digest.Digest `json:"Layers"` } -// Inspect inspects the image. Use `withSize` to also perform the -// comparatively expensive size computation of the image. -func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) { +// InspectOptions allow for customizing inspecting images. +type InspectOptions struct { + // Compute the size of the image (expensive). + WithSize bool + // Compute the parent of the image (expensive). + WithParent bool +} + +// Inspect inspects the image. 
+func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageData, error) { logrus.Debugf("Inspecting image %s", i.ID()) + if options == nil { + options = &InspectOptions{} + } + if i.cached.completeInspectData != nil { - if withSize && i.cached.completeInspectData.Size == int64(-1) { + if options.WithSize && i.cached.completeInspectData.Size == int64(-1) { size, err := i.Size() if err != nil { return nil, err } i.cached.completeInspectData.Size = size } + if options.WithParent && i.cached.completeInspectData.Parent == "" { + parentImage, err := i.Parent(ctx) + if err != nil { + return nil, err + } + if parentImage != nil { + i.cached.completeInspectData.Parent = parentImage.ID() + } + } return i.cached.completeInspectData, nil } @@ -75,10 +95,7 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) if err != nil { return nil, err } - parentImage, err := i.Parent(ctx) - if err != nil { - return nil, err - } + repoTags, err := i.RepoTags() if err != nil { return nil, err @@ -93,7 +110,7 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) } size := int64(-1) - if withSize { + if options.WithSize { size, err = i.Size() if err != nil { return nil, err @@ -124,8 +141,14 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) NamesHistory: i.NamesHistory(), } - if parentImage != nil { - data.Parent = parentImage.ID() + if options.WithParent { + parentImage, err := i.Parent(ctx) + if err != nil { + return nil, err + } + if parentImage != nil { + data.Parent = parentImage.ID() + } } // Determine the format of the image. How we determine certain data @@ -164,7 +187,12 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) return nil, err } data.Comment = dockerManifest.Comment + // NOTE: Health checks may be listed in the container config or + // the config. 
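The `InspectOptions` struct introduced above replaces the old `withSize` boolean and makes both expensive computations opt-in. A hedged usage sketch against the exported libimage API (field names taken from this diff; the `Printf` is illustrative):

```go
package example

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

// inspectFully asks for both optional (and expensive) fields. Passing a nil
// options pointer would skip the size and parent computations entirely.
func inspectFully(ctx context.Context, img *libimage.Image) error {
	data, err := img.Inspect(ctx, &libimage.InspectOptions{
		WithSize:   true, // fills data.Size (otherwise -1)
		WithParent: true, // fills data.Parent (otherwise "")
	})
	if err != nil {
		return err
	}
	fmt.Printf("size=%d parent=%s\n", data.Size, data.Parent)
	return nil
}
```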
data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck + if data.HealthCheck == nil { + data.HealthCheck = dockerManifest.Config.Healthcheck + } } if data.Annotations == nil { @@ -185,7 +213,6 @@ func (i *Image) inspectInfo(ctx context.Context) (*types.ImageInspectInfo, error ref, err := i.StorageReference() if err != nil { - return nil, err } diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 74a1870e0ff..4dfac710654 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -35,17 +35,6 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( var loadErrors []error for _, f := range []func() ([]string, string, error){ - // DOCKER-ARCHIVE - must be first (see containers/podman/issues/10809) - func() ([]string, string, error) { - logrus.Debugf("-> Attempting to load %q as a Docker archive", path) - ref, err := dockerArchiveTransport.ParseReference(path) - if err != nil { - return nil, dockerArchiveTransport.Transport.Name(), err - } - images, err := r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions) - return images, dockerArchiveTransport.Transport.Name(), err - }, - // OCI func() ([]string, string, error) { logrus.Debugf("-> Attempting to load %q as an OCI directory", path) @@ -68,6 +57,17 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( return images, ociArchiveTransport.Transport.Name(), err }, + // DOCKER-ARCHIVE + func() ([]string, string, error) { + logrus.Debugf("-> Attempting to load %q as a Docker archive", path) + ref, err := dockerArchiveTransport.ParseReference(path) + if err != nil { + return nil, dockerArchiveTransport.Transport.Name(), err + } + images, err := r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions) + return images, dockerArchiveTransport.Transport.Name(), err + }, + // DIR func() ([]string, string, error) { logrus.Debugf("-> Attempting to load %q as a Docker dir", path) diff --git a/vendor/github.com/containers/common/libimage/manifests/copy.go b/vendor/github.com/containers/common/libimage/manifests/copy.go index 7e651a46c02..578b64ca838 100644 --- a/vendor/github.com/containers/common/libimage/manifests/copy.go +++ b/vendor/github.com/containers/common/libimage/manifests/copy.go @@ -4,12 +4,10 @@ import ( "github.com/containers/image/v5/signature" ) -var ( - // storageAllowedPolicyScopes overrides the policy for local storage - // to ensure that we can read images from it. - storageAllowedPolicyScopes = signature.PolicyTransportScopes{ - "": []signature.PolicyRequirement{ - signature.NewPRInsecureAcceptAnything(), - }, - } -) +// storageAllowedPolicyScopes overrides the policy for local storage +// to ensure that we can read images from it. +var storageAllowedPolicyScopes = signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{ + signature.NewPRInsecureAcceptAnything(), + }, +} diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 8d1abfba9a3..2624dee785a 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -27,6 +27,12 @@ import ( const instancesData = "instances.json" +// LookupReferenceFunc return an image reference based on the specified one. 
+// The returned reference can return custom ImageSource or ImageDestination +// objects which intercept or filter blobs, manifests, and signatures as +// they are read and written. +type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error) + // ErrListImageUnknown is returned when we attempt to create an image reference // for a List that has not yet been saved to an image. var ErrListImageUnknown = stderrors.New("unable to determine which image holds the manifest list") @@ -57,6 +63,7 @@ type PushOptions struct { SignBy string // fingerprint of GPG key to use to sign images RemoveSignatures bool // true to discard signatures in images ManifestType string // the format to use when saving the list - possible options are oci, v2s1, and v2s2 + SourceFilter LookupReferenceFunc // filter the list source } // Create creates a new list containing information about the specified image, @@ -221,6 +228,11 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push if err != nil { return nil, "", err } + if options.SourceFilter != nil { + if src, err = options.SourceFilter(src); err != nil { + return nil, "", err + } + } copyOptions := &cp.Options{ ImageListSelection: options.ImageListSelection, Instances: options.Instances, @@ -353,9 +365,12 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } if instanceInfo.OS == "" { instanceInfo.OS = config.OS + instanceInfo.OSVersion = config.OSVersion + instanceInfo.OSFeatures = config.OSFeatures } if instanceInfo.Architecture == "" { instanceInfo.Architecture = config.Architecture + instanceInfo.Variant = config.Variant } } manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) @@ -369,10 +384,8 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } instanceInfo.instanceDigest = &manifestDigest instanceInfo.Size = int64(len(manifestBytes)) - } else { - if manifestDigest == "" { - manifestDigest = *instanceInfo.instanceDigest - } + } else if manifestDigest == "" { + manifestDigest = *instanceInfo.instanceDigest } err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations) if err != nil { @@ -390,9 +403,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag func (l *list) Remove(instanceDigest digest.Digest) error { err := l.List.Remove(instanceDigest) if err == nil { - if _, needToDelete := l.instances[instanceDigest]; needToDelete { - delete(l.instances, instanceDigest) - } + delete(l.instances, instanceDigest) } return err } diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go index bfea807c8b0..7ceb6283063 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -1,13 +1,51 @@ package libimage import ( + "runtime" "strings" + "github.com/containerd/containerd/platforms" "github.com/containers/image/v5/docker/reference" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +// NormalizePlatform normalizes (according to the OCI spec) the specified os, +// arch and variant. If left empty, the individual item will not be normalized. 
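The `SourceFilter` hook added to `PushOptions` above lets callers intercept the manifest list's source reference before the copy starts. A sketch of a pass-through filter, assuming only the types shown in this diff (a real filter would wrap the reference to intercept blobs, manifests, or signatures):

```go
package example

import (
	"github.com/containers/common/libimage/manifests"
	"github.com/containers/image/v5/types"
	"github.com/sirupsen/logrus"
)

// loggingFilter is a sketch of a LookupReferenceFunc: it inspects the source
// reference of a manifest-list push and hands it back unchanged.
func loggingFilter(ref types.ImageReference) (types.ImageReference, error) {
	logrus.Debugf("pushing manifest list from %s", ref.StringWithinTransport())
	return ref, nil
}

// pushOptions wires the filter into a PushOptions value.
func pushOptions() manifests.PushOptions {
	return manifests.PushOptions{SourceFilter: loggingFilter}
}
```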
+func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) { + os, arch, variant = rawOS, rawArch, rawVariant + if os == "" { + os = runtime.GOOS + } + if arch == "" { + arch = runtime.GOARCH + } + rawPlatform := os + "/" + arch + if variant != "" { + rawPlatform += "/" + variant + } + + normalizedPlatform, err := platforms.Parse(rawPlatform) + if err != nil { + logrus.Debugf("Error normalizing platform: %v", err) + return rawOS, rawArch, rawVariant + } + logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform) + os = rawOS + if rawOS != "" { + os = normalizedPlatform.OS + } + arch = rawArch + if rawArch != "" { + arch = normalizedPlatform.Architecture + } + variant = rawVariant + if rawVariant != "" { + variant = normalizedPlatform.Variant + } + return os, arch, variant +} + // NormalizeName normalizes the provided name according to the conventions by // Podman and Buildah. If tag and digest are missing, the "latest" tag will be // used. If it's a short name, it will be prefixed with "localhost/". diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 1c322c37e84..ff93b6ed888 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "runtime" "strings" "time" @@ -21,6 +22,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" digest "github.com/opencontainers/go-digest" + ociSpec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -169,6 +171,20 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP return localImages, pullError } +// nameFromAnnotations returns a reference string to be used as an image name, +// or an empty string. The annotations map may be nil. +func nameFromAnnotations(annotations map[string]string) string { + if annotations == nil { + return "" + } + // buildkit/containerd are using a custom annotation see + // containers/podman/issues/12560. + if annotations["io.containerd.image.name"] != "" { + return annotations["io.containerd.image.name"] + } + return annotations[ociSpec.AnnotationRefName] +} + // copyFromDefault is the default copier for a number of transports. Other // transports require some specific dancing, sometimes Yoga. func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) { @@ -201,15 +217,16 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, if err != nil { return nil, err } - // if index.json has no reference name, compute the image ID instead - if manifestDescriptor.Annotations == nil || manifestDescriptor.Annotations["org.opencontainers.image.ref.name"] == "" { + storageName = nameFromAnnotations(manifestDescriptor.Annotations) + switch len(storageName) { + case 0: + // If there's no reference name in the annotations, compute an ID. 
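A quick illustration of `NormalizePlatform()` above. Assuming containerd's platform parser behaves as documented, the non-OCI architecture name `"aarch64"` should come back as `"arm64"`, while fields the caller left empty are returned empty:

```go
package main

import (
	"fmt"

	"github.com/containers/common/libimage"
)

func main() {
	// "aarch64" is not an OCI architecture name; the containerd platforms
	// parser maps it to "arm64". Empty inputs stay empty on return.
	os, arch, variant := libimage.NormalizePlatform("linux", "aarch64", "")
	fmt.Println(os, arch, variant) // expected: "linux arm64" with empty variant
}
```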
storageName, err = getImageID(ctx, ref, nil) if err != nil { return nil, err } imageName = "sha256:" + storageName[1:] - } else { - storageName = manifestDescriptor.Annotations["org.opencontainers.image.ref.name"] + default: named, err := NormalizeName(storageName) if err != nil { return nil, err @@ -227,8 +244,14 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, imageName = named.String() default: - storageName = toLocalImageName(ref.StringWithinTransport()) - imageName = storageName + // Path-based transports (e.g., dir) may include invalid + // characters, so we should pessimistically generate an ID + // instead of looking at the StringWithinTransport(). + storageName, err = getImageID(ctx, ref, nil) + if err != nil { + return nil, err + } + imageName = "sha256:" + storageName[1:] } // Create a storage reference. @@ -424,12 +447,18 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str // If there's already a local image "localhost/foo", then we should // attempt pulling that instead of doing the full short-name dance. // - // NOTE: we must ignore the platform of a local image when doing - // lookups here, even if arch/os/variant is set. Some images set an - // incorrect or even invalid platform (see containers/podman/issues/10682). - // Doing the lookup while ignoring the platform checks prevents - // redundantly downloading the same image. - localImage, resolvedImageName, err = r.LookupImage(imageName, nil) + // NOTE that we only do platform checks if the specified values differ + // from the local platform. Unfortunately, there are many images used + // in the wild which don't set the correct value(s) in the config + // causing various issues such as containers/podman/issues/10682. + lookupImageOptions := &LookupImageOptions{Variant: options.Variant} + if options.Architecture != runtime.GOARCH { + lookupImageOptions.Architecture = options.Architecture + } + if options.OS != runtime.GOOS { + lookupImageOptions.OS = options.OS + } + localImage, resolvedImageName, err = r.LookupImage(imageName, lookupImageOptions) if err != nil && errors.Cause(err) != storage.ErrImageUnknown { logrus.Errorf("Looking up %s in local storage: %v", imageName, err) } @@ -442,45 +471,26 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str } } - customPlatform := false - if len(options.Architecture)+len(options.OS)+len(options.Variant) > 0 { - customPlatform = true - // Unless the pull policy is "always", we must pessimistically assume - // that the local image has an invalid architecture (see - // containers/podman/issues/10682). Hence, whenever the user requests - // a custom platform, set the pull policy to "always" to make sure - // we're pulling down the image. + customPlatform := len(options.Architecture)+len(options.OS)+len(options.Variant) > 0 + if customPlatform && pullPolicy != config.PullPolicyAlways && pullPolicy != config.PullPolicyNever { + // Unless the pull policy is always/never, we must + // pessimistically assume that the local image has an invalid + // architecture (see containers/podman/issues/10682). Hence, + // whenever the user requests a custom platform, set the pull + // policy to "newer" to make sure we're pulling down the + // correct image. // - // NOTE that this is will even override --pull={false,never}. This is - // very likely a bug but a consistent one in Podman/Buildah and should - // be addressed at a later point. 
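Restating the platform-lookup heuristic above as a stand-alone sketch: arch/OS are only pinned in the lookup options when they differ from the host platform, so local images with a wrongly recorded platform still resolve. `lookupOptionsFor` is a hypothetical helper for illustration, not part of the library:

```go
package example

import (
	"runtime"

	"github.com/containers/common/libimage"
)

// lookupOptionsFor mirrors the pull-path heuristic above: only set arch/OS
// when they differ from the local platform, so that images with a wrongly
// recorded platform in their config can still be found locally.
func lookupOptionsFor(arch, os, variant string) *libimage.LookupImageOptions {
	opts := &libimage.LookupImageOptions{Variant: variant}
	if arch != runtime.GOARCH {
		opts.Architecture = arch
	}
	if os != runtime.GOOS {
		opts.OS = os
	}
	return opts
}
```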
- if pullPolicy != config.PullPolicyAlways { - switch { - // User input clearly refer to a local image. - case strings.HasPrefix(imageName, "localhost/"): - logrus.Debugf("Enforcing pull policy to %q to support custom platform (arch: %q, os: %q, variant: %q)", "never", options.Architecture, options.OS, options.Variant) - pullPolicy = config.PullPolicyNever - - // Image resolved to a local one, so let's still have a - // look at the registries or aliases but use it - // otherwise. - case strings.HasPrefix(resolvedImageName, "localhost/"): - logrus.Debugf("Enforcing pull policy to %q to support custom platform (arch: %q, os: %q, variant: %q)", "newer", options.Architecture, options.OS, options.Variant) - pullPolicy = config.PullPolicyNewer - - default: - logrus.Debugf("Enforcing pull policy to %q to support custom platform (arch: %q, os: %q, variant: %q)", "always", options.Architecture, options.OS, options.Variant) - pullPolicy = config.PullPolicyAlways - } - } + // NOTE that this is will even override --pull={false,never}. + pullPolicy = config.PullPolicyNewer + logrus.Debugf("Enforcing pull policy to %q to pull custom platform (arch: %q, os: %q, variant: %q) - local image may mistakenly specify wrong platform", pullPolicy, options.Architecture, options.OS, options.Variant) } if pullPolicy == config.PullPolicyNever { if localImage != nil { - logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName) + logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName) return []string{resolvedImageName}, nil } - logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName) + logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName) return nil, errors.Wrap(storage.ErrImageUnknown, imageName) } @@ -518,6 +528,12 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str sys := r.systemContextCopy() resolved, err := shortnames.Resolve(sys, imageName) if err != nil { + // TODO: that is a too big of a hammer since we should only + // ignore errors that indicate that there's no alias and no + // USRs. Must be addressed in c/image first. + if localImage != nil && pullPolicy == config.PullPolicyNewer { + return []string{resolvedImageName}, nil + } return nil, err } diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 7f25df20053..974b50b50bb 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -21,6 +21,16 @@ import ( // Faster than the standard library, see https://github.com/json-iterator/go. var json = jsoniter.ConfigCompatibleWithStandardLibrary +// tmpdir returns a path to a temporary directory. +func tmpdir() string { + tmpdir := os.Getenv("TMPDIR") + if tmpdir == "" { + tmpdir = "/var/tmp" + } + + return tmpdir +} + // RuntimeOptions allow for creating a customized Runtime. type RuntimeOptions struct { // The base system context of the runtime which will be used throughout @@ -64,7 +74,7 @@ func (r *Runtime) SystemContext() *types.SystemContext { // Returns a copy of the runtime's system context. 
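The `tmpdir()` helper above picks the scratch space for downloads. A self-contained restatement; the `/var/tmp` default matters because image blobs can exceed a tmpfs-backed `/tmp`:

```go
package main

import (
	"fmt"
	"os"
)

// tmpdirFallback mirrors the tmpdir() helper above: honor $TMPDIR but
// default to /var/tmp, which is usually disk-backed rather than tmpfs.
func tmpdirFallback() string {
	if dir := os.Getenv("TMPDIR"); dir != "" {
		return dir
	}
	return "/var/tmp"
}

func main() { fmt.Println(tmpdirFallback()) }
```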
func (r *Runtime) systemContextCopy() *types.SystemContext { var sys types.SystemContext - deepcopy.Copy(&sys, &r.systemContext) + _ = deepcopy.Copy(&sys, &r.systemContext) return &sys } @@ -180,6 +190,8 @@ type LookupImageOptions struct { returnManifestIfNoInstance bool } +var errNoHexValue = errors.New("invalid format: no 64-byte hexadecimal value") + // Lookup Image looks up `name` in the local container storage. Returns the // image and the name it has been found with. Note that name may also use the // `containers-storage:` prefix used to refer to the containers-storage @@ -223,13 +235,29 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, name = normalizedName } + byDigest := false originalName := name - idByDigest := false if strings.HasPrefix(name, "sha256:") { - // Strip off the sha256 prefix so it can be parsed later on. - idByDigest = true + byDigest = true name = strings.TrimPrefix(name, "sha256:") } + byFullID := reference.IsFullIdentifier(name) + + if byDigest && !byFullID { + return nil, "", fmt.Errorf("%s: %v", originalName, errNoHexValue) + } + + // If the name clearly refers to a local image, try to look it up. + if byFullID || byDigest { + img, err := r.lookupImageInLocalStorage(originalName, name, options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, originalName, nil + } + return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) + } // Unless specified, set the platform specified in the system context // for later platform matching. Builder likes to set these things via @@ -243,28 +271,14 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, if options.Variant == "" { options.Variant = r.systemContext.VariantChoice } - - // First, check if we have an exact match in the storage. Maybe an ID - // or a fully-qualified image name. - img, err := r.lookupImageInLocalStorage(name, name, options) - if err != nil { - return nil, "", err - } - if img != nil { - return img, originalName, nil - } - - // If the name clearly referred to a local image, there's nothing we can - // do anymore. - if storageRef != nil || idByDigest { - return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) - } + // Normalize platform to be OCI compatible (e.g., "aarch64" -> "arm64"). + options.OS, options.Architecture, options.Variant = NormalizePlatform(options.OS, options.Architecture, options.Variant) // Second, try out the candidates as resolved by shortnames. This takes // "localhost/" prefixed images into account as well. candidates, err := shortnames.ResolveLocally(&r.systemContext, name) if err != nil { - return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) } // Backwards compat: normalize to docker.io as some users may very well // rely on that. @@ -282,7 +296,17 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, } } - return r.lookupImageInDigestsAndRepoTags(originalName, options) + // The specified name may refer to a short ID. Note that this *must* + // happen after the short-name expansion as done above. 
+ img, err := r.lookupImageInLocalStorage(name, name, options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, name, err + } + + return r.lookupImageInDigestsAndRepoTags(name, options) } // lookupImageInLocalStorage looks up the specified candidate for name in the @@ -379,41 +403,49 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm return nil, "", err } - if !shortnames.IsShortName(name) { - named, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, "", err - } - digested, hasDigest := named.(reference.Digested) - if !hasDigest { - return nil, "", errors.Wrap(storage.ErrImageUnknown, name) - } + ref, err := reference.Parse(name) // Warning! This is not ParseNormalizedNamed + if err != nil { + return nil, "", err + } + named, isNamed := ref.(reference.Named) + if !isNamed { + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + digested, isDigested := named.(reference.Digested) + if isDigested { logrus.Debug("Looking for image with matching recorded digests") digest := digested.Digest() for _, image := range allImages { for _, d := range image.Digests() { - if d == digest { - return image, name, nil + if d != digest { + continue + } + // Also make sure that the matching image fits all criteria (e.g., manifest list). + if _, err := r.lookupImageInLocalStorage(name, image.ID(), options); err != nil { + return nil, "", err } + return image, name, nil + } } + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + if !shortnames.IsShortName(name) { return nil, "", errors.Wrap(storage.ErrImageUnknown, name) } - // Podman compat: if we're looking for a short name but couldn't - // resolve it via the registries.conf dance, we need to look at *all* - // images and check if the name we're looking for matches a repo tag. - // Split the name into a repo/tag pair - split := strings.SplitN(name, ":", 2) - repo := split[0] - tag := "" - if len(split) == 2 { - tag = split[1] + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + namedTagged, isNammedTagged := named.(reference.NamedTagged) + if !isNammedTagged { + // NOTE: this should never happen since we already know it's + // not a digested reference. 
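The rewritten lookup path above classifies the input before any short-name resolution: a `sha256:` prefix without a full 64-byte hex ID is rejected, a full ID goes straight to local storage, and everything else falls through to short-name handling. A rough, self-contained sketch of that triage; the `fullID` regexp is a stand-in for the library's `reference.IsFullIdentifier` check:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// fullID stands in for reference.IsFullIdentifier (a full image ID is a
// 64-byte hexadecimal value).
var fullID = regexp.MustCompile(`^[0-9a-f]{64}$`)

func classify(name string) string {
	trimmed := strings.TrimPrefix(name, "sha256:")
	switch {
	case strings.HasPrefix(name, "sha256:") && !fullID.MatchString(trimmed):
		return "error: sha256: prefix without a 64-byte hexadecimal value"
	case fullID.MatchString(trimmed):
		return "local-storage lookup by ID"
	default:
		return "short-name resolution"
	}
}

func main() {
	fmt.Println(classify("sha256:" + strings.Repeat("ab", 32)))
	fmt.Println(classify("fedora"))
}
```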
+ return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", name, storage.ErrImageUnknown) } + for _, image := range allImages { - named, err := image.inRepoTags(repo, tag) + named, err := image.inRepoTags(namedTagged) if err != nil { return nil, "", err } @@ -477,13 +509,16 @@ func (r *Runtime) imageReferenceMatchesContext(ref types.ImageReference, options } if options.Architecture != "" && options.Architecture != data.Architecture { - return false, err + logrus.Debugf("architecture %q does not match architecture %q of image %s", options.Architecture, data.Architecture, ref) + return false, nil } if options.OS != "" && options.OS != data.Os { - return false, err + logrus.Debugf("OS %q does not match OS %q of image %s", options.OS, data.Os, ref) + return false, nil } if options.Variant != "" && options.Variant != data.Variant { - return false, err + logrus.Debugf("variant %q does not match variant %q of image %s", options.Variant, data.Variant, ref) + return false, nil } return true, nil @@ -539,16 +574,7 @@ func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListI } } - var filters []filterFunc - if len(options.Filters) > 0 { - compiledFilters, err := r.compileImageFilters(ctx, options) - if err != nil { - return nil, err - } - filters = append(filters, compiledFilters...) - } - - return filterImages(images, filters) + return r.filterImages(ctx, images, options) } // RemoveImagesOptions allow for customizing image removal. @@ -566,6 +592,8 @@ type RemoveImagesOptions struct { // containers using a specific image. By default, all containers in // the local containers storage will be removed (if Force is set). RemoveContainerFunc RemoveContainerFunc + // Ignore if a specified image does not exist and do not throw an error. + Ignore bool // IsExternalContainerFunc allows for checking whether the specified // container is an external one (when containers=external filter is // used). The definition of an external container can be set by @@ -651,6 +679,9 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem for _, name := range names { img, resolvedName, err := r.LookupImage(name, lookupOptions) if err != nil { + if options.Ignore && errors.Is(err, storage.ErrImageUnknown) { + continue + } appendError(err) continue } diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go index e1b8c3f75ba..fed86d4efce 100644 --- a/vendor/github.com/containers/common/libimage/save.go +++ b/vendor/github.com/containers/common/libimage/save.go @@ -68,7 +68,6 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string, } return errors.Errorf("unsupported format %q for saving images", format) - } // saveSingleImage saves the specified image name to the specified path. diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index df29bc7da7a..33a4776ce3f 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -58,6 +58,10 @@ type SearchOptions struct { InsecureSkipTLSVerify types.OptionalBool // ListTags returns the search result with available tags ListTags bool + // Registries to search if the specified term does not include a + // registry. If set, the unqualified-search registries in + // containers-registries.conf(5) are ignored. + Registries []string } // SearchFilter allows filtering images while searching. 
@@ -105,6 +109,10 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) { return sFilter, nil } +// Search searches term. If term includes a registry, only this registry will +// be used for searching. Otherwise, the unqualified-search registries in +// containers-registries.conf(5) or the ones specified in the options will be +// used. func (r *Runtime) Search(ctx context.Context, term string, options *SearchOptions) ([]SearchResult, error) { if options == nil { options = &SearchOptions{} @@ -117,10 +125,14 @@ func (r *Runtime) Search(ctx context.Context, term string, options *SearchOption // that we cannot use the reference parser from the containers/image // library as the search term may container arbitrary input such as // wildcards. See bugzilla.redhat.com/show_bug.cgi?id=1846629. - if spl := strings.SplitN(term, "/", 2); len(spl) > 1 { - searchRegistries = append(searchRegistries, spl[0]) + spl := strings.SplitN(term, "/", 2) + switch { + case len(spl) > 1: + searchRegistries = []string{spl[0]} term = spl[1] - } else { + case len(options.Registries) > 0: + searchRegistries = options.Registries + default: regs, err := sysregistriesv2.UnqualifiedSearchRegistries(r.systemContextCopy()) if err != nil { return nil, err @@ -244,7 +256,7 @@ func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry stri name = index + "/library/" + results[i].Name } params := SearchResult{ - Index: index, + Index: registry, Name: name, Description: description, Official: official, @@ -284,8 +296,9 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr paramsArr := []SearchResult{} for i := 0; i < limit; i++ { params := SearchResult{ - Name: imageRef.DockerReference().Name(), - Tag: tags[i], + Name: imageRef.DockerReference().Name(), + Tag: tags[i], + Index: registry, } paramsArr = append(paramsArr, params) } diff --git a/vendor/github.com/containers/common/libnetwork/cni/README.md b/vendor/github.com/containers/common/libnetwork/cni/README.md new file mode 100644 index 00000000000..6f57feff51b --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/README.md @@ -0,0 +1,10 @@ +This package abstracts CNI from libpod. +It implements the `ContainerNetwork` interface defined in [libpod/network/types/network.go](../types/network.go) for the CNI backend. + + +## Testing +Run the tests with: +``` +go test -v -mod=vendor -cover ./libpod/network/cni/ +``` +Run the tests as root to also test setup/teardown. This will execute CNI and therefore the cni plugins have to be installed. 
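The next file, cni_conversion.go, translates between CNI conflists and the package's `Network` type. For orientation, here is a self-contained sketch that decodes the kind of bridge plugin entry the conversion code reads; `bridgeConf` mirrors a subset of the unexported `hostLocalBridge` fields defined below, and the values are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// bridgeConf mirrors a subset of the hostLocalBridge fields (sketch only).
type bridgeConf struct {
	Type   string `json:"type"`
	Bridge string `json:"bridge"`
	IsGW   bool   `json:"isGateway"`
	MTU    int    `json:"mtu"`
}

func main() {
	// First plugin of a typical podman CNI conflist entry.
	raw := []byte(`{"type":"bridge","bridge":"cni-podman0","isGateway":true,"mtu":1500}`)
	var b bridgeConf
	if err := json.Unmarshal(raw, &b); err != nil {
		panic(err)
	}
	// isGateway=false would mark the network internal, per the logic below.
	fmt.Printf("driver=%s iface=%s internal=%v mtu=%d\n", b.Type, b.Bridge, !b.IsGW, b.MTU)
}
```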
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go new file mode 100644 index 00000000000..bda7ed7d02f --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go @@ -0,0 +1,438 @@ +//go:build linux +// +build linux + +package cni + +import ( + "encoding/json" + "io/ioutil" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/containernetworking/cni/libcni" + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath string) (*types.Network, error) { + network := types.Network{ + Name: conf.Name, + ID: getNetworkIDFromName(conf.Name), + Labels: map[string]string{}, + Options: map[string]string{}, + IPAMOptions: map[string]string{}, + } + + cniJSON := make(map[string]interface{}) + err := json.Unmarshal(conf.Bytes, &cniJSON) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal network config %s", conf.Name) + } + if args, ok := cniJSON["args"]; ok { + if key, ok := args.(map[string]interface{}); ok { + // read network labels and options from the conf file + network.Labels = getNetworkArgsFromConfList(key, podmanLabelKey) + network.Options = getNetworkArgsFromConfList(key, podmanOptionsKey) + } + } + + t, err := fileTime(confPath) + if err != nil { + return nil, err + } + network.Created = t + + firstPlugin := conf.Plugins[0] + network.Driver = firstPlugin.Network.Type + + switch firstPlugin.Network.Type { + case types.BridgeNetworkDriver: + var bridge hostLocalBridge + err := json.Unmarshal(firstPlugin.Bytes, &bridge) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal the bridge plugin config in %s", confPath) + } + network.NetworkInterface = bridge.BrName + + // if isGateway is false we have an internal network + if !bridge.IsGW { + network.Internal = true + } + + // set network options + if bridge.MTU != 0 { + network.Options["mtu"] = strconv.Itoa(bridge.MTU) + } + if bridge.Vlan != 0 { + network.Options["vlan"] = strconv.Itoa(bridge.Vlan) + } + + err = convertIPAMConfToNetwork(&network, &bridge.IPAM, confPath) + if err != nil { + return nil, err + } + + case types.MacVLANNetworkDriver, types.IPVLANNetworkDriver: + var vlan VLANConfig + err := json.Unmarshal(firstPlugin.Bytes, &vlan) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal the macvlan plugin config in %s", confPath) + } + network.NetworkInterface = vlan.Master + + // set network options + if vlan.MTU != 0 { + network.Options["mtu"] = strconv.Itoa(vlan.MTU) + } + + if vlan.Mode != "" { + network.Options["mode"] = vlan.Mode + } + + err = convertIPAMConfToNetwork(&network, &vlan.IPAM, confPath) + if err != nil { + return nil, err + } + + default: + // A warning would be good but users would get this warning every time so keep this at info level. 
+ logrus.Infof("Unsupported CNI config type %s in %s, this network can still be used but inspect or list cannot show all information", + firstPlugin.Network.Type, confPath) + } + + // check if the dnsname plugin is configured + network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname") + + return &network, nil +} + +func findPluginByName(plugins []*libcni.NetworkConfig, name string) bool { + for _, plugin := range plugins { + if plugin.Network.Type == name { + return true + } + } + return false +} + +// convertIPAMConfToNetwork converts A cni IPAMConfig to libpod network subnets. +// It returns an array of subnets and an extra bool if dhcp is configured. +func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath string) error { + switch ipam.PluginType { + case "": + network.IPAMOptions[types.Driver] = types.NoneIPAMDriver + case types.DHCPIPAMDriver: + network.IPAMOptions[types.Driver] = types.DHCPIPAMDriver + case types.HostLocalIPAMDriver: + network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver + for _, r := range ipam.Ranges { + for _, ipam := range r { + s := types.Subnet{} + + // Do not use types.ParseCIDR() because we want the ip to be + // the network address and not a random ip in the sub. + _, sub, err := net.ParseCIDR(ipam.Subnet) + if err != nil { + return err + } + s.Subnet = types.IPNet{IPNet: *sub} + + // gateway + var gateway net.IP + if ipam.Gateway != "" { + gateway = net.ParseIP(ipam.Gateway) + if gateway == nil { + return errors.Errorf("failed to parse gateway ip %s", ipam.Gateway) + } + // convert to 4 byte if ipv4 + util.NormalizeIP(&gateway) + } else if !network.Internal { + // only add a gateway address if the network is not internal + gateway, err = util.FirstIPInSubnet(sub) + if err != nil { + return errors.Errorf("failed to get first ip in subnet %s", sub.String()) + } + } + s.Gateway = gateway + + var rangeStart net.IP + var rangeEnd net.IP + if ipam.RangeStart != "" { + rangeStart = net.ParseIP(ipam.RangeStart) + if rangeStart == nil { + return errors.Errorf("failed to parse range start ip %s", ipam.RangeStart) + } + } + if ipam.RangeEnd != "" { + rangeEnd = net.ParseIP(ipam.RangeEnd) + if rangeEnd == nil { + return errors.Errorf("failed to parse range end ip %s", ipam.RangeEnd) + } + } + if rangeStart != nil || rangeEnd != nil { + s.LeaseRange = &types.LeaseRange{} + s.LeaseRange.StartIP = rangeStart + s.LeaseRange.EndIP = rangeEnd + } + if util.IsIPv6(s.Subnet.IP) { + network.IPv6Enabled = true + } + network.Subnets = append(network.Subnets, s) + } + } + default: + // This is not an error. While we only support certain ipam drivers, we + // cannot make it fail for unsupported ones. CNI is still able to use them, + // just our translation logic cannot convert this into a Network. + // For the same reason this is not warning, it would just be annoying for + // everyone using a unknown ipam driver. 
+ logrus.Infof("unsupported ipam plugin %q in %s", ipam.PluginType, confPath) + network.IPAMOptions[types.Driver] = ipam.PluginType + } + return nil +} + +// getNetworkArgsFromConfList returns the map of args in a conflist, argType should be labels or options +func getNetworkArgsFromConfList(args map[string]interface{}, argType string) map[string]string { + if args, ok := args[argType]; ok { + if labels, ok := args.(map[string]interface{}); ok { + result := make(map[string]string, len(labels)) + for k, v := range labels { + if v, ok := v.(string); ok { + result[k] = v + } + } + return result + } + } + return map[string]string{} +} + +// createCNIConfigListFromNetwork will create a cni config file from the given network. +// It returns the cni config and the path to the file where the config was written. +// Set writeToDisk to false to only add this network into memory. +func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writeToDisk bool) (*libcni.NetworkConfigList, string, error) { + var ( + routes []ipamRoute + ipamRanges [][]ipamLocalHostRangeConf + ipamConf *ipamConfig + err error + ) + + ipamDriver := network.IPAMOptions[types.Driver] + switch ipamDriver { + case types.HostLocalIPAMDriver: + defIpv4Route := false + defIpv6Route := false + for _, subnet := range network.Subnets { + ipam := newIPAMLocalHostRange(subnet.Subnet, subnet.LeaseRange, subnet.Gateway) + ipamRanges = append(ipamRanges, []ipamLocalHostRangeConf{*ipam}) + + // only add default route for not internal networks + if !network.Internal { + ipv6 := util.IsIPv6(subnet.Subnet.IP) + if !ipv6 && defIpv4Route { + continue + } + if ipv6 && defIpv6Route { + continue + } + + if ipv6 { + defIpv6Route = true + } else { + defIpv4Route = true + } + route, err := newIPAMDefaultRoute(ipv6) + if err != nil { + return nil, "", err + } + routes = append(routes, route) + } + } + conf := newIPAMHostLocalConf(routes, ipamRanges) + ipamConf = &conf + case types.DHCPIPAMDriver: + ipamConf = &ipamConfig{PluginType: "dhcp"} + + case types.NoneIPAMDriver: + // do nothing + default: + return nil, "", errors.Errorf("unsupported ipam driver %q", ipamDriver) + } + + opts, err := parseOptions(network.Options, network.Driver) + if err != nil { + return nil, "", err + } + + isGateway := true + ipMasq := true + if network.Internal { + isGateway = false + ipMasq = false + } + // create CNI plugin configuration + // explicitly use CNI version 0.4.0 here, to use v1.0.0 at least containernetwork-plugins-1.0.1 has to be installed + // the dnsname plugin also needs to be updated for 1.0.0 + // TODO change to 1.0.0 when most distros support it + ncList := newNcList(network.Name, "0.4.0", network.Labels, network.Options) + var plugins []interface{} + + switch network.Driver { + case types.BridgeNetworkDriver: + bridge := newHostLocalBridge(network.NetworkInterface, isGateway, ipMasq, opts.mtu, opts.vlan, ipamConf) + plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(), newTuningPlugin()) + // if we find the dnsname plugin we add configuration for it + if hasDNSNamePlugin(n.cniPluginDirs) && network.DNSEnabled { + // Note: in the future we might like to allow for dynamic domain names + plugins = append(plugins, newDNSNamePlugin(defaultPodmanDomainName)) + } + + case types.MacVLANNetworkDriver: + plugins = append(plugins, newVLANPlugin(types.MacVLANNetworkDriver, network.NetworkInterface, opts.vlanPluginMode, opts.mtu, ipamConf)) + + case types.IPVLANNetworkDriver: + plugins = append(plugins, 
newVLANPlugin(types.IPVLANNetworkDriver, network.NetworkInterface, opts.vlanPluginMode, opts.mtu, ipamConf)) + + default: + return nil, "", errors.Errorf("driver %q is not supported by cni", network.Driver) + } + ncList["plugins"] = plugins + b, err := json.MarshalIndent(ncList, "", " ") + if err != nil { + return nil, "", err + } + cniPathName := "" + if writeToDisk { + cniPathName = filepath.Join(n.cniConfigDir, network.Name+".conflist") + err = ioutil.WriteFile(cniPathName, b, 0o644) + if err != nil { + return nil, "", err + } + t, err := fileTime(cniPathName) + if err != nil { + return nil, "", err + } + network.Created = t + } else { + network.Created = time.Now() + } + config, err := libcni.ConfListFromBytes(b) + if err != nil { + return nil, "", err + } + return config, cniPathName, nil +} + +func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry, error) { + cniPorts := make([]cniPortMapEntry, 0, len(ports)) + for _, port := range ports { + if port.Protocol == "" { + return nil, errors.New("port protocol should not be empty") + } + protocols := strings.Split(port.Protocol, ",") + + for _, protocol := range protocols { + if !pkgutil.StringInSlice(protocol, []string{"tcp", "udp", "sctp"}) { + return nil, errors.Errorf("unknown port protocol %s", protocol) + } + cniPort := cniPortMapEntry{ + HostPort: int(port.HostPort), + ContainerPort: int(port.ContainerPort), + HostIP: port.HostIP, + Protocol: protocol, + } + cniPorts = append(cniPorts, cniPort) + for i := 1; i < int(port.Range); i++ { + cniPort := cniPortMapEntry{ + HostPort: int(port.HostPort) + i, + ContainerPort: int(port.ContainerPort) + i, + HostIP: port.HostIP, + Protocol: protocol, + } + cniPorts = append(cniPorts, cniPort) + } + } + } + return cniPorts, nil +} + +func removeMachinePlugin(conf *libcni.NetworkConfigList) *libcni.NetworkConfigList { + plugins := make([]*libcni.NetworkConfig, 0, len(conf.Plugins)) + for _, net := range conf.Plugins { + if net.Network.Type != "podman-machine" { + plugins = append(plugins, net) + } + } + conf.Plugins = plugins + return conf +} + +type options struct { + vlan int + mtu int + vlanPluginMode string +} + +func parseOptions(networkOptions map[string]string, networkDriver string) (*options, error) { + opt := &options{} + var err error + for k, v := range networkOptions { + switch k { + case "mtu": + opt.mtu, err = internalutil.ParseMTU(v) + if err != nil { + return nil, err + } + + case "vlan": + opt.vlan, err = internalutil.ParseVlan(v) + if err != nil { + return nil, err + } + + case "mode": + switch networkDriver { + case types.MacVLANNetworkDriver: + if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) { + return nil, errors.Errorf("unknown macvlan mode %q", v) + } + case types.IPVLANNetworkDriver: + if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) { + return nil, errors.Errorf("unknown ipvlan mode %q", v) + } + default: + return nil, errors.Errorf("cannot set option \"mode\" with driver %q", networkDriver) + } + opt.vlanPluginMode = v + + default: + return nil, errors.Errorf("unsupported network option %s", k) + } + } + return opt, nil +} + +func fileTime(file string) (time.Time, error) { + var st unix.Stat_t + for { + err := unix.Stat(file, &st) + if err == nil { + break + } + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return time.Time{}, &os.PathError{Path: file, Op: "stat", Err: err} + } + } + return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec)), nil //nolint:unconvert // On some platforms Sec and Nsec are int32. 
+}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go
new file mode 100644
index 00000000000..6bfa8d63b1a
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go
@@ -0,0 +1,111 @@
+// Copyright 2016 CNI authors
+// Copyright 2021 Podman authors
+//
+// This code was originally copied from github.com/containernetworking/cni
+// but has been changed to better fit the Podman use case.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package cni
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/containernetworking/cni/pkg/invoke"
+	"github.com/containernetworking/cni/pkg/version"
+	"github.com/containers/storage/pkg/unshare"
+)
+
+type cniExec struct {
+	version.PluginDecoder
+}
+
+type cniPluginError struct {
+	plugin  string
+	Code    uint   `json:"code"`
+	Msg     string `json:"msg"`
+	Details string `json:"details,omitempty"`
+}
+
+// Error returns a nicely formatted error message for cni plugin errors.
+func (e *cniPluginError) Error() string {
+	err := fmt.Sprintf("cni plugin %s failed", e.plugin)
+	if e.Msg != "" {
+		err = fmt.Sprintf("%s: %s", err, e.Msg)
+	} else if e.Code > 0 {
+		err = fmt.Sprintf("%s with error code %d", err, e.Code)
+	}
+	if e.Details != "" {
+		err = fmt.Sprintf("%s: %s", err, e.Details)
+	}
+	return err
+}
+
+// ExecPlugin executes the cni plugin and returns its stdout or an error.
+func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+	stdout := &bytes.Buffer{}
+	stderr := &bytes.Buffer{}
+	c := exec.CommandContext(ctx, pluginPath)
+	c.Env = environ
+	c.Stdin = bytes.NewBuffer(stdinData)
+	c.Stdout = stdout
+	c.Stderr = stderr
+
+	// The dnsname plugin tries to use XDG_RUNTIME_DIR to store files.
+	// podman run has XDG_RUNTIME_DIR set, so the cni plugin can use it,
+	// but XDG_RUNTIME_DIR is unset for the conmon process of rootful
+	// users. This causes issues since the cleanup process is spawned by
+	// conmon and thus does not have XDG_RUNTIME_DIR set to the same value
+	// as podman run. Because of this, dnsname cannot find its config
+	// files and cannot clean up correctly. To fix this, also unset
+	// XDG_RUNTIME_DIR for the cni plugins when running as rootful.
+	if !unshare.IsRootless() {
+		c.Env = append(c.Env, "XDG_RUNTIME_DIR=")
+	}
+
+	err := c.Run()
+	if err != nil {
+		return nil, annotatePluginError(err, pluginPath, stdout.Bytes(), stderr.Bytes())
+	}
+	return stdout.Bytes(), nil
+}
+
+// annotatePluginError parses the common cni plugin error JSON.
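The error-annotation path above decodes the JSON that CNI plugins print on stdout when they fail. A self-contained sketch of that decoding, where `pluginError` mirrors the exported fields of the unexported `cniPluginError` and the sample payload is invented:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pluginError mirrors the exported fields of cniPluginError (sketch only).
type pluginError struct {
	Code uint   `json:"code"`
	Msg  string `json:"msg"`
}

func main() {
	// CNI plugins report failures as JSON on stdout; decoding it yields a
	// structured message rather than a bare exit error.
	stdout := []byte(`{"code": 11, "msg": "interface already exists"}`)
	var e pluginError
	if err := json.Unmarshal(stdout, &e); err != nil {
		fmt.Println("unparsable plugin output:", err)
		return
	}
	fmt.Printf("cni plugin bridge failed: %s (code %d)\n", e.Msg, e.Code)
}
```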
+func annotatePluginError(err error, plugin string, stdout, stderr []byte) error { + pluginName := filepath.Base(plugin) + emsg := cniPluginError{ + plugin: pluginName, + } + if len(stdout) == 0 { + if len(stderr) == 0 { + emsg.Msg = err.Error() + } else { + emsg.Msg = string(stderr) + } + } else if perr := json.Unmarshal(stdout, &emsg); perr != nil { + emsg.Msg = fmt.Sprintf("failed to unmarshal error message %q: %v", string(stdout), perr) + } + return &emsg +} + +// FindInPath finds the plugin in the given paths. +func (e *cniExec) FindInPath(plugin string, paths []string) (string, error) { + return invoke.FindInPath(plugin, paths) +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go new file mode 100644 index 00000000000..25cc173a67d --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go @@ -0,0 +1,286 @@ +//go:build linux +// +build linux + +package cni + +import ( + "net" + "os" + "path/filepath" + + "github.com/containers/common/libnetwork/types" +) + +const ( + defaultIPv4Route = "0.0.0.0/0" + defaultIPv6Route = "::/0" + // defaultPodmanDomainName is used for the dnsname plugin to define + // a localized domain name for a created network + defaultPodmanDomainName = "dns.podman" + + // cniDeviceName is the default name for a new bridge, it should be suffixed with an integer + cniDeviceName = "cni-podman" + + // podmanLabelKey key used to store the podman network label in a cni config + podmanLabelKey = "podman_labels" + + // podmanOptionsKey key used to store the podman network options in a cni config + podmanOptionsKey = "podman_options" +) + +// cniPortMapEntry struct is used by the portmap plugin +// https://github.com/containernetworking/plugins/blob/649e0181fe7b3a61e708f3e4249a798f57f25cc5/plugins/meta/portmap/main.go#L43-L50 +type cniPortMapEntry struct { + HostPort int `json:"hostPort"` + ContainerPort int `json:"containerPort"` + Protocol string `json:"protocol"` + HostIP string `json:"hostIP,omitempty"` +} + +// hostLocalBridge describes a configuration for a bridge plugin +// https://github.com/containernetworking/plugins/tree/master/plugins/main/bridge#network-configuration-reference +type hostLocalBridge struct { + PluginType string `json:"type"` + BrName string `json:"bridge,omitempty"` + IsGW bool `json:"isGateway"` + IsDefaultGW bool `json:"isDefaultGateway,omitempty"` + ForceAddress bool `json:"forceAddress,omitempty"` + IPMasq bool `json:"ipMasq,omitempty"` + MTU int `json:"mtu,omitempty"` + HairpinMode bool `json:"hairpinMode,omitempty"` + PromiscMode bool `json:"promiscMode,omitempty"` + Vlan int `json:"vlan,omitempty"` + IPAM ipamConfig `json:"ipam"` + Capabilities map[string]bool `json:"capabilities,omitempty"` +} + +// ipamConfig describes an IPAM configuration +// https://github.com/containernetworking/plugins/tree/master/plugins/ipam/host-local#network-configuration-reference +type ipamConfig struct { + PluginType string `json:"type"` + Routes []ipamRoute `json:"routes,omitempty"` + ResolveConf string `json:"resolveConf,omitempty"` + DataDir string `json:"dataDir,omitempty"` + Ranges [][]ipamLocalHostRangeConf `json:"ranges,omitempty"` +} + +// ipamLocalHostRangeConf describes the new style IPAM ranges +type ipamLocalHostRangeConf struct { + Subnet string `json:"subnet"` + RangeStart string `json:"rangeStart,omitempty"` + RangeEnd string `json:"rangeEnd,omitempty"` + Gateway string `json:"gateway,omitempty"` +} + +// ipamRoute describes a 
route in an ipam config +type ipamRoute struct { + Dest string `json:"dst"` +} + +// portMapConfig describes the default portmapping config +type portMapConfig struct { + PluginType string `json:"type"` + Capabilities map[string]bool `json:"capabilities"` +} + +// VLANConfig describes the macvlan config +type VLANConfig struct { + PluginType string `json:"type"` + Master string `json:"master"` + IPAM ipamConfig `json:"ipam"` + MTU int `json:"mtu,omitempty"` + Mode string `json:"mode,omitempty"` + Capabilities map[string]bool `json:"capabilities,omitempty"` +} + +// firewallConfig describes the firewall plugin +type firewallConfig struct { + PluginType string `json:"type"` + Backend string `json:"backend"` +} + +// tuningConfig describes the tuning plugin +type tuningConfig struct { + PluginType string `json:"type"` +} + +// dnsNameConfig describes the dns container name resolution plugin config +type dnsNameConfig struct { + PluginType string `json:"type"` + DomainName string `json:"domainName"` + Capabilities map[string]bool `json:"capabilities"` +} + +// ncList describes a generic map +type ncList map[string]interface{} + +// newNcList creates a generic map of values with string +// keys and adds in version and network name +func newNcList(name, version string, labels, options map[string]string) ncList { + n := ncList{} + n["cniVersion"] = version + n["name"] = name + args := map[string]map[string]string{} + if len(labels) > 0 { + args[podmanLabelKey] = labels + } + if len(options) > 0 { + args[podmanOptionsKey] = options + } + if len(args) > 0 { + n["args"] = args + } + return n +} + +// newHostLocalBridge creates a new LocalBridge for host-local +func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu, vlan int, ipamConf *ipamConfig) *hostLocalBridge { + caps := make(map[string]bool) + caps["ips"] = true + bridge := hostLocalBridge{ + PluginType: "bridge", + BrName: name, + IsGW: isGateWay, + IPMasq: ipMasq, + MTU: mtu, + HairpinMode: true, + Vlan: vlan, + } + if ipamConf != nil { + bridge.IPAM = *ipamConf + // if we use host-local set the ips cap to ensure we can set static ips via runtime config + if ipamConf.PluginType == types.HostLocalIPAMDriver { + bridge.Capabilities = caps + } + } + return &bridge +} + +// newIPAMHostLocalConf creates a new IPAMHostLocal configuration +func newIPAMHostLocalConf(routes []ipamRoute, ipamRanges [][]ipamLocalHostRangeConf) ipamConfig { + ipamConf := ipamConfig{ + PluginType: "host-local", + Routes: routes, + } + + ipamConf.Ranges = ipamRanges + return ipamConf +} + +// newIPAMLocalHostRange create a new IPAM range +func newIPAMLocalHostRange(subnet types.IPNet, leaseRange *types.LeaseRange, gw net.IP) *ipamLocalHostRangeConf { + hostRange := &ipamLocalHostRangeConf{ + Subnet: subnet.String(), + } + + // a user provided a range, we add it here + if leaseRange != nil { + if leaseRange.StartIP != nil { + hostRange.RangeStart = leaseRange.StartIP.String() + } + if leaseRange.EndIP != nil { + hostRange.RangeEnd = leaseRange.EndIP.String() + } + } + + if gw != nil { + hostRange.Gateway = gw.String() + } + return hostRange +} + +// newIPAMRoute creates a new IPAM route configuration +// nolint:interfacer +func newIPAMRoute(r *net.IPNet) ipamRoute { + return ipamRoute{Dest: r.String()} +} + +// newIPAMDefaultRoute creates a new IPAMDefault route of +// 0.0.0.0/0 for IPv4 or ::/0 for IPv6 +func newIPAMDefaultRoute(isIPv6 bool) (ipamRoute, error) { + route := defaultIPv4Route + if isIPv6 { + route = defaultIPv6Route + } + _, n, err := 
net.ParseCIDR(route) + if err != nil { + return ipamRoute{}, err + } + return newIPAMRoute(n), nil +} + +// newPortMapPlugin creates a predefined, default portmapping +// configuration +func newPortMapPlugin() portMapConfig { + caps := make(map[string]bool) + caps["portMappings"] = true + p := portMapConfig{ + PluginType: "portmap", + Capabilities: caps, + } + return p +} + +// newFirewallPlugin creates a generic firewall plugin +func newFirewallPlugin() firewallConfig { + return firewallConfig{ + PluginType: "firewall", + } +} + +// newTuningPlugin creates a generic tuning section +func newTuningPlugin() tuningConfig { + return tuningConfig{ + PluginType: "tuning", + } +} + +// newDNSNamePlugin creates the dnsname config with a given +// domainname +func newDNSNamePlugin(domainName string) dnsNameConfig { + caps := make(map[string]bool, 1) + caps["aliases"] = true + return dnsNameConfig{ + PluginType: "dnsname", + DomainName: domainName, + Capabilities: caps, + } +} + +// hasDNSNamePlugin looks to see if the dnsname cni plugin is present +func hasDNSNamePlugin(paths []string) bool { + for _, p := range paths { + if _, err := os.Stat(filepath.Join(p, "dnsname")); err == nil { + return true + } + } + return false +} + +// newVLANPlugin creates a macvlanconfig with a given device name +func newVLANPlugin(pluginType, device, mode string, mtu int, ipam *ipamConfig) VLANConfig { + m := VLANConfig{ + PluginType: pluginType, + } + if ipam != nil { + m.IPAM = *ipam + } + if mtu > 0 { + m.MTU = mtu + } + if len(mode) > 0 { + m.Mode = mode + } + // CNI is supposed to use the default route if a + // parent device is not provided + if len(device) > 0 { + m.Master = device + } + caps := make(map[string]bool) + caps["ips"] = true + // if we use host-local set the ips cap to ensure we can set static ips via runtime config + if ipam.PluginType == types.HostLocalIPAMDriver { + m.Capabilities = caps + } + return m +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go new file mode 100644 index 00000000000..c6967b600bf --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/config.go @@ -0,0 +1,241 @@ +//go:build linux +// +build linux + +package cni + +import ( + "net" + "os" + + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +// NetworkCreate will take a partial filled Network and fill the +// missing fields. It creates the Network and returns the full Network. +func (n *cniNetwork) NetworkCreate(net types.Network) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + network, err := n.networkCreate(&net, false) + if err != nil { + return types.Network{}, err + } + // add the new network to the map + n.networks[network.libpodNet.Name] = network + return *network.libpodNet, nil +} + +// networkCreate will fill out the given network struct and return the new network entry. +// If defaultNet is true it will not validate against used subnets and it will not write the cni config to disk. 
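Usage sketch for the `NetworkCreate()` method above: callers hand in a partially filled `Network` and get the completed one back. This assumes the `ContainerNetwork` interface shape implied by the method (as referenced in the package README); field names come from this diff:

```go
package example

import (
	"github.com/containers/common/libnetwork/types"
)

// newBridgeNetwork sketches creating a network from a partially filled
// Network: missing fields (name, ID, subnet, interface, ...) are filled in
// by the backend implementing types.ContainerNetwork.
func newBridgeNetwork(backend types.ContainerNetwork) (types.Network, error) {
	return backend.NetworkCreate(types.Network{
		Driver:     types.BridgeNetworkDriver,
		DNSEnabled: true,
	})
}
```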
+func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (*network, error) { + // if no driver is set use the default one + if newNetwork.Driver == "" { + newNetwork.Driver = types.DefaultNetworkDriver + } + + // FIXME: Should we use a different type for network create without the ID field? + // the caller is not allowed to set a specific ID + if newNetwork.ID != "" { + return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create") + } + + err := internalutil.CommonNetworkCreate(n, newNetwork) + if err != nil { + return nil, err + } + + err = validateIPAMDriver(newNetwork) + if err != nil { + return nil, err + } + + // Only get the used networks for validation if we do not create the default network. + // The default network should not be validated against used subnets, we have to ensure + // that this network can always be created even when a subnet is already used on the host. + // This could happen if you run a container on this net, then the cni interface will be + // created on the host and "block" this subnet from being used again. + // Therefore the next podman command tries to create the default net again and it would + // fail because it thinks the network is used on the host. + var usedNetworks []*net.IPNet + if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver { + usedNetworks, err = internalutil.GetUsedSubnets(n) + if err != nil { + return nil, err + } + } + + switch newNetwork.Driver { + case types.BridgeNetworkDriver: + err = internalutil.CreateBridge(n, newNetwork, usedNetworks, n.defaultsubnetPools) + if err != nil { + return nil, err + } + case types.MacVLANNetworkDriver, types.IPVLANNetworkDriver: + err = createIPMACVLAN(newNetwork) + if err != nil { + return nil, err + } + default: + return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) + } + + err = internalutil.ValidateSubnets(newNetwork, !newNetwork.Internal, usedNetworks) + if err != nil { + return nil, err + } + + // generate the network ID + newNetwork.ID = getNetworkIDFromName(newNetwork.Name) + + // when we do not have ipam we must disable dns + internalutil.IpamNoneDisableDns(newNetwork) + + // FIXME: Should this be a hard error? + if newNetwork.DNSEnabled && newNetwork.Internal && hasDNSNamePlugin(n.cniPluginDirs) { + logrus.Warnf("dnsname and internal networks are incompatible. dnsname plugin not configured for network %s", newNetwork.Name) + newNetwork.DNSEnabled = false + } + + cniConf, path, err := n.createCNIConfigListFromNetwork(newNetwork, !defaultNet) + if err != nil { + return nil, err + } + return &network{cniNet: cniConf, libpodNet: newNetwork, filename: path}, nil +} + +// NetworkRemove will remove the Network with the given name or ID. +// It does not ensure that the network is unused. +func (n *cniNetwork) NetworkRemove(nameOrID string) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return err + } + + // Removing the default network is not allowed. + if network.libpodNet.Name == n.defaultNetwork { + return errors.Errorf("default network %s cannot be removed", n.defaultNetwork) + } + + // Remove the bridge network interface on the host. 
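+ // CNI DEL only removes the veth endpoints inside the container netns;
+ // the bridge device itself stays behind on the host, so delete it here
+ // via netlink.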
+ if network.libpodNet.Driver == types.BridgeNetworkDriver { + link, err := netlink.LinkByName(network.libpodNet.NetworkInterface) + if err == nil { + err = netlink.LinkDel(link) + // only log the error, it is not fatal + if err != nil { + logrus.Infof("Failed to remove network interface %s: %v", network.libpodNet.NetworkInterface, err) + } + } + } + + file := network.filename + delete(n.networks, network.libpodNet.Name) + + // make sure to not error for ErrNotExist + if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + return nil +} + +// NetworkList will return all known Networks. Optionally you can +// supply a list of filter functions. A network is only returned if it +// matches all of them. +func (n *cniNetwork) NetworkList(filters ...types.FilterFunc) ([]types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + networks := make([]types.Network, 0, len(n.networks)) +outer: + for _, net := range n.networks { + for _, filter := range filters { + // All filters have to match, if one does not match we can skip to the next network. + if !filter(*net.libpodNet) { + continue outer + } + } + networks = append(networks, *net.libpodNet) + } + return networks, nil +} + +// NetworkInspect will return the Network with the given name or ID. +func (n *cniNetwork) NetworkInspect(nameOrID string) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return types.Network{}, err + } + return *network.libpodNet, nil +} + +func createIPMACVLAN(network *types.Network) error { + if network.NetworkInterface != "" { + interfaceNames, err := internalutil.GetLiveNetworkNames() + if err != nil { + return err + } + if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) { + return errors.Errorf("parent interface %s does not exist", network.NetworkInterface) + } + } + + switch network.IPAMOptions[types.Driver] { + // set default + case "": + if len(network.Subnets) == 0 { + // if no subnets and no driver choose dhcp + network.IPAMOptions[types.Driver] = types.DHCPIPAMDriver + } else { + network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver + } + case types.HostLocalIPAMDriver: + if len(network.Subnets) == 0 { + return errors.New("host-local ipam driver set but no subnets are given") + } + } + + if network.IPAMOptions[types.Driver] == types.DHCPIPAMDriver && network.Internal { + return errors.New("internal is not supported with macvlan and dhcp ipam driver") + } + return nil +} + +func validateIPAMDriver(n *types.Network) error { + ipamDriver := n.IPAMOptions[types.Driver] + switch ipamDriver { + case "", types.HostLocalIPAMDriver: + case types.DHCPIPAMDriver, types.NoneIPAMDriver: + if len(n.Subnets) > 0 { + return errors.Errorf("%s ipam driver is set but subnets are given", ipamDriver) + } + default: + return errors.Errorf("unsupported ipam driver %q", ipamDriver) + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go new file mode 100644 index 00000000000..82b9cbd2e50 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/network.go @@ -0,0 +1,289 @@ +//go:build linux +// +build linux + +package cni + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "os" + "path/filepath" + "strings" + "time" +
"github.com/containernetworking/cni/libcni" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/storage/pkg/lockfile" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type cniNetwork struct { + // cniConfigDir is directory where the cni config files are stored. + cniConfigDir string + // cniPluginDirs is a list of directories where cni should look for the plugins. + cniPluginDirs []string + + cniConf *libcni.CNIConfig + + // defaultNetwork is the name for the default network. + defaultNetwork string + // defaultSubnet is the default subnet for the default network. + defaultSubnet types.IPNet + + // defaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create + defaultsubnetPools []config.SubnetPool + + // isMachine describes whenever podman runs in a podman machine environment. + isMachine bool + + // lock is a internal lock for critical operations + lock lockfile.Locker + + // modTime is the timestamp when the config dir was modified + modTime time.Time + + // networks is a map with loaded networks, the key is the network name + networks map[string]*network +} + +type network struct { + // filename is the full path to the cni config file on disk + filename string + libpodNet *types.Network + cniNet *libcni.NetworkConfigList +} + +type InitConfig struct { + // CNIConfigDir is directory where the cni config files are stored. + CNIConfigDir string + // CNIPluginDirs is a list of directories where cni should look for the plugins. + CNIPluginDirs []string + + // DefaultNetwork is the name for the default network. + DefaultNetwork string + // DefaultSubnet is the default subnet for the default network. + DefaultSubnet string + + // DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create + DefaultsubnetPools []config.SubnetPool + + // IsMachine describes whenever podman runs in a podman machine environment. + IsMachine bool +} + +// NewCNINetworkInterface creates the ContainerNetwork interface for the CNI backend. +// Note: The networks are not loaded from disk until a method is called. +func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { + // TODO: consider using a shared memory lock + lock, err := lockfile.GetLockfile(filepath.Join(conf.CNIConfigDir, "cni.lock")) + if err != nil { + return nil, err + } + + defaultNetworkName := conf.DefaultNetwork + if defaultNetworkName == "" { + defaultNetworkName = types.DefaultNetworkName + } + + defaultSubnet := conf.DefaultSubnet + if defaultSubnet == "" { + defaultSubnet = types.DefaultSubnet + } + defaultNet, err := types.ParseCIDR(defaultSubnet) + if err != nil { + return nil, errors.Wrap(err, "failed to parse default subnet") + } + + defaultSubnetPools := conf.DefaultsubnetPools + if defaultSubnetPools == nil { + defaultSubnetPools = config.DefaultSubnetPools + } + + cni := libcni.NewCNIConfig(conf.CNIPluginDirs, &cniExec{}) + n := &cniNetwork{ + cniConfigDir: conf.CNIConfigDir, + cniPluginDirs: conf.CNIPluginDirs, + cniConf: cni, + defaultNetwork: defaultNetworkName, + defaultSubnet: defaultNet, + defaultsubnetPools: defaultSubnetPools, + isMachine: conf.IsMachine, + lock: lock, + } + + return n, nil +} + +// Drivers will return the list of supported network drivers +// for this interface. 
+func (n *cniNetwork) Drivers() []string { + return []string{types.BridgeNetworkDriver, types.MacVLANNetworkDriver, types.IPVLANNetworkDriver} +} + +// DefaultNetworkName will return the default cni network name. +func (n *cniNetwork) DefaultNetworkName() string { + return n.defaultNetwork +} + +func (n *cniNetwork) loadNetworks() error { + // check the mod time of the config dir + f, err := os.Stat(n.cniConfigDir) + if err != nil { + return err + } + modTime := f.ModTime() + + // skip loading networks if they are already loaded and + // if the config dir was not modified since the last call + if n.networks != nil && modTime.Equal(n.modTime) { + return nil + } + // make sure to remove all networks before we reload them + n.networks = nil + n.modTime = modTime + + // FIXME: do we have to support other file types as well, e.g. .conf? + files, err := libcni.ConfFiles(n.cniConfigDir, []string{".conflist"}) + if err != nil { + return err + } + networks := make(map[string]*network, len(files)) + for _, file := range files { + conf, err := libcni.ConfListFromFile(file) + if err != nil { + // do not log ENOENT errors + if !errors.Is(err, os.ErrNotExist) { + logrus.Warnf("Error loading CNI config file %s: %v", file, err) + } + continue + } + + if !types.NameRegex.MatchString(conf.Name) { + logrus.Warnf("CNI config list %s has invalid name, skipping: %v", file, types.RegexError) + continue + } + + // podman < v4.0 used the podman-machine cni plugin for podman machine port forwarding + // since this is now built into podman we no longer use the plugin + // old configs may still contain it so we just remove it here + if n.isMachine { + conf = removeMachinePlugin(conf) + } + + if _, err := n.cniConf.ValidateNetworkList(context.Background(), conf); err != nil { + logrus.Warnf("Error validating CNI config file %s: %v", file, err) + continue + } + + if val, ok := networks[conf.Name]; ok { + logrus.Warnf("CNI config list %s has the same network name as %s, skipping", file, val.filename) + continue + } + + net, err := createNetworkFromCNIConfigList(conf, file) + if err != nil { + logrus.Errorf("CNI config list %s could not be converted to a libpod config, skipping: %v", file, err) + continue + } + logrus.Debugf("Successfully loaded network %s: %v", net.Name, net) + networkInfo := network{ + filename: file, + cniNet: conf, + libpodNet: net, + } + networks[net.Name] = &networkInfo + } + + // create the default network in memory if it did not exist on disk + if networks[n.defaultNetwork] == nil { + networkInfo, err := n.createDefaultNetwork() + if err != nil { + return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork) + } + networks[n.defaultNetwork] = networkInfo + } + + logrus.Debugf("Successfully loaded %d networks", len(networks)) + n.networks = networks + return nil +} + +func (n *cniNetwork) createDefaultNetwork() (*network, error) { + net := types.Network{ + Name: n.defaultNetwork, + NetworkInterface: "cni-podman0", + Driver: types.BridgeNetworkDriver, + Subnets: []types.Subnet{ + {Subnet: n.defaultSubnet}, + }, + } + return n.networkCreate(&net, true) +} + +// getNetwork will look up a network by name or ID. It returns an +// error when no network was found or when more than one network +// with the given (partial) ID exists. +// getNetwork will read from the networks map, therefore the caller +// must ensure that n.lock is locked before using it.
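+// Lookups by name are resolved directly via the map key; ID lookups fall
+// back to a linear scan with prefix matching over all loaded networks.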
+func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) { + // fast path: check the map key, this will only work for names + if val, ok := n.networks[nameOrID]; ok { + return val, nil + } + // If there was no match we might have gotten a full or partial ID. + var net *network + for _, val := range n.networks { + // This should not happen because we already looked up the map by name but check anyway. + if val.libpodNet.Name == nameOrID { + return val, nil + } + + if strings.HasPrefix(val.libpodNet.ID, nameOrID) { + if net != nil { + return nil, errors.Errorf("more than one result for network ID %s", nameOrID) + } + net = val + } + } + if net != nil { + return net, nil + } + return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID) +} + +// getNetworkIDFromName creates a network ID from the name. It is just the +// sha256 hash of the name, so it is not collision-safe, but it should be +// safe enough for our use case. +func getNetworkIDFromName(name string) string { + hash := sha256.Sum256([]byte(name)) + return hex.EncodeToString(hash[:]) +} + +// Implement the NetUtil interface for easy code sharing with other network interfaces. + +// ForEach calls the given function for each network +func (n *cniNetwork) ForEach(run func(types.Network)) { + for _, val := range n.networks { + run(*val.libpodNet) + } +} + +// Len returns the number of networks +func (n *cniNetwork) Len() int { + return len(n.networks) +} + +// DefaultInterfaceName returns the default cni bridge name; it must be suffixed with a number. +func (n *cniNetwork) DefaultInterfaceName() string { + return cniDeviceName +} + +func (n *cniNetwork) Network(nameOrID string) (*types.Network, error) { + network, err := n.getNetwork(nameOrID) + if err != nil { + return nil, err + } + return network.libpodNet, err +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go new file mode 100644 index 00000000000..c7fa86ed0aa --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/run.go @@ -0,0 +1,278 @@ +//go:build linux +// +build linux + +package cni + +import ( + "context" + "net" + "os" + "strings" + + "github.com/containernetworking/cni/libcni" + cnitypes "github.com/containernetworking/cni/pkg/types" + types040 "github.com/containernetworking/cni/pkg/types/040" + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +// Setup will set up the container network namespace. It returns +// a map of StatusBlocks, the key is the network name.
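+// If attaching any network fails, the networks that were already set up
+// are torn down again before the error is returned.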
+func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (map[string]types.StatusBlock, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + err = util.ValidateSetupOptions(n, namespacePath, options) + if err != nil { + return nil, err + } + + // set the loopback adapter up in the container netns + err = ns.WithNetNSPath(namespacePath, func(_ ns.NetNS) error { + link, err := netlink.LinkByName("lo") + if err == nil { + err = netlink.LinkSetUp(link) + } + return err + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to set the loopback adapter up") + } + + var retErr error + teardownOpts := options + teardownOpts.Networks = map[string]types.PerNetworkOptions{} + // make sure to tear down the already connected networks on error + defer func() { + if retErr != nil { + if len(teardownOpts.Networks) > 0 { + err := n.teardown(namespacePath, types.TeardownOptions(teardownOpts)) + if err != nil { + logrus.Warn(err) + } + } + } + }() + + ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings) + if err != nil { + return nil, err + } + + results := make(map[string]types.StatusBlock, len(options.Networks)) + for name, netOpts := range options.Networks { + netOpts := netOpts + network := n.networks[name] + rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) + + // If we have more than one static ip we need to parse the ips via runtime config; + // make sure to add the ips capability to the first plugin, otherwise it doesn't get the ips + if len(netOpts.StaticIPs) > 0 && !network.cniNet.Plugins[0].Network.Capabilities["ips"] { + caps := make(map[string]interface{}) + caps["capabilities"] = map[string]bool{"ips": true} + network.cniNet.Plugins[0], retErr = libcni.InjectConf(network.cniNet.Plugins[0], caps) + if retErr != nil { + return nil, retErr + } + } + + var res cnitypes.Result + res, retErr = n.cniConf.AddNetworkList(context.Background(), network.cniNet, rt) + // Add this network to teardown opts since it is now connected. + // Also add this if an error was returned since we want to call teardown on this regardless.
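+ // Per the CNI spec plugins have to tolerate a DEL after a failed ADD,
+ // so recording the network here lets the deferred teardown clean up
+ // partial allocations as well.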
+ teardownOpts.Networks[name] = netOpts + if retErr != nil { + return nil, retErr + } + + logrus.Debugf("cni result for container %s network %s: %v", options.ContainerID, name, res) + var status types.StatusBlock + status, retErr = CNIResultToStatus(res) + if retErr != nil { + return nil, retErr + } + results[name] = status + } + return results, nil +} + +// CNIResultToStatus converts the cni result to a status block +// nolint:golint +func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) { + result := types.StatusBlock{} + cniResult, err := types040.GetResult(res) + if err != nil { + return result, err + } + nameservers := make([]net.IP, 0, len(cniResult.DNS.Nameservers)) + for _, nameserver := range cniResult.DNS.Nameservers { + ip := net.ParseIP(nameserver) + if ip == nil { + return result, errors.Errorf("failed to parse cni nameserver ip %s", nameserver) + } + nameservers = append(nameservers, ip) + } + result.DNSServerIPs = nameservers + result.DNSSearchDomains = cniResult.DNS.Search + + interfaces := make(map[string]types.NetInterface) + for intIndex, netInterface := range cniResult.Interfaces { + // we are only interested in interfaces in the container namespace + if netInterface.Sandbox == "" { + continue + } + + mac, err := net.ParseMAC(netInterface.Mac) + if err != nil { + return result, err + } + subnets := make([]types.NetAddress, 0, len(cniResult.IPs)) + for _, ip := range cniResult.IPs { + if ip.Interface == nil { + // we do not expect ips without an interface + continue + } + if len(cniResult.Interfaces) <= *ip.Interface { + return result, errors.Errorf("invalid cni result, interface index %d out of range", *ip.Interface) + } + + // when we have an ip for this interface add it to the subnets + if *ip.Interface == intIndex { + subnets = append(subnets, types.NetAddress{ + IPNet: types.IPNet{IPNet: ip.Address}, + Gateway: ip.Gateway, + }) + } + } + interfaces[netInterface.Name] = types.NetInterface{ + MacAddress: types.HardwareAddr(mac), + Subnets: subnets, + } + } + result.Interfaces = interfaces + return result, nil +} + +func getRuntimeConfig(netns, conName, conID, networkName string, ports []cniPortMapEntry, opts *types.PerNetworkOptions) *libcni.RuntimeConf { + rt := &libcni.RuntimeConf{ + ContainerID: conID, + NetNS: netns, + IfName: opts.InterfaceName, + Args: [][2]string{ + {"IgnoreUnknown", "1"}, + // Do not set the K8S env vars, see https://github.com/containers/podman/issues/12083. + // Only K8S_POD_NAME is used by dnsname to get the container name. + {"K8S_POD_NAME", conName}, + }, + CapabilityArgs: map[string]interface{}{}, + } + + // Propagate environment CNI_ARGS + for _, kvpairs := range strings.Split(os.Getenv("CNI_ARGS"), ";") { + if keyval := strings.SplitN(kvpairs, "=", 2); len(keyval) == 2 { + rt.Args = append(rt.Args, [2]string{keyval[0], keyval[1]}) + } + } + + // Add mac address to cni args + if len(opts.StaticMAC) > 0 { + rt.Args = append(rt.Args, [2]string{"MAC", opts.StaticMAC.String()}) + } + + if len(opts.StaticIPs) == 1 { + // Add a single IP to the args field. CNI plugins < 1.0.0 + // do not support multiple ips via capability args. + rt.Args = append(rt.Args, [2]string{"IP", opts.StaticIPs[0].String()}) + } else if len(opts.StaticIPs) > 1 { + // Set the static ips in the capability args + // to support more than one static ip per network. + rt.CapabilityArgs["ips"] = opts.StaticIPs + } + + // Set network aliases for the dnsname plugin.
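+ // Aliases are extra names the dnsname plugin resolves for this
+ // container on the given network, in addition to K8S_POD_NAME above.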
+ if len(opts.Aliases) > 0 { + rt.CapabilityArgs["aliases"] = map[string][]string{ + networkName: opts.Aliases, + } + } + + // Set PortMappings in Capabilities + if len(ports) > 0 { + rt.CapabilityArgs["portMappings"] = ports + } + + return rt +} + +// Teardown will tear down the container network namespace. +func (n *cniNetwork) Teardown(namespacePath string, options types.TeardownOptions) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + return n.teardown(namespacePath, options) +} + +func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOptions) error { + // Note: An empty namespacePath is allowed because some plugins + // still need teardown, for example ipam should remove used ip allocations. + + ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings) + if err != nil { + return err + } + + var multiErr *multierror.Error + for name, netOpts := range options.Networks { + netOpts := netOpts + rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) + + cniConfList, newRt, err := getCachedNetworkConfig(n.cniConf, name, rt) + if err == nil { + rt = newRt + } else { + logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name) + network := n.networks[name] + if network == nil { + multiErr = multierror.Append(multiErr, errors.Wrapf(types.ErrNoSuchNetwork, "network %s", name)) + continue + } + cniConfList = network.cniNet + } + + err = n.cniConf.DelNetworkList(context.Background(), cniConfList, rt) + if err != nil { + multiErr = multierror.Append(multiErr, err) + } + } + return multiErr.ErrorOrNil() +} + +func getCachedNetworkConfig(cniConf *libcni.CNIConfig, name string, rt *libcni.RuntimeConf) (*libcni.NetworkConfigList, *libcni.RuntimeConf, error) { + cniConfList := &libcni.NetworkConfigList{ + Name: name, + } + confBytes, rt, err := cniConf.GetNetworkListCachedConfig(cniConfList, rt) + if err != nil { + return nil, nil, err + } else if confBytes == nil { + return nil, nil, errors.Errorf("network %s not found in CNI cache", name) + } + + cniConfList, err = libcni.ConfListFromBytes(confBytes) + if err != nil { + return nil, nil, err + } + return cniConfList, rt, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go new file mode 100644 index 00000000000..ce248a181ba --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go @@ -0,0 +1,339 @@ +package etchosts + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/util" +) + +const ( + hostContainersInternal = "host.containers.internal" + localhost = "localhost" +) + +type HostEntries []HostEntry + +type HostEntry struct { + IP string + Names []string +} + +// Params for the New() function call +type Params struct { + // BaseFile is the file entries are read from before they are added to + // the target hosts file. If the name is empty no entries are read. + BaseFile string + // ExtraHosts is a slice of entries in the "hostname:ip" format. + // Optional. + ExtraHosts []string + // ContainerIPs should contain the main container ipv4 and ipv6 if available, + // with the container name and host name set as names. + // Optional. + ContainerIPs HostEntries + // HostContainersInternalIP is the IP for the host.containers.internal entry.
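+ // If empty no such entry is written.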
+ // Optional. + HostContainersInternalIP string + // TargetFile where the hosts are written to. + TargetFile string +} + +// New will create a new hosts file and write this to the target file. +// This function does not prevent any kind of concurrency problems, it is +// the caller's responsibility to avoid concurrent writes to this file. +// The extraHosts are written first, then the hosts from the file baseFile and the +// containerIps. The container ip entry is only added when the name was not already +// added before. +func New(params *Params) error { + if err := new(params); err != nil { + return fmt.Errorf("failed to create new hosts file: %w", err) + } + return nil +} + +// Add adds the given entries to the hosts file, entries are only added if +// they are not already present. +// Add is not atomic because it will keep the current file inode. This is +// required to keep bind mounts for containers working. +func Add(file string, entries HostEntries) error { + if err := add(file, entries); err != nil { + return fmt.Errorf("failed to add entries to hosts file: %w", err) + } + return nil +} + +// AddIfExists will add the given entries only if one of the existsEntries +// is in the hosts file. This API is required for podman network connect. +// Since we want to add the same host name for each network ip we need to +// add duplicates, and the normal Add() call prevents us from doing so. +// However, since we also do not want to overwrite entries that were added +// manually by users, we first have to check that the expected current +// entries are in the file. Note that this will only check for one match, +// not all. Like Remove(), it also only checks that the ip and one of the +// hostnames match. +func AddIfExists(file string, existsEntries, newEntries HostEntries) error { + if err := addIfExists(file, existsEntries, newEntries); err != nil { + return fmt.Errorf("failed to add entries to hosts file: %w", err) + } + return nil +} + +// Remove will remove the given entries from the file. An entry will be +// removed when the ip and at least one name match. Not all names have +// to match. If the given entries are not present in the file no error is +// returned. +// Remove is not atomic because it will keep the current file inode. This is +// required to keep bind mounts for containers working. +func Remove(file string, entries HostEntries) error { + if err := remove(file, entries); err != nil { + return fmt.Errorf("failed to remove entries from hosts file: %w", err) + } + return nil +} + +// new see comment on New() +func new(params *Params) error { + entries, err := parseExtraHosts(params.ExtraHosts) + if err != nil { + return err + } + entries2, err := parseHostsFile(params.BaseFile) + if err != nil { + return err + } + entries = append(entries, entries2...) + + // preallocate the slice with enough space for the 3 special entries below + containerIPs := make(HostEntries, 0, len(params.ContainerIPs)+3) + + // if localhost was not added we add it + // https://github.com/containers/podman/issues/11411 + lh := []string{localhost} + l1 := HostEntry{IP: "127.0.0.1", Names: lh} + l2 := HostEntry{IP: "::1", Names: lh} + containerIPs = append(containerIPs, l1, l2) + if params.HostContainersInternalIP != "" { + e := HostEntry{IP: params.HostContainersInternalIP, Names: []string{hostContainersInternal}} + containerIPs = append(containerIPs, e) + } + containerIPs = append(containerIPs, params.ContainerIPs...)
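+ // Entries from extraHosts and the base file take precedence: writeHostFile
+ // writes them first and container ips are only added for names that are
+ // not already in use.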
+ + if err := writeHostFile(params.TargetFile, entries, containerIPs); err != nil { + return err + } + return nil +} + +// add see comment on Add() +func add(file string, entries HostEntries) error { + currentEntries, err := parseHostsFile(file) + if err != nil { + return err + } + + names := make(map[string]struct{}) + for _, entry := range currentEntries { + for _, name := range entry.Names { + names[name] = struct{}{} + } + } + + // open file in append mode since we only add, we do not have to write existing entries again + f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0o644) + if err != nil { + return err + } + defer f.Close() + + return addEntriesIfNotExists(f, entries, names) +} + +// addIfExists see comment on AddIfExists() +func addIfExists(file string, existsEntries, newEntries HostEntries) error { + // special case: when there are no existing entries do a normal add + // this can happen when we connect a network which was not connected + // to any other networks before + if len(existsEntries) == 0 { + return add(file, newEntries) + } + + currentEntries, err := parseHostsFile(file) + if err != nil { + return err + } + + for _, entry := range currentEntries { + if !checkIfEntryExists(entry, existsEntries) { + // keep looking for existing entries + continue + } + // if we have a matching existing entry add the new entries + // open file in append mode since we only add, we do not have to write existing entries again + f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0o644) + if err != nil { + return err + } + defer f.Close() + + for _, e := range newEntries { + if _, err = f.WriteString(formatLine(e.IP, e.Names)); err != nil { + return err + } + } + return nil + } + // no match found is not an error + return nil +} + +// remove see comment on Remove() +func remove(file string, entries HostEntries) error { + currentEntries, err := parseHostsFile(file) + if err != nil { + return err + } + + f, err := os.Create(file) + if err != nil { + return err + } + defer f.Close() + + for _, entry := range currentEntries { + if checkIfEntryExists(entry, entries) { + continue + } + if _, err = f.WriteString(formatLine(entry.IP, entry.Names)); err != nil { + return err + } + } + return nil +} + +func checkIfEntryExists(current HostEntry, entries HostEntries) bool { + // check if the current entry equals one of the given entries + for _, rm := range entries { + if current.IP == rm.IP { + // it is enough if one of the names matches, in this case we remove the full entry + for _, name := range current.Names { + if util.StringInSlice(name, rm.Names) { + return true + } + } + } + } + return false +} + +// parseExtraHosts converts a slice of "name:ip" strings to entries. +// Because podman and buildah both store the extra hosts in this format +// we convert it here instead of having to do this on the caller side. +func parseExtraHosts(extraHosts []string) (HostEntries, error) { + entries := make(HostEntries, 0, len(extraHosts)) + for _, entry := range extraHosts { + values := strings.SplitN(entry, ":", 2) + if len(values) != 2 { + return nil, fmt.Errorf("unable to parse host entry %q: incorrect format", entry) + } + if values[0] == "" { + return nil, fmt.Errorf("hostname in host entry %q is empty", entry) + } + if values[1] == "" { + return nil, fmt.Errorf("IP address in host entry %q is empty", entry) + } + e := HostEntry{IP: values[1], Names: []string{values[0]}} + entries = append(entries, e) + } + return entries, nil +} + +// parseHostsFile parses a given host file and returns all entries in it.
+// Note that this will remove all comments and spaces. +func parseHostsFile(file string) (HostEntries, error) { + // an empty file name is valid, in this case we skip adding entries from a file + if file == "" { + return nil, nil + } + + f, err := os.Open(file) + if err != nil { + // do not error when the default hosts file does not exist + // https://github.com/containers/podman/issues/12667 + if errors.Is(err, os.ErrNotExist) && file == config.DefaultHostsFile { + return nil, nil + } + return nil, err + } + defer f.Close() + + entries := HostEntries{} + scanner := bufio.NewScanner(f) + for scanner.Scan() { + // split off the comments + line := scanner.Text() + if c := strings.IndexByte(line, '#'); c != -1 { + line = line[:c] + } + fields := strings.Fields(line) + // if we only have an ip without names we skip it + if len(fields) < 2 { + continue + } + + e := HostEntry{IP: fields[0], Names: fields[1:]} + entries = append(entries, e) + } + + return entries, scanner.Err() +} + +// writeHostFile writes the entries to the given file +func writeHostFile(file string, userEntries, containerIPs HostEntries) error { + f, err := os.Create(file) + if err != nil { + return err + } + defer f.Close() + + names := make(map[string]struct{}) + for _, entry := range userEntries { + for _, name := range entry.Names { + names[name] = struct{}{} + } + if _, err = f.WriteString(formatLine(entry.IP, entry.Names)); err != nil { + return err + } + } + + return addEntriesIfNotExists(f, containerIPs, names) +} + +// addEntriesIfNotExists only adds the entries for names that are not already +// in the hosts file, otherwise we would start overwriting user entries +func addEntriesIfNotExists(f io.StringWriter, containerIPs HostEntries, names map[string]struct{}) error { + for _, entry := range containerIPs { + freeNames := make([]string, 0, len(entry.Names)) + for _, name := range entry.Names { + if _, ok := names[name]; !ok { + freeNames = append(freeNames, name) + } + } + if len(freeNames) > 0 { + if _, err := f.WriteString(formatLine(entry.IP, freeNames)); err != nil { + return err + } + } + } + return nil +} + +// formatLine converts the given ip and names to a valid hosts line. +// The returned string includes the newline.
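+// For example formatLine("10.88.0.2", []string{"ctr1", "ctr1.dns.podman"})
+// returns "10.88.0.2\tctr1 ctr1.dns.podman\n".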
+func formatLine(ip string, names []string) string { + return ip + "\t" + strings.Join(names, " ") + "\n" +} diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/ip.go b/vendor/github.com/containers/common/libnetwork/etchosts/ip.go new file mode 100644 index 00000000000..2b8186e72c7 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/etchosts/ip.go @@ -0,0 +1,92 @@ +package etchosts + +import ( + "net" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/machine" + "github.com/containers/storage/pkg/unshare" +) + +// GetHostContainersInternalIP returns the host.containers.internal ip +// if netStatus is not nil then networkInterface must also be non-nil, otherwise this function panics +func GetHostContainersInternalIP(conf *config.Config, netStatus map[string]types.StatusBlock, networkInterface types.ContainerNetwork) string { + switch conf.Containers.HostContainersInternalIP { + case "": + // if empty (default) we will automatically choose one below + // if the machine is using gvproxy we let the gvproxy dns server handle the dns name so do not add it + if machine.IsGvProxyBased() { + return "" + } + case "none": + return "" + default: + return conf.Containers.HostContainersInternalIP + } + ip := "" + // Only use the bridge ip when root, as rootless the interfaces are created + // inside the special netns and not on the host, so we cannot use them. + if unshare.IsRootless() { + return getLocalIP() + } + for net, status := range netStatus { + network, err := networkInterface.NetworkInspect(net) + // only add the host entry for bridge networks + // ip/macvlan gateway is normally not on the host + if err != nil || network.Driver != types.BridgeNetworkDriver { + continue + } + for _, netInt := range status.Interfaces { + for _, netAddress := range netInt.Subnets { + if netAddress.Gateway != nil { + if util.IsIPv4(netAddress.Gateway) { + return netAddress.Gateway.String() + } + // ipv6 address but keep looking since we prefer to use ipv4 + ip = netAddress.Gateway.String() + } + } + } + } + if ip != "" { + return ip + } + return getLocalIP() +} + +// getLocalIP returns the non-loopback local IP of the host +func getLocalIP() string { + addrs, err := net.InterfaceAddrs() + if err != nil { + return "" + } + ip := "" + for _, address := range addrs { + // check the address type and if it is not a loopback address use it + if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() { + if util.IsIPv4(ipnet.IP) { + return ipnet.IP.String() + } + // if ipv6 we keep looking for an ipv4 address + ip = ipnet.IP.String() + } + } + return ip +} + +// GetNetworkHostEntries returns HostEntries for all ips in the network status +// with the given hostnames.
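+// Every subnet address of every interface in the status becomes one
+// HostEntry that carries all of the given names.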
+func GetNetworkHostEntries(netStatus map[string]types.StatusBlock, names ...string) HostEntries { + hostEntries := make(HostEntries, 0, len(netStatus)) + for _, status := range netStatus { + for _, netInt := range status.Interfaces { + for _, netAddress := range netInt.Subnets { + e := HostEntry{IP: netAddress.IPNet.IP.String(), Names: names} + hostEntries = append(hostEntries, e) + } + } + } + return hostEntries +} diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/util.go b/vendor/github.com/containers/common/libnetwork/etchosts/util.go new file mode 100644 index 00000000000..d78284594bb --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/etchosts/util.go @@ -0,0 +1,30 @@ +package etchosts + +import ( + "fmt" + + "github.com/containers/common/pkg/config" + securejoin "github.com/cyphar/filepath-securejoin" +) + +// GetBaseHostFile returns the hosts file which should be used as base. +// The first param should be the config value config.Containers.BaseHostsFile +// The second param should be the root path to the mounted image. This is +// required when the user conf value is set to "image". +func GetBaseHostFile(confValue, imageRoot string) (string, error) { + switch confValue { + case "": + return config.DefaultHostsFile, nil + case "none": + return "", nil + case "image": + // use secure join to prevent problems with symlinks + path, err := securejoin.SecureJoin(imageRoot, config.DefaultHostsFile) + if err != nil { + return "", fmt.Errorf("failed to get /etc/hosts path in image: %w", err) + } + return path, nil + default: + return confValue, nil + } +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go new file mode 100644 index 00000000000..bfa72808dda --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go @@ -0,0 +1,71 @@ +package util + +import ( + "net" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + "github.com/containers/common/pkg/config" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" +) + +func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) error { + if network.NetworkInterface != "" { + bridges := GetBridgeInterfaceNames(n) + if pkgutil.StringInSlice(network.NetworkInterface, bridges) { + return errors.Errorf("bridge name %s already in use", network.NetworkInterface) + } + if !types.NameRegex.MatchString(network.NetworkInterface) { + return errors.Wrapf(types.RegexError, "bridge name %s invalid", network.NetworkInterface) + } + } else { + var err error + network.NetworkInterface, err = GetFreeDeviceName(n) + if err != nil { + return err + } + } + + ipamDriver := network.IPAMOptions[types.Driver] + // also do this when the driver is unset + if ipamDriver == "" || ipamDriver == types.HostLocalIPAMDriver { + if len(network.Subnets) == 0 { + freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks, subnetPools) + if err != nil { + return err + } + network.Subnets = append(network.Subnets, *freeSubnet) + } + // ipv6 enabled means dual stack, check if we already have + // an ipv4 or ipv6 subnet and add one if not.
+ if network.IPv6Enabled { + ipv4 := false + ipv6 := false + for _, subnet := range network.Subnets { + if util.IsIPv6(subnet.Subnet.IP) { + ipv6 = true + } + if util.IsIPv4(subnet.Subnet.IP) { + ipv4 = true + } + } + if !ipv4 { + freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks, subnetPools) + if err != nil { + return err + } + network.Subnets = append(network.Subnets, *freeSubnet) + } + if !ipv6 { + freeSubnet, err := GetFreeIPv6NetworkSubnet(usedNetworks) + if err != nil { + return err + } + network.Subnets = append(network.Subnets, *freeSubnet) + } + } + network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/create.go b/vendor/github.com/containers/common/libnetwork/internal/util/create.go new file mode 100644 index 00000000000..c1a4bee757b --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/create.go @@ -0,0 +1,49 @@ +package util + +import ( + "github.com/containers/common/libnetwork/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func CommonNetworkCreate(n NetUtil, network *types.Network) error { + if network.Labels == nil { + network.Labels = map[string]string{} + } + if network.Options == nil { + network.Options = map[string]string{} + } + if network.IPAMOptions == nil { + network.IPAMOptions = map[string]string{} + } + + var name string + var err error + // validate the name when given + if network.Name != "" { + if !types.NameRegex.MatchString(network.Name) { + return errors.Wrapf(types.RegexError, "network name %s invalid", network.Name) + } + if _, err := n.Network(network.Name); err == nil { + return errors.Wrapf(types.ErrNetworkExists, "network name %s already used", network.Name) + } + } else { + name, err = GetFreeDeviceName(n) + if err != nil { + return err + } + network.Name = name + // also use the name as interface name when we create a bridge network + if network.Driver == types.BridgeNetworkDriver && network.NetworkInterface == "" { + network.NetworkInterface = name + } + } + return nil +} + +func IpamNoneDisableDns(network *types.Network) { + if network.IPAMOptions[types.Driver] == types.NoneIPAMDriver { + logrus.Debugf("dns disabled for network %q because ipam driver is set to none", network.Name) + network.DNSEnabled = false + } +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/interface.go b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go new file mode 100644 index 00000000000..650fcb193c9 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go @@ -0,0 +1,19 @@ +package util + +import "github.com/containers/common/libnetwork/types" + +// This is a helper package to allow code sharing between the different +// network interfaces. + +// NetUtil is a helper interface which all network interfaces should implement to allow easy code sharing +type NetUtil interface { + // ForEach executes the given function for each network + ForEach(func(types.Network)) + // Len returns the number of networks + Len() int + // DefaultInterfaceName returns the default interface name; it will be suffixed by a number + DefaultInterfaceName() string + // Network returns the network with the given name or ID.
+ // It returns an error if the network is not found + Network(nameOrID string) (*types.Network, error) +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go b/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go new file mode 100644 index 00000000000..20819f75663 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go @@ -0,0 +1,34 @@ +package util + +import "net" + +// getLiveNetworkSubnets returns a slice of subnets representing what the system +// has defined as network interfaces +func getLiveNetworkSubnets() ([]*net.IPNet, error) { + addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, err + } + nets := make([]*net.IPNet, 0, len(addrs)) + for _, address := range addrs { + _, n, err := net.ParseCIDR(address.String()) + if err != nil { + return nil, err + } + nets = append(nets, n) + } + return nets, nil +} + +// GetLiveNetworkNames returns a list of network interface names on the system +func GetLiveNetworkNames() ([]string, error) { + liveInterfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + interfaceNames := make([]string, 0, len(liveInterfaces)) + for _, i := range liveInterfaces { + interfaceNames = append(interfaceNames, i.Name) + } + return interfaceNames, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/ip.go b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go new file mode 100644 index 00000000000..6dc5d932538 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go @@ -0,0 +1,71 @@ +package util + +import ( + "crypto/rand" + "net" + + "github.com/pkg/errors" +) + +func incByte(subnet *net.IPNet, idx int, shift uint) error { + if idx < 0 { + return errors.New("no more subnets left") + } + + var val byte = 1 << shift + // on overflow we have to increment the previous byte + if uint(subnet.IP[idx])+uint(val) > 255 { + if err := incByte(subnet, idx-1, 0); err != nil { + return err + } + } + subnet.IP[idx] += val + return nil +} + +// NextSubnet returns the subnet incremented by one +func NextSubnet(subnet *net.IPNet) (*net.IPNet, error) { + newSubnet := &net.IPNet{ + IP: subnet.IP, + Mask: subnet.Mask, + } + ones, bits := newSubnet.Mask.Size() + if ones == 0 { + return nil, errors.Errorf("%s has only one subnet", subnet.String()) + } + zeroes := uint(bits - ones) + shift := zeroes % 8 + idx := (ones - 1) / 8 + if err := incByte(newSubnet, idx, shift); err != nil { + return nil, err + } + return newSubnet, nil +} + +func NetworkIntersectsWithNetworks(n *net.IPNet, networklist []*net.IPNet) bool { + for _, nw := range networklist { + if networkIntersect(n, nw) { + return true + } + } + return false +} + +func networkIntersect(n1, n2 *net.IPNet) bool { + return n2.Contains(n1.IP) || n1.Contains(n2.IP) +} + +// getRandomIPv6Subnet returns a random internal ipv6 subnet as described in RFC 4193. +func getRandomIPv6Subnet() (net.IPNet, error) { + ip := make(net.IP, 8, net.IPv6len) + // read 8 random bytes + _, err := rand.Read(ip) + if err != nil { + return net.IPNet{}, err + } + // first byte must be FD as per RFC 4193 + ip[0] = 0xfd + // add 8 zero bytes + ip = append(ip, make([]byte, 8)...)
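+ // the result is a /64 subnet inside the fd00::/8 ULA range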
+ return net.IPNet{IP: ip, Mask: net.CIDRMask(64, 128)}, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/parse.go b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go new file mode 100644 index 00000000000..1f68df0bb0d --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go @@ -0,0 +1,37 @@ +package util + +import ( + "strconv" + + "github.com/pkg/errors" +) + +// ParseMTU parses the mtu option +func ParseMTU(mtu string) (int, error) { + if mtu == "" { + return 0, nil // default + } + m, err := strconv.Atoi(mtu) + if err != nil { + return 0, err + } + if m < 0 { + return 0, errors.Errorf("mtu %d is less than zero", m) + } + return m, nil +} + +// ParseVlan parses the vlan option +func ParseVlan(vlan string) (int, error) { + if vlan == "" { + return 0, nil // default + } + v, err := strconv.Atoi(vlan) + if err != nil { + return 0, err + } + if v < 0 || v > 4094 { + return 0, errors.Errorf("vlan ID %d must be between 0 and 4094", v) + } + return v, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go new file mode 100644 index 00000000000..6b76a700b79 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go @@ -0,0 +1,131 @@ +package util + +import ( + "errors" + "fmt" + "net" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/util" + "github.com/sirupsen/logrus" +) + +// GetBridgeInterfaceNames returns all bridge interface names +// already used by network configs +func GetBridgeInterfaceNames(n NetUtil) []string { + names := make([]string, 0, n.Len()) + n.ForEach(func(net types.Network) { + if net.Driver == types.BridgeNetworkDriver { + names = append(names, net.NetworkInterface) + } + }) + return names +} + +// GetUsedNetworkNames returns all network names already used +// by network configs +func GetUsedNetworkNames(n NetUtil) []string { + names := make([]string, 0, n.Len()) + n.ForEach(func(net types.Network) { + names = append(names, net.Name) + }) + return names +} + +// GetFreeDeviceName returns a free device name which can +// be used for new configs as name and bridge interface name. +// The base name is suffixed by a number +func GetFreeDeviceName(n NetUtil) (string, error) { + bridgeNames := GetBridgeInterfaceNames(n) + netNames := GetUsedNetworkNames(n) + liveInterfaces, err := GetLiveNetworkNames() + if err != nil { + return "", err + } + names := make([]string, 0, len(bridgeNames)+len(netNames)+len(liveInterfaces)) + names = append(names, bridgeNames...) + names = append(names, netNames...) + names = append(names, liveInterfaces...) + // FIXME: Is a limit fine? + // Start at 1, 0 is reserved for the default network + for i := 1; i < 1000000; i++ { + deviceName := fmt.Sprintf("%s%d", n.DefaultInterfaceName(), i) + if !util.StringInSlice(deviceName, names) { + logrus.Debugf("found free device name %s", deviceName) + return deviceName, nil + } + } + return "", errors.New("could not find free device name, too many iterations") +} + +// GetUsedSubnets returns a list of all subnets used by network +// configs and interfaces on the host.
+func GetUsedSubnets(n NetUtil) ([]*net.IPNet, error) { + // first, load all used subnets from network configs + subnets := make([]*net.IPNet, 0, n.Len()) + n.ForEach(func(n types.Network) { + for i := range n.Subnets { + subnets = append(subnets, &n.Subnets[i].Subnet.IPNet) + } + }) + // second, load networks from the current system + liveSubnets, err := getLiveNetworkSubnets() + if err != nil { + return nil, err + } + return append(subnets, liveSubnets...), nil +} + +// GetFreeIPv4NetworkSubnet returns an unused ipv4 subnet +func GetFreeIPv4NetworkSubnet(usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) (*types.Subnet, error) { + var err error + for _, pool := range subnetPools { + // make sure to copy the netip to prevent overwriting the subnet pool + netIP := make(net.IP, net.IPv4len) + copy(netIP, pool.Base.IP) + network := &net.IPNet{ + IP: netIP, + Mask: net.CIDRMask(pool.Size, 32), + } + for pool.Base.Contains(network.IP) { + if !NetworkIntersectsWithNetworks(network, usedNetworks) { + logrus.Debugf("found free ipv4 network subnet %s", network.String()) + return &types.Subnet{ + Subnet: types.IPNet{IPNet: *network}, + }, nil + } + network, err = NextSubnet(network) + if err != nil { + // on error go to the next pool; the error is only returned when all pools are exhausted + break + } + } + } + + if err != nil { + return nil, err + } + return nil, errors.New("could not find free subnet from subnet pools") +} + +// GetFreeIPv6NetworkSubnet returns an unused ipv6 subnet +func GetFreeIPv6NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) { + // FIXME: Is 10000 fine as limit? We should prevent an endless loop. + for i := 0; i < 10000; i++ { + // RFC 4193: choose the ipv6 subnet randomly and NOT sequentially. + network, err := getRandomIPv6Subnet() + if err != nil { + return nil, err + } + if intersectsConfig := NetworkIntersectsWithNetworks(&network, usedNetworks); !intersectsConfig { + logrus.Debugf("found free ipv6 network subnet %s", network.String()) + return &types.Subnet{ + Subnet: types.IPNet{IPNet: network}, + }, nil + } + } + return nil, errors.New("failed to get random ipv6 subnet") +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go new file mode 100644 index 00000000000..4dd44110aaa --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go @@ -0,0 +1,124 @@ +package util + +import ( + "net" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + "github.com/pkg/errors" +) + +// ValidateSubnet will validate a given Subnet. It checks if the +// given gateway and lease range are part of this subnet. If the +// gateway is empty and addGateway is true, the first available ip +// in the subnet is assigned to it. +func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet) error { + if s == nil { + return errors.New("subnet is nil") + } + if s.Subnet.IP == nil { + return errors.New("subnet ip is nil") + } + + // Reparse to ensure subnet is valid. + // Do not use types.ParseCIDR() because we want the ip to be + // the network address and not a random ip in the subnet.
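+ // For example a subnet given as "10.0.0.1/24" is normalized to
+ // "10.0.0.0/24" here.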
+ _, n, err := net.ParseCIDR(s.Subnet.String()) + if err != nil { + return errors.Wrap(err, "subnet invalid") + } + + // check that the new subnet does not conflict with existing ones + if NetworkIntersectsWithNetworks(n, usedNetworks) { + return errors.Errorf("subnet %s is already used on the host or by another config", n.String()) + } + + s.Subnet = types.IPNet{IPNet: *n} + if s.Gateway != nil { + if !s.Subnet.Contains(s.Gateway) { + return errors.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet) + } + util.NormalizeIP(&s.Gateway) + } else if addGateway { + ip, err := util.FirstIPInSubnet(n) + if err != nil { + return err + } + s.Gateway = ip + } + + if s.LeaseRange != nil { + if s.LeaseRange.StartIP != nil { + if !s.Subnet.Contains(s.LeaseRange.StartIP) { + return errors.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet) + } + util.NormalizeIP(&s.LeaseRange.StartIP) + } + if s.LeaseRange.EndIP != nil { + if !s.Subnet.Contains(s.LeaseRange.EndIP) { + return errors.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet) + } + util.NormalizeIP(&s.LeaseRange.EndIP) + } + } + return nil +} + +// ValidateSubnets will validate the subnets for this network. +// It also sets the gateway if the gateway is empty and addGateway is set to true, +// and sets IPv6Enabled to true if at least one subnet is ipv6. +func ValidateSubnets(network *types.Network, addGateway bool, usedNetworks []*net.IPNet) error { + for i := range network.Subnets { + err := ValidateSubnet(&network.Subnets[i], addGateway, usedNetworks) + if err != nil { + return err + } + if util.IsIPv6(network.Subnets[i].Subnet.IP) { + network.IPv6Enabled = true + } + } + return nil +} + +func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOptions) error { + if namespacePath == "" { + return errors.New("namespacePath is empty") + } + if options.ContainerID == "" { + return errors.New("ContainerID is empty") + } + if len(options.Networks) == 0 { + return errors.New("must specify at least one network") + } + for name, netOpts := range options.Networks { + netOpts := netOpts + network, err := n.Network(name) + if err != nil { + return err + } + err = validatePerNetworkOpts(network, &netOpts) + if err != nil { + return err + } + } + return nil +} + +// validatePerNetworkOpts checks that all given static ips are in a subnet on this network +func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOptions) error { + if netOpts.InterfaceName == "" { + return errors.Errorf("interface name on network %s is empty", network.Name) + } + if network.IPAMOptions[types.Driver] == types.HostLocalIPAMDriver { + outer: + for _, ip := range netOpts.StaticIPs { + for _, s := range network.Subnets { + if s.Subnet.Contains(ip) { + continue outer + } + } + return errors.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name) + } + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go new file mode 100644 index 00000000000..0bb0539b294 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -0,0 +1,280 @@ +//go:build linux +// +build linux + +package netavark + +import ( + "encoding/json" + "net" + "os" + "path/filepath" + "time" + + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" +
"github.com/containers/common/pkg/util" + "github.com/containers/storage/pkg/stringid" + "github.com/pkg/errors" +) + +// NetworkCreate will take a partial filled Network and fill the +// missing fields. It creates the Network and returns the full Network. +func (n *netavarkNetwork) NetworkCreate(net types.Network) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + network, err := n.networkCreate(&net, false) + if err != nil { + return types.Network{}, err + } + // add the new network to the map + n.networks[network.Name] = network + return *network, nil +} + +func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (*types.Network, error) { + // if no driver is set use the default one + if newNetwork.Driver == "" { + newNetwork.Driver = types.DefaultNetworkDriver + } + if !defaultNet { + // FIXME: Should we use a different type for network create without the ID field? + // the caller is not allowed to set a specific ID + if newNetwork.ID != "" { + return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create") + } + + // generate random network ID + var i int + for i = 0; i < 1000; i++ { + id := stringid.GenerateNonCryptoID() + if _, err := n.getNetwork(id); err != nil { + newNetwork.ID = id + break + } + } + if i == 1000 { + return nil, errors.New("failed to create random network ID") + } + } + + err := internalutil.CommonNetworkCreate(n, newNetwork) + if err != nil { + return nil, err + } + + err = validateIPAMDriver(newNetwork) + if err != nil { + return nil, err + } + + // Only get the used networks for validation if we do not create the default network. + // The default network should not be validated against used subnets, we have to ensure + // that this network can always be created even when a subnet is already used on the host. + // This could happen if you run a container on this net, then the cni interface will be + // created on the host and "block" this subnet from being used again. + // Therefore the next podman command tries to create the default net again and it would + // fail because it thinks the network is used on the host. 
+ var usedNetworks []*net.IPNet + if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver { + usedNetworks, err = internalutil.GetUsedSubnets(n) + if err != nil { + return nil, err + } + } + + switch newNetwork.Driver { + case types.BridgeNetworkDriver: + err = internalutil.CreateBridge(n, newNetwork, usedNetworks, n.defaultsubnetPools) + if err != nil { + return nil, err + } + // validate the given options, we do not need them but just check to make sure they are valid + for key, value := range newNetwork.Options { + switch key { + case "mtu": + _, err = internalutil.ParseMTU(value) + if err != nil { + return nil, err + } + + case "vlan": + _, err = internalutil.ParseVlan(value) + if err != nil { + return nil, err + } + + default: + return nil, errors.Errorf("unsupported bridge network option %s", key) + } + } + case types.MacVLANNetworkDriver: + err = createMacvlan(newNetwork) + if err != nil { + return nil, err + } + default: + return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) + } + + // when we do not have ipam we must disable dns + internalutil.IpamNoneDisableDns(newNetwork) + + // add gateway when not internal or dns enabled + addGateway := !newNetwork.Internal || newNetwork.DNSEnabled + err = internalutil.ValidateSubnets(newNetwork, addGateway, usedNetworks) + if err != nil { + return nil, err + } + + newNetwork.Created = time.Now() + + if !defaultNet { + confPath := filepath.Join(n.networkConfigDir, newNetwork.Name+".json") + f, err := os.Create(confPath) + if err != nil { + return nil, err + } + defer f.Close() + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode(newNetwork) + if err != nil { + return nil, err + } + } + + return newNetwork, nil +} + +func createMacvlan(network *types.Network) error { + if network.NetworkInterface != "" { + interfaceNames, err := internalutil.GetLiveNetworkNames() + if err != nil { + return err + } + if !util.StringInSlice(network.NetworkInterface, interfaceNames) { + return errors.Errorf("parent interface %s does not exist", network.NetworkInterface) + } + } + + // we already validated the drivers before so we just have to set the default here + switch network.IPAMOptions[types.Driver] { + case "": + if len(network.Subnets) == 0 { + return errors.Errorf("macvlan driver needs at least one subnet specified, DHCP is not yet supported with netavark") + } + network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver + case types.HostLocalIPAMDriver: + if len(network.Subnets) == 0 { + return errors.Errorf("macvlan driver needs at least one subnet specified, when the host-local ipam driver is set") + } + } + + // validate the given options, we do not need them but just check to make sure they are valid + for key, value := range network.Options { + switch key { + case "mode": + if !util.StringInSlice(value, types.ValidMacVLANModes) { + return errors.Errorf("unknown macvlan mode %q", value) + } + case "mtu": + _, err := internalutil.ParseMTU(value) + if err != nil { + return err + } + default: + return errors.Errorf("unsupported macvlan network option %s", key) + } + } + return nil +} + +// NetworkRemove will remove the Network with the given name or ID. +// It does not ensure that the network is unused. 
+func (n *netavarkNetwork) NetworkRemove(nameOrID string) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return err + } + + // Removing the default network is not allowed. + if network.Name == n.defaultNetwork { + return errors.Errorf("default network %s cannot be removed", n.defaultNetwork) + } + + file := filepath.Join(n.networkConfigDir, network.Name+".json") + // make sure to not error for ErrNotExist + if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + delete(n.networks, network.Name) + return nil +} + +// NetworkList will return all known Networks. Optionally you can +// supply a list of filter functions. Only if a network matches all +// functions it is returned. +func (n *netavarkNetwork) NetworkList(filters ...types.FilterFunc) ([]types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + networks := make([]types.Network, 0, len(n.networks)) +outer: + for _, net := range n.networks { + for _, filter := range filters { + // All filters have to match, if one does not match we can skip to the next network. + if !filter(*net) { + continue outer + } + } + networks = append(networks, *net) + } + return networks, nil +} + +// NetworkInspect will return the Network with the given name or ID. +func (n *netavarkNetwork) NetworkInspect(nameOrID string) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return types.Network{}, err + } + return *network, nil +} + +func validateIPAMDriver(n *types.Network) error { + ipamDriver := n.IPAMOptions[types.Driver] + switch ipamDriver { + case "", types.HostLocalIPAMDriver: + case types.NoneIPAMDriver: + if len(n.Subnets) > 0 { + return errors.New("none ipam driver is set but subnets are given") + } + case types.DHCPIPAMDriver: + return errors.New("dhcp ipam driver is not yet supported with netavark") + default: + return errors.Errorf("unsupported ipam driver %q", ipamDriver) + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/const.go b/vendor/github.com/containers/common/libnetwork/netavark/const.go new file mode 100644 index 00000000000..29a7b4f2a58 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/const.go @@ -0,0 +1,6 @@ +//go:build linux +// +build linux + +package netavark + +const defaultBridgeName = "podman" diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go new file mode 100644 index 00000000000..ac87c5438be --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go @@ -0,0 +1,162 @@ +//go:build linux +// +build linux + +package netavark + +import ( + "encoding/json" + "errors" + "io" + "os" + "os/exec" + "strconv" + + "github.com/sirupsen/logrus" +) + +type netavarkError struct { + exitCode int + // Set the json key to "error" so we can directly unmarshal into this struct + Msg string `json:"error"` + err error +} + +func (e *netavarkError) Error() string { + ec := "" + // only add the exit code the the error message if we have at least info log level + // the normal user does not need to care about the number + if e.exitCode > 0 && logrus.IsLevelEnabled(logrus.InfoLevel) { 
+ ec = " (exit code " + strconv.Itoa(e.exitCode) + ")" + } + msg := "netavark" + ec + if len(msg) > 0 { + msg += ": " + e.Msg + } + if e.err != nil { + msg += ": " + e.err.Error() + } + return msg +} + +func (e *netavarkError) Unwrap() error { + return e.err +} + +func newNetavarkError(msg string, err error) error { + return &netavarkError{ + Msg: msg, + err: err, + } +} + +// Type to implement io.Writer interface +// This will write the logrus at info level +type logrusNetavarkWriter struct{} + +func (l *logrusNetavarkWriter) Write(b []byte) (int, error) { + logrus.Info("netavark: ", string(b)) + return len(b), nil +} + +// getRustLogEnv returns the RUST_LOG env var based on the current logrus level +func getRustLogEnv() string { + level := logrus.GetLevel().String() + // rust env_log uses warn instead of warning + if level == "warning" { + level = "warn" + } + // the rust netlink library is very verbose + // make sure to only log netavark logs + return "RUST_LOG=netavark=" + level +} + +// execNetavark will execute netavark with the following arguments +// It takes the path to the binary, the list of args and an interface which is +// marshaled to json and send via stdin to netavark. The result interface is +// used to marshal the netavark output into it. This can be nil. +// All errors return by this function should be of the type netavarkError +// to provide a helpful error message. +func (n *netavarkNetwork) execNetavark(args []string, stdin, result interface{}) error { + stdinR, stdinW, err := os.Pipe() + if err != nil { + return newNetavarkError("failed to create stdin pipe", err) + } + stdinWClosed := false + defer func() { + stdinR.Close() + if !stdinWClosed { + stdinW.Close() + } + }() + + stdoutR, stdoutW, err := os.Pipe() + if err != nil { + return newNetavarkError("failed to create stdout pipe", err) + } + stdoutWClosed := false + defer func() { + stdoutR.Close() + if !stdoutWClosed { + stdoutW.Close() + } + }() + + // connect stderr to the podman stderr for logging + var logWriter io.Writer = os.Stderr + if n.syslog { + // connect logrus to stderr as well so that the logs will be written to the syslog as well + logWriter = io.MultiWriter(logWriter, &logrusNetavarkWriter{}) + } + + cmd := exec.Command(n.netavarkBinary, append(n.getCommonNetavarkOptions(), args...)...) 
+ // connect the pipes to stdin and stdout
+ cmd.Stdin = stdinR
+ cmd.Stdout = stdoutW
+ cmd.Stderr = logWriter
+ // set the netavark log level to the same as podman's
+ cmd.Env = append(os.Environ(), getRustLogEnv())
+ // if we run with debug log level lets also set RUST_BACKTRACE=1 so we can get the full stack trace in case of panics
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ cmd.Env = append(cmd.Env, "RUST_BACKTRACE=1")
+ }
+
+ err = cmd.Start()
+ if err != nil {
+ return newNetavarkError("failed to start process", err)
+ }
+ err = json.NewEncoder(stdinW).Encode(stdin)
+ // we have to close stdinW so netavark gets the EOF and does not hang forever
+ stdinW.Close()
+ stdinWClosed = true
+ if err != nil {
+ return newNetavarkError("failed to encode stdin data", err)
+ }
+
+ dec := json.NewDecoder(stdoutR)
+
+ err = cmd.Wait()
+ // we have to close stdoutW so we can decode the json without hanging forever
+ stdoutW.Close()
+ stdoutWClosed = true
+ if err != nil {
+ exitError := &exec.ExitError{}
+ if errors.As(err, &exitError) {
+ ne := &netavarkError{}
+ // let's disallow unknown fields to make sure we do not get unexpected data
+ dec.DisallowUnknownFields()
+ // this will unmarshal the error message into the error struct
+ ne.err = dec.Decode(ne)
+ ne.exitCode = exitError.ExitCode()
+ return ne
+ }
+ return newNetavarkError("unexpected failure during execution", err)
+ }
+
+ if result != nil {
+ err = dec.Decode(result)
+ if err != nil {
+ return newNetavarkError("failed to decode result", err)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
new file mode 100644
index 00000000000..861854351d6
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
@@ -0,0 +1,371 @@
+//go:build linux
+// +build linux
+
+package netavark
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+
+ "github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/libnetwork/util"
+ "github.com/pkg/errors"
+ "go.etcd.io/bbolt"
+)
+
+// IPAM boltdb structure
+// Each network has its own bucket with the network name as bucket key.
+// Inside the network bucket there is an ID bucket which maps the container ID (key)
+// to a json array of ip addresses (value).
+// The network bucket also has a bucket for each subnet, the subnet is used as key.
+// Inside the subnet bucket an ip is used as key and the container ID as value.
+
+const (
+ idBucket = "ids"
+ // lastIP is used as the key to store the last allocated ip
+ // note that this string must not be 4 or 16 bytes long
+ lastIP = "lastIP"
+)
+
+var (
+ idBucketKey = []byte(idBucket)
+ lastIPKey = []byte(lastIP)
+)
+
+type ipamError struct {
+ msg string
+ cause error
+}
+
+func (e *ipamError) Error() string {
+ msg := "IPAM error"
+ if e.msg != "" {
+ msg += ": " + e.msg
+ }
+ if e.cause != nil {
+ msg += ": " + e.cause.Error()
+ }
+ return msg
+}
+
+func newIPAMError(cause error, msg string, args ...interface{}) *ipamError {
+ return &ipamError{
+ msg: fmt.Sprintf(msg, args...),
+ cause: cause,
+ }
+}
+
+// openDB will open the ipam database.
+// Note that the caller has to Close it.
+func (n *netavarkNetwork) openDB() (*bbolt.DB, error) {
+ db, err := bbolt.Open(n.ipamDBPath, 0o600, nil)
+ if err != nil {
+ return nil, newIPAMError(err, "failed to open database %s", n.ipamDBPath)
+ }
+ return db, nil
+}
+
+// allocIPs will allocate ips for the container.
It will change the +// NetworkOptions in place. When static ips are given it will validate +// that these are free to use and will allocate them to the container. +func (n *netavarkNetwork) allocIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.Update(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + + // create/get network bucket + netBkt, err := tx.CreateBucketIfNotExists([]byte(netName)) + if err != nil { + return newIPAMError(err, "failed to create/get network bucket for network %s", netName) + } + + // requestIPs is the list of ips which should be used for this container + requestIPs := make([]net.IP, 0, len(network.Subnets)) + + for i := range network.Subnets { + subnetBkt, err := netBkt.CreateBucketIfNotExists([]byte(network.Subnets[i].Subnet.String())) + if err != nil { + return newIPAMError(err, "failed to create/get subnet bucket for network %s", netName) + } + + // search for a static ip which matches the current subnet + // in this case the user wants this one and we should not assign a free one + var ip net.IP + for _, staticIP := range netOpts.StaticIPs { + if network.Subnets[i].Subnet.Contains(staticIP) { + ip = staticIP + break + } + } + + // when static ip is requested for this network + if ip != nil { + // convert to 4 byte ipv4 if needed + util.NormalizeIP(&ip) + id := subnetBkt.Get(ip) + if id != nil { + return newIPAMError(nil, "requested ip address %s is already allocated to container ID %s", ip.String(), string(id)) + } + } else { + ip, err = getFreeIPFromBucket(subnetBkt, &network.Subnets[i]) + if err != nil { + return err + } + err = subnetBkt.Put(lastIPKey, ip) + if err != nil { + return newIPAMError(err, "failed to store last ip in database") + } + } + + err = subnetBkt.Put(ip, []byte(opts.ContainerID)) + if err != nil { + return newIPAMError(err, "failed to store ip in database") + } + + requestIPs = append(requestIPs, ip) + } + + idsBucket, err := netBkt.CreateBucketIfNotExists(idBucketKey) + if err != nil { + return newIPAMError(err, "failed to create/get id bucket for network %s", netName) + } + + ipsBytes, err := json.Marshal(requestIPs) + if err != nil { + return newIPAMError(err, "failed to marshal ips") + } + + err = idsBucket.Put([]byte(opts.ContainerID), ipsBytes) + if err != nil { + return newIPAMError(err, "failed to store ips in database") + } + + netOpts.StaticIPs = requestIPs + opts.Networks[netName] = netOpts + } + return nil + }) + return err +} + +func getFreeIPFromBucket(bucket *bbolt.Bucket, subnet *types.Subnet) (net.IP, error) { + var rangeStart net.IP + var rangeEnd net.IP + if subnet.LeaseRange != nil { + rangeStart = subnet.LeaseRange.StartIP + rangeEnd = subnet.LeaseRange.EndIP + } + + if rangeStart == nil { + // let start with the first ip in subnet + rangeStart = util.NextIP(subnet.Subnet.IP) + } + + if rangeEnd == nil { + lastIP, err := util.LastIPInSubnet(&subnet.Subnet.IPNet) + // this error should never happen but lets check anyways to prevent panics + if err != nil { + return nil, errors.Wrap(err, "failed to get lastIP") + } + // ipv4 uses the last ip in a subnet for broadcast so we cannot use it + if util.IsIPv4(lastIP) { + lastIP = util.PrevIP(lastIP) + } + rangeEnd = lastIP + } + + lastIPByte := 
bucket.Get(lastIPKey) + curIP := net.IP(lastIPByte) + if curIP == nil { + curIP = rangeStart + } else { + curIP = util.NextIP(curIP) + } + + // store the start ip to make sure we know when we looped over all available ips + startIP := curIP + + for { + // skip the gateway + if subnet.Gateway != nil { + if util.Cmp(curIP, subnet.Gateway) == 0 { + curIP = util.NextIP(curIP) + continue + } + } + + // if we are at the end we need to jump back to the start ip + if util.Cmp(curIP, rangeEnd) > 0 { + if util.Cmp(rangeStart, startIP) == 0 { + return nil, newIPAMError(nil, "failed to find free IP in range: %s - %s", rangeStart.String(), rangeEnd.String()) + } + curIP = rangeStart + continue + } + + // check if ip is already used by another container + // if not return it + if bucket.Get(curIP) == nil { + return curIP, nil + } + + curIP = util.NextIP(curIP) + + if util.Cmp(curIP, startIP) == 0 { + return nil, newIPAMError(nil, "failed to find free IP in range: %s - %s", rangeStart.String(), rangeEnd.String()) + } + } +} + +// getAssignedIPs will read the ipam database and will fill in the used ips for this container. +// It will change the NetworkOptions in place. +func (n *netavarkNetwork) getAssignedIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.View(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + // get network bucket + netBkt := tx.Bucket([]byte(netName)) + if netBkt == nil { + return newIPAMError(nil, "failed to get network bucket for network %s", netName) + } + + idBkt := netBkt.Bucket(idBucketKey) + if idBkt == nil { + return newIPAMError(nil, "failed to get id bucket for network %s", netName) + } + + ipJSON := idBkt.Get([]byte(opts.ContainerID)) + if ipJSON == nil { + return newIPAMError(nil, "failed to get ips for container ID %s on network %s", opts.ContainerID, netName) + } + + // assignedIPs is the list of ips which should be used for this container + assignedIPs := make([]net.IP, 0, len(network.Subnets)) + + err = json.Unmarshal(ipJSON, &assignedIPs) + if err != nil { + return newIPAMError(err, "failed to unmarshal ips from database") + } + + for i := range assignedIPs { + util.NormalizeIP(&assignedIPs[i]) + } + + netOpts.StaticIPs = assignedIPs + opts.Networks[netName] = netOpts + } + return nil + }) + return err +} + +// deallocIPs will release the ips in the network options from the DB so that +// they can be reused by other containers. It expects that the network options +// are already filled with the correct ips. Use getAssignedIPs() for this. 
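+// Teardown relies on this pairing: it calls getAssignedIPs() first and then
+// deallocIPs(), so the options already carry the ips recorded in the db.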
+func (n *netavarkNetwork) deallocIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.Update(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + // get network bucket + netBkt := tx.Bucket([]byte(netName)) + if netBkt == nil { + return newIPAMError(nil, "failed to get network bucket for network %s", netName) + } + + for _, subnet := range network.Subnets { + subnetBkt := netBkt.Bucket([]byte(subnet.Subnet.String())) + if subnetBkt == nil { + return newIPAMError(nil, "failed to get subnet bucket for network %s", netName) + } + + // search for a static ip which matches the current subnet + // in this case the user wants this one and we should not assign a free one + var ip net.IP + for _, staticIP := range netOpts.StaticIPs { + if subnet.Subnet.Contains(staticIP) { + ip = staticIP + break + } + } + if ip == nil { + return newIPAMError(nil, "failed to find ip for subnet %s on network %s", subnet.Subnet.String(), netName) + } + util.NormalizeIP(&ip) + + err = subnetBkt.Delete(ip) + if err != nil { + return newIPAMError(err, "failed to remove ip %s from subnet bucket for network %s", ip.String(), netName) + } + } + + idBkt := netBkt.Bucket(idBucketKey) + if idBkt == nil { + return newIPAMError(nil, "failed to get id bucket for network %s", netName) + } + + err = idBkt.Delete([]byte(opts.ContainerID)) + if err != nil { + return newIPAMError(err, "failed to remove allocated ips for container ID %s on network %s", opts.ContainerID, netName) + } + } + return nil + }) + return err +} + +// requiresIPAMAlloc return true when we have to allocate ips for this network +// it checks the ipam driver and if subnets are set +func requiresIPAMAlloc(network *types.Network) bool { + // only do host allocation when driver is set to HostLocalIPAMDriver or unset + switch network.IPAMOptions[types.Driver] { + case "", types.HostLocalIPAMDriver: + default: + return false + } + + // no subnets == no ips to assign + return len(network.Subnets) > 0 +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go new file mode 100644 index 00000000000..8e7576a56e4 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go @@ -0,0 +1,323 @@ +//go:build linux +// +build linux + +package netavark + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/unshare" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type netavarkNetwork struct { + // networkConfigDir is directory where the network config files are stored. + networkConfigDir string + + // networkRunDir is where temporary files are stored, i.e.the ipam db, aardvark config etc + networkRunDir string + + // tells netavark whether this is rootless mode or rootful, "true" or "false" + networkRootless bool + + // netavarkBinary is the path to the netavark binary. + netavarkBinary string + // aardvarkBinary is the path to the aardvark binary. 
+ aardvarkBinary string
+
+ // defaultNetwork is the name for the default network.
+ defaultNetwork string
+ // defaultSubnet is the default subnet for the default network.
+ defaultSubnet types.IPNet
+
+ // defaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create
+ defaultsubnetPools []config.SubnetPool
+
+ // ipamDBPath is the path to the ip allocation bolt db
+ ipamDBPath string
+
+ // syslog describes whether the netavark debug output should be logged to the syslog as well.
+ // This will use logrus to do so, make sure logrus is set up to log to the syslog.
+ syslog bool
+
+ // lock is an internal lock for critical operations
+ lock lockfile.Locker
+
+ // modTime is the timestamp when the config dir was modified
+ modTime time.Time
+
+ // networks is a map with loaded networks, the key is the network name
+ networks map[string]*types.Network
+}
+
+type InitConfig struct {
+ // NetworkConfigDir is the directory where the network config files are stored.
+ NetworkConfigDir string
+
+ // NetavarkBinary is the path to the netavark binary.
+ NetavarkBinary string
+ // AardvarkBinary is the path to the aardvark binary.
+ AardvarkBinary string
+
+ // NetworkRunDir is where temporary files are stored, i.e. the ipam db, aardvark config
+ NetworkRunDir string
+
+ // DefaultNetwork is the name for the default network.
+ DefaultNetwork string
+ // DefaultSubnet is the default subnet for the default network.
+ DefaultSubnet string
+
+ // DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create
+ DefaultsubnetPools []config.SubnetPool
+
+ // Syslog describes whether the netavark debug output should be logged to the syslog as well.
+ // This will use logrus to do so, make sure logrus is set up to log to the syslog.
+ Syslog bool
+}
+
+// NewNetworkInterface creates the ContainerNetwork interface for the netavark backend.
+// Note: The networks are not loaded from disk until a method is called.
+func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
+ // TODO: consider using a shared memory lock
+ lock, err := lockfile.GetLockfile(filepath.Join(conf.NetworkConfigDir, "netavark.lock"))
+ if err != nil {
+ return nil, err
+ }
+
+ defaultNetworkName := conf.DefaultNetwork
+ if defaultNetworkName == "" {
+ defaultNetworkName = types.DefaultNetworkName
+ }
+
+ defaultSubnet := conf.DefaultSubnet
+ if defaultSubnet == "" {
+ defaultSubnet = types.DefaultSubnet
+ }
+ defaultNet, err := types.ParseCIDR(defaultSubnet)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to parse default subnet")
+ }
+
+ if err := os.MkdirAll(conf.NetworkConfigDir, 0o755); err != nil {
+ return nil, err
+ }
+
+ if err := os.MkdirAll(conf.NetworkRunDir, 0o755); err != nil {
+ return nil, err
+ }
+
+ defaultSubnetPools := conf.DefaultsubnetPools
+ if defaultSubnetPools == nil {
+ defaultSubnetPools = config.DefaultSubnetPools
+ }
+
+ n := &netavarkNetwork{
+ networkConfigDir: conf.NetworkConfigDir,
+ networkRunDir: conf.NetworkRunDir,
+ netavarkBinary: conf.NetavarkBinary,
+ aardvarkBinary: conf.AardvarkBinary,
+ networkRootless: unshare.IsRootless(),
+ ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"),
+ defaultNetwork: defaultNetworkName,
+ defaultSubnet: defaultNet,
+ defaultsubnetPools: defaultSubnetPools,
+ lock: lock,
+ syslog: conf.Syslog,
+ }
+
+ return n, nil
+}
+
+// Drivers will return the list of supported network drivers
+// for this interface.
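+// For netavark these are currently the bridge and macvlan drivers.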
+func (n *netavarkNetwork) Drivers() []string {
+ return []string{types.BridgeNetworkDriver, types.MacVLANNetworkDriver}
+}
+
+// DefaultNetworkName will return the default netavark network name.
+func (n *netavarkNetwork) DefaultNetworkName() string {
+ return n.defaultNetwork
+}
+
+func (n *netavarkNetwork) loadNetworks() error {
+ // check the mod time of the config dir
+ f, err := os.Stat(n.networkConfigDir)
+ if err != nil {
+ return err
+ }
+ modTime := f.ModTime()
+
+ // skip loading networks if they are already loaded and
+ // if the config dir was not modified since the last call
+ if n.networks != nil && modTime.Equal(n.modTime) {
+ return nil
+ }
+ // make sure to remove all networks before we reload them
+ n.networks = nil
+ n.modTime = modTime
+
+ files, err := ioutil.ReadDir(n.networkConfigDir)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ networks := make(map[string]*types.Network, len(files))
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+ if filepath.Ext(f.Name()) != ".json" {
+ continue
+ }
+
+ path := filepath.Join(n.networkConfigDir, f.Name())
+ file, err := os.Open(path)
+ if err != nil {
+ // do not log ENOENT errors
+ if !errors.Is(err, os.ErrNotExist) {
+ logrus.Warnf("Error loading network config file %q: %v", path, err)
+ }
+ continue
+ }
+ network := new(types.Network)
+ err = json.NewDecoder(file).Decode(network)
+ if err != nil {
+ logrus.Warnf("Error reading network config file %q: %v", path, err)
+ continue
+ }
+
+ // check that the filename matches the network name
+ if network.Name+".json" != f.Name() {
+ logrus.Warnf("Network config name %q does not match file name %q, skipping", network.Name, f.Name())
+ continue
+ }
+
+ if !types.NameRegex.MatchString(network.Name) {
+ logrus.Warnf("Network config %q has invalid name: %q, skipping: %v", path, network.Name, types.RegexError)
+ continue
+ }
+
+ err = parseNetwork(network)
+ if err != nil {
+ logrus.Warnf("Network config %q could not be parsed, skipping: %v", path, err)
+ continue
+ }
+
+ logrus.Debugf("Successfully loaded network %s: %v", network.Name, network)
+ networks[network.Name] = network
+ }
+
+ // create the default network in memory if it did not exist on disk
+ if networks[n.defaultNetwork] == nil {
+ networkInfo, err := n.createDefaultNetwork()
+ if err != nil {
+ return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork)
+ }
+ networks[n.defaultNetwork] = networkInfo
+ }
+ logrus.Debugf("Successfully loaded %d networks", len(networks))
+ n.networks = networks
+ return nil
+}
+
+func parseNetwork(network *types.Network) error {
+ if network.Labels == nil {
+ network.Labels = map[string]string{}
+ }
+ if network.Options == nil {
+ network.Options = map[string]string{}
+ }
+ if network.IPAMOptions == nil {
+ network.IPAMOptions = map[string]string{}
+ }
+
+ if len(network.ID) != 64 {
+ return errors.Errorf("invalid network ID %q", network.ID)
+ }
+
+ // add gateway when not internal or dns enabled
+ addGateway := !network.Internal || network.DNSEnabled
+ return util.ValidateSubnets(network, addGateway, nil)
+}
+
+func (n *netavarkNetwork) createDefaultNetwork() (*types.Network, error) {
+ net := types.Network{
+ Name: n.defaultNetwork,
+ NetworkInterface: defaultBridgeName + "0",
+ // Important: do not change this ID
+ ID: "2f259bab93aaaaa2542ba43ef33eb990d0999ee1b9924b557b7be53c0b7a1bb9",
+ Driver: types.BridgeNetworkDriver,
+ Subnets: []types.Subnet{
+ {Subnet: n.defaultSubnet},
+ },
+ }
+ return n.networkCreate(&net, true)
+}
+
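+// A minimal usage sketch (hypothetical caller, not part of this package); the
+// binary path is an assumption, the unset fields fall back to their defaults:
+//
+//	netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{
+//		NetworkConfigDir: "/etc/containers/networks",
+//		NetworkRunDir:    "/run/containers/networks",
+//		NetavarkBinary:   "/usr/libexec/podman/netavark", // assumed install location
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	nets, err := netInt.NetworkList()
+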
+// getNetwork will lookup a network by name or ID. It returns an +// error when no network was found or when more than one network +// with the given (partial) ID exists. +// getNetwork will read from the networks map, therefore the caller +// must ensure that n.lock is locked before using it. +func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) { + // fast path check the map key, this will only work for names + if val, ok := n.networks[nameOrID]; ok { + return val, nil + } + // If there was no match we might got a full or partial ID. + var net *types.Network + for _, val := range n.networks { + // This should not happen because we already looked up the map by name but check anyway. + if val.Name == nameOrID { + return val, nil + } + + if strings.HasPrefix(val.ID, nameOrID) { + if net != nil { + return nil, errors.Errorf("more than one result for network ID %s", nameOrID) + } + net = val + } + } + if net != nil { + return net, nil + } + return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID) +} + +// Implement the NetUtil interface for easy code sharing with other network interfaces. + +// ForEach call the given function for each network +func (n *netavarkNetwork) ForEach(run func(types.Network)) { + for _, val := range n.networks { + run(*val) + } +} + +// Len return the number of networks +func (n *netavarkNetwork) Len() int { + return len(n.networks) +} + +// DefaultInterfaceName return the default cni bridge name, must be suffixed with a number. +func (n *netavarkNetwork) DefaultInterfaceName() string { + return defaultBridgeName +} + +func (n *netavarkNetwork) Network(nameOrID string) (*types.Network, error) { + network, err := n.getNetwork(nameOrID) + if err != nil { + return nil, err + } + return network, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go new file mode 100644 index 00000000000..c5aa181fd32 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go @@ -0,0 +1,143 @@ +//go:build linux +// +build linux + +package netavark + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type netavarkOptions struct { + types.NetworkOptions + Networks map[string]*types.Network `json:"network_info"` +} + +// Setup will setup the container network namespace. It returns +// a map of StatusBlocks, the key is the network name. 
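+// Setup persists the ip allocations in the ipam db before invoking netavark;
+// on a netavark failure the allocations are rolled back via deallocIPs.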
+func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions) (map[string]types.StatusBlock, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + err = util.ValidateSetupOptions(n, namespacePath, options) + if err != nil { + return nil, err + } + + // allocate IPs in the IPAM db + err = n.allocIPs(&options.NetworkOptions) + if err != nil { + return nil, err + } + + netavarkOpts, err := n.convertNetOpts(options.NetworkOptions) + if err != nil { + return nil, errors.Wrap(err, "failed to convert net opts") + } + + // Warn users if one or more networks have dns enabled + // but aardvark-dns binary is not configured + for _, network := range netavarkOpts.Networks { + if network != nil && network.DNSEnabled && n.aardvarkBinary == "" { + // this is not a fatal error we can still use container without dns + logrus.Warnf("aardvark-dns binary not found, container dns will not be enabled") + break + } + } + + // trace output to get the json + if logrus.IsLevelEnabled(logrus.TraceLevel) { + b, err := json.Marshal(&netavarkOpts) + if err != nil { + return nil, err + } + // show the full netavark command so we can easily reproduce errors from the cli + logrus.Tracef("netavark command: printf '%s' | %s setup %s", string(b), n.netavarkBinary, namespacePath) + } + + result := map[string]types.StatusBlock{} + err = n.execNetavark([]string{"setup", namespacePath}, netavarkOpts, &result) + if err != nil { + // lets dealloc ips to prevent leaking + if err := n.deallocIPs(&options.NetworkOptions); err != nil { + logrus.Error(err) + } + return nil, err + } + + // make sure that the result makes sense + if len(result) != len(options.Networks) { + logrus.Errorf("unexpected netavark result: %v", result) + return nil, fmt.Errorf("unexpected netavark result length, want (%d), got (%d) networks", len(options.Networks), len(result)) + } + + return result, err +} + +// Teardown will teardown the container network namespace. 
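+// It frees the ipam allocations even when the netavark call itself fails,
+// so that the ips are not blocked forever.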
+func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownOptions) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + // get IPs from the IPAM db + err = n.getAssignedIPs(&options.NetworkOptions) + if err != nil { + // when there is an error getting the ips we should still continue + // to call teardown for netavark to prevent leaking network interfaces + logrus.Error(err) + } + + netavarkOpts, err := n.convertNetOpts(options.NetworkOptions) + if err != nil { + return errors.Wrap(err, "failed to convert net opts") + } + + retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil) + + // when netavark returned an error we still free the used ips + // otherwise we could end up in a state where block the ips forever + err = n.deallocIPs(&netavarkOpts.NetworkOptions) + if err != nil { + if retErr != nil { + logrus.Error(err) + } else { + retErr = err + } + } + + return retErr +} + +func (n *netavarkNetwork) getCommonNetavarkOptions() []string { + return []string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "--aardvark-binary=" + n.aardvarkBinary} +} + +func (n *netavarkNetwork) convertNetOpts(opts types.NetworkOptions) (*netavarkOptions, error) { + netavarkOptions := netavarkOptions{ + NetworkOptions: opts, + Networks: make(map[string]*types.Network, len(opts.Networks)), + } + + for network := range opts.Networks { + net, err := n.getNetwork(network) + if err != nil { + return nil, err + } + netavarkOptions.Networks[network] = net + } + return &netavarkOptions, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go new file mode 100644 index 00000000000..893bdea2ef3 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/network/interface.go @@ -0,0 +1,202 @@ +//go:build linux +// +build linux + +package network + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/common/libnetwork/cni" + "github.com/containers/common/libnetwork/netavark" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/machine" + "github.com/containers/storage" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/unshare" + "github.com/sirupsen/logrus" +) + +const ( + // defaultNetworkBackendFileName is the file name for sentinel file to store the backend + defaultNetworkBackendFileName = "defaultNetworkBackend" + // cniConfigDir is the directory where cni configuration is found + cniConfigDir = "/etc/cni/net.d/" + // cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins + cniConfigDirRootless = "cni/net.d/" + // netavarkConfigDir is the config directory for the rootful network files + netavarkConfigDir = "/etc/containers/networks" + // netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db + netavarkRunDir = "/run/containers/networks" + + // netavarkBinary is the name of the netavark binary + netavarkBinary = "netavark" + // aardvarkBinary is the name of the aardvark binary + aardvarkBinary = "aardvark-dns" +) + +// NetworkBackend returns the network backend name and interface +// It returns either the CNI or netavark backend depending on what is set in the config. 
+// If the backend is set to "" we will automatically assign the backend using the following checks:
+// 1. read ${graphroot}/defaultNetworkBackend
+// 2. find the netavark binary (if it is not installed, use CNI)
+// 3. check containers, images and CNI networks; if any exist we have an existing install and should continue to use CNI
+func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) {
+ backend := types.NetworkBackend(conf.Network.NetworkBackend)
+ if backend == "" {
+ var err error
+ backend, err = defaultNetworkBackend(store, conf)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to get default network backend: %w", err)
+ }
+ }
+
+ switch backend {
+ case types.Netavark:
+ netavarkBin, err := conf.FindHelperBinary(netavarkBinary, false)
+ if err != nil {
+ return "", nil, err
+ }
+
+ aardvarkBin, _ := conf.FindHelperBinary(aardvarkBinary, false)
+
+ confDir := conf.Network.NetworkConfigDir
+ if confDir == "" {
+ confDir = getDefaultNetavarkConfigDir(store)
+ }
+
+ // We cannot use the runroot for rootful since the network namespace is shared for
+ // all libpod instances, so they also have to share the same ipam db.
+ // For rootless we have our own network namespace per libpod instance,
+ // so this is not a problem there.
+ runDir := netavarkRunDir
+ if unshare.IsRootless() {
+ runDir = filepath.Join(store.RunRoot(), "networks")
+ }
+
+ netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{
+ NetworkConfigDir: confDir,
+ NetworkRunDir: runDir,
+ NetavarkBinary: netavarkBin,
+ AardvarkBinary: aardvarkBin,
+ DefaultNetwork: conf.Network.DefaultNetwork,
+ DefaultSubnet: conf.Network.DefaultSubnet,
+ DefaultsubnetPools: conf.Network.DefaultSubnetPools,
+ Syslog: syslog,
+ })
+ return types.Netavark, netInt, err
+ case types.CNI:
+ netInt, err := getCniInterface(conf)
+ return types.CNI, netInt, err
+
+ default:
+ return "", nil, fmt.Errorf("unsupported network backend %q, check network_backend in containers.conf", backend)
+ }
+}
+
+func defaultNetworkBackend(store storage.Store, conf *config.Config) (backend types.NetworkBackend, err error) {
+ // read the defaultNetworkBackend file
+ file := filepath.Join(store.GraphRoot(), defaultNetworkBackendFileName)
+ b, err := ioutil.ReadFile(file)
+ if err == nil {
+ val := string(b)
+ if val == string(types.Netavark) {
+ return types.Netavark, nil
+ }
+ if val == string(types.CNI) {
+ return types.CNI, nil
+ }
+ return "", fmt.Errorf("unknown network backend value %q in %q", val, file)
+ }
+ // fail for all errors except ENOENT
+ if !errors.Is(err, os.ErrNotExist) {
+ return "", fmt.Errorf("could not read network backend value: %w", err)
+ }
+
+ // cache the network backend to make sure the same one will always be used
+ defer func() {
+ // only write when there is no error
+ if err == nil {
+ if err := ioutils.AtomicWriteFile(file, []byte(backend), 0o644); err != nil {
+ logrus.Errorf("could not write network backend to file: %v", err)
+ }
+ }
+ }()
+
+ _, err = conf.FindHelperBinary("netavark", false)
+ if err != nil {
+ // if we cannot find netavark use CNI
+ return types.CNI, nil
+ }
+
+ // now check if there are already containers, images and CNI networks (new install?)
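+ // heuristic: no containers, no images and at most the default CNI network
+ // means a fresh install, so it is safe to default to netavark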
+ cons, err := store.Containers()
+ if err != nil {
+ return "", err
+ }
+ if len(cons) == 0 {
+ imgs, err := store.Images()
+ if err != nil {
+ return "", err
+ }
+ if len(imgs) == 0 {
+ cniInterface, err := getCniInterface(conf)
+ if err == nil {
+ nets, err := cniInterface.NetworkList()
+ // there is always a default network so check <= 1
+ if err == nil && len(nets) <= 1 {
+ // we have a fresh system so use netavark
+ return types.Netavark, nil
+ }
+ }
+ }
+ }
+ return types.CNI, nil
+}
+
+func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) {
+ confDir := conf.Network.NetworkConfigDir
+ if confDir == "" {
+ var err error
+ confDir, err = getDefultCNIConfigDir()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return cni.NewCNINetworkInterface(&cni.InitConfig{
+ CNIConfigDir: confDir,
+ CNIPluginDirs: conf.Network.CNIPluginDirs,
+ DefaultNetwork: conf.Network.DefaultNetwork,
+ DefaultSubnet: conf.Network.DefaultSubnet,
+ DefaultsubnetPools: conf.Network.DefaultSubnetPools,
+ IsMachine: machine.IsGvProxyBased(),
+ })
+}
+
+func getDefultCNIConfigDir() (string, error) {
+ if !unshare.IsRootless() {
+ return cniConfigDir, nil
+ }
+
+ configHome, err := homedir.GetConfigHome()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(configHome, cniConfigDirRootless), nil
+}
+
+// getDefaultNetavarkConfigDir returns the netavark config dir. For rootful it will
+// use "/etc/containers/networks" and for rootless "$graphroot/networks". We cannot
+// use the graphroot for rootful since the network namespace is shared for all
+// libpod instances.
+func getDefaultNetavarkConfigDir(store storage.Store) string {
+ if !unshare.IsRootless() {
+ return netavarkConfigDir
+ }
+ return filepath.Join(store.GraphRoot(), "networks")
+}
diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go
new file mode 100644
index 00000000000..a1e4d71edd8
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/types/const.go
@@ -0,0 +1,50 @@
+package types
+
+const (
+ // BridgeNetworkDriver defines the bridge driver
+ BridgeNetworkDriver = "bridge"
+ // DefaultNetworkDriver is the default network type used
+ DefaultNetworkDriver = BridgeNetworkDriver
+ // MacVLANNetworkDriver defines the macvlan driver
+ MacVLANNetworkDriver = "macvlan"
+ // IPVLANNetworkDriver defines the ipvlan driver
+ IPVLANNetworkDriver = "ipvlan"
+
+ // IPAM drivers
+ Driver = "driver"
+ // HostLocalIPAMDriver stores the ip locally in a db
+ HostLocalIPAMDriver = "host-local"
+ // DHCPIPAMDriver gets the subnet and ip from a dhcp server
+ DHCPIPAMDriver = "dhcp"
+ // NoneIPAMDriver does not provide ipam management
+ NoneIPAMDriver = "none"
+
+ // DefaultNetworkName is the name that will be used for the default CNI network.
+ DefaultNetworkName = "podman"
+ // DefaultSubnet is the subnet that will be used for the default CNI network.
+ DefaultSubnet = "10.88.0.0/16" + + // valid macvlan driver mode values + MacVLANModeBridge = "bridge" + MacVLANModePrivate = "private" + MacVLANModeVepa = "vepa" + MacVLANModePassthru = "passthru" + + // valid ipvlan driver modes + IPVLANModeL2 = "l2" + IPVLANModeL3 = "l3" + IPVLANModeL3s = "l3s" +) + +type NetworkBackend string + +const ( + CNI NetworkBackend = "cni" + Netavark NetworkBackend = "netavark" +) + +// ValidMacVLANModes is the list of valid mode options for the macvlan driver +var ValidMacVLANModes = []string{MacVLANModeBridge, MacVLANModePrivate, MacVLANModeVepa, MacVLANModePassthru} + +// ValidIPVLANModes is the list of valid mode options for the ipvlan driver +var ValidIPVLANModes = []string{IPVLANModeL2, IPVLANModeL3, IPVLANModeL3s} diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go new file mode 100644 index 00000000000..d37e529dfe6 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -0,0 +1,25 @@ +package types + +import ( + "regexp" + + "github.com/pkg/errors" +) + +var ( + // ErrNoSuchNetwork indicates the requested network does not exist + ErrNoSuchNetwork = errors.New("network not found") + + // ErrInvalidArg indicates that an invalid argument was passed + ErrInvalidArg = errors.New("invalid argument") + + // ErrNetworkExists indicates that a network with the given name already + // exists. + ErrNetworkExists = errors.New("network already exists") + + // NameRegex is a regular expression to validate names. + // This must NOT be changed. + NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + // RegexError is thrown in presence of an invalid name. + RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") +) diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go new file mode 100644 index 00000000000..de865537738 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -0,0 +1,282 @@ +package types + +import ( + "encoding/json" + "net" + "time" +) + +type ContainerNetwork interface { + // NetworkCreate will take a partial filled Network and fill the + // missing fields. It creates the Network and returns the full Network. + NetworkCreate(Network) (Network, error) + // NetworkRemove will remove the Network with the given name or ID. + NetworkRemove(nameOrID string) error + // NetworkList will return all known Networks. Optionally you can + // supply a list of filter functions. Only if a network matches all + // functions it is returned. + NetworkList(...FilterFunc) ([]Network, error) + // NetworkInspect will return the Network with the given name or ID. + NetworkInspect(nameOrID string) (Network, error) + + // Setup will setup the container network namespace. It returns + // a map of StatusBlocks, the key is the network name. + Setup(namespacePath string, options SetupOptions) (map[string]StatusBlock, error) + // Teardown will teardown the container network namespace. + Teardown(namespacePath string, options TeardownOptions) error + + // Drivers will return the list of supported network drivers + // for this interface. + Drivers() []string + + // DefaultNetworkName will return the default network name + // for this interface. + DefaultNetworkName() string +} + +// Network describes the Network attributes. +type Network struct { + // Name of the Network. 
+ Name string `json:"name"` + // ID of the Network. + ID string `json:"id"` + // Driver for this Network, e.g. bridge, macvlan... + Driver string `json:"driver"` + // NetworkInterface is the network interface name on the host. + NetworkInterface string `json:"network_interface,omitempty"` + // Created contains the timestamp when this network was created. + Created time.Time `json:"created,omitempty"` + // Subnets to use for this network. + Subnets []Subnet `json:"subnets,omitempty"` + // IPv6Enabled if set to true an ipv6 subnet should be created for this net. + IPv6Enabled bool `json:"ipv6_enabled"` + // Internal is whether the Network should not have external routes + // to public or other Networks. + Internal bool `json:"internal"` + // DNSEnabled is whether name resolution is active for container on + // this Network. + DNSEnabled bool `json:"dns_enabled"` + // Labels is a set of key-value labels that have been applied to the + // Network. + Labels map[string]string `json:"labels,omitempty"` + // Options is a set of key-value options that have been applied to + // the Network. + Options map[string]string `json:"options,omitempty"` + // IPAMOptions contains options used for the ip assignment. + IPAMOptions map[string]string `json:"ipam_options,omitempty"` +} + +// IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods. +type IPNet struct { + net.IPNet +} + +// ParseCIDR parse a string to IPNet +func ParseCIDR(cidr string) (IPNet, error) { + ip, subnet, err := net.ParseCIDR(cidr) + if err != nil { + return IPNet{}, err + } + // convert to 4 bytes if ipv4 + ipv4 := ip.To4() + if ipv4 != nil { + ip = ipv4 + } + subnet.IP = ip + return IPNet{*subnet}, err +} + +func (n *IPNet) MarshalText() ([]byte, error) { + return []byte(n.String()), nil +} + +func (n *IPNet) UnmarshalText(text []byte) error { + subnet, err := ParseCIDR(string(text)) + if err != nil { + return err + } + *n = subnet + return nil +} + +// HardwareAddr is the same as net.HardwareAddr except +// that it adds the json marshal/unmarshal methods. +// This allows us to read the mac from a json string +// and a byte array. +// swagger:model MacAddress +type HardwareAddr net.HardwareAddr + +func (h *HardwareAddr) String() string { + return (*net.HardwareAddr)(h).String() +} + +func (h HardwareAddr) MarshalText() ([]byte, error) { + return []byte(h.String()), nil +} + +func (h *HardwareAddr) UnmarshalJSON(text []byte) error { + if len(text) == 0 { + *h = nil + return nil + } + + // if the json string start with a quote we got a string + // unmarshal the string and parse the mac from this string + if string(text[0]) == `"` { + var macString string + err := json.Unmarshal(text, &macString) + if err == nil { + mac, err := net.ParseMAC(macString) + if err == nil { + *h = HardwareAddr(mac) + return nil + } + } + } + // not a string or got an error fallback to the normal parsing + mac := make(net.HardwareAddr, 0, 6) + // use the standard json unmarshal for backwards compat + err := json.Unmarshal(text, &mac) + if err != nil { + return err + } + *h = HardwareAddr(mac) + return nil +} + +type Subnet struct { + // Subnet for this Network in CIDR form. + // swagger:strfmt string + Subnet IPNet `json:"subnet"` + // Gateway IP for this Network. + // swagger:strfmt string + Gateway net.IP `json:"gateway,omitempty"` + // LeaseRange contains the range where IP are leased. Optional. + LeaseRange *LeaseRange `json:"lease_range,omitempty"` +} + +// LeaseRange contains the range where IP are leased. 
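+// Both StartIP and EndIP are inclusive bounds: the ipam allocator treats them
+// as the first and last assignable addresses of the range.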
+type LeaseRange struct { + // StartIP first IP in the subnet which should be used to assign ips. + // swagger:strfmt string + StartIP net.IP `json:"start_ip,omitempty"` + // EndIP last IP in the subnet which should be used to assign ips. + // swagger:strfmt string + EndIP net.IP `json:"end_ip,omitempty"` +} + +// StatusBlock contains the network information about a container +// connected to one Network. +type StatusBlock struct { + // Interfaces contains the created network interface in the container. + // The map key is the interface name. + Interfaces map[string]NetInterface `json:"interfaces,omitempty"` + // DNSServerIPs nameserver addresses which should be added to + // the containers resolv.conf file. + DNSServerIPs []net.IP `json:"dns_server_ips,omitempty"` + // DNSSearchDomains search domains which should be added to + // the containers resolv.conf file. + DNSSearchDomains []string `json:"dns_search_domains,omitempty"` +} + +// NetInterface contains the settings for a given network interface. +type NetInterface struct { + // Subnets list of assigned subnets with their gateway. + Subnets []NetAddress `json:"subnets,omitempty"` + // MacAddress for this Interface. + MacAddress HardwareAddr `json:"mac_address"` +} + +// NetAddress contains the ip address, subnet and gateway. +type NetAddress struct { + // IPNet of this NetAddress. Note that this is a subnet but it has to contain the + // actual ip of the network interface and not the network address. + IPNet IPNet `json:"ipnet"` + // Gateway for the network. This can be empty if there is no gateway, e.g. internal network. + Gateway net.IP `json:"gateway,omitempty"` +} + +// PerNetworkOptions are options which should be set on a per network basis. +type PerNetworkOptions struct { + // StaticIPs for this container. Optional. + StaticIPs []net.IP `json:"static_ips,omitempty"` + // Aliases contains a list of names which the dns server should resolve + // to this container. Should only be set when DNSEnabled is true on the Network. + // If aliases are set but there is no dns support for this network the + // network interface implementation should ignore this and NOT error. + // Optional. + Aliases []string `json:"aliases,omitempty"` + // StaticMac for this container. Optional. + StaticMAC HardwareAddr `json:"static_mac,omitempty"` + // InterfaceName for this container. Required in the backend. + // Optional in the frontend. Will be filled with ethX (where X is a integer) when empty. + InterfaceName string `json:"interface_name"` +} + +// NetworkOptions for a given container. +type NetworkOptions struct { + // ContainerID is the container id, used for iptables comments and ipam allocation. + ContainerID string `json:"container_id"` + // ContainerName is the container name, used as dns name. + ContainerName string `json:"container_name"` + // PortMappings contains the port mappings for this container + PortMappings []PortMapping `json:"port_mappings,omitempty"` + // Networks contains all networks with the PerNetworkOptions. + // The map should contain at least one element. + Networks map[string]PerNetworkOptions `json:"networks"` +} + +// PortMapping is one or more ports that will be mapped into the container. +type PortMapping struct { + // HostIP is the IP that we will bind to on the host. + // If unset, assumed to be 0.0.0.0 (all interfaces). + HostIP string `json:"host_ip"` + // ContainerPort is the port number that will be exposed from the + // container. + // Mandatory. 
+ ContainerPort uint16 `json:"container_port"` + // HostPort is the port number that will be forwarded from the host into + // the container. + // If omitted, a random port on the host (guaranteed to be over 1024) + // will be assigned. + HostPort uint16 `json:"host_port"` + // Range is the number of ports that will be forwarded, starting at + // HostPort and ContainerPort and counting up. + // This is 1-indexed, so 1 is assumed to be a single port (only the + // Hostport:Containerport mapping will be added), 2 is two ports (both + // Hostport:Containerport and Hostport+1:Containerport+1), etc. + // If unset, assumed to be 1 (a single port). + // Both hostport + range and containerport + range must be less than + // 65536. + Range uint16 `json:"range"` + // Protocol is the protocol forward. + // Must be either "tcp", "udp", and "sctp", or some combination of these + // separated by commas. + // If unset, assumed to be TCP. + Protocol string `json:"protocol"` +} + +// OCICNIPortMapping maps to the standard CNI portmapping Capability. +// Deprecated: Do not use this struct for new fields. This only exists +// for backwards compatibility. +type OCICNIPortMapping struct { + // HostPort is the port number on the host. + HostPort int32 `json:"hostPort"` + // ContainerPort is the port number inside the sandbox. + ContainerPort int32 `json:"containerPort"` + // Protocol is the protocol of the port mapping. + Protocol string `json:"protocol"` + // HostIP is the host ip to use. + HostIP string `json:"hostIP"` +} + +type SetupOptions struct { + NetworkOptions +} + +type TeardownOptions struct { + NetworkOptions +} + +// FilterFunc can be passed to NetworkList to filter the networks. +type FilterFunc func(Network) bool diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go new file mode 100644 index 00000000000..58d79d25bd5 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/filters.go @@ -0,0 +1,80 @@ +package util + +import ( + "strings" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/filters" + "github.com/containers/common/pkg/util" + "github.com/pkg/errors" +) + +func GenerateNetworkFilters(f map[string][]string) ([]types.FilterFunc, error) { + filterFuncs := make([]types.FilterFunc, 0, len(f)) + for key, filterValues := range f { + filterFunc, err := createFilterFuncs(key, filterValues) + if err != nil { + return nil, err + } + filterFuncs = append(filterFuncs, filterFunc) + } + return filterFuncs, nil +} + +func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, error) { + switch strings.ToLower(key) { + case "name": + // matches one name, regex allowed + return func(net types.Network) bool { + return util.StringMatchRegexSlice(net.Name, filterValues) + }, nil + + case types.Driver: + // matches network driver + return func(net types.Network) bool { + return util.StringInSlice(net.Driver, filterValues) + }, nil + + case "id": + // matches part of one id + return func(net types.Network) bool { + return util.StringMatchRegexSlice(net.ID, filterValues) + }, nil + + // TODO: add dns enabled, internal filter + } + return createPruneFilterFuncs(key, filterValues) +} + +func GenerateNetworkPruneFilters(f map[string][]string) ([]types.FilterFunc, error) { + filterFuncs := make([]types.FilterFunc, 0, len(f)) + for key, filterValues := range f { + filterFunc, err := createPruneFilterFuncs(key, filterValues) + if err != nil { + return nil, 
err + } + filterFuncs = append(filterFuncs, filterFunc) + } + return filterFuncs, nil +} + +func createPruneFilterFuncs(key string, filterValues []string) (types.FilterFunc, error) { + switch strings.ToLower(key) { + case "label": + // matches all labels + return func(net types.Network) bool { + return filters.MatchLabelFilters(filterValues, net.Labels) + }, nil + + case "until": + until, err := filters.ComputeUntilTimestamp(filterValues) + if err != nil { + return nil, err + } + return func(net types.Network) bool { + return net.Created.Before(until) + }, nil + default: + return nil, errors.Errorf("invalid filter %q", key) + } +} diff --git a/vendor/github.com/containers/common/libnetwork/util/ip.go b/vendor/github.com/containers/common/libnetwork/util/ip.go new file mode 100644 index 00000000000..7c315e31293 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/ip.go @@ -0,0 +1,56 @@ +package util + +import ( + "net" +) + +// IsIPv6 returns true if netIP is IPv6. +func IsIPv6(netIP net.IP) bool { + return netIP != nil && netIP.To4() == nil +} + +// IsIPv4 returns true if netIP is IPv4. +func IsIPv4(netIP net.IP) bool { + return netIP != nil && netIP.To4() != nil +} + +// LastIPInSubnet gets the last IP in a subnet +func LastIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer + // re-parse to ensure clean network address + _, cidr, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, err + } + + ones, bits := cidr.Mask.Size() + if ones == bits { + return cidr.IP, nil + } + for i := range cidr.IP { + cidr.IP[i] |= ^cidr.Mask[i] + } + return cidr.IP, nil +} + +// FirstIPInSubnet gets the first IP in a subnet +func FirstIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer + // re-parse to ensure clean network address + _, cidr, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, err + } + ones, bits := cidr.Mask.Size() + if ones == bits { + return cidr.IP, nil + } + cidr.IP[len(cidr.IP)-1]++ + return cidr.IP, nil +} + +// NormalizeIP will transform the given ip to the 4 byte len ipv4 if possible +func NormalizeIP(ip *net.IP) { + ipv4 := ip.To4() + if ipv4 != nil { + *ip = ipv4 + } +} diff --git a/vendor/github.com/containers/common/libnetwork/util/ip_calc.go b/vendor/github.com/containers/common/libnetwork/util/ip_calc.go new file mode 100644 index 00000000000..a27ddf78bf7 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/ip_calc.go @@ -0,0 +1,53 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "math/big" + "net" +) + +// NextIP returns IP incremented by 1 +func NextIP(ip net.IP) net.IP { + i := ipToInt(ip) + return intToIP(i.Add(i, big.NewInt(1))) +} + +// PrevIP returns IP decremented by 1 +func PrevIP(ip net.IP) net.IP { + i := ipToInt(ip) + return intToIP(i.Sub(i, big.NewInt(1))) +} + +// Cmp compares two IPs, returning the usual ordering: +// a < b : -1 +// a == b : 0 +// a > b : 1 +func Cmp(a, b net.IP) int { + aa := ipToInt(a) + bb := ipToInt(b) + return aa.Cmp(bb) +} + +func ipToInt(ip net.IP) *big.Int { + if v := ip.To4(); v != nil { + return big.NewInt(0).SetBytes(v) + } + return big.NewInt(0).SetBytes(ip.To16()) +} + +func intToIP(i *big.Int) net.IP { + return net.IP(i.Bytes()) +} diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go index 735d194932f..35f79a1adb1 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go @@ -1,3 +1,4 @@ +//go:build linux && apparmor // +build linux,apparmor package apparmor @@ -232,7 +233,6 @@ func parseAAParserVersion(output string) (int, error) { // major*10^5 + minor*10^3 + patch*10^0 numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel return numericVersion, nil - } // CheckProfileAndLoadDefault checks if the specified profile is loaded and diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go index 021e32571df..667fa9f2655 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go @@ -1,3 +1,4 @@ +//go:build linux && apparmor // +build linux,apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go index 13469f1b629..dacfc2f48c2 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !apparmor // +build !linux !apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go index 61b3653e57c..f61bd3bb26b 100644 --- a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go +++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package cgroupv2 diff --git a/vendor/github.com/containers/common/pkg/chown/chown_unix.go b/vendor/github.com/containers/common/pkg/chown/chown_unix.go index 921927de4cd..61327fd2485 100644 --- a/vendor/github.com/containers/common/pkg/chown/chown_unix.go +++ b/vendor/github.com/containers/common/pkg/chown/chown_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package chown @@ -40,7 +41,6 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error { return nil }) - if err != nil { return errors.Wrap(err, "failed to chown recursively host path") } diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index d5be77edd33..a86eca88efd 100644 --- 
a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -2,6 +2,7 @@ package config import ( "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -10,6 +11,7 @@ import ( "sync" "github.com/BurntSushi/toml" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/capabilities" "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" @@ -48,6 +50,18 @@ const ( BoltDBStateStore RuntimeStateStore = iota ) +// ProxyEnv is a list of proxy environment variables +var ProxyEnv = []string{ + "http_proxy", + "https_proxy", + "ftp_proxy", + "no_proxy", + "HTTP_PROXY", + "HTTPS_PROXY", + "FTP_PROXY", + "NO_PROXY", +} + // Config contains configuration options for container tools type Config struct { // Containers specify settings that configure how containers will run on the system @@ -60,6 +74,8 @@ type Config struct { Network NetworkConfig `toml:"network"` // Secret section defines configurations for the secret management Secrets SecretConfig `toml:"secrets"` + // ConfigMap section defines configurations for configmap management + ConfigMaps ConfigMapConfig `toml:"configmaps"` } // ContainersConfig represents the "containers" TOML config table @@ -79,6 +95,13 @@ type ContainersConfig struct { // Annotations to add to all containers Annotations []string `toml:"annotations,omitempty"` + // BaseHostsFile is the path to a hosts file whose entries are added to + // the container's hosts file. The special value "image" is allowed, + // which uses the /etc/hosts file from within the image, as is "none", + // which uses no base file at all. If it is empty we default + // to /etc/hosts. + BaseHostsFile string `toml:"base_hosts_file,omitempty"` + // Default way to create a cgroup namespace for the container CgroupNS string `toml:"cgroupns,omitempty"` @@ -120,6 +143,9 @@ type ContainersConfig struct { // EnvHost Pass all host environment variables into the container. EnvHost bool `toml:"env_host,omitempty"` + // HostContainersInternalIP is used to set a specific host.containers.internal IP. + HostContainersInternalIP string `toml:"host_containers_internal_ip,omitempty"` + // HTTPProxy is the proxy environment variable list to apply to container process HTTPProxy bool `toml:"http_proxy,omitempty"` @@ -167,11 +193,6 @@ type ContainersConfig struct { // performance implications. PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"` - // RootlessNetworking depicts the "kind" of networking for rootless - // containers. Valid options are `slirp4netns` and `cni`. Default is - // `slirp4netns` on Linux, and `cni` on non-Linux OSes. - RootlessNetworking string `toml:"rootless_networking,omitempty"` - // SeccompProfile is the seccomp.json profile path which is used as the // default for the runtime. SeccompProfile string `toml:"seccomp_profile,omitempty"` @@ -215,6 +236,12 @@ type EngineConfig struct { // The first path pointing to a valid file will be used. ConmonPath []string `toml:"conmon_path,omitempty"` + // CompatAPIEnforceDockerHub enforces using docker.io for completing + // short names in Podman's compatibility REST API. Note that this will + // ignore unqualified-search-registries and short-name aliases defined + // in containers-registries.conf(5). + CompatAPIEnforceDockerHub bool `toml:"compat_api_enforce_docker_hub,omitempty"` + // DetachKeys is the sequence of keys used to detach a container.
DetachKeys string `toml:"detach_keys,omitempty"` @@ -233,6 +260,10 @@ type EngineConfig struct { // EventsLogFilePath is where the events log is stored. EventsLogFilePath string `toml:"events_logfile_path,omitempty"` + // EventsLogFileMaxSize sets the maximum size for the events log. When the limit is exceeded, + // the logfile is rotated and the old one is deleted. + EventsLogFileMaxSize eventsLogMaxSize `toml:"events_logfile_max_size,omitzero"` + // EventsLogger determines where events should be logged. EventsLogger string `toml:"events_logger,omitempty"` @@ -281,6 +312,8 @@ type EngineConfig struct { LockType string `toml:"lock_type,omitempty"` // MachineEnabled indicates if Podman is running in a podman-machine VM + // + // This option is soft-deprecated; use machine.IsPodmanMachine instead MachineEnabled bool `toml:"machine_enabled,omitempty"` // MultiImageArchive - if true, the container engine allows for storing @@ -316,6 +349,9 @@ type EngineConfig struct { // OCIRuntimes are the set of configured OCI runtimes (default is runc). OCIRuntimes map[string][]string `toml:"runtimes,omitempty"` + // PodExitPolicy determines the behaviour when the last container of a pod exits. + PodExitPolicy PodExitPolicy `toml:"pod_exit_policy,omitempty"` + // PullPolicy determines whether to pull image before creating or running a container // default is "missing" PullPolicy string `toml:"pull_policy,omitempty"` @@ -390,6 +426,10 @@ type EngineConfig struct { // before sending kill signal. StopTimeout uint `toml:"stop_timeout,omitempty,omitzero"` + // ExitCommandDelay is the number of seconds to wait for the exit + // command to be sent to the API process on the server. + ExitCommandDelay uint `toml:"exit_command_delay,omitempty,omitzero"` + // ImageCopyTmpDir is the default location for storing temporary // container image content. Can be overridden with the TMPDIR // environment variable. If you specify "storage", then the @@ -414,6 +454,9 @@ type EngineConfig struct { // ChownCopiedFiles tells the container engine whether to chown files copied // into a container to the container's primary uid/gid. ChownCopiedFiles bool `toml:"chown_copied_files,omitempty"` + + // CompressionFormat is the compression format used to compress image layers. + CompressionFormat string `toml:"compression_format,omitempty"` } // SetOptions contains a subset of options in a Config. It's used to indicate if @@ -461,23 +504,43 @@ type SetOptions struct { // NetworkConfig represents the "network" TOML config table type NetworkConfig struct { + // NetworkBackend determines what backend should be used for Podman's + // networking. + NetworkBackend string `toml:"network_backend,omitempty"` + // CNIPluginDirs is where CNI plugin binaries are stored. CNIPluginDirs []string `toml:"cni_plugin_dirs,omitempty"` - // DefaultNetwork is the network name of the default CNI network + // DefaultNetwork is the network name of the default network // to attach pods to. DefaultNetwork string `toml:"default_network,omitempty"` - // DefaultSubnet is the subnet to be used for the default CNI network. + // DefaultSubnet is the subnet to be used for the default network. // If a network with the name given in DefaultNetwork is not present // then a new network using this subnet will be created. // Must be a valid IPv4 CIDR block. DefaultSubnet string `toml:"default_subnet,omitempty"` - // NetworkConfigDir is where CNI network configuration files are stored.
+ // DefaultSubnetPools is a list of subnets and size which are used to + // allocate subnets automatically for podman network create. + // It will iterate through the list and will pick the first free subnet + // with the given size. This is only used for IPv4 subnets; IPv6 subnets + // are always assigned randomly. + DefaultSubnetPools []SubnetPool `toml:"default_subnet_pools,omitempty"` + + // NetworkConfigDir is where network configuration files are stored. NetworkConfigDir string `toml:"network_config_dir,omitempty"` } +type SubnetPool struct { + // Base is a bigger subnet which will be used to allocate a subnet with + // the given size. + Base *types.IPNet `toml:"base,omitempty"` + // Size is the CIDR for the new subnet. It must be equal to or smaller + // than the CIDR of the base subnet. + Size int `toml:"size,omitempty"` +} + // SecretConfig represents the "secret" TOML config table type SecretConfig struct { // Driver specifies the secret driver to use. @@ -489,6 +552,17 @@ type SecretConfig struct { Opts map[string]string `toml:"opts,omitempty"` } +// ConfigMapConfig represents the "configmap" TOML config table +type ConfigMapConfig struct { + // Driver specifies the configmap driver to use. + // Current valid values: + // * file + // * pass + Driver string `toml:"driver,omitempty"` + // Opts contains driver specific options + Opts map[string]string `toml:"opts,omitempty"` +} + // MachineConfig represents the "machine" TOML config table type MachineConfig struct { // Number of CPUs a machine is created with. @@ -499,6 +573,10 @@ type MachineConfig struct { Image string `toml:"image,omitempty"` // Memory in MB a machine is created with. Memory uint64 `toml:"memory,omitempty,omitzero"` + // User to use for rootless podman when initializing a podman machine VM + User string `toml:"user,omitempty"` + // Volumes are host directories mounted into the VM by default. + Volumes []string `toml:"volumes"` } // Destination represents destination for remote service @@ -518,7 +596,6 @@ type Destination struct { // with cgroup v2. Other OCI runtimes do not yet support cgroup v2. This // might change in the future.
func NewConfig(userConfigPath string) (*Config, error) { - // Generate the default config for the system config, err := DefaultConfig() if err != nil { @@ -559,6 +636,10 @@ func NewConfig(userConfigPath string) (*Config, error) { return nil, err } + if err := config.setupEnv(); err != nil { + return nil, err + } + return config, nil } @@ -574,7 +655,7 @@ func readConfigFromFile(path string, config *Config) error { } keys := meta.Undecoded() if len(keys) > 0 { - logrus.Warningf("Failed to decode the keys %q from %q.", keys, path) + logrus.Debugf("Failed to decode the keys %q from %q.", keys, path) } return nil @@ -585,17 +666,14 @@ func readConfigFromFile(path string, config *Config) error { func addConfigs(dirPath string, configs []string) ([]string, error) { newConfigs := []string{} - err := filepath.Walk(dirPath, + err := filepath.WalkDir(dirPath, // WalkFunc to read additional configs - func(path string, info os.FileInfo, err error) error { + func(path string, d fs.DirEntry, err error) error { switch { case err != nil: // return error (could be a permission problem) return err - case info == nil: - // this should only happen when err != nil but let's be sure - return nil - case info.IsDir(): + case d.IsDir(): if path != dirPath { // make sure to not recurse into sub-directories return filepath.SkipDir @@ -701,7 +779,6 @@ func (c *Config) addCAPPrefix() { // Validate is the main entry point for library configuration validation. func (c *Config) Validate() error { - if err := c.Containers.Validate(); err != nil { return errors.Wrap(err, "validating containers config") } @@ -758,7 +835,6 @@ func (c *EngineConfig) Validate() error { // It returns an `error` on validation failure, otherwise // `nil`. func (c *ContainersConfig) Validate() error { - if err := c.validateUlimits(); err != nil { return err } @@ -791,18 +867,18 @@ func (c *ContainersConfig) Validate() error { // execution checks. It returns an `error` on validation failure, otherwise // `nil`. func (c *NetworkConfig) Validate() error { - expectedConfigDir := _cniConfigDir - if unshare.IsRootless() { - home, err := unshare.HomeDir() - if err != nil { - return err - } - expectedConfigDir = filepath.Join(home, _cniConfigDirRootless) - } - if c.NetworkConfigDir != expectedConfigDir { - err := isDirectory(c.NetworkConfigDir) - if err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "invalid network_config_dir: %s", c.NetworkConfigDir) + if &c.DefaultSubnetPools != &DefaultSubnetPools { + for _, pool := range c.DefaultSubnetPools { + if pool.Base.IP.To4() == nil { + return errors.Errorf("invalid subnet pool ip %q", pool.Base.IP) + } + ones, _ := pool.Base.IPNet.Mask.Size() + if ones > pool.Size { + return errors.Errorf("invalid subnet pool, size is bigger than subnet %q", &pool.Base.IPNet) + } + if pool.Size > 32 { + return errors.New("invalid subnet pool size, must be between 0-32") + } } } @@ -878,8 +954,7 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { if envHost { env = append(env, os.Environ()...) 
} else if httpProxy { - proxy := []string{"http_proxy", "https_proxy", "ftp_proxy", "no_proxy", "HTTP_PROXY", "HTTPS_PROXY", "FTP_PROXY", "NO_PROXY"} - for _, p := range proxy { + for _, p := range ProxyEnv { if val, ok := os.LookupEnv(p); ok { env = append(env, fmt.Sprintf("%s=%s", p, val)) } @@ -891,7 +966,6 @@ // Capabilities returns the capabilities after applying the Add and Drop capability // lists to the default capabilities for the container func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) { - userNotRoot := func(user string) bool { if user == "" || user == "root" || user == "0" { return false @@ -951,7 +1025,7 @@ func Device(device string) (src, dst, permissions string, err error) { // IsValidDeviceMode checks if the mode for device is valid or not. // IsValid mode is a composition of r (read), w (write), and m (mknod). func IsValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ + legalDeviceMode := map[rune]bool{ 'r': true, 'w': true, 'm': true, @@ -1002,7 +1076,6 @@ func rootlessConfigPath() (string, error) { } func stringsEq(a, b []string) bool { - if len(a) != len(b) { return false } @@ -1087,10 +1160,10 @@ func (c *Config) Write() error { if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { return err } - configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644) + configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) if err != nil { return err } @@ -1142,7 +1215,14 @@ func (c *Config) ActiveDestination() (uri, identity string, err error) { // FindHelperBinary will search the given binary name in the configured directories. // If searchPATH is set to true it will also search in $PATH. func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) { - for _, path := range c.Engine.HelperBinariesDir { + dirList := c.Engine.HelperBinariesDir + + // If set, search this directory first. This is used in testing. + if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found { + dirList = append([]string{dir}, dirList...) + } + + for _, path := range dirList { fullpath := filepath.Join(path, name) if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { return fullpath, nil @@ -1151,13 +1231,14 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) if searchPATH { return exec.LookPath(name) } + configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries." if len(c.Engine.HelperBinariesDir) == 0 { - return "", errors.Errorf("could not find %q because there are no helper binary directories configured", name) + return "", errors.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint) } - return "", errors.Errorf("could not find %q in one of %v", name, c.Engine.HelperBinariesDir) + return "", errors.Errorf("could not find %q in one of %v.
%s", name, c.Engine.HelperBinariesDir, configHint) } -// ImageCopyTmpDir default directory to store tempory image files during copy +// ImageCopyTmpDir default directory to store temporary image files during copy func (c *Config) ImageCopyTmpDir() (string, error) { if path, found := os.LookupEnv("TMPDIR"); found { return path, nil @@ -1175,3 +1256,53 @@ func (c *Config) ImageCopyTmpDir() (string, error) { return "", errors.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir) } + +// setupEnv sets the environment variables for the engine +func (c *Config) setupEnv() error { + for _, env := range c.Engine.Env { + splitEnv := strings.SplitN(env, "=", 2) + if len(splitEnv) != 2 { + logrus.Warnf("invalid environment variable for engine %s, valid configuration is KEY=value pair", env) + continue + } + // skip if the env is already defined + if _, ok := os.LookupEnv(splitEnv[0]); ok { + logrus.Debugf("environment variable %s is already defined, skip the settings from containers.conf", splitEnv[0]) + continue + } + if err := os.Setenv(splitEnv[0], splitEnv[1]); err != nil { + return err + } + } + return nil +} + +// eventsLogMaxSize is the type used by EventsLogFileMaxSize +type eventsLogMaxSize uint64 + +// UnmarshalText parses the JSON encoding of eventsLogMaxSize and +// stores it in a value. +func (e *eventsLogMaxSize) UnmarshalText(text []byte) error { + // REMOVE once writing works + if string(text) == "" { + return nil + } + val, err := units.FromHumanSize((string(text))) + if err != nil { + return err + } + if val < 0 { + return fmt.Errorf("events log file max size cannot be negative: %s", string(text)) + } + *e = eventsLogMaxSize(uint64(val)) + return nil +} + +// MarshalText returns the JSON encoding of eventsLogMaxSize. +func (e eventsLogMaxSize) MarshalText() ([]byte, error) { + if uint64(e) == DefaultEventsLogSizeMax || e == 0 { + v := []byte{} + return v, nil + } + return []byte(fmt.Sprintf("%d", e)), nil +} diff --git a/vendor/github.com/containers/common/pkg/config/config_freebsd.go b/vendor/github.com/containers/common/pkg/config/config_freebsd.go new file mode 100644 index 00000000000..85404a48ddf --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config_freebsd.go @@ -0,0 +1,25 @@ +package config + +import ( + "os" +) + +// podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations. 
+func customConfigFile() (string, error) { + if path, found := os.LookupEnv("CONTAINERS_CONF"); found { + return path, nil + } + return rootlessConfigPath() +} + +func ifRootlessConfigPath() (string, error) { + return rootlessConfigPath() +} + +var defaultHelperBinariesDir = []string{ + "/usr/local/bin", + "/usr/local/libexec/podman", + "/usr/local/lib/podman", +} diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index 21dab043f87..bfb9675824b 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -1,3 +1,4 @@ +//go:build !remote // +build !remote package config diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go index 7fd9202bbfc..bff869efa96 100644 --- a/vendor/github.com/containers/common/pkg/config/config_remote.go +++ b/vendor/github.com/containers/common/pkg/config/config_remote.go @@ -1,3 +1,4 @@ +//go:build remote // +build remote package config diff --git a/vendor/github.com/containers/common/pkg/config/config_unsupported.go b/vendor/github.com/containers/common/pkg/config/config_unsupported.go index 6563fd31743..64e4fcfcdf5 100644 --- a/vendor/github.com/containers/common/pkg/config/config_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/config_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package config diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 1d3c003e304..a4e755a665b 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -26,6 +26,13 @@ # #apparmor_profile = "container-default" +# The hosts entries from the base hosts file are added to the container's hosts +# file. This must be either an absolute path or one of the special values "image", +# which uses the hosts file from the container image, or "none", which means +# no base hosts file is used. The default is "", which will use /etc/hosts. +# +#base_hosts_file = "" + # Default way to create a cgroup namespace for the container # Options are: # `private` Create private Cgroup Namespace for the container. @@ -114,6 +121,16 @@ default_sysctls = [ # #env_host = false +# Set the IP for the host.containers.internal entry in the container's /etc/hosts +# file. This can be set to "none" to disable adding this entry. By default it +# will automatically choose the host IP. +# +# NOTE: When using podman machine this entry will never be added to the container's +# hosts file; instead the gvproxy DNS resolver will resolve this hostname. Therefore +# it is not possible to disable the entry in this case. +# +#host_containers_internal_ip = "" + # Default proxy environment variables passed into the container. # The environment variables passed in include: # http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of @@ -133,10 +150,12 @@ default_sysctls = [ # Default way to create an IPC namespace (POSIX SysV IPC) for the container # Options are: -# `private` Create private IPC Namespace for the container. -# `host` Share host IPC Namespace with the container. +# "host" Share host IPC Namespace with the container.
+# "none" Create shareable IPC Namespace for the container without a private /dev/shm. +# "private" Create private IPC Namespace for the container, other containers are not allowed to share it. +# "shareable" Create shareable IPC Namespace for the container. # -#ipcns = "private" +#ipcns = "shareable" # keyring tells the container engine whether to create # a kernel keyring for use within the container. @@ -197,10 +216,6 @@ default_sysctls = [ # #prepare_volume_on_create = false -# Indicates the networking to be used for rootless containers -# -#rootless_networking = "slirp4netns" - # Path to the seccomp.json profile which is used as the default seccomp profile # for the runtime. # @@ -249,9 +264,6 @@ default_sysctls = [ # #volumes = [] -# The network table contains settings pertaining to the management of -# CNI plugins. - [secrets] #driver = "file" @@ -260,6 +272,16 @@ default_sysctls = [ [network] +# Network backend determines what network driver will be used to set up and tear down container networks. +# Valid values are "cni" and "netavark". +# The default value is empty which means that it will automatically choose CNI or netavark. If there are +# already containers/images or CNI networks preset it will choose CNI. +# +# Before changing this value all containers must be stopped otherwise it is likely that +# iptables rules and network interfaces might leak on the host. A reboot will fix this. +# +#network_backend = "" + # Path to directory where CNI plugin binaries are located. # #cni_plugin_dirs = [ @@ -270,18 +292,36 @@ default_sysctls = [ # "/opt/cni/bin", #] -# The network name of the default CNI network to attach pods to. +# The network name of the default network to attach pods to. # #default_network = "podman" -# The default subnet for the default CNI network given in default_network. +# The default subnet for the default network given in default_network. # If a network with that name does not exist, a new network using that name and # this subnet will be created. # Must be a valid IPv4 CIDR prefix. # #default_subnet = "10.88.0.0/16" -# Path to the directory where CNI configuration files are located. +# DefaultSubnetPools is a list of subnets and size which are used to +# allocate subnets automatically for podman network create. +# It will iterate through the list and will pick the first free subnet +# with the given size. This is only used for ipv4 subnets, ipv6 subnets +# are always assigned randomly. +# +#default_subnet_pools = [ +# {"base" = "10.89.0.0/16", "size" = 24}, +# {"base" = "10.90.0.0/15", "size" = 24}, +# {"base" = "10.92.0.0/14", "size" = 24}, +# {"base" = "10.96.0.0/11", "size" = 24}, +# {"base" = "10.128.0.0/9", "size" = 24}, +#] + +# Path to the directory where network configuration files are located. +# For the CNI backend the default is "/etc/cni/net.d" as root +# and "$HOME/.config/cni/net.d" as rootless. +# For the netavark backend "/etc/containers/networks" is used as root +# and "$graphroot/networks" as rootless. # #network_config_dir = "/etc/cni/net.d/" @@ -290,6 +330,12 @@ default_sysctls = [ # #active_service = production +# The compression format to use when pushing an image. +# Valid options are: `gzip`, `zstd` and `zstd:chunked`. +# +#compression_format = "gzip" + + # Cgroup management implementation used for the runtime. # Valid options "systemd" or "cgroupfs" # @@ -313,6 +359,11 @@ default_sysctls = [ # "/usr/local/sbin/conmon" #] +# Enforces using docker.io for completing short names in Podman's compatibility +# REST API. 
Note that this will ignore unqualified-search-registries and +# short-name aliases defined in containers-registries.conf(5). +#compat_api_enforce_docker_hub = true + # Specify the keys sequence used to detach a container. # Format is a single character [a-Z] or a comma separated sequence of # `ctrl-<value>`, where `<value>` is one of: @@ -336,6 +387,18 @@ default_sysctls = [ # #env = [] +# Define where event logs will be stored, when events_logger is "file". +#events_logfile_path="" + +# Sets the maximum size for events_logfile_path. +# The size can be b (bytes), k (kilobytes), m (megabytes), or g (gigabytes). +# The format for the size is `<number><unit>`, e.g., `1b` or `3g`. +# If no unit is included then the size will be read in bytes. +# When the limit is exceeded, the logfile will be rotated and the old one will be deleted. +# If the maximum size is set to 0, then no limit will be applied, +# and the logfile will not be rotated. +#events_logfile_max_size = "1m" + # Selects which logging mechanism to use for container engine events. # Valid values are `journald`, `file` and `none`. # @@ -378,24 +441,20 @@ default_sysctls = [ # Infra (pause) container image name for pod infra containers. When running a # pod, we start a `pause` process in a container to hold open the namespaces # associated with the pod. This container does nothing other than sleep, -# reserving the pods resources for the lifetime of the pod. +# reserving the pod's resources for the lifetime of the pod. By default container +# engines run a built-in container using the pause executable. If you want to +# override this, specify an image to pull. # -#infra_image = "k8s.gcr.io/pause:3.4.1" +#infra_image = "" # Specify the locking mechanism to use; valid values are "shm" and "file". # Change the default only if you are sure of what you are doing, in general # "file" is useful only on platforms where cgo is not available for using the -# faster "shm" lock type. You may need to run "podman system renumber" after +# faster "shm" lock type. You may need to run "podman system renumber" after # you change the lock type. # #lock_type = "shm" -# Indicates if Podman is running inside a VM via Podman Machine. -# Podman uses this value to do extra setup around networking from the -# container inside the VM to to host. -# -#machine_enabled = false - # MultiImageArchive - if true, the container engine allows for storing archives # (e.g., of the docker-archive transport) with multiple images. By default, # Podman creates single-image archives. @@ -416,9 +475,26 @@ default_sysctls = [ #network_cmd_path = "" # Default options to pass to the slirp4netns binary. -# For example "allow_host_loopback=true" -# -#network_cmd_options = ["enable_ipv6=true",] +# Valid option values are: +# +# - allow_host_loopback=true|false: Allow slirp4netns to reach the host loopback IP (`10.0.2.2`). +# Default is false. +# - mtu=MTU: Specify the MTU to use for this network. (Default is `65520`). +# - cidr=CIDR: Specify the IP range to use for this network. (Default is `10.0.2.0/24`). +# - enable_ipv6=true|false: Enable IPv6. Default is true. (Required for `outbound_addr6`). +# - outbound_addr=INTERFACE: Specify the outbound interface slirp should bind to (IPv4 traffic only). +# - outbound_addr=IPv4: Specify the outbound IPv4 address slirp should bind to. +# - outbound_addr6=INTERFACE: Specify the outbound interface slirp should bind to (IPv6 traffic only). +# - outbound_addr6=IPv6: Specify the outbound IPv6 address slirp should bind to.
+# - port_handler=rootlesskit: Use rootlesskit for port forwarding. Default. +# Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container +# network namespace, usually `10.0.2.100`. If your application requires the real source IP address, +# e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for +# rootless containers when connected to user-defined networks. +# - port_handler=slirp4netns: Use the slirp4netns port forwarding; it is slower than rootlesskit but +# preserves the correct source IP address. This port handler cannot be used for user-defined networks. +# +#network_cmd_options = [] # Whether to use chroot instead of pivot_root in the runtime # @@ -430,6 +506,9 @@ # #num_locks = 2048 +# Set the exit policy of the pod when the last container exits. +#pod_exit_policy = "continue" + # Whether to pull new image before running a container # #pull_policy = "missing" @@ -444,7 +523,7 @@ # #runtime = "crun" -# List of the OCI runtimes that support --format=json. When json is supported +# List of the OCI runtimes that support --format=json. When json is supported # engine will use it for reporting nicer errors. # #runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"] @@ -457,8 +536,8 @@ # #runtime_supports_nocgroups = ["crun", "krun"] -# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment -# variable. If you specify "storage", then the location of the +# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment +# variable. If you specify "storage", then the location of the # container/storage tmp directory will be used. # image_copy_tmp_dir="/var/tmp" @@ -478,6 +557,11 @@ # #stop_timeout = 10 +# Number of seconds to wait for the exit command to be sent to the API process on the server. +# This mimics Docker's exec cleanup behaviour, where the default is 5 minutes (value is in seconds). +# +#exit_command_delay = 300 + # map of service destinations # #[service_destinations] @@ -485,9 +569,9 @@ # URI to access the Podman service # Examples: # rootless "unix://run/user/$UID/podman/podman.sock" (Default) -# rootfull "unix://run/podman/podman.sock (Default) +# rootful "unix://run/podman/podman.sock (Default) # remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock -# remote rootfull ssh://root@10.10.1.136:22/run/podman/podman.sock +# remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock # # uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock" # Path to file containing ssh identity key @@ -572,6 +656,20 @@ # #memory=2048 +# The username to use and create on the podman machine OS for rootless +# container access. +# +#user = "core" + +# Host directories to be mounted as volumes into the VM by default. +# Environment variables like $HOME as well as complete paths are supported for +# the source and destination. An optional third field `:ro` can be used to +# tell the container engines to mount the volume read-only. +# +# volumes = [ +# "$HOME:$HOME", +#] + # The [machine] table MUST be the last entry in this file.
# (Unless another table is added) # TOML does not provide a way to end a table other than a further table being diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index e72e1b3e447..8979a406bc7 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -3,14 +3,18 @@ package config import ( "bytes" "fmt" + "net" "os" "os/exec" "path/filepath" "regexp" "strconv" + "strings" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/cgroupv2" + "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" @@ -45,7 +49,7 @@ var ( // DefaultInitPath is the default path to the container-init binary DefaultInitPath = "/usr/libexec/podman/catatonit" // DefaultInfraImage to use for infra container - DefaultInfraImage = "k8s.gcr.io/pause:3.5" + DefaultInfraImage = "" // DefaultRootlessSHMLockPath is the default path for rootless SHM locks DefaultRootlessSHMLockPath = "/libpod_rootless_lock" // DefaultDetachKeys is the default keys sequence for detaching a @@ -84,8 +88,29 @@ var ( "/usr/lib/cni", "/opt/cni/bin", } + DefaultSubnetPools = []SubnetPool{ + // 10.89.0.0/24-10.255.255.0/24 + parseSubnetPool("10.89.0.0/16", 24), + parseSubnetPool("10.90.0.0/15", 24), + parseSubnetPool("10.92.0.0/14", 24), + parseSubnetPool("10.96.0.0/11", 24), + parseSubnetPool("10.128.0.0/9", 24), + } + // additionalHelperBinariesDir is an extra helper binaries directory that + // should be set at link time, if different packagers put their + // helper binary in a different location + additionalHelperBinariesDir string ) + +// nolint:unparam +func parseSubnetPool(subnet string, size int) SubnetPool { + _, n, _ := net.ParseCIDR(subnet) + return SubnetPool{ + Base: &nettypes.IPNet{IPNet: *n}, + Size: size, + } +} + const ( // _etcDir is the sysconfdir where podman should look for system config files. // It can be overridden at build time. @@ -93,19 +118,20 @@ const ( // InstallPrefix is the prefix where podman will be installed. // It can be overridden at build time. _installPrefix = "/usr" - // _cniConfigDir is the directory where cni configuration is found - _cniConfigDir = "/etc/cni/net.d/" - // _cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins - _cniConfigDirRootless = "cni/net.d/" // CgroupfsCgroupsManager represents cgroupfs native cgroup manager CgroupfsCgroupsManager = "cgroupfs" // DefaultApparmorProfile specifies the default apparmor profile for the container. DefaultApparmorProfile = apparmor.Profile + // DefaultHostsFile is the default path to the hosts file + DefaultHostsFile = "/etc/hosts" // SystemdCgroupsManager represents systemd native cgroup manager SystemdCgroupsManager = "systemd" // DefaultLogSizeMax is the default value for the maximum log size // allowed for a container. Negative values mean that no limit is imposed. DefaultLogSizeMax = -1 + // DefaultEventsLogSizeMax is the default value for the maximum events log size + // before rotation. + DefaultEventsLogSizeMax = uint64(1000000) // DefaultPidsLimit is the default value for maximum number of processes // allowed inside a container DefaultPidsLimit = 2048 @@ -114,7 +140,7 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json" - // DefaultSubnet is the subnet that will be used for the default CNI + // DefaultSubnet is the subnet that will be used for the default // network. DefaultSubnet = "10.88.0.0/16" // DefaultRootlessSignaturePolicyPath is the location within @@ -134,14 +160,11 @@ const ( // DefaultConfig defines the default values from containers.conf func DefaultConfig() (*Config, error) { - defaultEngineConfig, err := defaultConfigFromMemory() if err != nil { return nil, err } - cniConfig := _cniConfigDir - defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath if unshare.IsRootless() { configHome, err := homedir.GetConfigHome() @@ -155,7 +178,6 @@ func DefaultConfig() (*Config, error) { defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath } } - cniConfig = filepath.Join(configHome, _cniConfigDirRootless) } cgroupNS := "host" @@ -169,6 +191,7 @@ func DefaultConfig() (*Config, error) { Volumes: []string{}, Annotations: []string{}, ApparmorProfile: DefaultApparmorProfile, + BaseHostsFile: "", CgroupNS: cgroupNS, Cgroups: "enabled", DefaultCapabilities: DefaultCapabilities, @@ -183,28 +206,28 @@ func DefaultConfig() (*Config, error) { "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", }, - EnvHost: false, - HTTPProxy: true, - Init: false, - InitPath: "", - IPCNS: "private", - LogDriver: defaultLogDriver(), - LogSizeMax: DefaultLogSizeMax, - NoHosts: false, - PidsLimit: DefaultPidsLimit, - PidNS: "private", - RootlessNetworking: getDefaultRootlessNetwork(), - ShmSize: DefaultShmSize, - TZ: "", - Umask: "0022", - UTSNS: "private", - UserNSSize: DefaultUserNSSize, + EnvHost: false, + HTTPProxy: true, + Init: false, + InitPath: "", + IPCNS: "shareable", + LogDriver: defaultLogDriver(), + LogSizeMax: DefaultLogSizeMax, + NetNS: "private", + NoHosts: false, + PidsLimit: DefaultPidsLimit, + PidNS: "private", + ShmSize: DefaultShmSize, + TZ: "", + Umask: "0022", + UTSNS: "private", + UserNSSize: DefaultUserNSSize, }, Network: NetworkConfig{ - DefaultNetwork: "podman", - DefaultSubnet: DefaultSubnet, - NetworkConfigDir: cniConfig, - CNIPluginDirs: DefaultCNIPluginDirs, + DefaultNetwork: "podman", + DefaultSubnet: DefaultSubnet, + DefaultSubnetPools: DefaultSubnetPools, + CNIPluginDirs: DefaultCNIPluginDirs, }, Engine: *defaultEngineConfig, Secrets: defaultSecretConfig(), @@ -224,9 +247,11 @@ func defaultSecretConfig() SecretConfig { func defaultMachineConfig() MachineConfig { return MachineConfig{ CPUs: 1, - DiskSize: 10, - Image: "testing", + DiskSize: 100, + Image: getDefaultMachineImage(), Memory: 2048, + User: getDefaultMachineUser(), + Volumes: []string{"$HOME:$HOME"}, } } @@ -242,6 +267,10 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log") + c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax) + + c.CompatAPIEnforceDockerHub = true + if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { types.SetDefaultConfigFilePath(path) } @@ -255,11 +284,14 @@ func defaultConfigFromMemory() (*EngineConfig, error) { storeOpts.GraphRoot = _defaultGraphRoot } c.graphRoot = storeOpts.GraphRoot - c.ImageCopyTmpDir = "/var/tmp" + c.ImageCopyTmpDir = getDefaultTmpDir() c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") c.HelperBinariesDir = defaultHelperBinariesDir + if additionalHelperBinariesDir != "" { + c.HelperBinariesDir = 
append(c.HelperBinariesDir, additionalHelperBinariesDir) + } c.HooksDir = DefaultHooksDirs c.ImageDefaultTransport = _defaultTransport c.StateType = BoltDBStateStore @@ -269,9 +301,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.CgroupManager = defaultCgroupManager() c.ServiceTimeout = uint(5) c.StopTimeout = uint(10) - c.NetworkCmdOptions = []string{ - "enable_ipv6=true", - } + c.ExitCommandDelay = uint(5 * 60) c.Remote = isRemote() c.OCIRuntimes = map[string][]string{ "crun": { @@ -358,6 +388,8 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.MachineEnabled = false c.ChownCopiedFiles = true + c.PodExitPolicy = defaultPodExitPolicy + return c, nil } @@ -366,16 +398,16 @@ func defaultTmpDir() (string, error) { return "/run/libpod", nil } - runtimeDir, err := getRuntimeDir() + runtimeDir, err := util.GetRuntimeDir() if err != nil { return "", err } libpodRuntimeDir := filepath.Join(runtimeDir, "libpod") - if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { + if err := os.Mkdir(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil { if !os.IsExist(err) { return "", err - } else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { + } else if err := os.Chmod(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil { // The directory already exists, just set the sticky bit return "", errors.Wrap(err, "set sticky bit on") } @@ -389,15 +421,14 @@ func probeConmon(conmonBinary string) error { cmd := exec.Command(conmonBinary, "--version") var out bytes.Buffer cmd.Stdout = &out - err := cmd.Run() - if err != nil { + if err := cmd.Run(); err != nil { return err } r := regexp.MustCompile(`^conmon version (?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`) matches := r.FindStringSubmatch(out.String()) if len(matches) != 4 { - return errors.Wrap(err, _conmonVersionFormatErr) + return errors.New(_conmonVersionFormatErr) } major, err := strconv.Atoi(matches[1]) if err != nil { @@ -440,6 +471,10 @@ func (c *Config) NetNS() string { return c.Containers.NetNS } +func (c EngineConfig) EventsLogMaxSize() uint64 { + return uint64(c.EventsLogFileMaxSize) +} + // SecurityOptions returns the default security options func (c *Config) SecurityOptions() []string { securityOpts := []string{} @@ -570,8 +605,23 @@ func (c *Config) MachineEnabled() bool { return c.Engine.MachineEnabled } -// RootlessNetworking returns the "kind" of networking -// rootless containers should use -func (c *Config) RootlessNetworking() string { - return c.Containers.RootlessNetworking +// MachineVolumes returns volumes to mount into the VM +func (c *Config) MachineVolumes() ([]string, error) { + return machineVolumes(c.Machine.Volumes) +} + +func machineVolumes(volumes []string) ([]string, error) { + translatedVolumes := []string{} + for _, v := range volumes { + vol := os.ExpandEnv(v) + split := strings.Split(vol, ":") + if len(split) < 2 || len(split) > 3 { + return nil, errors.Errorf("invalid machine volume %s, 2 or 3 fields required", v) + } + if split[0] == "" || split[1] == "" { + return nil, errors.Errorf("invalid machine volume %s, fields must contain data", v) + } + translatedVolumes = append(translatedVolumes, vol) + } + return translatedVolumes, nil } diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go index c68c0b130fc..d6ea4359cc3 100644 --- a/vendor/github.com/containers/common/pkg/config/default_linux.go +++ b/vendor/github.com/containers/common/pkg/config/default_linux.go @@ -3,6 +3,7 @@ package config
import ( "fmt" "io/ioutil" + "os" "strconv" "strings" @@ -13,10 +14,15 @@ const ( oldMaxSize = uint64(1048576) ) -// getDefaultRootlessNetwork returns the default rootless network configuration. -// It is "slirp4netns" for Linux. -func getDefaultRootlessNetwork() string { - return "slirp4netns" +// getDefaultMachineImage returns the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" } // getDefaultProcessLimits returns the nproc for the current process in ulimits format @@ -43,3 +49,12 @@ func getDefaultProcessLimits() []string { } return defaultLimits } + +// getDefaultTmpDir for linux +func getDefaultTmpDir() string { + // first check the TMPDIR env var + if path, found := os.LookupEnv("TMPDIR"); found { + return path + } + return "/var/tmp" +} diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go index e38fb810de8..4be8267558d 100644 --- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go @@ -1,11 +1,19 @@ -// +build !linux +//go:build !linux && !windows +// +build !linux,!windows package config -// getDefaultRootlessNetwork returns the default rootless network configuration. -// It is "cni" for non-Linux OSes (to better support `podman-machine` usecases). -func getDefaultRootlessNetwork() string { - return "cni" +import "os" + +// getDefaultMachineImage returns the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" } // isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. @@ -17,3 +25,12 @@ func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { return false, nil } func getDefaultProcessLimits() []string { return []string{} } + +// getDefaultTmpDir for unsupported platforms +func getDefaultTmpDir() string { + // first check the TMPDIR env var + if path, found := os.LookupEnv("TMPDIR"); found { + return path + } + return "/var/tmp" +} diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go new file mode 100644 index 00000000000..db230dfb286 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -0,0 +1,34 @@ +package config + +import "os" + +// getDefaultMachineImage returns the default machine image stream +// On Windows this refers to the Fedora major release number +func getDefaultMachineImage() string { + return "35" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "user" +} + +// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode.
+func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { + return false, nil +} + +// getDefaultProcessLimits returns the nofile and nproc for the current process in ulimits format +func getDefaultProcessLimits() []string { + return []string{} +} + +// getDefaultTmpDir for windows +func getDefaultTmpDir() string { + // first check the Temp env var + // https://answers.microsoft.com/en-us/windows/forum/all/where-is-the-temporary-folder/44a039a5-45ba-48dd-84db-fd700e54fd56 + if val, ok := os.LookupEnv("TEMP"); ok { + return val + } + return os.Getenv("LOCALAPPDATA") + "\\Temp" +} diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go index 2a3b6fb35f1..352fddf92cc 100644 --- a/vendor/github.com/containers/common/pkg/config/nosystemd.go +++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go @@ -1,3 +1,4 @@ +//go:build !systemd || !cgo // +build !systemd !cgo package config @@ -22,3 +23,7 @@ func defaultLogDriver() string { func useSystemd() bool { return false } + +func useJournald() bool { + return false +} diff --git a/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go new file mode 100644 index 00000000000..f0f983077dd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go @@ -0,0 +1,36 @@ +package config + +import "fmt" + +// PodExitPolicies includes the supported pod exit policies. +var PodExitPolicies = []string{string(PodExitPolicyContinue), string(PodExitPolicyStop)} + +// PodExitPolicy determines a pod's exit and stop behaviour. +type PodExitPolicy string + +const ( + // PodExitPolicyContinue instructs the pod to continue running when the + // last container has exited. + PodExitPolicyContinue PodExitPolicy = "continue" + // PodExitPolicyStop instructs the pod to stop when the last container + // has exited. + PodExitPolicyStop = "stop" + // PodExitPolicyUnsupported implies an internal error. + // Negative for backwards compat. + PodExitPolicyUnsupported = "invalid" + + defaultPodExitPolicy = PodExitPolicyContinue +) + +// ParsePodExitPolicy parses the specified policy and returns an error if it is +// invalid. 
+func ParsePodExitPolicy(policy string) (PodExitPolicy, error) { + switch policy { + case "", string(PodExitPolicyContinue): + return PodExitPolicyContinue, nil + case string(PodExitPolicyStop): + return PodExitPolicyStop, nil + default: + return PodExitPolicyUnsupported, fmt.Errorf("invalid pod exit policy: %q", policy) + } +} diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go index fab3ea437a8..03d19a12f30 100644 --- a/vendor/github.com/containers/common/pkg/config/systemd.go +++ b/vendor/github.com/containers/common/pkg/config/systemd.go @@ -1,15 +1,16 @@ +//go:build systemd && cgo // +build systemd,cgo package config import ( "io/ioutil" + "path/filepath" "strings" "sync" "github.com/containers/common/pkg/cgroupv2" "github.com/containers/storage/pkg/unshare" - "github.com/coreos/go-systemd/v22/sdjournal" ) var ( @@ -57,7 +58,6 @@ func useSystemd() bool { val := strings.TrimSuffix(string(dat), "\n") usesSystemd = (val == "systemd") } - return }) return usesSystemd } @@ -67,13 +67,20 @@ func useJournald() bool { if !useSystemd() { return } - journal, err := sdjournal.NewJournal() - if err != nil { - return + for _, root := range []string{"/run/log/journal", "/var/log/journal"} { + dirs, err := ioutil.ReadDir(root) + if err != nil { + continue + } + for _, d := range dirs { + if d.IsDir() { + if _, err := ioutil.ReadDir(filepath.Join(root, d.Name())); err == nil { + usesJournald = true + return + } + } + } } - journal.Close() - usesJournald = true - return }) return usesJournald } diff --git a/vendor/github.com/containers/common/pkg/defaultnet/default_network.go b/vendor/github.com/containers/common/pkg/defaultnet/default_network.go deleted file mode 100644 index 9b32241d65c..00000000000 --- a/vendor/github.com/containers/common/pkg/defaultnet/default_network.go +++ /dev/null @@ -1,222 +0,0 @@ -package defaultnet - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "regexp" - "text/template" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// TODO: A smarter implementation would make sure cni-podman0 was unused before -// making the default, and adjust if necessary -const networkTemplate = `{ - "cniVersion": "0.4.0", - "name": "{{{{.Name}}}}", - "plugins": [ - { - "type": "bridge", - "bridge": "cni-podman0", - "isGateway": true, - "ipMasq": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "routes": [{ "dst": "0.0.0.0/0" }], - "ranges": [ - [ - { - "subnet": "{{{{.Subnet}}}}", - "gateway": "{{{{.Gateway}}}}" - } - ] - ] - } - }, -{{{{- if (eq .Machine true) }}}} - { - "type": "podman-machine", - "capabilities": { - "portMappings": true - } - }, -{{{{- end}}}} - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - }, - { - "type": "firewall" - }, - { - "type": "tuning" - } - ] -} -` - -var ( - // Borrowed from Podman, modified to remove dashes and periods. - nameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_]*$") -) - -// Used to pass info into the template engine -type networkInfo struct { - Name string - Subnet string - Gateway string - Machine bool -} - -// The most trivial definition of a CNI network possible for our use here. -// We need the name, and nothing else. -type network struct { - Name string `json:"name"` -} - -// Create makes the CNI default network, if necessary. 
-// Accepts the name and subnet of the network to create (a standard template -// will be used, with these values plugged in), the configuration directory -// where CNI configs are stored (to verify if a named configuration already -// exists), an exists directory (where a sentinel file will be stored, to ensure -// the network is only made once), and an isMachine bool (to determine whether -// the machine block will be added to the config). -// Create first checks if a default network has already been created via the -// presence of a sentinel file. If it does exist, it returns immediately without -// error. -// It next checks if a CNI network with the given name already exists. In that -// case, it creates the sentinel file and returns without error. -// If neither of these are true, the default network is created. -func Create(name, subnet, configDir, existsDir string, isMachine bool) error { - // TODO: Should probably regex name to make sure it's valid. - if name == "" || subnet == "" || configDir == "" || existsDir == "" { - return errors.Errorf("must provide values for all arguments to MakeDefaultNetwork") - } - if !nameRegex.MatchString(name) { - return errors.Errorf("invalid default network name %s - letters, numbers, and underscores only", name) - } - - sentinelFile := filepath.Join(existsDir, "defaultCNINetExists") - - // Check if sentinel file exists, return immediately if it does. - if _, err := os.Stat(sentinelFile); err == nil { - return nil - } - - // Create the sentinel file if it doesn't exist, so subsequent checks - // don't need to go further. - file, err := os.Create(sentinelFile) - if err != nil { - return err - } - file.Close() - - // We may need to make the config dir. - if err := os.MkdirAll(configDir, 0755); err != nil && !os.IsExist(err) { - return errors.Wrapf(err, "error creating CNI configuration directory") - } - - // Check all networks in the CNI conflist. - files, err := ioutil.ReadDir(configDir) - if err != nil { - return errors.Wrapf(err, "error reading CNI configuration directory") - } - if len(files) > 0 { - configPaths := make([]string, 0, len(files)) - for _, path := range files { - if !path.IsDir() && filepath.Ext(path.Name()) == ".conflist" { - configPaths = append(configPaths, filepath.Join(configDir, path.Name())) - } - } - for _, config := range configPaths { - configName, err := getConfigName(config) - if err != nil { - logrus.Errorf("Error reading CNI configuration file: %v", err) - continue - } - if configName == name { - return nil - } - } - } - - // We need to make the config. - // Get subnet and gateway. 
- _, ipNet, err := net.ParseCIDR(subnet) - if err != nil { - return errors.Wrapf(err, "default network subnet %s is invalid", subnet) - } - - ones, bits := ipNet.Mask.Size() - if ones == bits { - return errors.Wrapf(err, "default network subnet %s is to small", subnet) - } - gateway := make(net.IP, len(ipNet.IP)) - // copy the subnet ip to the gateway so we can modify it - copy(gateway, ipNet.IP) - // the default gateway should be the first ip in the subnet - gateway[len(gateway)-1]++ - - netInfo := new(networkInfo) - netInfo.Name = name - netInfo.Gateway = gateway.String() - netInfo.Subnet = ipNet.String() - netInfo.Machine = isMachine - - templ, err := template.New("network_template").Delims("{{{{", "}}}}").Parse(networkTemplate) - if err != nil { - return errors.Wrapf(err, "error compiling template for default network") - } - var output bytes.Buffer - if err := templ.Execute(&output, netInfo); err != nil { - return errors.Wrapf(err, "error executing template for default network") - } - - // Next, we need to place the config on disk. - // Loop through possible indexes, with a limit of 100 attempts. - created := false - for i := 87; i < 187; i++ { - configFile, err := os.OpenFile(filepath.Join(configDir, fmt.Sprintf("%d-%s.conflist", i, name)), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644) - if err != nil { - logrus.Infof("Attempt to create default CNI network config file failed: %v", err) - continue - } - defer configFile.Close() - - created = true - - // Success - file is open. Write our buffer to it. - if _, err := configFile.Write(output.Bytes()); err != nil { - return errors.Wrapf(err, "error writing default CNI config to file") - } - break - } - if !created { - return errors.Errorf("no available default network configuration file was found") - } - - return nil -} - -// Get the name of the configuration contained in a given conflist file. Accepts -// the full path of a .conflist CNI configuration. -func getConfigName(file string) (string, error) { - contents, err := ioutil.ReadFile(file) - if err != nil { - return "", err - } - config := new(network) - if err := json.Unmarshal(contents, config); err != nil { - return "", errors.Wrapf(err, "error decoding CNI configuration %s", filepath.Base(file)) - } - return config.Name, nil -} diff --git a/vendor/github.com/containers/common/pkg/download/download.go b/vendor/github.com/containers/common/pkg/download/download.go new file mode 100644 index 00000000000..abf4c87739e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/download/download.go @@ -0,0 +1,31 @@ +package download + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" +) + +// FromURL downloads the specified source to a file in tmpdir (OS defaults if +// empty). 
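A hedged usage sketch of the new download helper defined just below; the URL is a placeholder and the error handling is illustrative only:

package main

import (
	"fmt"
	"os"

	"github.com/containers/common/pkg/download"
)

func main() {
	// An empty tmpdir defers to the OS default temporary directory.
	path, err := download.FromURL("", "https://example.com/artifact.tar.gz")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// FromURL returns the temp file's path; cleanup is left to the caller.
	defer os.Remove(path)
	fmt.Println("downloaded to", path)
}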
+func FromURL(tmpdir, source string) (string, error) { + tmp, err := ioutil.TempFile(tmpdir, "") + if err != nil { + return "", fmt.Errorf("creating temporary download file: %w", err) + } + defer tmp.Close() + + response, err := http.Get(source) // nolint:noctx + if err != nil { + return "", fmt.Errorf("downloading %s: %w", source, err) + } + defer response.Body.Close() + + _, err = io.Copy(tmp, response.Body) + if err != nil { + return "", fmt.Errorf("copying %s to %s: %w", source, tmp.Name(), err) + } + + return tmp.Name(), nil +} diff --git a/vendor/github.com/containers/common/pkg/machine/machine.go b/vendor/github.com/containers/common/pkg/machine/machine.go new file mode 100644 index 00000000000..465eeceaf1b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/machine/machine.go @@ -0,0 +1,70 @@ +package machine + +import ( + "os" + "strings" + "sync" + + "github.com/containers/common/pkg/config" + "github.com/sirupsen/logrus" +) + +type MachineMarker struct { + Enabled bool + Type string +} + +const ( + markerFile = "/etc/containers/podman-machine" + Wsl = "wsl" + Qemu = "qemu" +) + +var ( + markerSync sync.Once + machineMarker *MachineMarker +) + +func loadMachineMarker(file string) { + var kind string + + // Support deprecated config value for compatibility + enabled := isLegacyConfigSet() + + if content, err := os.ReadFile(file); err == nil { + enabled = true + kind = strings.TrimSpace(string(content)) + } + + machineMarker = &MachineMarker{enabled, kind} +} + +func isLegacyConfigSet() bool { + config, err := config.Default() + if err != nil { + logrus.Warnf("could not obtain container configuration") + return false + } + + //nolint:staticcheck //lint:ignore SA1019 deprecated call + return config.Engine.MachineEnabled +} + +func IsPodmanMachine() bool { + return GetMachineMarker().Enabled +} + +func MachineHostType() string { + return GetMachineMarker().Type +} + +func IsGvProxyBased() bool { + return IsPodmanMachine() && MachineHostType() != Wsl +} + +func GetMachineMarker() *MachineMarker { + markerSync.Do(func() { + loadMachineMarker(markerFile) + }) + return machineMarker +} diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index ea9495ee73a..75ffac06c36 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -16,31 +16,22 @@ import ( type List interface { AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, os, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error Remove(instanceDigest digest.Digest) error - SetURLs(instanceDigest digest.Digest, urls []string) error URLs(instanceDigest digest.Digest) ([]string, error) - SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error Annotations(instanceDigest *digest.Digest) (map[string]string, error) - SetOS(instanceDigest digest.Digest, os string) error OS(instanceDigest digest.Digest) (string, error) - SetArchitecture(instanceDigest digest.Digest, arch string) error Architecture(instanceDigest digest.Digest) (string, error) - SetOSVersion(instanceDigest digest.Digest, osVersion string) error OSVersion(instanceDigest digest.Digest) (string, error) - SetVariant(instanceDigest digest.Digest, variant string) error Variant(instanceDigest digest.Digest) (string, error) - SetFeatures(instanceDigest digest.Digest, features []string) error 
Features(instanceDigest digest.Digest) ([]string, error) - SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error OSFeatures(instanceDigest digest.Digest) ([]string, error) - Serialize(mimeType string) ([]byte, error) Instances() []digest.Digest OCIv1() *v1.Index @@ -74,13 +65,14 @@ func Create() List { }, oci: v1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: v1.MediaTypeImageIndex, }, } } // AddInstance adds an entry for the specified manifest digest, with assorted // additional information specified in parameters, to the list or index. -func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error { +func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error { if err := l.Remove(manifestDigest); err != nil && !os.IsNotExist(errors.Cause(err)) { return err } @@ -373,6 +365,7 @@ func FromBlob(manifestBytes []byte) (List, error) { }, oci: v1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: v1.MediaTypeImageIndex, }, } switch manifestType { @@ -449,38 +442,37 @@ func (l *list) preferOCI() bool { // Serialize encodes the list using the specified format, or by selecting one // which it thinks is appropriate. func (l *list) Serialize(mimeType string) ([]byte, error) { - var manifestBytes []byte + var ( + res []byte + err error + ) switch mimeType { case "": if l.preferOCI() { - manifest, err := json.Marshal(&l.oci) + res, err = json.Marshal(&l.oci) if err != nil { return nil, errors.Wrapf(err, "error marshalling OCI image index") } - manifestBytes = manifest } else { - manifest, err := json.Marshal(&l.docker) + res, err = json.Marshal(&l.docker) if err != nil { return nil, errors.Wrapf(err, "error marshalling Docker manifest list") } - manifestBytes = manifest } case v1.MediaTypeImageIndex: - manifest, err := json.Marshal(&l.oci) + res, err = json.Marshal(&l.oci) if err != nil { return nil, errors.Wrapf(err, "error marshalling OCI image index") } - manifestBytes = manifest case manifest.DockerV2ListMediaType: - manifest, err := json.Marshal(&l.docker) + res, err = json.Marshal(&l.docker) if err != nil { return nil, errors.Wrapf(err, "error marshalling Docker manifest list") } - manifestBytes = manifest default: return nil, errors.Wrapf(ErrManifestTypeNotSupported, "serializing list to type %q not implemented", mimeType) } - return manifestBytes, nil + return res, nil } // Instances returns the list of image instances mentioned in this list. 
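The manifests hunks above both stamp MediaType on freshly created indexes and rework Serialize's MIME-type selection; a minimal sketch of the effect, assuming the vendored packages resolve at these import paths:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/manifests"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	list := manifests.Create()
	// An empty MIME type lets Serialize pick between the OCI index and the
	// Docker manifest list; naming one explicitly forces that encoding.
	raw, err := list.Serialize(v1.MediaTypeImageIndex)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw)) // the emitted index now carries a "mediaType" field
}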
diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go index 02e670c50c8..6c4958cc2b6 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse.go +++ b/vendor/github.com/containers/common/pkg/parse/parse.go @@ -14,9 +14,31 @@ import ( // ValidateVolumeOpts validates a volume's options func ValidateVolumeOpts(options []string) ([]string, error) { - var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown int + var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int finalOpts := make([]string, 0, len(options)) for _, opt := range options { + // support advanced options like upperdir=/path, workdir=/path + if strings.Contains(opt, "upperdir") { + foundUpperDir++ + if foundUpperDir > 1 { + return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", ")) + } + finalOpts = append(finalOpts, opt) + continue + } + if strings.Contains(opt, "workdir") { + foundWorkDir++ + if foundWorkDir > 1 { + return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", ")) + } + finalOpts = append(finalOpts, opt) + continue + } + if strings.HasPrefix(opt, "idmap") { + finalOpts = append(finalOpts, opt) + continue + } + switch opt { case "noexec", "exec": foundExec++ @@ -119,7 +141,7 @@ func Device(device string) (src, dest, permissions string, err error) { // isValidDeviceMode checks if the mode for device is valid or not. // isValid mode is a composition of r (read), w (write), and m (mknod). func isValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ + legalDeviceMode := map[rune]bool{ 'r': true, 'w': true, 'm': true, diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go index ce4446a1b00..d087c4a02b6 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin package parse diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go index 43e3a668882..a9573e4e8a8 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry.go +++ b/vendor/github.com/containers/common/pkg/retry/retry.go @@ -45,8 +45,12 @@ func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions func isRetryable(err error) bool { err = errors.Cause(err) - if err == context.Canceled || err == context.DeadlineExceeded { + switch err { + case nil: return false + case context.Canceled, context.DeadlineExceeded: + return false + default: // continue } type unwrapper interface { @@ -57,7 +61,8 @@ func isRetryable(err error) bool { case errcode.Error: switch e.Code { - case errcode.ErrorCodeUnauthorized, errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown: + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied, + errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown: return false } return true @@ -86,7 +91,7 @@ func isRetryable(err error) bool { } } return true - case unwrapper: + case unwrapper: // Test this last, because various error types might implement .Unwrap() err = e.Unwrap() return isRetryable(err) } diff --git 
a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go index 6769809755b..901e28a5dcc 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go +++ b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package retry diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go index cf333744c53..3712afc71f1 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go @@ -80,6 +80,7 @@ func DefaultProfile() *Seccomp { "vmsplice", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, }, @@ -168,6 +169,7 @@ func DefaultProfile() *Seccomp { "futex", "futex_time64", "futimesat", + "get_mempolicy", "get_robust_list", "get_thread_area", "getcpu", @@ -183,7 +185,6 @@ func DefaultProfile() *Seccomp { "getgroups", "getgroups32", "getitimer", - "get_mempolicy", "getpeername", "getpgid", "getpgrp", @@ -235,6 +236,7 @@ func DefaultProfile() *Seccomp { "lstat64", "madvise", "mbind", + "membarrier", "memfd_create", "memfd_secret", "mincore", @@ -248,6 +250,7 @@ func DefaultProfile() *Seccomp { "mmap", "mmap2", "mount", + "mount_setattr", "move_mount", "mprotect", "mq_getsetattr", @@ -271,9 +274,9 @@ func DefaultProfile() *Seccomp { "nanosleep", "newfstatat", "open", + "open_tree", "openat", "openat2", - "open_tree", "pause", "pidfd_getfd", "pidfd_open", @@ -292,8 +295,12 @@ func DefaultProfile() *Seccomp { "preadv", "preadv2", "prlimit64", + "process_mrelease", + "process_vm_readv", + "process_vm_writev", "pselect6", "pselect6_time64", + "ptrace", "pwrite64", "pwritev", "pwritev2", @@ -352,7 +359,6 @@ func DefaultProfile() *Seccomp { "sendmmsg", "sendmsg", "sendto", - "setns", "set_mempolicy", "set_robust_list", "set_thread_area", @@ -366,6 +372,7 @@ func DefaultProfile() *Seccomp { "setgroups", "setgroups32", "setitimer", + "setns", "setpgid", "setpriority", "setregid", @@ -387,10 +394,15 @@ func DefaultProfile() *Seccomp { "shmdt", "shmget", "shutdown", + "sigaction", "sigaltstack", + "signal", "signalfd", "signalfd4", + "sigpending", + "sigprocmask", "sigreturn", + "sigsuspend", "socketcall", "socketpair", "splice", @@ -404,6 +416,7 @@ func DefaultProfile() *Seccomp { "sync", "sync_file_range", "syncfs", + "syscall", "sysinfo", "syslog", "tee", @@ -416,6 +429,7 @@ func DefaultProfile() *Seccomp { "timer_gettime64", "timer_settime", "timer_settime64", + "timerfd", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", @@ -516,10 +530,10 @@ func DefaultProfile() *Seccomp { Names: []string{ "arm_fadvise64_64", "arm_sync_file_range", - "sync_file_range2", "breakpoint", "cacheflush", "set_tls", + "sync_file_range2", }, Action: ActAllow, Args: []*Arg{}, @@ -574,6 +588,7 @@ func DefaultProfile() *Seccomp { "open_by_handle_at", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -609,6 +624,7 @@ func DefaultProfile() *Seccomp { "setns", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -630,6 +646,7 @@ func DefaultProfile() *Seccomp { "chroot", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -639,8 +656,8 @@ func DefaultProfile() *Seccomp { { Names: []string{ "delete_module", - "init_module", "finit_module", + 
"init_module", "query_module", }, Action: ActAllow, @@ -652,11 +669,12 @@ func DefaultProfile() *Seccomp { { Names: []string{ "delete_module", - "init_module", "finit_module", + "init_module", "query_module", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -678,6 +696,7 @@ func DefaultProfile() *Seccomp { "acct", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -688,9 +707,6 @@ func DefaultProfile() *Seccomp { Names: []string{ "kcmp", "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace", }, Action: ActAllow, Args: []*Arg{}, @@ -702,11 +718,9 @@ func DefaultProfile() *Seccomp { Names: []string{ "kcmp", "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -715,8 +729,8 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "iopl", "ioperm", + "iopl", }, Action: ActAllow, Args: []*Arg{}, @@ -726,10 +740,11 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "iopl", "ioperm", + "iopl", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -738,10 +753,10 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "settimeofday", - "stime", "clock_settime", "clock_settime64", + "settimeofday", + "stime", }, Action: ActAllow, Args: []*Arg{}, @@ -751,12 +766,13 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "settimeofday", - "stime", "clock_settime", "clock_settime64", + "settimeofday", + "stime", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -778,6 +794,7 @@ func DefaultProfile() *Seccomp { "vhangup", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -789,6 +806,7 @@ func DefaultProfile() *Seccomp { "socket", }, Action: ActErrno, + Errno: "EINVAL", ErrnoRet: &einval, Args: []*Arg{ { @@ -867,6 +885,7 @@ func DefaultProfile() *Seccomp { return &Seccomp{ DefaultAction: ActErrno, + DefaultErrno: "ENOSYS", DefaultErrnoRet: &enosys, ArchMap: arches(), Syscalls: syscalls, diff --git a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go new file mode 100644 index 00000000000..87ac2ab77ac --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go @@ -0,0 +1,94 @@ +//go:build linux && seccomp +// +build linux,seccomp + +package seccomp + +import ( + "golang.org/x/sys/unix" +) + +// Error table +var errnoArch = map[string]uint{ + "EPERM": uint(unix.EPERM), + "ENOENT": uint(unix.ENOENT), + "ESRCH": uint(unix.ESRCH), + "EIO": uint(unix.EIO), + "ENXIO": uint(unix.ENXIO), + "E2BIG": uint(unix.E2BIG), + "ENOEXEC": uint(unix.ENOEXEC), + "EBADF": uint(unix.EBADF), + "ECHILD": uint(unix.ECHILD), + "EDEADLK": uint(unix.EDEADLK), + "ENOMEM": uint(unix.ENOMEM), + "EACCES": uint(unix.EACCES), + "EFAULT": uint(unix.EFAULT), + "ENOTBLK": uint(unix.ENOTBLK), + "EBUSY": uint(unix.EBUSY), + "EEXIST": uint(unix.EEXIST), + "EXDEV": uint(unix.EXDEV), + "ENODEV": uint(unix.ENODEV), + "ENOTDIR": uint(unix.ENOTDIR), + "EISDIR": uint(unix.EISDIR), + "EINVAL": uint(unix.EINVAL), + "ENFILE": uint(unix.ENFILE), + "EMFILE": uint(unix.EMFILE), + "ENOTTY": uint(unix.ENOTTY), + "ETXTBSY": uint(unix.ETXTBSY), + "EFBIG": uint(unix.EFBIG), + "ENOSPC": uint(unix.ENOSPC), + "ESPIPE": uint(unix.ESPIPE), + "EROFS": uint(unix.EROFS), + "EMLINK": uint(unix.EMLINK), + 
"EPIPE": uint(unix.EPIPE), + "EDOM": uint(unix.EDOM), + "ERANGE": uint(unix.ERANGE), + "EAGAIN": uint(unix.EAGAIN), + "EINPROGRESS": uint(unix.EINPROGRESS), + "EALREADY": uint(unix.EALREADY), + "ENOTSOCK": uint(unix.ENOTSOCK), + "EDESTADDRREQ": uint(unix.EDESTADDRREQ), + "EMSGSIZE": uint(unix.EMSGSIZE), + "EPROTOTYPE": uint(unix.EPROTOTYPE), + "ENOPROTOOPT": uint(unix.ENOPROTOOPT), + "EPROTONOSUPPORT": uint(unix.EPROTONOSUPPORT), + "ESOCKTNOSUPPORT": uint(unix.ESOCKTNOSUPPORT), + "EOPNOTSUPP": uint(unix.EOPNOTSUPP), + "EPFNOSUPPORT": uint(unix.EPFNOSUPPORT), + "EAFNOSUPPORT": uint(unix.EAFNOSUPPORT), + "EADDRINUSE": uint(unix.EADDRINUSE), + "EADDRNOTAVAIL": uint(unix.EADDRNOTAVAIL), + "ENETDOWN": uint(unix.ENETDOWN), + "ENETUNREACH": uint(unix.ENETUNREACH), + "ENETRESET": uint(unix.ENETRESET), + "ECONNABORTED": uint(unix.ECONNABORTED), + "ECONNRESET": uint(unix.ECONNRESET), + "ENOBUFS": uint(unix.ENOBUFS), + "EISCONN": uint(unix.EISCONN), + "ENOTCONN": uint(unix.ENOTCONN), + "ESHUTDOWN": uint(unix.ESHUTDOWN), + "ETOOMANYREFS": uint(unix.ETOOMANYREFS), + "ETIMEDOUT": uint(unix.ETIMEDOUT), + "ECONNREFUSED": uint(unix.ECONNREFUSED), + "ELOOP": uint(unix.ELOOP), + "ENAMETOOLONG": uint(unix.ENAMETOOLONG), + "EHOSTDOWN": uint(unix.EHOSTDOWN), + "EHOSTUNREACH": uint(unix.EHOSTUNREACH), + "ENOTEMPTY": uint(unix.ENOTEMPTY), + "EUSERS": uint(unix.EUSERS), + "EDQUOT": uint(unix.EDQUOT), + "ESTALE": uint(unix.ESTALE), + "EREMOTE": uint(unix.EREMOTE), + "ENOLCK": uint(unix.ENOLCK), + "ENOSYS": uint(unix.ENOSYS), + "EILSEQ": uint(unix.EILSEQ), + "ENOMEDIUM": uint(unix.ENOMEDIUM), + "EMEDIUMTYPE": uint(unix.EMEDIUMTYPE), + "EOVERFLOW": uint(unix.EOVERFLOW), + "ECANCELED": uint(unix.ECANCELED), + "EIDRM": uint(unix.EIDRM), + "ENOMSG": uint(unix.ENOMSG), + "ENOTSUP": uint(unix.ENOTSUP), + "EBADMSG": uint(unix.EBADMSG), + "ENOTRECOVERABLE": uint(unix.ENOTRECOVERABLE), + "EOWNERDEAD": uint(unix.EOWNERDEAD), +} diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go index 90da99f0a45..5c278574cff 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/filter.go +++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go @@ -1,3 +1,4 @@ +//go:build seccomp // +build seccomp // NOTE: this package has originally been copied from diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json index c009134e3cc..442632e7d11 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json @@ -1,6 +1,7 @@ { "defaultAction": "SCMP_ACT_ERRNO", "defaultErrnoRet": 38, + "defaultErrno": "ENOSYS", "archMap": [ { "architecture": "SCMP_ARCH_X86_64", @@ -87,7 +88,8 @@ "comment": "", "includes": {}, "excludes": {}, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -174,6 +176,7 @@ "futex", "futex_time64", "futimesat", + "get_mempolicy", "get_robust_list", "get_thread_area", "getcpu", @@ -189,7 +192,6 @@ "getgroups", "getgroups32", "getitimer", - "get_mempolicy", "getpeername", "getpgid", "getpgrp", @@ -241,6 +243,7 @@ "lstat64", "madvise", "mbind", + "membarrier", "memfd_create", "memfd_secret", "mincore", @@ -254,6 +257,7 @@ "mmap", "mmap2", "mount", + "mount_setattr", "move_mount", "mprotect", "mq_getsetattr", @@ -277,9 +281,9 @@ "nanosleep", "newfstatat", "open", + "open_tree", "openat", "openat2", - "open_tree", "pause", "pidfd_getfd", "pidfd_open", @@ -298,8 +302,12 @@ 
"preadv", "preadv2", "prlimit64", + "process_mrelease", + "process_vm_readv", + "process_vm_writev", "pselect6", "pselect6_time64", + "ptrace", "pwrite64", "pwritev", "pwritev2", @@ -358,7 +366,6 @@ "sendmmsg", "sendmsg", "sendto", - "setns", "set_mempolicy", "set_robust_list", "set_thread_area", @@ -372,6 +379,7 @@ "setgroups", "setgroups32", "setitimer", + "setns", "setpgid", "setpriority", "setregid", @@ -393,10 +401,15 @@ "shmdt", "shmget", "shutdown", + "sigaction", "sigaltstack", + "signal", "signalfd", "signalfd4", + "sigpending", + "sigprocmask", "sigreturn", + "sigsuspend", "socketcall", "socketpair", "splice", @@ -410,6 +423,7 @@ "sync", "sync_file_range", "syncfs", + "syscall", "sysinfo", "syslog", "tee", @@ -422,6 +436,7 @@ "timer_gettime64", "timer_settime", "timer_settime64", + "timerfd", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", @@ -559,10 +574,10 @@ "names": [ "arm_fadvise64_64", "arm_sync_file_range", - "sync_file_range2", "breakpoint", "cacheflush", - "set_tls" + "set_tls", + "sync_file_range2" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -650,7 +665,8 @@ "CAP_DAC_READ_SEARCH" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -693,7 +709,8 @@ "CAP_SYS_ADMIN" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -722,13 +739,14 @@ "CAP_SYS_CHROOT" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ "delete_module", - "init_module", "finit_module", + "init_module", "query_module" ], "action": "SCMP_ACT_ALLOW", @@ -744,8 +762,8 @@ { "names": [ "delete_module", - "init_module", "finit_module", + "init_module", "query_module" ], "action": "SCMP_ACT_ERRNO", @@ -757,7 +775,8 @@ "CAP_SYS_MODULE" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -786,15 +805,13 @@ "CAP_SYS_PACCT" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ "kcmp", - "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace" + "process_madvise" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -809,10 +826,7 @@ { "names": [ "kcmp", - "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace" + "process_madvise" ], "action": "SCMP_ACT_ERRNO", "args": [], @@ -823,12 +837,13 @@ "CAP_SYS_PTRACE" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ - "iopl", - "ioperm" + "ioperm", + "iopl" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -842,8 +857,8 @@ }, { "names": [ - "iopl", - "ioperm" + "ioperm", + "iopl" ], "action": "SCMP_ACT_ERRNO", "args": [], @@ -854,14 +869,15 @@ "CAP_SYS_RAWIO" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ - "settimeofday", - "stime", "clock_settime", - "clock_settime64" + "clock_settime64", + "settimeofday", + "stime" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -875,10 +891,10 @@ }, { "names": [ - "settimeofday", - "stime", "clock_settime", - "clock_settime64" + "clock_settime64", + "settimeofday", + "stime" ], "action": "SCMP_ACT_ERRNO", "args": [], @@ -889,7 +905,8 @@ "CAP_SYS_TIME" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -918,7 +935,8 @@ "CAP_SYS_TTY_CONFIG" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -946,7 +964,8 @@ "CAP_AUDIT_WRITE" ] }, - "errnoRet": 22 + "errnoRet": 22, + "errno": "EINVAL" }, { "names": [ diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go index af36b9990b5..f7adde8aba0 100644 --- 
a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go @@ -1,3 +1,4 @@ +//go:build seccomp // +build seccomp // SPDX-License-Identifier: Apache-2.0 @@ -10,6 +11,7 @@ import ( "encoding/json" "errors" "fmt" + "strconv" "github.com/opencontainers/runtime-spec/specs-go" libseccomp "github.com/seccomp/libseccomp-golang" @@ -66,6 +68,37 @@ func inSlice(slice []string, s string) bool { return false } +func getArchitectures(config *Seccomp, newConfig *specs.LinuxSeccomp) error { + if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { + return errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + } + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) != 0 { + for _, a := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) + } + } + return nil +} + +func getErrno(errno string, def *uint) (*uint, error) { + if errno == "" { + return def, nil + } + v, err := strconv.ParseUint(errno, 10, 32) + if err == nil { + v2 := uint(v) + return &v2, nil + } + + v2, found := errnoArch[errno] + if !found { + return nil, fmt.Errorf("unknown errno %s", errno) + } + return &v2, nil +} + func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { if config == nil { return nil, nil @@ -79,22 +112,22 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) newConfig := &specs.LinuxSeccomp{} var arch string - var native, err = libseccomp.GetNativeArch() + native, err := libseccomp.GetNativeArch() if err == nil { arch = native.String() } - if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { - return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + if err := getArchitectures(config, newConfig); err != nil { + return nil, err } - // if config.Architectures == 0 then libseccomp will figure out the architecture to use - if len(config.Architectures) != 0 { - for _, a := range config.Architectures { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) - } + for _, flag := range config.Flags { + newConfig.Flags = append(newConfig.Flags, specs.LinuxSeccompFlag(flag)) } + newConfig.ListenerPath = config.ListenerPath + newConfig.ListenerMetadata = config.ListenerMetadata + if len(config.ArchMap) != 0 { for _, a := range config.ArchMap { seccompArch, ok := nativeToSeccomp[arch] @@ -111,7 +144,11 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) } newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction) - newConfig.DefaultErrnoRet = config.DefaultErrnoRet + + newConfig.DefaultErrnoRet, err = getErrno(config.DefaultErrno, config.DefaultErrnoRet) + if err != nil { + return nil, err + } Loop: // Loop through all syscall blocks and convert them to libcontainer format after filtering them @@ -145,12 +182,17 @@ Loop: return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") } + errno, err := getErrno(call.Errno, call.ErrnoRet) + if err != nil { + return nil, err + } + if call.Name != "" { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, call.ErrnoRet)) + newConfig.Syscalls = append(newConfig.Syscalls, 
createSpecsSyscall([]string{call.Name}, call.Action, call.Args, errno)) } if len(call.Names) > 0 { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, call.ErrnoRet)) + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, errno)) } } diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go index 8b23ee2c04d..da5230c563f 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !seccomp // +build !linux !seccomp // SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/github.com/containers/common/pkg/seccomp/supported.go b/vendor/github.com/containers/common/pkg/seccomp/supported.go index 86e1b66bbe3..f8a20e5364a 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/supported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/supported.go @@ -1,3 +1,4 @@ +//go:build linux && seccomp // +build linux,seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/seccomp/types.go b/vendor/github.com/containers/common/pkg/seccomp/types.go index 07751f72907..784b1ba8dd6 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/types.go +++ b/vendor/github.com/containers/common/pkg/seccomp/types.go @@ -6,13 +6,20 @@ package seccomp // Seccomp represents the config for a seccomp profile for syscall restriction. type Seccomp struct { - DefaultAction Action `json:"defaultAction"` + DefaultAction Action `json:"defaultAction"` + + // DefaultErrnoRet is obsolete, please use DefaultErrno DefaultErrnoRet *uint `json:"defaultErrnoRet,omitempty"` + DefaultErrno string `json:"defaultErrno,omitempty"` + // Architectures is kept to maintain backward compatibility with the old // seccomp profile. 
- Architectures []Arch `json:"architectures,omitempty"` - ArchMap []Architecture `json:"archMap,omitempty"` - Syscalls []*Syscall `json:"syscalls"` + Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` + Flags []string `json:"flags,omitempty"` + ListenerPath string `json:"listenerPath,omitempty"` + ListenerMetadata string `json:"listenerMetadata,omitempty"` } // Architecture is used to represent a specific architecture @@ -107,5 +114,7 @@ type Syscall struct { Comment string `json:"comment"` Includes Filter `json:"includes"` Excludes Filter `json:"excludes"` - ErrnoRet *uint `json:"errnoRet,omitempty"` + // ErrnoRet is obsolete, please use Errno + ErrnoRet *uint `json:"errnoRet,omitempty"` + Errno string `json:"errno,omitempty"` } diff --git a/vendor/github.com/containers/common/pkg/seccomp/validate.go b/vendor/github.com/containers/common/pkg/seccomp/validate.go index 1c5c4edc64e..669ab04a2ec 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/validate.go +++ b/vendor/github.com/containers/common/pkg/seccomp/validate.go @@ -1,3 +1,4 @@ +//go:build seccomp // +build seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go index 80fcf54583d..e0c275851f5 100644 --- a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go @@ -34,7 +34,7 @@ func NewDriver(rootPath string) (*Driver, error) { fileDriver := new(Driver) fileDriver.secretsDataFilePath = filepath.Join(rootPath, secretsDataFile) // the lockfile functions require that the rootPath dir is executable - if err := os.MkdirAll(rootPath, 0700); err != nil { + if err := os.MkdirAll(rootPath, 0o700); err != nil { return nil, err } @@ -95,7 +95,7 @@ func (d *Driver) Store(id string, data []byte) error { if err != nil { return err } - err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0600) + err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0o600) if err != nil { return err } @@ -119,7 +119,7 @@ func (d *Driver) Delete(id string) error { if err != nil { return err } - err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0600) + err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0o600) if err != nil { return err } diff --git a/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go b/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go index 6dc00b34c85..50967b7cfbf 100644 --- a/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go @@ -30,6 +30,8 @@ type driverConfig struct { Root string // KeyID contains the key id that will be used for encryption (i.e. 
user@domain.tld) KeyID string + // GPGHomedir is the homedir where the GPG keys are stored + GPGHomedir string } func (cfg *driverConfig) ParseOpts(opts map[string]string) { @@ -40,6 +42,9 @@ func (cfg *driverConfig) ParseOpts(opts map[string]string) { if val, ok := opts["key"]; ok { cfg.KeyID = val } + if val, ok := opts["gpghomedir"]; ok { + cfg.GPGHomedir = val + } } func defaultDriverConfig() *driverConfig { @@ -156,6 +161,9 @@ func (d *Driver) Delete(id string) error { } func (d *Driver) gpg(ctx context.Context, in io.Reader, out io.Writer, args ...string) error { + if d.GPGHomedir != "" { + args = append([]string{"--homedir", d.GPGHomedir}, args...) + } cmd := exec.CommandContext(ctx, "gpg", args...) cmd.Env = os.Environ() cmd.Stdin = in diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go index aea983cb15a..4a04e2b2fab 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/common/pkg/secrets/secrets.go @@ -102,7 +102,7 @@ func NewManager(rootPath string) (*SecretsManager, error) { return nil, errors.Wrapf(errInvalidPath, "path must be absolute: %s", rootPath) } // the lockfile functions require that the rootPath dir is executable - if err := os.MkdirAll(rootPath, 0700); err != nil { + if err := os.MkdirAll(rootPath, 0o700); err != nil { return nil, err } @@ -237,7 +237,6 @@ func (s *SecretsManager) List() ([]Secret, error) { var ls []Secret for _, v := range secrets { ls = append(ls, v) - } return ls, nil } diff --git a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go index 0c4929995b2..4d2ca0fca43 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go +++ b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go @@ -177,7 +177,7 @@ func (s *SecretsManager) store(entry *Secret) error { if err != nil { return err } - err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0600) + err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0o600) if err != nil { return err } @@ -203,7 +203,7 @@ func (s *SecretsManager) delete(nameOrID string) error { if err != nil { return err } - err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0600) + err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0o600) if err != nil { return err } diff --git a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go index 22aacb1ce9e..8eac200f7d5 100644 --- a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go @@ -3,12 +3,12 @@ package shelldriver import ( "bytes" "context" + "fmt" "os" "os/exec" "sort" "strings" - "github.com/mitchellh/mapstructure" "github.com/pkg/errors" ) @@ -27,22 +27,33 @@ var ( type driverConfig struct { // DeleteCommand contains a shell command that deletes a secret. // The secret id is provided as environment variable SECRET_ID - DeleteCommand string `mapstructure:"delete"` + DeleteCommand string // ListCommand contains a shell command that lists all secrets. // The output is expected to be one id per line - ListCommand string `mapstructure:"list"` + ListCommand string // LookupCommand contains a shell command that retrieves a secret. 
// The secret id is provided as environment variable SECRET_ID - LookupCommand string `mapstructure:"lookup"` + LookupCommand string // StoreCommand contains a shell command that stores a secret. // The secret id is provided as environment variable SECRET_ID - // The secret value itself is provied over stdin - StoreCommand string `mapstructure:"store"` + // The secret value itself is provided over stdin + StoreCommand string } func (cfg *driverConfig) ParseOpts(opts map[string]string) error { - if err := mapstructure.Decode(opts, cfg); err != nil { - return err + for key, value := range opts { + switch key { + case "delete": + cfg.DeleteCommand = value + case "list": + cfg.ListCommand = value + case "lookup": + cfg.LookupCommand = value + case "store": + cfg.StoreCommand = value + default: + return fmt.Errorf("invalid shell driver option: %q", key) + } } if cfg.DeleteCommand == "" || cfg.ListCommand == "" || diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux.go b/vendor/github.com/containers/common/pkg/signal/signal_linux.go index 305b9d21f41..21e09c9fef0 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_linux.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux.go @@ -1,5 +1,5 @@ -// +build linux -// +build !mips,!mipsle,!mips64,!mips64le +//go:build linux && !mips && !mipsle && !mips64 && !mips64le +// +build linux,!mips,!mipsle,!mips64,!mips64le // Signal handling for Linux only. package signal diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go index 45c9d5af1e6..52b07aaf463 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go @@ -1,3 +1,4 @@ +//go:build linux && (mips || mipsle || mips64 || mips64le) // +build linux // +build mips mipsle mips64 mips64le diff --git a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go index 9d1733c02d5..0e8685a7c56 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux // Signal handling for Linux only. diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index 6c9321e7357..4410292a773 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -149,14 +149,15 @@ func getMountsMap(path string) (string, string, error) { //nolint // MountsWithUIDGID copies, adds, and mounts the subscriptions to the container root filesystem // mountLabel: MAC/SELinux label for container content -// containerWorkingDir: Private data for storing subscriptions on the host mounted in container. +// containerRunDir: Private data for storing subscriptions on the host mounted in container. // mountFile: Additional mount points required for the container. -// mountPoint: Container image mountpoint +// mountPoint: Container image mountpoint, or the directory from the host's perspective that + corresponds to `/` in the container. 
// uid: to assign to content created for subscriptions // gid: to assign to content created for subscriptions // rootless: indicates whether container is running in rootless mode // disableFips: indicates whether system should ignore fips mode -func MountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount { +func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount { var ( subscriptionMounts []rspec.Mount mountFiles []string @@ -174,7 +175,7 @@ } for _, file := range mountFiles { if _, err := os.Stat(file); err == nil { - mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerWorkingDir, uid, gid) + mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerRunDir, uid, gid) if err != nil { logrus.Warnf("Failed to mount subscriptions, skipping entry in %s: %v", file, err) } @@ -191,7 +192,7 @@ _, err := os.Stat("/etc/system-fips") switch { case err == nil: - if err := addFIPSModeSubscription(&subscriptionMounts, containerWorkingDir, mountPoint, mountLabel, uid, gid); err != nil { + if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil { logrus.Errorf("Adding FIPS mode subscription to container: %v", err) } case os.IsNotExist(err): @@ -210,7 +211,7 @@ func rchown(chowndir string, uid, gid int) error { // addSubscriptionsFromMountsFile copies the contents of host directory to container directory // and returns a list of mounts -func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir string, uid, gid int) ([]rspec.Mount, error) { +func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string, uid, gid int) ([]rspec.Mount, error) { var mounts []rspec.Mount defaultMountsPaths := getMounts(filePath) for _, path := range defaultMountsPaths { @@ -228,7 +229,7 @@ return nil, err } - ctrDirOrFileOnHost := filepath.Join(containerWorkingDir, ctrDirOrFile) + ctrDirOrFileOnHost := filepath.Join(containerRunDir, ctrDirOrFile) // In the event of a restart, don't want to copy subscriptions over again as they already would exist in ctrDirOrFileOnHost _, err = os.Stat(ctrDirOrFileOnHost) @@ -261,7 +262,6 @@ data, err := readFileOrDir("", hostDirOrFile, mode.Perm()) if err != nil { return nil, err - } for _, s := range data { if err := os.MkdirAll(filepath.Dir(ctrDirOrFileOnHost), s.dirMode); err != nil { @@ -300,15 +300,19 @@ return mounts, nil } -// addFIPSModeSubscription creates /run/secrets/system-fips in the container -// root filesystem if /etc/system-fips exists on hosts. -// This enables the container to be FIPS compliant and run openssl in -// FIPS mode as the host is also in FIPS mode. -func addFIPSModeSubscription(mounts *[]rspec.Mount, containerWorkingDir, mountPoint, mountLabel string, uid, gid int) error { +// addFIPSModeSubscription adds mounts to the `mounts` slice that are needed for the container to run openssl in FIPS mode +// (i.e., be FIPS compliant). 
+// It should only be called if /etc/system-fips exists on the host. +// It primarily does two things: +// - creates /run/secrets/system-fips in the container root filesystem, and adds it to the `mounts` slice. +// - If `/etc/crypto-policies/back-ends` already exists inside the container, it creates +// `/usr/share/crypto-policies/back-ends/FIPS` inside the container as well. +// It is done from within the container to avoid policy incompatibility between the container and host. +func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error { subscriptionsDir := "/run/secrets" - ctrDirOnHost := filepath.Join(containerWorkingDir, subscriptionsDir) + ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir) if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) { - if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { //nolint + if err = idtools.MkdirAllAs(ctrDirOnHost, 0o755, uid, gid); err != nil { //nolint return err } if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil { @@ -322,7 +326,7 @@ if err != nil { return errors.Wrap(err, "creating system-fips file in container for FIPS mode") } - defer file.Close() + file.Close() } if !mountExists(*mounts, subscriptionsDir) { diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go index aeb1a3a8040..d9d8cfb3e54 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows // +build !linux,!windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go index 2b664c7f8dd..0adf5835855 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go index 1d89dd55030..94160ad5790 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go index 1fc4e6d19d8..859791e369b 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go index e3c851fe617..c9e4184aa9f 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go @@ -1,4 +1,5 @@ -// +build windows, osx +//go:build (windows && ignore) || osx +// +build windows,ignore osx package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go 
b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go index 7463cdd8f03..af1c77d6050 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go @@ -1,3 +1,4 @@ +//go:build solaris && cgo // +build solaris,cgo package sysinfo @@ -45,7 +46,7 @@ func IsCPUSharesAvailable() bool { // New returns a new SysInfo, using the filesystem to detect which features // the kernel supports. -//NOTE Solaris: If we change the below capabilities be sure +// NOTE Solaris: If we change the below capabilities be sure // to update verifyPlatformContainerSettings() in daemon_solaris.go func New(quiet bool) *SysInfo { sysInfo := &SysInfo{} @@ -63,7 +64,6 @@ func New(quiet bool) *SysInfo { // setCgroupMem reads the memory information for Solaris. func setCgroupMem(quiet bool) cgroupMemInfo { - return cgroupMemInfo{ MemoryLimit: true, SwapLimit: true, @@ -76,7 +76,6 @@ func setCgroupMem(quiet bool) cgroupMemInfo { // setCgroupCPU reads the cpu information for Solaris. func setCgroupCPU(quiet bool) cgroupCPUInfo { - return cgroupCPUInfo{ CPUShares: true, CPUCfsPeriod: false, @@ -88,7 +87,6 @@ func setCgroupCPU(quiet bool) cgroupCPUInfo { // blkio switches are not supported in Solaris. func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { - return cgroupBlkioInfo{ BlkioWeight: false, BlkioWeightDevice: false, @@ -97,7 +95,6 @@ func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { // setCgroupCPUsetInfo reads the cpuset information for Solaris. func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo { - return cgroupCpusetInfo{ Cpuset: true, Cpus: getCPUCount(), diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go index 45f3ef1c65f..4aa9401f6c1 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go @@ -1,3 +1,4 @@ +//go:build !linux && !solaris && !windows // +build !linux,!solaris,!windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go index 4e6255bc592..455a8892f21 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/timetype/timestamp.go b/vendor/github.com/containers/common/pkg/timetype/timestamp.go index ce2cb64f28b..3cbfe40980b 100644 --- a/vendor/github.com/containers/common/pkg/timetype/timestamp.go +++ b/vendor/github.com/containers/common/pkg/timetype/timestamp.go @@ -34,13 +34,14 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - if strings.Contains(value, ".") { // nolint:gocritic + switch { + case strings.Contains(value, "."): if parseInLocation { format = rFC3339NanoLocal } else { format = time.RFC3339Nano } - } else if strings.Contains(value, "T") { + case strings.Contains(value, "T"): // we want the number of colons in the T portion of the timestamp tcolons := strings.Count(value, ":") // if parseInLocation is off and we have a +/- zone offset (not Z) then @@ -68,9 +69,9 @@ func 
GetTimestamp(value string, reference time.Time) (string, error) { format = time.RFC3339 } } - } else if parseInLocation { + case parseInLocation: format = dateLocal - } else { + default: format = dateWithZone } @@ -112,7 +113,7 @@ func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error) return parseTimestamp(value) } -func parseTimestamp(value string) (int64, int64, error) { // nolint:gocritic +func parseTimestamp(value string) (int64, int64, error) { sa := strings.SplitN(value, ".", 2) s, err := strconv.ParseInt(sa[0], 10, 64) if err != nil { diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unix.go b/vendor/github.com/containers/common/pkg/umask/umask_unix.go index bb589f7ac04..4f5527cb63b 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unix.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin package umask @@ -9,8 +10,8 @@ import ( ) func Check() { - oldUmask := syscall.Umask(0022) //nolint - if (oldUmask & ^0022) != 0 { + oldUmask := syscall.Umask(0o022) //nolint + if (oldUmask & ^0o022) != 0 { logrus.Debugf("umask value too restrictive. Forcing it to 022") } } diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go index 9041d5f2098..cf76ea1d37f 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !darwin // +build !linux,!darwin package umask diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go new file mode 100644 index 00000000000..98890a686f8 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -0,0 +1,24 @@ +package util + +import "regexp" + +// StringInSlice determines if a string is in a string slice, returns bool +func StringInSlice(s string, sl []string) bool { + for _, i := range sl { + if i == s { + return true + } + } + return false +} + +// StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool +func StringMatchRegexSlice(s string, re []string) bool { + for _, r := range re { + m, err := regexp.MatchString(r, s) + if err == nil && m { + return true + } + } + return false +} diff --git a/vendor/github.com/containers/common/pkg/config/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go similarity index 75% rename from vendor/github.com/containers/common/pkg/config/util_supported.go rename to vendor/github.com/containers/common/pkg/util/util_supported.go index 33e4a9e8fc2..35201f93237 100644 --- a/vendor/github.com/containers/common/pkg/config/util_supported.go +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -1,6 +1,7 @@ -// +build linux darwin +//go:build linux || darwin || freebsd +// +build linux darwin freebsd -package config +package util import ( "fmt" @@ -19,8 +20,14 @@ var ( rootlessRuntimeDir string ) -// getRuntimeDir returns the runtime directory -func getRuntimeDir() (string, error) { +// isWriteableOnlyByOwner checks that the specified permission mask allows write +// access only to the owner. 
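To make the mask arithmetic in the helper below concrete, here is a standalone mirror of the check with a few sample modes; writableOnlyByOwner and the values shown are illustrative, not part of the vendored code:

package main

import (
	"fmt"
	"os"
)

// 0o722 selects every owner bit plus the write bits of group and other;
// the result must be exactly 0o700: owner has rwx and nobody else can write.
func writableOnlyByOwner(perm os.FileMode) bool {
	return (perm & 0o722) == 0o700
}

func main() {
	fmt.Println(writableOnlyByOwner(0o700)) // true
	fmt.Println(writableOnlyByOwner(0o755)) // true: group/other may read and traverse, but not write
	fmt.Println(writableOnlyByOwner(0o770)) // false: the group write bit survives the mask
}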
+func isWriteableOnlyByOwner(perm os.FileMode) bool { + return (perm & 0o722) == 0o700 +} + +// GetRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { var rootlessRuntimeDirError error rootlessRuntimeDirOnce.Do(func() { @@ -39,21 +46,21 @@ func getRuntimeDir() (string, error) { uid := fmt.Sprintf("%d", unshare.GetRootlessUID()) if runtimeDir == "" { tmpDir := filepath.Join("/run", "user", uid) - if err := os.MkdirAll(tmpDir, 0700); err != nil { + if err := os.MkdirAll(tmpDir, 0o700); err != nil { logrus.Debugf("unable to make temp dir: %v", err) } st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { runtimeDir = tmpDir } } if runtimeDir == "" { tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) - if err := os.MkdirAll(tmpDir, 0700); err != nil { + if err := os.MkdirAll(tmpDir, 0o700); err != nil { logrus.Debugf("unable to make temp dir %v", err) } st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { runtimeDir = tmpDir } } diff --git a/vendor/github.com/containers/common/pkg/config/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go similarity index 71% rename from vendor/github.com/containers/common/pkg/config/util_windows.go rename to vendor/github.com/containers/common/pkg/util/util_windows.go index 995301f5df0..1cffb21fc31 100644 --- a/vendor/github.com/containers/common/pkg/config/util_windows.go +++ b/vendor/github.com/containers/common/pkg/util/util_windows.go @@ -1,12 +1,13 @@ +//go:build windows // +build windows -package config +package util import ( "github.com/pkg/errors" ) // getRuntimeDir returns the runtime directory -func getRuntimeDir() (string, error) { +func GetRuntimeDir() (string, error) { return "", errors.New("this function is not implemented for windows") } diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index b6ceabce531..61fce9d2298 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. 
-const Version = "0.46.1-dev" +const Version = "0.48.0" diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index e1649ba8e18..d28cc4a3ffd 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "reflect" "strings" @@ -15,8 +14,10 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagesource" "github.com/containers/image/v5/internal/pkg/platform" - internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" "github.com/containers/image/v5/pkg/compression" @@ -31,7 +32,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb/v7" - "github.com/vbauerster/mpb/v7/decor" "golang.org/x/sync/semaphore" "golang.org/x/term" ) @@ -63,8 +63,8 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - dest types.ImageDestination - rawSource types.ImageSource + dest private.ImageDestination + rawSource private.ImageSource reportWriter io.Writer progressOutput io.Writer progressInterval time.Duration @@ -80,13 +80,13 @@ type copier struct { // imageCopier tracks state specific to a single image (possibly an item of a manifest list) type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src types.Image - diffIDsAreNeeded bool - canModifyManifest bool - canSubstituteBlobs bool - ociEncryptLayers *[]int + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + ociEncryptLayers *[]int } const ( @@ -122,17 +122,23 @@ type ImageListSelection int // Options allows supplying non-default configuration modifying the behavior of CopyImage. type Options struct { - RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. - SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. + SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`. + SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination ReportWriter io.Writer SourceCtx *types.SystemContext DestinationCtx *types.SystemContext ProgressInterval time.Duration // time to wait between reports to signal the progress channel Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
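The two new signing knobs land next to SignBy; a hedged sketch of how a caller might wire them up (the image names, key ID, and passphrase are made up, and the accept-anything policy is for illustration only):

package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
)

func copyAndSign(ctx context.Context) ([]byte, error) {
	srcRef, err := alltransports.ParseImageName("docker://registry.example.com/ns/app:1.0")
	if err != nil {
		return nil, err
	}
	destRef, err := alltransports.ParseImageName("oci:/tmp/app:1.0")
	if err != nil {
		return nil, err
	}
	// Accept-anything signature policy, purely for the sketch.
	policyCtx, err := signature.NewPolicyContext(&signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	})
	if err != nil {
		return nil, err
	}
	defer policyCtx.Destroy()

	// SignIdentity overrides the identity embedded in the signature; it must
	// not be name-only (createSignature, further down, rejects that case).
	identity, err := reference.ParseNormalizedNamed("registry.example.com/ns/app:1.0")
	if err != nil {
		return nil, err
	}
	return copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
		SignBy:         "trust@example.com",
		SignPassphrase: "correct horse battery staple",
		SignIdentity:   identity,
	})
}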
+ + // Preserve digests, and fail if we cannot. + PreserveDigests bool // manifest MIME type of image set by user. "" is default and means use autodetection to determine the manifest MIME type ForceManifestMIMEType string ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself + + // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted. // The encryption options are derived from the construction of the EncryptConfig object. // Note: During initial encryption process of a layer, the resultant digest is not known @@ -191,26 +197,28 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, return nil, err } - reportWriter := ioutil.Discard + reportWriter := io.Discard if options.ReportWriter != nil { reportWriter = options.ReportWriter } - dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) if err != nil { return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef)) } + dest := imagedestination.FromPublic(publicDest) defer func() { if err := dest.Close(); err != nil { retErr = errors.Wrapf(retErr, " (dest: %v)", err) } }() - rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) if err != nil { return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef)) } + rawSource := imagesource.FromPublic(publicRawSource) defer func() { if err := rawSource.Close(); err != nil { retErr = errors.Wrapf(retErr, " (src: %v)", err) @@ -222,7 +230,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // createProgressBar() will print a single line instead. progressOutput := reportWriter if !isTTY(reportWriter) { - progressOutput = ioutil.Discard + progressOutput = io.Discard } c := &copier{ @@ -410,7 +418,36 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference())) } } - canModifyManifestList := (len(sigs) == 0) + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, errors.Wrapf(err, "computing digest of source image's manifest") + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyOneImage.
+ cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } // Determine if we'll need to convert the manifest list to a different format. forceListMIMEType := options.ForceManifestMIMEType @@ -425,8 +462,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur return nil, errors.Wrapf(err, "determining manifest list type to write to destination") } if selectedListType != originalList.MIMEType() { - if !canModifyManifestList { - return nil, errors.Errorf("manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType) + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) } } @@ -510,8 +547,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // If we can't just use the original value, but we have to change it, flag an error. if !bytes.Equal(attemptedManifestList, originalManifestList) { - if !canModifyManifestList { - return nil, errors.Errorf(" manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType) + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) } logrus.Debugf("Manifest list has been updated") } else { @@ -536,7 +573,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Sign the manifest list. if options.SignBy != "" { - newSig, err := c.createSignature(manifestList, options.SignBy) + newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase, options.SignIdentity) if err != nil { return nil, err } @@ -629,13 +666,27 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } } + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. + cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + ic := imageCopier{ c: c, manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, src: src, // diffIDsAreNeeded is computed later - canModifyManifest: len(sigs) == 0 && !destIsDigestedReference, - ociEncryptLayers: options.OciEncryptLayers, + cannotModifyManifestReason: cannotModifyManifestReason, + ociEncryptLayers: options.OciEncryptLayers, } // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. 
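The cannotModifyManifestListReason assignments above replace a bare boolean with prose that can be spliced straight into errors; a stripped-down sketch of the idiom (names are illustrative, not the vendored API):

package main

import "fmt"

func conversionError(wantedType, cannotModifyReason string) error {
	if cannotModifyReason == "" {
		return nil // modification is allowed, nothing to report
	}
	return fmt.Errorf("manifest must be converted to type %q, but we cannot modify it: %q",
		wantedType, cannotModifyReason)
}

func main() {
	reason := "" // empty string means "we may modify"; last applicable reason wins
	if true {    // stand-in for len(sigs) > 0
		reason = "Would invalidate signatures"
	}
	fmt.Println(conversionError("application/vnd.oci.image.manifest.v1+json", reason))
}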
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: @@ -643,7 +694,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, // and we would reuse and sign it. - ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" + ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" if err := ic.updateEmbeddedDockerReference(); err != nil { return nil, "", "", err @@ -660,8 +711,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) - // If encrypted and decryption keys provided, we should try to decrypt - ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal if options.OptimizeDestinationImageAlreadyExists { @@ -710,10 +759,10 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. // So if we are here, we will definitely be trying to convert the manifest. - // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, + // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. - if !ic.canModifyManifest { - return nil, "", "", errors.Wrap(err, "Writing manifest failed (and converting it is not possible, image is signed or the destination specifies a digest)") + if ic.cannotModifyManifestReason != "" { + return nil, "", "", errors.Wrapf(err, "Writing manifest failed and we cannot try conversions: %q", cannotModifyManifestReason) } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. @@ -744,7 +793,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } if options.SignBy != "" { - newSig, err := c.createSignature(manifestBytes, options.SignBy) + newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase, options.SignIdentity) if err != nil { return nil, "", "", err } @@ -813,9 +862,9 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error { return nil // No reference embedded in the manifest, or it matches destRef already. 
} - if !ic.canModifyManifest { - return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which is not possible (image is signed or the destination specifies a digest)", - transports.ImageName(ic.c.dest.Reference()), destRef.String()) + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", + transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason) } ic.manifestUpdates.EmbeddedDockerReference = destRef return nil @@ -833,7 +882,7 @@ func isTTY(w io.Writer) bool { return false } -// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfos := ic.src.LayerInfos() numLayers := len(srcInfos) @@ -844,8 +893,8 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfosUpdated := false // If we only need to check authorization, no updates required. if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { - if !ic.canModifyManifest { - return errors.Errorf("Copying this image requires changing layer representation, which is not possible (image is signed or the destination specifies a digest)") + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) } srcInfos = updatedSrcInfos srcInfosUpdated = true @@ -975,8 +1024,8 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { pendingImage := ic.src if !ic.noPendingManifestUpdates() { - if !ic.canModifyManifest { - return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") + if ic.cannotModifyManifestReason != "" { + return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) } if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. @@ -1011,96 +1060,12 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc instanceDigest = &manifestDigest } if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { - return nil, "", errors.Wrapf(err, "writing manifest %q", string(man)) + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", errors.Wrapf(err, "writing manifest") } return man, manifestDigest, nil } -// newProgressPool creates a *mpb.Progress. -// The caller must eventually call pool.Wait() after the pool will no longer be updated. -// NOTE: Every progress bar created within the progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. 
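The Abort-before-Wait ordering demanded by the NOTE above is easy to get wrong; Go's LIFO defer order expresses it concisely (a standalone sketch against mpb v7, not the vendored code):

package main

import (
	"time"

	"github.com/vbauerster/mpb/v7"
)

func main() {
	pool := mpb.New(mpb.WithWidth(40))
	defer pool.Wait() // runs LAST: blocks until every bar completes or aborts

	bar := pool.AddBar(100)
	defer bar.Abort(false) // runs FIRST (LIFO), so Wait() cannot hang on an unfinished bar

	for i := 0; i < 100; i++ {
		bar.IncrBy(1)
		time.Sleep(5 * time.Millisecond)
	}
	// Once the bar reaches its total it completes; Abort then becomes a no-op.
}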
-func (c *copier) newProgressPool() *mpb.Progress { - return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) -} - -// customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar -func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorator { - producer := func(filler interface{}) decor.DecorFunc { - return func(s decor.Statistics) string { - if s.Total == 0 { - pairFmt := "%.1f / %.1f (skipped: %.1f)" - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) - } - pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" - percentage := 100.0 * float64(s.Refill) / float64(s.Total) - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) - } - } - return decor.Any(producer(filler), wcc...) -} - -// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter -// is ioutil.Discard, the progress bar's output will be discarded -// NOTE: Every progress bar created within a progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called. -func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { - // shortDigestLen is the length of the digest used for blobs. - const shortDigestLen = 12 - - prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) - // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column. - maxPrefixLen := len("Copying blob ") + shortDigestLen - if len(prefix) > maxPrefixLen { - prefix = prefix[:maxPrefixLen] - } - - // onComplete will replace prefix once the bar/spinner has completed - onComplete = prefix + " " + onComplete - - // Use a normal progress bar when we know the size (i.e., size > 0). - // Otherwise, use a spinner to indicate that something's happening. - var bar *mpb.Bar - sstyle := mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft() - if info.Size > 0 { - if partial { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - customPartialBlobCounter(sstyle.Build()), - ), - ) - } else { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), - ), - ) - } - } else { - bar = pool.Add(0, - sstyle.Build(), - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - ) - } - if c.progressOutput == ioutil.Discard { - c.Printf("Copying %s %s\n", kind, info.Digest) - } - return bar -} - // copyConfig copies config.json, if any, from src to dest. 
func (c *copier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() @@ -1111,22 +1076,23 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { } defer c.concurrentBlobCopiesSemaphore.Release(1) - configBlob, err := src.ConfigBlob(ctx) - if err != nil { - return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) - } - destInfo, err := func() (types.BlobInfo, error) { // A scope for defer progressPool := c.newProgressPool() defer progressPool.Wait() bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done") defer bar.Abort(false) + configBlob, err := src.ConfigBlob(ctx) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) + } + destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false) if err != nil { return types.BlobInfo{}, err } - bar.SetTotal(int64(len(configBlob)), true) + + bar.mark100PercentComplete() return destInfo, nil }() if err != nil { @@ -1171,48 +1137,37 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" - // Diffs are needed if we are encrypting an image or trying to decrypt an image - diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) - - // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. - if !diffIDIsNeeded { + diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" + // When encrypting or decrypting, only use the simple code path. We might be able to optimize more + // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again), + // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not. + encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) + canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting + + // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable. + if canAvoidProcessingCompleteLayer { // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. // Fixing that will probably require passing more information to TryReusingBlob() than the current version of // the ImageDestination interface lets us pass in. - var ( - blobInfo types.BlobInfo - reused bool - err error - ) - // Note: the storage destination optimizes the committing of - // layers which requires passing the index of the layer. - // Hence, we need to special case and cast.
- dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions) - if ok { - options := internalTypes.TryReusingBlobOptions{ - Cache: ic.c.blobInfoCache, - CanSubstitute: ic.canSubstituteBlobs, - SrcRef: srcRef, - EmptyLayer: emptyLayer, - } - options.LayerIndex = &layerIndex - reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options) - } else { - reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) - } - + reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ + Cache: ic.c.blobInfoCache, + CanSubstitute: ic.canSubstituteBlobs, + EmptyLayer: emptyLayer, + LayerIndex: &layerIndex, + SrcRef: srcRef, + }) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest) } if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) func() { // A scope for defer - bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists") + bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists") defer bar.Abort(false) - bar.SetTotal(0, true) + bar.mark100PercentComplete() }() // Throw an event that the layer has been skipped @@ -1243,9 +1198,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // of the source file are not known yet and must be fetched. // Attempt a partial only when the source allows to retrieve a blob partially and // the destination has support for it. - imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable) - imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial) - if okSource && okDest && !diffIDIsNeeded { + if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() { if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") hideProgressBar := true @@ -1253,33 +1206,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to bar.Abort(hideProgressBar) }() - progress := make(chan int64) - terminate := make(chan interface{}) - - defer close(terminate) - defer close(progress) - - proxy := imageSourceSeekableProxy{ - source: imgSource, - progress: progress, + proxy := blobChunkAccessorProxy{ + wrapped: ic.c.rawSource, + bar: bar, } - go func() { - for { - select { - case written := <-progress: - bar.IncrInt64(written) - case <-terminate: - return - } - } - }() - - bar.SetTotal(srcInfo.Size, false) - info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache) + info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { - bar.SetRefill(srcInfo.Size - bar.Current()) - bar.SetCurrent(srcInfo.Size) - bar.SetTotal(srcInfo.Size, true) + if srcInfo.Size != -1 { + bar.SetRefill(srcInfo.Size - bar.Current()) + } + bar.mark100PercentComplete() hideProgressBar = false logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) return true, info @@ -1292,16 +1228,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } // Fallback: copy the layer, computing the diffID if we need to do so - srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest) - } - defer 
srcStream.Close() - return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") defer bar.Abort(false) + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest) + } + defer srcStream.Close() + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) if err != nil { return types.BlobInfo{}, "", err @@ -1317,14 +1253,22 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID") } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process - // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. - ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } diffID = diffIDResult.digest } } - bar.SetTotal(srcInfo.Size, true) + bar.mark100PercentComplete() return blobInfo, diffID, nil }() } @@ -1334,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. 
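The <-chan diffIDResult shape that copyLayerFromStream returns (digest the uncompressed bytes on a side path while the main path keeps streaming) can be sketched with a pipe and a tee; the layer bytes and the io.Discard "upload" here are stand-ins:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

type diffIDResult struct {
	digest digest.Digest
	err    error
}

func main() {
	layer := strings.NewReader("uncompressed layer bytes") // stand-in for the decompressed stream
	pr, pw := io.Pipe()
	results := make(chan diffIDResult, 1)

	// Side path: compute the DiffID from everything written into the pipe.
	go func() {
		d, err := digest.Canonical.FromReader(pr)
		results <- diffIDResult{digest: d, err: err}
	}()

	// Main path: drain the tee (io.Discard stands in for the upload),
	// then close the pipe writer so the digester sees EOF.
	if _, err := io.Copy(io.Discard, io.TeeReader(layer, pw)); err != nil {
		pw.CloseWithError(err)
	} else {
		pw.Close()
	}

	r := <-results
	fmt.Println(r.digest, r.err)
}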
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { + diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -1359,7 +1303,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -1411,7 +1355,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { if isConfig { // This is guaranteed by the caller, but set it here to be explicit. canModifyBlob = false } @@ -1431,6 +1375,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } var destStream io.Reader = digestingReader + // === Update progress bars + destStream = bar.ProxyReader(destStream) + // === Decrypt the stream, if required. var decrypted bool if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil { @@ -1465,9 +1412,6 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name()) } - // === Update progress bars - destStream = bar.ProxyReader(destStream) - // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. if getOriginalLayerCopyWriter != nil { @@ -1615,24 +1559,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } // === Finally, send the layer stream to dest. - var uploadedInfo types.BlobInfo - // Note: the storage destination optimizes the committing of layers - // which requires passing the index of the layer. Hence, we need to - // special case and cast. 
- dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions) - if ok { - options := internalTypes.PutBlobOptions{ - Cache: c.blobInfoCache, - IsConfig: isConfig, - EmptyLayer: emptyLayer, - } - if !isConfig { - options.LayerIndex = &layerIndex - } - uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) - } else { - uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig) + options := private.PutBlobOptions{ + Cache: c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, } + if !isConfig { + options.LayerIndex = &layerIndex + } + uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "writing blob") } @@ -1664,7 +1599,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // sent there if we are not already at EOF. if getOriginalLayerCopyWriter != nil { logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") - _, err := io.Copy(ioutil.Discard, originalLayerReader) + _, err := io.Copy(io.Discard, originalLayerReader) if err != nil { return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest) } @@ -1677,19 +1612,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) } if digestingReader.validationSucceeded { - // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: - // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader - // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob - // (because inputInfo.Digest == "", this must have been computed afresh). - switch compressionOperation { - case types.PreserveOriginal: - break // Do nothing, we have only one digest and we might not have even verified it. - case types.Compress: - c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) - case types.Decompress: - c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) - default: - return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encrypted && !decrypted { + // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: + // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader + // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob + // (because inputInfo.Digest == "", this must have been computed afresh). + switch compressionOperation { + case types.PreserveOriginal: + break // Do nothing, we have only one digest and we might not have even verified it. 
+ case types.Compress: + c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + case types.Decompress: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + default: + return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) + } } if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression { c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName) diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index b97edbf08b7..86ec8863aea 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -79,10 +79,10 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } - if !ic.canModifyManifest { - // We could also drop the !ic.canModifyManifest check and have the caller + if ic.cannotModifyManifestReason != "" { + // We could also drop this check and have the caller // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” + // messages. But it is nice to hide the “if we can't modify, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go new file mode 100644 index 00000000000..585d860570d --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -0,0 +1,148 @@ +package copy + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" + "github.com/vbauerster/mpb/v7" + "github.com/vbauerster/mpb/v7/decor" +) + +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. +func (c *copier) newProgressPool() *mpb.Progress { + return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) +} + +// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar +func customPartialBlobDecorFunc(s decor.Statistics) string { + if s.Total == 0 { + pairFmt := "%.1f / %.1f (skipped: %.1f)" + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) + } + pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" + percentage := 100.0 * float64(s.Refill) / float64(s.Total) + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) +} + +// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods. +type progressBar struct { + *mpb.Bar + originalSize int64 // or -1 if unknown +} + +// createProgressBar creates a progressBar in pool. 
Note that if the copier's reportWriter +// is io.Discard, the progress bar's output will be discarded +// +// NOTE: Every progress bar created within a progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called. +// +// As a convention, most users of progress bars should call mark100PercentComplete on full success; +// by convention, we don't leave progress bars in partial state when fully done +// (even if we copied much less data than anticipated). +func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar { + // shortDigestLen is the length of the digest used for blobs. + const shortDigestLen = 12 + + prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) + // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column. + maxPrefixLen := len("Copying blob ") + shortDigestLen + if len(prefix) > maxPrefixLen { + prefix = prefix[:maxPrefixLen] + } + + // onComplete will replace prefix once the bar/spinner has completed + onComplete = prefix + " " + onComplete + + // Use a normal progress bar when we know the size (i.e., size > 0). + // Otherwise, use a spinner to indicate that something's happening. + var bar *mpb.Bar + if info.Size > 0 { + if partial { + bar = pool.AddBar(info.Size, + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.Any(customPartialBlobDecorFunc), + ), + ) + } else { + bar = pool.AddBar(info.Size, + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), + ), + ) + } + } else { + bar = pool.New(0, + mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(), + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + ) + } + if c.progressOutput == io.Discard { + c.Printf("Copying %s %s\n", kind, info.Digest) + } + return &progressBar{ + Bar: bar, + originalSize: info.Size, + } +} + +// mark100PercentComplete marks the progress bar as 100% complete; +// it may do so by possibly advancing the current state if it is below the known total. +func (bar *progressBar) mark100PercentComplete() { + if bar.originalSize > 0 { + // We can't call bar.SetTotal even if we wanted to; the total cannot be changed + // after a progress bar is created with a definite total. + bar.SetCurrent(bar.originalSize) // This triggers the completion condition. + } else { + // -1 = unknown size + // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known + // size (possible at least in theory), in mpb, zero-sized progress bars are treated + // as unknown size, in particular they are not configured to be marked as + // complete on bar.Current() reaching bar.total (because that would happen already + // when creating the progress bar). + // That means that we are both _allowed_ to call SetTotal, and we _have to_. + bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete. + } +} + +// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar +// with the number of received bytes.
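mark100PercentComplete's two branches mirror an mpb v7 quirk: a bar created with a positive total completes via SetCurrent, while a zero-total spinner must be finished with SetTotal(-1, true). A standalone sketch of both paths:

package main

import "github.com/vbauerster/mpb/v7"

func main() {
	pool := mpb.New(mpb.WithWidth(40))
	defer pool.Wait()

	known := pool.AddBar(1024) // definite total, completion triggers on reaching it
	known.SetCurrent(1024)     // jumping to the total completes the bar

	spinner := pool.New(0, // zero total reads as "unknown size" to mpb
		mpb.SpinnerStyle(".", "..", "...").PositionLeft())
	spinner.SetTotal(-1, true) // total < 0: adopt Current() as total and mark complete
}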
+type blobChunkAccessorProxy struct { + wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor + bar *progressBar // A progress bar updated with the number of bytes read so far +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be not overlapping and sorted by their offset. +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + total += int64(c.Length) + } + s.bar.IncrInt64(total) + } + return rc, errs, err +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go similarity index 68% rename from vendor/github.com/containers/image/v5/copy/progress_reader.go rename to vendor/github.com/containers/image/v5/copy/progress_channel.go index 42f490d326a..d5e9e09bda9 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_reader.go +++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go @@ -1,15 +1,13 @@ package copy import ( - "context" "io" "time" - internalTypes "github.com/containers/image/v5/internal/types" "github.com/containers/image/v5/types" ) -// progressReader is a reader that reports its progress on an interval. +// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval. type progressReader struct { source io.Reader channel chan<- types.ProgressProperties @@ -79,26 +77,3 @@ func (r *progressReader) Read(p []byte) (int, error) { } return n, err } - -// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes -// are received. -type imageSourceSeekableProxy struct { - // source is the seekable input to read from. - source internalTypes.ImageSourceSeekable - // progress is the chan where the total number of bytes read so far are reported. - progress chan int64 -} - -// GetBlobAt reads from the ImageSourceSeekable and report how many bytes were received -// to the progress chan. -func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks) - if err == nil { - total := int64(0) - for _, c := range chunks { - total += int64(c.Length) - } - s.progress <- total - } - return rc, errs, err -} diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go index 61612a4d3d7..aa42674bcdc 100644 --- a/vendor/github.com/containers/image/v5/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -1,13 +1,14 @@ package copy import ( + "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports" "github.com/pkg/errors" ) // createSignature creates a new signature of manifest using keyIdentity. 
-func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { +func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) ([]byte, error) { mech, err := signature.NewGPGSigningMechanism() if err != nil { return nil, errors.Wrap(err, "initializing GPG") @@ -17,13 +18,19 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, e return nil, errors.Wrap(err, "Signing not supported") } - dockerReference := c.dest.Reference().DockerReference() - if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + if identity != nil { + if reference.IsNameOnly(identity) { + return nil, errors.Errorf("Sign identity must be a fully specified reference %s", identity) + } + } else { + identity = c.dest.Reference().DockerReference() + if identity == nil { + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } } c.Printf("Signing manifest\n") - newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) + newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase}) if err != nil { return nil, errors.Wrap(err, "creating signature") } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index ea20e7c5e41..3b135e68e50 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -3,7 +3,6 @@ package directory import ( "context" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -62,7 +61,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath()) } if versionExists { - contents, err := ioutil.ReadFile(d.ref.versionPath()) + contents, err := os.ReadFile(d.ref.versionPath()) if err != nil { return nil, err } @@ -86,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag } } // create version file - err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644) + err = os.WriteFile(d.ref.versionPath(), []byte(version), 0644) if err != nil { return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath()) } @@ -149,7 +148,7 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") + blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob") if err != nil { return types.BlobInfo{}, err } @@ -232,7 +231,7 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. 
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error { - return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) + return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) } // PutSignatures writes a set of signatures to the destination. @@ -240,7 +239,7 @@ func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { for i, sig := range signatures { - if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { + if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { return err } } @@ -272,7 +271,7 @@ func pathExists(path string) (bool, error) { // returns true if directory is empty func isDirEmpty(path string) (bool, error) { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return false, err } @@ -281,7 +280,7 @@ func isDirEmpty(path string) (bool, error) { // deletes the contents of a directory func removeDirContents(path string) error { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return err } diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go index ad9129d4012..8b509112aa8 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_src.go +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -3,7 +3,6 @@ package directory import ( "context" "io" - "io/ioutil" "os" "github.com/containers/image/v5/manifest" @@ -37,7 +36,7 @@ func (s *dirImageSource) Close() error { // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
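The io/ioutil removals scattered through these files all follow the standard Go 1.16 mapping; a compact reference sketch (error handling elided for brevity):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	dir, _ := os.MkdirTemp("", "demo") // was ioutil.TempDir
	defer os.RemoveAll(dir)

	path := dir + "/version"
	_ = os.WriteFile(path, []byte("Directory Transport Version: 1.1\n"), 0o644) // was ioutil.WriteFile
	data, _ := os.ReadFile(path)                                                // was ioutil.ReadFile

	f, _ := os.CreateTemp(dir, "dir-put-blob") // was ioutil.TempFile
	f.Close()

	// Caveat: os.ReadDir returns []os.DirEntry, not the []os.FileInfo that
	// ioutil.ReadDir returned; call entry.Info() where a FileInfo is needed.
	entries, _ := os.ReadDir(dir)

	_, _ = io.Copy(io.Discard, bytes.NewReader(data)) // io.Discard replaces ioutil.Discard
	fmt.Println(len(entries))
}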
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest)) + m, err := os.ReadFile(s.ref.manifestPath(instanceDigest)) if err != nil { return nil, "", err } @@ -71,7 +70,7 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { signatures := [][]byte{} for i := 0; ; i++ { - signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest)) + signature, err := os.ReadFile(s.ref.signaturePath(i, instanceDigest)) if err != nil { if os.IsNotExist(err) { break diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 3fe9a11d003..f537de72da3 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -1,13 +1,11 @@ package docker import ( - "bytes" "context" "crypto/tls" "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -463,7 +461,11 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea return nil, err } - url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + url, err := url.Parse(urlString) + if err != nil { + return nil, err + } return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope) } @@ -500,7 +502,7 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat // makeRequest should generally be preferred. // In case of an HTTP 429 status code in the response, it may automatically retry a few times. // TODO(runcom): too many arguments here, use a struct -func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { +func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { delay := backoffInitialDelay attempts := 0 for { @@ -518,7 +520,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url if delay > backoffMaxDelay { delay = backoffMaxDelay } - logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url, delay.Seconds()) + logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url.Redacted(), delay.Seconds()) select { case <-ctx.Done(): return nil, ctx.Err() @@ -533,12 +535,12 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // Note that no exponential back off is performed when receiving an http 429 status code. 
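Switching makeRequestToResolvedURL to a parsed *url.URL is what enables the url.Redacted() logging above: credentials embedded in registry URLs no longer leak into debug output. A tiny sketch of the difference (url.Redacted is stdlib, Go 1.15+):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://user:secret-token@registry.example.com/v2/")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())   // https://user:secret-token@registry.example.com/v2/
	fmt.Println(u.Redacted()) // https://user:xxxxx@registry.example.com/v2/
}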
-func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, method, url, stream) +func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, url.String(), stream) if err != nil { return nil, err } - if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. + if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it. req.ContentLength = streamLen } req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") @@ -553,7 +555,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, return nil, err } } - logrus.Debugf("%s %s", method, url) + logrus.Debugf("%s %s", method, url.Redacted()) res, err := c.client.Do(req) if err != nil { return nil, err @@ -650,10 +652,10 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall params.Add("refresh_token", c.auth.IdentityToken) params.Add("client_id", "containers/image") - authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode())) + authReq.Body = io.NopCloser(strings.NewReader(params.Encode())) authReq.Header.Add("User-Agent", c.userAgent) authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") - logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { return nil, err @@ -705,13 +707,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, } authReq.Header.Add("User-Agent", c.userAgent) - logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { return nil, err } defer res.Body.Close() - if err := httpResponseToError(res, "Requesting bear token"); err != nil { + if err := httpResponseToError(res, "Requesting bearer token"); err != nil { return nil, err } tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) @@ -735,14 +737,17 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { c.client = &http.Client{Transport: tr} ping := func(scheme string) error { - url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) + url, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)) + if err != nil { + return err + } resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) return err } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return httpResponseToError(resp, "") } @@ -762,14 +767,17 @@ 
func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { - url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) + url, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)) + if err != nil { + return false + } resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) return false } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return false } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 56c39c141d7..d02100cf80b 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -7,7 +7,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -182,7 +181,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, // This error text should never be user-visible, we terminate only after makeRequestToResolvedURL // returns, so there isn’t a way for the error text to be provided to any of our callers. defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) if err != nil { logrus.Debugf("Error uploading layer chunked %v", err) return nil, err @@ -207,7 +206,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, locationQuery := uploadLocation.Query() locationQuery.Set("digest", blobDigest.String()) uploadLocation.RawQuery = locationQuery.Encode() - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) if err != nil { return types.BlobInfo{}, err } @@ -257,9 +256,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc "from": {reference.Path(srcRepo)}, }.Encode(), } - mountPath := u.String() - logrus.Debugf("Trying to mount %s", mountPath) - res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope) + logrus.Debugf("Trying to mount %s", u.Redacted()) + res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope) if err != nil { return err } @@ -276,8 +274,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc if err != nil { return errors.Wrap(err, "determining upload URL after a mount attempt") } - logrus.Debugf("... 
started an upload instead of mounting, trying to cancel at %s", uploadLocation.String()) - res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope) + logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted()) + res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope) if err != nil { logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err) } else { @@ -464,6 +462,18 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst } return err } + // A HTTP server may not be a registry at all, and just return 200 OK to everything + // (in particular that can fairly easily happen after tearing down a website and + // replacing it with a global 302 redirect to a new website, completely ignoring the + // path in the request); in that case we could “succeed” uploading a whole image. + // With docker/distribution we could rely on a Docker-Content-Digest header being present + // (because docker/distribution/registry/client has been failing uploads if it was missing), + // but that has been defined as explicitly optional by + // https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers + // So, just note the missing header in a debug log. + if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 { + logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry") + } return nil } @@ -581,16 +591,16 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) if err != nil { return err } - err = ioutil.WriteFile(url.Path, signature, 0644) + err = os.WriteFile(url.Path, signature, 0644) if err != nil { return err } return nil case "http", "https": - return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.Redacted()) default: - return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) + return errors.Errorf("Unsupported scheme when writing signature to %s", url.Redacted()) } } @@ -608,9 +618,9 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error return false, err case "http", "https": - return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. 
Configure a sigstore-staging: location", url.Scheme, url.Redacted()) default: - return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String()) + return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted()) } } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 314e9b394ee..085a3afcc5c 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" @@ -16,7 +15,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" - internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" @@ -147,6 +146,11 @@ func (s *dockerImageSource) Close() error { return nil } +// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. +func (s *dockerImageSource) SupportsGetBlobAt() bool { + return true +} + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() // to read the image's layers. @@ -248,13 +252,14 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") } for _, u := range urls { - if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { + url, err := url.Parse(u) + if err != nil || (url.Scheme != "http" && url.Scheme != "https") { continue // unsupported url. skip this url. } // NOTE: we must not authenticate on additional URLs as those // can be abused to leak credentials or tokens. Please // refer to CVE-2020-15157 for more information. 
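Aside: the CVE-2020-15157 comment above is the key constraint in getExternalBlob. As a hedged, standalone sketch (fetchExternalBlob is a hypothetical helper, not the vendored code) of fetching a manifest-supplied URL without forwarding registry credentials:

package example

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// fetchExternalBlob illustrates the rule above: URLs embedded in a manifest are
// effectively attacker-controlled, so the request must carry no Authorization
// header, and only http/https schemes are accepted.
func fetchExternalBlob(ctx context.Context, rawURL string) (io.ReadCloser, error) {
	u, err := url.Parse(rawURL)
	if err != nil || (u.Scheme != "http" && u.Scheme != "https") {
		return nil, fmt.Errorf("unsupported external blob URL %q", rawURL)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	// Deliberately no credentials here; see CVE-2020-15157 ("ContainerDrip").
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		resp.Body.Close()
		return nil, fmt.Errorf("fetching external blob from %s: status %d", u.Redacted(), resp.StatusCode)
	}
	return resp.Body, nil
}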
- resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, u, nil, nil, -1, noAuth, nil) + resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err == nil { if resp.StatusCode != http.StatusOK { err = errors.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) @@ -288,7 +293,7 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool { } // splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks -func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) { +func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) { defer close(streams) defer close(errs) currentOffset := uint64(0) @@ -302,7 +307,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, break } toSkip := c.Offset - currentOffset - if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil { + if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil { errs <- err break } @@ -310,7 +315,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, } s := signalCloseReader{ closed: make(chan interface{}), - stream: ioutil.NopCloser(io.LimitReader(body, int64(c.Length))), + stream: io.NopCloser(io.LimitReader(body, int64(c.Length))), consumeStream: true, } streams <- s @@ -322,7 +327,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, } // handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan. -func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) { +func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) { defer close(streams) defer close(errs) if !strings.HasPrefix(mediaType, "multipart/") { @@ -357,9 +362,12 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } } -// GetBlobAt returns a stream for the specified blob. +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. // The specified chunks must be not overlapping and sorted by their offset. -func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. 
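Aside: the consumption contract just documented (each reader drained fully, in order) is easy to get wrong. A hedged caller-side sketch follows; consumeChunks is hypothetical, and since internal/private is an internal package this only compiles inside the containers/image module:

package example

import (
	"context"
	"io"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

// consumeChunks drains a BlobChunkAccessor response per the contract above:
// every reader is fully consumed and closed before waiting for the next one,
// and the error channel is watched concurrently so a producer-side failure
// cannot deadlock the consumer.
func consumeChunks(ctx context.Context, src private.BlobChunkAccessor, info types.BlobInfo, chunks []private.ImageSourceChunk) error {
	streams, errs, err := src.GetBlobAt(ctx, info, chunks)
	if err != nil {
		return err
	}
	for streams != nil || errs != nil {
		select {
		case stream, ok := <-streams:
			if !ok {
				streams = nil
				continue
			}
			_, copyErr := io.Copy(io.Discard, stream) // fully consume, in order
			stream.Close()
			if copyErr != nil {
				return copyErr
			}
		case chanErr, ok := <-errs:
			if !ok {
				errs = nil
				continue
			}
			if chanErr != nil {
				return chanErr
			}
		}
	}
	return nil
}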
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { headers := make(map[string][]string) var rangeVals []string @@ -401,7 +409,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, return streams, errs, nil case http.StatusBadRequest: res.Body.Close() - return nil, nil, internalTypes.BadPartialRequestError{Status: res.Status} + return nil, nil, private.BadPartialRequestError{Status: res.Status} default: err := httpResponseToError(res, "Error fetching partial blob") if err == nil { @@ -506,7 +514,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( switch url.Scheme { case "file": logrus.Debugf("Reading %s", url.Path) - sig, err := ioutil.ReadFile(url.Path) + sig, err := os.ReadFile(url.Path) if err != nil { if os.IsNotExist(err) { return nil, true, nil @@ -516,7 +524,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( return sig, false, nil case "http", "https": - logrus.Debugf("GET %s", url) + logrus.Debugf("GET %s", url.Redacted()) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil) if err != nil { return nil, false, err @@ -529,7 +537,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( if res.StatusCode == http.StatusNotFound { return nil, true, nil } else if res.StatusCode != http.StatusOK { - return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) } sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) if err != nil { @@ -538,7 +546,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( return sig, false, nil default: - return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) + return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.Redacted()) } } @@ -602,8 +610,11 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) } - digest := get.Header.Get("Docker-Content-Digest") - deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest) + manifestDigest, err := manifest.Digest(manifestBody) + if err != nil { + return fmt.Errorf("computing manifest digest: %w", err) + } + deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest) // When retrieving the digest from a registry >= 2.3 use the following header: // "Accept": "application/vnd.docker.distribution.manifest.v2+json" @@ -621,11 +632,6 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) } - manifestDigest, err := manifest.Digest(manifestBody) - if err != nil { - return err - } - for i := 0; ; i++ { url := signatureStorageURL(c.signatureBase, manifestDigest, i) missing, err := c.deleteOneSignature(url) @@ -756,7 +762,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) { func (s signalCloseReader) Close() error { defer close(s.closed) if s.consumeStream { - if _, err := io.Copy(ioutil.Discard, s.stream); err != nil { + if _, err := 
io.Copy(io.Discard, s.stream); err != nil { s.stream.Close() return err } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go index 6164ceb66ed..c77c002d15e 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -4,7 +4,6 @@ import ( "archive/tar" "encoding/json" "io" - "io/ioutil" "os" "path" @@ -53,7 +52,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { // The caller should call .Close() on the returned archive when done. func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) { // Save inputStream to a temporary file - tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") + tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") if err != nil { return nil, errors.Wrap(err, "creating temporary file") } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go index b8d84d2452a..8e9be17c189 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go @@ -6,7 +6,6 @@ import ( "context" "encoding/json" "io" - "io/ioutil" "os" "path" "sync" @@ -170,7 +169,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif uncompressedSize := h.Size if isCompressed { - uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) + uncompressedSize, err = io.Copy(io.Discard, uncompressedStream) if err != nil { return nil, errors.Wrapf(err, "reading %s to find its size", layerPath) } @@ -263,7 +262,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B } if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. - return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil } if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go index 515e59327d3..d0a3f1be069 100644 --- a/vendor/github.com/containers/image/v5/docker/lookaside.go +++ b/vendor/github.com/containers/image/v5/docker/lookaside.go @@ -2,7 +2,6 @@ package docker import ( "fmt" - "io/ioutil" "net/url" "os" "path" @@ -82,7 +81,7 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, } else { // returns default directory if no sigstore specified in configuration file url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID()) - logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String()) + logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted()) } // NOTE: Keep this in sync with docs/signature-protocols.md! // FIXME? Restrict to explicitly supported schemes? 
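Aside: related hardening appears in the deleteImage hunk above, which stops trusting the registry's Docker-Content-Digest header and instead recomputes the digest from the fetched manifest body. A simplified sketch of the idea using go-digest (the real manifest.Digest also strips schema1 JWS signatures first, which this sketch omits):

package example

import (
	digest "github.com/opencontainers/go-digest"
)

// computeManifestDigest derives the digest from the manifest bytes that were
// actually fetched, rather than trusting a response header that a misbehaving
// registry could omit or forge.
func computeManifestDigest(manifestBody []byte) digest.Digest {
	return digest.Canonical.FromBytes(manifestBody) // sha256 in the canonical configuration
}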
@@ -146,7 +145,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { continue } configPath := filepath.Join(dirPath, configName) - configBytes, err := ioutil.ReadFile(configPath) + configBytes, err := os.ReadFile(configPath) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go new file mode 100644 index 00000000000..7b15871f7b6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go @@ -0,0 +1,6 @@ +package reference + +// Return true if the specified string fully matches `IdentifierRegexp`. +func IsFullIdentifier(s string) bool { + return anchoredIdentifierRegexp.MatchString(s) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go new file mode 100644 index 00000000000..82734a6cdc3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -0,0 +1,69 @@ +package imagedestination + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" +) + +// FromPublic(dest) returns an object that provides the private.ImageDestination API +// +// Eventually, we might want to expose this function, and methods of the returned object, +// as a public API (or rather, a variant that does not include the already-superseded +// methods of types.ImageDestination, and has added more future-proofing), and more strongly +// deprecate direct use of types.ImageDestination. +// +// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct +// with public methods, or perhaps a private interface), so that we can add methods +// without breaking any external implementors of a public interface. +func FromPublic(dest types.ImageDestination) private.ImageDestination { + if dest2, ok := dest.(private.ImageDestination); ok { + return dest2 + } + return &wrapped{ImageDestination: dest} +} + +// wrapped provides the private.ImageDestination operations +// for a destination that only implements types.ImageDestination +type wrapped struct { + types.ImageDestination +} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (w *wrapped) SupportsPutBlobPartial() bool { + return false +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. 
chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { + return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", w.Reference().Transport().Name()) +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go new file mode 100644 index 00000000000..fe1be8d9eab --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go @@ -0,0 +1,47 @@ +package imagesource + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" +) + +// FromPublic(src) returns an object that provides the private.ImageSource API +// +// Eventually, we might want to expose this function, and methods of the returned object, +// as a public API (or rather, a variant that does not include the already-superseded +// methods of types.ImageSource, and has added more future-proofing), and more strongly +// deprecate direct use of types.ImageSource. +// +// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct +// with public methods, or perhaps a private interface), so that we can add methods +// without breaking any external implementors of a public interface. +func FromPublic(src types.ImageSource) private.ImageSource { + if src2, ok := src.(private.ImageSource); ok { + return src2 + } + return &wrapped{ImageSource: src} +} + +// wrapped provides the private.ImageSource operations +// for a source that only implements types.ImageSource +type wrapped struct { + types.ImageSource +} + +// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. +func (w *wrapped) SupportsGetBlobAt() bool { + return false +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be not overlapping and sorted by their offset. 
+// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", w.Reference().Transport().Name()) +} diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go index 3fed1995cb8..49fa410e91f 100644 --- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go +++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go @@ -2,7 +2,6 @@ package iolimits import ( "io" - "io/ioutil" "github.com/pkg/errors" ) @@ -47,7 +46,7 @@ const ( func ReadAtMost(reader io.Reader, limit int) ([]byte, error) { limitedReader := io.LimitReader(reader, int64(limit+1)) - res, err := ioutil.ReadAll(limitedReader) + res, err := io.ReadAll(limitedReader) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go deleted file mode 100644 index bf6cc87d421..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// Key represents a single key linked to one or more kernel keyrings. -type Key struct { - Name string - - id, ring keyID - size int -} - -// ID returns the 32-bit kernel identifier for a specific key -func (k *Key) ID() int32 { - return int32(k.id) -} - -// Get the key's value as a byte slice -func (k *Key) Get() ([]byte, error) { - var ( - b []byte - err error - sizeRead int - ) - - if k.size == 0 { - k.size = 512 - } - - size := k.size - - b = make([]byte, int(size)) - sizeRead = size + 1 - for sizeRead > size { - r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size) - if err != nil { - return nil, err - } - - if sizeRead = int(r1); sizeRead > size { - b = make([]byte, sizeRead) - size = sizeRead - sizeRead = size + 1 - } else { - k.size = sizeRead - } - } - return b[:k.size], err -} - -// Unlink a key from the keyring it was loaded from (or added to). If the key -// is not linked to any other keyrings, it is destroyed. -func (k *Key) Unlink() error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0) - return err -} - -// Describe returns a string describing the attributes of a specified key -func (k *Key) Describe() (string, error) { - keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id)) - if err != nil { - return "", err - } - return keyAttr, nil -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go deleted file mode 100644 index 5eaad615c7c..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build linux -// +build linux - -// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface) -package keyctl - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Keyring is the basic interface to a linux keyctl keyring. -type Keyring interface { - ID - Add(string, []byte) (*Key, error) - Search(string) (*Key, error) -} - -type keyring struct { - id keyID -} - -// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have. -type ID interface { - ID() int32 -} - -// Add a new key to a keyring. The key can be searched for later by name. -func (kr *keyring) Add(name string, key []byte) (*Key, error) { - r, err := unix.AddKey("user", name, key, int(kr.id)) - if err == nil { - key := &Key{Name: name, id: keyID(r), ring: kr.id} - return key, nil - } - return nil, err -} - -// Search for a key by name, this also searches child keyrings linked to this -// one. The key, if found, is linked to the top keyring that Search() was called -// from. -func (kr *keyring) Search(name string) (*Key, error) { - id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0) - if err == nil { - return &Key{Name: name, id: keyID(id), ring: kr.id}, nil - } - return nil, err -} - -// ID returns the 32-bit kernel identifier of a keyring -func (kr *keyring) ID() int32 { - return int32(kr.id) -} - -// SessionKeyring returns the current login session keyring -func SessionKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_SESSION_KEYRING) -} - -// UserKeyring returns the keyring specific to the current user. -func UserKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_USER_KEYRING) -} - -// Unlink an object from a keyring -func Unlink(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} - -// Link a key into a keyring -func Link(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} - -// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it -func ReadUserKeyring() ([]*Key, error) { - var ( - b []byte - err error - sizeRead int - ) - krSize := 4 - size := krSize - b = make([]byte, size) - sizeRead = size + 1 - for sizeRead > size { - r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size) - if err != nil { - return nil, err - } - - if sizeRead = int(r1); sizeRead > size { - b = make([]byte, sizeRead) - size = sizeRead - sizeRead = size + 1 - } else { - krSize = sizeRead - } - } - keyIDs := getKeyIDsFromByte(b[:krSize]) - return keyIDs, err -} - -func getKeyIDsFromByte(byteKeyIDs []byte) []*Key { - idSize := 4 - var keys []*Key - for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize { - tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx])) - keys = append(keys, &Key{id: keyID(tempID)}) - } - return keys -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go deleted file mode 100644 index 5f4d2157ae9..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build linux -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// KeyPerm represents in-kernel access control permission to keys and keyrings -// as a 32-bit integer broken up into four permission sets, one per byte. -// In MSB order, the perms are: Processor, User, Group, Other. -type KeyPerm uint32 - -const ( - // PermOtherAll sets all permission for Other - PermOtherAll KeyPerm = 0x3f << (8 * iota) - // PermGroupAll sets all permission for Group - PermGroupAll - // PermUserAll sets all permission for User - PermUserAll - // PermProcessAll sets all permission for Processor - PermProcessAll -) - -// SetPerm sets the permissions on a key or keyring. -func SetPerm(k ID, p KeyPerm) error { - err := unix.KeyctlSetperm(int(k.ID()), uint32(p)) - return err -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go deleted file mode 100644 index f61666e42c2..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -type keyID int32 - -func newKeyring(id keyID) (*keyring, error) { - r1, err := unix.KeyctlGetKeyringID(int(id), true) - if err != nil { - return nil, err - } - - if id < 0 { - r1 = int(id) - } - return &keyring{id: keyID(r1)}, nil -} diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go new file mode 100644 index 00000000000..65788651fb5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -0,0 +1,106 @@ +package private + +import ( + "context" + "io" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +) + +// ImageSource is an internal extension to the types.ImageSource interface. +type ImageSource interface { + types.ImageSource + + // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. + SupportsGetBlobAt() bool + // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt(). + BlobChunkAccessor +} + +// ImageDestination is an internal extension to the types.ImageDestination +// interface. +type ImageDestination interface { + types.ImageDestination + + // SupportsPutBlobPartial returns true if PutBlobPartial is supported. + SupportsPutBlobPartial() bool + + // PutBlobWithOptions writes contents of stream and returns data representing the result. + // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. + // inputInfo.Size is the expected length of stream, if known. + // inputInfo.MediaType describes the blob format, if known. + // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available + // to any other readers for download using the supplied digest. + // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
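Aside: the PutBlobWithOptions contract above ("the digest MUST match the stream contents ... delete any data stored so far") is typically met by hashing the stream while writing it. A hedged sketch with writeVerified as a hypothetical helper:

package example

import (
	"fmt"
	"io"
	"os"

	digest "github.com/opencontainers/go-digest"
)

// writeVerified tees the stream through a digester while writing it to disk,
// compares digests at EOF, and removes the partial file on any failure.
func writeVerified(stream io.Reader, expected digest.Digest, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	digester := expected.Algorithm().Digester()
	if _, err := io.Copy(f, io.TeeReader(stream, digester.Hash())); err != nil {
		f.Close()
		os.Remove(path) // "delete any data stored so far"
		return err
	}
	if err := f.Close(); err != nil {
		os.Remove(path)
		return err
	}
	if actual := digester.Digest(); actual != expected {
		os.Remove(path)
		return fmt.Errorf("blob digest mismatch: expected %s, got %s", expected, actual)
	}
	return nil
}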
+ PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error) + + // PutBlobPartial attempts to create a blob using the data that is already present + // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. + // It is available only if SupportsPutBlobPartial(). + // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller + // should fall back to PutBlobWithOptions. + PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) + + // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination + // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). + // info.Digest must not be empty. + // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may + // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be + // reflected in the manifest that will be written. + // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. + TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error) +} + +// PutBlobOptions are used in PutBlobWithOptions. +type PutBlobOptions struct { + Cache types.BlobInfoCache // Cache to optionally update with the uploaded blob / look up blob infos. + IsConfig bool // True if the blob is a config + + // The following fields are new to internal/private. Users of internal/private MUST fill them in, + // but they also must expect that they will be ignored by types.ImageDestination transports. + + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. + LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. +} + +// TryReusingBlobOptions are used in TryReusingBlobWithOptions. +type TryReusingBlobOptions struct { + Cache types.BlobInfoCache // Cache to use and/or update. + // If true, it is allowed to use an equivalent of the desired blob; + // in that case the returned info may not match the input. + CanSubstitute bool + + // The following fields are new to internal/private. Users of internal/private MUST fill them in, + // but they also must expect that they will be ignored by types.ImageDestination transports. + + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. + LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. + SrcRef reference.Named // A reference to the source image that contains the input blob. +} + +// ImageSourceChunk is a portion of a blob. +// This API is experimental and can be changed without bumping the major version number. +type ImageSourceChunk struct { + Offset uint64 + Length uint64 +} + +// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
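Aside: the options structs above pair with the FromPublic wrappers earlier in this diff. A hedged sketch of the intended call pattern (putBlob is hypothetical, and the internal packages only compile within the containers/image module):

package example

import (
	"context"
	"io"

	"github.com/containers/image/v5/internal/imagedestination"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

// putBlob upgrades any types.ImageDestination to the private API, then
// feature-tests the optional partial-upload capability instead of
// type-asserting at each call site.
func putBlob(ctx context.Context, pubDest types.ImageDestination, chunkAccessor private.BlobChunkAccessor,
	stream io.Reader, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
	dest := imagedestination.FromPublic(pubDest) // no-op if pubDest already implements private.ImageDestination
	if dest.SupportsPutBlobPartial() {
		if info, err := dest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache); err == nil {
			return info, nil
		}
		// The contract allows failure here; fall back to a full upload.
	}
	return dest.PutBlobWithOptions(ctx, stream, srcInfo, private.PutBlobOptions{Cache: cache, IsConfig: false})
}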
+type BlobChunkAccessor interface { + // GetBlobAt returns a sequential channel of readers that contain data for the requested + // blob chunks, and a channel that might get a single error value. + // The specified chunks must be not overlapping and sorted by their offset. + // The readers must be fully consumed, in the order they are returned, before blocking + // to read the next chunk. + GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) +} + +// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request. +type BadPartialRequestError struct { + Status string +} + +func (e BadPartialRequestError) Error() string { + return e.Status +} diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go index 306220585b6..84bb656ac71 100644 --- a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go +++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go @@ -3,7 +3,6 @@ package streamdigest import ( "fmt" "io" - "io/ioutil" "os" "github.com/containers/image/v5/internal/putblobdigest" @@ -16,7 +15,7 @@ import ( // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. // If an error occurs, inputInfo is not modified. func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) { - diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") + diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") if err != nil { return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err) } diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go deleted file mode 100644 index 388f8cf3b49..00000000000 --- a/vendor/github.com/containers/image/v5/internal/types/types.go +++ /dev/null @@ -1,91 +0,0 @@ -package types - -import ( - "context" - "io" - - "github.com/containers/image/v5/docker/reference" - publicTypes "github.com/containers/image/v5/types" -) - -// ImageDestinationWithOptions is an internal extension to the ImageDestination -// interface. -type ImageDestinationWithOptions interface { - publicTypes.ImageDestination - - // PutBlobWithOptions is a wrapper around PutBlob. If - // options.LayerIndex is set, the blob will be committed directly. - // Either by the calling goroutine or by another goroutine already - // committing layers. - // - // Please note that TryReusingBlobWithOptions and PutBlobWithOptions - // *must* be used the together. Mixing the two with non "WithOptions" - // functions is not supported. - PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error) - - // TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If - // options.LayerIndex is set, the reused blob will be recoreded as - // already pulled. - // - // Please note that TryReusingBlobWithOptions and PutBlobWithOptions - // *must* be used the together. Mixing the two with non "WithOptions" - // functions is not supported. - TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error) -} - -// PutBlobOptions are used in PutBlobWithOptions. 
-type PutBlobOptions struct { - // Cache to look up blob infos. - Cache publicTypes.BlobInfoCache - // Denotes whether the blob is a config or not. - IsConfig bool - // Indicates an empty layer. - EmptyLayer bool - // The corresponding index in the layer slice. - LayerIndex *int -} - -// TryReusingBlobOptions are used in TryReusingBlobWithOptions. -type TryReusingBlobOptions struct { - // Cache to look up blob infos. - Cache publicTypes.BlobInfoCache - // Use an equivalent of the desired blob. - CanSubstitute bool - // Indicates an empty layer. - EmptyLayer bool - // The corresponding index in the layer slice. - LayerIndex *int - // The reference of the image that contains the target blob. - SrcRef reference.Named -} - -// ImageSourceChunk is a portion of a blob. -// This API is experimental and can be changed without bumping the major version number. -type ImageSourceChunk struct { - Offset uint64 - Length uint64 -} - -// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob. -// This API is experimental and can be changed without bumping the major version number. -type ImageSourceSeekable interface { - // GetBlobAt returns a stream for the specified blob. - // The specified chunks must be not overlapping and sorted by their offset. - GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error) -} - -// ImageDestinationPartial is a service to store a blob by requesting the missing chunks to a ImageSourceSeekable. -// This API is experimental and can be changed without bumping the major version number. -type ImageDestinationPartial interface { - // PutBlobPartial writes contents of stream and returns data representing the result. - PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo publicTypes.BlobInfo, cache publicTypes.BlobInfoCache) (publicTypes.BlobInfo, error) -} - -// BadPartialRequestError is returned by ImageSourceSeekable.GetBlobAt on an invalid request. -type BadPartialRequestError struct { - Status string -} - -func (e BadPartialRequestError) Error() string { - return e.Status -} diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 511cdcc37e5..20955ab7ff4 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -51,7 +51,7 @@ const ( // other than the ones the caller specifically allows. // expectedMIMEType is used only for diagnostics. // NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format -// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambigous +// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous // data that just isn’t what was expected, as opposed to actually ambiguous data. func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, allowed allowedManifestFields) error { @@ -71,7 +71,7 @@ func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, Manifests interface{} `json:"manifests"` }{} if err := json.Unmarshal(manifest, &detectedFields); err != nil { - // The caller was supposed to already validate version numbers, so this shold not happen; + // The caller was supposed to already validate version numbers, so this should not happen; // let’s not bother with making this error “nice”. 
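Aside: for context on validateUnambiguousManifestFormat above, a made-up example of the ambiguity it guards against: a payload that decodes cleanly as both a manifest list and a single image manifest.

package example

// ambiguous would satisfy json.Unmarshal for both a list-style manifest
// (because of "manifests") and an image-style manifest (because of
// "config"/"layers"); the digest and media type a consumer computes would then
// depend on which interpretation it happened to pick, which is exactly what
// the validator rejects.
const ambiguous = `{
	"schemaVersion": 2,
	"manifests": [],
	"config": {},
	"layers": []
}`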
return err } diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 4b644f2539a..2e3e5da15ea 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -110,7 +110,8 @@ func GuessMIMEType(manifest []byte) string { } switch meta.MediaType { - case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type. + case DockerV2Schema2MediaType, DockerV2ListMediaType, + imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type. return meta.MediaType } // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. @@ -121,9 +122,9 @@ func GuessMIMEType(manifest []byte) string { } return DockerV2Schema1MediaType case 2: - // best effort to understand if this is an OCI image since mediaType - // isn't in the manifest for OCI anymore - // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. + // Best effort to understand if this is an OCI image since mediaType + // wasn't in the manifest for OCI image-spec < 1.0.2. + // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. ociMan := struct { Config struct { MediaType string `json:"mediaType"` diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index c4616b96500..5892184df19 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -66,6 +66,7 @@ func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descript return &OCI1{ imgspecv1.Manifest{ Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, Config: config, Layers: layers, }, diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go index 5bec43ff998..c4f11e09c56 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go @@ -119,6 +119,7 @@ func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[ index := OCI1Index{ imgspecv1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, Manifests: make([]imgspecv1.Descriptor, len(components)), Annotations: dupStringStringMap(annotations), }, @@ -195,6 +196,7 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { index := OCI1Index{ Index: imgspecv1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, Manifests: []imgspecv1.Descriptor{}, Annotations: make(map[string]string), }, diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index 54d325d34df..4fa9127659b 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -3,7 +3,6 @@ package archive import ( "context" "fmt" - "io/ioutil" "os" "strings" @@ -161,7 +160,7 @@ func (t *tempDirOCIRef) deleteTempDir() error { // createOCIRef creates the oci reference of the image // If SystemContext.BigFilesTemporaryDir not "", overrides the 
temporary directory to use for storing big files func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) { - dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") + dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") if err != nil { return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory") } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index d0ee7263527..77e8fd87637 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -112,7 +111,7 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. func (d *ociImageDestination) HasThreadSafePutBlob() bool { - return false + return true } // PutBlob writes contents of stream and returns data representing the result. @@ -124,7 +123,7 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") if err != nil { return types.BlobInfo{}, err } @@ -238,7 +237,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc if err := ensureParentDirectoryExists(blobPath); err != nil { return err } - if err := ioutil.WriteFile(blobPath, m, 0644); err != nil { + if err := os.WriteFile(blobPath, m, 0644); err != nil { return err } @@ -309,14 +308,14 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][] // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error { - if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { + if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { return err } indexJSON, err := json.Marshal(d.index) if err != nil { return err } - return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) + return os.WriteFile(d.ref.indexPath(), indexJSON, 0644) } func ensureDirectoryExists(path string) error { diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index 9d8ab689ba4..8973f461c9a 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -3,7 +3,6 @@ package layout import ( "context" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -93,7 +92,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest return nil, "", err } - m, err := ioutil.ReadFile(manifestPath) + m, err := os.ReadFile(manifestPath) if err != nil { return nil, "", err } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 4ffbced6bd6..a6473ae68f4 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -5,7 +5,6 @@ import ( "crypto/x509" "encoding/json" "fmt" - "io/ioutil" "net" "net/http" "net/url" @@ -625,7 +624,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { // loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile // LoadFromFile takes a filename and deserializes the contents into Config object func loadFromFile(filename string) (*clientcmdConfig, error) { - kubeconfigBytes, err := ioutil.ReadFile(filename) + kubeconfigBytes, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -1013,7 +1012,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { return data, nil } if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) + fileData, err := os.ReadFile(file) if err != nil { return []byte{}, err } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index c7c6cf6945a..67612d800c2 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -95,7 +95,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re req.Header.Set("Content-Type", "application/json") } - logrus.Debugf("%s %s", method, url.String()) + logrus.Debugf("%s %s", method, url.Redacted()) res, err := c.httpClient.Do(req) if err != nil { return nil, err diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index 3eb2a2cba22..011118fa52b 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -148,7 +147,7 @@ func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using 
the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") + tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob") if err != nil { return types.BlobInfo{}, err } @@ -180,20 +179,24 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, } func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { - entries, err := ioutil.ReadDir(dir) + entries, err := os.ReadDir(dir) if err != nil { return err } - for _, info := range entries { - fullpath := filepath.Join(dir, info.Name()) - if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + for _, entry := range entries { + fullpath := filepath.Join(dir, entry.Name()) + if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { if err := os.Remove(fullpath); err != nil { return err } continue } + info, err := entry.Info() + if err != nil { + return err + } if selinuxHnd != nil { relPath, err := filepath.Rel(root, fullpath) if err != nil { @@ -223,7 +226,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user } } - if info.IsDir() { + if entry.IsDir() { if usermode { if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { return err @@ -233,7 +236,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user if err != nil { return err } - } else if usermode && (info.Mode().IsRegular()) { + } else if usermode && (entry.Type().IsRegular()) { if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { return err } @@ -405,7 +408,7 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [ } d.digest = digest - return ioutil.WriteFile(manifestPath, manifestBlob, 0644) + return os.WriteFile(manifestPath, manifestBlob, 0644) } // PutSignatures writes signatures to the destination. 
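Editor's note: the fixFiles rewrite above is one place where os.ReadDir is not a drop-in replacement for ioutil.ReadDir, because os.DirEntry carries only the file-type bits; the full os.FileInfo (needed for the permission-bit checks) must be fetched separately with entry.Info(). A minimal, self-contained sketch of that pattern follows; walkDirOnce and its printed output are illustrative only, not part of the vendored code.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// walkDirOnce mirrors the migration pattern used in fixFiles:
// entry.Type() exposes only the file-type bits without an extra stat(2),
// while entry.Info() is called lazily, only when the full os.FileInfo
// (e.g. permission bits) is actually needed.
func walkDirOnce(dir string) error {
	entries, err := os.ReadDir(dir) // []os.DirEntry, sorted by name
	if err != nil {
		return err
	}
	for _, entry := range entries {
		fullpath := filepath.Join(dir, entry.Name())
		// Cheap check: Type() needs no second syscall.
		if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
			fmt.Println("special file:", fullpath)
			continue
		}
		info, err := entry.Info() // stat only now that we need the mode
		if err != nil {
			return err
		}
		fmt.Printf("%s mode=%v\n", fullpath, info.Mode())
	}
	return nil
}

func main() {
	if err := walkDirOnce("."); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```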
@@ -423,7 +426,7 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [ for i, sig := range signatures { signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i)) - if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil { + if err := os.WriteFile(signaturePath, sig, 0644); err != nil { return err } } diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go index d30c764a630..1e1f2be03cd 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go @@ -9,7 +9,6 @@ import ( "encoding/base64" "fmt" "io" - "io/ioutil" "strconv" "strings" "unsafe" @@ -369,7 +368,7 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d } defer sigReader.Close() - sig, err := ioutil.ReadAll(sigReader) + sig, err := io.ReadAll(sigReader) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go new file mode 100644 index 00000000000..8b22733ac3a --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go @@ -0,0 +1,538 @@ +package blobcache + +import ( + "bytes" + "context" + "io" + "os" + "path/filepath" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + _ types.ImageReference = &BlobCache{} + _ types.ImageSource = &blobCacheSource{} + _ types.ImageDestination = &blobCacheDestination{} +) + +const ( + compressedNote = ".compressed" + decompressedNote = ".decompressed" +) + +// BlobCache is an object which saves copies of blobs that are written to it while passing them +// through to some real destination, and which can be queried directly in order to read them +// back. +// +// Implements types.ImageReference. +type BlobCache struct { + reference types.ImageReference + // WARNING: The contents of this directory may be accessed concurrently, + // both within this process and by multiple different processes + directory string + compress types.LayerCompression +} + +type blobCacheSource struct { + reference *BlobCache + source types.ImageSource + sys types.SystemContext + // this mutex synchronizes the counters below + mu sync.Mutex + cacheHits int64 + cacheMisses int64 + cacheErrors int64 +} + +type blobCacheDestination struct { + reference *BlobCache + destination types.ImageDestination +} + +func makeFilename(blobSum digest.Digest, isConfig bool) string { + if isConfig { + return blobSum.String() + ".config" + } + return blobSum.String() +} + +// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are +// written to the destination image created from the resulting reference will also be stored +// as-is to the specified directory or a temporary directory. 
+// The compress argument controls whether or not the cache will try to substitute a compressed +// or different version of a blob when preparing the list of layers when reading an image. +func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (*BlobCache, error) { + if directory == "" { + return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref)) + } + switch compress { + case types.Compress, types.Decompress, types.PreserveOriginal: + // valid value, accept it + default: + return nil, errors.Errorf("unhandled LayerCompression value %v", compress) + } + return &BlobCache{ + reference: ref, + directory: directory, + compress: compress, + }, nil +} + +func (b *BlobCache) Transport() types.ImageTransport { + return b.reference.Transport() +} + +func (b *BlobCache) StringWithinTransport() string { + return b.reference.StringWithinTransport() +} + +func (b *BlobCache) DockerReference() reference.Named { + return b.reference.DockerReference() +} + +func (b *BlobCache) PolicyConfigurationIdentity() string { + return b.reference.PolicyConfigurationIdentity() +} + +func (b *BlobCache) PolicyConfigurationNamespaces() []string { + return b.reference.PolicyConfigurationNamespaces() +} + +func (b *BlobCache) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return b.reference.DeleteImage(ctx, sys) +} + +func (b *BlobCache) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { + if blobinfo.Digest == "" { + return false, -1, nil + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(b.directory, makeFilename(blobinfo.Digest, isConfig)) + fileInfo, err := os.Stat(filename) + if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) { + return true, fileInfo.Size(), nil + } + if !os.IsNotExist(err) { + return false, -1, errors.Wrap(err, "checking size") + } + } + + return false, -1, nil +} + +func (b *BlobCache) Directory() string { + return b.directory +} + +func (b *BlobCache) ClearCache() error { + f, err := os.Open(b.directory) + if err != nil { + return errors.WithStack(err) + } + defer f.Close() + names, err := f.Readdirnames(-1) + if err != nil { + return errors.Wrapf(err, "error reading directory %q", b.directory) + } + for _, name := range names { + pathname := filepath.Join(b.directory, name) + if err = os.RemoveAll(pathname); err != nil { + return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(b)) + } + } + return nil +} + +func (b *BlobCache) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := b.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(b.reference)) + } + return image.FromSource(ctx, sys, src) +} + +func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + src, err := b.reference.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(b.reference)) + } + logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(b.reference), b.directory, b.compress) + return &blobCacheSource{reference: b, source: src, sys: *sys}, nil +} + +func (b *BlobCache) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + dest, err := b.reference.NewImageDestination(ctx, sys) + 
if err != nil { + return nil, errors.Wrapf(err, "error creating new image destination %q", transports.ImageName(b.reference)) + } + logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(b.reference), b.directory) + return &blobCacheDestination{reference: b, destination: dest}, nil +} + +func (s *blobCacheSource) Reference() types.ImageReference { + return s.reference +} + +func (s *blobCacheSource) Close() error { + logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors) + return s.source.Close() +} + +func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false)) + manifestBytes, err := os.ReadFile(filename) + if err == nil { + s.cacheHits++ + return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil + } + if !os.IsNotExist(err) { + s.cacheErrors++ + return nil, "", errors.Wrap(err, "checking for manifest file") + } + } + s.cacheMisses++ + return s.source.GetManifest(ctx, instanceDigest) +} + +func (s *blobCacheSource) HasThreadSafeGetBlob() bool { + return s.source.HasThreadSafeGetBlob() +} + +func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + present, size, err := s.reference.HasBlob(blobinfo) + if err != nil { + return nil, -1, err + } + if present { + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + s.mu.Lock() + s.cacheHits++ + s.mu.Unlock() + return f, size, nil + } + if !os.IsNotExist(err) { + s.mu.Lock() + s.cacheErrors++ + s.mu.Unlock() + return nil, -1, errors.Wrap(err, "checking for cache") + } + } + } + s.mu.Lock() + s.cacheMisses++ + s.mu.Unlock() + rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) + if err != nil { + return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference)) + } + return rc, size, nil +} + +func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.source.GetSignatures(ctx, instanceDigest) +} + +func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + signatures, err := s.source.GetSignatures(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) + } + canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) + + infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + if infos == nil { + img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest)) + if err != nil { + return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + infos = img.LayerInfos() + } + + if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { + replacedInfos := make([]types.BlobInfo, 0, len(infos)) + for _, info := range infos { + var 
replaceDigest []byte + var err error + blobFile := filepath.Join(s.reference.directory, makeFilename(info.Digest, false)) + alternate := "" + switch s.reference.compress { + case types.Compress: + alternate = blobFile + compressedNote + replaceDigest, err = os.ReadFile(alternate) + case types.Decompress: + alternate = blobFile + decompressedNote + replaceDigest, err = os.ReadFile(alternate) + } + if err == nil && digest.Digest(replaceDigest).Validate() == nil { + alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false)) + fileInfo, err := os.Stat(alternate) + if err == nil { + switch info.MediaType { + case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: + switch s.reference.compress { + case types.Compress: + info.MediaType = v1.MediaTypeImageLayerGzip + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + info.MediaType = v1.MediaTypeImageLayer + info.CompressionAlgorithm = nil + } + case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType: + switch s.reference.compress { + case types.Compress: + info.MediaType = manifest.DockerV2Schema2LayerMediaType + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + // nope, not going to suggest anything, it's not allowed by the spec + replacedInfos = append(replacedInfos, info) + continue + } + } + logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String()) + info.CompressionOperation = s.reference.compress + info.Digest = digest.Digest(replaceDigest) + info.Size = fileInfo.Size() + logrus.Debugf("info = %#v", info) + } + } + replacedInfos = append(replacedInfos, info) + } + infos = replacedInfos + } + + return infos, nil +} + +func (d *blobCacheDestination) Reference() types.ImageReference { + return d.reference +} + +func (d *blobCacheDestination) Close() error { + logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) + return d.destination.Close() +} + +func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { + return d.destination.SupportedManifestMIMETypes() +} + +func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { + return d.destination.SupportsSignatures(ctx) +} + +func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { + return d.destination.DesiredLayerCompression() +} + +func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { + return d.destination.AcceptsForeignLayerURLs() +} + +func (d *blobCacheDestination) MustMatchRuntimeOS() bool { + return d.destination.MustMatchRuntimeOS() +} + +func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { + return d.destination.IgnoresEmbeddedDockerReference() +} + +// Decompress and save the contents of the decompressReader stream into the passed-in temporary +// file. If we successfully save all of the data, rename the file to match the digest of the data, +// and make notes about the relationship between the file that holds a copy of the compressed data +// and this new file. +func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { + defer wg.Done() + // Decompress from and digest the reading end of that pipe. 
+ decompressed, err3 := archive.DecompressStream(decompressReader) + digester := digest.Canonical.Digester() + if err3 == nil { + // Read the decompressed data through the filter over the pipe, blocking until the + // writing end is closed. + _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) + } else { + // Drain the pipe to keep from stalling the PutBlob() thread. + if _, err := io.Copy(io.Discard, decompressReader); err != nil { + logrus.Debugf("error draining the pipe: %v", err) + } + } + decompressReader.Close() + if decompressed != nil { + // DecompressStream returns a nil ReadCloser on failure; guard before closing. + decompressed.Close() + } + tempFile.Close() + // Determine the name that we should give to the uncompressed copy of the blob. + decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig)) + if err3 == nil { + // Rename the temporary file. + if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { + logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) + // Remove the temporary file. + if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } else { + *alternateDigest = digester.Digest() + // Note the relationship between the two files. + if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { + logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) + } + if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { + logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) + } + } + } else { + // Remove the temporary file. 
+ if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } +} + +func (d *blobCacheDestination) HasThreadSafePutBlob() bool { + return d.destination.HasThreadSafePutBlob() +} + +func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + var tempfile *os.File + var err error + var n int + var alternateDigest digest.Digest + var closer io.Closer + wg := new(sync.WaitGroup) + needToWait := false + compression := archive.Uncompressed + if inputInfo.Digest != "" { + filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + tempfile, err = os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err == nil { + stream = io.TeeReader(stream, tempfile) + defer func() { + if err == nil { + if err = os.Rename(tempfile.Name(), filename); err != nil { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename) + } + } else { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + } + tempfile.Close() + }() + } else { + logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err) + } + if !isConfig { + initial := make([]byte, 8) + n, err = stream.Read(initial) + if n > 0 { + // Build a Reader that will still return the bytes that we just + // read, for PutBlob()'s sake. + stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) + if n >= len(initial) { + compression = archive.DetectCompression(initial[:n]) + } + if compression == archive.Gzip { + // The stream is compressed, so create a file which we'll + // use to store a decompressed copy. + decompressedTemp, err2 := os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err2 != nil { + logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2) + decompressedTemp.Close() + } else { + // Write a copy of the compressed data to a pipe, + // closing the writing end of the pipe after + // PutBlob() returns. + decompressReader, decompressWriter := io.Pipe() + closer = decompressWriter + stream = io.TeeReader(stream, decompressWriter) + // Let saveStream() close the reading end and handle the temporary file. 
+ wg.Add(1) + needToWait = true + go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest) + } + } + } + } + } + newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig) + if closer != nil { + closer.Close() + } + if needToWait { + wg.Wait() + } + if err != nil { + return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference)) + } + if alternateDigest.Validate() == nil { + logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory) + } else { + logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory) + } + return newBlobInfo, nil +} + +func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute) + if err != nil || present { + return present, reusedInfo, err + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + defer f.Close() + uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig) + if err != nil { + return false, types.BlobInfo{}, err + } + return true, uploadedInfo, nil + } + } + + return false, types.BlobInfo{}, nil +} + +func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) + } else { + filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false)) + if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { + logrus.Warnf("error saving manifest as %q: %v", filename, err) + } + } + return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) +} + +func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + return d.destination.PutSignatures(ctx, signatures, instanceDigest) +} + +func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.destination.Commit(ctx, unparsedToplevel) +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index c28e8179296..34c90dd77e4 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -5,7 +5,6 @@ import ( "compress/bzip2" "fmt" "io" - "io/ioutil" "github.com/containers/image/v5/pkg/compression/internal" "github.com/containers/image/v5/pkg/compression/types" @@ -65,7 +64,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { - return ioutil.NopCloser(bzip2.NewReader(r)), nil + return io.NopCloser(bzip2.NewReader(r)), nil } // XzDecompressor is a DecompressorFunc for the xz compression algorithm. 
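Editor's note: PutBlob above relies on a peek-and-restore trick; it reads the first 8 bytes of the incoming stream to sniff for gzip, then splices those bytes back in front with io.MultiReader so the wrapped destination still receives the complete blob. Below is a rough standalone sketch of that idea; the hard-coded gzip magic check stands in for archive.DetectCompression, and the sample payload is made up.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// peekCompression reads a small prefix to sniff the format, then rebuilds an
// equivalent Reader with io.MultiReader so the downstream consumer still
// sees every byte that was consumed by the peek.
func peekCompression(stream io.Reader) (io.Reader, bool, error) {
	initial := make([]byte, 8)
	n, err := stream.Read(initial)
	if err != nil && err != io.EOF {
		return nil, false, err
	}
	// Re-prepend the consumed bytes for the real consumer.
	restored := io.MultiReader(bytes.NewReader(initial[:n]), stream)
	// Gzip magic bytes; a stand-in for archive.DetectCompression.
	isGzip := n >= 2 && initial[0] == 0x1f && initial[1] == 0x8b
	return restored, isGzip, nil
}

func main() {
	r, gz, err := peekCompression(strings.NewReader("plain text payload"))
	if err != nil {
		panic(err)
	}
	data, _ := io.ReadAll(r)
	fmt.Printf("gzip=%v, recovered %d bytes\n", gz, len(data)) // gzip=false, recovered 18 bytes
}
```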
@@ -74,7 +73,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) { if err != nil { return nil, err } - return ioutil.NopCloser(r), nil + return io.NopCloser(r), nil } // gzipCompressor is a CompressorFunc for the gzip compression algorithm. @@ -161,7 +160,7 @@ func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { return nil, false, errors.Wrapf(err, "initializing decompression") } } else { - res = ioutil.NopCloser(stream) + res = io.NopCloser(stream) } return res, decompressor != nil, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 63f5bd53e1b..d0bdd08e9a7 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -4,7 +4,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -15,6 +14,7 @@ import ( "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/ioutils" helperclient "github.com/docker/docker-credential-helpers/client" "github.com/docker/docker-credential-helpers/credentials" "github.com/hashicorp/go-multierror" @@ -54,8 +54,8 @@ var ( // SetCredentials stores the username and password in a location // appropriate for sys and the users’ configuration. -// A valid key can be either a registry hostname or additionally a namespace if -// the AuthenticationFileHelper is being unsed. +// A valid key is a repository, a namespace within a registry, or a registry hostname; +// using forms other than just a registry may fail depending on configuration. // Returns a human-redable description of the location that was updated. // NOTE: The return value is only intended to be read by humans; its form is not an API, // it may change (or new forms can be added) any time. @@ -128,11 +128,15 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // possible sources, and then call `GetCredentials` on them. That // prevents us from having to reverse engineer the logic in // `GetCredentials`. - allRegistries := make(map[string]bool) - addRegistry := func(s string) { - allRegistries[s] = true + allKeys := make(map[string]bool) + addKey := func(s string) { + allKeys[s] = true } + // To use GetCredentials, we must at least convert the URL forms into host names. + // While we're at it, we’ll also canonicalize docker.io to the standard format. + normalizedDockerIORegistry := normalizeRegistry("docker.io") + helpers, err := sysregistriesv2.CredentialHelpers(sys) if err != nil { return nil, err @@ -151,10 +155,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // direct mapping to a registry, so we can just // walk the map. for registry := range auths.CredHelpers { - addRegistry(registry) + addKey(registry) } - for registry := range auths.AuthConfigs { - addRegistry(registry) + for key := range auths.AuthConfigs { + key := normalizeAuthFileKey(key, path.legacyFormat) + if key == normalizedDockerIORegistry { + key = "docker.io" + } + addKey(key) } } // External helpers. @@ -166,7 +174,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon switch errors.Cause(err) { case nil: for registry := range creds { - addRegistry(registry) + addKey(registry) } case exec.ErrNotFound: // It's okay if the helper doesn't exist. 
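Editor's note: the GetAllCredentials change above has to translate legacy auth-file keys (URL-like strings such as "https://index.docker.io/v1/") into the key form that GetCredentials accepts, canonicalizing Docker Hub to "docker.io" along the way. The helper below is a simplified, hypothetical stand-in for that normalization step; normalizeAuthFileKey itself is defined elsewhere in the package and is not shown in this diff.

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeLegacyKey reduces a legacy .docker/config.json entry key to a
// host name, roughly what the normalizeAuthFileKey step referenced above
// must do before the key can be fed back into GetCredentials.
func normalizeLegacyKey(key string) string {
	stripped := strings.TrimPrefix(key, "http://")
	stripped = strings.TrimPrefix(stripped, "https://")
	if i := strings.IndexRune(stripped, '/'); i != -1 {
		stripped = stripped[:i] // drop the "/v1/" suffix of legacy keys
	}
	// Docker Hub's canonical key is reported as "docker.io".
	if stripped == "index.docker.io" || stripped == "registry-1.docker.io" {
		return "docker.io"
	}
	return stripped
}

func main() {
	for _, k := range []string{"https://index.docker.io/v1/", "quay.io", "localhost:5000"} {
		fmt.Printf("%-30s -> %s\n", k, normalizeLegacyKey(k))
	}
}
```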
@@ -179,17 +187,15 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. authConfigs := make(map[string]types.DockerAuthConfig) - for registry := range allRegistries { - authConf, err := GetCredentials(sys, registry) + for key := range allKeys { + authConf, err := GetCredentials(sys, key) if err != nil { - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - // Ignore if the credentials could not be found (anymore). - continue - } // Note: we rely on the logging in `GetCredentials`. return nil, err } - authConfigs[registry] = authConf + if authConf != (types.DockerAuthConfig{}) { + authConfigs[key] = authConf + } } return authConfigs, nil @@ -230,16 +236,14 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { return paths } -// GetCredentials returns the registry credentials stored in the -// registry-specific credential helpers or in the default global credentials -// helpers with falling back to using either auth.json -// file or .docker/config.json, including support for OAuth2 and IdentityToken. +// GetCredentials returns the registry credentials matching key, appropriate for +// sys and the users’ configuration. // If an entry is not found, an empty struct is returned. +// A valid key is a repository, a namespace within a registry, or a registry hostname. // -// GetCredentialsForRef should almost always be used in favor of this API to -// allow different credentials for different repositories on the same registry. -func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { - return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get()) +// GetCredentialsForRef should almost always be used in favor of this API. +func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) { + return getCredentialsWithHomeDir(sys, key, homedir.Get()) } // GetCredentialsForRef returns the registry credentials necessary for @@ -247,35 +251,39 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth // appropriate for sys and the users’ configuration. // If an entry is not found, an empty struct is returned. func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) { - return getCredentialsWithHomeDir(sys, ref, reference.Domain(ref), homedir.Get()) + return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get()) } // getCredentialsWithHomeDir is an internal implementation detail of // GetCredentialsForRef and GetCredentials. It exists only to allow testing it // with an artificial home directory. 
-func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, registry, homeDir string) (types.DockerAuthConfig, error) { - // consistency check of the ref and registry arguments - if ref != nil && reference.Domain(ref) != registry { - return types.DockerAuthConfig{}, errors.Errorf( - "internal error: provided reference domain %q name does not match registry %q", - reference.Domain(ref), registry, - ) +func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) { + _, err := validateKey(key) + if err != nil { + return types.DockerAuthConfig{}, err } if sys != nil && sys.DockerAuthConfig != nil { - logrus.Debugf("Returning credentials for %s from DockerAuthConfig", registry) + logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key) return *sys.DockerAuthConfig, nil } + var registry string // We compute this once because it is used in several places. + if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 { + registry = key[:firstSlash] + } else { + registry = key + } + // Anonymous function to query credentials from auth files. getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) { for _, path := range getAuthFilePaths(sys, homeDir) { - authConfig, err := findAuthentication(ref, registry, path.path, path.legacyFormat) + authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat) if err != nil { return types.DockerAuthConfig{}, "", err } - if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" { + if authConfig != (types.DockerAuthConfig{}) { return authConfig, path.path, nil } } @@ -291,57 +299,61 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re for _, helper := range helpers { var ( creds types.DockerAuthConfig + helperKey string credHelperPath string err error ) switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: + helperKey = key creds, credHelperPath, err = getCredentialsFromAuthFiles() // External helpers. default: + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers, but a "registry" is a valid parent of "key". 
+ helperKey = registry creds, err = getAuthFromCredHelper(helper, registry) } if err != nil { - logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", registry, helper, err) + logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err) multiErr = multierror.Append(multiErr, err) continue } - if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 { - continue - } - msg := fmt.Sprintf("Found credentials for %s in credential helper %s", registry, helper) - if credHelperPath != "" { - msg = fmt.Sprintf("%s in file %s", msg, credHelperPath) + if creds != (types.DockerAuthConfig{}) { + msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper) + if credHelperPath != "" { + msg = fmt.Sprintf("%s in file %s", msg, credHelperPath) + } + logrus.Debug(msg) + return creds, nil } - logrus.Debug(msg) - return creds, nil } if multiErr != nil { return types.DockerAuthConfig{}, multiErr } - logrus.Debugf("No credentials for %s found", registry) + logrus.Debugf("No credentials for %s found", key) return types.DockerAuthConfig{}, nil } -// GetAuthentication returns the registry credentials stored in the -// registry-specific credential helpers or in the default global credentials -// helpers with falling back to using either auth.json file or -// .docker/config.json +// GetAuthentication returns the registry credentials matching key, appropriate for +// sys and the users’ configuration. +// If an entry is not found, an empty struct is returned. +// A valid key is a repository, a namespace within a registry, or a registry hostname. // // Deprecated: This API only has support for username and password. To get the // support for oauth2 in container registry authentication, we added the new -// GetCredentials API. The new API should be used and this API is kept to +// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to // maintain backward compatibility. -func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { - return getAuthenticationWithHomeDir(sys, registry, homedir.Get()) +func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) { + return getAuthenticationWithHomeDir(sys, key, homedir.Get()) } // getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, // it exists only to allow testing it with an artificial home directory. -func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) { - auth, err := getCredentialsWithHomeDir(sys, nil, registry, homeDir) +func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) { + auth, err := getCredentialsWithHomeDir(sys, key, homeDir) if err != nil { return "", "", err } @@ -353,8 +365,8 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir st // RemoveAuthentication removes credentials for `key` from all possible // sources such as credential helpers and auth files. -// A valid key can be either a registry hostname or additionally a namespace if -// the AuthenticationFileHelper is being unsed. +// A valid key is a repository, a namespace within a registry, or a registry hostname; +// using forms other than just a registry may fail depending on configuration. 
func RemoveAuthentication(sys *types.SystemContext, key string) error { isNamespaced, err := validateKey(key) if err != nil { @@ -531,7 +543,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { var auths dockerConfigFile - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { if os.IsNotExist(err) { auths.AuthConfigs = map[string]dockerAuthConfig{} @@ -593,7 +605,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) ( return "", errors.Wrapf(err, "marshaling JSON %q", path) } - if err = ioutil.WriteFile(path, newData, 0600); err != nil { + if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil { return "", errors.Wrapf(err, "writing to file %q", path) } } @@ -606,6 +618,10 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, p := helperclient.NewShellProgramFunc(helperName) creds, err := helperclient.Get(p, registry) if err != nil { + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper) + err = nil + } return types.DockerAuthConfig{}, err } @@ -639,26 +655,28 @@ func deleteAuthFromCredHelper(credHelper, registry string) error { return helperclient.Erase(p, registry) } -// findAuthentication looks for auth of registry in path. If ref is -// not nil, then it will be taken into account when looking up the -// authentication credentials. -func findAuthentication(ref reference.Named, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { +// findCredentialsInFile looks for credentials matching "key" +// (which is "registry" or a namespace in "registry") in "path". +func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { auths, err := readJSONFile(path, legacyFormat) if err != nil { return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path) } // First try cred helpers. They should always be normalized. + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers. if ch, exists := auths.CredHelpers[registry]; exists { + logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path) return getAuthFromCredHelper(ch, registry) } - // Support for different paths in auth. + // Support sub-registry namespaces in auth. // (This is not a feature of ~/.docker/config.json; we support it even for // those files as an extension.) var keys []string - if !legacyFormat && ref != nil { - keys = authKeysForRef(ref) + if !legacyFormat { + keys = authKeysForKey(key) } else { keys = []string{registry} } @@ -686,26 +704,28 @@ func findAuthentication(ref reference.Named, registry, path string, legacyFormat } } + // Only log this if we found nothing; getCredentialsWithHomeDir logs the + // source of found data. + logrus.Debugf("No credentials matching %s found in %s", key, path) return types.DockerAuthConfig{}, nil } -// authKeysForRef returns the valid paths for a provided reference. For example, -// when given a reference "quay.io/repo/ns/image:tag", then it would return +// authKeysForKey returns the keys matching a provided auth file key, in order +// from the best match to worst. 
For example, +// when given a repository key "quay.io/repo/ns/image", it returns // - quay.io/repo/ns/image // - quay.io/repo/ns // - quay.io/repo // - quay.io -func authKeysForRef(ref reference.Named) (res []string) { - name := ref.Name() - +func authKeysForKey(key string) (res []string) { for { - res = append(res, name) + res = append(res, key) - lastSlash := strings.LastIndex(name, "/") + lastSlash := strings.LastIndex(key, "/") if lastSlash == -1 { break } - name = name[:lastSlash] + key = key[:lastSlash] } return res @@ -759,11 +779,24 @@ func normalizeRegistry(registry string) string { // validateKey verifies that the input key does not have a prefix that is not // allowed and returns an indicator if the key is namespaced. -func validateKey(key string) (isNamespaced bool, err error) { +func validateKey(key string) (bool, error) { if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") { - return isNamespaced, errors.Errorf("key %s contains http[s]:// prefix", key) + return false, errors.Errorf("key %s contains http[s]:// prefix", key) } + // Ideally this should only accept explicitly valid keys, compare + // validateIdentityRemappingPrefix. For now, just reject values that look + // like tagged or digested values. + if strings.ContainsRune(key, '@') { + return false, fmt.Errorf(`key %s contains a '@' character`, key) + } + + firstSlash := strings.IndexRune(key, '/') + isNamespaced := firstSlash != -1 + // Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo. + if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') { + return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key) + } // check if the provided key contains one or more subpaths. - return strings.ContainsRune(key, '/'), nil + return isNamespaced, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go deleted file mode 100644 index 0bf16125919..00000000000 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go +++ /dev/null @@ -1,119 +0,0 @@ -package config - -import ( - "fmt" - "strings" - - "github.com/containers/image/v5/internal/pkg/keyctl" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// NOTE: none of the functions here are currently used. If we ever want to -// re-enable keyring support, we should introduce a similar built-in credential -// helpers as for `sysregistriesv2.AuthenticationFileHelper`. 
- -const keyDescribePrefix = "container-registry-login:" //nolint:deadcode,unused - -func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return "", "", err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return "", "", err - } - authData, err := key.Get() - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(authData), "\x00", 2) - if len(parts) != 2 { - return "", "", nil - } - return parts[0], parts[1], nil -} - -func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused - userkeyring, err := keyctl.UserKeyring() - - if err != nil { - return err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return err - } - return key.Unlink() -} - -func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused - keys, err := keyctl.ReadUserKeyring() - if err != nil { - return err - } - - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return err - } - - for _, k := range keys { - keyAttr, err := k.Describe() - if err != nil { - return err - } - // split string "type;uid;gid;perm;description" - keyAttrs := strings.SplitN(keyAttr, ";", 5) - if len(keyAttrs) < 5 { - return errors.Errorf("Key attributes of %d are not available", k.ID()) - } - keyDescribe := keyAttrs[4] - if strings.HasPrefix(keyDescribe, keyDescribePrefix) { - err := keyctl.Unlink(userkeyring, k) - if err != nil { - return errors.Wrapf(err, "unlinking key %d", k.ID()) - } - logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr) - } - } - return nil -} - -func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused - keyring, err := keyctl.SessionKeyring() - if err != nil { - return err - } - id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password))) - if err != nil { - return err - } - - // sets all permission(view,read,write,search,link,set attribute) for current user - // it enables the user to search the key after it linked to user keyring and unlinked from session keyring - err = keyctl.SetPerm(id, keyctl.PermUserAll) - if err != nil { - return err - } - // link the key to userKeyring - userKeyring, err := keyctl.UserKeyring() - if err != nil { - return errors.Wrapf(err, "getting user keyring") - } - err = keyctl.Link(userKeyring, id) - if err != nil { - return errors.Wrapf(err, "linking the key to user keyring") - } - // unlink the key from session keyring - err = keyctl.Unlink(keyring, id) - if err != nil { - return errors.Wrapf(err, "unlinking the key from session keyring") - } - return nil -} - -func genDescription(registry string) string { //nolint:deadcode,unused - return fmt.Sprintf("%s%s", keyDescribePrefix, registry) -} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go deleted file mode 100644 index d9827d8edbc..00000000000 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !linux && (!386 || !amd64) -// +build !linux -// +build !386 !amd64 - -package config - -func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused - return "", "", ErrNotSupported -} - -func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused - return ErrNotSupported -} - -func 
setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused - return ErrNotSupported -} - -func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused - return ErrNotSupported -} diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index fb0a15b993e..46c10ff631a 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -118,6 +118,7 @@ type Resolved struct { } func (r *Resolved) addCandidate(named reference.Named) { + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r}) } @@ -138,6 +139,8 @@ const ( rationaleUSR // Resolved value has been selected by the user (via the prompt). rationaleUserSelection + // Resolved value has been enforced to use Docker Hub (via SystemContext). + rationaleEnforcedDockerHub ) // Description returns a human-readable description about the resolution @@ -152,6 +155,8 @@ func (r *Resolved) Description() string { return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription) case rationaleUSR: return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription) + case rationaleEnforcedDockerHub: + return fmt.Sprintf("Resolving %q to docker.io (%s)", r.userInput, r.originDescription) case rationaleUserSelection, rationaleNone: fallthrough default: @@ -265,8 +270,20 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { return nil, err } if !isShort { // no short name - named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed + resolved.addCandidate(shortRef) + return resolved, nil + } + + // Resolve to docker.io only if enforced by the caller (e.g., Podman's + // Docker-compatible REST API). + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, errors.Wrapf(err, "cannot normalize input: %q", name) + } resolved.addCandidate(named) + resolved.rationale = rationaleEnforcedDockerHub + resolved.originDescription = "enforced by caller" return resolved, nil } @@ -295,9 +312,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { return nil, err } } - // Make sure to add ":latest" if needed - namedAlias = reference.TagNameOnly(namedAlias) - resolved.addCandidate(namedAlias) resolved.rationale = rationaleAlias resolved.originDescription = aliasOriginDescription @@ -325,9 +339,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { if err != nil { return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) } - // Make sure to add ":latest" if needed - named = reference.TagNameOnly(named) - resolved.addCandidate(named) } @@ -412,6 +423,23 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e var candidates []reference.Named + // Complete the candidates with the specified registries. 
+ completeCandidates := func(registries []string) ([]reference.Named, error) { + for _, reg := range registries { + named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) + if err != nil { + return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + } + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + candidates = append(candidates, named) + } + return candidates, nil + } + + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + return completeCandidates([]string{"docker.io"}) + } + // Strip off the tag to normalize the short name for looking it up in // the config files. isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef) @@ -434,9 +462,7 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e return nil, err } } - // Make sure to add ":latest" if needed - namedAlias = reference.TagNameOnly(namedAlias) - + namedAlias = reference.TagNameOnly(namedAlias) // Make sure to add ":latest" if needed candidates = append(candidates, namedAlias) } @@ -447,16 +473,5 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e } // Note that "localhost" has precedence over the unqualified-search registries. - for _, reg := range append([]string{"localhost"}, unqualifiedSearchRegistries...) { - named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) - if err != nil { - return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) - } - // Make sure to add ":latest" if needed - named = reference.TagNameOnly(named) - - candidates = append(candidates, named) - } - - return candidates, nil + return completeCandidates(append([]string{"localhost"}, unqualifiedSearchRegistries...)) } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 7122e869fe6..6909ea0a60f 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -13,6 +13,7 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/lockfile" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // defaultShortNameMode is the default mode of registries.conf files if the @@ -315,11 +316,14 @@ func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAlia func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) { conf := shortNameAliasConf{} - _, err := toml.DecodeFile(confPath, &conf) + meta, err := toml.DecodeFile(confPath, &conf) if err != nil && !os.IsNotExist(err) { // It's okay if the config doesn't exist. Other errors are not. return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath) } + if keys := meta.Undecoded(); len(keys) > 0 { + logrus.Debugf("Failed to decode keys %q from %q", keys, confPath) + } // Even if we don’t always need the cache, doing so validates the machine-generated config. The // file could still be corrupted by another process or user. 
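Editor's note: both in loadShortNameAliasConf above and in loadConfigFile further down, the switch from discarding toml.DecodeFile's MetaData to inspecting it means unknown keys in registries.conf and the alias cache are now surfaced in debug logs instead of being silently dropped. A small self-contained example of the mechanism follows; aliasConf and the "typo-aliases" table are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// aliasConf mirrors the shape of short-name alias files; "aliases" is the
// only key the real shortNameAliasConf decodes.
type aliasConf struct {
	Aliases map[string]string `toml:"aliases"`
}

func main() {
	// "typo-aliases" is deliberately unknown; with the change above it is
	// no longer ignored silently but reported via MetaData.Undecoded().
	doc := `
[aliases]
"foo" = "registry.example.com/ns/foo"

[typo-aliases]
"bar" = "registry.example.com/ns/bar"
`
	var conf aliasConf
	meta, err := toml.Decode(doc, &conf)
	if err != nil {
		panic(err)
	}
	if keys := meta.Undecoded(); len(keys) > 0 {
		fmt.Printf("Failed to decode keys %q\n", keys) // same message the debug log uses
	}
	fmt.Println("decoded aliases:", conf.Aliases)
}
```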
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index c8a603c4ef0..c1753c8457a 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -2,6 +2,7 @@ package sysregistriesv2 import ( "fmt" + "io/fs" "os" "path/filepath" "reflect" @@ -43,6 +44,16 @@ const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d" // helper. const AuthenticationFileHelper = "containers-auth.json" +const ( + // configuration values for "pull-from-mirror" + // mirrors will be used for both digest pulls and tag pulls + MirrorAll = "all" + // mirrors will only be used for digest pulls + MirrorByDigestOnly = "digest-only" + // mirrors will only be used for tag pulls + MirrorByTagOnly = "tag-only" +) + // Endpoint describes a remote location of a registry. type Endpoint struct { // The endpoint's remote location. Can be empty iff Prefix contains @@ -53,6 +64,18 @@ type Endpoint struct { // If true, certs verification will be skipped and HTTP (non-TLS) // connections will be allowed. Insecure bool `toml:"insecure,omitempty"` + // PullFromMirror restricts the kinds of image pulls this mirror is used for. + // Set to "all", "digest-only", or "tag-only". + // If "digest-only", mirrors will only be used for digest pulls. Pulling images by + // tag can potentially yield different images, depending on which endpoint + // we pull from. Restricting mirrors to pulls by digest avoids that issue. + // If "tag-only", mirrors will only be used for tag pulls. A mirror that is more up-to-date + // but more expensive to use, and unlikely to be stale when tags move, should not be + // used unnecessarily for digest references. + // The default is "all" (or left empty): mirrors will be used for both digest pulls and tag pulls unless mirror-by-digest-only is set for the primary registry. + // This can only be set in a registry's Mirror field, not in the registry's primary Endpoint. + // This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry. + PullFromMirror string `toml:"pull-from-mirror,omitempty"` } // userRegistriesFile is the path to the per user registry configuration file. @@ -115,7 +138,7 @@ type Registry struct { Blocked bool `toml:"blocked,omitempty"` // If true, mirrors will only be used for digest pulls. Pulling images by // tag can potentially yield different images, depending on which endpoint - // we pull from. Forcing digest-pulls for mirrors avoids that issue. + // we pull from. Restricting mirrors to pulls by digest avoids that issue. MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"` } @@ -130,17 +153,29 @@ type PullSource struct { // reference. func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { var endpoints []Endpoint - + _, isDigested := ref.(reference.Canonical) if r.MirrorByDigestOnly { - // Only use mirrors when the reference is a digest one. - if _, isDigested := ref.(reference.Canonical); isDigested { - endpoints = append(r.Mirrors, r.Endpoint) - } else { - endpoints = []Endpoint{r.Endpoint} + // Only use mirrors when the reference is a digested one. + if isDigested { + endpoints = append(endpoints, r.Mirrors...) 
} } else { - endpoints = append(r.Mirrors, r.Endpoint) + for _, mirror := range r.Mirrors { + // skip the mirror if a per-mirror setting exists but the reference does not match the restriction + switch mirror.PullFromMirror { + case MirrorByDigestOnly: + if !isDigested { + continue + } + case MirrorByTagOnly: + if isDigested { + continue + } + } + endpoints = append(endpoints, mirror) + } } + endpoints = append(endpoints, r.Endpoint) sources := []PullSource{} for _, ep := range endpoints { @@ -374,6 +409,10 @@ func (config *V2RegistriesConf) postProcessRegistries() error { } } + // validate that the mirror-usage setting is not applied to a primary registry + if reg.PullFromMirror != "" { + return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix) + } // make sure mirrors are valid for _, mir := range reg.Mirrors { mir.Location, err = parseLocation(mir.Location) @@ -387,6 +426,14 @@ func (config *V2RegistriesConf) postProcessRegistries() error { if mir.Location == "" { return &InvalidRegistries{s: "invalid condition: mirror location is unset"} } + + if reg.MirrorByDigestOnly && mir.PullFromMirror != "" { + return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror-by-digest-only for registry (%q) and pull-from-mirror for mirror (%q) at the same time", reg.Prefix, mir.Location)} + } + if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll && + mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly { + return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)} + } } if reg.Location == "" { regMap[reg.Prefix] = append(regMap[reg.Prefix], reg) @@ -597,17 +644,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) { dirPaths = append(dirPaths, wrapper.userConfigDirPath) } for _, dirPath := range dirPaths { - err := filepath.Walk(dirPath, + err := filepath.WalkDir(dirPath, // WalkFunc to read additional configs - func(path string, info os.FileInfo, err error) error { + func(path string, d fs.DirEntry, err error) error { switch { case err != nil: // return error (could be a permission problem) return err - case info == nil: + case d == nil: // this should only happen when err != nil but let's be sure return nil - case info.IsDir(): + case d.IsDir(): if path != dirPath { // make sure to not recurse into sub-directories return filepath.SkipDir @@ -877,10 +924,13 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) { // Load the tomlConfig. Note that `DecodeFile` will overwrite set fields. var combinedTOML tomlConfig - _, err := toml.DecodeFile(path, &combinedTOML) + meta, err := toml.DecodeFile(path, &combinedTOML) if err != nil { return nil, err } + if keys := meta.Undecoded(); len(keys) > 0 { + logrus.Debugf("Failed to decode keys %q from %q", keys, path) + } if combinedTOML.V1RegistriesConf.Nonempty() { // Enforce the v2 format if requested. 
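Editor's note: putting the new per-mirror logic together, PullSourcesFromReference now filters each mirror by its pull-from-mirror setting against the kind of reference being pulled, then always appends the primary registry as the final fallback. The sketch below reimplements just that selection order with a simplified endpoint type; the example.com locations are hypothetical.

```go
package main

import "fmt"

// endpoint is a simplified mirror of the Endpoint struct above: each mirror
// can opt in to digest-only or tag-only pulls, and the primary registry
// always comes last as the fallback.
type endpoint struct {
	Location       string
	PullFromMirror string // "", "all", "digest-only", or "tag-only"
}

func orderedEndpoints(primary endpoint, mirrors []endpoint, isDigested bool) []endpoint {
	var out []endpoint
	for _, m := range mirrors {
		switch m.PullFromMirror {
		case "digest-only":
			if !isDigested {
				continue // tag pull: skip digest-only mirrors
			}
		case "tag-only":
			if isDigested {
				continue // digest pull: skip tag-only mirrors
			}
		}
		out = append(out, m)
	}
	return append(out, primary) // the primary registry is always the last resort
}

func main() {
	mirrors := []endpoint{
		{Location: "mirror-a.example.com", PullFromMirror: "digest-only"},
		{Location: "mirror-b.example.com", PullFromMirror: "tag-only"},
		{Location: "mirror-c.example.com"}, // empty means "all"
	}
	primary := endpoint{Location: "registry.example.com"}
	fmt.Println("digest pull:", orderedEndpoints(primary, mirrors, true))
	fmt.Println("tag pull:   ", orderedEndpoints(primary, mirrors, false))
}
```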
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 7e2142b1f58..c766417d0ec 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -2,7 +2,6 @@ package tlsclientconfig import ( "crypto/tls" - "io/ioutil" "net" "net/http" "os" @@ -19,7 +18,7 @@ import ( // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc func SetupCertificates(dir string, tlsc *tls.Config) error { logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) + fs, err := os.ReadDir(dir) if err != nil { if os.IsNotExist(err) { return nil @@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { fullPath := filepath.Join(dir, f.Name()) if strings.HasSuffix(f.Name(), ".crt") { logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) + data, err := os.ReadFile(fullPath) if err != nil { if os.IsNotExist(err) { // Dangling symbolic link? @@ -81,7 +80,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { return nil } -func hasFile(files []os.FileInfo, name string) bool { +func hasFile(files []os.DirEntry, name string) bool { for _, f := range files { if f.Name() == name { return true diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go new file mode 100644 index 00000000000..70758ad4399 --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/load.go @@ -0,0 +1,210 @@ +package sif + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/sylabs/sif/v2/pkg/sif" +) + +// injectedScriptTargetPath is the path injectedScript should be written to in the created image. +const injectedScriptTargetPath = "/podman/runscript" + +// parseDefFile parses a SIF definition file from reader, +// and returns non-trivial contents of the %environment and %runscript sections. +func parseDefFile(reader io.Reader) ([]string, []string, error) { + type parserState int + const ( + parsingOther parserState = iota + parsingEnvironment + parsingRunscript + ) + + environment := []string{} + runscript := []string{} + + state := parsingOther + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + s := strings.TrimSpace(scanner.Text()) + switch { + case s == `%environment`: + state = parsingEnvironment + case s == `%runscript`: + state = parsingRunscript + case strings.HasPrefix(s, "%"): + state = parsingOther + case state == parsingEnvironment: + if s != "" && !strings.HasPrefix(s, "#") { + environment = append(environment, s) + } + case state == parsingRunscript: + runscript = append(runscript, s) + default: // parsingOther: ignore the line + } + } + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err) + } + return environment, runscript, nil +} + +// generateInjectedScript generates a shell script based on +// SIF definition file %environment and %runscript data, and returns it. 
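parseDefFile above is a small line-oriented state machine, and generateInjectedScript (whose implementation follows) joins its output into the script injected at /podman/runscript. Since both are unexported, a sanity check would have to live in the sif package itself; a minimal test-style sketch, with hypothetical definition-file contents:

package sif

import (
	"strings"
	"testing"
)

// Hypothetical definition-file contents; exercises parseDefFile and
// generateInjectedScript from this package.
func TestDefFileToInjectedScript(t *testing.T) {
	def := "Bootstrap: library\n" +
		"From: alpine\n" +
		"\n" +
		"%environment\n" +
		"    export GREETING=hello\n" +
		"%runscript\n" +
		"    echo \"$GREETING from SIF\"\n"

	env, run, err := parseDefFile(strings.NewReader(def))
	if err != nil {
		t.Fatal(err)
	}
	script := string(generateInjectedScript(env, run))
	// Expected shape: "#!/bin/bash\nexport GREETING=hello\necho \"$GREETING from SIF\"\n"
	if !strings.Contains(script, "export GREETING=hello") ||
		!strings.Contains(script, `echo "$GREETING from SIF"`) {
		t.Errorf("unexpected script: %q", script)
	}
}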
+func generateInjectedScript(environment []string, runscript []string) []byte { + script := fmt.Sprintf("#!/bin/bash\n"+ + "%s\n"+ + "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n")) + return []byte(script) +} + +// processDefFile finds sif.DataDeffile in sifImage, if any, +// and returns: +// - the command to run +// - contents of a script to inject as injectedScriptTargetPath, or nil +func processDefFile(sifImage *sif.FileImage) (string, []byte, error) { + var environment, runscript []string + + desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile)) + if err == nil { + environment, runscript, err = parseDefFile(desc.GetReader()) + if err != nil { + return "", nil, err + } + } + + var command string + var injectedScript []byte + if len(environment) == 0 && len(runscript) == 0 { + command = "bash" + injectedScript = nil + } else { + injectedScript = generateInjectedScript(environment, runscript) + command = injectedScriptTargetPath + } + + return command, injectedScript, nil +} + +func writeInjectedScript(extractedRootPath string, injectedScript []byte) error { + if injectedScript == nil { + return nil + } + filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath) + parentDirPath := filepath.Dir(filePath) + if err := os.MkdirAll(parentDirPath, 0755); err != nil { + return fmt.Errorf("creating %s: %w", parentDirPath, err) + } + if err := os.WriteFile(filePath, injectedScript, 0755); err != nil { + return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err) + } + return nil +} + +// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath. +// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use, +// if necessary. +func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error { + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + defer os.RemoveAll(extractedRootPath) + + // Almost everything in extractedRootPath comes from squashFSPath. + conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./", + extractedRootPath, squashFSPath, extractedRootPath, tarPath) + script := "#!/bin/sh\n" + conversionCommand + "\n" + if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil { + return err + } + defer os.Remove(scriptPath) + + // On top of squashFSPath, we only add injectedScript, if necessary. + if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil { + return err + } + + logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand) + cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("converting image: %w, output: %s", err, string(output)) + } + logrus.Debugf("... finished converting squashfs to tar") + return nil +} + +// convertSIFToElements processes sifImage and creates/returns +// the relevant elements for constructing an OCI-like image: +// - A path to a tar file containing a root filesystem, +// - A command to run. +// The returned tar file path is inside tempDir, which can be assumed to be empty +// at start, and is exclusively used by the current process (i.e. it is safe +// to use hard-coded relative paths within it). 
+func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) { + // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive, + // so we can just hard-code a set of unique values here. + // We create and/or manage cleanup of these two paths. + squashFSPath := filepath.Join(tempDir, "rootfs.squashfs") + tarPath := filepath.Join(tempDir, "rootfs.tar") + // We only allocate these paths, the user is responsible for cleaning them up. + extractedRootPath := filepath.Join(tempDir, "rootfs") + scriptPath := filepath.Join(tempDir, "script") + + succeeded := false + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need + // to run both creation and consumption of extractedRootPath in the same fakeroot context. + // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2 + // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time. + // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser + // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy. + defer os.Remove(squashFSPath) + defer func() { + if !succeeded { + os.Remove(tarPath) + } + }() + + command, injectedScript, err := processDefFile(sifImage) + if err != nil { + return "", nil, err + } + + rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys)) + if err != nil { + return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err) + } + // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4 + // has an -o option that allows extracting a squashfs from the SIF file directly, + // but that version is not currently available in RHEL 8. + logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath) + if err := func() error { // A scope for defer + f, err := os.Create(squashFSPath) + if err != nil { + return err + } + defer f.Close() + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil { + return err + } + return nil + }(); err != nil { + return "", nil, err + } + logrus.Debugf("... 
finished creating a temporary squashfs image") + + if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil { + return "", nil, err + } + succeeded = true + return tarPath, []string{command}, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go new file mode 100644 index 00000000000..ccf1259660e --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/src.go @@ -0,0 +1,216 @@ +package sif + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "github.com/sylabs/sif/v2/pkg/sif" +) + +type sifImageSource struct { + ref sifReference + workDir string + layerDigest digest.Digest + layerSize int64 + layerFile string + config []byte + configDigest digest.Digest + manifest []byte +} + +// getBlobInfo returns the digest and size of the provided file. +func getBlobInfo(path string) (digest.Digest, int64, error) { + f, err := os.Open(path) + if err != nil { + return "", -1, fmt.Errorf("opening %q for reading: %w", path, err) + } + defer f.Close() + + // TODO: Instead of writing the tar file to disk, and reading + // it here again, stream the tar file to a pipe and + // compute the digest while writing it to disk. + logrus.Debugf("Computing a digest of the SIF conversion output...") + digester := digest.Canonical.Digester() + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(digester.Hash(), f) + if err != nil { + return "", -1, fmt.Errorf("reading %q: %w", path, err) + } + digest := digester.Digest() + logrus.Debugf("... finished computing the digest of the SIF conversion output") + + return digest, size, nil +} + +// newImageSource returns an ImageSource for reading from an existing SIF image. +// newImageSource extracts SIF objects and saves them in a temp directory.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) { + sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY)) + if err != nil { + return nil, fmt.Errorf("loading SIF file: %w", err) + } + defer func() { + _ = sifImg.UnloadContainer() + }() + + workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") + if err != nil { + return nil, fmt.Errorf("creating temp directory: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + os.RemoveAll(workDir) + } + }() + + layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir) + if err != nil { + return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err) + } + + layerDigest, layerSize, err := getBlobInfo(layerPath) + if err != nil { + return nil, fmt.Errorf("gathering blob information: %w", err) + } + + created := sifImg.ModifiedAt() + config := imgspecv1.Image{ + Created: &created, + Architecture: sifImg.PrimaryArch(), + OS: "linux", + Config: imgspecv1.ImageConfig{ + Cmd: commandLine, + }, + RootFS: imgspecv1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{layerDigest}, + }, + History: []imgspecv1.History{ + { + Created: &created, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator), + Comment: "imported from SIF, uuid: " + sifImg.ID(), + }, + { + Created: &created, + CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]", + EmptyLayer: true, + }, + }, + } + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err) + } + configDigest := digest.Canonical.FromBytes(configBytes) + + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, + Config: imgspecv1.Descriptor{ + Digest: configDigest, + Size: int64(len(configBytes)), + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: []imgspecv1.Descriptor{{ + Digest: layerDigest, + Size: layerSize, + MediaType: imgspecv1.MediaTypeImageLayer, + }}, + } + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err) + } + + succeeded = true + return &sifImageSource{ + ref: ref, + workDir: workDir, + layerDigest: layerDigest, + layerSize: layerSize, + layerFile: layerPath, + config: configBytes, + configDigest: configDigest, + manifest: manifestBytes, + }, nil +} + +// Reference returns the reference used to set up this source. +func (s *sifImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *sifImageSource) Close() error { + return os.RemoveAll(s.workDir) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *sifImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + switch info.Digest { + case s.configDigest: + return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil + case s.layerDigest: + reader, err := os.Open(s.layerFile) + if err != nil { + return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err) + } + return reader, s.layerSize, nil + default: + return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String()) + } +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.New("manifest lists are not supported by the sif transport") + } + return s.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by the sif transport") + } + return nil, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go new file mode 100644 index 00000000000..18d894bc35c --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -0,0 +1,164 @@ +package sif + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for SIF images. 
+var Transport = sifTransport{} + +type sifTransport struct{} + +func (t sifTransport) Name() string { + return "sif" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// sifReference is an ImageReference for SIF images. +type sifReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + file string // As specified by the user. May be relative, contain symlinks, etc. + resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no sif.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// sifTransport.ParseReference; callers who intentionally deal with SIF files +// can use sif.NewReference. + +// NewReference returns an image file reference for a specified path. +func NewReference(file string) (types.ImageReference, error) { + // We do not expose an API supplying the resolvedFile; we could, but recomputing it + // is generally cheap enough that we prefer being confident about the properties of resolvedFile. + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + return sifReference{file: file, resolvedFile: resolved}, nil +} + +func (ref sifReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref sifReference) StringWithinTransport() string { + return ref.file +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref sifReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref sifReference) PolicyConfigurationIdentity() string { + return ref.resolvedFile +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref sifReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by sifTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
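For this transport, NewImageDestination (just below) rejects writes; sif: references are read-only. Reading end-to-end goes through the regular copy API. A hedged sketch, with hypothetical paths, that copies a SIF image into an OCI-style directory; note that the conversion additionally shells out to fakeroot, unsquashfs, and tar on the host, as load.go above shows:

package main

import (
	"context"
	"log"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	// Hypothetical paths; "sif:" is the transport registered above.
	srcRef, err := alltransports.ParseImageName("sif:/tmp/app.sif")
	if err != nil {
		log.Fatal(err)
	}
	destRef, err := alltransports.ParseImageName("dir:/tmp/app-unpacked")
	if err != nil {
		log.Fatal(err)
	}
	// Minimal policy for the sketch: accept anything (not suitable for production).
	policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
	policyCtx, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	defer policyCtx.Destroy()

	if _, err := copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{}); err != nil {
		log.Fatal(err)
	}
}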
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, errors.New(`"sif:" locations can only be read from, not written to`) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.New("Deleting images not implemented for sif: images") +} diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go index 07fdd42a9af..8e9ce0dd236 100644 --- a/vendor/github.com/containers/image/v5/signature/docker.go +++ b/vendor/github.com/containers/image/v5/signature/docker.go @@ -3,22 +3,46 @@ package signature import ( + "errors" "fmt" + "strings" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" "github.com/opencontainers/go-digest" ) +// SignOptions includes optional parameters for signing container images. +type SignOptions struct { + // Passphrase to use when signing with the key identity. + Passphrase string +} + -// SignDockerManifest returns a signature for manifest as the specified dockerReference, -// using mech and keyIdentity. -func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { +// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference, +// using mech and keyIdentity, and the specified options. +func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) { manifestDigest, err := manifest.Digest(m) if err != nil { return nil, err } sig := newUntrustedSignature(manifestDigest, dockerReference) - return sig.sign(mech, keyIdentity) + + var passphrase string + if options != nil { + passphrase = options.Passphrase + // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior. + if strings.Contains(passphrase, "\n") { + return nil, errors.New("invalid passphrase: must not contain a line break") + } + } + + return sig.sign(mech, keyIdentity, passphrase) +} + +// SignDockerManifest returns a signature for manifest as the specified dockerReference, +// using mech and keyIdentity. +func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { + return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil) } // VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go index 2c08c231ea8..249b5a1fe46 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism.go @@ -6,16 +6,19 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "strings" - "golang.org/x/crypto/openpgp" + // This code is used only to parse the data in an explicitly-untrusted + // code path, where cryptography is not relevant. For now, continue to + // use this frozen deprecated implementation. When mechanism_openpgp.go + // migrates to another implementation, this should migrate as well. + //lint:ignore SA1019 See above + "golang.org/x/crypto/openpgp" //nolint:staticcheck ) // SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// Each mechanism should eventually be closed by calling Close(). -// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to -// eliminate ambiguities, support CA signatures and perhaps other key properties) type SigningMechanism interface { // Close removes resources associated with the mechanism, if any. Close() error @@ -34,6 +37,15 @@ type SigningMechanism interface { UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) } +// signingMechanismWithPassphrase is an internal extension of SigningMechanism. +type signingMechanismWithPassphrase interface { + SigningMechanism + + // SignWithPassphrase creates a (non-detached) signature of input using keyIdentity and passphrase. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) +} + // SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. type SigningNotSupportedError string @@ -70,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents if !md.IsSigned { return nil, "", errors.New("The input is not a signature") } - content, err := ioutil.ReadAll(md.UnverifiedBody) + content, err := io.ReadAll(md.UnverifiedBody) if err != nil { // Coverage: An error during reading the body can happen only if // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go index 6ae74d430d6..4c7968417ed 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -5,11 +5,11 @@ package signature import ( "bytes" + "errors" "fmt" - "io/ioutil" "os" - "github.com/mtrmac/gpgme" + "github.com/proglottis/gpgme" ) // A GPG/OpenPGP signing mechanism, implemented using gpgme. @@ -20,7 +20,7 @@ type gpgmeSigningMechanism struct { // newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. // The caller must call .Close() on the returned SigningMechanism. -func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { ctx, err := newGPGMEContext(optionalDir) if err != nil { return nil, err @@ -35,8 +35,8 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er // recognizes _only_ public keys from the supplied blob, and returns the identities // of these keys. // The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") +func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { + dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-") if err != nil { return nil, nil, err } @@ -117,9 +117,9 @@ func (m *gpgmeSigningMechanism) SupportsSigning() error { return nil } -// Sign creates a (non-detached) signature of input using keyIdentity. +// SignWithPassphrase creates a (non-detached) signature of input using keyIdentity and passphrase. // Fails with a SigningNotSupportedError if the mechanism does not support signing.
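The gpgme implementation of SignWithPassphrase follows. From the caller's side, the new passphrase plumbing surfaces through SignDockerManifestWithOptions in docker.go above; a minimal sketch (the manifest file, key fingerprint, and environment variable are hypothetical, and the key must already exist in the default GPG home):

package main

import (
	"log"
	"os"

	"github.com/containers/image/v5/signature"
)

func main() {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		log.Fatal(err)
	}
	defer mech.Close()

	// Hypothetical manifest file and key fingerprint.
	manifest, err := os.ReadFile("manifest.json")
	if err != nil {
		log.Fatal(err)
	}
	sig, err := signature.SignDockerManifestWithOptions(
		manifest, "docker.io/library/busybox:latest", mech,
		"0123456789ABCDEF0123456789ABCDEF01234567", // hypothetical key fingerprint
		&signature.SignOptions{Passphrase: os.Getenv("SIGNING_PASSPHRASE")})
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("manifest.signature", sig, 0o600); err != nil {
		log.Fatal(err)
	}
}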
-func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { +func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { key, err := m.ctx.GetKey(keyIdentity, true) if err != nil { return nil, err @@ -133,12 +133,38 @@ func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, if err != nil { return nil, err } + + if passphrase != "" { + // Callback to write the passphrase to the specified file descriptor. + callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error { + if prevWasBad { + return errors.New("bad passphrase") + } + _, err := gpgmeFD.WriteString(passphrase + "\n") + return err + } + if err := m.ctx.SetCallback(callback); err != nil { + return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err) + } + + // Loopback mode will use the callback instead of prompting the user. + if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil { + return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err) + } + } + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { return nil, err } return sigBuffer.Bytes(), nil } +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + // Verify parses unverifiedSignature and returns the content and the signer's identity func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { signedBuffer := bytes.Buffer{} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go index 0a09788f989..63cb7788bbb 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -7,14 +7,21 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "os" "path" "strings" "time" "github.com/containers/storage/pkg/homedir" - "golang.org/x/crypto/openpgp" + // This is fallback code; the primary recommendation is to use the gpgme mechanism + // implementation, which is out-of-process and more appropriate for handling long-term private key material + // than any Go implementation. + // For this verify-only fallback, we haven't reviewed the existing + // alternatives in order to choose one; so, for now, we continue to + // use this frozen deprecated implementation. + //lint:ignore SA1019 See above + "golang.org/x/crypto/openpgp" //nolint:staticcheck ) // A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. @@ -24,7 +31,7 @@ type openpgpSigningMechanism struct { // newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. // The caller must call .Close() on the returned SigningMechanism.
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { m := &openpgpSigningMechanism{ keyring: openpgp.EntityList{}, } @@ -37,7 +44,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er } } - pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) + pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg")) if err != nil { if !os.IsNotExist(err) { return nil, err @@ -55,7 +62,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er // recognizes _only_ public keys from the supplied blob, and returns the identities // of these keys. // The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { +func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { m := &openpgpSigningMechanism{ keyring: openpgp.EntityList{}, } @@ -104,10 +111,16 @@ func (m *openpgpSigningMechanism) SupportsSigning() error { -// Sign creates a (non-detached) signature of input using keyIdentity. +// SignWithPassphrase creates a (non-detached) signature of input using keyIdentity and passphrase. // Fails with a SigningNotSupportedError if the mechanism does not support signing. -func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { +func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") } +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + // Verify parses unverifiedSignature and returns the content and the signer's identity func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) @@ -117,7 +130,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [ if !md.IsSigned { return nil, "", errors.New("not signed") } - content, err := ioutil.ReadAll(md.UnverifiedBody) + content, err := io.ReadAll(md.UnverifiedBody) if err != nil { // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted // (and possibly also signed, but it _must_ be encrypted) and the signing diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index 82fbb68cb14..bb91cae8c16 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -16,7 +16,6 @@ package signature import ( "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -80,7 +79,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri // NewPolicyFromFile returns a policy configured in the specified file.
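NewPolicyFromFile, continued just below, shows the io/ioutil retirement pattern that recurs throughout this bump: Go 1.16 moved these helpers into os and io with unchanged semantics, so each call site is a one-for-one swap. A compact sketch of the replacements used here:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.ReadFile -> os.ReadFile
	if data, err := os.ReadFile("/etc/os-release"); err == nil {
		fmt.Printf("read %d bytes\n", len(data))
	}

	// ioutil.ReadAll -> io.ReadAll
	body, _ := io.ReadAll(strings.NewReader("payload"))
	fmt.Println(string(body))

	// ioutil.TempDir -> os.MkdirTemp (and ioutil.TempFile -> os.CreateTemp)
	if dir, err := os.MkdirTemp("", "example-"); err == nil {
		defer os.RemoveAll(dir)
	}

	// ioutil.NopCloser -> io.NopCloser
	rc := io.NopCloser(strings.NewReader("stream"))
	defer rc.Close()
}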
func NewPolicyFromFile(fileName string) (*Policy, error) { - contents, err := ioutil.ReadFile(fileName) + contents, err := os.ReadFile(fileName) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index 26cca4759e0..65e8259732b 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -5,7 +5,7 @@ package signature import ( "context" "fmt" - "io/ioutil" + "os" "strings" "github.com/containers/image/v5/manifest" @@ -33,7 +33,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types if pr.KeyData != nil { data = pr.KeyData } else { - d, err := ioutil.ReadFile(pr.KeyPath) + d, err := os.ReadFile(pr.KeyPath) if err != nil { return sarRejected, nil, err } diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go index 09f4f85e0a8..05bf8229e7d 100644 --- a/vendor/github.com/containers/image/v5/signature/signature.go +++ b/vendor/github.com/containers/image/v5/signature/signature.go @@ -190,12 +190,20 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { // of the system just because it is a private key — actually the presence of a private key // on the system increases the likelihood of a successful attack on that private key // on that particular system.) -func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) { +func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) { json, err := json.Marshal(s) if err != nil { return nil, err } + if newMech, ok := mech.(signingMechanismWithPassphrase); ok { + return newMech.SignWithPassphrase(json, keyIdentity, passphrase) + } + + if passphrase != "" { + return nil, errors.New("signing mechanism does not support passphrases") + } + return mech.Sign(json, keyIdentity) } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index 7329ef6eee0..8071e3b32fa 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -10,7 +10,6 @@ import ( stderrors "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" @@ -18,9 +17,9 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/tmpdir" - internalTypes "github.com/containers/image/v5/internal/types" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/types" @@ -155,7 +154,7 @@ func (s *storageImageSource) HasThreadSafeGetBlob() bool { // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { if info.Digest == image.GzippedEmptyLayerDigest { - return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil + return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil } // NOTE: the blob is first written to a temporary file and subsequently @@ -167,7 +166,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c } defer rc.Close() - tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "") + tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "") if err != nil { return nil, 0, err } @@ -210,7 +209,7 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC } r := bytes.NewReader(b) logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil + return io.NopCloser(r), int64(r.Len()), "", nil } // Step through the list of matching layers. Tests may want to verify that if we have multiple layers // which claim to have the same contents, that we actually do have multiple layers, otherwise we could @@ -395,7 +394,7 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest * // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until // it's time to Commit() the image func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { - directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") + directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") if err != nil { return nil, errors.Wrapf(err, "creating a temporary directory") } @@ -446,15 +445,20 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) } -// PutBlobWithOptions is a wrapper around PutBlob. If options.LayerIndex is -// set, the blob will be committed directly. Either by the calling goroutine -// or by another goroutine already committing layers. -// -// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be -// used the together. Mixing the two with non "WithOptions" functions is not -// supported. -func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options internalTypes.PutBlobOptions) (types.BlobInfo, error) { - info, err := s.PutBlob(ctx, stream, blobinfo, options.Cache, options.IsConfig) +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (s *storageImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. 
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options) if err != nil { return info, err } @@ -466,11 +470,6 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) } -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (s *storageImageDestination) HasThreadSafePutBlob() bool { - return true -} - // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. @@ -480,6 +479,15 @@ func (s *storageImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + return s.PutBlobWithOptions(ctx, stream, blobinfo, private.PutBlobOptions{ + Cache: cache, + IsConfig: isConfig, + }) +} + +// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file. +// The caller must arrange the blob to be eventually committed using s.commitLayer(). +func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) { // Stores a layer or data blob in our temporary directory, checking that any information // in the blobinfo matches the incoming data. errorBlobInfo := types.BlobInfo{ @@ -534,7 +542,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, s.lock.Unlock() // This is safe because we have just computed diffID, and blobDigest was either computed // by us, or validated by the caller (usually copy.digestingReader). - cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) + options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) return types.BlobInfo{ Digest: blobDigest, Size: blobSize, @@ -542,80 +550,40 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, }, nil } -// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If -// options.LayerIndex is set, the reused blob will be recoreded as already -// pulled. -// -// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be -// used the together. Mixing the two with the non "WithOptions" functions -// is not supported.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) { - reused, info, err := s.tryReusingBlobWithSrcRef(ctx, blobinfo, options.Cache, options.CanSubstitute, options.SrcRef) - if err != nil || !reused || options.LayerIndex == nil { - return reused, info, err - } - - return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) -} - -// tryReusingBlobWithSrcRef is a wrapper around TryReusingBlob. -// If ref is provided, this function first tries to get layer from Additional Layer Store. -func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool, ref reference.Named) (bool, types.BlobInfo, error) { - // lock the entire method as it executes fairly quickly - s.lock.Lock() - defer s.lock.Unlock() - - if ref != nil { - // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String()) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) - } else if err == nil { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() - s.blobAdditionalLayer[blobinfo.Digest] = aLayer - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: aLayer.CompressedSize(), - MediaType: blobinfo.MediaType, - }, nil - } - } - - return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute) -} - type zstdFetcher struct { - stream internalTypes.ImageSourceSeekable - ctx context.Context - blobInfo types.BlobInfo + chunkAccessor private.BlobChunkAccessor + ctx context.Context + blobInfo types.BlobInfo } -// GetBlobAt converts from chunked.GetBlobAt to ImageSourceSeekable.GetBlobAt. +// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - var newChunks []internalTypes.ImageSourceChunk + var newChunks []private.ImageSourceChunk for _, v := range chunks { - i := internalTypes.ImageSourceChunk{ + i := private.ImageSourceChunk{ Offset: v.Offset, Length: v.Length, } newChunks = append(newChunks, i) } - rc, errs, err := f.stream.GetBlobAt(f.ctx, f.blobInfo, newChunks) - if _, ok := err.(internalTypes.BadPartialRequestError); ok { + rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks) + if _, ok := err.(private.BadPartialRequestError); ok { err = chunked.ErrBadRequest{} } return rc, errs, err } -// PutBlobPartial attempts to create a blob using the data that is already present at the destination storage. stream is accessed -// in a non-sequential way to retrieve the missing chunks. -func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream internalTypes.ImageSourceSeekable, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). 
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { fetcher := zstdFetcher{ - stream: stream, - ctx: ctx, - blobInfo: srcInfo, + chunkAccessor: chunkAccessor, + ctx: ctx, + blobInfo: srcInfo, } differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) @@ -640,6 +608,22 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int return srcInfo, nil } +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options) + if err != nil || !reused || options.LayerIndex == nil { + return reused, info, err + } + + return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) +} + // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. @@ -650,16 +634,36 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + return s.TryReusingBlobWithOptions(ctx, blobinfo, private.TryReusingBlobOptions{ + Cache: cache, + CanSubstitute: canSubstitute, + }) +} + +// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata. +// The caller must arrange the blob to be eventually committed using s.commitLayer(). +func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { // lock the entire method as it executes fairly quickly s.lock.Lock() defer s.lock.Unlock() - return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute) -} + if options.SrcRef != nil { + // Check if we have the layer in the underlying additional layer store.
+ aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String()) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) + } else if err == nil { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() + s.blobAdditionalLayer[blobinfo.Digest] = aLayer + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: aLayer.CompressedSize(), + MediaType: blobinfo.MediaType, + }, nil + } + } -// tryReusingBlobLocked implements a core functionality of TryReusingBlob. -// This must be called with a lock being held on storageImageDestination. -func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if blobinfo.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) } @@ -708,9 +712,9 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob // Does the blob correspond to a known DiffID which we already have available? // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the - // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size. - if canSubstitute || blobinfo.Size != -1 { - if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. + if options.CanSubstitute || blobinfo.Size != -1 { + if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest) @@ -720,8 +724,8 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest return true, blobinfo, nil } - if !canSubstitute { - return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) + if !options.CanSubstitute { + return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo) } s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest return true, types.BlobInfo{ @@ -786,7 +790,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er } // Assume it's a file, since we're only calling this from a place that expects to read files. 
if filename, ok := s.filenames[info.Digest]; ok { - contents, err2 := ioutil.ReadFile(filename) + contents, err2 := os.ReadFile(filename) if err2 != nil { return nil, errors.Wrapf(err2, `reading blob from file %q`, filename) } @@ -1131,7 +1135,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t delete(dataBlobs, layerBlob.Digest) } for blob := range dataBlobs { - v, err := ioutil.ReadFile(s.filenames[blob]) + v, err := os.ReadFile(s.filenames[blob]) if err != nil { return errors.Wrapf(err, "copying non-layer blob %q to image", blob) } @@ -1192,21 +1196,13 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } logrus.Debugf("saved image metadata %q", string(metadata)) } - // Set the reference's name on the image. We don't need to worry about avoiding duplicate - // values because SetNames() will deduplicate the list that we pass to it. - if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { - names := []string{} - if name != nil { - names = append(names, name.String()) + // Add the reference's name to the image. We don't need to worry about avoiding duplicate + // values because AddNames() will deduplicate the list that we pass to it. + if name := s.imageRef.DockerReference(); name != nil { + if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil { + return errors.Wrapf(err, "adding name %q to image %q", name, img.ID) } - if len(oldNames) > 0 { - names = append(names, oldNames...) - } - if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { - logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) - return errors.Wrapf(err, "setting names %v on image %q", names, img.ID) - } - logrus.Debugf("set names of image %q to %v", img.ID, names) + logrus.Debugf("added name %q to image %q", name, img.ID) } commitSucceeded = true @@ -1261,6 +1257,11 @@ func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { return true // Yes, we want the unmodified manifest } +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (s *storageImageDestination) SupportsPutBlobPartial() bool { + return true +} + // PutSignatures records the image's signatures for committing as a single data blob. func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { sizes := []int{} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 694ad17bd1a..aedfdf5de65 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "runtime" "strings" @@ -87,7 +86,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System uncompressed = nil } // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - n, err := io.Copy(ioutil.Discard, reader) + n, err := io.Copy(io.Discard, reader) if err != nil { return nil, fmt.Errorf("error reading %q: %v", filename, err) } @@ -217,14 +216,14 @@ func (is *tarballImageSource) HasThreadSafeGetBlob() bool { func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { // We should only be asked about things in the manifest. Maybe the configuration blob.
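On the Commit() change above: the point of AddNames is that the merge with any existing names happens inside the store, under its own lock, instead of the old read-merge-write via SetNames, where two concurrent writers could each read the old list and one writer's name would be silently lost. A minimal sketch of the new pattern; `store` and `imageID` are placeholders:

```go
package storagetest // illustrative sketch only

import (
	"fmt"

	"github.com/containers/storage"
)

// tagImage adds one more name to an image without disturbing existing names.
// The old equivalent — Names() followed by SetNames(append(...)) — was racy.
func tagImage(store storage.Store, imageID, newName string) error {
	if err := store.AddNames(imageID, []string{newName}); err != nil {
		return fmt.Errorf("adding name %q to image %q: %w", newName, imageID, err)
	}
	return nil // AddNames deduplicates internally, so repeat calls are safe
}
```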
if blobinfo.Digest == is.configID { - return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil } // Maybe one of the layer blobs. for i := range is.blobIDs { if blobinfo.Digest == is.blobIDs[i] { // We want to read that layer: open the file or memory block and hand it back. if is.filenames[i] == "-" { - return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil } reader, err := os.Open(is.filenames[i]) if err != nil { diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go index d407c657fae..63d835530b5 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -3,7 +3,7 @@ package tarball import ( "errors" "fmt" - "io/ioutil" + "io" "os" "strings" @@ -36,7 +36,7 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc filenames := strings.Split(reference, separator) for _, filename := range filenames { if filename == "-" { - stdin, err = ioutil.ReadAll(os.Stdin) + stdin, err = io.ReadAll(os.Stdin) if err != nil { return nil, fmt.Errorf("error buffering stdin: %v", err) } diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 2110a091d28..0bae8b2599d 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -12,7 +12,9 @@ import ( _ "github.com/containers/image/v5/oci/archive" _ "github.com/containers/image/v5/oci/layout" _ "github.com/containers/image/v5/openshift" + _ "github.com/containers/image/v5/sif" _ "github.com/containers/image/v5/tarball" + // The ostree transport is registered by ostree*.go // The storage transport is registered by storage*.go "github.com/containers/image/v5/transports" diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index c98a6c6fda0..dcff8caf76b 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -561,6 +561,11 @@ type SystemContext struct { UserShortNameAliasConfPath string // If set, short-name resolution in pkg/shortnames must follow the specified mode ShortNameMode *ShortNameMode + // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and + // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce + // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this + // specific context. 
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool // If not "", overrides the default path for the authentication file, but only new format files AuthFilePath string // if not "", overrides the default path for the authentication file, but with the legacy format; diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index ffb2a4ce259..6295b549305 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 17 + VersionMinor = 21 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md new file mode 100644 index 00000000000..83b061c70b2 --- /dev/null +++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The libtrust Project Community Code of Conduct + +The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md new file mode 100644 index 00000000000..fab2c41e89b --- /dev/null +++ b/vendor/github.com/containers/libtrust/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the libtrust Project + +The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects. 
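Back on the types.SystemContext addition a few hunks up: the new field is deliberately narrow, and a Docker-compatible API layer is its only intended user. Illustrative only, with the field name taken verbatim from the diff:

```go
package compat // illustrative sketch only

import "github.com/containers/image/v5/types"

// compatAPIContext builds the SystemContext a Docker-compatible REST API
// handler would use, so that a short name like "alpine" always resolves to
// docker.io/library/alpine, ignoring unqualified-search registries and
// short-name aliases from registries.conf.
func compatAPIContext() *types.SystemContext {
	return &types.SystemContext{
		PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub: true,
	}
}
```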
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index d080d790c1a..fd3d310548d 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,17 +17,17 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-34" - PRIOR_FEDORA_NAME: "fedora-33" + FEDORA_NAME: "fedora-35" + PRIOR_FEDORA_NAME: "fedora-34" UBUNTU_NAME: "ubuntu-2104" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - _BUILT_IMAGE_SUFFIX: "c6431352024203264" - FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}" - UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}" + IMAGE_SUFFIX: "c4512539143831552" + FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" + UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" #### #### Command variables to help avoid duplication @@ -117,7 +117,7 @@ lint_task: env: CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" container: - image: golang:1.15 + image: golang:1.16 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -132,7 +132,7 @@ lint_task: meta_task: container: - image: "quay.io/libpod/imgts:${_BUILT_IMAGE_SUFFIX}" + image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}" cpu: 1 memory: 1 @@ -154,7 +154,7 @@ meta_task: vendor_task: container: - image: golang:1.15 + image: golang:1.16 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -172,6 +172,6 @@ success_task: - meta - vendor container: - image: golang:1.15 + image: golang:1.16 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index dbc1f7c9987..2c1e4a18566 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -51,6 +51,9 @@ sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*. containers-storage: $(sources) ## build using gc on the host $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage +codespell: + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L flate,uint,iff,od,ERRO -w + binary local-binary: containers-storage local-gccgo: ## build using gccgo on the host @@ -66,44 +69,44 @@ local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd done cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ docs: install.tools ## build the docs on the host $(MAKE) -C docs docs gccgo: ## build using gccgo using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ test: local-binary ## build the binaries and run the tests using VMs - $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration + $(RUNINVM) $(MAKE) local-binary local-cross local-test-unit local-test-integration local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges) @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... 
| grep -v ^$(PACKAGE)/vendor) test-unit: local-binary ## run the unit tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) @cd tests; ./test_runner.bash test-integration: local-binary ## run the integration tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ local-validate: ## validate DCO and gofmt on the host @./hack/git-validation.sh @./hack/gofmt.sh validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ install.tools: - make -C tests/tools + $(MAKE) -C tests/tools $(FFJSON): - make -C tests/tools + $(MAKE) -C tests/tools install.docs: docs - make -C docs install + $(MAKE) -C docs install install: install.docs diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index bf50e910e62..148dabb457e 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.37.0 +1.40.2 diff --git a/vendor/github.com/containers/storage/Vagrantfile b/vendor/github.com/containers/storage/Vagrantfile deleted file mode 100644 index c82c1f81bd3..00000000000 --- a/vendor/github.com/containers/storage/Vagrantfile +++ /dev/null @@ -1,25 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -# -# The fedora/28-cloud-base and debian/jessie64 boxes are also available for -# the "virtualbox" provider. Set the VAGRANT_PROVIDER environment variable to -# "virtualbox" to use them instead. -# -Vagrant.configure("2") do |config| - config.vm.define "fedora" do |c| - c.vm.box = "fedora/28-cloud-base" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end - config.vm.define "debian" do |c| - c.vm.box = "debian/jessie64" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end -end diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index b4f773f2b73..a8b20f03a01 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -84,8 +84,17 @@ type ContainerStore interface { // SetNames updates the list of names associated with the container // with the specified ID. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the container with + // the specified id. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the container with + // the specified id. + RemoveNames(id string, names []string) error + // Get retrieves information about a container given an ID or name. Get(id string) (*Container, error) @@ -324,6 +333,12 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". 
You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) } } + if err := hasOverlappingRanges(options.UIDMap); err != nil { + return nil, err + } + if err := hasOverlappingRanges(options.GIDMap); err != nil { + return nil, err + } if err == nil { container = &Container{ ID: id, @@ -371,22 +386,40 @@ func (r *containerStore) removeName(container *Container, name string) { container.Names = stringSliceWithoutValue(container.Names, name) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. func (r *containerStore) SetNames(id string, names []string) error { - names = dedupeNames(names) - if container, ok := r.lookup(id); ok { - for _, name := range container.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherContainer, ok := r.byname[name]; ok { - r.removeName(otherContainer, name) - } - r.byname[name] = container + return r.updateNames(id, names, setNames) +} + +func (r *containerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *containerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + oldNames := container.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) } - container.Names = names - return r.Save() + r.byname[name] = container } - return ErrContainerUnknown + container.Names = names + return r.Save() } func (r *containerStore) Delete(id string) error { diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index a566fbffa0f..e66613c098a 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -26,6 +27,7 @@ import ( "bufio" "fmt" "io" + "io/fs" "io/ioutil" "os" "os/exec" @@ -649,11 +651,11 @@ func (a *Driver) mounted(mountpoint string) (bool, error) { // Cleanup aufs and unmount all mountpoints func (a *Driver) Cleanup() error { var dirs []string - if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error { if err != nil { return err } - if !info.IsDir() { + if !d.IsDir() { return nil } dirs = append(dirs, path) diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 3903b1dddd9..339aa0d3809 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package btrfs @@ -16,6 +17,7 @@ import "C" import ( "fmt" + "io/fs" "io/ioutil" "math" "os" @@ -256,7 +258,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { var args C.struct_btrfs_ioctl_vol_args // walk the btrfs subvolumes - walkSubvolumes := func(p string, f os.FileInfo, err error) error { + walkSubvolumes := func(p string, d fs.DirEntry, err error) 
error { if err != nil { if os.IsNotExist(err) && p != fullPath { // missing most likely because the path was a subvolume that got removed in the previous iteration @@ -267,20 +269,20 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { } // we want to check children only so skip itself // it will be removed after the filepath walk anyways - if f.IsDir() && p != fullPath { + if d.IsDir() && p != fullPath { sv, err := isSubvolume(p) if err != nil { return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) } if sv { - if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { + if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil { return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) } } } return nil } - if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil { return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) } diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index 63bfd2d136f..2db6764c91b 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -50,11 +50,14 @@ func chownByMapsMain() { if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { toHost = nil } + + chowner := newLChowner() + chown := func(path string, info os.FileInfo, _ error) error { if path == "." { return nil } - return platformLChown(path, info, toHost, toContainer) + return chowner.LChown(path, info, toHost, toContainer) } if err := pwalk.Walk(".", chown); err != nil { fmt.Fprintf(os.Stderr, "error during chown: %v", err) diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go index 0387adfc123..c598b936d64 100644 --- a/vendor/github.com/containers/storage/drivers/chown_unix.go +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package graphdriver @@ -6,17 +7,50 @@ import ( "errors" "fmt" "os" + "sync" "syscall" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]bool +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]bool), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { st, ok := info.Sys().(*syscall.Stat_t) if !ok { return nil } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + c.mutex.Lock() + _, found := c.inodes[i] + if !found { + c.inodes[i] = true + } + c.mutex.Unlock() + + if found { + return nil + } + // Map an on-disk UID/GID pair from host to container // using the first map, then back to the host using the // second map. Skip that first step if they're 0, to @@ -42,7 +76,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools. 
UID: uid, GID: gid, } - mappedPair, err := toHost.ToHost(pair) + mappedPair, err := toHost.ToHostOverflow(pair) if err != nil { return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) } @@ -50,7 +84,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools. } if uid != int(st.Uid) || gid != int(st.Gid) { cap, err := system.Lgetxattr(path, "security.capability") - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("%s: %v", os.Args[0], err) } diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go index 31bd5bb52dd..1845a4e086c 100644 --- a/vendor/github.com/containers/storage/drivers/chown_windows.go +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package graphdriver @@ -9,6 +10,13 @@ import ( "github.com/containers/storage/pkg/idtools" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type platformChowner struct { +} + +func newLChowner() *platformChowner { + return &platformChowner{} +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { return &os.PathError{"lchown", path, syscall.EWINDOWS} } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index c5168bfdd25..e604b7e3186 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devmapper @@ -6,6 +7,7 @@ import ( "bufio" "fmt" "io" + "io/fs" "io/ioutil" "os" "os/exec" @@ -419,40 +421,35 @@ func (devices *DeviceSet) constructDeviceIDMap() { } } -func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { +func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { // Skip some of the meta files which are not device files. - if strings.HasSuffix(finfo.Name(), ".migrated") { + if strings.HasSuffix(name, ".migrated") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if strings.HasPrefix(finfo.Name(), ".") { + if strings.HasPrefix(name, ".") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == deviceSetMetaFile { + if name == deviceSetMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == transactionMetaFile { + if name == transactionMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } logrus.Debugf("devmapper: Loading data for file %s", path) - hash := finfo.Name() - if hash == base { - hash = "" - } - // Include deleted devices also as cleanup delete device logic // will go through it and see if there are any deleted devices. 
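Returning to the chown_unix.go hunk above: the new inode map is a hard-link guard. A standalone restatement of the idea, assuming the same goal of chowning each underlying file exactly once even when it is reachable through several hard links:

```go
package chowntest // illustrative sketch only

import "sync"

// inodeSet records (device, inode) pairs; two paths that are hard links to
// the same file share a pair, so only the first visit does any work.
type inodeSet struct {
	mu   sync.Mutex
	seen map[[2]uint64]struct{}
}

func newInodeSet() *inodeSet {
	return &inodeSet{seen: make(map[[2]uint64]struct{})}
}

// firstVisit reports whether (dev, ino) is new, recording it if so.
func (s *inodeSet) firstVisit(dev, ino uint64) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	k := [2]uint64{dev, ino}
	if _, ok := s.seen[k]; ok {
		return false
	}
	s.seen[k] = struct{}{}
	return true
}
```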
- if _, err := devices.lookupDevice(hash); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + if _, err := devices.lookupDevice(name); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", name, err) } return nil @@ -462,21 +459,21 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { logrus.Debug("devmapper: loadDeviceFilesOnStart()") defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - var scan = func(path string, info os.FileInfo, err error) error { + var scan = func(path string, d fs.DirEntry, err error) error { if err != nil { logrus.Debugf("devmapper: Can't walk the file %s", path) return nil } // Skip any directories - if info.IsDir() { + if d.IsDir() { return nil } - return devices.deviceFileWalkFunction(path, info) + return devices.deviceFileWalkFunction(path, d.Name()) } - return filepath.Walk(devices.metadataDir(), scan) + return filepath.WalkDir(devices.metadataDir(), scan) } // Should be called with devices.Lock() held. diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go index e1320ee07f6..143cccf92ec 100644 --- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -2,15 +2,43 @@ package graphdriver import ( "golang.org/x/sys/unix" + + "github.com/containers/storage/pkg/mount" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) ) var ( // Slice of drivers that should be used in an order priority = []string{ "zfs", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", } ) +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on FreeBSD. 
+func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { var buf unix.Statfs_t diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index a534630df08..b7e681ace45 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -138,6 +138,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p if parent != "" { options := MountOpts{ MountLabel: mountLabel, + Options: []string{"ro"}, } parentFs, err = driver.Get(parent, options) if err != nil { diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 44b3515a854..48fb7a550fa 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -11,6 +12,7 @@ import ( "syscall" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/system" @@ -218,3 +220,55 @@ func doesVolatile(d string) (bool, error) { }() return true, nil } + +// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of +// an idmapped lower layer. +func supportsIdmappedLowerLayers(home string) (bool, error) { + layerDir, err := ioutil.TempDir(home, "compat") + if err != nil { + return false, err + } + defer func() { + _ = os.RemoveAll(layerDir) + }() + + mergedDir := filepath.Join(layerDir, "merged") + lowerDir := filepath.Join(layerDir, "lower") + lowerMappedDir := filepath.Join(layerDir, "lower-mapped") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + + _ = idtools.MkdirAs(mergedDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0700, 0, 0) + _ = idtools.MkdirAs(workDir, 0700, 0, 0) + + idmap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: 0, + Size: 1, + }, + } + pid, cleanupFunc, err := createUsernsProcess(idmap, idmap) + if err != nil { + return false, err + } + defer cleanupFunc() + + if err := createIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil { + return false, errors.Wrapf(err, "create mapped mount") + } + defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH) + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir) + flags := uintptr(0) + if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil { + return false, err + } + defer func() { + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + }() + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_115.go b/vendor/github.com/containers/storage/drivers/overlay/check_115.go deleted file mode 100644 index 9ad1b863d8d..00000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/check_115.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !go1.16 - -package overlay - -import ( - "os" - "path/filepath" - "strings" - -
"github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/system" -) - -func scanForMountProgramIndicators(home string) (detected bool, err error) { - err = filepath.Walk(home, func(path string, info os.FileInfo, err error) error { - if detected { - return filepath.SkipDir - } - if err != nil { - return err - } - basename := filepath.Base(path) - if strings.HasPrefix(basename, archive.WhiteoutPrefix) { - detected = true - return filepath.SkipDir - } - if info.IsDir() { - xattrs, err := system.Llistxattr(path) - if err != nil { - return err - } - for _, xattr := range xattrs { - if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") { - detected = true - return filepath.SkipDir - } - } - } - return nil - }) - return detected, err -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go new file mode 100644 index 00000000000..2af33a6fce0 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go @@ -0,0 +1,160 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +type attr struct { + attrSet uint64 + attrClr uint64 + propagation uint64 + userNs uint64 +} + +const ( + // _MOUNT_ATTR_IDMAP - Idmap mount to @userns_fd in struct mount_attr + _MOUNT_ATTR_IDMAP = 0x00100000 //nolint:golint + + // _OPEN_TREE_CLONE - Clone the source path mount + _OPEN_TREE_CLONE = 0x00000001 //nolint:golint + + // _MOVE_MOUNT_F_EMPTY_PATH - Move the path referenced by the fd + _MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 //nolint:golint +) + +// openTree is a wrapper for the open_tree syscall +func openTree(path string, flags int) (fd int, err error) { + var _p0 *byte + + if _p0, err = syscall.BytePtrFromString(path); err != nil { + return 0, err + } + + r, _, e1 := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)), + uintptr(flags), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return int(r), nil +} + +// moveMount is a wrapper for the the move_mount syscall. +func moveMount(fdTree int, target string) (err error) { + var _p0, _p1 *byte + + empty := "" + + if _p0, err = syscall.BytePtrFromString(target); err != nil { + return err + } + if _p1, err = syscall.BytePtrFromString(empty); err != nil { + return err + } + + flags := _MOVE_MOUNT_F_EMPTY_PATH + + _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT), + uintptr(fdTree), uintptr(unsafe.Pointer(_p1)), + 0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = e1 + } + return +} + +// mountSetAttr is a wrapper for the mount_setattr syscall +func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) { + var _p0 *byte + + if _p0, err = syscall.BytePtrFromString(path); err != nil { + return err + } + + _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)), + uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0) + if e1 != 0 { + err = e1 + } + return +} + +// createIDMappedMount creates a IDMapped bind mount from SOURCE to TARGET using the user namespace +// for the PID process. 
+func createIDMappedMount(source, target string, pid int) error { + path := fmt.Sprintf("/proc/%d/ns/user", pid) + userNsFile, err := os.Open(path) + if err != nil { + return errors.Wrapf(err, "unable to get user ns file descriptor for %q", path) + } + + var attr attr + attr.attrSet = _MOUNT_ATTR_IDMAP + attr.attrClr = 0 + attr.propagation = 0 + attr.userNs = uint64(userNsFile.Fd()) + + defer userNsFile.Close() + + targetDirFd, err := openTree(source, _OPEN_TREE_CLONE|unix.AT_RECURSIVE) + if err != nil { + return err + } + defer unix.Close(targetDirFd) + + if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, + &attr, uint(unsafe.Sizeof(attr))); err != nil { + return err + } + if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) { + return err + } + return moveMount(targetDirFd, target) +} + +// createUsernsProcess forks the current process and creates a user namespace using the specified +// mappings. It returns the pid of the new process. +func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) { + pid, _, err := syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0) + if err != 0 { + return -1, nil, err + } + if pid == 0 { + _ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0) + // just wait for the SIGKILL + for { + syscall.Pause() + } + } + cleanupFunc := func() { + unix.Kill(int(pid), unix.SIGKILL) + _, _ = unix.Wait4(int(pid), nil, 0, nil) + } + writeMappings := func(fname string, idmap []idtools.IDMap) error { + mappings := "" + for _, m := range idmap { + mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600) + } + if err := writeMappings("uid_map", uidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + if err := writeMappings("gid_map", gidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + + return int(pid), cleanupFunc, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 1efe7316d3e..09d24ae8b0d 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -25,7 +26,6 @@ import ( "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" @@ -38,7 +38,6 @@ import ( "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" "golang.org/x/sys/unix" ) @@ -119,7 +118,8 @@ type Driver struct { supportsDType bool supportsVolatile *bool usingMetacopy bool - locker *locker.Locker + + supportsIDMappedMounts *bool } type additionalLayerStore struct { @@ -155,6 +155,15 @@ func hasMetacopyOption(opts []string) bool { return false } +func stripOption(opts []string, option string) []string { + for i, s := range opts { + if s == option { + return stripOption(append(opts[:i], opts[i+1:]...), option) + } + } + return opts +} + func hasVolatileOption(opts []string) bool { for _, s := range opts { if s == "volatile" { @@ -195,6 +204,26 
@@ func checkSupportVolatile(home, runhome string) (bool, error) { return usingVolatile, nil } +// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of an +// idmapped lower layer. +func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) { + feature := "idmapped-lower-dir" + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if overlayCacheResult { + logrus.Debugf("Cached value indicated that overlay is supported") + return true, nil + } + logrus.Debugf("Cached value indicated that overlay is not supported") + return false, errors.New(overlayCacheText) + } + supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home) + if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil { + return false, errors.Wrap(err2, "recording overlay idmapped mounts support status") + } + return supportsIDMappedMounts, err +} + func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) { var supportsDType bool @@ -282,6 +311,31 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) backingFs = fsName } + runhome := filepath.Join(options.RunRoot, filepath.Base(home)) + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if opts.mountProgram == "" { + if supported, err := SupportsNativeOverlay(home, runhome); err != nil { + return nil, err + } else if !supported { + if path, err := exec.LookPath("fuse-overlayfs"); err == nil { + opts.mountProgram = path + } + } + } + if opts.mountProgram != "" { if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil { m := os.FileMode(0700) @@ -306,20 +360,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } } - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return nil, err - } - - // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { - return nil, err - } - runhome := filepath.Join(options.RunRoot, filepath.Base(home)) - if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { - return nil, err - } - var usingMetacopy bool var supportsDType bool var supportsVolatile *bool @@ -380,7 +420,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) supportsDType: supportsDType, usingMetacopy: usingMetacopy, supportsVolatile: supportsVolatile, - locker: locker.New(), options: *opts, } @@ -559,14 +598,11 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) ( return err } -func SupportsNativeOverlay(graphroot, rundir string) (bool, error) { - if os.Geteuid() != 0 || graphroot == "" || rundir == "" { +func SupportsNativeOverlay(home, runhome string) (bool, error) { + if os.Geteuid() != 0 || home == "" || runhome == "" { return false, nil } - home := filepath.Join(graphroot, "overlay") - runhome := filepath.Join(rundir, "overlay") - var contents string flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home)) if err == nil { @@ -881,11 +917,18 @@ func (d *Driver) create(id, parent string, opts 
*graphdriver.CreateOpts, disable if err != nil { return err } + + idPair := idtools.IDPair{ + UID: rootUID, + GID: rootGID, + } + // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(path.Join(d.home, linkDir), 0700, idPair); err != nil { return err } - if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0700, idPair); err != nil { return err } if parent != "" { @@ -896,14 +939,26 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable rootUID = int(st.UID()) rootGID = int(st.GID()) } - if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + + if _, err := system.Lstat(dir); err == nil { + logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir) + // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir, + // so that we can’t end up with two symlinks in linkDir pointing to the same layer. + if err := d.Remove(id); err != nil { + return errors.Wrapf(err, "removing a pre-existing layer directory %q", dir) + } + } + + if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { - os.RemoveAll(dir) + if err2 := os.RemoveAll(dir); err2 != nil { + logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) + } } }() @@ -1039,17 +1094,22 @@ func (d *Driver) getLower(parent string) (string, error) { } func (d *Driver) dir(id string) string { + p, _ := d.dir2(id) + return p +} + +func (d *Driver) dir2(id string) (string, bool) { newpath := path.Join(d.home, id) if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { - return l + return l, true } } } - return newpath + return newpath, false } func (d *Driver) getLowerDirs(id string) ([]string, error) { @@ -1122,9 +1182,6 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { @@ -1145,6 +1202,9 @@ func (d *Driver) Remove(id string) error { // under each layer has a symlink created for it under the linkDir. If the symlink does not // exist, it creates them func (d *Driver) recreateSymlinks() error { + // We have at most 3 corrective actions per layer, so 10 iterations is plenty. 
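The constant introduced below bounds what is otherwise a fixed-point loop: keep applying fixes until a pass changes nothing. Without the cap, two fixes that repeatedly undo each other would spin forever. The abstract shape of the pattern, with `items()` and `repair()` as assumed stand-ins rather than driver code:

```go
// Bounded fixed-point repair loop (sketch).
const maxIterations = 10
iterations, madeProgress := 0, true
for madeProgress {
	madeProgress = false
	for _, it := range items() { // assumed: everything that may need fixing
		if repair(it) { // assumed: returns true if it changed anything
			madeProgress = true
		}
	}
	iterations++
	if iterations >= maxIterations {
		break // give up rather than livelock on mutually-undoing fixes
	}
}
```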
+ const maxIterations = 10 + // List all the directories under the home directory dirs, err := ioutil.ReadDir(d.home) if err != nil { @@ -1162,6 +1222,7 @@ func (d *Driver) recreateSymlinks() error { // Keep looping as long as we take some corrective action in each iteration var errs *multierror.Error madeProgress := true + iterations := 0 for madeProgress { errs = nil madeProgress = false @@ -1175,7 +1236,7 @@ func (d *Driver) recreateSymlinks() error { // Read the "link" file under each layer to get the name of the symlink data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) if err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir)) + errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir.Name())) continue } linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) @@ -1212,7 +1273,12 @@ func (d *Driver) recreateSymlinks() error { if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" { errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target)) // force the link to be recreated on the next pass - os.Remove(filepath.Join(linksDir, link.Name())) + if err := os.Remove(filepath.Join(linksDir, link.Name())); err != nil { + if !os.IsNotExist(err) { + errs = multierror.Append(errs, errors.Wrapf(err, "removing link %q", link)) + } // else don’t report any error, but also don’t set madeProgress. + continue + } madeProgress = true continue } @@ -1222,6 +1288,8 @@ func (d *Driver) recreateSymlinks() error { linkFile := filepath.Join(d.dir(targetID), "link") data, err := ioutil.ReadFile(linkFile) if err != nil || string(data) != link.Name() { + // NOTE: If two or more links point to the same target, we will update linkFile + // with every value of link.Name(), and set madeProgress = true every time. if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID)) continue @@ -1229,6 +1297,11 @@ func (d *Driver) recreateSymlinks() error { madeProgress = true } } + iterations++ + if iterations >= maxIterations { + errs = multierror.Append(errs, fmt.Errorf("Reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) + break + } } if errs != nil { return errs.ErrorOrNil() @@ -1242,18 +1315,20 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) + dir, inAdditionalStore := d.dir2(id) if _, err := os.Stat(dir); err != nil { return "", err } - readWrite := true + readWrite := !inAdditionalStore if !d.SupportsShifting() || options.DisableShifting { disableShifting = true } + logLevel := logrus.WarnLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } optsList := options.Options if len(optsList) == 0 { optsList = strings.Split(d.options.mountOptions, ",") @@ -1262,16 +1337,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // options otherwise the kernel refuses to follow the metacopy xattr. 
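For context on the metacopy handling in this hunk: metacopy=on lets overlayfs copy up only a file's metadata when ownership or permissions change, deferring the data copy-up until the first actual write. That is why the option is now stripped (rather than passed through) when the booted kernel lacks support, and why, per the comment above, layers created with metacopy must keep being mounted with it.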
if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) { if d.usingMetacopy { + logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally") optsList = append(optsList, "metacopy=on") - } else { - logLevel := logrus.WarnLevel - if unshare.IsRootless() { - logLevel = logrus.DebugLevel - } - logrus.StandardLogger().Logf(logLevel, "Ignoring metacopy option from storage.conf, not supported with booted kernel") } } } + if !d.usingMetacopy { + if hasMetacopyOption(optsList) { + logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel") + } + optsList = stripOption(optsList, "metacopy=on") + } + for _, o := range optsList { if o == "ro" { readWrite = false @@ -1322,7 +1399,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } for err == nil { absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) - relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN))) diffN++ st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) if err == nil && !permsKnown { @@ -1416,29 +1493,74 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO workdir := path.Join(dir, "work") - var opts string - if readWrite { - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) - } else { - opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) - } - if len(optsList) > 0 { - opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts) - } - if d.options.mountProgram == "" && unshare.IsRootless() { - opts = fmt.Sprintf("%s,userxattr", opts) + optsList = append(optsList, "userxattr") } - // If "volatile" is not supported by the file system, just ignore the request - if options.Volatile && !hasVolatileOption(strings.Split(opts, ",")) { + if options.Volatile && !hasVolatileOption(optsList) { supported, err := d.getSupportsVolatile() if err != nil { return "", err } + // If "volatile" is not supported by the file system, just ignore the request if supported { - opts = fmt.Sprintf("%s,volatile", opts) + optsList = append(optsList, "volatile") + } + } + + if d.supportsIDmappedMounts() && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 { + var newAbsDir []string + mappedRoot := filepath.Join(d.home, id, "mapped") + if err := os.MkdirAll(mappedRoot, 0700); err != nil { + return "", err } + + pid, cleanupFunc, err := createUsernsProcess(options.UidMaps, options.GidMaps) + if err != nil { + return "", err + } + defer cleanupFunc() + + idMappedMounts := make(map[string]string) + + // rewrite the lower dirs to their idmapped mount. + c := 0 + for _, absLower := range absLowers { + mappedMountSrc := getMappedMountRoot(absLower) + + root, found := idMappedMounts[mappedMountSrc] + if !found { + root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c)) + c++ + if err := createIDMappedMount(mappedMountSrc, root, int(pid)); err != nil { + return "", errors.Wrapf(err, "create mapped mount for %q on %q", mappedMountSrc, root) + } + idMappedMounts[mappedMountSrc] = root + + // overlay takes a reference on the mount, so it is safe to unmount + // the mapped idmounts as soon as the final overlay file system is mounted. 
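The safety argument in the comment above leans on MNT_DETACH being a lazy unmount: the idmapped lower mounts disappear from the mount table immediately, but the kernel keeps them alive until their last user goes away, and the overlay filesystem assembled a few lines later holds such a reference for as long as it is mounted.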
+ defer unix.Unmount(root, unix.MNT_DETACH) + } + + // relative path to the layer through the id mapped mount + rel, err := filepath.Rel(mappedMountSrc, absLower) + if err != nil { + return "", err + } + + newAbsDir = append(newAbsDir, filepath.Join(root, rel)) + } + absLowers = newAbsDir + } + + var opts string + if readWrite { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + } else { + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + } + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) } mountData := label.FormatMountLabel(opts, options.MountLabel) @@ -1447,10 +1569,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO pageSize := unix.Getpagesize() - // Use relative paths and mountFrom when the mount data has exceeded - // the page size. The mount syscall fails if the mount data cannot - // fit within a page and relative links make the mount data much - // smaller at the expense of requiring a fork exec to chroot. if d.options.mountProgram != "" { mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { if !disableShifting { @@ -1476,18 +1594,26 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } return nil } - } else if len(mountData) > pageSize { + } else if len(mountData) >= pageSize { + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. + workdir = path.Join(id, "work") //FIXME: We need to figure out to get this to work with additional stores if readWrite { diffDir := path.Join(id, "diff") opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir) } else { - opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":")) + opts = fmt.Sprintf("lowerdir=%s", strings.Join(relLowers, ":")) + } + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) } mountData = label.FormatMountLabel(opts, options.MountLabel) - if len(mountData) > pageSize { - return "", fmt.Errorf("cannot mount layer, mount label %q too large %d > page size %d", options.MountLabel, len(mountData), pageSize) + if len(mountData) >= pageSize { + return "", fmt.Errorf("cannot mount layer, mount label %q too large %d >= page size %d", options.MountLabel, len(mountData), pageSize) } mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) @@ -1513,8 +1639,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Put unmounts the mount path created for the give id. func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return err @@ -1529,6 +1653,18 @@ func (d *Driver) Put(id string) error { unmounted := false + mappedRoot := filepath.Join(d.home, id, "mapped") + // It should not happen, but cleanup any mapped mount if it was leaked. + if _, err := os.Stat(mappedRoot); err == nil { + mounts, err := ioutil.ReadDir(mappedRoot) + if err == nil { + // Go through all of the mapped mounts. 
+ for _, m := range mounts { + _ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH) + } + } + } + if d.options.mountProgram != "" { // Attempt to unmount the FUSE mount using either fusermount or fusermount3. // If they fail, fallback to unix.Unmount @@ -1606,11 +1742,24 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat { return whiteoutFormat } -type fileGetNilCloser struct { - storage.FileGetter +type overlayFileGetter struct { + diffDirs []string } -func (f fileGetNilCloser) Close() error { +func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) { + for _, d := range g.diffDirs { + f, err := os.Open(filepath.Join(d, path)) + if err == nil { + return f, nil + } + } + if len(g.diffDirs) > 0 { + return os.Open(filepath.Join(g.diffDirs[0], path)) + } + return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist) +} + +func (g *overlayFileGetter) Close() error { return nil } @@ -1619,13 +1768,18 @@ func (d *Driver) getStagingDir() string { } // DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. Used for direct access for tar-split. +// contains files for the layer differences, either for this layer, or one of our +// lowers if we're just a template directory. Used for direct access for tar-split. func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { p, err := d.getDiffPath(id) if err != nil { return nil, err } - return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil + paths, err := d.getLowerDiffPaths(id) + if err != nil { + return nil, err + } + return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil } // CleanupStagingDirectory cleanups the staging directory. @@ -1900,12 +2054,31 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp return nil } +// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with +// overlay lower layers. +func (d *Driver) supportsIDmappedMounts() bool { + if d.supportsIDMappedMounts != nil { + return *d.supportsIDMappedMounts + } + + supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome) + d.supportsIDMappedMounts = &supportsIDMappedMounts + if err == nil { + return supportsIDMappedMounts + } + logrus.Debugf("Check for idmapped mounts support %v", err) + return false +} + // SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS func (d *Driver) SupportsShifting() bool { if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" { return true } - return d.options.mountProgram != "" + if d.options.mountProgram != "" { + return true + } + return d.supportsIDmappedMounts() } // dumbJoin is more or less a dumber version of filepath.Join, but one which @@ -2074,3 +2247,15 @@ func redirectDiffIfAdditionalLayer(diffPath string) (string, error) { } return diffPath, nil } + +// getMappedMountRoot is a heuristic that calculates the parent directory where +// the idmapped mount should be applied. +// It is useful to minimize the number of idmapped mounts and at the same time use +// a common path as long as possible to reduce the length of the mount data argument. 
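A worked example of the heuristic that follows (paths are illustrative): for a lower at /var/lib/containers/storage/overlay/l/AAA, the parent directory is .../overlay/l, whose base name is the link directory l, so the idmapped mount is created once for .../overlay and shared by every layer reached through l/; a plain .../overlay/&lt;id&gt;/diff lower instead gets a per-layer mount rooted at .../overlay/&lt;id&gt;. Fewer mounts, and shorter lowerdir= strings, exactly as the comment says.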
+func getMappedMountRoot(path string) string { + dirName := filepath.Dir(path) + if filepath.Base(dirName) == linkDir { + return filepath.Dir(dirName) + } + return dirName +} diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go index c748468e5cb..4623e7f4648 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -1,4 +1,4 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris package register diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index e034bf152c9..f29dc8f8556 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -344,7 +344,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { return errors.Wrap(err, "error creating zfs mount") } defer func() { - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + if err := detachUnmount(mountpoint); err != nil { logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) } }() @@ -483,7 +483,7 @@ func (d *Driver) Put(id string) error { logger.Debugf(`unmount("%s")`, mountpoint) - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + if err := detachUnmount(mountpoint); err != nil { logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) } if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go index bf690515984..61a2ed871a4 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -2,7 +2,6 @@ package zfs import ( "fmt" - "strings" "github.com/containers/storage/drivers" "github.com/pkg/errors" @@ -26,14 +25,10 @@ func checkRootdirFs(rootdir string) error { } func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return id[:maxlen] + "-" + suffix[1] - } + return id +} - return id[:maxlen] +func detachUnmount(mountpoint string) error { + // FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH + return unix.Unmount(mountpoint, unix.MNT_FORCE) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go index edcb1da36b7..44c68f394ec 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -4,6 +4,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func checkRootdirFs(rootDir string) error { @@ -27,3 +28,7 @@ func checkRootdirFs(rootDir string) error { func getMountpoint(id string) string { return id } + +func detachUnmount(mountpoint string) error { + return unix.Unmount(mountpoint, unix.MNT_DETACH) +} diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go index 5fc810b89d4..de6e377541c 100644 --- 
a/vendor/github.com/containers/storage/errors.go +++ b/vendor/github.com/containers/storage/errors.go @@ -1,6 +1,8 @@ package storage import ( + "errors" + "github.com/containers/storage/types" ) @@ -55,4 +57,9 @@ var ( ErrStoreIsReadOnly = types.ErrStoreIsReadOnly // ErrNotSupported is returned when the requested functionality is not supported. ErrNotSupported = types.ErrNotSupported + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = types.ErrInvalidMappings + // ErrInvalidNameOperation is returned when updateName is called with invalid operation. + // Internal error + errInvalidUpdateNameOperation = errors.New("invalid update name operation") ) diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go index f870b9ceed3..0a06a43235f 100644 --- a/vendor/github.com/containers/storage/idset.go +++ b/vendor/github.com/containers/storage/idset.go @@ -1,6 +1,9 @@ package storage import ( + "fmt" + "strings" + "github.com/containers/storage/pkg/idtools" "github.com/google/go-intervals/intervalset" "github.com/pkg/errors" @@ -218,3 +221,45 @@ func maxInt(a, b int) int { } return a } + +func hasOverlappingRanges(mappings []idtools.IDMap) error { + hostIntervals := intervalset.Empty() + containerIntervals := intervalset.Empty() + + var conflicts []string + + for _, m := range mappings { + c := interval{start: m.ContainerID, end: m.ContainerID + m.Size} + h := interval{start: m.HostID, end: m.HostID + m.Size} + + added := false + overlaps := false + + containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + added = true + } + containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c})) + + hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps && !added { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + } + hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h})) + } + + if conflicts != nil { + if len(conflicts) == 1 { + return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mapping %s conflicts with other mappings", conflicts[0]) + } + return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mappings %s conflict with other mappings", strings.Join(conflicts, ", ")) + } + return nil +} diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index bca25a65b8c..a4c3ed22c75 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -136,8 +136,19 @@ type ImageStore interface { // SetNames replaces the list of names associated with an image with the // supplied values. The values are expected to be valid normalized // named image references. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the image with + // the specified id. The values are expected to be valid normalized + // named image references. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the image with + // the specified id. 
The values are expected to be valid normalized + // named image references. + RemoveNames(id string, names []string) error + // Delete removes the record of the image. Delete(id string) error @@ -425,37 +436,36 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c if created.IsZero() { created = time.Now().UTC() } - if err == nil { - image = &Image{ - ID: id, - Digest: searchableDigest, - Digests: nil, - Names: names, - TopLayer: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: created, - Flags: make(map[string]interface{}), - } - err := image.recomputeDigests() - if err != nil { - return nil, errors.Wrapf(err, "error validating digests for new image") - } - r.images = append(r.images, image) - r.idindex.Add(id) - r.byid[id] = image - for _, name := range names { - r.byname[name] = image - } - for _, digest := range image.Digests { - list := r.bydigest[digest] - r.bydigest[digest] = append(list, image) - } - err = r.Save() - image = copyImage(image) + + image = &Image{ + ID: id, + Digest: searchableDigest, + Digests: nil, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: created, + Flags: make(map[string]interface{}), + } + err = image.recomputeDigests() + if err != nil { + return nil, errors.Wrapf(err, "error validating digests for new image") + } + r.images = append(r.images, image) + r.idindex.Add(id) + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + for _, digest := range image.Digests { + list := r.bydigest[digest] + r.bydigest[digest] = append(list, image) } + err = r.Save() + image = copyImage(image) return image, err } @@ -506,26 +516,44 @@ func (i *Image) addNameToHistory(name string) { i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...)) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
func (r *imageStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *imageStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *imageStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) } - names = dedupeNames(names) - if image, ok := r.lookup(id); ok { - for _, name := range image.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherImage, ok := r.byname[name]; ok { - r.removeName(otherImage, name) - } - r.byname[name] = image - image.addNameToHistory(name) + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + oldNames := image.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, ok := r.byname[name]; ok { + r.removeName(otherImage, name) } - image.Names = names - return r.Save() + r.byname[name] = image + image.addNameToHistory(name) } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + image.Names = names + return r.Save() } func (r *imageStore) Delete(id string) error { diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index fbf6ad3621f..bba8d7588f7 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -23,6 +23,7 @@ import ( "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/tarlog" "github.com/containers/storage/pkg/truncindex" + multierror "github.com/hashicorp/go-multierror" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux/label" @@ -220,8 +221,17 @@ type LayerStore interface { // SetNames replaces the list of names associated with a layer with the // supplied values. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the layer with the + // specified id. + AddNames(id string, names []string) error + + // RemoveNames remove the supplied values from the list of names associated with the layer with the + // specified id. + RemoveNames(id string, names []string) error + // Delete deletes a layer with the specified name or ID. 
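applyNameOperation and the setNames/addNames/removeNames operations are defined in a part of the patch not shown here, so the following is only a plausible reading of their semantics, an assumption rather than the patch's code, reusing dedupeNames the way the old SetNames did:

// Assumed semantics of updateNames' three operations (hypothetical sketch).
func applyNameOp(oldNames, args []string, op updateNameOperation) ([]string, error) {
	switch op {
	case setNames:
		// replace wholesale, matching the deprecated SetNames behavior
		return dedupeNames(args), nil
	case addNames:
		return dedupeNames(append(append([]string{}, oldNames...), args...)), nil
	case removeNames:
		drop := make(map[string]bool, len(args))
		for _, n := range args {
			drop[n] = true
		}
		kept := make([]string, 0, len(oldNames))
		for _, n := range oldNames {
			if !drop[n] {
				kept = append(kept, n)
			}
		}
		return kept, nil
	default:
		return nil, errInvalidUpdateNameOperation
	}
}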
Delete(id string) error @@ -398,14 +408,13 @@ func (r *layerStore) Load() error { if layer.Flags == nil { layer.Flags = make(map[string]interface{}) } - if cleanup, ok := layer.Flags[incompleteFlag]; ok { - if b, ok := cleanup.(bool); ok && b { - err = r.deleteInternal(layer.ID) - if err != nil { - break - } - shouldSave = true + if layerHasIncompleteFlag(layer) { + logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID) + err = r.deleteInternal(layer.ID) + if err != nil { + break } + shouldSave = true } } } @@ -674,7 +683,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) } if layer.UncompressedDigest != "" { - r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID) + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) } if err := r.Save(); err != nil { r.driver.Remove(id) @@ -683,11 +692,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s return copyLayer(layer), nil } -func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { +func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) { if !r.IsReadWrite() { return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) } - size = -1 if err := os.MkdirAll(r.rundir, 0700); err != nil { return nil, -1, err } @@ -716,12 +724,32 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab parent = parentLayer.ID } var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings + var ( + templateMetadata string + templateCompressedDigest digest.Digest + templateCompressedSize int64 + templateUncompressedDigest digest.Digest + templateUncompressedSize int64 + templateCompressionType archive.Compression + templateUIDs, templateGIDs []uint32 + templateTSdata []byte + ) if moreOptions.TemplateLayer != "" { + var tserr error templateLayer, ok := r.lookup(moreOptions.TemplateLayer) if !ok { return nil, -1, ErrLayerUnknown } + templateMetadata = templateLayer.Metadata templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap) + templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize + templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize + templateCompressionType = templateLayer.CompressionType + templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...) 
+ templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID)) + if tserr != nil && !os.IsNotExist(tserr) { + return nil, -1, tserr + } } else { templateIDMappings = &idtools.IDMappings{} } @@ -733,6 +761,60 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab if mountLabel != "" { label.ReserveLabel(mountLabel) } + + // Before actually creating the layer, make a persistent record of it with incompleteFlag, + // so that future processes have a chance to delete it. + layer := &Layer{ + ID: id, + Parent: parent, + Names: names, + MountLabel: mountLabel, + Metadata: templateMetadata, + Created: time.Now().UTC(), + CompressedDigest: templateCompressedDigest, + CompressedSize: templateCompressedSize, + UncompressedDigest: templateUncompressedDigest, + UncompressedSize: templateUncompressedSize, + CompressionType: templateCompressionType, + UIDs: templateUIDs, + GIDs: templateGIDs, + Flags: make(map[string]interface{}), + UIDMap: copyIDMap(moreOptions.UIDMap), + GIDMap: copyIDMap(moreOptions.GIDMap), + BigDataNames: []string{}, + } + r.layers = append(r.layers, layer) + r.idindex.Add(id) + r.byid[id] = layer + for _, name := range names { + r.byname[name] = layer + } + for flag, value := range flags { + layer.Flags[flag] = value + } + layer.Flags[incompleteFlag] = true + + succeeded := false + cleanupFailureContext := "" + defer func() { + if !succeeded { + // On any error, try both removing the driver's data as well + // as the in-memory layer record. + if err2 := r.Delete(layer.ID); err2 != nil { + if cleanupFailureContext == "" { + cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site" + } + logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2) + } + } + }() + + err := r.Save() + if err != nil { + cleanupFailureContext = "saving incomplete layer metadata" + return nil, -1, err + } + idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap) opts := drivers.CreateOpts{ MountLabel: mountLabel, @@ -740,89 +822,67 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab IDMappings: idMappings, } if moreOptions.TemplateLayer != "" { - if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) - } - return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer) + if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { + cleanupFailureContext = "creating a layer from template" + return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) } oldMappings = templateIDMappings } else { if writeable { - if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) - } - return nil, -1, errors.Wrapf(err, "error creating read-write layer") + if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-write layer" + return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) } } else { - if err = r.driver.Create(id, parent, &opts); err != 
nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) - } - return nil, -1, errors.Wrapf(err, "error creating layer") + if err := r.driver.Create(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-only layer" + return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) } } oldMappings = parentMappings } if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { - if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. - r.driver.Remove(id) + if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { + cleanupFailureContext = "in UpdateLayerIDMap" return nil, -1, err } } - if err == nil { - layer = &Layer{ - ID: id, - Parent: parent, - Names: names, - MountLabel: mountLabel, - Created: time.Now().UTC(), - Flags: make(map[string]interface{}), - UIDMap: copyIDMap(moreOptions.UIDMap), - GIDMap: copyIDMap(moreOptions.GIDMap), - BigDataNames: []string{}, - } - r.layers = append(r.layers, layer) - r.idindex.Add(id) - r.byid[id] = layer - for _, name := range names { - r.byname[name] = layer - } - for flag, value := range flags { - layer.Flags[flag] = value - } - if diff != nil { - layer.Flags[incompleteFlag] = true - err = r.Save() - if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. - r.driver.Remove(id) - return nil, -1, err - } - size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) - if err != nil { - if r.Delete(layer.ID) != nil { - // Either a driver error or an error saving. - // We now have a layer that's been marked for - // deletion but which we failed to remove. - } - return nil, -1, err - } - delete(layer.Flags, incompleteFlag) + if len(templateTSdata) > 0 { + if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil { + cleanupFailureContext = "creating tar-split parent directory for a copy from template" + return nil, -1, err + } + if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil { + cleanupFailureContext = "creating a tar-split copy from template" + return nil, -1, err } - err = r.Save() + } + + var size int64 = -1 + if diff != nil { + size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. 
- r.driver.Remove(id) + cleanupFailureContext = "applying layer diff" return nil, -1, err } - layer = copyLayer(layer) + } else { + // applyDiffWithOptions in the `diff != nil` case handles this bit for us + if layer.CompressedDigest != "" { + r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) + } } + delete(layer.Flags, incompleteFlag) + err = r.Save() + if err != nil { + cleanupFailureContext = "saving finished layer metadata" + return nil, -1, err + } + + layer = copyLayer(layer) + succeeded = true return layer, size, err } @@ -854,7 +914,6 @@ func (r *layerStore) Mounted(id string) (int, error) { } func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) { - // check whether options include ro option hasReadOnlyOpt := func(opts []string) bool { for _, item := range opts { @@ -1031,25 +1090,43 @@ func (r *layerStore) removeName(layer *Layer, name string) { layer.Names = stringSliceWithoutValue(layer.Names, name) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. func (r *layerStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *layerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *layerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) } - names = dedupeNames(names) - if layer, ok := r.lookup(id); ok { - for _, name := range layer.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherLayer, ok := r.byname[name]; ok { - r.removeName(otherLayer, name) - } - r.byname[name] = layer + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + oldNames := layer.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) } - layer.Names = names - return r.Save() + r.byname[name] = layer } - return ErrLayerUnknown + layer.Names = names + return r.Save() } func (r *layerStore) datadir(id string) string { @@ -1148,6 +1225,17 @@ func (r *layerStore) tspath(id string) string { return filepath.Join(r.layerdir, id+tarSplitSuffix) } +// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true +func layerHasIncompleteFlag(layer *Layer) bool { + // layer.Flags[…] is defined to succeed and return ok == false if Flags == nil + if flagValue, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := flagValue.(bool); ok && b { + return true + } + } + return false +} + func (r *layerStore) deleteInternal(id string) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) @@ -1156,6 +1244,18 @@ func (r *layerStore) deleteInternal(id string) error { if !ok { return ErrLayerUnknown } + // Ensure that if we are interrupted, the layer will be cleaned up. 
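The Put rewrite above is a write-intent pattern: the record is persisted with incompleteFlag before any driver work starts, and the flag is cleared only after everything succeeded, so a crash at any step leaves behind a record that the next Load deletes. Stripped of store details, the shape is (a sketch, not the store's API):

// createWithIntent sketches the ordering used above: save persists the record
// with incompleteFlag set, work is the fallible driver/diff activity, and
// clearAndSave persists it again with the flag removed.
func createWithIntent(save, work, clearAndSave, remove func() error) (err error) {
	succeeded := false
	defer func() {
		if !succeeded {
			_ = remove() // the real code logs this instead of discarding it
		}
	}()
	if err = save(); err != nil { // a crash after this leaves a deletable record
		return err
	}
	if err = work(); err != nil {
		return err
	}
	if err = clearAndSave(); err != nil {
		return err
	}
	succeeded = true
	return nil
}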
+ if !layerHasIncompleteFlag(layer) { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + layer.Flags[incompleteFlag] = true + if err := r.Save(); err != nil { + return err + } + } + // We never unset incompleteFlag; below, we remove the entire object from r.layers. + id = layer.ID err := r.driver.Remove(id) if err != nil { @@ -1463,34 +1563,48 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, } return maybeCompressReadCloser(diff) } - defer tsfile.Close() decompressor, err := pgzip.NewReader(tsfile) if err != nil { - return nil, err - } - defer decompressor.Close() - - tsbytes, err := ioutil.ReadAll(decompressor) - if err != nil { + if e := tsfile.Close(); e != nil { + logrus.Debug(e) + } return nil, err } - metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes)) + metadata = storage.NewJSONUnpacker(decompressor) fgetter, err := r.newFileGetter(to) if err != nil { - return nil, err + errs := multierror.Append(nil, errors.Wrapf(err, "creating file-getter")) + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + return nil, errs.ErrorOrNil() } tarstream := asm.NewOutputTarStream(fgetter, metadata) rc := ioutils.NewReadCloserWrapper(tarstream, func() error { - err1 := tarstream.Close() - err2 := fgetter.Close() - if err2 == nil { - return err1 + var errs *multierror.Error + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + if err := tarstream.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing reconstructed tarstream")) + } + if err := fgetter.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing file-getter")) + } + if errs != nil { + return errs.ErrorOrNil() } - return err2 + return nil }) return maybeCompressReadCloser(rc) } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 76544ff289b..d4f129ee634 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -7,6 +7,7 @@ import ( "compress/bzip2" "fmt" "io" + "io/fs" "io/ioutil" "os" "path/filepath" @@ -77,6 +78,10 @@ const ( containersOverrideXattr = "user.containers.override_stat" ) +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, +} + // Archiver allows the reuse of most utility functions of this package with a // pluggable Untar function. 
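The Diff wrapper above now has to close four resources without losing any failure, which is what hashicorp/go-multierror provides. The same aggregation pattern in isolation:

import (
	"io"

	multierror "github.com/hashicorp/go-multierror"
)

// closeAll closes every resource and reports all failures, not just the first.
func closeAll(closers ...io.Closer) error {
	var errs *multierror.Error
	for _, c := range closers {
		if err := c.Close(); err != nil {
			errs = multierror.Append(errs, err)
		}
	}
	return errs.ErrorOrNil() // nil only when every Close succeeded
}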
To facilitate the passing of specific id mappings // for untar, an archiver can be created with maps which will then be passed to @@ -507,6 +512,10 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } } + if fi.Mode()&os.ModeSocket != 0 { + logrus.Warnf("archive: skipping %q since it is a socket", path) + return nil + } hdr, err := FileInfoHeader(name, fi, link) if err != nil { @@ -743,6 +752,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L var errs []string for key, value := range hdr.Xattrs { + if _, found := xattrsToIgnore[key]; found { + continue + } if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { // We ignore errors here because not all graphdrivers support @@ -852,14 +864,14 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) rebaseName := options.RebaseNames[include] walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil @@ -892,7 +904,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. - if !f.IsDir() { + if !d.IsDir() { return nil } @@ -962,7 +974,10 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) buffer := make([]byte, 1<<20) + doChown := !options.NoLchown if options.ForceMask != nil { + // if ForceMask is in place, make sure lchown is disabled. 
+ doChown = false uid, gid, mode, err := GetFileOwner(dest) if err == nil { value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) @@ -1067,7 +1082,7 @@ loop: chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 2f548b661ce..51fbd9a2197 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -36,7 +36,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi // we just rename the file and make it normal dir, filename := filepath.Split(hdr.Name) hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 + hdr.Mode = 0 hdr.Typeflag = tar.TypeReg hdr.Size = 0 } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go index bbbd8c9de87..8769f2291b6 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -1,9 +1,11 @@ +//go:build !linux // +build !linux package archive import ( "fmt" + "io/fs" "os" "path/filepath" "runtime" @@ -41,7 +43,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { root := newRootFileInfo(idMappings) - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { if err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index 14ffad5c0d4..ca8832fe421 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -4,6 +4,7 @@ import ( "archive/tar" "fmt" "io" + "io/fs" "io/ioutil" "os" "path/filepath" @@ -134,7 +135,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if err != nil { return 0, err } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { if os.IsNotExist(err) { err = nil // parent was deleted diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go new file mode 100644 index 00000000000..b8b278a1329 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -0,0 +1,627 @@ +package chunked + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + "unsafe" + + storage "github.com/containers/storage" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/ioutils" + jsoniter "github.com/json-iterator/go" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + 
"github.com/sirupsen/logrus" +) + +const ( + cacheKey = "chunked-manifest-cache" + cacheVersion = 1 +) + +type metadata struct { + tagLen int + digestLen int + tags []byte + vdata []byte +} + +type layer struct { + id string + metadata *metadata + target string +} + +type layersCache struct { + layers []layer + refs int + store storage.Store + mutex sync.RWMutex + created time.Time +} + +var cacheMutex sync.Mutex +var cache *layersCache + +func (c *layersCache) release() { + cacheMutex.Lock() + defer cacheMutex.Unlock() + + c.refs-- + if c.refs == 0 { + cache = nil + } +} + +func getLayersCacheRef(store storage.Store) *layersCache { + cacheMutex.Lock() + defer cacheMutex.Unlock() + if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 { + cache.refs++ + return cache + } + cache := &layersCache{ + store: store, + refs: 1, + created: time.Now(), + } + return cache +} + +func getLayersCache(store storage.Store) (*layersCache, error) { + c := getLayersCacheRef(store) + + if err := c.load(); err != nil { + c.release() + return nil, err + } + return c, nil +} + +func (c *layersCache) load() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + allLayers, err := c.store.Layers() + if err != nil { + return err + } + existingLayers := make(map[string]string) + for _, r := range c.layers { + existingLayers[r.id] = r.target + } + + currentLayers := make(map[string]string) + for _, r := range allLayers { + currentLayers[r.ID] = r.ID + if _, found := existingLayers[r.ID]; found { + continue + } + + bigData, err := c.store.LayerBigData(r.ID, cacheKey) + // if the cache areadly exists, read and use it + if err == nil { + defer bigData.Close() + metadata, err := readMetadataFromCache(bigData) + if err == nil { + c.addLayer(r.ID, metadata) + continue + } + logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err) + } else if errors.Cause(err) != os.ErrNotExist { + return err + } + + // otherwise create it from the layer TOC. + manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey) + if err != nil { + continue + } + defer manifestReader.Close() + + manifest, err := ioutil.ReadAll(manifestReader) + if err != nil { + return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) + } + + metadata, err := writeCache(manifest, r.ID, c.store) + if err == nil { + c.addLayer(r.ID, metadata) + } + } + + var newLayers []layer + for _, l := range c.layers { + if _, found := currentLayers[l.id]; found { + newLayers = append(newLayers, l) + } + } + c.layers = newLayers + + return nil +} + +// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file +// is usable for deduplication with hardlinks. +// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs. 
+func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { + digester := digest.Canonical.Digester() + + modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode) + hash := digester.Hash() + + if _, err := hash.Write([]byte(f.Digest)); err != nil { + return "", err + } + + if _, err := hash.Write([]byte(modeString)); err != nil { + return "", err + } + + if len(f.Xattrs) > 0 { + keys := make([]string, 0, len(f.Xattrs)) + for k := range f.Xattrs { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + if _, err := hash.Write([]byte(k)); err != nil { + return "", err + } + if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil { + return "", err + } + } + } + return string(digester.Digest()), nil +} + +// generateFileLocation generates a file location in the form $OFFSET@$PATH +func generateFileLocation(path string, offset uint64) []byte { + return []byte(fmt.Sprintf("%d@%s", offset, path)) +} + +// generateTag generates a tag in the form $DIGEST$OFFSET@LEN. +// the [OFFSET; LEN] points to the variable length data where the file locations +// are stored. $DIGEST has length digestLen stored in the metadata file header. +func generateTag(digest string, offset, len uint64) string { + return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len) +} + +type setBigData interface { + // SetLayerBigData stores a (possibly large) chunk of named data + SetLayerBigData(id, key string, data io.Reader) error +} + +// writeCache write a cache for the layer ID. +// It generates a sorted list of digests with their offset to the path location and offset. +// The same cache is used to lookup files, chunks and candidates for deduplication with hard links. +// There are 3 kind of digests stored: +// - digest(file.payload)) +// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) +// - digest(i) for each i in chunks(file payload) +func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) { + var vdata bytes.Buffer + tagLen := 0 + digestLen := 0 + var tagsBuffer bytes.Buffer + + toc, err := prepareMetadata(manifest) + if err != nil { + return nil, err + } + + var tags []string + for _, k := range toc { + if k.Digest != "" { + location := generateFileLocation(k.Name, 0) + + off := uint64(vdata.Len()) + l := uint64(len(location)) + + d := generateTag(k.Digest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + fp, err := calculateHardLinkFingerprint(k) + if err != nil { + return nil, err + } + d = generateTag(fp, off, l) + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + + digestLen = len(k.Digest) + } + if k.ChunkDigest != "" { + location := generateFileLocation(k.Name, uint64(k.ChunkOffset)) + off := uint64(vdata.Len()) + l := uint64(len(location)) + d := generateTag(k.ChunkDigest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + digestLen = len(k.ChunkDigest) + } + } + + sort.Strings(tags) + + for _, t := range tags { + if _, err := tagsBuffer.Write([]byte(t)); err != nil { + return nil, err + } + } + + pipeReader, pipeWriter := io.Pipe() + errChan := make(chan error, 1) + go 
func() {
+		defer pipeWriter.Close()
+		defer close(errChan)
+
+		// version
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil {
+			errChan <- err
+			return
+		}
+
+		// len of a tag
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil {
+			errChan <- err
+			return
+		}
+
+		// len of a digest
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil {
+			errChan <- err
+			return
+		}
+
+		// tags length
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
+			errChan <- err
+			return
+		}
+
+		// vdata length
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil {
+			errChan <- err
+			return
+		}
+
+		// tags
+		if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil {
+			errChan <- err
+			return
+		}
+
+		// variable length data
+		if _, err := pipeWriter.Write(vdata.Bytes()); err != nil {
+			errChan <- err
+			return
+		}
+
+		errChan <- nil
+	}()
+	defer pipeReader.Close()
+
+	counter := ioutils.NewWriteCounter(ioutil.Discard)
+
+	r := io.TeeReader(pipeReader, counter)
+
+	if err := dest.SetLayerBigData(id, cacheKey, r); err != nil {
+		return nil, err
+	}
+
+	if err := <-errChan; err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)
+
+	return &metadata{
+		digestLen: digestLen,
+		tagLen:    tagLen,
+		tags:      tagsBuffer.Bytes(),
+		vdata:     vdata.Bytes(),
+	}, nil
+}
+
+func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
+	var version, tagLen, digestLen, tagsLen, vdataLen uint64
+	if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
+		return nil, err
+	}
+	if version != cacheVersion {
+		return nil, nil
+	}
+	if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil {
+		return nil, err
+	}
+	if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
+		return nil, err
+	}
+	if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
+		return nil, err
+	}
+	if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil {
+		return nil, err
+	}
+
+	tags := make([]byte, tagsLen)
+	// a plain Read may return fewer bytes than requested; ReadFull
+	// guarantees the whole buffer is filled
+	if _, err := io.ReadFull(bigData, tags); err != nil {
+		return nil, err
+	}
+
+	vdata := make([]byte, vdataLen)
+	if _, err := io.ReadFull(bigData, vdata); err != nil {
+		return nil, err
+	}
+
+	return &metadata{
+		tagLen:    int(tagLen),
+		digestLen: int(digestLen),
+		tags:      tags,
+		vdata:     vdata,
+	}, nil
+}
+
+func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
+	toc, err := unmarshalToc(manifest)
+	if err != nil {
+		// ignore errors here. They might be caused by a different manifest format.
+ return nil, nil + } + + var r []*internal.FileMetadata + chunkSeen := make(map[string]bool) + for i := range toc.Entries { + d := toc.Entries[i].Digest + if d != "" { + r = append(r, &toc.Entries[i]) + continue + } + + // chunks do not use hard link dedup so keeping just one candidate is enough + cd := toc.Entries[i].ChunkDigest + if cd != "" && !chunkSeen[cd] { + r = append(r, &toc.Entries[i]) + chunkSeen[cd] = true + } + } + return r, nil +} + +func (c *layersCache) addLayer(id string, metadata *metadata) error { + target, err := c.store.DifferTarget(id) + if err != nil { + return fmt.Errorf("get checkout directory layer %q: %w", id, err) + } + + l := layer{ + id: id, + metadata: metadata, + target: target, + } + c.layers = append(c.layers, l) + return nil +} + +func byteSliceAsString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func findTag(digest string, metadata *metadata) (string, uint64, uint64) { + if len(digest) != metadata.digestLen { + return "", 0, 0 + } + + nElements := len(metadata.tags) / metadata.tagLen + + i := sort.Search(nElements, func(i int) bool { + d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen]) + return strings.Compare(d, digest) >= 0 + }) + if i < nElements { + d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)]) + if digest == d { + startOff := i*metadata.tagLen + metadata.digestLen + parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + off, _ := strconv.ParseInt(parts[0], 10, 64) + len, _ := strconv.ParseInt(parts[1], 10, 64) + return digest, uint64(off), uint64(len) + } + } + return "", 0, 0 +} + +func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) { + if digest == "" { + return "", "", -1, nil + } + + c.mutex.RLock() + defer c.mutex.RUnlock() + + for _, layer := range c.layers { + digest, off, len := findTag(digest, layer.metadata) + if digest != "" { + position := string(layer.metadata.vdata[off : off+len]) + parts := strings.SplitN(position, "@", 2) + offFile, _ := strconv.ParseInt(parts[0], 10, 64) + return layer.target, parts[1], offFile, nil + } + } + + return "", "", -1, nil +} + +// findFileInOtherLayers finds the specified file in other layers. +// file is the file to look for. 
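findTag above can binary-search the raw tags blob only because generateTag zero-pads offset and length with %.20d, making every record identical in width. The idea reduced to essentials (assumes len(key) <= recLen):

// searchFixed finds key among same-width records concatenated in sorted
// order, the property the fixed-width tag format guarantees.
func searchFixed(records []byte, recLen int, key string) int {
	n := len(records) / recLen
	i := sort.Search(n, func(i int) bool {
		return string(records[i*recLen:i*recLen+len(key)]) >= key
	})
	if i < n && string(records[i*recLen:i*recLen+len(key)]) == key {
		return i // index of the matching record
	}
	return -1
}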
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) { + digest := file.Digest + if useHardLinks { + var err error + digest, err = calculateHardLinkFingerprint(file) + if err != nil { + return "", "", err + } + } + target, name, off, err := c.findDigestInternal(digest) + if off == 0 { + return target, name, err + } + return "", "", nil +} + +func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) { + return c.findDigestInternal(chunk.ChunkDigest) +} + +func unmarshalToc(manifest []byte) (*internal.TOC, error) { + var buf bytes.Buffer + count := 0 + var toc internal.TOC + + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type", "name", "linkName", "digest", "chunkDigest", "chunkType": + count += len(iter.ReadStringAsSlice()) + case "xattrs": + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + count += len(iter.ReadStringAsSlice()) + } + default: + iter.Skip() + } + } + } + break + } + + buf.Grow(count) + + getString := func(b []byte) string { + from := buf.Len() + buf.Write(b) + to := buf.Len() + return byteSliceAsString(buf.Bytes()[from:to]) + } + + iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field == "version" { + toc.Version = iter.ReadInt() + continue + } + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + var m internal.FileMetadata + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type": + m.Type = getString(iter.ReadStringAsSlice()) + case "name": + m.Name = getString(iter.ReadStringAsSlice()) + case "linkName": + m.Linkname = getString(iter.ReadStringAsSlice()) + case "mode": + m.Mode = iter.ReadInt64() + case "size": + m.Size = iter.ReadInt64() + case "UID": + m.UID = iter.ReadInt() + case "GID": + m.GID = iter.ReadInt() + case "ModTime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ModTime = &time + case "accesstime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.AccessTime = &time + case "changetime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ChangeTime = &time + case "devMajor": + m.Devmajor = iter.ReadInt64() + case "devMinor": + m.Devminor = iter.ReadInt64() + case "digest": + m.Digest = getString(iter.ReadStringAsSlice()) + case "offset": + m.Offset = iter.ReadInt64() + case "endOffset": + m.EndOffset = iter.ReadInt64() + case "chunkSize": + m.ChunkSize = iter.ReadInt64() + case "chunkOffset": + m.ChunkOffset = iter.ReadInt64() + case "chunkDigest": + m.ChunkDigest = getString(iter.ReadStringAsSlice()) + case "chunkType": + m.ChunkType = getString(iter.ReadStringAsSlice()) + case "xattrs": + m.Xattrs = make(map[string]string) + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + value := iter.ReadStringAsSlice() + m.Xattrs[key] = getString(value) + } + default: + iter.Skip() + } + } + toc.Entries = append(toc.Entries, m) + } + break + } + 
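unmarshalToc's two passes exist so every parsed string can share one allocation: the first pass only sums string bytes, buf.Grow reserves them, and getString then aliases ranges of the buffer through byteSliceAsString. The core of the trick, as a sketch built on the helpers above:

// intern appends b to buf and returns a string aliasing the buffer's storage.
// Precondition: buf.Grow was called with the total size of all strings, so
// this Write never reallocates; if it did, earlier strings would keep the old
// array (memory-safe, but the single-allocation goal is lost), and any later
// in-place mutation of the buffer would change the returned strings.
func intern(buf *bytes.Buffer, b []byte) string {
	from := buf.Len()
	buf.Write(b)
	return byteSliceAsString(buf.Bytes()[from:buf.Len()])
}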
toc.StringsBuf = buf
+	return &toc, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index 092cf584af3..aeb7cfd4f01 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -5,6 +5,7 @@ package compressor
 // larger software like the graph drivers.
 
 import (
+	"bufio"
 	"encoding/base64"
 	"io"
 	"io/ioutil"
@@ -15,6 +16,189 @@ import (
 	"github.com/vbatts/tar-split/archive/tar"
 )
 
+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+	reader    *bufio.Reader
+	fileOff   int64
+	zeros     int64
+	from      int64
+	threshold int64
+
+	state int
+}
+
+const (
+	holesFinderStateRead = iota
+	holesFinderStateAccumulate
+	holesFinderStateFound
+	holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+	for {
+		switch f.state {
+		// reading the file stream
+		case holesFinderStateRead:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				return 0, b, err
+			}
+
+			if b != 0 {
+				return 0, b, err
+			}
+
+			f.zeros = 1
+			if f.zeros == f.threshold {
+				f.state = holesFinderStateFound
+			} else {
+				f.state = holesFinderStateAccumulate
+			}
+		// accumulating zeros, but still didn't reach the threshold
+		case holesFinderStateAccumulate:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+					continue
+				}
+				return 0, b, err
+			}
+
+			if b == 0 {
+				f.zeros++
+				if f.zeros == f.threshold {
+					f.state = holesFinderStateFound
+				}
+			} else {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+			}
+		// found a hole. Number of zeros >= threshold
+		case holesFinderStateFound:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+				}
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			if b != 0 {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			f.zeros++
+		// reached EOF. Flush pending zeros if any.
+		case holesFinderStateEOF:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			return 0, 0, io.EOF
+		}
+	}
+}
+
+type rollingChecksumReader struct {
+	reader      *holesFinder
+	closed      bool
+	rollsum     *RollSum
+	pendingHole int64
+
+	// WrittenOut is the total number of bytes read from
+	// the stream.
+	WrittenOut int64
+
+	// IsLastChunkZeros tells whether the last generated
+	// chunk is a hole (made of consecutive zeros). If it
+	// is false, then the last chunk is a data chunk
+	// generated by the rolling checksum.
+ IsLastChunkZeros bool +} + +func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { + rc.IsLastChunkZeros = false + + if rc.pendingHole > 0 { + toCopy := int64(len(b)) + if rc.pendingHole < toCopy { + toCopy = rc.pendingHole + } + rc.pendingHole -= toCopy + for i := int64(0); i < toCopy; i++ { + b[i] = 0 + } + + rc.WrittenOut += toCopy + + rc.IsLastChunkZeros = true + + // if there are no other zeros left, terminate the chunk + return rc.pendingHole == 0, int(toCopy), nil + } + + if rc.closed { + return false, 0, io.EOF + } + + for i := 0; i < len(b); i++ { + holeLen, n, err := rc.reader.ReadByte() + if err != nil { + if err == io.EOF { + rc.closed = true + if i == 0 { + return false, 0, err + } + return false, i, nil + } + // Report any other error type + return false, -1, err + } + if holeLen > 0 { + for j := int64(0); j < holeLen; j++ { + rc.rollsum.Roll(0) + } + rc.pendingHole = holeLen + return true, i, nil + } + b[i] = n + rc.WrittenOut++ + rc.rollsum.Roll(n) + if rc.rollsum.OnSplitWithBits(RollsumBits) { + return true, i + 1, nil + } + } + return false, len(b), nil +} + +type chunk struct { + ChunkOffset int64 + Offset int64 + Checksum string + ChunkSize int64 + ChunkType string +} + func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { // total written so far. Used to retrieve partial offsets in the file dest := ioutils.NewWriteCounter(destFile) @@ -64,40 +248,78 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r if _, err := zstdWriter.Write(rawBytes); err != nil { return err } - payloadDigester := digest.Canonical.Digester() - payloadChecksum := payloadDigester.Hash() - payloadDest := io.MultiWriter(payloadChecksum, zstdWriter) + payloadDigester := digest.Canonical.Digester() + chunkDigester := digest.Canonical.Digester() // Now handle the payload, if any - var startOffset, endOffset int64 + startOffset := int64(0) + lastOffset := int64(0) + lastChunkOffset := int64(0) + checksum := "" + + chunks := []chunk{} + + hf := &holesFinder{ + threshold: holesThreshold, + reader: bufio.NewReader(tr), + } + + rcReader := &rollingChecksumReader{ + reader: hf, + rollsum: NewRollSum(), + } + + payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) for { - read, errRead := tr.Read(buf) + mustSplit, read, errRead := rcReader.Read(buf) if errRead != nil && errRead != io.EOF { return err } - - // restart the compression only if there is - // a payload. + // restart the compression only if there is a payload. 
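rcReader.Read reports mustSplit either when a hole ends or when the rolling checksum fires; the checksum half of that decision, using the RollSum type added later in this patch (rollsum.go), reduces to:

// splitPoints returns content-defined boundaries: a cut lands wherever the
// low RollsumBits bits of the rolling state are all ones, so the expected
// chunk size is about 2^RollsumBits bytes and boundaries track content
// rather than absolute offsets, surviving insertions earlier in the stream.
func splitPoints(data []byte) []int {
	rs := NewRollSum()
	var cuts []int
	for i, b := range data {
		rs.Roll(b)
		if rs.OnSplitWithBits(RollsumBits) {
			cuts = append(cuts, i+1) // boundary after byte i
		}
	}
	return cuts
}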
if read > 0 { if startOffset == 0 { startOffset, err = restartCompression() if err != nil { return err } + lastOffset = startOffset + } + + if _, err := payloadDest.Write(buf[:read]); err != nil { + return err } - _, err := payloadDest.Write(buf[:read]) + } + if (mustSplit || errRead == io.EOF) && startOffset > 0 { + off, err := restartCompression() if err != nil { return err } + + chunkSize := rcReader.WrittenOut - lastChunkOffset + if chunkSize > 0 { + chunkType := internal.ChunkTypeData + if rcReader.IsLastChunkZeros { + chunkType = internal.ChunkTypeZeros + } + + chunks = append(chunks, chunk{ + ChunkOffset: lastChunkOffset, + Offset: lastOffset, + Checksum: chunkDigester.Digest().String(), + ChunkSize: chunkSize, + ChunkType: chunkType, + }) + } + + lastOffset = off + lastChunkOffset = rcReader.WrittenOut + chunkDigester = digest.Canonical.Digester() + payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) } if errRead == io.EOF { if startOffset > 0 { - endOffset, err = restartCompression() - if err != nil { - return err - } checksum = payloadDigester.Digest().String() } break @@ -112,30 +334,42 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r for k, v := range hdr.Xattrs { xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v)) } - m := internal.FileMetadata{ - Type: typ, - Name: hdr.Name, - Linkname: hdr.Linkname, - Mode: hdr.Mode, - Size: hdr.Size, - UID: hdr.Uid, - GID: hdr.Gid, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - Devmajor: hdr.Devmajor, - Devminor: hdr.Devminor, - Xattrs: xattrs, - Digest: checksum, - Offset: startOffset, - EndOffset: endOffset, - - // ChunkSize is 0 for the last chunk - ChunkSize: 0, - ChunkOffset: 0, - ChunkDigest: checksum, - } - metadata = append(metadata, m) + entries := []internal.FileMetadata{ + { + Type: typ, + Name: hdr.Name, + Linkname: hdr.Linkname, + Mode: hdr.Mode, + Size: hdr.Size, + UID: hdr.Uid, + GID: hdr.Gid, + ModTime: &hdr.ModTime, + AccessTime: &hdr.AccessTime, + ChangeTime: &hdr.ChangeTime, + Devmajor: hdr.Devmajor, + Devminor: hdr.Devminor, + Xattrs: xattrs, + Digest: checksum, + Offset: startOffset, + EndOffset: lastOffset, + }, + } + for i := 1; i < len(chunks); i++ { + entries = append(entries, internal.FileMetadata{ + Type: internal.TypeChunk, + Name: hdr.Name, + ChunkOffset: chunks[i].ChunkOffset, + }) + } + if len(chunks) > 1 { + for i := range chunks { + entries[i].ChunkSize = chunks[i].ChunkSize + entries[i].Offset = chunks[i].Offset + entries[i].ChunkDigest = chunks[i].Checksum + entries[i].ChunkType = chunks[i].ChunkType + } + } + metadata = append(metadata, entries...) } rawBytes := tr.RawBytes() @@ -212,7 +446,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level // ZstdCompressor is a CompressorFunc for the zstd compression algorithm. func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { if level == nil { - l := 3 + l := 10 level = &l } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 00000000000..f4dfad822e9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,81 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const windowSize = 64 // Roll assumes windowSize is a power of 2 +const charOffset = 31 + +const blobBits = 13 +const blobSize = 1 << blobBits // 8k + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. +func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. 
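+// (OnSplit above is equivalent to OnSplitWithBits(blobBits), i.e. n = 13.)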
+func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + rsum := rs.Digest() >> (blobBits + 1) + return blobBits + bits.TrailingZeros32(^rsum) +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index c91c43d85cd..3bb5286d92d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -8,11 +8,11 @@ import ( "archive/tar" "bytes" "encoding/binary" - "encoding/json" "fmt" "io" "time" + jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" ) @@ -20,6 +20,9 @@ import ( type TOC struct { Version int `json:"version"` Entries []FileMetadata `json:"entries"` + + // internal: used by unmarshalToc + StringsBuf bytes.Buffer `json:"-"` } type FileMetadata struct { @@ -27,25 +30,33 @@ type FileMetadata struct { Name string `json:"name"` Linkname string `json:"linkName,omitempty"` Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size"` - UID int `json:"uid"` - GID int `json:"gid"` - ModTime time.Time `json:"modtime"` - AccessTime time.Time `json:"accesstime"` - ChangeTime time.Time `json:"changetime"` - Devmajor int64 `json:"devMajor"` - Devminor int64 `json:"devMinor"` + Size int64 `json:"size,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` + ModTime *time.Time `json:"modtime,omitempty"` + AccessTime *time.Time `json:"accesstime,omitempty"` + ChangeTime *time.Time `json:"changetime,omitempty"` + Devmajor int64 `json:"devMajor,omitempty"` + Devminor int64 `json:"devMinor,omitempty"` Xattrs map[string]string `json:"xattrs,omitempty"` Digest string `json:"digest,omitempty"` Offset int64 `json:"offset,omitempty"` EndOffset int64 `json:"endOffset,omitempty"` - // Currently chunking is not supported. ChunkSize int64 `json:"chunkSize,omitempty"` ChunkOffset int64 `json:"chunkOffset,omitempty"` ChunkDigest string `json:"chunkDigest,omitempty"` + ChunkType string `json:"chunkType,omitempty"` + + // internal: computed by mergeTOCEntries. 
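+	// It is only populated for TypeReg entries, listing the file's chunk
+	// entries in file order.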
+ Chunks []*FileMetadata `json:"-"` } +const ( + ChunkTypeData = "" + ChunkTypeZeros = "zeros" +) + const ( TypeReg = "reg" TypeChunk = "chunk" @@ -123,6 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off Entries: metadata, } + var json = jsoniter.ConfigCompatibleWithStandardLibrary // Generate the manifest manifest, err := json.Marshal(toc) if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 6efc6a4c845..7de20feaaa0 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -4,8 +4,8 @@ import ( archivetar "archive/tar" "context" "encoding/base64" - "encoding/json" "fmt" + "hash" "io" "io/ioutil" "os" @@ -13,6 +13,8 @@ import ( "reflect" "sort" "strings" + "sync" + "sync/atomic" "syscall" "time" @@ -25,6 +27,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" + securejoin "github.com/cyphar/filepath-securejoin" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" @@ -41,24 +44,35 @@ const ( bigDataKey = "zstd-chunked-manifest" fileTypeZstdChunked = iota - fileTypeEstargz = iota + fileTypeEstargz + fileTypeNoCompression + fileTypeHole + + copyGoRoutines = 32 ) type compressedFileType int type chunkedDiffer struct { - stream ImageSourceSeekable - manifest []byte - layersMetadata map[string][]internal.FileMetadata - layersTarget map[string]string - tocOffset int64 - fileType compressedFileType + stream ImageSourceSeekable + manifest []byte + layersCache *layersCache + tocOffset int64 + fileType compressedFileType + + copyBuffer []byte gzipReader *pgzip.Reader + zstdReader *zstd.Decoder + rawReader io.Reader +} + +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, } -func timeToTimespec(time time.Time) (ts unix.Timespec) { - if time.IsZero() { +func timeToTimespec(time *time.Time) (ts unix.Timespec) { + if time == nil || time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 ts.Nsec = ((1 << 30) - 2) @@ -67,11 +81,29 @@ func timeToTimespec(time time.Time) (ts unix.Timespec) { return unix.NsecToTimespec(time.UnixNano()) } +func doHardLink(srcFd int, destDirFd int, destBase string) error { + doLink := func() error { + // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses + // /proc/self/fd doesn't and can be used with rootless. 
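+		// The kernel dereferences the /proc/self/fd magic link to the open
+		// inode, which is why AT_SYMLINK_FOLLOW is passed below.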
+ srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) + return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW) + } + + err := doLink() + + // if the destination exists, unlink it first and try again + if err != nil && os.IsExist(err) { + unix.Unlinkat(destDirFd, destBase, 0) + return doLink() + } + return err +} + func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { src := fmt.Sprintf("/proc/self/fd/%d", srcFd) st, err := os.Stat(src) if err != nil { - return nil, -1, err + return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err) } copyWithFileRange, copyWithFileClone := true, true @@ -83,20 +115,7 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us if err == nil { defer destDir.Close() - doLink := func() error { - // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses - // /proc/self/fd doesn't and can be used with rootless. - srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) - return unix.Linkat(unix.AT_FDCWD, srcPath, int(destDir.Fd()), destBase, unix.AT_SYMLINK_FOLLOW) - } - - err := doLink() - - // if the destination exists, unlink it first and try again - if err != nil && os.IsExist(err) { - unix.Unlinkat(int(destDir.Fd()), destBase, 0) - err = doLink() - } + err := doHardLink(srcFd, int(destDir.Fd()), destBase) if err == nil { return nil, st.Size(), nil } @@ -106,63 +125,15 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us // If the destination file already exists, we shouldn't blow it away dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode) if err != nil { - return nil, -1, err + return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err) } err = driversCopy.CopyRegularToFile(src, dstFile, st, ©WithFileRange, ©WithFileClone) if err != nil { dstFile.Close() - return nil, -1, err - } - return dstFile, st.Size(), err -} - -func prepareOtherLayersCache(layersMetadata map[string][]internal.FileMetadata) map[string]map[string][]*internal.FileMetadata { - maps := make(map[string]map[string][]*internal.FileMetadata) - - for layerID, v := range layersMetadata { - r := make(map[string][]*internal.FileMetadata) - for i := range v { - if v[i].Digest != "" { - r[v[i].Digest] = append(r[v[i].Digest], &v[i]) - } - } - maps[layerID] = r - } - return maps -} - -func getLayersCache(store storage.Store) (map[string][]internal.FileMetadata, map[string]string, error) { - allLayers, err := store.Layers() - if err != nil { - return nil, nil, err - } - - layersMetadata := make(map[string][]internal.FileMetadata) - layersTarget := make(map[string]string) - for _, r := range allLayers { - manifestReader, err := store.LayerBigData(r.ID, bigDataKey) - if err != nil { - continue - } - defer manifestReader.Close() - manifest, err := ioutil.ReadAll(manifestReader) - if err != nil { - return nil, nil, err - } - var toc internal.TOC - if err := json.Unmarshal(manifest, &toc); err != nil { - continue - } - layersMetadata[r.ID] = toc.Entries - target, err := store.DifferTarget(r.ID) - if err != nil { - return nil, nil, err - } - layersTarget[r.ID] = target + return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err) } - - return layersMetadata, layersTarget, nil + return dstFile, st.Size(), nil } // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
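Not part of the diff, but useful context for the rollsum.go file added above: content-defined chunking feeds every payload byte to the rolling sum and cuts a chunk wherever OnSplitWithBits fires, so identical content produces identical chunk boundaries regardless of its offset in the stream. A minimal sketch, assuming only the vendored RollSum API (NewRollSum, Roll, OnSplitWithBits); splitIntoChunks is a hypothetical name:

func splitIntoChunks(data []byte, bits uint32) [][]byte {
	rs := NewRollSum()
	var chunks [][]byte
	start := 0
	for i, b := range data {
		rs.Roll(b)
		// The low `bits` bits of the checksum are all ones: cut here.
		if rs.OnSplitWithBits(bits) {
			chunks = append(chunks, data[start:i+1])
			start = i + 1
		}
	}
	if start < len(data) { // trailing bytes form the last chunk
		chunks = append(chunks, data[start:])
	}
	return chunks
}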
@@ -179,67 +150,71 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) if err != nil { - return nil, err + return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } - layersMetadata, layersTarget, err := getLayersCache(store) + layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ - stream: iss, - manifest: manifest, - layersMetadata: layersMetadata, - layersTarget: layersTarget, - tocOffset: tocOffset, - fileType: fileTypeZstdChunked, + copyBuffer: makeCopyBuffer(), + stream: iss, + manifest: manifest, + layersCache: layersCache, + tocOffset: tocOffset, + fileType: fileTypeZstdChunked, }, nil } func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations) if err != nil { - return nil, err + return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } - layersMetadata, layersTarget, err := getLayersCache(store) + layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ - stream: iss, - manifest: manifest, - layersMetadata: layersMetadata, - layersTarget: layersTarget, - tocOffset: tocOffset, - fileType: fileTypeEstargz, + copyBuffer: makeCopyBuffer(), + stream: iss, + manifest: manifest, + layersCache: layersCache, + tocOffset: tocOffset, + fileType: fileTypeEstargz, }, nil } +func makeCopyBuffer() []byte { + return make([]byte, 2<<20) +} + // copyFileFromOtherLayer copies a file from another layer // file is the file to look for. // source is the path to the source layer checkout. -// otherFile contains the metadata for the file. +// name is the path to the file to copy in source. // dirfd is an open file descriptor to the destination root directory. // useHardLinks defines whether the deduplication can be performed using hard links. -func copyFileFromOtherLayer(file *internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0) if err != nil { - return false, nil, 0, err + return false, nil, 0, fmt.Errorf("open source file: %w", err) } defer unix.Close(srcDirfd) - srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0) + srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0) if err != nil { - return false, nil, 0, err + return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err) } defer srcFile.Close() dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks) if err != nil { - return false, nil, 0, err + return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err) } - return true, dstFile, written, err + return true, dstFile, written, nil } // canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile. 
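The 2 MiB buffer from makeCopyBuffer above is threaded through every io.CopyBuffer call in this file. A minimal sketch of why, assuming nothing beyond the standard library (copyAll is a hypothetical helper): io.Copy allocates a fresh 32 KiB buffer per call, while io.CopyBuffer reuses one caller-owned buffer across an arbitrary number of copies.

// assumes: import "io"
func copyAll(dst io.Writer, srcs []io.Reader) error {
	buf := make([]byte, 2<<20) // one 2 MiB buffer shared by every copy
	for _, src := range srcs {
		if _, err := io.CopyBuffer(dst, src, buf); err != nil {
			return err
		}
	}
	return nil
}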
@@ -275,10 +250,6 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo return false } - xattrsToIgnore := map[string]interface{}{ - "security.selinux": true, - } - xattrs := make(map[string]string) for _, x := range listXattrs { v, err := system.Lgetxattr(path, x) @@ -301,45 +272,9 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo return canDedupMetadataWithHardLink(file, &otherFile) } -// findFileInOtherLayers finds the specified file in other layers. -// file is the file to look for. -// dirfd is an open file descriptor to the checkout root directory. -// layersMetadata contains the metadata for each layer in the storage. -// layersTarget maps each layer to its checkout on disk. -// useHardLinks defines whether the deduplication can be performed using hard links. -func findFileInOtherLayers(file *internal.FileMetadata, dirfd int, layersMetadata map[string]map[string][]*internal.FileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) { - // this is ugly, needs to be indexed - for layerID, checksums := range layersMetadata { - source, ok := layersTarget[layerID] - if !ok { - continue - } - files, found := checksums[file.Digest] - if !found { - continue - } - for _, candidate := range files { - // check if it is a valid candidate to dedup file - if useHardLinks && !canDedupMetadataWithHardLink(file, candidate) { - continue - } - - found, dstFile, written, err := copyFileFromOtherLayer(file, source, candidate, dirfd, useHardLinks) - if found && err == nil { - return found, dstFile, written, err - } - } - } - // If hard links deduplication was used and it has failed, try again without hard links. - if useHardLinks { - return findFileInOtherLayers(file, dirfd, layersMetadata, layersTarget, false) - } - return false, nil, 0, nil -} - -func getFileDigest(f *os.File) (digest.Digest, error) { +func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) { digester := digest.Canonical.Digester() - if _, err := io.Copy(digester.Hash(), f); err != nil { + if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil { return "", err } return digester.Digest(), nil @@ -401,7 +336,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di // file is the file to look for. // dirfd is an open fd to the destination checkout. // useHardLinks defines whether the deduplication can be performed using hard links. -func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) { sourceFile := filepath.Clean(filepath.Join("/", file.Name)) if !strings.HasPrefix(sourceFile, "/usr/") { // limit host deduplication to files under /usr. 
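getFileDigest above is what makes host-side deduplication safe: a candidate file only replaces a download when its actual content hashes to the digest recorded in the TOC. A minimal self-contained sketch of the same computation (fileDigest is a hypothetical name):

// assumes: import ("io"; "os"; digest "github.com/opencontainers/go-digest")
func fileDigest(path string, buf []byte) (digest.Digest, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// digest.Canonical is SHA-256; reuse the caller's buffer for the copy.
	digester := digest.Canonical.Digester()
	if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil {
		return "", err
	}
	return digester.Digest(), nil
}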
@@ -430,7 +365,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool return false, nil, 0, err } - checksum, err := getFileDigest(f) + checksum, err := getFileDigest(f, buf) if err != nil { return false, nil, 0, err } @@ -452,7 +387,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool dstFile.Close() return false, nil, 0, err } - checksum, err = getFileDigest(f) + checksum, err = getFileDigest(f, buf) if err != nil { dstFile.Close() return false, nil, 0, err @@ -464,6 +399,19 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool return true, dstFile, written, nil } +// findFileInOtherLayers finds the specified file in other layers. +// cache is the layers cache to use. +// file is the file to look for. +// dirfd is an open file descriptor to the checkout root directory. +// useHardLinks defines whether the deduplication can be performed using hard links. +func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + target, name, err := cache.findFileInOtherLayers(file, useHardLinks) + if err != nil || name == "" { + return false, nil, 0, err + } + return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks) +} + func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { return nil @@ -490,22 +438,50 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption return nil } -type missingFile struct { - File *internal.FileMetadata +type originFile struct { + Root string + Path string + Offset int64 +} + +type missingFileChunk struct { Gap int64 + Hole bool + + File *internal.FileMetadata + + CompressedSize int64 + UncompressedSize int64 } -func (m missingFile) Length() int64 { - return m.File.EndOffset - m.File.Offset +type missingPart struct { + Hole bool + SourceChunk *ImageSourceChunk + OriginFile *originFile + Chunks []missingFileChunk } -type missingChunk struct { - RawChunk ImageSourceChunk - Files []missingFile +func (o *originFile) OpenFile() (io.ReadCloser, error) { + srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file: %w", err) + } + defer unix.Close(srcDirfd) + + srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file under target rootfs: %w", err) + } + + if _, err := srcFile.Seek(o.Offset, 0); err != nil { + srcFile.Close() + return nil, err + } + return srcFile, nil } // setFileAttrs sets the file attributes for file given metadata -func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { if file == nil || file.Fd() < 0 { return errors.Errorf("invalid file") } @@ -515,209 +491,600 @@ func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetada if err != nil { return err } + + // If it is a symlink, force to use the path if t == tar.TypeSymlink { - return nil + usePath = true } - if err := unix.Fchown(fd, metadata.UID, metadata.GID); err != nil { + baseName := "" + if usePath { + dirName := filepath.Dir(metadata.Name) + if dirName != "" { + parentFd, err := openFileUnderRoot(dirName, dirfd, 
unix.O_PATH|unix.O_DIRECTORY, 0) + if err != nil { + return err + } + defer parentFd.Close() + + dirfd = int(parentFd.Fd()) + } + baseName = filepath.Base(metadata.Name) + } + + doChown := func() error { + if usePath { + return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchown(fd, metadata.UID, metadata.GID) + } + + doSetXattr := func(k string, v []byte) error { + return unix.Fsetxattr(fd, k, v, 0) + } + + doUtimes := func() error { + ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} + if usePath { + return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0) + } + + doChmod := func() error { + if usePath { + return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchmod(fd, uint32(mode)) + } + + if err := doChown(); err != nil { if !options.IgnoreChownErrors { - return err + return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err) } } + canIgnore := func(err error) bool { + return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP) + } + for k, v := range metadata.Xattrs { + if _, found := xattrsToIgnore[k]; found { + continue + } data, err := base64.StdEncoding.DecodeString(v) if err != nil { - return err + return fmt.Errorf("decode xattr %q: %w", v, err) } - if err := unix.Fsetxattr(fd, k, data, 0); err != nil { - return err + if err := doSetXattr(k, data); !canIgnore(err) { + return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err) } } - ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} - if err := unix.UtimesNanoAt(fd, "", ts, 0); err != nil && errors.Is(err, unix.ENOSYS) { - return err + if err := doUtimes(); !canIgnore(err) { + return fmt.Errorf("set utimes for %q: %w", metadata.Name, err) } - if err := unix.Fchmod(fd, uint32(mode)); err != nil { - return err + if err := doChmod(); !canIgnore(err) { + return fmt.Errorf("chmod %q: %w", metadata.Name, err) } return nil } -// openFileUnderRoot safely opens a file under the specified root directory using openat2 -// name is the path to open relative to dirfd. -// dirfd is an open file descriptor to the target checkout directory. -// flags are the flags top pass to the open syscall. -// mode specifies the mode to use for newly created files. -func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { +func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + root := fmt.Sprintf("/proc/self/fd/%d", dirfd) + + targetRoot, err := os.Readlink(root) + if err != nil { + return -1, err + } + + hasNoFollow := (flags & unix.O_NOFOLLOW) != 0 + + fd := -1 + // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the + // last component as the path to openat(). 
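+	// SecureJoin resolves any intermediate symlinks relative to the root,
+	// while O_NOFOLLOW applies only to the final component, so the final
+	// component is opened separately via openat().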
+ if hasNoFollow { + dirName := filepath.Dir(name) + if dirName != "" { + newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name)) + if err != nil { + return -1, err + } + root = newRoot + } + + parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + if err != nil { + return -1, err + } + defer unix.Close(parentDirfd) + + fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode)) + if err != nil { + return -1, err + } + } else { + newPath, err := securejoin.SecureJoin(root, name) + if err != nil { + return -1, err + } + fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode)) + if err != nil { + return -1, err + } + } + + target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd)) + if err != nil { + unix.Close(fd) + return -1, err + } + + // Add an additional check to make sure the opened fd is inside the rootfs + if !strings.HasPrefix(target, targetRoot) { + unix.Close(fd) + return -1, fmt.Errorf("error while resolving %q. It resolves outside the root directory", name) + } + + return fd, err +} + +func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { how := unix.OpenHow{ Flags: flags, Mode: uint64(mode & 07777), Resolve: unix.RESOLVE_IN_ROOT, } + return unix.Openat2(dirfd, name, &how) +} - fd, err := unix.Openat2(dirfd, name, &how) - if err != nil { - return nil, err +// skipOpenat2 is set when openat2 is not supported by the underlying kernel and avoid +// using it again. +var skipOpenat2 int32 + +// openFileUnderRootRaw tries to open a file using openat2 and if it is not supported fallbacks to a +// userspace lookup. +func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + var fd int + var err error + if atomic.LoadInt32(&skipOpenat2) > 0 { + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } else { + fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode) + // If the function failed with ENOSYS, switch off the support for openat2 + // and fallback to using safejoin. + if err != nil && errors.Is(err, unix.ENOSYS) { + atomic.StoreInt32(&skipOpenat2, 1) + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } } - return os.NewFile(uintptr(fd), name), nil + return fd, err } -func (c *chunkedDiffer) createFileFromCompressedStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) (err error) { - file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) - if err != nil { - return err - } - defer func() { - err2 := file.Close() - if err == nil { - err = err2 +// openFileUnderRoot safely opens a file under the specified root directory using openat2 +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// flags are the flags to pass to the open syscall. +// mode specifies the mode to use for newly created files. 
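+// On kernels without openat2, openFileUnderRootRaw transparently falls back
+// to the securejoin-based userspace resolution implemented above.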
+func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + hasCreate := (flags & unix.O_CREAT) != 0 + if errors.Is(err, unix.ENOENT) && hasCreate { + parent := filepath.Dir(name) + if parent != "" { + newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err2 == nil { + defer newDirfd.Close() + fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } } - }() + } + return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) +} - digester := digest.Canonical.Digester() - checksum := digester.Hash() - to := io.MultiWriter(file, checksum) +// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// mode specifies the mode to use for newly created files. +func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + if errors.Is(err, unix.ENOENT) { + parent := filepath.Dir(name) + if parent != "" { + pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode) + if err2 != nil { + return nil, err + } + defer pDir.Close() - switch c.fileType { - case fileTypeZstdChunked: - z, err := zstd.NewReader(reader) - if err != nil { - return err - } - defer z.Close() + baseName := filepath.Base(name) - if _, err := io.Copy(to, io.LimitReader(z, metadata.Size)); err != nil { - return err + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil { + return nil, err + } + + fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } } - if _, err := io.Copy(ioutil.Discard, reader); err != nil { - return err + } + return nil, err +} + +func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) { + switch { + case partCompression == fileTypeHole: + // The entire part is a hole. Do not need to read from a file. + c.rawReader = nil + return fileTypeHole, nil + case mf.Hole: + // Only the missing chunk in the requested part refers to a hole. + // The received data must be discarded. 
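+		// Discarding (rather than seeking) keeps the part's stream position
+		// aligned for the chunks that follow in the same part.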
+ limitReader := io.LimitReader(from, mf.CompressedSize) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) + return fileTypeHole, err + case partCompression == fileTypeZstdChunked: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.zstdReader == nil { + r, err := zstd.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.zstdReader = r + } else { + if err := c.zstdReader.Reset(c.rawReader); err != nil { + return partCompression, err + } } - case fileTypeEstargz: + case partCompression == fileTypeEstargz: + c.rawReader = io.LimitReader(from, mf.CompressedSize) if c.gzipReader == nil { - r, err := pgzip.NewReader(reader) + r, err := pgzip.NewReader(c.rawReader) if err != nil { - return err + return partCompression, err } c.gzipReader = r } else { - if err := c.gzipReader.Reset(reader); err != nil { - return err + if err := c.gzipReader.Reset(c.rawReader); err != nil { + return partCompression, err } } - defer c.gzipReader.Close() + case partCompression == fileTypeNoCompression: + c.rawReader = io.LimitReader(from, mf.UncompressedSize) + default: + return partCompression, fmt.Errorf("unknown file type %q", c.fileType) + } + return partCompression, nil +} - if _, err := io.Copy(to, io.LimitReader(c.gzipReader, metadata.Size)); err != nil { +// hashHole writes SIZE zeros to the specified hasher +func hashHole(h hash.Hash, size int64, copyBuffer []byte) error { + count := int64(len(copyBuffer)) + if size < count { + count = size + } + for i := int64(0); i < count; i++ { + copyBuffer[i] = 0 + } + for size > 0 { + count = int64(len(copyBuffer)) + if size < count { + count = size + } + if _, err := h.Write(copyBuffer[:count]); err != nil { return err } - if _, err := io.Copy(ioutil.Discard, reader); err != nil { + size -= count + } + return nil +} + +// appendHole creates a hole with the specified size at the open fd. +func appendHole(fd int, size int64) error { + off, err := unix.Seek(fd, size, unix.SEEK_CUR) + if err != nil { + return err + } + // Make sure the file size is changed. It might be the last hole and no other data written afterwards. 
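+	// The region skipped over by the seek is never written, so it reads back
+	// as zeros without consuming disk space.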
+ if err := unix.Ftruncate(fd, off); err != nil { + return err + } + return nil +} + +func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error { + switch compression { + case fileTypeZstdChunked: + defer c.zstdReader.Reset(nil) + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeEstargz: + defer c.gzipReader.Close() + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeNoCompression: + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeHole: + if err := appendHole(int(destFile.file.Fd()), size); err != nil { + return err + } + if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil { return err } default: return fmt.Errorf("unknown file type %q", c.fileType) } + return nil +} + +type destinationFile struct { + dirfd int + file *os.File + digester digest.Digester + hash hash.Hash + to io.Writer + metadata *internal.FileMetadata + options *archive.TarOptions +} + +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) { + file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) + if err != nil { + return nil, err + } + + digester := digest.Canonical.Digester() + hash := digester.Hash() + to := io.MultiWriter(file, hash) + + return &destinationFile{ + file: file, + digester: digester, + hash: hash, + to: to, + metadata: metadata, + options: options, + dirfd: dirfd, + }, nil +} - manifestChecksum, err := digest.Parse(metadata.Digest) +func (d *destinationFile) Close() error { + manifestChecksum, err := digest.Parse(d.metadata.Digest) if err != nil { return err } - if digester.Digest() != manifestChecksum { - return fmt.Errorf("checksum mismatch for %q", dest) + if d.digester.Digest() != manifestChecksum { + return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum) } - return setFileAttrs(file, mode, metadata, options) + + return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) } -func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error { - for mc := 0; ; mc++ { - var part io.ReadCloser - select { - case p := <-streams: - part = p - case err := <-errs: - return err - } - if part == nil { - if mc == len(missingChunks) { - break +func closeDestinationFiles(files chan *destinationFile, errors chan error) { + for f := range files { + errors <- f.Close() + } + close(errors) +} + +func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { + var destFile *destinationFile + + filesToClose := make(chan *destinationFile, 3) + closeFilesErrors := make(chan error, 2) + + go closeDestinationFiles(filesToClose, closeFilesErrors) + defer func() { + close(filesToClose) + for e := range closeFilesErrors { + if e != nil && Err == nil { + Err = e } - return errors.Errorf("invalid stream returned") } - if mc == len(missingChunks) { - part.Close() - return errors.Errorf("too many chunks returned") + }() + + for _, missingPart := range missingParts { + var part 
io.ReadCloser + partCompression := c.fileType + switch { + case missingPart.Hole: + partCompression = fileTypeHole + case missingPart.OriginFile != nil: + var err error + part, err = missingPart.OriginFile.OpenFile() + if err != nil { + return err + } + partCompression = fileTypeNoCompression + case missingPart.SourceChunk != nil: + select { + case p := <-streams: + part = p + case err := <-errs: + return err + } + if part == nil { + return errors.Errorf("invalid stream returned") + } + default: + return errors.Errorf("internal error: missing part misses both local and remote data stream") } - for _, mf := range missingChunks[mc].Files { + for _, mf := range missingPart.Chunks { if mf.Gap > 0 { limitReader := io.LimitReader(part, mf.Gap) - _, err := io.Copy(ioutil.Discard, limitReader) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) if err != nil { - part.Close() - return err + Err = err + goto exit } continue } - limitReader := io.LimitReader(part, mf.Length()) + if mf.File.Name == "" { + Err = errors.Errorf("file name empty") + goto exit + } - if err := c.createFileFromCompressedStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil { - part.Close() - return err + compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf) + if err != nil { + Err = err + goto exit + } + + // Open the new file if it is different that what is already + // opened + if destFile == nil || destFile.metadata.Name != mf.File.Name { + var err error + if destFile != nil { + cleanup: + for { + select { + case err = <-closeFilesErrors: + if err != nil { + Err = err + goto exit + } + default: + break cleanup + } + } + filesToClose <- destFile + } + destFile, err = openDestinationFile(dirfd, mf.File, options) + if err != nil { + Err = err + goto exit + } + } + + if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil { + Err = err + goto exit + } + if c.rawReader != nil { + if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil { + Err = err + goto exit + } + } + } + exit: + if part != nil { + part.Close() + if Err != nil { + break } } - part.Close() } + + if destFile != nil { + return destFile.Close() + } + return nil } -func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk { - if len(missingChunks) <= target { - return missingChunks +func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { + getGap := func(missingParts []missingPart, i int) int { + prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length + return int(missingParts[i].SourceChunk.Offset - prev) + } + getCost := func(missingParts []missingPart, i int) int { + cost := getGap(missingParts, i) + if missingParts[i-1].OriginFile != nil { + cost += int(missingParts[i-1].SourceChunk.Length) + } + if missingParts[i].OriginFile != nil { + cost += int(missingParts[i].SourceChunk.Length) + } + return cost + } + + // simple case: merge chunks from the same file. 
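+	// Two parts merge for free when they are contiguous in the blob (gap == 0),
+	// both come from the remote stream, and each holds a single chunk of the
+	// same file: the result is still one contiguous read of that file's data.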
+ newMissingParts := missingParts[0:1] + prevIndex := 0 + for i := 1; i < len(missingParts); i++ { + gap := getGap(missingParts, i) + if gap == 0 && missingParts[prevIndex].OriginFile == nil && + missingParts[i].OriginFile == nil && + !missingParts[prevIndex].Hole && !missingParts[i].Hole && + len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 && + missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name { + missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize + missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize + } else { + newMissingParts = append(newMissingParts, missingParts[i]) + prevIndex++ + } } + missingParts = newMissingParts - getGap := func(missingChunks []missingChunk, i int) int { - prev := missingChunks[i-1].RawChunk.Offset + missingChunks[i-1].RawChunk.Length - return int(missingChunks[i].RawChunk.Offset - prev) + if len(missingParts) <= target { + return missingParts } // this implementation doesn't account for duplicates, so it could merge // more than necessary to reach the specified target. Since target itself // is a heuristic value, it doesn't matter. - var gaps []int - for i := 1; i < len(missingChunks); i++ { - gaps = append(gaps, getGap(missingChunks, i)) + costs := make([]int, len(missingParts)-1) + for i := 1; i < len(missingParts); i++ { + costs[i-1] = getCost(missingParts, i) } - sort.Ints(gaps) + sort.Ints(costs) - toShrink := len(missingChunks) - target - targetValue := gaps[toShrink-1] + toShrink := len(missingParts) - target + if toShrink >= len(costs) { + toShrink = len(costs) - 1 + } + targetValue := costs[toShrink] - newMissingChunks := missingChunks[0:1] - for i := 1; i < len(missingChunks); i++ { - gap := getGap(missingChunks, i) - if gap > targetValue { - newMissingChunks = append(newMissingChunks, missingChunks[i]) + newMissingParts = missingParts[0:1] + for i := 1; i < len(missingParts); i++ { + if getCost(missingParts, i) > targetValue { + newMissingParts = append(newMissingParts, missingParts[i]) } else { - prev := &newMissingChunks[len(newMissingChunks)-1] - prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length + gap := getGap(missingParts, i) + prev := &newMissingParts[len(newMissingParts)-1] + prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + prev.Hole = false + prev.OriginFile = nil if gap > 0 { - gapFile := missingFile{ + gapFile := missingFileChunk{ Gap: int64(gap), } - prev.Files = append(prev.Files, gapFile) + prev.Chunks = append(prev.Chunks, gapFile) } - prev.Files = append(prev.Files, missingChunks[i].Files...) + prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...) } } - return newMissingChunks + return newMissingParts } -func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error { +func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { var chunksToRequest []ImageSourceChunk - for _, c := range missingChunks { - chunksToRequest = append(chunksToRequest, c.RawChunk) + for _, c := range missingParts { + if c.OriginFile == nil && !c.Hole { + chunksToRequest = append(chunksToRequest, *c.SourceChunk) + } } // There are some missing files. Prepare a multirange request for the missing chunks. 
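mergeMissingChunks above trades bytes for round trips: when the server rejects a large multi-range request, nearby ranges are merged (re-downloading the gap between them) until the request is small enough. A minimal sketch of the underlying idea, with illustrative names (byteRange, coalesce, maxGap) and assuming sorted, non-overlapping input:

type byteRange struct{ offset, length uint64 }

func coalesce(ranges []byteRange, maxGap uint64) []byteRange {
	if len(ranges) == 0 {
		return nil
	}
	out := []byteRange{ranges[0]}
	for _, r := range ranges[1:] {
		prev := &out[len(out)-1]
		if r.offset-(prev.offset+prev.length) <= maxGap {
			// Close enough: absorb the gap and extend the previous range.
			prev.length = r.offset + r.length - prev.offset
		} else {
			out = append(out, r)
		}
	}
	return out
}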
@@ -731,32 +1098,32 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChun } if _, ok := err.(ErrBadRequest); ok { - requested := len(missingChunks) + requested := len(missingParts) // If the server cannot handle at least 64 chunks in a single request, just give up. if requested < 64 { return err } // Merge more chunks to request - missingChunks = mergeMissingChunks(missingChunks, requested/2) + missingParts = mergeMissingChunks(missingParts, requested/2) continue } return err } - if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil { + if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil { return err } return nil } -func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { - parent := filepath.Dir(metadata.Name) - base := filepath.Base(metadata.Name) +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { + parent := filepath.Dir(name) + base := filepath.Base(name) parentFd := dirfd if parent != "." { - parentFile, err := openFileUnderRoot(parent, dirfd, unix.O_DIRECTORY|unix.O_PATH|unix.O_RDONLY, 0) + parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0) if err != nil { return err } @@ -766,21 +1133,21 @@ func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opt if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { if !os.IsExist(err) { - return err + return fmt.Errorf("mkdir %q: %w", name, err) } } - file, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_RDONLY, 0) + file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0) if err != nil { return err } defer file.Close() - return setFileAttrs(file, mode, metadata, options) + return setFileAttrs(dirfd, file, mode, metadata, options, false) } func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { - sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_RDONLY, 0) + sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) if err != nil { return err } @@ -789,7 +1156,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." { - f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0) + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) if err != nil { return err } @@ -797,25 +1164,35 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti destDirFd = int(f.Fd()) } - err = unix.Linkat(int(sourceFile.Fd()), "", destDirFd, destBase, unix.AT_EMPTY_PATH) + err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase) if err != nil { - return err + return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) } - newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY, 0) + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0) if err != nil { + // If the target is a symlink, open the file with O_PATH. 
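+		// (opening a symlink itself with O_NOFOLLOW fails with ELOOP)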
+ if errors.Is(err, unix.ELOOP) { + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, true) + } return err } defer newFile.Close() - return setFileAttrs(newFile, mode, metadata, options) + return setFileAttrs(dirfd, newFile, mode, metadata, options, false) } func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." { - f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0) + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) if err != nil { return err } @@ -823,7 +1200,10 @@ func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, o destDirFd = int(f.Fd()) } - return unix.Symlinkat(metadata.Linkname, destDirFd, destBase) + if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil { + return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + return nil } type whiteoutHandler struct { @@ -832,13 +1212,16 @@ type whiteoutHandler struct { } func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { - file, err := openFileUnderRoot(path, d.Dirfd, unix.O_RDONLY, 0) + file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0) if err != nil { return err } defer file.Close() - return unix.Fsetxattr(int(file.Fd()), name, value, 0) + if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil { + return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err) + } + return nil } func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { @@ -847,7 +1230,7 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { dirfd := d.Dirfd if dir != "" { - dir, err := openFileUnderRoot(dir, d.Dirfd, unix.O_RDONLY, 0) + dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0) if err != nil { return err } @@ -856,12 +1239,16 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { dirfd = int(dir.Fd()) } - return unix.Mknodat(dirfd, base, mode, dev) + if err := unix.Mknodat(dirfd, base, mode, dev); err != nil { + return fmt.Errorf("mknod %q: %w", path, err) + } + + return nil } func checkChownErr(err error, name string, uid, gid int) error { if errors.Is(err, syscall.EINVAL) { - return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name) + return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) } return err } @@ -899,7 +1286,69 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo return def } +type findAndCopyFileOptions struct { + useHardLinks bool + enableHostDedup bool + ostreeRepos []string + options *archive.TarOptions +} + +func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { + finalizeFile := func(dstFile *os.File) error { + if dstFile != nil { + defer dstFile.Close() + if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { + return err + } + } + return nil + } + + found, 
dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + if copyOptions.enableHostDedup { + found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + } + return false, nil +} + func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) { + defer c.layersCache.release() + defer func() { + if c.zstdReader != nil { + c.zstdReader.Close() + } + }() + bigData := map[string][]byte{ bigDataKey: c.manifest, } @@ -927,14 +1376,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":") // Generate the manifest - var toc internal.TOC - if err := json.Unmarshal(c.manifest, &toc); err != nil { + toc, err := unmarshalToc(c.manifest) + if err != nil { return output, err } whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) - var missingChunks []missingChunk + var missingParts []missingPart mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { @@ -956,17 +1405,61 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0) if err != nil { - return output, err + return output, fmt.Errorf("cannot open %q: %w", dest, err) } defer unix.Close(dirfd) - otherLayersCache := prepareOtherLayersCache(c.layersMetadata) - // hardlinks can point to missing files. 
So create them after all files // are retrieved var hardLinks []hardLinkToCreate - missingChunksSize, totalChunksSize := int64(0), int64(0) + missingPartsSize, totalChunksSize := int64(0), int64(0) + + copyOptions := findAndCopyFileOptions{ + useHardLinks: useHardLinks, + enableHostDedup: enableHostDedup, + ostreeRepos: ostreeRepos, + options: options, + } + + type copyFileJob struct { + njob int + index int + mode os.FileMode + metadata *internal.FileMetadata + + found bool + err error + } + + var wg sync.WaitGroup + + copyResults := make([]copyFileJob, len(mergedEntries)) + + copyFileJobs := make(chan copyFileJob) + defer func() { + if copyFileJobs != nil { + close(copyFileJobs) + } + wg.Wait() + }() + + for i := 0; i < copyGoRoutines; i++ { + wg.Add(1) + jobs := copyFileJobs + + go func() { + defer wg.Done() + for job := range jobs { + found, err := c.findAndCopyFile(dirfd, job.metadata, ©Options, job.mode) + job.err = err + job.found = found + copyResults[job.njob] = job + } + }() + } + + filesToWaitFor := 0 for i, r := range mergedEntries { if options.ForceMask != nil { value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777) @@ -1016,7 +1509,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra return err } defer file.Close() - if err := setFileAttrs(file, mode, &r, options); err != nil { + if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil { return err } return nil @@ -1028,7 +1521,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } case tar.TypeDir: - if err := safeMkdir(dirfd, mode, &r, options); err != nil { + if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { return output, err } continue @@ -1062,74 +1555,95 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra totalChunksSize += r.Size - finalizeFile := func(dstFile *os.File) error { - if dstFile != nil { - defer dstFile.Close() - if err := setFileAttrs(dstFile, mode, &r, options); err != nil { - return err - } + if t == tar.TypeReg { + index := i + njob := filesToWaitFor + job := copyFileJob{ + mode: mode, + metadata: &mergedEntries[index], + index: index, + njob: njob, } - return nil + copyFileJobs <- job + filesToWaitFor++ } + } - found, dstFile, _, err := findFileInOtherLayers(&r, dirfd, otherLayersCache, c.layersTarget, useHardLinks) - if err != nil { - return output, err - } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } - continue - } + close(copyFileJobs) + copyFileJobs = nil - found, dstFile, _, err = findFileInOSTreeRepos(&r, ostreeRepos, dirfd, useHardLinks) - if err != nil { - return output, err + wg.Wait() + + for _, res := range copyResults[:filesToWaitFor] { + if res.err != nil { + return output, res.err } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } + // the file was already copied to its destination + // so nothing left to do. + if res.found { continue } - if enableHostDedup { - found, dstFile, _, err = findFileOnTheHost(&r, dirfd, useHardLinks) - if err != nil { - return output, err - } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } - continue + r := &mergedEntries[res.index] + + missingPartsSize += r.Size + + remainingSize := r.Size + + // the file is missing, attempt to find individual chunks. 
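+		// A chunk may still be resolvable locally: data chunks whose digest
+		// matches a chunk in another layer are copied from there, and chunks of
+		// zeros become holes; only what remains is fetched from the registry.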
+ for _, chunk := range r.Chunks { + compressedSize := int64(chunk.EndOffset - chunk.Offset) + size := remainingSize + if chunk.ChunkSize > 0 { + size = chunk.ChunkSize } - } + remainingSize = remainingSize - size - missingChunksSize += r.Size - if t == tar.TypeReg { rawChunk := ImageSourceChunk{ - Offset: uint64(r.Offset), - Length: uint64(r.EndOffset - r.Offset), + Offset: uint64(chunk.Offset), + Length: uint64(compressedSize), } - - file := missingFile{ - File: &mergedEntries[i], + file := missingFileChunk{ + File: &mergedEntries[res.index], + CompressedSize: compressedSize, + UncompressedSize: size, } - - missingChunks = append(missingChunks, missingChunk{ - RawChunk: rawChunk, - Files: []missingFile{ + mp := missingPart{ + SourceChunk: &rawChunk, + Chunks: []missingFileChunk{ file, }, - }) + } + + switch chunk.ChunkType { + case internal.ChunkTypeData: + root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk) + if err != nil { + return output, err + } + if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) { + missingPartsSize -= size + mp.OriginFile = &originFile{ + Root: root, + Path: path, + Offset: offset, + } + } + case internal.ChunkTypeZeros: + missingPartsSize -= size + mp.Hole = true + // Mark all chunks belonging to the missing part as holes + for i := range mp.Chunks { + mp.Chunks[i].Hole = true + } + } + missingParts = append(missingParts, mp) } } // There are some missing files. Prepare a multirange request for the missing chunks. - if len(missingChunks) > 0 { - missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks) - if err := c.retrieveMissingFiles(dest, dirfd, missingChunks, options); err != nil { + if len(missingParts) > 0 { + missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) + if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil { return output, err } } @@ -1141,31 +1655,69 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } if totalChunksSize > 0 { - logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingChunksSize, totalChunksSize, float32(missingChunksSize*100.0)/float32(totalChunksSize)) + logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) } return output, nil } +func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { + // ignore the metadata files for the estargz format. + if fileType != fileTypeEstargz { + return false + } + switch e.Name { + // ignore the metadata files for the estargz format. + case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName: + return true + } + return false +} + func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) { - var mergedEntries []internal.FileMetadata - var prevEntry *internal.FileMetadata - for _, entry := range entries { - e := entry + countNextChunks := func(start int) int { + count := 0 + for _, e := range entries[start:] { + if e.Type != TypeChunk { + return count + } + count++ + } + return count + } - // ignore the metadata files for the estargz format. 
- if fileType == fileTypeEstargz && (e.Name == estargz.PrefetchLandmark || e.Name == estargz.NoPrefetchLandmark || e.Name == estargz.TOCTarName) { + size := 0 + for _, entry := range entries { + if mustSkipFile(fileType, entry) { continue } + if entry.Type != TypeChunk { + size++ + } + } + mergedEntries := make([]internal.FileMetadata, size) + m := 0 + for i := 0; i < len(entries); i++ { + e := entries[i] + if mustSkipFile(fileType, e) { + continue + } if e.Type == TypeChunk { - if prevEntry == nil || prevEntry.Type != TypeReg { - return nil, errors.New("chunk type without a regular file") + return nil, fmt.Errorf("chunk type without a regular file") + } + + if e.Type == TypeReg { + nChunks := countNextChunks(i + 1) + + e.Chunks = make([]*internal.FileMetadata, nChunks+1) + for j := 0; j <= nChunks; j++ { + e.Chunks[j] = &entries[i+j] + e.EndOffset = entries[i+j].EndOffset } - prevEntry.EndOffset = e.EndOffset - continue + i += nChunks } - mergedEntries = append(mergedEntries, e) - prevEntry = &e + mergedEntries[m] = e + m++ } // stargz/estargz doesn't store EndOffset so let's calculate it here lastOffset := c.tocOffset @@ -1176,6 +1728,47 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i if mergedEntries[i].Offset != 0 { lastOffset = mergedEntries[i].Offset } + + lastChunkOffset := mergedEntries[i].EndOffset + for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- { + mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset + mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset + lastChunkOffset = mergedEntries[i].Chunks[j].Offset + } } return mergedEntries, nil } + +// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the +// same digest as chunk.ChunkDigest +func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { + parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + if err != nil { + return false + } + defer unix.Close(parentDirfd) + + fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0) + if err != nil { + return false + } + defer fd.Close() + + if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil { + return false + } + + r := io.LimitReader(fd, chunk.ChunkSize) + digester := digest.Canonical.Digester() + + if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil { + return false + } + + digest, err := digest.Parse(chunk.ChunkDigest) + if err != nil { + return false + } + + return digester.Digest() == digest +} diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index e6622cf1466..f6e0cfcfe86 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -12,109 +12,109 @@ type ThinpoolOptionsConfig struct { // grown. This is specified in terms of % of pool size. So a value of // 20 means that when threshold is hit, pool will be grown by 20% of // existing pool size. - AutoExtendPercent string `toml:"autoextend_percent"` + AutoExtendPercent string `toml:"autoextend_percent,omitempty"` // AutoExtendThreshold determines the pool extension threshold in terms // of percentage of pool size. For example, if threshold is 60, that // means when pool is 60% full, threshold has been hit. 
- AutoExtendThreshold string `toml:"autoextend_threshold"` + AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"` // BaseSize specifies the size to use when creating the base device, // which limits the size of images and containers. - BaseSize string `toml:"basesize"` + BaseSize string `toml:"basesize,omitempty"` // BlockSize specifies a custom blocksize to use for the thin pool. - BlockSize string `toml:"blocksize"` + BlockSize string `toml:"blocksize,omitempty"` // DirectLvmDevice specifies a custom block storage device to use for // the thin pool. - DirectLvmDevice string `toml:"directlvm_device"` + DirectLvmDevice string `toml:"directlvm_device,omitempty"` // DirectLvmDeviceForcewipes device even if device already has a // filesystem - DirectLvmDeviceForce string `toml:"directlvm_device_force"` + DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"` // Fs specifies the filesystem type to use for the base device. - Fs string `toml:"fs"` + Fs string `toml:"fs,omitempty"` // log_level sets the log level of devicemapper. - LogLevel string `toml:"log_level"` + LogLevel string `toml:"log_level,omitempty"` // MetadataSize specifies the size of the metadata for the thinpool // It will be used with the `pvcreate --metadata` option. - MetadataSize string `toml:"metadatasize"` + MetadataSize string `toml:"metadatasize,omitempty"` // MinFreeSpace specifies the min free space percent in a thin pool // require for new device creation to - MinFreeSpace string `toml:"min_free_space"` + MinFreeSpace string `toml:"min_free_space,omitempty"` // MkfsArg specifies extra mkfs arguments to be used when creating the // basedevice. - MkfsArg string `toml:"mkfsarg"` + MkfsArg string `toml:"mkfsarg,omitempty"` // MountOpt specifies extra mount options used when mounting the thin // devices. - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // UseDeferredDeletion marks device for deferred deletion - UseDeferredDeletion string `toml:"use_deferred_deletion"` + UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"` // UseDeferredRemoval marks device for deferred removal - UseDeferredRemoval string `toml:"use_deferred_removal"` + UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"` // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of // retries XFS should attempt to complete IO when ENOSPC (no space) // error is returned by underlying storage device. - XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"` + XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"` } type AufsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` } type BtrfsOptionsConfig struct { // MinSpace is the minimal spaces allocated to the device - MinSpace string `toml:"min_space"` + MinSpace string `toml:"min_space,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` } type OverlayOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. 
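The `IgnoreChownErrors` options in these structs exist because a rootless engine with a single mapped UID cannot chown layer files to other owners; the choice is between failing extraction and squashing ownership to the one available ID. A hedged sketch of the underlying pattern, not the vendored implementation (the helper name is illustrative): attempt the chown, and when the option is on, downgrade the EINVAL/EPERM that unmapped IDs produce inside a user namespace.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// chownOrIgnore mimics the ignore_chown_errors idea: with ignore set,
// EINVAL/EPERM (typical for IDs with no mapping in the current user
// namespace) are logged and swallowed, leaving ownership unchanged.
func chownOrIgnore(path string, uid, gid int, ignore bool) error {
	err := os.Chown(path, uid, gid)
	if err == nil {
		return nil
	}
	if ignore && (errors.Is(err, syscall.EINVAL) || errors.Is(err, syscall.EPERM)) {
		fmt.Fprintf(os.Stderr, "ignoring chown %d:%d on %s: %v\n", uid, gid, path, err)
		return nil
	}
	return err
}

func main() {
	f, err := os.CreateTemp("", "chown-demo-")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()
	// As an unprivileged user this chown normally fails; ignore=true
	// turns the failure into a warning, which is exactly the trade-off
	// the option makes.
	if err := chownOrIgnore(f.Name(), 12345, 54321, true); err != nil {
		panic(err)
	}
}
```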
- IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // Inodes is used to set a maximum inodes of the container image. - Inodes string `toml:"inodes"` + Inodes string `toml:"inodes,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories - ForceMask string `toml:"force_mask"` + ForceMask string `toml:"force_mask,omitempty"` } type VfsOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` } type ZfsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Name is the File System name of the ZFS File system - Name string `toml:"fsname"` + Name string `toml:"fsname,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` } // OptionsConfig represents the "storage.options" TOML config table. @@ -122,82 +122,82 @@ type OptionsConfig struct { // AdditionalImagesStores is the location of additional read/only // Image stores. Usually used to access Networked File System // for shared image content - AdditionalImageStores []string `toml:"additionalimagestores"` + AdditionalImageStores []string `toml:"additionalimagestores,omitempty"` // AdditionalLayerStores is the location of additional read/only // Layer stores. Usually used to access Networked File System // for shared image content // This API is experimental and can be changed without bumping the // major version number. - AdditionalLayerStores []string `toml:"additionallayerstores"` + AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // RemapUIDs is a list of default UID mappings to use for layers. - RemapUIDs string `toml:"remap-uids"` + RemapUIDs string `toml:"remap-uids,omitempty"` // RemapGIDs is a list of default GID mappings to use for layers. - RemapGIDs string `toml:"remap-gids"` + RemapGIDs string `toml:"remap-gids,omitempty"` // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories. - ForceMask os.FileMode `toml:"force_mask"` + ForceMask os.FileMode `toml:"force_mask,omitempty"` // RemapUser is the name of one or more entries in /etc/subuid which // should be used to set up default UID mappings. - RemapUser string `toml:"remap-user"` + RemapUser string `toml:"remap-user,omitempty"` // RemapGroup is the name of one or more entries in /etc/subgid which // should be used to set up default GID mappings. 
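The `remap-uids`/`remap-gids` strings configured through these fields use the same container-ID:host-ID:length triples that the kernel's uid_map files take. A small parser sketch for a single triple, for illustration only; the vendored idtools package has its own, richer parsing:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// IDMap mirrors the shape of idtools.IDMap: one contiguous mapping range.
type IDMap struct {
	ContainerID int
	HostID      int
	Size        int
}

// parseTriple parses a single "containerID:hostID:size" mapping,
// e.g. "0:100000:65536".
func parseTriple(s string) (IDMap, error) {
	parts := strings.Split(s, ":")
	if len(parts) != 3 {
		return IDMap{}, fmt.Errorf("expected 3 fields in %q", s)
	}
	var nums [3]int
	for i, p := range parts {
		n, err := strconv.Atoi(p)
		if err != nil {
			return IDMap{}, fmt.Errorf("field %d of %q: %w", i, s, err)
		}
		nums[i] = n
	}
	return IDMap{ContainerID: nums[0], HostID: nums[1], Size: nums[2]}, nil
}

func main() {
	m, err := parseTriple("0:100000:65536")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", m) // {ContainerID:0 HostID:100000 Size:65536}
}
```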
- RemapGroup string `toml:"remap-group"` + RemapGroup string `toml:"remap-group,omitempty"` // RootAutoUsernsUser is the name of one or more entries in /etc/subuid and // /etc/subgid which should be used to set up automatically a userns. - RootAutoUsernsUser string `toml:"root-auto-userns-user"` + RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"` // AutoUsernsMinSize is the minimum size for a user namespace that is // created automatically. - AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"` + AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"` // AutoUsernsMaxSize is the maximum size for a user namespace that is // created automatically. - AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"` + AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"` // Aufs container options to be handed to aufs drivers - Aufs struct{ AufsOptionsConfig } `toml:"aufs"` + Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"` // Btrfs container options to be handed to btrfs drivers - Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"` + Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"` // Thinpool container options to be handed to thinpool drivers - Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"` // Overlay container options to be handed to overlay drivers - Overlay struct{ OverlayOptionsConfig } `toml:"overlay"` + Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"` // Vfs container options to be handed to VFS drivers - Vfs struct{ VfsOptionsConfig } `toml:"vfs"` + Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"` // Zfs container options to be handed to ZFS drivers - Zfs struct{ ZfsOptionsConfig } `toml:"zfs"` + Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // PullOptions specifies options to be handed to pull managers // This API is experimental and can be changed without bumping the major version number. - PullOptions map[string]string `toml:"pull_options"` + PullOptions map[string]string `toml:"pull_options,omitempty"` // DisableVolatile doesn't allow volatile mounts when it is set. 
- DisableVolatile bool `toml:"disable-volatile"` + DisableVolatile bool `toml:"disable-volatile,omitempty"` } // GetGraphDriverOptions returns the driver specific options diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index 8d58d24cac8..36e1bdd5fc8 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -1,8 +1,10 @@ +//go:build linux || darwin || freebsd || solaris // +build linux darwin freebsd solaris package directory import ( + "io/fs" "os" "path/filepath" "syscall" @@ -21,7 +23,7 @@ func Size(dir string) (size int64, err error) { func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Usage() returns the error. // if dir/x disappeared while walking, Usage() ignores dir/x. @@ -31,8 +33,9 @@ func Usage(dir string) (usage *DiskUsage, err error) { return err } - if fileInfo == nil { - return nil + fileInfo, err := entry.Info() + if err != nil { + return err } // Check inode to only count the sizes of files with multiple hard links once. @@ -44,9 +47,8 @@ func Usage(dir string) (usage *DiskUsage, err error) { // inode is not a uint64 on all platforms. Cast it to avoid issues. data[uint64(inode)] = struct{}{} - // Ignore directory sizes - if fileInfo.IsDir() { + if entry.IsDir() { return nil } diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go index a7a81240bc2..482bc51a26e 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -1,8 +1,10 @@ +//go:build windows // +build windows package directory import ( + "io/fs" "os" "path/filepath" ) @@ -19,11 +21,11 @@ func Size(dir string) (size int64, err error) { // Usage walks a directory tree and returns its total size in bytes and the number of inodes. func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Size() returns the error. // if dir/x disappeared while walking, Size() ignores dir/x. - if os.IsNotExist(err) && d != dir { + if os.IsNotExist(err) && path != dir { return nil } return err @@ -32,16 +34,15 @@ func Usage(dir string) (usage *DiskUsage, err error) { usage.InodeCount++ // Ignore directory sizes - if fileInfo == nil { + if d.IsDir() { return nil } - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil + fileInfo, err := d.Info() + if err != nil { + return err } - - usage.Size += s + usage.Size += fileInfo.Size() return nil }) diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go new file mode 100644 index 00000000000..85c5e76c844 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -0,0 +1,52 @@ +package homedir + +import ( + "errors" + "os" + "path/filepath" +) + +// GetConfigHome returns XDG_CONFIG_HOME. 
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetCacheHome returns XDG_CACHE_HOME. +// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetCacheHome() (string, error) { + if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { + return xdgCacheHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CACHE_HOME or HOME") + } + return filepath.Join(home, ".cache"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index 06b53854b93..027db259c19 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -18,18 +18,3 @@ func GetRuntimeDir() (string, error) { func StickRuntimeDirContents(files []string) ([]string, error) { return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") } - -// GetDataHome is unsupported on non-linux system. -func GetDataHome() (string, error) { - return "", errors.New("homedir.GetDataHome() is not supported on this system") -} - -// GetConfigHome is unsupported on non-linux system. -func GetConfigHome() (string, error) { - return "", errors.New("homedir.GetConfigHome() is not supported on this system") -} - -// GetCacheHome is unsupported on non-linux system. -func GetCacheHome() (string, error) { - return "", errors.New("homedir.GetCacheHome() is not supported on this system") -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 2475e351bb5..33177bdf306 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -93,48 +93,3 @@ func stick(f string) error { m |= os.ModeSticky return os.Chmod(f, m) } - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. 
-// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} - -// GetCacheHome returns XDG_CACHE_HOME. -// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetCacheHome() (string, error) { - if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { - return xdgCacheHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CACHE_HOME or HOME") - } - return filepath.Join(home, ".cache"), nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go index 4f2615ed32f..af65f2c03de 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -17,7 +17,12 @@ func Key() string { // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. func Get() string { - return os.Getenv(Key()) + home := os.Getenv(Key()) + if home != "" { + return home + } + home, _ = os.UserHomeDir() + return home } // GetShortcutString returns the string that is shortcut to user's home directory diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 83bc8c34ff4..7a8fec0ce5f 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -3,15 +3,18 @@ package idtools import ( "bufio" "fmt" + "io/ioutil" "os" "os/user" "sort" "strconv" "strings" + "sync" "syscall" "github.com/containers/storage/pkg/system" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // IDMap contains a single entry for user namespace range remapping. An array @@ -82,7 +85,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(uidMap) == 1 && uidMap[0].Size == 1 { uid = uidMap[0].HostID } else { - uid, err = toHost(0, uidMap) + uid, err = RawToHost(0, uidMap) if err != nil { return -1, -1, err } @@ -90,7 +93,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(gidMap) == 1 && gidMap[0].Size == 1 { gid = gidMap[0].HostID } else { - gid, err = toHost(0, gidMap) + gid, err = RawToHost(0, gidMap) if err != nil { return -1, -1, err } @@ -98,10 +101,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { return uid, gid, nil } -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { +// RawToContainer takes an id mapping, and uses it to translate a host ID to +// the remapped ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. 
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToContainer(hostID int, idMap []IDMap) (int, error) {
 if idMap == nil {
 return hostID, nil
 }
@@ -114,10 +121,14 @@ func toContainer(hostID int, idMap []IDMap) (int, error) {
 return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
 }
 
-// toHost takes an id mapping and a remapped ID, and translates the
-// ID to the mapped host ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id #
-func toHost(contID int, idMap []IDMap) (int, error) {
+// RawToHost takes an id mapping and a remapped ID, and translates the ID to
+// the mapped host ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToHost(contID int, idMap []IDMap) (int, error) {
 if idMap == nil {
 return contID, nil
 }
@@ -182,31 +193,87 @@ func (i *IDMappings) RootPair() IDPair {
 }
 
 // ToHost returns the host UID and GID for the container uid, gid.
-// Remapping is only performed if the ids aren't already the remapped root ids
 func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+ var err error
+ var target IDPair
+
+ target.UID, err = RawToHost(pair.UID, i.uids)
+ if err != nil {
+ return target, err
+ }
+
+ target.GID, err = RawToHost(pair.GID, i.gids)
+ return target, err
+}
+
+var (
+ overflowUIDOnce sync.Once
+ overflowGIDOnce sync.Once
+ overflowUID int
+ overflowGID int
+)
+
+// getOverflowUID returns the UID mapped to the overflow user
+func getOverflowUID() int {
+ overflowUIDOnce.Do(func() {
+ // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present
+ overflowUID = 65534
+ if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
+ if tmp, err := strconv.Atoi(string(content)); err == nil {
+ overflowUID = tmp
+ }
+ }
+ })
+ return overflowUID
+}
+
+// getOverflowGID returns the GID mapped to the overflow group
+func getOverflowGID() int {
+ overflowGIDOnce.Do(func() {
+ // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
+ overflowGID = 65534
+ if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
+ if tmp, err := strconv.Atoi(string(content)); err == nil {
+ overflowGID = tmp
+ }
+ }
+ })
+ return overflowGID
+}
+
+// ToHostOverflow returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids.
+// If the mapping is not possible because the target ID is not mapped into
+// the namespace, then the overflow ID is used.
+func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) {
 var err error
 target := i.RootPair()
 
 if pair.UID != target.UID {
- target.UID, err = toHost(pair.UID, i.uids)
+ target.UID, err = RawToHost(pair.UID, i.uids)
 if err != nil {
- return target, err
+ target.UID = getOverflowUID()
+ logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID)
 }
 }
 
 if pair.GID != target.GID {
- target.GID, err = toHost(pair.GID, i.gids)
+ target.GID, err = RawToHost(pair.GID, i.gids)
+ if err != nil {
+ target.GID = getOverflowGID()
+ logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID)
+ }
 }
- return target, err
+ return target, nil
 }
 
 // ToContainer returns the container UID and GID for the host uid and gid
 func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
- uid, err := toContainer(pair.UID, i.uids)
+ uid, err := RawToContainer(pair.UID, i.uids)
 if err != nil {
 return -1, -1, err
 }
- gid, err := toContainer(pair.GID, i.gids)
+ gid, err := RawToContainer(pair.GID, i.gids)
 return uid, gid, err
 }
 
@@ -293,7 +360,7 @@ func parseSubidFile(path, username string) (ranges, error) {
 
 func checkChownErr(err error, name string, uid, gid int) error {
 if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
- return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name)
+ return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate", uid, gid, name)
 }
 return err
 }
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
index db50a62e4c0..6e6e3b22bc9 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -12,11 +12,21 @@ import (
 #cgo LDFLAGS: -l subid
 #include <shadow/subid.h>
 #include <stdlib.h>
+#include <stdio.h>
 const char *Prog = "storage";
+FILE *shadow_logfd = NULL;
+
 struct subid_range get_range(struct subid_range *ranges, int i)
 {
- return ranges[i];
+ shadow_logfd = stderr;
+ return ranges[i];
 }
+
+#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
+# define subid_get_uid_ranges get_subuid_ranges
+# define subid_get_gid_ranges get_subgid_ranges
+#endif
+
 */
 import "C"
 
@@ -32,9 +42,9 @@ func readSubid(username string, isUser bool) (ranges, error) {
 var nRanges C.int
 var cRanges *C.struct_subid_range
 if isUser {
- nRanges = C.get_subuid_ranges(cUsername, &cRanges)
+ nRanges = C.subid_get_uid_ranges(cUsername, &cRanges)
 } else {
- nRanges = C.get_subgid_ranges(cUsername, &cRanges)
+ nRanges = C.subid_get_gid_ranges(cUsername, &cRanges)
 }
 if nRanges < 0 {
 return nil, errors.New("cannot read subids")
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index 9776b2a1287..7f270c61f82 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -46,6 +46,9 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
 // walk back to "/" looking for directories which do not exist
 // and add them to the paths array for chown after creation
 dirPath := path
+ if
!filepath.IsAbs(dirPath) { + return fmt.Errorf("path: %s should be absolute", dirPath) + } for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000000..3ba99cf9351 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MNT_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MNT_SYNCHRONOUS + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MNT_UPDATE + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MNT_NOATIME + + mntDetach = unix.MNT_FORCE + + NODIRATIME = 0 + NODEV = 0 + DIRSYNC = 0 + MANDLOCK = 0 + BIND = 0 + RBIND = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SLAVE = 0 + RSLAVE = 0 + SHARED = 0 + RSHARED = 0 + RELATIME = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go index 9afd26d4c06..ee0f593a50a 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go index b31cf99d0ff..72ceec3ddad 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -28,14 +28,25 @@ func allocateIOVecs(options []string) []C.struct_iovec { func mount(device, target, mType string, flag uintptr, data string) error { isNullFS := false - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true + options := []string{"fspath", target} + + if data != "" { + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + continue + } + opt := strings.SplitN(x, "=", 2) + options = append(options, opt[0]) + if len(opt) == 2 { + options = append(options, opt[1]) + } else { + options = append(options, "") + } } } - options := []string{"fspath", target} if isNullFS { options = append(options, "fstype", "nullfs", "target", device) } else { diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go new file mode 100644 index 00000000000..6f63ae99170 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go @@ -0,0 +1,37 @@ +// +build freebsd + +package reexec + +import ( + "context" + "os" + "os/exec" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the 
current process's binary. +// Uses sysctl. +func Self() string { + path, err := unix.SysctlArgs("kern.proc.pathname", -1) + if err == nil { + return path + } + return os.Args[0] +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index 372bee7321f..d3dd86d349f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -17,6 +17,7 @@ func Self() string { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index 1ecaa906fed..a56ada2161e 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -1,4 +1,5 @@ -// +build freebsd solaris darwin +//go:build solaris || darwin +// +build solaris darwin package reexec @@ -17,6 +18,7 @@ func Self() string { // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will // be set to "/usr/bin/docker". func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -24,6 +26,7 @@ func Command(args ...string) *exec.Cmd { // CommandContext returns *exec.Cmd which has Path as current binary. func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go index 9d937426854..5b3605f319c 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -9,10 +9,12 @@ import ( // Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } // CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. 
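The panicIfNotInitialized guard being threaded through every Command variant below enforces a contract this package has always documented: main() must give reexec a chance to run a registered initializer before doing anything else, otherwise the re-executed child silently runs the parent's main(). A sketch of the canonical boilerplate, using the API shown in this patch; the "check" name is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Register a subroutine under a name; when this binary is re-executed
	// with os.Args[0] == "check", Init() runs it instead of main().
	reexec.Register("check", func() {
		fmt.Println("running in the child process")
		os.Exit(0)
	})
}

func main() {
	// Mandatory boilerplate: hand control to a registered initializer
	// if this process is a re-exec child.
	if reexec.Init() {
		return
	}
	cmd := reexec.Command("check")
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec failed:", err)
		os.Exit(1)
	}
}
```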
func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go index 673ab476abd..d868564767f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -17,6 +17,7 @@ func Self() string { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go index c56671d9192..a1938cd4f34 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -7,7 +7,10 @@ import ( "path/filepath" ) -var registeredInitializers = make(map[string]func()) +var ( + registeredInitializers = make(map[string]func()) + initWasCalled = false +) // Register adds an initialization func under the specified name func Register(name string, initializer func()) { @@ -22,6 +25,7 @@ func Register(name string, initializer func()) { // initialization function was called. func Init() bool { initializer, exists := registeredInitializers[os.Args[0]] + initWasCalled = true if exists { initializer() @@ -30,6 +34,21 @@ func Init() bool { return false } +func panicIfNotInitialized() { + if !initWasCalled { + // The reexec package is used to run subroutines in + // subprocesses which would otherwise have unacceptable side + // effects on the main thread. If you found this error, then + // your program uses a package which needs to do this. 
In
+ // order for that to work, main() should start with this
+ // boilerplate, or an equivalent:
+ // if reexec.Init() {
+ // return
+ // }
+ panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()")
+ }
+}
+
 func naiveSelf() string {
 name := os.Args[0]
 if filepath.Base(name) == name {
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
index 10355848bdb..6b47c4e717f 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -13,6 +13,9 @@ const (
 
 // Operation not supported
 EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+
+ // Value is too small or too large for maximum size allowed
+ EOVERFLOW unix.Errno = unix.EOVERFLOW
 )
 
 // Lgetxattr retrieves the value of the extended attribute identified by attr
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
index bc8b8e3a5fe..3fc27f0b139 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
@@ -10,6 +10,9 @@ const (
 
 // Operation not supported
 EOPNOTSUPP syscall.Errno = syscall.Errno(0)
+
+ // Value is too small or too large for maximum size allowed
+ EOVERFLOW syscall.Errno = syscall.Errno(0)
 )
 
 // Lgetxattr is not supported on platforms other than linux.
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
index 6d351ce80a9..baeb8f1aab5 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package unshare
@@ -9,6 +10,7 @@ import (
 "io"
 "os"
 "os/exec"
+ "os/signal"
 "os/user"
 "runtime"
 "strconv"
@@ -75,6 +77,28 @@ func getRootlessGID() int {
 return os.Getegid()
 }
 
+// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the
+// matching file capability
+func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) {
+ info, err := os.Stat(path)
+ if err != nil {
+ return false, err
+ }
+
+ mode := info.Mode()
+ if mode&modeid == modeid {
+ return true, nil
+ }
+ cap, err := capability.NewFile2(path)
+ if err != nil {
+ return false, err
+ }
+ if err := cap.Load(); err != nil {
+ return false, err
+ }
+ return cap.Get(capability.EFFECTIVE, capid), nil
+}
+
 func (c *Cmd) Start() error {
 runtime.LockOSThread()
 defer runtime.UnlockOSThread()
@@ -214,15 +238,26 @@ func (c *Cmd) Start() error {
 gidmapSet := false
 // Set the GID map.
 if c.UseNewgidmap {
- cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
+ path, err := exec.LookPath("newgidmap")
+ if err != nil {
+ return errors.Wrapf(err, "error finding newgidmap")
+ }
+ cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
g.Reset() cmd.Stdout = g cmd.Stderr = g - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { gidmapSet = true } else { logrus.Warnf("Error running newgidmap: %v: %s", err, g.String()) + isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID) + if err != nil { + logrus.Warnf("Failed to check for setgid on %s: %v", path, err) + } else { + if !isSetgid { + logrus.Warnf("%s should be setgid or have filecaps setgid", path) + } + } logrus.Warnf("Falling back to single mapping") g.Reset() g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) @@ -261,17 +296,29 @@ func (c *Cmd) Start() error { fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) } uidmapSet := false - // Set the GID map. + // Set the UID map. if c.UseNewuidmap { - cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + path, err := exec.LookPath("newuidmap") + if err != nil { + return errors.Wrapf(err, "error finding newuidmap") + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) u.Reset() cmd.Stdout = u cmd.Stderr = u - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { uidmapSet = true } else { logrus.Warnf("Error running newuidmap: %v: %s", err, u.String()) + isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID) + if err != nil { + logrus.Warnf("Failed to check for setuid on %s: %v", path, err) + } else { + if !isSetuid { + logrus.Warnf("%s should be setuid or have filecaps setuid", path) + } + } + logrus.Warnf("Falling back to single mapping") u.Reset() u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) @@ -484,6 +531,30 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) { // Finish up. logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings) + + // Forward SIGHUP, SIGINT, and SIGTERM to our child process. + interrupted := make(chan os.Signal, 100) + defer func() { + signal.Stop(interrupted) + close(interrupted) + }() + cmd.Hook = func(int) error { + go func() { + for receivedSignal := range interrupted { + cmd.Cmd.Process.Signal(receivedSignal) + } + }() + return nil + } + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + + // Make sure our child process gets SIGKILLed if we exit, for whatever + // reason, before it does. + if cmd.Cmd.SysProcAttr == nil { + cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL + ExecRunnable(cmd, nil) } @@ -501,11 +572,11 @@ func ExecRunnable(cmd Runnable, cleanup func()) { if exitError.ProcessState.Exited() { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { if waitStatus.Exited() { - logrus.Errorf("%v", exitError) + logrus.Debugf("%v", exitError) exit(waitStatus.ExitStatus()) } if waitStatus.Signaled() { - logrus.Errorf("%v", exitError) + logrus.Debugf("%v", exitError) exit(int(waitStatus.Signal()) + 128) } } diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 722750c0cca..c17dd6d37ea 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -1,5 +1,14 @@ # This file is is the configuration file for all tools -# that use the containers/storage library. +# that use the containers/storage library. 
The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# container/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+# /usr/containers/storage.conf
+# /etc/containers/storage.conf
+# $HOME/.config/containers/storage.conf
+# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
 # See man 5 containers-storage.conf for more information
 # The "container storage" table contains all of the server options.
 [storage]
@@ -11,8 +20,14 @@ driver = "overlay"
 runroot = "/run/containers/storage"
 
 # Primary Read/Write location of container storage
+# When changing the graphroot location on an SELinux system, you must
+# ensure the labeling matches the default location's labels with the
+# following commands:
+# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH
+# restorecon -R -v /NEWSTORAGEPATH
 graphroot = "/var/lib/containers/storage"
 
+
 # Storage path for rootless users
 #
 # rootless_storage_path = "$HOME/.local/share/containers/storage"
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd
new file mode 100644
index 00000000000..cc655c62e56
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd
@@ -0,0 +1,205 @@
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# container/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+# /usr/containers/storage.conf
+# /etc/containers/storage.conf
+# $HOME/.config/containers/storage.conf
+# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default Storage Driver, Must be set for proper operation.
+driver = "zfs"
+
+# Temporary storage location
+runroot = "/var/run/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "/var/db/containers/storage"
+
+
+# Storage path for rootless users
+#
+# rootless_storage_path = "$HOME/.local/share/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be comma separated list.
+additionalimagestores = [
+]
+
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+# remap-uids = 0:1668442479:65536
+# remap-gids = 0:1668442479:65536
+
+# Remap-User/Group is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
+# with an in-container ID of 0 and then a host-level ID taken from the lowest
+# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the
+# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
+# until all of the entries have been used for maps.
+#
+# remap-user = "containers"
+# remap-group = "containers"
+
+# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
+# to containers configured to create automatically a user namespace. Containers
+# configured to automatically create a user namespace can still overlap with containers
+# having an explicit mapping set.
+# This setting is ignored when running as rootless.
+# root-auto-userns-user = "storage"
+#
+# Auto-userns-min-size is the minimum size for a user namespace created automatically.
+# auto-userns-min-size=1024
+#
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
+# auto-userns-max-size=65536
+
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image even those with multiple uids. Note multiple UIDs will be
+# squashed down to the default uid in the container. These images will have no
+# separation between the users in the container. Only supported for the overlay
+# and vfs drivers.
+#ignore_chown_errors = "false"
+
+# Inodes is used to set a maximum inodes of the container image.
+# inodes = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies comma separated list of extra mount options
+mountopt = "nodev"
+
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+# "": No value specified.
+# All files/directories, get set with the permissions identified within the
+# image.
+# "private": it is equivalent to 0700.
+# All files/directories get set with 0700 permissions. The owner has rwx
+# access to the files. No other users on the system can access the files.
+# This setting could be used with networked based homedirs.
+# "shared": it is equivalent to 0755.
+# The owner has rwx access to the files and everyone else can read, access
+# and execute them. This setting is useful for sharing containers storage
+# with other users. For instance have a storage owned by root but shared
+# to rootless users as an additional store.
+# NOTE: All files within the image are made readable and executable by any
+# user on the system. Even /etc/shadow within your image is now readable by
+# any user.
+#
+# OCTAL: Users can experiment with other OCTAL Permissions.
+#
+# Note: The force_mask Flag is an experimental feature, it could change in the
+# future. When "force_mask" is set the original permission mask is stored in
+# the "user.containers.override_stat" xattr and the "mount_program" option must
+# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
+# extended attribute permissions to processes within containers rather than the
+# "force_mask" permissions.
+#
+# force_mask = ""
+
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which pool needs to be
+# grown. This is specified in terms of % of pool size. So a value of 20 means
+# that when threshold is hit, pool will be grown by 20% of existing
+# pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of pool size. For example, if threshold is 60, that means when
+# pool is 60% full, threshold has been hit.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you setup devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes device even if device already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the min free space percent in a thin pool required for
+# new device creation to succeed. Valid values are from 0% - 99%.
+# Value 0% disables
+# min_free_space = "10%"
+
+# mkfsarg specifies extra mkfs arguments to be used when creating the base
+# device.
+# mkfsarg = ""
+
+# metadata_size is used to set the `pvcreate --metadatasize` options when
+# creating thin devices. Default is 128k
+# metadata_size = ""
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# use_deferred_removal marks devicemapper block device for deferred removal.
+# If the thinpool is in use when the driver attempts to remove it, the driver
+# tells the kernel to remove it as soon as possible. Note this does not free
+# up the disk space, use deferred deletion to fully remove the thinpool.
+# use_deferred_removal = "True"
+
+# use_deferred_deletion marks thinpool device for deferred deletion.
+# If the device is busy when the driver attempts to delete it, the driver
+# will attempt to delete device every 30 seconds until successful.
+# If the program using the driver exits, the driver will continue attempting
+# to cleanup the next time the driver is used. Deferred deletion permanently
+# deletes the device and all data stored in device will be lost.
+# use_deferred_deletion = "True"
+
+# xfs_nospace_max_retries specifies the maximum number of retries XFS should
+# attempt to complete IO when ENOSPC (no space) error is returned by
+# underlying storage device.
+# xfs_nospace_max_retries = "0"
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 169c7d1513e..30d3e8715ee 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -31,6 +31,14 @@ import (
 "github.com/pkg/errors"
 )
 
+type updateNameOperation int
+
+const (
+ setNames updateNameOperation = iota
+ addNames
+ removeNames
+)
+
 var (
 stores []*store
 storesLock sync.Mutex
@@ -368,8 +376,17 @@ type Store interface {
 // SetNames changes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + AddNames(id string, names []string) error + + // RemoveNames removes the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + RemoveNames(id string, names []string) error + // ListImageBigData retrieves a list of the (possibly large) chunks of // named data associated with an image. ListImageBigData(id string) ([]string, error) @@ -575,10 +592,11 @@ type ContainerOptions struct { // container's layer will inherit settings from the image's top layer // or, if it is not being created based on an image, the Store object. types.IDMappingOptions - LabelOpts []string - Flags map[string]interface{} - MountOpts []string - Volatile bool + LabelOpts []string + Flags map[string]interface{} + MountOpts []string + Volatile bool + StorageOpt map[string]string } type store struct { @@ -646,17 +664,21 @@ func GetStore(options types.StoreOptions) (Store, error) { storesLock.Lock() defer storesLock.Unlock() + // return if BOTH run and graph root are matched, otherwise our run-root can be overridden if the graph is found first for _, s := range stores { - if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { + if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { return s, nil } } - if options.GraphRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified") - } - if options.RunRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified") + // if passed a run-root or graph-root alone, the other should be defaulted only error if we have neither. + switch { + case options.RunRoot == "" && options.GraphRoot == "": + return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified") + case options.GraphRoot == "": + options.GraphRoot = types.Options().GraphRoot + case options.RunRoot == "": + options.RunRoot = types.Options().RunRoot } if err := os.MkdirAll(options.RunRoot, 0700); err != nil { @@ -1384,7 +1406,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.Flags["MountLabel"] = mountLabel } - clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true) + clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), options.StorageOpt, layerOptions, true) if err != nil { return nil, err } @@ -1608,7 +1630,7 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { } } if foundImage { - return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q", key, id) + return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q (consider removing the image to resolve the issue)", key, id) } return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) } @@ -2045,7 +2067,20 @@ func dedupeNames(names []string) []string { return deduped } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
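The deprecation is about a read-modify-write hazard: a SetNames caller has to read the current list, edit it, and write the whole list back, so two concurrent callers can silently drop each other's names, while AddNames/RemoveNames hand the store a delta to apply under its own lock. A toy illustration of the difference; the types here are stand-ins for the real Store, not the vendored implementation:

```go
package main

import (
	"fmt"
	"sync"
)

// toyStore stands in for the names-bearing store; only the locking
// pattern matters here, not the real storage types.
type toyStore struct {
	mu    sync.Mutex
	names []string
}

// SetNames-style: the caller computed the full list outside the lock,
// so concurrent callers clobber each other's updates.
func (s *toyStore) SetNames(names []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.names = names
}

// AddNames-style: the delta is applied under the store's lock,
// so concurrent additions compose instead of racing.
func (s *toyStore) AddNames(names []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.names = append(s.names, names...)
}

func main() {
	s := &toyStore{}
	var wg sync.WaitGroup
	for _, n := range []string{"alpha", "beta"} {
		wg.Add(1)
		go func(n string) {
			defer wg.Done()
			s.AddNames([]string{n}) // with SetNames, one name could be lost
		}(n)
	}
	wg.Wait()
	fmt.Println(s.names) // both names survive, in either order
}
```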
func (s *store) SetNames(id string, names []string) error { + return s.updateNames(id, names, setNames) +} + +func (s *store) AddNames(id string, names []string) error { + return s.updateNames(id, names, addNames) +} + +func (s *store) RemoveNames(id string, names []string) error { + return s.updateNames(id, names, removeNames) +} + +func (s *store) updateNames(id string, names []string, op updateNameOperation) error { deduped := dedupeNames(names) rlstore, err := s.LayerStore() @@ -2058,7 +2093,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if rlstore.Exists(id) { - return rlstore.SetNames(id, deduped) + switch op { + case setNames: + return rlstore.SetNames(id, deduped) + case removeNames: + return rlstore.RemoveNames(id, deduped) + case addNames: + return rlstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } ristore, err := s.ImageStore() @@ -2071,7 +2115,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if ristore.Exists(id) { - return ristore.SetNames(id, deduped) + switch op { + case setNames: + return ristore.SetNames(id, deduped) + case removeNames: + return ristore.RemoveNames(id, deduped) + case addNames: + return ristore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } // Check is id refers to a RO Store @@ -2109,7 +2162,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if rcstore.Exists(id) { - return rcstore.SetNames(id, deduped) + switch op { + case setNames: + return rcstore.SetNames(id, deduped) + case removeNames: + return rcstore.RemoveNames(id, deduped) + case addNames: + return rcstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } return ErrLayerUnknown } @@ -2370,22 +2432,16 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if err != nil { return nil, err } - childrenByParent := make(map[string]*[]string) + childrenByParent := make(map[string][]string) for _, layer := range layers { - parent := layer.Parent - if list, ok := childrenByParent[parent]; ok { - newList := append(*list, layer.ID) - childrenByParent[parent] = &newList - } else { - childrenByParent[parent] = &([]string{layer.ID}) - } + childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID) } - otherImagesByTopLayer := make(map[string]string) + otherImagesTopLayers := make(map[string]struct{}) for _, img := range images { if img.ID != id { - otherImagesByTopLayer[img.TopLayer] = img.ID + otherImagesTopLayers[img.TopLayer] = struct{}{} for _, layerID := range img.MappedTopLayers { - otherImagesByTopLayer[layerID] = img.ID + otherImagesTopLayers[layerID] = struct{}{} } } } @@ -2395,43 +2451,44 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) } } layer := image.TopLayer - lastRemoved := "" + layersToRemoveMap := make(map[string]struct{}) + layersToRemove = append(layersToRemove, image.MappedTopLayers...) 
+ for _, mappedTopLayer := range image.MappedTopLayers { + layersToRemoveMap[mappedTopLayer] = struct{}{} + } for layer != "" { if rcstore.Exists(layer) { break } - if _, ok := otherImagesByTopLayer[layer]; ok { + if _, used := otherImagesTopLayers[layer]; used { break } parent := "" if l, err := rlstore.Get(layer); err == nil { parent = l.Parent } - hasOtherRefs := func() bool { + hasChildrenNotBeingRemoved := func() bool { layersToCheck := []string{layer} if layer == image.TopLayer { layersToCheck = append(layersToCheck, image.MappedTopLayers...) } for _, layer := range layersToCheck { - if childList, ok := childrenByParent[layer]; ok && childList != nil { - children := *childList - for _, child := range children { - if child != lastRemoved { - return true + if childList := childrenByParent[layer]; len(childList) > 0 { + for _, child := range childList { + if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { + continue } + return true } } } return false } - if hasOtherRefs() { + if hasChildrenNotBeingRemoved() { break } - lastRemoved = layer - if layer == image.TopLayer { - layersToRemove = append(layersToRemove, image.MappedTopLayers...) - } - layersToRemove = append(layersToRemove, lastRemoved) + layersToRemove = append(layersToRemove, layer) + layersToRemoveMap[layer] = struct{}{} layer = parent } } else { @@ -2499,23 +2556,29 @@ func (s *store) DeleteContainer(id string) error { gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) wg.Add(1) go func() { - var err error - for attempts := 0; attempts < 50; attempts++ { - err = os.RemoveAll(gcpath) - if err == nil || !system.IsEBUSY(err) { - break - } - time.Sleep(time.Millisecond * 100) + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(gcpath) + if err == nil { + errChan <- nil + return } - errChan <- err - wg.Done() + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(gcpath) }() rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) wg.Add(1) go func() { - errChan <- os.RemoveAll(rcpath) - wg.Done() + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(rcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(rcpath) }() go func() { @@ -2524,17 +2587,12 @@ func (s *store) DeleteContainer(id string) error { }() var errors []error - for { - select { - case err, ok := <-errChan: - if !ok { - return multierror.Append(nil, errors...).ErrorOrNil() - } - if err != nil { - errors = append(errors, err) - } + for err := range errChan { + if err != nil { + errors = append(errors, err) } } + return multierror.Append(nil, errors...).ErrorOrNil() } } return ErrNotAContainer @@ -2830,10 +2888,33 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro if err != nil { return nil, err } + + // NaiveDiff could cause mounts to happen without a lock, so be safe + // and treat the .Diff operation as a Mount. + s.graphLock.Lock() + defer s.graphLock.Unlock() + + modified, err := s.graphLock.Modified() + if err != nil { + return nil, err + } + + // We need to make sure the home mount is present when the Mount is done. + if modified { + s.graphDriver = nil + s.layerStore = nil + s.graphDriver, err = s.getGraphDriver() + if err != nil { + return nil, err + } + s.lastLoaded = time.Now() + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ store := s store.RLock() if err := store.ReloadIfChanged(); err != nil { + store.Unlock() return nil, err } if store.Exists(to) { diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go index d920d12eb50..ad12ffdbf2d 100644 --- a/vendor/github.com/containers/storage/types/errors.go +++ b/vendor/github.com/containers/storage/types/errors.go @@ -55,4 +55,6 @@ var ( ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") // ErrNotSupported is returned when the requested functionality is not supported. ErrNotSupported = errors.New("not supported") + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = errors.New("invalid mappings specified") ) diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index f9bf7e6b658..a71c6d2efd8 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -3,44 +3,73 @@ package types import ( "fmt" "os" - "os/exec" "path/filepath" "strings" "sync" "time" "github.com/BurntSushi/toml" - "github.com/containers/storage/drivers/overlay" cfg "github.com/containers/storage/pkg/config" "github.com/containers/storage/pkg/idtools" "github.com/sirupsen/logrus" ) // TOML-friendly explicit tables used for conversions. -type tomlConfig struct { +type TomlConfig struct { Storage struct { - Driver string `toml:"driver"` - RunRoot string `toml:"runroot"` - GraphRoot string `toml:"graphroot"` - RootlessStoragePath string `toml:"rootless_storage_path"` - Options cfg.OptionsConfig `toml:"options"` + Driver string `toml:"driver,omitempty"` + RunRoot string `toml:"runroot,omitempty"` + GraphRoot string `toml:"graphroot,omitempty"` + RootlessStoragePath string `toml:"rootless_storage_path,omitempty"` + Options cfg.OptionsConfig `toml:"options,omitempty"` } `toml:"storage"` } +const ( + // these are the default paths for the run root and graph root for rootful users + // for rootless users the paths are constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" +) + // defaultConfigFile path to the system wide storage.conf file var ( - defaultConfigFile = "/etc/containers/storage.conf" - defaultConfigFileSet = false + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false // DefaultStoreOptions is a reasonable default set of options. defaultStoreOptions StoreOptions ) +const ( + overlayDriver = "overlay" + overlay2 = "overlay2" +) + func init() { - defaultStoreOptions.RunRoot = "/run/containers/storage" - defaultStoreOptions.GraphRoot = "/var/lib/containers/storage" + defaultStoreOptions.RunRoot = defaultRunRoot + defaultStoreOptions.GraphRoot = defaultGraphRoot defaultStoreOptions.GraphDriverName = "" - ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + // The DefaultConfigFile(rootless) function returns the path of the + // storage.conf file that is in use, by returning defaultConfigFile. + // If the override file exists, containers/storage uses it by default.
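This init sequence gives storage.conf classic vendor/admin layering: the packaged default now lives under /usr/share/containers, and an /etc/containers copy, when present, wins. A hedged sketch of the same precedence rule in isolation; the pickConfigFile helper is an illustrative name, not part of the diff:

```go
package main

import (
	"fmt"
	"os"
)

const (
	defaultConfigFile         = "/usr/share/containers/storage.conf"
	defaultOverrideConfigFile = "/etc/containers/storage.conf"
)

// pickConfigFile mirrors the precedence used above: prefer the /etc
// override when it exists, otherwise fall back to the packaged default.
func pickConfigFile() string {
	if _, err := os.Stat(defaultOverrideConfigFile); err == nil {
		return defaultOverrideConfigFile
	} else if !os.IsNotExist(err) {
		// Stat failed for some reason other than absence (e.g. EACCES):
		// warn and keep the packaged default, as init does.
		fmt.Fprintf(os.Stderr, "attempting to use %s: %v\n", defaultConfigFile, err)
	}
	return defaultConfigFile
}

func main() {
	fmt.Println("using", pickConfigFile())
}
```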
+ defaultConfigFile = defaultOverrideConfigFile + ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions) + } else { + if !os.IsNotExist(err) { + logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) + } + ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + } + // reload could set values to empty for run and graph root if config does not contain anything + if defaultStoreOptions.RunRoot == "" { + defaultStoreOptions.RunRoot = defaultRunRoot + } + if defaultStoreOptions.GraphRoot == "" { + defaultStoreOptions.GraphRoot = defaultGraphRoot + } } // defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing. @@ -168,7 +197,6 @@ func isRootlessDriver(driver string) bool { // getRootlessStorageOpts returns the storage opts for containers running as non root func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { var opts StoreOptions - const overlayDriver = "overlay" dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) if err != nil { @@ -190,25 +218,16 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOpti if driver := os.Getenv("STORAGE_DRIVER"); driver != "" { opts.GraphDriverName = driver } - if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver { - supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime) - if err != nil { - return opts, err - } - if supported { - opts.GraphDriverName = overlayDriver - } else { - if path, err := exec.LookPath("fuse-overlayfs"); err == nil { - opts.GraphDriverName = overlayDriver - opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} - } - } - if opts.GraphDriverName == overlayDriver { - for _, o := range systemOpts.GraphDriverOptions { - if strings.Contains(o, "ignore_chown_errors") { - opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) - break - } + if opts.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + opts.GraphDriverName = overlayDriver + } + + if opts.GraphDriverName == overlayDriver { + for _, o := range systemOpts.GraphDriverOptions { + if strings.Contains(o, "ignore_chown_errors") { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) + break } } } @@ -271,7 +290,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio // ReloadConfigurationFile parses the specified configuration file and overrides // the configuration in storeOptions.
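Rootless driver selection is now much simpler: instead of probing for native overlay support or a fuse-overlayfs binary, the code only normalizes the Docker-style overlay2 name to overlay and carries over ignore_chown_errors. A small sketch of that normalization step; normalizeDriver is an illustrative name for what the diff does inline:

```go
package main

import (
	"fmt"
	"log"
)

const (
	overlayDriver = "overlay"
	overlay2      = "overlay2"
)

// normalizeDriver maps the Docker-compatible "overlay2" alias onto the
// "overlay" driver that containers/storage actually implements.
func normalizeDriver(name string) string {
	if name == overlay2 {
		log.Printf("Switching default driver from overlay2 to the equivalent overlay driver.")
		return overlayDriver
	}
	return name
}

func main() {
	fmt.Println(normalizeDriver("overlay2")) // -> overlay
}
```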
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { - config := new(tomlConfig) + config := new(TomlConfig) meta, err := toml.DecodeFile(configFile, &config) if err == nil { @@ -286,7 +305,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { } } - // Clear storeOptions of previos settings + // Clear storeOptions of previous settings *storeOptions = StoreOptions{} if config.Storage.Driver != "" { storeOptions.GraphDriverName = config.Storage.Driver @@ -295,6 +314,10 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { config.Storage.Driver = os.Getenv("STORAGE_DRIVER") storeOptions.GraphDriverName = config.Storage.Driver } + if storeOptions.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + storeOptions.GraphDriverName = overlayDriver + } if storeOptions.GraphDriverName == "" { logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile) } @@ -385,3 +408,39 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { func Options() StoreOptions { return defaultStoreOptions } + +// Save overwrites the tomlConfig in storage.conf with the given conf +func Save(conf TomlConfig, rootless bool) error { + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return err + } + + if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil { + return err + } + + f, err := os.Create(configFile) + if err != nil { + return err + } + + return toml.NewEncoder(f).Encode(conf) +} + +// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it +func StorageConfig(rootless bool) (*TomlConfig, error) { + config := new(TomlConfig) + + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return nil, err + } + + _, err = toml.DecodeFile(configFile, &config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 80d56041b06..37d4b79b01b 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -40,3 +40,35 @@ func validateMountOptions(mountOptions []string) error { } return nil } + +func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) { + var result []string + switch op { + case setNames: + // ignore all old names and just return new names + result = opParameters + case removeNames: + // remove given names from old names + result = make([]string, 0, len(oldNames)) + for _, name := range oldNames { + // only keep names in final result which do not intersect with input names + // basically `result = oldNames - opParameters` + nameShouldBeRemoved := false + for _, opName := range opParameters { + if name == opName { + nameShouldBeRemoved = true + } + } + if !nameShouldBeRemoved { + result = append(result, name) + } + } + case addNames: + result = make([]string, 0, len(opParameters)+len(oldNames)) + result = append(result, opParameters...) + result = append(result, oldNames...) 
+ default: + return result, errInvalidUpdateNameOperation + } + return dedupeNames(result), nil +} diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index bada4a8e3c8..b6bca4cef11 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -5755,7 +5755,6 @@ paths: property1: "string" property2: "string" IpcMode: "" - LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 @@ -8607,12 +8606,20 @@ paths: if `tty` was specified as part of creating and starting the exec instance. operationId: "ExecResize" responses: - 201: + 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index bb7e4e33695..296c96a6334 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -8,11 +8,6 @@ import ( "strings" ) -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - // PortBinding represents a binding between a Host IP address and a Host Port type PortBinding struct { // HostIP is the host IP Address @@ -158,30 +153,33 @@ type PortMapping struct { func splitParts(rawport string) (string, string, string) { parts := strings.Split(rawport, ":") n := len(parts) - containerport := parts[n-1] + containerPort := parts[n-1] switch n { case 1: - return "", "", containerport + return "", "", containerPort case 2: - return "", parts[0], containerport + return "", parts[0], containerPort case 3: - return parts[0], parts[1], containerport + return parts[0], parts[1], containerPort default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + return strings.Join(parts[:n-2], ":"), parts[n-2], containerPort } } // ParsePortSpec parses a port specification string into a slice of PortMappings func ParsePortSpec(rawPort string) ([]PortMapping, error) { var proto string - rawIP, hostPort, containerPort := splitParts(rawPort) + ip, hostPort, containerPort := splitParts(rawPort) proto, containerPort = SplitProtoPort(containerPort) - // Strip [] from IPV6 addresses - ip, _, err := net.SplitHostPort(rawIP + ":") - if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + if ip != "" && ip[0] == '[' { + // Strip [] from IPV6 addresses + rawIP, _, err := net.SplitHostPort(ip + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", ip, err) + } + ip = rawIP } if ip != "" && net.ParseIP(ip) == nil { return nil, fmt.Errorf("Invalid ip address: %s", ip) diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go index ce950171e31..b6eed145e1c 100644 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -43,7 +43,7 @@ type portMapSorter []portMapEntry func (s portMapSorter) Len() int { return len(s) } func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -// sort the port so that the order is: +// Less sorts the port so that the order is: // 1. port with larger specified bindings // 2. 
larger port // 3. port with tcp protocol @@ -58,7 +58,7 @@ func (s portMapSorter) Less(i, j int) bool { func SortPortMap(ports []Port, bindings PortMap) { s := portMapSorter{} for _, p := range ports { - if binding, ok := bindings[p]; ok { + if binding, ok := bindings[p]; ok && len(binding) > 0 { for _, b := range binding { s = append(s, portMapEntry{port: p, binding: b}) } diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go index 98e9a1dc61b..c897cb02ade 100644 --- a/vendor/github.com/docker/go-connections/sockets/proxy.go +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -2,11 +2,8 @@ package sockets import ( "net" - "net/url" "os" "strings" - - "golang.org/x/net/proxy" ) // GetProxyEnv allows access to the uppercase and the lowercase forms of @@ -20,32 +17,12 @@ func GetProxyEnv(key string) string { return proxyValue } -// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a -// proxy.Dialer which will route the connections through the proxy using the -// given dialer. -func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { - allProxy := GetProxyEnv("all_proxy") - if len(allProxy) == 0 { - return direct, nil - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return direct, err - } - - proxyFromURL, err := proxy.FromURL(proxyURL, direct) - if err != nil { - return direct, err - } - - noProxy := GetProxyEnv("no_proxy") - if len(noProxy) == 0 { - return proxyFromURL, nil - } - - perHost := proxy.NewPerHost(proxyFromURL, direct) - perHost.AddFromString(noProxy) - - return perHost, nil +// DialerFromEnvironment was previously used to configure a net.Dialer to route +// connections through a SOCKS proxy. +// DEPRECATED: SOCKS proxies are now supported by configuring only +// http.Transport.Proxy, and no longer require changing http.Transport.Dial. +// Therefore, only sockets.ConfigureTransport() needs to be called, and any +// sockets.DialerFromEnvironment() calls can be dropped. +func DialerFromEnvironment(direct *net.Dialer) (*net.Dialer, error) { + return direct, nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go index a1d7beb4d80..2e9e9006f61 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -3,14 +3,9 @@ package sockets import ( "errors" - "net" "net/http" - "time" ) -// Why 32? See https://github.com/docker/docker/pull/8035. -const defaultTimeout = 32 * time.Second - // ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. 
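Since Go's http.Transport learned to route SOCKS proxies through Transport.Proxy, the custom dialer plumbing above became dead weight; callers keep only the ConfigureTransport call. A sketch of the post-change client setup, where the socket path is an example value:

```go
package main

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// ConfigureTransport wires the unix/npipe/tcp specifics into tr;
	// any sockets.DialerFromEnvironment call can simply be dropped.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}
	_ = client
}
```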
var ErrProtocolNotAvailable = errors.New("protocol not available") @@ -26,13 +21,6 @@ func ConfigureTransport(tr *http.Transport, proto, addr string) error { return configureNpipeTransport(tr, proto, addr) default: tr.Proxy = http.ProxyFromEnvironment - dialer, err := DialerFromEnvironment(&net.Dialer{ - Timeout: defaultTimeout, - }) - if err != nil { - return err - } - tr.Dial = dialer.Dial } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index 386cf0dbbde..10d76342652 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -3,6 +3,7 @@ package sockets import ( + "context" "fmt" "net" "net/http" @@ -10,7 +11,10 @@ import ( "time" ) -const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +const ( + defaultTimeout = 10 * time.Second + maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +) func configureUnixTransport(tr *http.Transport, proto, addr string) error { if len(addr) > maxUnixSocketPathSize { @@ -18,8 +22,11 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { } // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) + dialer := &net.Dialer{ + Timeout: defaultTimeout, + } + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return dialer.DialContext(ctx, proto, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go index 5c21644e1fe..7acafc5a2ad 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -1,6 +1,7 @@ package sockets import ( + "context" "net" "net/http" "time" @@ -15,8 +16,8 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { func configureNpipeTransport(tr *http.Transport, proto, addr string) error { // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index a8b5dbb6fdc..e7591e6edbf 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,5 +1,51 @@ // +build !windows +/* +Package sockets is a simple unix domain socket wrapper. 
+ +Usage + +For example: + + import( + "fmt" + "net" + "github.com/docker/go-connections/sockets" + ) + + func main() { + l, err := sockets.NewUnixSocketWithOpts("/path/to/sockets", + sockets.WithChown(0,0),sockets.WithChmod(0660)) + if err != nil { + panic(err) + } + echoStr := "hello" + + go func() { + for { + conn, err := l.Accept() + if err != nil { + return + } + conn.Write([]byte(echoStr)) + conn.Close() + } + }() + + conn, err := net.Dial("unix", "/path/to/sockets") + if err != nil { + panic(err) + } + + buf := make([]byte, 5) + if _, err := conn.Read(buf); err != nil { + panic(err) + } else if string(buf) != echoStr { + panic(fmt.Errorf("message may be lost")) + } + } +*/ package sockets import ( @@ -8,25 +54,73 @@ import ( "syscall" ) -// NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path string, gid int) (net.Listener, error) { +// SockOption sets up a socket file's creation options +type SockOption func(string) error + +// WithChown modifies the socket file's uid and gid +func WithChown(uid, gid int) SockOption { + return func(path string) error { + if err := os.Chown(path, uid, gid); err != nil { + return err + } + return nil + } +} + +// WithChmod modifies the socket file's access mode. +func WithChmod(mask os.FileMode) SockOption { + return func(path string) error { + if err := os.Chmod(path, mask); err != nil { + return err + } + return nil + } +} + +// NewUnixSocketWithOpts creates a unix socket with the specified options. +// By default, socket permissions are 0000 (i.e.: no access for anyone); pass +// WithChmod() and WithChown() to set the desired ownership and permissions. +// +// This function temporarily changes the process's umask to 0777 to work around +// a race condition between creating the socket and setting its permissions. While +// this should only be for a short duration, it may affect other goroutines that +// create files/directories during that period. +func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error) { if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { return nil, err } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) + // net.Listen does not allow for permissions to be set. As a result, when + // specifying custom permissions ("WithChmod()"), there is a short time + // between creating the socket and applying the permissions, during which + // the socket permissions are less restrictive than desired. + // + // To work around this limitation of net.Listen(), we temporarily set the + // umask to 0777, which forces the socket to be created with 000 permissions + // (i.e.: no access for anyone). After that, WithChmod() must be used to set + // the desired permissions. + // + // We don't use "defer" here, to reset the umask to its original value as soon + // as possible. Ideally we'd be able to detect if WithChmod() was passed as + // an option, and skip changing umask if default permissions are used. + origUmask := syscall.Umask(0777) l, err := net.Listen("unix", path) + syscall.Umask(origUmask) if err != nil { return nil, err } - if err := os.Chown(path, 0, gid); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err + + for _, op := range opts { + if err := op(path); err != nil { + _ = l.Close() + return nil, err + } } + return l, nil } + +// NewUnixSocket creates a unix socket with the specified path and group.
+func NewUnixSocket(path string, gid int) (net.Listener, error) { + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0660)) +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 0ef3fdcb469..992968373eb 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -53,18 +53,9 @@ var acceptedCBCCiphers = []uint16{ // known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionSSL30: {}, - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} - // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Avoid fallback by default to SSL protocols < TLS1.2 MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, @@ -72,25 +63,25 @@ func ServerDefault(ops ...func(*tls.Config)) *tls.Config { } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. 
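ServerDefault and ClientDefault accept functional options, so callers can layer tweaks onto the hardened baseline without copying the whole config. For example, raising the minimum version; the TLS 1.3 floor here is an illustrative option, not part of this diff:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Start from the library's hardened server baseline and raise the
	// protocol floor to TLS 1.3 via a functional option.
	cfg := tlsconfig.ServerDefault(func(c *tls.Config) {
		c.MinVersion = tls.VersionTLS13
	})
	fmt.Printf("min version: 0x%04x\n", cfg.MinVersion)
}
```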
@@ -108,11 +99,11 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return nil, fmt.Errorf("failed to read system certificates: %v", err) } } - pem, err := ioutil.ReadFile(caFile) + pemData, err := ioutil.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } - if !certPool.AppendCertsFromPEM(pem) { + if !certPool.AppendCertsFromPEM(pemData) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } return certPool, nil @@ -141,7 +132,7 @@ func adjustMinVersion(options Options, config *tls.Config) error { } // IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when tryin to decrypt a TLS private key +// password when trying to decrypt a TLS private key func IsErrEncryptedKey(err error) bool { return errors.Cause(err) == x509.IncorrectPasswordError } @@ -157,8 +148,8 @@ func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { } var err error - if x509.IsEncryptedPEMBlock(pemBlock) { - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) if err != nil { return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go new file mode 100644 index 00000000000..d8215f8e78a --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go @@ -0,0 +1,16 @@ +// +build go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, + tls.VersionTLS13: {}, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go new file mode 100644 index 00000000000..a5ba7f4a388 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go @@ -0,0 +1,15 @@ +// +build !go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. 
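The build-tag pair added below keeps allTLSVersions truthful per toolchain: TLS 1.3 appears only under go1.13+, where crypto/tls can actually negotiate it, and the dead SSL 3.0 entry is gone. A sketch of how such a map is typically consumed for validation; the helper is illustrative, only the map mirrors the diff:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// Stand-in for the build-tag-selected map below; under go1.13+ it would
// also contain tls.VersionTLS13.
var allTLSVersions = map[uint16]struct{}{
	tls.VersionTLS10: {},
	tls.VersionTLS11: {},
	tls.VersionTLS12: {},
}

// validateTLSVersion is an illustrative consumer: it rejects any uint16
// that this build of crypto/tls cannot negotiate.
func validateTLSVersion(v uint16) error {
	if _, ok := allTLSVersions[v]; !ok {
		return fmt.Errorf("invalid TLS version: 0x%04x", v)
	}
	return nil
}

func main() {
	fmt.Println(validateTLSVersion(tls.VersionTLS12)) // <nil>
	fmt.Println(validateTLSVersion(0x0300))           // rejected (SSL 3.0)
}
```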
+var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} diff --git a/vendor/github.com/docker/go-events/.gitignore b/vendor/github.com/docker/go-events/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/docker/go-events/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/docker/go-events/CONTRIBUTING.md b/vendor/github.com/docker/go-events/CONTRIBUTING.md new file mode 100644 index 00000000000..d813af779b0 --- /dev/null +++ b/vendor/github.com/docker/go-events/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing to Docker open source projects + +Want to hack on go-events? Awesome! Here are instructions to get you started. + +go-events is part of the [Docker](https://www.docker.com) project, and +follows the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith <joe.smith@email.com> + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-events/LICENSE b/vendor/github.com/docker/go-events/LICENSE new file mode 100644 index 00000000000..6d630cf595e --- /dev/null +++ b/vendor/github.com/docker/go-events/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-events/MAINTAINERS b/vendor/github.com/docker/go-events/MAINTAINERS new file mode 100644 index 00000000000..e414d82e964 --- /dev/null +++ b/vendor/github.com/docker/go-events/MAINTAINERS @@ -0,0 +1,46 @@ +# go-events maintainers file +# +# This file describes who runs the docker/go-events project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "aluzzardi", + "lk4d4", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/go-events/README.md b/vendor/github.com/docker/go-events/README.md new file mode 100644 index 00000000000..0acafc279a3 --- /dev/null +++ b/vendor/github.com/docker/go-events/README.md @@ -0,0 +1,117 @@ +# Docker Events Package + +[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) +[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) + +The Docker `events` package implements a composable event distribution package +for Go. + +Originally created to implement the [notifications in Docker Registry +2](https://github.com/docker/distribution/blob/master/docs/notifications.md), +we've found the pattern to be useful in other applications. This package is +most of the same code with slightly updated interfaces. Much of the internals +have been made available. + +## Usage + +The `events` package centers around a `Sink` type. 
Events are written with + calls to `Sink.Write(event Event)`. Sinks can be wired up in various + configurations to achieve interesting behavior. + + The canonical example is that employed by the + [docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications) + package. Let's say we have a type `httpSink` where we'd like to queue + notifications. As a rule, it should send a single http request and return an + error if it fails: + + ```go + func (h *httpSink) Write(event Event) error { + p, err := json.Marshal(event) + if err != nil { + return err + } + body := bytes.NewReader(p) + resp, err := h.client.Post(h.url, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return errors.New("unexpected status") + } + + return nil +} + +// implement (*httpSink).Close() +``` + +With just that, we can start using components from this package. One can call +`(*httpSink).Write` to send events as the body of a post request to a +configured URL. + +### Retries + +HTTP can be unreliable. The first feature we'd like is to have some retry: + +```go +hs := newHTTPSink(/*...*/) +retry := NewRetryingSink(hs, NewBreaker(5, time.Second)) +``` + +We now have a sink that will retry events against the `httpSink` until they +succeed. The retry will back off for one second after 5 consecutive failures +using the breaker strategy. + +### Queues + +This isn't quite enough. We want a sink that doesn't block while we are +waiting for events to be sent. Let's add a `Queue`: + +```go +queue := NewQueue(retry) +``` + +Now, we have an unbounded queue that will work through all events sent with +`(*Queue).Write`. Events can be added asynchronously to the queue without +blocking the current execution path. This is ideal for use in an http request. + +### Broadcast + +It usually turns out that you want to send to more than one listener. We can +use `Broadcaster` to support this: + +```go +var broadcast = NewBroadcaster() // make it available somewhere in your application. +broadcast.Add(queue) // add your queue! +broadcast.Add(queue2) // and another! +``` + +With the above, we can now call `broadcast.Write` in our http handlers and have +all the events distributed to each queue. Because the events are queued, no +listener blocks another. + +### Extending + +For the most part, the above is sufficient for a lot of applications. However, +extending the above functionality can be done by implementing your own `Sink`. The +behavior and semantics of the sink can be completely dependent on the +application requirements. The interface is provided below for reference: + +```go +type Sink interface { + Write(Event) error + Close() error +} +``` + +Application behavior can be controlled by how `Write` behaves. The examples +above are designed to queue the message and return as quickly as possible. +Other implementations may block until the event is committed to durable +storage. + +## Copyright and license + +Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License, +Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-events/broadcast.go b/vendor/github.com/docker/go-events/broadcast.go new file mode 100644 index 00000000000..5120078dfb8 --- /dev/null +++ b/vendor/github.com/docker/go-events/broadcast.go @@ -0,0 +1,178 @@ +package events + +import ( + "fmt" + "sync" + + "github.com/sirupsen/logrus" +) + +// Broadcaster sends events to multiple, reliable Sinks.
The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan Event + adds chan configureRequest + removes chan configureRequest + + shutdown chan struct{} + closed chan struct{} + once sync.Once +} + +// NewBroadcaster appends one or more sinks to the list of sinks. The +// broadcaster behavior will be affected by the properties of the sink. +// Generally, the sink should accept all messages and deal with reliability on +// its own. Use of EventQueue and RetryingSink should be used here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan Event), + adds: make(chan configureRequest), + removes: make(chan configureRequest), + shutdown: make(chan struct{}), + closed: make(chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts an event to be dispatched to all sinks. This method will never +// fail and should never block (hopefully!). The caller cedes the memory to the +// broadcaster and should not modify it after calling write. +func (b *Broadcaster) Write(event Event) error { + select { + case b.events <- event: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Add the sink to the broadcaster. +// +// The provided sink must be comparable with equality. Typically, this just +// works with a regular pointer type. +func (b *Broadcaster) Add(sink Sink) error { + return b.configure(b.adds, sink) +} + +// Remove the provided sink. +func (b *Broadcaster) Remove(sink Sink) error { + return b.configure(b.removes, sink) +} + +type configureRequest struct { + sink Sink + response chan error +} + +func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { + response := make(chan error, 1) + + for { + select { + case ch <- configureRequest{ + sink: sink, + response: response}: + ch = nil + case err := <-response: + return err + case <-b.closed: + return ErrSinkClosed + } + } +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + b.once.Do(func() { + close(b.shutdown) + }) + + <-b.closed + return nil +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + defer close(b.closed) + remove := func(target Sink) { + for i, sink := range b.sinks { + if sink == target { + b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) + break + } + } + } + + for { + select { + case event := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(event); err != nil { + if err == ErrSinkClosed { + // remove closed sinks + remove(sink) + continue + } + logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: dropping event") + } + } + case request := <-b.adds: + // while we have to iterate for add/remove, common iteration for + // send is faster against slice. 
+ + var found bool + for _, sink := range b.sinks { + if request.sink == sink { + found = true + break + } + } + + if !found { + b.sinks = append(b.sinks, request.sink) + } + // b.sinks[request.sink] = struct{}{} + request.response <- nil + case request := <-b.removes: + remove(request.sink) + request.response <- nil + case <-b.shutdown: + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil && err != ErrSinkClosed { + logrus.WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: closing sink failed") + } + } + return + } + } +} + +func (b *Broadcaster) String() string { + // Serialize copy of this broadcaster without the sync.Once, to avoid + // a data race. + + b2 := map[string]interface{}{ + "sinks": b.sinks, + "events": b.events, + "adds": b.adds, + "removes": b.removes, + + "shutdown": b.shutdown, + "closed": b.closed, + } + + return fmt.Sprint(b2) +} diff --git a/vendor/github.com/docker/go-events/channel.go b/vendor/github.com/docker/go-events/channel.go new file mode 100644 index 00000000000..802cf51ffec --- /dev/null +++ b/vendor/github.com/docker/go-events/channel.go @@ -0,0 +1,61 @@ +package events + +import ( + "fmt" + "sync" +) + +// Channel provides a sink that can be listened on. The writer and channel +// listener must operate in separate goroutines. +// +// Consumers should listen on Channel.C until Closed is closed. +type Channel struct { + C chan Event + + closed chan struct{} + once sync.Once +} + +// NewChannel returns a channel. If buffer is zero, the channel is +// unbuffered. +func NewChannel(buffer int) *Channel { + return &Channel{ + C: make(chan Event, buffer), + closed: make(chan struct{}), + } +} + +// Done returns a channel that will always proceed once the sink is closed. +func (ch *Channel) Done() chan struct{} { + return ch.closed +} + +// Write the event to the channel. Must be called in a separate goroutine from +// the listener. +func (ch *Channel) Write(event Event) error { + select { + case ch.C <- event: + return nil + case <-ch.closed: + return ErrSinkClosed + } +} + +// Close the channel sink. +func (ch *Channel) Close() error { + ch.once.Do(func() { + close(ch.closed) + }) + + return nil +} + +func (ch *Channel) String() string { + // Serialize a copy of the Channel that doesn't contain the sync.Once, + // to avoid a data race. + ch2 := map[string]interface{}{ + "C": ch.C, + "closed": ch.closed, + } + return fmt.Sprint(ch2) +} diff --git a/vendor/github.com/docker/go-events/errors.go b/vendor/github.com/docker/go-events/errors.go new file mode 100644 index 00000000000..56db7c25103 --- /dev/null +++ b/vendor/github.com/docker/go-events/errors.go @@ -0,0 +1,10 @@ +package events + +import "fmt" + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("events: sink closed") +) diff --git a/vendor/github.com/docker/go-events/event.go b/vendor/github.com/docker/go-events/event.go new file mode 100644 index 00000000000..f0f1d9ea5ff --- /dev/null +++ b/vendor/github.com/docker/go-events/event.go @@ -0,0 +1,15 @@ +package events + +// Event marks items that can be sent as events. +type Event interface{} + +// Sink accepts and sends events. +type Sink interface { + // Write an event to the Sink. If no error is returned, the caller will + // assume that all events have been committed to the sink. 
If an error is + // received, the caller may retry sending the event. + Write(event Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/vendor/github.com/docker/go-events/filter.go b/vendor/github.com/docker/go-events/filter.go new file mode 100644 index 00000000000..e6c0eb69dd5 --- /dev/null +++ b/vendor/github.com/docker/go-events/filter.go @@ -0,0 +1,52 @@ +package events + +// Matcher matches events. +type Matcher interface { + Match(event Event) bool +} + +// MatcherFunc implements matcher with just a function. +type MatcherFunc func(event Event) bool + +// Match calls the wrapped function. +func (fn MatcherFunc) Match(event Event) bool { + return fn(event) +} + +// Filter provides an event sink that sends only events that are accepted by a +// Matcher. No methods on filter are goroutine safe. +type Filter struct { + dst Sink + matcher Matcher + closed bool +} + +// NewFilter returns a new filter that will send to events to dst that return +// true for Matcher. +func NewFilter(dst Sink, matcher Matcher) Sink { + return &Filter{dst: dst, matcher: matcher} +} + +// Write an event to the filter. +func (f *Filter) Write(event Event) error { + if f.closed { + return ErrSinkClosed + } + + if f.matcher.Match(event) { + return f.dst.Write(event) + } + + return nil +} + +// Close the filter and allow no more events to pass through. +func (f *Filter) Close() error { + // TODO(stevvooe): Not all sinks should have Close. + if f.closed { + return nil + } + + f.closed = true + return f.dst.Close() +} diff --git a/vendor/github.com/docker/go-events/queue.go b/vendor/github.com/docker/go-events/queue.go new file mode 100644 index 00000000000..4bb770afc2e --- /dev/null +++ b/vendor/github.com/docker/go-events/queue.go @@ -0,0 +1,111 @@ +package events + +import ( + "container/list" + "sync" + + "github.com/sirupsen/logrus" +) + +// Queue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type Queue struct { + dst Sink + events *list.List + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// NewQueue returns a queue to the provided Sink dst. +func NewQueue(dst Sink) *Queue { + eq := Queue{ + dst: dst, + events: list.New(), + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// been closed. +func (eq *Queue) Write(event Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + eq.events.PushBack(event) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shutsdown the event queue, flushing +func (eq *Queue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return nil + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + return eq.dst.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *Queue) run() { + for { + event := eq.next() + + if event == nil { + return // nil block means event queue is closed. + } + + if err := eq.dst.Write(event); err != nil { + // TODO(aaronl): Dropping events could be bad depending + // on the application. We should have a way of + // communicating this condition. However, logging + // at a log level above debug may not be appropriate. 
+ // Eventually, go-events should not use logrus at all, + // and should bubble up conditions like this through + // error values. + logrus.WithFields(logrus.Fields{ + "event": event, + "sink": eq.dst, + }).WithError(err).Debug("eventqueue: dropped event") + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. When closed, a nil slice will be returned. +func (eq *Queue) next() Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.(Event) + eq.events.Remove(front) + + return block +} diff --git a/vendor/github.com/docker/go-events/retry.go b/vendor/github.com/docker/go-events/retry.go new file mode 100644 index 00000000000..b7f0a542252 --- /dev/null +++ b/vendor/github.com/docker/go-events/retry.go @@ -0,0 +1,260 @@ +package events + +import ( + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" +) + +// RetryingSink retries the write until success or an ErrSinkClosed is +// returned. Underlying sink must have p > 0 of succeeding or the sink will +// block. Retry is configured with a RetryStrategy. Concurrent calls to a +// retrying sink are serialized through the sink, meaning that if one is +// in-flight, another will not proceed. +type RetryingSink struct { + sink Sink + strategy RetryStrategy + closed chan struct{} + once sync.Once +} + +// NewRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. Parameters threshold and backoff adjust the behavior of the +// circuit breaker. +func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink { + rs := &RetryingSink{ + sink: sink, + strategy: strategy, + closed: make(chan struct{}), + } + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *RetryingSink) Write(event Event) error { + logger := logrus.WithField("event", event) + +retry: + select { + case <-rs.closed: + return ErrSinkClosed + default: + } + + if backoff := rs.strategy.Proceed(event); backoff > 0 { + select { + case <-time.After(backoff): + // TODO(stevvooe): This branch holds up the next try. Before, we + // would simply break to the "retry" label and then possibly wait + // again. However, this requires all retry strategies to have a + // large probability of probing the sync for success, rather than + // just backing off and sending the request. + case <-rs.closed: + return ErrSinkClosed + } + } + + if err := rs.sink.Write(event); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logger := logger.WithError(err) // shadow!! + + if rs.strategy.Failure(event, err) { + logger.Errorf("retryingsink: dropped event") + return nil + } + + logger.Errorf("retryingsink: error writing event, retrying") + goto retry + } + + rs.strategy.Success(event) + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *RetryingSink) Close() error { + rs.once.Do(func() { + close(rs.closed) + }) + + return nil +} + +func (rs *RetryingSink) String() string { + // Serialize a copy of the RetryingSink without the sync.Once, to avoid + // a data race. 
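Filter and Queue above are designed to be layered in front of a slow or unreliable sink; a small sketch of that composition, assuming the same events import as the previous sketch (the string-matching predicate is an illustrative assumption):

// wireSinks buffers events so writers never block, and drops non-string events.
func wireSinks() {
	isString := events.MatcherFunc(func(ev events.Event) bool {
		_, ok := ev.(string)
		return ok
	})

	dst := events.NewChannel(16)           // final consumer
	queued := events.NewQueue(dst)         // unbounded, asynchronous hand-off
	filtered := events.NewFilter(queued, isString)

	_ = filtered.Write("kept") // reaches dst via the queue
	_ = filtered.Write(42)     // rejected by the matcher; returns nil, not an error
	_ = filtered.Close()       // closes the queue, which flushes and then closes dst
}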
+ rs2 := map[string]interface{}{ + "sink": rs.sink, + "strategy": rs.strategy, + "closed": rs.closed, + } + return fmt.Sprint(rs2) +} + +// RetryStrategy defines a strategy for retrying event sink writes. +// +// All methods should be goroutine safe. +type RetryStrategy interface { + // Proceed is called before every event send. If proceed returns a + // positive, non-zero integer, the retryer will back off by the provided + // duration. + // + // An event is provided, by may be ignored. + Proceed(event Event) time.Duration + + // Failure reports a failure to the strategy. If this method returns true, + // the event should be dropped. + Failure(event Event, err error) bool + + // Success should be called when an event is sent successfully. + Success(event Event) +} + +// Breaker implements a circuit breaker retry strategy. +// +// The current implementation never drops events. +type Breaker struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + mu sync.Mutex +} + +var _ RetryStrategy = &Breaker{} + +// NewBreaker returns a breaker that will backoff after the threshold has been +// tripped. A Breaker is thread safe and may be shared by many goroutines. +func NewBreaker(threshold int, backoff time.Duration) *Breaker { + return &Breaker{ + threshold: threshold, + backoff: backoff, + } +} + +// Proceed checks the failures against the threshold. +func (b *Breaker) Proceed(event Event) time.Duration { + b.mu.Lock() + defer b.mu.Unlock() + + if b.recent < b.threshold { + return 0 + } + + return b.last.Add(b.backoff).Sub(time.Now()) +} + +// Success resets the breaker. +func (b *Breaker) Success(event Event) { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent = 0 + b.last = time.Time{} +} + +// Failure records the failure and latest failure time. +func (b *Breaker) Failure(event Event, err error) bool { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent++ + b.last = time.Now().UTC() + return false // never drop events. +} + +var ( + // DefaultExponentialBackoffConfig provides a default configuration for + // exponential backoff. + DefaultExponentialBackoffConfig = ExponentialBackoffConfig{ + Base: time.Second, + Factor: time.Second, + Max: 20 * time.Second, + } +) + +// ExponentialBackoffConfig configures backoff parameters. +// +// Note that these parameters operate on the upper bound for choosing a random +// value. For example, at Base=1s, a random value in [0,1s) will be chosen for +// the backoff value. +type ExponentialBackoffConfig struct { + // Base is the minimum bound for backing off after failure. + Base time.Duration + + // Factor sets the amount of time by which the backoff grows with each + // failure. + Factor time.Duration + + // Max is the absolute maxiumum bound for a single backoff. + Max time.Duration +} + +// ExponentialBackoff implements random backoff with exponentially increasing +// bounds as the number consecutive failures increase. +type ExponentialBackoff struct { + failures uint64 // consecutive failure counter (needs to be 64-bit aligned) + config ExponentialBackoffConfig +} + +// NewExponentialBackoff returns an exponential backoff strategy with the +// desired config. If config is nil, the default is returned. +func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff { + return &ExponentialBackoff{ + config: config, + } +} + +// Proceed returns the next randomly bound exponential backoff time. 
+func (b *ExponentialBackoff) Proceed(event Event) time.Duration { + return b.backoff(atomic.LoadUint64(&b.failures)) +} + +// Success resets the failures counter. +func (b *ExponentialBackoff) Success(event Event) { + atomic.StoreUint64(&b.failures, 0) +} + +// Failure increments the failure counter. +func (b *ExponentialBackoff) Failure(event Event, err error) bool { + atomic.AddUint64(&b.failures, 1) + return false +} + +// backoff calculates the amount of time to wait based on the number of +// consecutive failures. +func (b *ExponentialBackoff) backoff(failures uint64) time.Duration { + if failures <= 0 { + // proceed normally when there are no failures. + return 0 + } + + factor := b.config.Factor + if factor <= 0 { + factor = DefaultExponentialBackoffConfig.Factor + } + + backoff := b.config.Base + factor*time.Duration(1<<(failures-1)) + + max := b.config.Max + if max <= 0 { + max = DefaultExponentialBackoffConfig.Max + } + + if backoff > max || backoff < 0 { + backoff = max + } + + // Choose a uniformly distributed value from [0, backoff). + return time.Duration(rand.Int63n(int64(backoff))) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml index d5b0fe128a7..9921997daff 100644 --- a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml +++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml @@ -5,4 +5,4 @@ linters: disable-all: true enable: - gofumpt - - gci + - goimports diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE index 707a0ed49b2..20837167a9a 100644 --- a/vendor/github.com/fsouza/go-dockerclient/LICENSE +++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2013-2021, go-dockerclient authors +Copyright (c) go-dockerclient authors All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile index 431458441da..2f5d9fcc6a9 100644 --- a/vendor/github.com/fsouza/go-dockerclient/Makefile +++ b/vendor/github.com/fsouza/go-dockerclient/Makefile @@ -7,12 +7,12 @@ test: pretest gotest .PHONY: golangci-lint golangci-lint: - cd /tmp && GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@latest + go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest golangci-lint run .PHONY: staticcheck staticcheck: - cd /tmp && GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@master + go install honnef.co/go/tools/cmd/staticcheck@master staticcheck ./... 
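Rounding out the retry strategies vendored above: a sketch of wrapping an unreliable sink, assuming the same events import plus "time" (the sink parameter is hypothetical). With DefaultExponentialBackoffConfig (Base=1s, Factor=1s, Max=20s) the wait after n consecutive failures is drawn uniformly from [0, Base+Factor*2^(n-1)), i.e. upper bounds of 2s, 3s, 5s, 9s, and so on, capped at 20s:

// makeRetrying wraps an unreliable sink with retry-on-failure semantics.
func makeRetrying(unreliable events.Sink) *events.RetryingSink {
	// Circuit-breaker strategy: after 3 consecutive failures, wait 500ms
	// before probing the sink again. Breaker never drops events.
	breaker := events.NewBreaker(3, 500*time.Millisecond)

	// Randomized exponential alternative:
	// strategy := events.NewExponentialBackoff(events.DefaultExponentialBackoffConfig)
	// return events.NewRetryingSink(unreliable, strategy)

	return events.NewRetryingSink(unreliable, breaker)
}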
.PHONY: lint diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go index bc949dc3590..d867c47363f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/auth.go +++ b/vendor/github.com/fsouza/go-dockerclient/auth.go @@ -10,7 +10,6 @@ import ( "encoding/json" "errors" "io" - "io/ioutil" "net/http" "os" "os/exec" @@ -272,7 +271,7 @@ func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) { return authStatus, err } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return authStatus, err } @@ -318,7 +317,7 @@ func NewAuthConfigurationsFromCredsHelpers(registry string) (*AuthConfiguration, func getHelperProviderFromDockerCfg(pathsToTry []string, registry string) (string, error) { for _, path := range pathsToTry { - content, err := ioutil.ReadFile(path) + content, err := os.ReadFile(path) if err != nil { // if we can't read the file keep going continue diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go index d0814a5c0bd..1bbf611a23f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client.go +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -17,7 +17,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/httputil" @@ -240,19 +239,19 @@ func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString stri var keyPEMBlock []byte var caPEMCert []byte if _, err := os.Stat(cert); !os.IsNotExist(err) { - certPEMBlock, err = ioutil.ReadFile(cert) + certPEMBlock, err = os.ReadFile(cert) if err != nil { return nil, err } } if _, err := os.Stat(key); !os.IsNotExist(err) { - keyPEMBlock, err = ioutil.ReadFile(key) + keyPEMBlock, err = os.ReadFile(key) if err != nil { return nil, err } } if _, err := os.Stat(ca); !os.IsNotExist(err) { - caPEMCert, err = ioutil.ReadFile(ca) + caPEMCert, err = os.ReadFile(ca) if err != nil { return nil, err } @@ -565,10 +564,10 @@ func (c *Client) streamURL(method, url string, streamOptions streamOptions) erro protocol := c.endpointURL.Scheme address := c.endpointURL.Path if streamOptions.stdout == nil { - streamOptions.stdout = ioutil.Discard + streamOptions.stdout = io.Discard } if streamOptions.stderr == nil { - streamOptions.stderr = ioutil.Discard + streamOptions.stderr = io.Discard } if protocol == unixProtocol || protocol == namedPipeProtocol { @@ -798,10 +797,10 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close // will "hang" until the container terminates, even though you're not reading // stdout/stderr if hijackOptions.stdout == nil { - hijackOptions.stdout = ioutil.Discard + hijackOptions.stdout = io.Discard } if hijackOptions.stderr == nil { - hijackOptions.stderr = ioutil.Discard + hijackOptions.stderr = io.Discard } go func() { @@ -1024,7 +1023,7 @@ func newError(resp *http.Response) *Error { Message string `json:"message"` } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)} } diff --git a/vendor/github.com/fsouza/go-dockerclient/client_unix.go b/vendor/github.com/fsouza/go-dockerclient/client_unix.go index cd2034304be..e9f13f3948d 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client_unix.go +++ b/vendor/github.com/fsouza/go-dockerclient/client_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a 
BSD-style // license that can be found in the LICENSE file. +//go:build !windows // +build !windows package docker diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go index be45607b90f..7951e362752 100644 --- a/vendor/github.com/fsouza/go-dockerclient/plugin.go +++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go @@ -8,7 +8,7 @@ import ( "context" "encoding/json" "errors" - "io/ioutil" + "io" "net/http" ) @@ -53,7 +53,7 @@ func (c *Client) InstallPlugins(opts InstallPluginOptions) error { defer resp.Body.Close() // PullPlugin streams back the progress of the pull, we must consume the whole body // otherwise the pull will be canceled on the engine. - if _, err := ioutil.ReadAll(resp.Body); err != nil { + if _, err := io.ReadAll(resp.Body); err != nil { return err } return nil @@ -297,7 +297,7 @@ func (c *Client) RemovePlugin(opts RemovePluginOptions) (*PluginDetail, error) { return nil, err } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -386,7 +386,7 @@ func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) { return "", err } defer resp.Body.Close() - containerNameBytes, err := ioutil.ReadAll(resp.Body) + containerNameBytes, err := io.ReadAll(resp.Body) if err != nil { return "", err } diff --git a/vendor/github.com/fsouza/go-dockerclient/system.go b/vendor/github.com/fsouza/go-dockerclient/system.go index 46b9faf00e2..f1791f6cea5 100644 --- a/vendor/github.com/fsouza/go-dockerclient/system.go +++ b/vendor/github.com/fsouza/go-dockerclient/system.go @@ -9,7 +9,6 @@ import ( // VolumeUsageData represents usage data from the docker system api // More Info Here https://dockr.ly/2PNzQyO type VolumeUsageData struct { - // The number of containers referencing this volume. This field // is set to `-1` if the reference-count is not available. 
// diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go index f27a7bbf21f..cbe72206550 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tar.go +++ b/vendor/github.com/fsouza/go-dockerclient/tar.go @@ -7,7 +7,6 @@ package docker import ( "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -112,7 +111,7 @@ func validateContextDirectory(srcPath string, excludes []string) error { func parseDockerignore(root string) ([]string, error) { var excludes []string - ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) + ignore, err := os.ReadFile(path.Join(root, ".dockerignore")) if err != nil && !os.IsNotExist(err) { return excludes, fmt.Errorf("error reading .dockerignore: %w", err) } diff --git a/vendor/github.com/jinzhu/copier/copier.go b/vendor/github.com/jinzhu/copier/copier.go index 412ff549739..6dc9600c860 100644 --- a/vendor/github.com/jinzhu/copier/copier.go +++ b/vendor/github.com/jinzhu/copier/copier.go @@ -24,6 +24,13 @@ const ( // Denotes that the value as been copied hasCopied + + // Some default converter types for a nicer syntax + String string = "" + Bool bool = false + Int int = 0 + Float32 float32 = 0 + Float64 float64 = 0 ) // Option sets copy options @@ -32,6 +39,18 @@ type Option struct { // struct having all it's fields set to their zero values respectively (see IsZero() in reflect/value.go) IgnoreEmpty bool DeepCopy bool + Converters []TypeConverter +} + +type TypeConverter struct { + SrcType interface{} + DstType interface{} + Fn func(src interface{}) (interface{}, error) +} + +type converterPair struct { + SrcType reflect.Type + DstType reflect.Type } // Tag Flags @@ -59,12 +78,27 @@ func CopyWithOption(toValue interface{}, fromValue interface{}, opt Option) (err func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) { var ( - isSlice bool - amount = 1 - from = indirect(reflect.ValueOf(fromValue)) - to = indirect(reflect.ValueOf(toValue)) + isSlice bool + amount = 1 + from = indirect(reflect.ValueOf(fromValue)) + to = indirect(reflect.ValueOf(toValue)) + converters map[converterPair]TypeConverter ) + // save convertes into map for faster lookup + for i := range opt.Converters { + if converters == nil { + converters = make(map[converterPair]TypeConverter) + } + + pair := converterPair{ + SrcType: reflect.TypeOf(opt.Converters[i].SrcType), + DstType: reflect.TypeOf(opt.Converters[i].DstType), + } + + converters[pair] = opt.Converters[i] + } + if !to.CanAddr() { return ErrInvalidCopyDestination } @@ -113,13 +147,16 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) for _, k := range from.MapKeys() { toKey := indirect(reflect.New(toType.Key())) - if !set(toKey, k, opt.DeepCopy) { + if !set(toKey, k, opt.DeepCopy, converters) { return fmt.Errorf("%w map, old key: %v, new key: %v", ErrNotSupported, k.Type(), toType.Key()) } - elemType, _ := indirectType(toType.Elem()) + elemType := toType.Elem() + if elemType.Kind() != reflect.Slice { + elemType, _ = indirectType(elemType) + } toValue := indirect(reflect.New(elemType)) - if !set(toValue, from.MapIndex(k), opt.DeepCopy) { + if !set(toValue, from.MapIndex(k), opt.DeepCopy, converters) { if err = copier(toValue.Addr().Interface(), from.MapIndex(k).Interface(), opt); err != nil { return err } @@ -148,7 +185,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem())) } - if !set(to.Index(i), from.Index(i), 
opt.DeepCopy) { + if !set(to.Index(i), from.Index(i), opt.DeepCopy, converters) { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt) if err != nil { @@ -203,6 +240,8 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) // check source if source.IsValid() { + copyUnexportedStructFields(dest, source) + // Copy from source field to dest field or method fromTypeFields := deepFields(fromType) for _, field := range fromTypeFields { @@ -249,7 +288,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) toField := dest.FieldByName(destFieldName) if toField.IsValid() { if toField.CanSet() { - if !set(toField, fromField, opt.DeepCopy) { + if !set(toField, fromField, opt.DeepCopy, converters) { if err := copier(toField.Addr().Interface(), fromField.Interface(), opt); err != nil { return err } @@ -291,7 +330,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if toField := dest.FieldByName(destFieldName); toField.IsValid() && toField.CanSet() { values := fromMethod.Call([]reflect.Value{}) if len(values) >= 1 { - set(toField, values[0], opt.DeepCopy) + set(toField, values[0], opt.DeepCopy, converters) } } } @@ -303,7 +342,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if to.Len() < i+1 { to.Set(reflect.Append(to, dest.Addr())) } else { - if !set(to.Index(i), dest.Addr(), opt.DeepCopy) { + if !set(to.Index(i), dest.Addr(), opt.DeepCopy, converters) { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), dest.Addr().Interface(), opt) if err != nil { @@ -315,7 +354,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if to.Len() < i+1 { to.Set(reflect.Append(to, dest)) } else { - if !set(to.Index(i), dest, opt.DeepCopy) { + if !set(to.Index(i), dest, opt.DeepCopy, converters) { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), dest.Interface(), opt) if err != nil { @@ -334,6 +373,24 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) return } +func copyUnexportedStructFields(to, from reflect.Value) { + if from.Kind() != reflect.Struct || to.Kind() != reflect.Struct || !from.Type().AssignableTo(to.Type()) { + return + } + + // create a shallow copy of 'to' to get all fields + tmp := indirect(reflect.New(to.Type())) + tmp.Set(from) + + // revert exported fields + for i := 0; i < to.NumField(); i++ { + if tmp.Field(i).CanSet() { + tmp.Field(i).Set(to.Field(i)) + } + } + to.Set(tmp) +} + func shouldIgnore(v reflect.Value, ignoreEmpty bool) bool { if !ignoreEmpty { return false @@ -348,10 +405,15 @@ func deepFields(reflectType reflect.Type) []reflect.StructField { for i := 0; i < reflectType.NumField(); i++ { v := reflectType.Field(i) - if v.Anonymous { - fields = append(fields, deepFields(v.Type)...) - } else { + // PkgPath is the package path that qualifies a lower case (unexported) + // field name. It is empty for upper case (exported) field names. + // See https://golang.org/ref/spec#Uniqueness_of_identifiers + if v.PkgPath == "" { fields = append(fields, v) + if v.Anonymous { + // also consider fields of anonymous fields as fields of the root + fields = append(fields, deepFields(v.Type)...) 
+ } } } @@ -376,8 +438,14 @@ func indirectType(reflectType reflect.Type) (_ reflect.Type, isPtr bool) { return reflectType, isPtr } -func set(to, from reflect.Value, deepCopy bool) bool { +func set(to, from reflect.Value, deepCopy bool, converters map[converterPair]TypeConverter) bool { if from.IsValid() { + if ok, err := lookupAndCopyWithConverter(to, from, converters); err != nil { + return false + } else if ok { + return true + } + if to.Kind() == reflect.Ptr { // set `to` to nil if from is nil if from.Kind() == reflect.Ptr && from.IsNil() { @@ -411,6 +479,9 @@ func set(to, from reflect.Value, deepCopy bool) bool { toKind = reflect.TypeOf(to.Interface()).Kind() } } + if from.Kind() == reflect.Ptr && from.IsNil() { + return true + } if toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice { return false } @@ -452,7 +523,7 @@ func set(to, from reflect.Value, deepCopy bool) bool { to.Set(rv) } } else if from.Kind() == reflect.Ptr { - return set(to, from.Elem(), deepCopy) + return set(to, from.Elem(), deepCopy, converters) } else { return false } @@ -461,6 +532,33 @@ func set(to, from reflect.Value, deepCopy bool) bool { return true } +// lookupAndCopyWithConverter looks up the type pair, on success the TypeConverter Fn func is called to copy src to dst field. +func lookupAndCopyWithConverter(to, from reflect.Value, converters map[converterPair]TypeConverter) (copied bool, err error) { + pair := converterPair{ + SrcType: from.Type(), + DstType: to.Type(), + } + + if cnv, ok := converters[pair]; ok { + result, err := cnv.Fn(from.Interface()) + + if err != nil { + return false, err + } + + if result != nil { + to.Set(reflect.ValueOf(result)) + } else { + // in case we've got a nil value to copy + to.Set(reflect.Zero(to.Type())) + } + + return true, nil + } + + return false, nil +} + // parseTags Parses struct tags and returns uint8 bit flags. func parseTags(tag string) (flg uint8, name string, err error) { for _, t := range strings.Split(tag, ",") { diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore index b35f8449bf2..d31b3781527 100644 --- a/vendor/github.com/klauspost/compress/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -23,3 +23,10 @@ _testmain.go *.test *.prof /s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index e8ff994f8bc..5b7cf781a34 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -17,6 +17,49 @@ This package provides various compression algorithms. 
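The copier changes vendored above add user-supplied converters that are looked up by (source, destination) type pair during set; a minimal sketch of the new Converters option (the time-to-string conversion is an illustrative assumption, and copier.String is one of the typed zero-value helpers introduced by this change):

package main

import (
	"fmt"
	"time"

	"github.com/jinzhu/copier"
)

type Src struct{ When time.Time }
type Dst struct{ When string }

func main() {
	opt := copier.Option{
		Converters: []copier.TypeConverter{{
			SrcType: time.Time{},
			DstType: copier.String,
			Fn: func(src interface{}) (interface{}, error) {
				t, ok := src.(time.Time)
				if !ok {
					return nil, fmt.Errorf("not a time.Time")
				}
				return t.Format(time.RFC3339), nil
			},
		}},
	}

	var dst Dst
	if err := copier.CopyWithOption(&dst, Src{When: time.Unix(0, 0).UTC()}, opt); err != nil {
		panic(err)
	}
	fmt.Println(dst.When) // 1970-01-01T00:00:00Z
}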
# changelog +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +
+<details> + <summary>See Details</summary> +Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster in asynchronous mode, since the goroutine allocation much more effectively splits the workload. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to test when upgrading. +</details>
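A sketch of the synchronous mode described in these release notes, using the zstd package's standard concurrency options (assumed from the upstream API, not shown in this diff):

package main

import (
	"bytes"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Concurrency 1 selects the goroutine-free path on both sides.
	enc, _ := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	_, _ = enc.Write([]byte("hello, synchronous zstd"))
	_ = enc.Close()

	dec, _ := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	defer dec.Close()
	_, _ = io.Copy(os.Stdout, dec)
}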
+ +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + * Jan 11, 2022 (v1.14.1) * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) @@ -53,6 +96,9 @@ This package provides various compression algorithms. * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+<details> + <summary>See changes to v1.12.x</summary> + * May 25, 2021 (v1.12.3) * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) @@ -74,9 +120,10 @@ This package provides various compression algorithms. * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +</details>
- <summary>See changes prior to v1.12.1</summary> + <summary>See changes to v1.11.x</summary> * Mar 26, 2021 (v1.11.13) * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) @@ -135,7 +182,7 @@ This package provides various compression algorithms.
- <summary>See changes prior to v1.11.0</summary> + <summary>See changes to v1.10.x</summary> * July 8, 2020 (v1.10.11) * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) @@ -297,11 +344,6 @@ This package provides various compression algorithms. # deflate usage -* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). -* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). -* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). -* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) - The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: | old import | new import | Documentation @@ -323,6 +365,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers consider using the stateless compress described below. +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 0b2e54972cd..d55ea2a7759 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -179,7 +179,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 { // matchlenLong will return the match length between offsets and t in src. // It is assumed that s > t, that t >=0 and s < len(src). func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDecode { + if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index fd49efd75b1..25f6d1108fc 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "fmt" "io" + "math" ) const ( @@ -24,6 +25,10 @@ const ( codegenCodeCount = 19 badCode = 255 + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + // bufferFlushSize indicates the buffer size // after which bytes are flushed to the writer. // Should preferably be a multiple of 6, since @@ -36,8 +41,11 @@ const ( bufferSize = bufferFlushSize + 8 ) +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + // The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]int8{ +var lengthExtraBits = [32]uint8{ /* 257 */ 0, 0, 0, /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, @@ -51,6 +59,9 @@ var lengthBase = [32]uint8{ 64, 80, 96, 112, 128, 160, 192, 224, 255, } +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + // offset code word extra bits. var offsetExtraBits = [32]int8{ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, @@ -78,10 +89,10 @@ func init() { for i := range offsetCombined[:] { // Don't use extended window values...
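The README's drop-in claim above amounts to an import swap; a sketch against the flate package, whose API mirrors the standard library (function name is mine):

import (
	"io"

	flate "github.com/klauspost/compress/flate" // was: "compress/flate"
)

func compress(dst io.Writer, src []byte) error {
	w, err := flate.NewWriter(dst, flate.BestSpeed)
	if err != nil {
		return err
	}
	if _, err := w.Write(src); err != nil {
		return err
	}
	return w.Close()
}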
- if offsetBase[i] > 0x006000 { + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { continue } - offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i]) + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) } } @@ -97,7 +108,7 @@ type huffmanBitWriter struct { // Data waiting to be written is bytes[0:nbytes] // and then the low nbits of bits. bits uint64 - nbits uint16 + nbits uint8 nbytes uint8 lastHuffMan bool literalEncoding *huffmanEncoder @@ -215,7 +226,7 @@ func (w *huffmanBitWriter) write(b []byte) { _, w.err = w.writer.Write(b) } -func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { w.bits |= uint64(b) << (w.nbits & 63) w.nbits += nb if w.nbits >= 48 { @@ -571,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { // Fixed Huffman baseline. var literalEncoding = fixedLiteralEncoding var offsetEncoding = fixedOffsetEncoding - var size = w.fixedSize(extraBits) + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } // Dynamic Huffman? var numCodegens int @@ -672,19 +686,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b size = reuseSize } - if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) return } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return } // Check if we get a reasonable size decrease. if storable && ssize <= size { @@ -717,19 +733,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) // Store predefined, if we don't get a reasonable improvement. - if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { - // Store bytes, if we don't get an improvement. - if storable && ssize <= preSize { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. 
+ if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) return } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return } if storable && ssize <= size { @@ -833,9 +851,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits, nbits, nbytes := w.bits, w.nbits, w.nbytes for _, t := range tokens { - if t < matchType { + if t < 256 { //w.writeCode(lits[t.literal()]) - c := lits[t.literal()] + c := lits[t] bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { @@ -858,12 +876,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the length length := t.length() - lengthCode := lengthCode(length) + lengthCode := lengthCode(length) & 31 if false { - w.writeCode(lengths[lengthCode&31]) + w.writeCode(lengths[lengthCode]) } else { // inlined - c := lengths[lengthCode&31] + c := lengths[lengthCode] bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { @@ -883,10 +901,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } - extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) - if extraLengthBits > 0 { + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode&31]) + extraLength := int32(length - lengthBase[lengthCode]) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { @@ -907,10 +925,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } // Write the offset offset := t.offset() - offsetCode := offset >> 16 - offset &= matchOffsetOnlyMask + offsetCode := (offset >> 16) & 31 if false { - w.writeCode(offs[offsetCode&31]) + w.writeCode(offs[offsetCode]) } else { // inlined c := offs[offsetCode] @@ -932,11 +949,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } } - offsetComb := offsetCombined[offsetCode] - if offsetComb > 1<<16 { + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63) - nbits += uint16(offsetComb >> 16) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1002,6 +1020,29 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // https://stackoverflow.com/a/25454430 const guessHeaderSizeBits = 70 * 8 histogram(input, w.literalFreq[:numLiterals], fill) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... 
+ w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } w.literalFreq[endBlockMarker] = 1 w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) if fill { @@ -1019,8 +1060,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { estBits += estBits >> w.logNewTablePenalty // Store bytes, if we don't get a reasonable improvement. - ssize, storable := w.storedSize(input) if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -1031,7 +1074,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { if estBits < reuseSize { if debugDeflate { - //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") } // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) @@ -1065,6 +1108,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Go 1.16 LOVES having these on stack. At least 1.5x the speed. bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } // Unroll, write 3 codes/loop. // Fastest number of unrolls. for len(input) > 3 { @@ -1074,13 +1120,16 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) bits >>= (n * 8) & 63 nbits -= n * 8 - nbytes += uint8(n) + nbytes += n } if nbytes >= bufferFlushSize { if w.err != nil { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } @@ -1096,13 +1145,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... for _, t := range input { - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= uint64(c.code) << (nbits & 63) - nbits += c.len - if debugDeflate { - count += int(c.len) - } if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1114,17 +1156,33 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= uint64(c.code) << (nbits & 63) + nbits += c.len + if debugDeflate { + count += int(c.len) + } } // Restore... w.bits, w.nbits, w.nbytes = bits, nbits, nbytes if debugDeflate { - fmt.Println("wrote", count/8, "bytes") + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. + if w.nbits >= 48 { + w.writeOutBits() } + if eof || sync { w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index f35e00261d3..9ab497c275b 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -17,7 +17,8 @@ const ( // hcode is a huffman code with a bit code and bit length. 
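The quick incompressibility check added to writeBlockHuff above applies to storable blocks larger than 1024 bytes: it sums squared deviations of the byte histogram from the uniform mean and emits a stored block when the sum stays below 2*len(input). The same test as a standalone sketch (function name and packaging are mine):

// looksIncompressible mirrors the histogram test above: near-uniform
// byte frequencies mean Huffman coding is unlikely to help.
func looksIncompressible(input []byte) bool {
	var hist [256]int
	for _, b := range input {
		hist[b]++
	}
	avg := float64(len(input)) / 256
	max := float64(len(input) * 2)
	abs := 0.0
	for _, v := range hist {
		diff := float64(v) - avg
		abs += diff * diff
		if abs > max {
			break // skewed enough to be worth compressing
		}
	}
	return abs < max // true: store the block verbatim
}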
type hcode struct { - code, len uint16 + code uint16 + len uint8 } type huffmanEncoder struct { @@ -56,7 +57,7 @@ type levelInfo struct { } // set sets the code and length of an hcode. -func (h *hcode) set(code uint16, length uint16) { +func (h *hcode) set(code uint16, length uint8) { h.len = length h.code = code } @@ -80,7 +81,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { var ch uint16 for ch = 0; ch < literalCount; ch++ { var bits uint16 - var size uint16 + var size uint8 switch { case ch < 144: // size 8, 000110000 .. 10111111 @@ -99,7 +100,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { bits = ch + 192 - 280 size = 8 } - codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + codes[ch] = hcode{code: reverseBits(bits, size), len: size} } return h } @@ -187,14 +188,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // of the level j ancestor. var leafCounts [maxBitsLimit][maxBitsLimit]int32 + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + for level := int32(1); level <= maxBits; level++ { // For every level, the first two items are the first two characters. // We initialize the levels as if we had already figured this out. levels[level] = levelInfo{ level: level, - lastFreq: int32(list[1].freq), - nextCharFreq: int32(list[2].freq), - nextPairFreq: int32(list[0].freq) + int32(list[1].freq), + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, } leafCounts[level][level] = 2 if level == 1 { @@ -205,8 +211,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // We need a total of 2*n - 2 items at top level and have already generated 2. levels[maxBits].needed = 2*n - 4 - level := maxBits - for { + level := uint32(maxBits) + for level < 16 { l := &levels[level] if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { // We've run out of both leafs and pairs. @@ -238,7 +244,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // more values in the level below l.lastFreq = l.nextPairFreq // Take leaf counts from the lower level, except counts[level] remains the same. - copy(leafCounts[level][:level], leafCounts[level-1][:level]) + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } levels[l.level-1].needed = 2 } @@ -296,7 +308,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN sortByLiteral(chunk) for _, node := range chunk { - h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint8(n)} code++ } list = list[0 : len(list)-int(bits)] @@ -309,6 +321,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // maxBits The maximum number of bits to use for any literal. 
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] // Number of non-zero literals count := 0 // Set list to be the set of all non-zero literals and their frequencies @@ -317,11 +330,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list[count] = literalNode{uint16(i), f} count++ } else { - list[count] = literalNode{} - h.codes[i].len = 0 + codes[i].len = 0 } } - list[len(freq)] = literalNode{} + list[count] = literalNode{} list = list[:count] if count <= 2 { diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index d5f62f6a2ca..414c0bea9fa 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -36,6 +36,13 @@ type lengthExtra struct { var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + // Initialize the fixedHuffmanDecoder only once upon first use. var fixedOnce sync.Once var fixedHuffmanDecoder huffmanDecoder @@ -559,221 +566,6 @@ func (f *decompressor) readHuffman() error { return nil } -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBlockGeneric() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var n uint // number of bits extra - var length int - var err error - switch { - case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric - f.stepState = stateInit - return - } - goto readLiteral - case v == 256: - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 - case v < maxNumLit: - length = 258 - n = 0 - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - return - } - if n > 0 { - for f.nb < n { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } - - var dist uint32 - if f.hd == nil { - for f.nb < 5 { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 - } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) - } - f.err = err - return - } - dist = uint32(sym) - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb<nb:", err) - } - f.err = err - return - } - } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb - dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra - default: - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { - if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3.
- { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work - f.stepState = stateDict - return - } - goto readLiteral - } -} - // Copy a single uncompressed data block from input to output. func (f *decompressor) dataBlock() { // Uncompressed. diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index cc6db27925c..8d632cea0f5 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() { ) fr := f.r.(*bytes.Buffer) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -39,41 +44,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -88,10 +87,12 @@ readLiteral: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -101,9 +102,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -111,25 +113,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb,
fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -137,12 +141,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. @@ -152,38 +156,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -197,9 +198,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -224,6 +228,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -248,10 +253,12 @@ copyHistory: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() { ) fr := f.r.(*bytes.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -283,41 +295,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -332,10 +338,12 @@ readLiteral: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -345,9 +353,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -355,25 +364,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -381,12 +392,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -396,38 +407,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -441,9 +449,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -468,6 +479,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -492,10 +504,12 @@ copyHistory: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() { ) fr := f.r.(*bufio.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -527,41 +546,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -576,10 +589,12 @@ readLiteral: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBufioReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -589,9 +604,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -599,25 +615,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -625,12 +643,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -640,38 +658,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -685,9 +700,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -712,6 +730,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -736,10 +755,12 @@ copyHistory: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBufioReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() { ) fr := f.r.(*strings.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -771,41 +797,286 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below.
n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanGenericReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -818,12 +1089,14 @@ readLiteral: f.dict.writeByte(byte(v)) if f.dict.availWrite() == 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader + f.step = (*decompressor).huffmanGenericReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -833,9 +1106,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -843,25 +1117,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -869,12 +1145,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -884,38 +1160,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -929,9 +1202,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -956,6 +1232,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -978,12 +1255,14 @@ copyHistory: if f.dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.step = (*decompressor).huffmanGenericReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } func (f *decompressor) huffmanBlockDecoder() func() { @@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() { return f.huffmanBufioReader case *strings.Reader: return f.huffmanStringsReader + case Reader: + return f.huffmanGenericReader default: - return f.huffmanBlockGeneric + return f.huffmanGenericReader } } diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 1e5eea3968a..0f14f8d63b4 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,6 +1,10 @@ package flate -import "fmt" +import ( + "encoding/binary" + "fmt" + "math/bits" +) // fastGen maintains the table for matches, // and the previous byte block for level 2. @@ -116,7 +120,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + var l = int32(4) + if false { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else { + // inlined: + a := src[s+4:] + b := src[t+4:] + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + l += int32(bits.TrailingZeros64(diff) >> 3) + break + } + l += 8 + a = a[8:] + b = b[8:] + } + if len(a) < 8 { + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + break + } + l++ + } + } + } // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { @@ -125,11 +154,43 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + if false { + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + } else { + // Inlined... + xoffset := uint32(s - t - baseMatchOffset) + xlength := l + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + dst.extraHist[lengthCodes1[uint8(xl)]]++ + dst.offHist[oc]++ + dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset) + dst.n++ + } + } s += l nextEmit = s if nextS >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 234c4389ab3..8603fbd55ad 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -134,7 +134,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index c22b4244a5c..039639f8989 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -5,7 +5,7 @@ import "fmt" // fastEncL3 type fastEncL3 struct { fastGen - table [tableSize]tableEntryPrev + table [1 << 16]tableEntryPrev } // Encode uses a similar algorithm to level 2, will check up to two candidates. @@ -13,6 +13,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { const ( inputMargin = 8 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits ) if debugDeflate && e.cur < 0 { @@ -73,7 +75,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry for { - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) s = nextS nextS = s + 1 + (s-nextEmit)>>skipLog if nextS > sLimit { @@ -141,7 +143,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) @@ -156,7 +166,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // Index first pair after match end.
if int(t+4) < len(src) && t > 0 { cv := load3232(src, t) - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) e.table[nextHash] = tableEntryPrev{ Prev: e.table[nextHash].Cur, Cur: tableEntry{offset: e.cur + t}, @@ -165,30 +175,31 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { goto emitRemainder } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-3 to s. - x := load6432(src, s-3) - prevHash := hash(uint32(x)) - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 3}, + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 5 { + nextHash := hash4u(load3232(src, i), tableBits) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} } - x >>= 8 - prevHash = hash(uint32(x)) + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 2}, } x >>= 8 - prevHash = hash(uint32(x)) + prevHash = hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 1}, } x >>= 8 - currHash := hash(uint32(x)) + currHash := hash4u(uint32(x), tableBits) candidates := e.table[currHash] cv = uint32(x) e.table[currHash] = tableEntryPrev{ @@ -200,15 +211,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { candidate = candidates.Cur minOffset := e.cur + s - (maxMatchOffset - 4) - if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { - // We only check if value mismatches. - // Offset will always be invalid in other cases. + if candidate.offset > minOffset { + if cv == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } candidate = candidates.Prev if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } + // Match at prev... 
+ continue } } cv = uint32(x >> 8) diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index e62f0c02b1e..1cbffa1aefe 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -135,7 +135,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 293a3a320b7..4b97576bd38 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -210,7 +210,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index a709977ec49..62888edf3cd 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -243,7 +243,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if false { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 53e89912463..544162a4318 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -249,7 +249,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index 3a9618ee193..d818790c132 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -13,11 +13,10 @@ import ( ) const ( - // From top - // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused - 8 bits: xlength = length - MIN_MATCH_LENGTH - 5 bits offsetcode - 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits lengthShift = 22 offsetMask = 1<<lengthShift - 1 typeMask = 3 << 30 @@ ... @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) { xl := xlength if xl > 258 { // We need to have at least baseMatchLength left over for next loop.
- xl = 258 - baseMatchLength + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } } xlength -= xl xl -= baseMatchLength t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc]++ + t.offHist[oc&31]++ t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset) t.n++ } } @@ ... @@ // Returns the length of a match token. func (t token) length() uint8 { return uint8(t >> lengthShift) } -// The code is never more than 8 bits, but is returned as uint32 for convenience. -func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) } +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } // Returns the offset code corresponding to a specific offset func offsetCode(off uint32) uint32 { diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go new file mode 100644 index 00000000000..ff2c69d60cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/autogen.go @@ -0,0 +1,5 @@ +package huff0 + +//go:generate go run generate.go +//go:generate asmfmt -w decompress_amd64.s +//go:generate asmfmt -w decompress_8b_amd64.s diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index a4979e8868a..451160edda3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -8,115 +8,10 @@ package huff0 import ( "encoding/binary" "errors" + "fmt" "io" ) -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available.
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -263,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { return uint16(b.value >> ((64 - n) & 63)) } +// peekTopBits(n) is equvialent to peekBitFast(64 - n) +func (b *bitReaderShifted) peekTopBits(n uint8) uint16 { + return uint16(b.value >> n) +} + func (b *bitReaderShifted) advance(n uint8) { b.bitsRead += n b.value <<= n & 63 @@ -318,10 +225,17 @@ func (b *bitReaderShifted) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 8323dc05389..bc95ac623bd 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. 
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 2a06bd1a7e5..04f6529955e 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) @@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} } // Decompress1X will decompress a 1X encoded stream. @@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { dt := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 for br.off >= 8 { @@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { bitsLeft -= nBits dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 switch d.actualTableLog { @@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) 
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } } default: + d.bufs.Put(bufs) return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 const shift = 56 @@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -688,195 +721,10 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. 
- if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -916,12 +764,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. 
- const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -942,8 +790,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -951,8 +799,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -960,8 +808,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -969,8 +817,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { @@ -987,8 +835,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -996,8 +844,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1005,8 +853,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1014,25 +862,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + 
copy(out, buf[0][:])
+			copy(out[dstEvery:], buf[1][:])
+			copy(out[dstEvery*2:], buf[2][:])
+			copy(out[dstEvery*3:], buf[3][:])
 			out = out[bufoff:]
-			decoded += 256
+			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
 			if len(out) < dstEvery*3 {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
 		}
 	}
@@ -1040,23 +889,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 	if off > 0 {
 		ioff := int(off)
 		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
 			return nil, errors.New("corruption detected: stream overrun 3")
 		}
-		copy(out, buf[:off])
-		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
-		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
-		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
 		decoded += int(off) * 4
 		out = out[off:]
 	}

 	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
 	for i := range br {
 		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
 		br := &br[i]
-		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		bitsLeft := br.remaining()
 		for bitsLeft > 0 {
 			if br.finished() {
+				d.bufs.Put(buf)
 				return nil, io.ErrUnexpectedEOF
 			}
 			if br.bitsRead >= 56 {
@@ -1076,7 +933,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 				}
 			}
 			// end inline...
-			if offset >= len(out) {
+			if offset >= endsAt {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 4")
 			}

@@ -1084,16 +942,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			v := single[uint8(br.value>>shift)].entry
 			nBits := uint8(v)
 			br.advance(nBits)
-			bitsLeft -= int(nBits)
+			bitsLeft -= uint(nBits)
 			out[offset] = uint8(v >> 8)
 			offset++
 		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
 		decoded += offset - dstEvery*i
 		err = br.close()
 		if err != nil {
+			d.bufs.Put(buf)
 			return nil, err
 		}
 	}
+	d.bufs.Put(buf)
 	if dstSize != decoded {
 		return nil, errors.New("corruption detected: short output block")
 	}
@@ -1135,12 +999,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 	single := d.dt.single[:tlSize]

 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	buf := d.buffer()
 	var off uint8
 	var decoded int

 	// Decode 4 values from each decoder/loop.
-	const bufoff = 256 / 4
+	const bufoff = 256
 	for {
 		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
 			break
 		}

 		{
 			// Interleave 2 decodes.
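+			// The two streams of each pair are decoded in lockstep so their
+			// table lookups and bit-reader updates can overlap; each pass
+			// writes buf[stream][off] through buf[stream][off+3].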
const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 
8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1257,21 +1126,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1291,7 +1166,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { } } // end inline... 
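+			// endsAt bounds this stream's share of out: each stream owns
+			// dstEvery bytes, decoded/4 of which were already produced by
+			// the interleaved loop; the last stream's share may be shorter.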
-			if offset >= len(out) {
+			if offset >= endsAt {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 4")
 			}

@@ -1299,16 +1175,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 			v := single[br.peekByteFast()].entry
 			nBits := uint8(v)
 			br.advance(nBits)
-			bitsLeft -= int(nBits)
+			bitsLeft -= uint(nBits)
 			out[offset] = uint8(v >> 8)
 			offset++
 		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
 		decoded += offset - dstEvery*i
 		err = br.close()
 		if err != nil {
+			d.bufs.Put(buf)
 			return nil, err
 		}
 	}
+	d.bufs.Put(buf)
 	if dstSize != decoded {
 		return nil, errors.New("corruption detected: short output block")
 	}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
new file mode 100644
index 00000000000..0d6cb1a962b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
@@ -0,0 +1,488 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+	MOVQ	BP, 0(SP)
+
+	XORQ	exhausted, exhausted	// exhausted = false
+	XORQ	off, off	// off = 0
+
+	MOVBQZX	peekBits+32(FP), peek_bits
+	MOVQ	buf+40(FP), buffer
+	MOVQ	tbl+48(FP), table
+
+	MOVQ	pbr0+0(FP), br0
+	MOVQ	pbr1+8(FP), br1
+	MOVQ	pbr2+16(FP), br2
+	MOVQ	pbr3+24(FP), br3
+
+main_loop:
+
+	// const stream = 0
+	// br0.fillFast()
+	MOVBQZX	bitReaderShifted_bitsRead(br0), br_bits_read
+	MOVQ	bitReaderShifted_value(br0), br_value
+	MOVQ	bitReaderShifted_off(br0), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ	br_bits_read, $32
+	JB	skip_fill0
+
+	SUBQ	$32, br_bits_read	// b.bitsRead -= 32
+	SUBQ	$4, br_offset	// b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ	bitReaderShifted_in(br0), AX
+	MOVL	0(br_offset)(AX*1), AX	// AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ	br_bits_read, CX
+	SHLQ	CL, AX
+	ORQ	AX, br_value
+
+	// exhausted = exhausted || (br0.off < 4)
+	CMPQ	br_offset, $4
+	SETLT	DL
+	ORB	DL, DH
+
+	// }
+skip_fill0:
+
+	// val0 := br0.peekTopBits(peekBits)
+	MOVQ	br_value, AX
+	MOVQ	peek_bits, CX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v0
+
+	// br0.advance(uint8(v0.entry))
+	MOVB	AH, BL	// BL = uint8(v0.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CL, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+	// val1 := br0.peekTopBits(peekBits)
+	MOVQ	peek_bits, CX
+	MOVQ	br_value, AX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v1
+
+	// br0.advance(uint8(v1.entry))
+	MOVB	AH, BH	// BH = uint8(v1.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CX, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+	// these two
writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 0(buffer)(off*1) + + // SECOND PART: + // val2 := br0.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v2 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br0.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val3 := br0.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v3 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br0.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off+2] = uint8(v2.entry >> 8) + // buf[stream][off+3] = uint8(v3.entry >> 8) + MOVW BX, 0+2(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br0) + MOVQ br_value, bitReaderShifted_value(br0) + MOVQ br_offset, bitReaderShifted_off(br0) + + // const stream = 1 + // br1.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read + MOVQ bitReaderShifted_value(br1), br_value + MOVQ bitReaderShifted_off(br1), br_offset + + // if b.bitsRead >= 32 { + CMPQ br_bits_read, $32 + JB skip_fill1 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br1), AX + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVQ br_bits_read, CX + SHLQ CL, AX + ORQ AX, br_value + + // exhausted = exhausted || (br1.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill1: + + // val0 := br1.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br1.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val1 := br1.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br1.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 256(buffer)(off*1) + + // SECOND PART: + // val2 := br1.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v2 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br1.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val3 := br1.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v3 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + 
// br1.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off+2] = uint8(v2.entry >> 8) + // buf[stream][off+3] = uint8(v3.entry >> 8) + MOVW BX, 256+2(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br1) + MOVQ br_value, bitReaderShifted_value(br1) + MOVQ br_offset, bitReaderShifted_off(br1) + + // const stream = 2 + // br2.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read + MOVQ bitReaderShifted_value(br2), br_value + MOVQ bitReaderShifted_off(br2), br_offset + + // if b.bitsRead >= 32 { + CMPQ br_bits_read, $32 + JB skip_fill2 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br2), AX + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVQ br_bits_read, CX + SHLQ CL, AX + ORQ AX, br_value + + // exhausted = exhausted || (br2.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill2: + + // val0 := br2.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br2.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val1 := br2.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br2.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 512(buffer)(off*1) + + // SECOND PART: + // val2 := br2.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v2 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br2.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val3 := br2.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v3 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br2.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off+2] = uint8(v2.entry >> 8) + // buf[stream][off+3] = uint8(v3.entry >> 8) + MOVW BX, 512+2(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br2) + MOVQ br_value, bitReaderShifted_value(br2) + MOVQ br_offset, bitReaderShifted_off(br2) + + // const stream = 3 + // br3.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read + MOVQ bitReaderShifted_value(br3), br_value + MOVQ bitReaderShifted_off(br3), br_offset + + // if b.bitsRead >= 32 
{
+	CMPQ	br_bits_read, $32
+	JB	skip_fill3
+
+	SUBQ	$32, br_bits_read	// b.bitsRead -= 32
+	SUBQ	$4, br_offset	// b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ	bitReaderShifted_in(br3), AX
+	MOVL	0(br_offset)(AX*1), AX	// AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ	br_bits_read, CX
+	SHLQ	CL, AX
+	ORQ	AX, br_value
+
+	// exhausted = exhausted || (br3.off < 4)
+	CMPQ	br_offset, $4
+	SETLT	DL
+	ORB	DL, DH
+
+	// }
+skip_fill3:
+
+	// val0 := br3.peekTopBits(peekBits)
+	MOVQ	br_value, AX
+	MOVQ	peek_bits, CX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v0
+
+	// br3.advance(uint8(v0.entry))
+	MOVB	AH, BL	// BL = uint8(v0.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CL, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+	// val1 := br3.peekTopBits(peekBits)
+	MOVQ	peek_bits, CX
+	MOVQ	br_value, AX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v1
+
+	// br3.advance(uint8(v1.entry))
+	MOVB	AH, BH	// BH = uint8(v1.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CX, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW	BX, 768(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br3.peekTopBits(peekBits)
+	MOVQ	br_value, AX
+	MOVQ	peek_bits, CX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v2
+
+	// br3.advance(uint8(v2.entry))
+	MOVB	AH, BL	// BL = uint8(v2.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CL, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+	// val3 := br3.peekTopBits(peekBits)
+	MOVQ	peek_bits, CX
+	MOVQ	br_value, AX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v3
+
+	// br3.advance(uint8(v3.entry))
+	MOVB	AH, BH	// BH = uint8(v3.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CX, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW	BX, 768+2(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB	br_bits_read, bitReaderShifted_bitsRead(br3)
+	MOVQ	br_value, bitReaderShifted_value(br3)
+	MOVQ	br_offset, bitReaderShifted_off(br3)
+
+	ADDQ	$4, off	// off += 4
+
+	TESTB	DH, DH	// any br[i].off < 4?
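+	// DH accumulated the SETLT results of every skip_fill block above, so
+	// it is non-zero once any stream has fewer than 4 input bytes left;
+	// exit to the Go tail loop rather than risk overreading a stream.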
+	JNZ	end
+
+	CMPQ	off, $bufoff
+	JL	main_loop
+
+end:
+	MOVQ	0(SP), BP
+
+	MOVB	off, ret+56(FP)
+	RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
new file mode 100644
index 00000000000..6d477a2c11e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
@@ -0,0 +1,197 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+//func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+	MOVQ	BP, 0(SP)
+
+	XORQ	exhausted, exhausted	// exhausted = false
+	XORQ	off, off	// off = 0
+
+	MOVBQZX	peekBits+32(FP), peek_bits
+	MOVQ	buf+40(FP), buffer
+	MOVQ	tbl+48(FP), table
+
+	MOVQ	pbr0+0(FP), br0
+	MOVQ	pbr1+8(FP), br1
+	MOVQ	pbr2+16(FP), br2
+	MOVQ	pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+	// const stream = {{ var "id" }}
+	// br{{ var "id"}}.fillFast()
+	MOVBQZX	bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+	MOVQ	bitReaderShifted_value(br{{ var "id" }}), br_value
+	MOVQ	bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ	br_bits_read, $32
+	JB	skip_fill{{ var "id" }}
+
+	SUBQ	$32, br_bits_read	// b.bitsRead -= 32
+	SUBQ	$4, br_offset	// b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ	bitReaderShifted_in(br{{ var "id" }}), AX
+	MOVL	0(br_offset)(AX*1), AX	// AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ	br_bits_read, CX
+	SHLQ	CL, AX
+	ORQ	AX, br_value
+
+	// exhausted = exhausted || (br{{ var "id"}}.off < 4)
+	CMPQ	br_offset, $4
+	SETLT	DL
+	ORB	DL, DH
+	// }
+skip_fill{{ var "id" }}:
+
+	// val0 := br{{ var "id"}}.peekTopBits(peekBits)
+	MOVQ	br_value, AX
+	MOVQ	peek_bits, CX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v0
+
+	// br{{ var "id"}}.advance(uint8(v0.entry))
+	MOVB	AH, BL	// BL = uint8(v0.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CL, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+	// val1 := br{{ var "id"}}.peekTopBits(peekBits)
+	MOVQ	peek_bits, CX
+	MOVQ	br_value, AX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v1
+
+	// br{{ var "id"}}.advance(uint8(v1.entry))
+	MOVB	AH, BH	// BH = uint8(v1.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CX, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW	BX, {{ var "bufofs" }}(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br{{ var "id"}}.peekTopBits(peekBits)
+	MOVQ	br_value, AX
+	MOVQ	peek_bits, CX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v2
+
+	// br{{ var "id"}}.advance(uint8(v2.entry))
+	MOVB	AH, BL	// BL = uint8(v2.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CL, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+	// val3 := br{{ var "id"}}.peekTopBits(peekBits)
+	MOVQ	peek_bits, CX
+	MOVQ	br_value, AX
+	SHRQ	CL, AX	// AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW	0(table)(AX*2), AX	// AX - v3
+
+	// br{{ var "id"}}.advance(uint8(v3.entry))
+	MOVB	AH, BH	// BH = uint8(v3.entry >> 8)
+	MOVBQZX	AL, CX
+	SHLQ	CX, br_value	// value <<= n
+	ADDQ	CX, br_bits_read	// bits_read += n
+
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW	BX, {{ var "bufofs" }}+2(buffer)(off*1)
+
+	// update the bit reader structure
+	MOVB	br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+	MOVQ	br_value, bitReaderShifted_value(br{{ var "id" }})
+	MOVQ	br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+	{{ set "id" "0" }}
+	{{ set "ofs" "0" }}
+	{{ set "bufofs" "0" }} {{/* id * bufoff */}}
+	{{ template "decode_2_values_x86" . }}
+
+	{{ set "id" "1" }}
+	{{ set "ofs" "8" }}
+	{{ set "bufofs" "256" }}
+	{{ template "decode_2_values_x86" . }}
+
+	{{ set "id" "2" }}
+	{{ set "ofs" "16" }}
+	{{ set "bufofs" "512" }}
+	{{ template "decode_2_values_x86" . }}
+
+	{{ set "id" "3" }}
+	{{ set "ofs" "24" }}
+	{{ set "bufofs" "768" }}
+	{{ template "decode_2_values_x86" . }}
+
+	ADDQ	$4, off	// off += 4
+
+	TESTB	DH, DH	// any br[i].off < 4?
+	JNZ	end
+
+	CMPQ	off, $bufoff
+	JL	main_loop
+end:
+	MOVQ	0(SP), BP
+
+	MOVB	off, ret+56(FP)
+	RET
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 00000000000..ce8e93bcd06
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,181 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// that uses an asm implementation of its main loop.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+)
+
+// decompress4x_main_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+//go:noescape
+func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// decompress4x_8b_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+//go:noescape
+func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// fallback8BitSize is the size below which the pure Go version is faster.
+const fallback8BitSize = 800
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + const debug = false + + // see: bitReaderShifted.peekBitsFast() + peekBits := uint8((64 - d.actualTableLog) & 63) + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + if use8BitTables { + off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) + } else { + off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) + } + if debug { + fmt.Print("DEBUG: ") + fmt.Printf("off=%d,", off) + for i := 0; i < 4; i++ { + fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}", + i, br[i].bitsRead, br[i].value, br[i].off) + } + fmt.Println("") + } + + if off != 0 { + break + } + + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) + out = out[bufoff:] + decoded += bufoff * 4 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
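+			// v packs the decoded byte in its high 8 bits and the code
+			// length in its low 8 bits, so advance(uint8(v)) consumes the
+			// bits and uint8(v >> 8) below is the literal written out.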
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 00000000000..2edad3ea5a4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,506 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +#ifdef GOAMD64_v4 +#ifndef GOAMD64_v3 +#define GOAMD64_v3 +#endif +#endif + +#define bufoff 256 // see decompress.go, we're using [4][256]byte table + +// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, +// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) +TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8 +#define off R8 +#define buffer DI +#define table SI + +#define br_bits_read R9 +#define br_value R10 +#define br_offset R11 +#define peek_bits R12 +#define exhausted DX + +#define br0 R13 +#define br1 R14 +#define br2 R15 +#define br3 BP + + MOVQ BP, 0(SP) + + XORQ exhausted, exhausted // exhausted = false + XORQ off, off // off = 0 + + MOVBQZX peekBits+32(FP), peek_bits + MOVQ buf+40(FP), buffer + MOVQ tbl+48(FP), table + + MOVQ pbr0+0(FP), br0 + MOVQ pbr1+8(FP), br1 + MOVQ pbr2+16(FP), br2 + MOVQ pbr3+24(FP), br3 + +main_loop: + + // const stream = 0 + // br0.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read + MOVQ bitReaderShifted_value(br0), br_value + MOVQ bitReaderShifted_off(br0), br_offset + + // We must have at least 2 * max tablelog left + CMPQ br_bits_read, $64-22 + JBE skip_fill0 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br0), AX + + // b.value |= uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) + +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX + +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br0.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill0: + + // val0 := br0.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br0.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + // val1 := 
br0.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br0.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 0(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br0) + MOVQ br_value, bitReaderShifted_value(br0) + MOVQ br_offset, bitReaderShifted_off(br0) + + // const stream = 1 + // br1.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read + MOVQ bitReaderShifted_value(br1), br_value + MOVQ bitReaderShifted_off(br1), br_offset + + // We must have at least 2 * max tablelog left + CMPQ br_bits_read, $64-22 + JBE skip_fill1 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br1), AX + + // b.value |= uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) + +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX + +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br1.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill1: + + // val0 := br1.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br1.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + // val1 := br1.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br1.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 256(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br1) + MOVQ br_value, bitReaderShifted_value(br1) + MOVQ br_offset, bitReaderShifted_off(br1) + + // const stream = 2 + // br2.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read + MOVQ bitReaderShifted_value(br2), br_value + MOVQ bitReaderShifted_off(br2), br_offset + + // We must have at least 2 * max tablelog left + CMPQ 
br_bits_read, $64-22 + JBE skip_fill2 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br2), AX + + // b.value |= uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) + +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX + +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br2.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill2: + + // val0 := br2.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br2.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + // val1 := br2.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br2.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 512(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br2) + MOVQ br_value, bitReaderShifted_value(br2) + MOVQ br_offset, bitReaderShifted_off(br2) + + // const stream = 3 + // br3.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read + MOVQ bitReaderShifted_value(br3), br_value + MOVQ bitReaderShifted_off(br3), br_offset + + // We must have at least 2 * max tablelog left + CMPQ br_bits_read, $64-22 + JBE skip_fill3 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br3), AX + + // b.value |= uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) + +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX + +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br3.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill3: + + // val0 := br3.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // 
br3.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + // val1 := br3.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br3.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 768(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br3) + MOVQ br_value, bitReaderShifted_value(br3) + MOVQ br_offset, bitReaderShifted_off(br3) + + ADDQ $2, off // off += 2 + + TESTB DH, DH // any br[i].ofs < 4? + JNZ end + + CMPQ off, $bufoff + JL main_loop + +end: + MOVQ 0(SP), BP + + MOVB off, ret+56(FP) + RET + +#undef off +#undef buffer +#undef table + +#undef br_bits_read +#undef br_value +#undef br_offset +#undef peek_bits +#undef exhausted + +#undef br0 +#undef br1 +#undef br2 +#undef br3 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in new file mode 100644 index 00000000000..330d86ae155 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in @@ -0,0 +1,195 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +#ifdef GOAMD64_v4 +#ifndef GOAMD64_v3 +#define GOAMD64_v3 +#endif +#endif + +#define bufoff 256 // see decompress.go, we're using [4][256]byte table + +//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, +// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) +TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8 +#define off R8 +#define buffer DI +#define table SI + +#define br_bits_read R9 +#define br_value R10 +#define br_offset R11 +#define peek_bits R12 +#define exhausted DX + +#define br0 R13 +#define br1 R14 +#define br2 R15 +#define br3 BP + + MOVQ BP, 0(SP) + + XORQ exhausted, exhausted // exhausted = false + XORQ off, off // off = 0 + + MOVBQZX peekBits+32(FP), peek_bits + MOVQ buf+40(FP), buffer + MOVQ tbl+48(FP), table + + MOVQ pbr0+0(FP), br0 + MOVQ pbr1+8(FP), br1 + MOVQ pbr2+16(FP), br2 + MOVQ pbr3+24(FP), br3 + +main_loop: +{{ define "decode_2_values_x86" }} + // const stream = {{ var "id" }} + // br{{ var "id"}}.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read + MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value + MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset + + // We must have at least 2 * max tablelog left + CMPQ br_bits_read, $64-22 + JBE skip_fill{{ var "id" }} + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br{{ var "id" }}), AX + + // b.value |= 
uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br{{ var "id"}}.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + // } +skip_fill{{ var "id" }}: + + // val0 := br{{ var "id"}}.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br{{ var "id"}}.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n +#endif + + ADDQ CX, br_bits_read // bits_read += n + + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask +#else + // val1 := br{{ var "id"}}.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br{{ var "id"}}.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n +#endif + + ADDQ CX, br_bits_read // bits_read += n + + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, {{ var "bufofs" }}(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }}) + MOVQ br_value, bitReaderShifted_value(br{{ var "id" }}) + MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }}) +{{ end }} + + {{ set "id" "0" }} + {{ set "ofs" "0" }} + {{ set "bufofs" "0" }} {{/* id * bufoff */}} + {{ template "decode_2_values_x86" . }} + + {{ set "id" "1" }} + {{ set "ofs" "8" }} + {{ set "bufofs" "256" }} + {{ template "decode_2_values_x86" . }} + + {{ set "id" "2" }} + {{ set "ofs" "16" }} + {{ set "bufofs" "512" }} + {{ template "decode_2_values_x86" . }} + + {{ set "id" "3" }} + {{ set "ofs" "24" }} + {{ set "bufofs" "768" }} + {{ template "decode_2_values_x86" . }} + + ADDQ $2, off // off += 2 + + TESTB DH, DH // any br[i].ofs < 4? + JNZ end + + CMPQ off, $bufoff + JL main_loop +end: + MOVQ 0(SP), BP + + MOVB off, ret+56(FP) + RET +#undef off +#undef buffer +#undef table + +#undef br_bits_read +#undef br_value +#undef br_offset +#undef peek_bits +#undef exhausted + +#undef br0 +#undef br1 +#undef br2 +#undef br3 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 00000000000..126b4d68a94 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,193 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. 
+// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) + out = out[bufoff:] + decoded += bufoff * 4 + // There must at least be 3 buffers left. 
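+			// Streams 1..3 write at fixed offsets dstEvery, 2*dstEvery and
+			// 3*dstEvery into out, so after advancing, at least 3*dstEvery
+			// bytes must remain or the next flush would run past the end.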
+			if len(out) < dstEvery*3 {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			br.fill()
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 3ee00ecb470..e8ad17ad08e 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"math"
 	"math/bits"
+	"sync"
 
 	"github.com/klauspost/compress/fse"
 )
@@ -116,6 +117,7 @@ type Scratch struct {
 	nodes      []nodeElt
 	tmpOut     [4][]byte
 	fse        *fse.Scratch
+	decPool    sync.Pool // *[4][256]byte buffers.
 	huffWeight [maxSymbolValue + 1]byte
 }
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
new file mode 100644
index 00000000000..3954c51219b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
@@ -0,0 +1,34 @@
+// Package cpuinfo gives runtime info about the current CPU.
+//
+// This is a very limited module meant for use internally
+// in this project. For a more versatile solution, check
+// https://github.com/klauspost/cpuid.
+package cpuinfo
+
+// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
+func HasBMI1() bool {
+	return hasBMI1
+}
+
+// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
+func HasBMI2() bool {
+	return hasBMI2
+}
+
+// DisableBMI2 will disable BMI2, for testing purposes.
+// Call the returned function to restore the previous state.
+func DisableBMI2() func() {
+	old := hasBMI2
+	hasBMI2 = false
+	return func() {
+		hasBMI2 = old
+	}
+}
+
+// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
+func HasBMI() bool {
+	return HasBMI1() && HasBMI2()
+}
+
+var hasBMI1 bool
+var hasBMI2 bool
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
new file mode 100644
index 00000000000..e802579c4f9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
@@ -0,0 +1,11 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package cpuinfo
+
+//go:noescape
+func x86extensions() (bmi1, bmi2 bool)
+
+func init() {
+	hasBMI1, hasBMI2 = x86extensions()
+}
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
new file mode 100644
index 00000000000..4465fbe9e90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
@@ -0,0 +1,36 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+TEXT ·x86extensions(SB), NOSPLIT, $0
+	// 1. determine max EAX value
+	XORQ AX, AX
+	CPUID
+
+	CMPQ AX, $7
+	JB   unsupported
+
+	// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
+	MOVQ $7, AX
+	MOVQ $0, CX
+	CPUID
+
+	BTQ   $3, BX // bit 3 = BMI1
+	SETCS AL
+
+	BTQ   $8, BX // bit 8 = BMI2
+	SETCS AH
+
+	MOVB AL, bmi1+0(FP)
+	MOVB AH, bmi2+1(FP)
+	RET
+
+unsupported:
+	XORQ AX, AX
+	MOVB AL, bmi1+0(FP)
+	MOVB AL, bmi2+1(FP)
+	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index c8f0f16fc1e..beb7fa87201 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is
 in the future. So if you want to limit concurrency for future updates, specify the concurrency
 you would like.
 
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`,
+which will compress input as each block is completed, blocking on writes until each has completed.
+
 You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
 compression settings can be specified.
 
@@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
 For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
 
 `EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
 Encoded blocks can be concatenated and the result will be the combined input stream.
 Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
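To make the `EncodeAll` usage above concrete, here is a minimal sketch of the shared-encoder pattern the README describes; `zstd.NewWriter` and `EncodeAll` are the package's API, while the `Compress` helper, variable names, and buffer handling are illustrative assumptions:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// A single shared encoder. EncodeAll may be called on it concurrently;
// each call runs on the caller's goroutine, as noted above.
var encoder, _ = zstd.NewWriter(nil)

// Compress appends the zstd-compressed form of src to dst and returns it.
// Passing a reused dst slice avoids allocations when compressing many small blocks.
func Compress(src, dst []byte) []byte {
	return encoder.EncodeAll(src, dst)
}

func main() {
	blob := Compress([]byte("hello hello hello"), nil)
	fmt.Println("compressed size:", len(blob))
}
```

Blocks produced this way can be concatenated, and the result decodes as the combined input stream.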
@@ -149,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
 This package:
 file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73101992 643 313.87
-silesia.tar zskp 2 211947520 67504318 969 208.38
-silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 8825 22.90
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
 
 cgo zstd:
 silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -161,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
 silesia.tar zstd 9 211947520 60212393 5063 39.92
 
 gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80136201 1152 175.45
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
 
 GOB stream of binary data. Highly compressible.
 https://files.klauspost.com/compress/gob-stream.7z
 
 file out level insize outsize millis mb/s
-gob-stream zskp 1 1911399616 235022249 3088 590.30
-gob-stream zskp 2 1911399616 205669791 3786 481.34
-gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 165609838 50369 36.19
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
 
 gob-stream zstd 1 1911399616 249810424 2637 691.26
 gob-stream zstd 3 1911399616 208192146 3490 522.31
 gob-stream zstd 6 1911399616 193632038 6687 272.56
 gob-stream zstd 9 1911399616 177620386 16175 112.70
 
-gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 359753026 5438 335.20
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
 
 The test data for the Large Text Compression Benchmark is the first
 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
 http://mattmahoney.net/dc/textdata.html
 
 file out level insize outsize millis mb/s
-enwik9 zskp 1 1000000000 343848582 3609 264.18
-enwik9 zskp 2 1000000000 317276632 5746 165.97
-enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 262183768 82837 11.51
+enwik9 zskp 1 1000000000 343833605 3687 258.64
+enwik9 zskp 2 1000000000 317001237 7672 124.29
+enwik9 zskp 3 1000000000 291915823 15923 59.89
+enwik9 zskp 4 1000000000 261710291 77697 12.27
 
 enwik9 zstd 1 1000000000 358072021 3110 306.65
 enwik9 zstd 3 1000000000 313734672 4784 199.35
 enwik9 zstd 6 1000000000 295138875 10290 92.68
 enwik9 zstd 9 1000000000 278348700 28549 33.40
 
-enwik9 gzstd 1 1000000000 382578136 9604 99.30
-enwik9 gzkp 1 1000000000 383825945 6544 145.73
+enwik9 gzstd 1 1000000000 382578136 8608 110.78
+enwik9 gzkp 1 1000000000 382781160 5628 169.45
 
 Highly compressible JSON file.
 https://files.klauspost.com/compress/github-june-2days-2019.json.zst
 
 file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
-github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
-github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
 
 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
 
-github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
 
 VM Image, Linux mint with a few installed applications:
 https://files.klauspost.com/compress/rawstudio-mint14.7z
 
 file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
-rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
-rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
 
 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
 
-rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
 
 CSV data:
 https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
 
 file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
-nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
-nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
 
 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
 
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
 ```
 
 ## Decompressor
 
@@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error {
 }
 ```
 
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+When running with default settings, it is important to use the "Close" function when you no longer need
+the Reader, so that its goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress
+data only as it is requested.
 
 For decoding buffers, it could look something like this:
 
@@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd"
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
@@ -303,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) {
 ```
 
 Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
 It will only allow a certain number of concurrent operations to run.
-To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
 
 ### Dictionaries
 
@@ -357,62 +369,48 @@ In this case no unneeded allocations should be made.
 The buffer decoder does everything on the same goroutine and does nothing concurrently.
 It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
 
-The stream decoder operates on
+The stream decoder will create goroutines that:
 
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
 
 So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
 
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
 Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
 
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
-
-
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
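A minimal sketch of the synchronous mode described above; the `DecompressSync` function and its `in`/`out` parameters are illustrative assumptions, while `NewReader`, `WithDecoderConcurrency`, and `Close` are the package's API:

```go
package main

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// DecompressSync decodes a zstd stream without spawning async goroutines:
// with a concurrency of 1 the decoder decompresses only as data is requested.
func DecompressSync(in io.Reader, out io.Writer) error {
	d, err := zstd.NewReader(in, zstd.WithDecoderConcurrency(1))
	if err != nil {
		return err
	}
	// Close stops the decoder and releases its resources.
	defer d.Close()
	_, err = io.Copy(out, d)
	return err
}
```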
+ ``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op 
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. ## Zstd inside ZIP files diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 753d17df634..d7cd15ba29d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -7,6 +7,7 @@ package zstd import ( "encoding/binary" "errors" + "fmt" "io" "math/bits" ) @@ -132,6 +133,9 @@ func (b *bitReader) remain() uint { func (b *bitReader) close() error { // Release reference. b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index dc587b2c949..b2bca330182 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,14 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "io/ioutil" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,6 +43,9 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). maxBlockSize = (1 << 21) - 1 @@ -76,16 +84,25 @@ type blockDec struct { // Window size of the block. WindowSize uint64 - history chan *history - input chan struct{} - result chan decodeOutput - err error - decWG sync.WaitGroup + err error + + // Check against this crc + checkCRC []byte // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. localFrame *frameDec + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + // Block is RLE, this is the size. 
 	RLESize uint32
 
 	tmp [4]byte
@@ -108,13 +125,8 @@ func (b *blockDec) String() string {
 
 func newBlockDec(lowMem bool) *blockDec {
 	b := blockDec{
-		lowMem:  lowMem,
-		result:  make(chan decodeOutput, 1),
-		input:   make(chan struct{}, 1),
-		history: make(chan *history, 1),
+		lowMem: lowMem,
 	}
-	b.decWG.Add(1)
-	go b.startDecoder()
 	return &b
 }
 
@@ -132,11 +144,17 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 	b.Type = blockType((bh >> 1) & 3)
 	// find size.
 	cSize := int(bh >> 3)
-	maxSize := maxBlockSize
+	maxSize := maxCompressedBlockSizeAlloc
 	switch b.Type {
 	case blockTypeReserved:
 		return ErrReservedBlockType
 	case blockTypeRLE:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
 		b.RLESize = uint32(cSize)
 		if b.lowMem {
 			maxSize = cSize
@@ -147,9 +165,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 			println("Data size on stream:", cSize)
 		}
 		b.RLESize = 0
-		maxSize = maxCompressedBlockSize
+		maxSize = maxCompressedBlockSizeAlloc
 		if windowSize < maxCompressedBlockSize && b.lowMem {
-			maxSize = int(windowSize)
+			maxSize = int(windowSize) + compressedBlockOverAlloc
 		}
 		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
 			if debugDecoder {
@@ -157,7 +175,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 			}
 			return ErrCompressedSizeTooBig
 		}
+		// Empty compressed blocks must be at least 2 bytes:
+		// one for Literals_Block_Type and one for Sequences_Section_Header.
+		if cSize < 2 {
+			return ErrBlockTooSmall
+		}
 	case blockTypeRaw:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("raw block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
+
+		b.RLESize = 0
 		// We do not need a destination for raw blocks.
 		maxSize = -1
@@ -168,9 +198,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 	// Read block data.
 	if cap(b.dataStorage) < cSize {
 		if b.lowMem || cSize > maxCompressedBlockSize {
-			b.dataStorage = make([]byte, 0, cSize)
+			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
 		} else {
-			b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
+			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
 		}
 	}
 	if cap(b.dst) <= maxSize {
@@ -192,85 +222,14 @@ func (b *blockDec) sendErr(err error) {
 	b.Last = true
 	b.Type = blockTypeReserved
 	b.err = err
-	b.input <- struct{}{}
 }
 
 // Close will release resources.
 // Closed blockDec cannot be reset.
 func (b *blockDec) Close() {
-	close(b.input)
-	close(b.history)
-	close(b.result)
-	b.decWG.Wait()
-}
-
-// decodeAsync will prepare decoding the block when it receives input.
-// This will separate output and history.
-func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debugDecoder { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debugDecoder { - println("blockDec: Finished block") - } - } } -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -293,14 +252,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -310,30 +278,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -349,7 +305,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -360,7 +316,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -371,7 +327,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -381,7 +337,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -392,13 +348,15 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -406,7 +364,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { @@ -417,7 +375,6 @@ func (b *blockDec) decodeCompressed(hist *history) error { b.literalBuf = make([]byte, litRegenSize) } else { b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - } } } @@ -433,7 +390,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. literals = in[:litCompSize] @@ -441,31 +398,65 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = maxCompressedBlockSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + case literalsBlockCompressed: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litCompSize] in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { b.literalBuf = make([]byte, 0, litRegenSize) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize) } } - if huff == nil { - huff = &huff0.Scratch{} + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = maxCompressedBlockSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -474,27 +465,61 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -511,8 +536,16 @@ func (b *blockDec) decodeCompressed(hist *history) error { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } - var seqs = &sequenceDecs{} + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -541,6 +574,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -548,34 +584,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec + seq.fse.setRLE(symb) if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } if debugDecoder { - println("Read table ok", "symbolLen:", dec.symbolLen) + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: seq.repeat = true } @@ -585,140 +623,106 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } - } - - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. 
- - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil - } - } - if huff != nil { - hist.huffTree = huff - } if debugDecoder { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } return nil } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) return err } - if debugDecoder { - println("History merged ok") + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) } - br := &bitReader{} - if err := br.init(in); err != nil { - return err + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. 
+ err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debugDecoder { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index aab71c6cf85..b80191e4b1e 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f430f58b572..c65ea9795f3 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,9 +5,13 @@ package zstd import ( - "errors" + "bytes" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -22,12 +26,19 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. - stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + } + + frame *frameDec + // Custom dictionaries. // Always uses copies. 
dicts map[uint32]dict @@ -46,7 +57,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. - cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true if r == nil { @@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } @@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error { d.drainOutput() + d.syncStream.br.r = nil if r == nil { d.current.err = ErrDecoderNilInput if len(d.current.b) > 0 { @@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error { } return nil } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - // Remove current block. + d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { @@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() { } d.decoders <- v.d } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. @@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. 
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } @@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -307,33 +328,39 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil } - return dst, nil + return dst, err } if frame.DictionaryID != nil { dict, ok := d.dicts[*frame.DictionaryID] if !ok { return nil, ErrUnknownDictionary } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } frame.history.setDict(&dict) } - if err != nil { - return dst, err - } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if frame.WindowSize > d.o.maxWindowSize { + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + return dst, ErrDecoderSizeExceeded + } if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } + if cap(dst) == 0 { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. @@ -368,33 +395,170 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } if d.current.err != nil { // Keep error state. - return blocking + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok } + //ASYNC: + d.stashDecoder() if blocking { - d.current.decodeOutput = <-d.current.output + d.current.decodeOutput, ok = <-d.current.output } else { select { - case d.current.decodeOutput = <-d.current.output: + case d.current.decodeOutput, ok = <-d.current.output: default: return false } } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } if debugDecoder { - println("got", len(d.current.b), "bytes, error:", d.current.err) + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) } + + if len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { + got := d.current.crc.Sum64() + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(got)) + if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC { + if debugDecoder { + println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + println("CRC ok", tmp[:]) + } + } + } + return true } +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + d.frame.crc.Write(d.current.b) + if d.current.d.Last { + d.current.err = d.frame.checkCRC() + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + // Close will release all resources. 
// It is NOT possible to reuse the decoder after this. func (d *Decoder) Close() { @@ -402,10 +566,10 @@ func (d *Decoder) Close() { return } d.drainOutput() - if d.stream != nil { - close(d.stream) + if d.current.cancel != nil { + d.current.cancel() d.streamWg.Wait() - d.stream = nil + d.current.cancel = nil } if d.decoders != nil { close(d.decoders) @@ -456,100 +620,307 @@ type decodeOutput struct { err error } -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil } -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - // Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. -func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { +// ASYNC: +// Spawn 4 go routines. +// 0: Read frames and decode blocks. +// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree. +// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets. +// 3: Wait for stream history, execute sequences, send stream history. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debugDecoder { - println("got new stream") + br := readerWrapper{r: r} + + var seqPrepare = make(chan *blockDec, d.o.concurrent) + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Prepare blocks... + go func() { + var hist history + var hasErr bool + for block := range seqPrepare { + if hasErr { + if block != nil { + seqDecode <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history") + } + hist.reset() + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + continue + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) + close(seqDecode) + }() + + // Async 2: Decode sequences... 
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history, recent:", block.async.newHist.recentOffsets) + } + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) } - break + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize } - if debugDecoder { - println("starting frame decoder") - } - - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() - - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} + seqExecute <- block + } + close(seqExecute) + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 3: new history") + } + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxBlockSize) } - break decodeStream - default: } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } } } - // All blocks have started decoding, check if there are more frames. 
- println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + }() + +decodeStream: + for { + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + seqPrepare <- dec + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.checkCRC = nil + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + dec.err = err + } + var tmp [4]byte + copy(tmp[:], crc) + dec.checkCRC = tmp[:] + if debugDecoder { + println("found crc to check:", dec.checkCRC) + } + } + err = dec.err + last := dec.Last + seqPrepare <- dec + if err != nil { + break decodeStream + } + if last { + break + } } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} } + close(seqPrepare) + wg.Wait() + d.frame.history.b = frameHistCache } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 95cc9b8b81f..fc52ebc403a 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -28,7 +28,10 @@ func (o *decoderOptions) setDefault() { concurrent: runtime.GOMAXPROCS(0), maxWindowSize: MaxWindowSize, } - o.maxDecodedSize = 1 << 63 + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 } // WithDecoderLowmem will set whether to use a lower amount of memory, @@ -37,16 +40,25 @@ func WithDecoderLowmem(b bool) DOption { return func(o *decoderOptions) error { o.lowMem = b; return nil } } -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
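From the caller's side the rewritten option controls that pipeline directly. A hedged usage sketch (stdin/stdout and the 1 GiB cap are illustrative, not part of this change): concurrency 1 selects the fully synchronous path, 0 asks for GOMAXPROCS, and WithDecoderMaxMemory bounds what hostile input may allocate:

    package main

    import (
        "io"
        "os"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        dec, err := zstd.NewReader(os.Stdin,
            zstd.WithDecoderConcurrency(1),   // 1 = no async decoding; 0 = GOMAXPROCS
            zstd.WithDecoderMaxMemory(1<<30), // cap untrusted streams at 1 GiB
        )
        if err != nil {
            panic(err)
        }
        defer dec.Close()
        if _, err := io.Copy(os.Stdout, dec); err != nil {
            panic(err)
        }
    }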
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { - if n <= 0 { + if n < 0 { return errors.New("concurrency must be at least 1") } - o.concurrent = n + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } return nil } } @@ -54,7 +66,7 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// Maximum and default is 1 << 63 bytes. +// Maximum is 1 << 63 bytes. Default is 64GiB. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 5f08a283023..f51ab529a0b 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 7 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index e6e315969b0..dcc987a7cb6 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) { if cap(s.filling) == 0 { s.filling = make([]byte, 0, e.o.blockSize) } - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() } if s.encoder == nil { s.encoder = e.o.encoder() } - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] s.encoder.Reset(e.o.dict, false) s.headerWritten = false s.eofWritten = false @@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + // Move blocks forward. s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 5f2e1d020ee..44d8dbd199a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -76,6 +76,7 @@ func WithEncoderCRC(b bool) EOption { // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 989c79f8c31..509d5cecea5 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -8,23 +8,17 @@ import ( "bytes" "encoding/hex" "errors" - "hash" "io" - "sync" "github.com/klauspost/compress/zstd/internal/xxhash" ) type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 + o decoderOptions + crc *xxhash.Digest WindowSize uint64 - // In order queue of blocks being decoded. - decoding chan *blockDec - // Frame history passed between blocks history history @@ -34,15 +28,10 @@ type frameDec struct { bBuf byteBuf FrameContentSize uint64 - frameDone sync.WaitGroup DictionaryID *uint32 HasCheckSum bool SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool } const ( @@ -208,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { b, err := br.readSmall(fcsSize) if err != nil { @@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error { d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } if debugDecoder { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared. 
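The synchronous encoder path added in encoder.go above is reached purely through the concurrency option noted in encoder_options.go; a hedged sketch of the caller side (writer and payload are illustrative): with WithEncoderConcurrency(1) the filling buffer is encoded in the caller's goroutine and the current/previous/writing rotation is skipped entirely:

    package main

    import (
        "os"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
        if err != nil {
            panic(err)
        }
        if _, err := enc.Write([]byte("hello zstd")); err != nil {
            panic(err)
        }
        if err := enc.Close(); err != nil { // flushes the last block
            panic(err)
        }
    }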
d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error { } d.history.windowSize = int(d.WindowSize) if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + d.history.allocFrameBuffer = d.history.windowSize * 2 + // TODO: Maybe use FrameContent size } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + // history contains input - maybe we do something d.rawInput = br return nil @@ -276,49 +272,18 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { if debugDecoder { - printf("decoding new block %p:%p", block, block.data) + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. - d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debugDecoder { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { @@ -340,7 +305,7 @@ func (d *frameDec) checkCRC() error { return err } - if !bytes.Equal(tmp[:], want) { + if !bytes.Equal(tmp[:], want) && !ignoreCRC { if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) } @@ -352,133 +317,28 @@ func (d *frameDec) checkCRC() error { return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 2MB. - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debugDecoder { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. 
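The value checkCRC compares against is, per the zstd format, the four least-significant bytes of the XXH64 digest of the decoded frame, stored little-endian. A standalone sketch of that rule, assuming github.com/cespare/xxhash/v2 in place of the library's internal xxhash fork:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"

        "github.com/cespare/xxhash/v2"
    )

    // frameChecksum returns the 4-byte content checksum for a decoded frame.
    func frameChecksum(decoded []byte) []byte {
        crc := make([]byte, 4)
        binary.LittleEndian.PutUint32(crc, uint32(xxhash.Sum64(decoded)))
        return crc
    }

    func main() {
        want := frameChecksum([]byte("payload"))
        got := frameChecksum([]byte("payload"))
        fmt.Println("crc ok:", bytes.Equal(got, want))
    }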
-func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. - block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debugDecoder { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debugDecoder { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debugDecoder { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history - } - block = next - } -} - // runDecoder will create a sync decoder that will decode a block of data. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { saved := d.history.b // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. 
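runDecoder above points the history at the caller-supplied dst, so the synchronous path decodes straight into the caller's buffer. From the public API that dst is the second argument of DecodeAll; a hedged sketch (the capacity hint is illustrative):

    package zstdutil

    import "github.com/klauspost/compress/zstd"

    // decompress passes a dst that runDecoder reuses as output and history.
    func decompress(compressed []byte) ([]byte, error) {
        dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
        if err != nil {
            return nil, err
        }
        defer dec.Close()
        return dec.DecodeAll(compressed, make([]byte, 0, 1<<20))
    }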
crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.FrameContentSize != fcsUnknown { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) @@ -489,22 +349,30 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { break } if uint64(len(d.history.b)) > d.o.maxDecodedSize { err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) err = ErrFrameSizeExceeded break } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { var n int n, err = d.crc.Write(dst[crcStart:]) if err == nil { diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index bb3d4fd6c31..fde4e6b6011 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -5,8 +5,10 @@ package zstd import ( + "encoding/binary" "errors" "fmt" + "io" ) const ( @@ -182,6 +184,29 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { return s.buildDtable() } +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. 
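mustReadFrom above restores an fseDecoder field by field with encoding/binary, which requires reading the fields in declaration order with fixed-size types. The same pattern in a standalone form (the struct and input bytes here are made up for illustration):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    type tableHeader struct {
        SymbolLen      uint16
        ActualTableLog uint8
        MaxBits        uint8
    }

    func main() {
        src := bytes.NewReader([]byte{0x34, 0x12, 0x06, 0x09})
        var h tableHeader
        // binary.Read fills exported fixed-size fields in order, little-endian.
        if err := binary.Read(src, binary.LittleEndian, &h); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", h) // {SymbolLen:4660 ActualTableLog:6 MaxBits:9}
    }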
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go new file mode 100644 index 00000000000..7f2210e0530 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fuzz.go @@ -0,0 +1,11 @@ +//go:build ignorecrc +// +build ignorecrc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// ignoreCRC can be used for fuzz testing to ignore CRC values... +const ignoreCRC = true diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go new file mode 100644 index 00000000000..6811c68a893 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go @@ -0,0 +1,11 @@ +//go:build !ignorecrc +// +build !ignorecrc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// ignoreCRC can be used for fuzz testing to ignore CRC values... +const ignoreCRC = false diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32d251..28b40153cc2 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,20 +10,31 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { @@ -35,7 +46,7 @@ func (h *history) reset() { if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { fseDecoderPool.Put(f) } - h.decoders = sequenceDecs{} + h.decoders = sequenceDecs{br: h.decoders.br} if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) @@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +95,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. 
+ discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index bc731e4cb69..e80139dd9c6 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,19 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. -func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -93,12 +99,127 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] return nil } +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + if true { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + } + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +272,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,51 +297,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) + return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) } - if size > cap(s.out) { + if size > cap(out) { // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. // over-allocating here can create a large amount of GC pressure so we try to keep // it as contained as possible - used := len(s.out) - startSize + used := len(out) - startSize addBytes := 256 + ll + ml + used>>2 // Clamp to max block size. if used+addBytes > maxBlockSize { addBytes = maxBlockSize - used } - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...) 
s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -231,26 +350,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. - start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. @@ -264,7 +382,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -291,9 +408,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize) + } + // Add final literals - s.out = append(s.out, s.literals...) - return nil + s.out = append(out, s.literals...) + return br.close() } // update states, at least 27 bits must be available. @@ -457,36 +579,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp } - -// mergeHistory will merge history. 
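The loop above applies each sequence as: copy ll literals, then copy ml bytes starting mo bytes back, where the match may land in the dictionary, the carried-over history, or the output built so far, and overlapping matches must be copied byte by byte. A reference sketch of just the no-dictionary, no-history case:

    package zstdutil

    import "fmt"

    // executeSeq applies one (ll, ml, mo) sequence to out, consuming literals.
    func executeSeq(out, literals []byte, ll, ml, mo int) ([]byte, []byte, error) {
        if ll > len(literals) {
            return nil, nil, fmt.Errorf("unexpected literal count, want %d bytes, have %d", ll, len(literals))
        }
        out = append(out, literals[:ll]...)
        literals = literals[ll:]
        if mo > len(out) || (mo == 0 && ml > 0) {
            return nil, nil, fmt.Errorf("invalid match offset %d with %d bytes of output", mo, len(out))
        }
        start := len(out) - mo
        for i := 0; i < ml; i++ {
            // Byte-at-a-time append makes overlapping copies (ml > mo) correct:
            // bytes written earlier in this loop become valid sources.
            out = append(out, out[start+i])
        }
        return out, literals, nil
    }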
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 00000000000..4676b09cc18 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,350 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+	if len(s.dict) > 0 {
+		return false, nil
+	}
+	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+		return false, nil
+	}
+	useSafe := false
+	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+		useSafe = true
+	}
+	if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+		useSafe = true
+	}
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeSyncAsmContext{
+		llTable:     s.litLengths.fse.dt[:maxTablesize],
+		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:     s.offsets.fse.dt[:maxTablesize],
+		llState:     uint64(s.litLengths.state.state),
+		mlState:     uint64(s.matchLengths.state.state),
+		ofState:     uint64(s.offsets.state.state),
+		iteration:   s.nSeqs - 1,
+		litRemain:   len(s.literals),
+		out:         s.out,
+		outPosition: len(s.out),
+		literals:    s.literals,
+		windowSize:  s.windowSize,
+		history:     hist,
+	}
+
+	s.seqSize = 0
+	startSize := len(s.out)
+
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+		}
+	} else {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+		}
+	}
+	switch errCode {
+	case noError:
+		break
+
+	case errorMatchLenOfsMismatch:
+		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+	case errorMatchLenTooBig:
+		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+	case errorMatchOffTooBig:
+		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+			ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+	case errorNotEnoughLiterals:
+		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+			ctx.ll, ctx.litRemain+ctx.ll)
+
+	case errorNotEnoughSpace:
+		size := ctx.outPosition + ctx.ll + ctx.ml
+		if debugDecoder {
+			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+		}
+		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+
+	default:
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+		return true, err
+	}
+
+	s.literals = s.literals[ctx.litPosition:]
+	t := ctx.outPosition
+	s.out = s.out[:t]
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
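Both decodeSyncSimple above and decode below pick an assembly variant once per block based on CPU features and, for the sync path, on whether the output has enough slack for the faster overwriting variants. The dispatch shape, sketched with golang.org/x/sys/cpu standing in for the library's internal cpuinfo package:

    package zstdutil

    import "golang.org/x/sys/cpu"

    func decodeBase(p []byte) int { return len(p) } // stand-ins for the
    func decodeBMI2(p []byte) int { return len(p) } // generated asm routines

    // decodeDispatch selects the widest variant the host CPU supports.
    func decodeDispatch(p []byte) int {
        if cpu.X86.HasBMI2 {
            return decodeBMI2(p)
        }
        return decodeBase(p)
    }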
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeAsmContext{
+		llTable:   s.litLengths.fse.dt[:maxTablesize],
+		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:   s.offsets.fse.dt[:maxTablesize],
+		llState:   uint64(s.litLengths.state.state),
+		mlState:   uint64(s.matchLengths.state.state),
+		ofState:   uint64(s.offsets.state.state),
+		seqs:      seqs,
+		iteration: len(seqs) - 1,
+		litRemain: len(s.literals),
+	}
+
+	s.seqSize = 0
+	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+		}
+	} else {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+		}
+	}
+	if errCode != 0 {
+		i := len(seqs) - ctx.iteration - 1
+		switch errCode {
+		case errorMatchLenOfsMismatch:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+		case errorMatchLenTooBig:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+		case errorNotEnoughLiterals:
+			ll := ctx.seqs[i].ll
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+		}
+
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
+	}
+
+	if ctx.litRemain < 0 {
+		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+			len(s.literals), len(s.literals)-ctx.litRemain)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+	seqs        []seqVals
+	seqIndex    int
+	out         []byte
+	history     []byte
+	literals    []byte
+	outPosition int
+	litPosition int
+	windowSize  int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+		s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + + ok := sequenceDecs_executeSimple_amd64(&ctx) + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 00000000000..01cc23fa8a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,3519 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm +// +build !appengine,!noasm,gc,!noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, DI + +sequenceDecs_decode_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R8 + +sequenceDecs_decode_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R9 + +sequenceDecs_decode_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_adjust_end + 
+sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_adjust_end + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, DI + +sequenceDecs_decode_56_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R8 + +sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R9 + +sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA 
sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, R8, R8 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB 
sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
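The BEXTRQ constants used throughout the BMI2 bodies above and below ($0x00000808 extracts 8 bits starting at bit 8, $0x00001010 extracts 16 bits starting at bit 16), together with the SHRQ $0x20 baseline reads and the MOVBQZX low-byte loads, imply how each 8-byte table entry is packed. A sketch of that layout — the accessor names follow the package's decSymbol, but treat the details as inferred from the loads in this file:

```go
// decSymbol packs one FSE table entry into 8 bytes, as implied by the
// BEXTRQ/SHRQ extraction patterns in the assembly.
type decSymbol uint64

func (d decSymbol) nbBits() uint8    { return uint8(d) }        // bits 0-7: bits read to refill the state
func (d decSymbol) addBits() uint8   { return uint8(d >> 8) }   // bits 8-15: extra value bits to read
func (d decSymbol) newState() uint16 { return uint16(d >> 16) } // bits 16-31: next-state base
func (d decSymbol) baseline() uint32 { return uint32(d >> 32) } // bits 32-63: decoded value baseline
```

Each "Update ... State" block then reads nbBits bits from the stream, adds them to newState, and indexes ctx.llTable, ctx.mlTable, or ctx.ofTable with the result to fetch the next packed entry.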
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, R8, R8 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + 
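The *_adjust_* label chains above and below implement zstd's repeat-offset history: the decoder keeps the three most recent offsets, and small offset codes select from that history, shifted by one when the sequence carries no literals. A hedged Go rendering, mirroring the inlined logic in seqdec_generic.go further down this patch:

```go
// adjustOffset resolves a decoded offset against the three-slot history,
// mirroring the *_adjust_* assembly blocks and the generic Go decoder.
func adjustOffset(mo, ll int, moB uint8, prev *[3]int) int {
	if moB > 1 {
		// A literal offset value: push it onto the history.
		prev[2], prev[1], prev[0] = prev[1], prev[0], mo
		return mo
	}
	if ll == 0 {
		// With zero literals the repeat codes are shifted by one:
		// 1 means Repeated_Offset2, 2 means Repeated_Offset3,
		// and 3 means Repeated_Offset1 minus one.
		mo++
	}
	if mo == 0 {
		return prev[0] // Repeated_Offset1: history stays as-is.
	}
	var temp int
	if mo == 3 {
		temp = prev[0] - 1
	} else {
		temp = prev[mo]
	}
	if temp == 0 {
		temp = 1 // zero is never a valid offset; input is corrupt
	}
	if mo != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = temp
	return temp
}
```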
+sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, R11 + JZ copy_1_word + MOVB (SI)(R14*1), R15 + MOVB R15, (BX)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, R11 + JZ copy_1_dword + MOVW (SI)(R14*1), R15 + MOVW R15, (BX)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, R11 + JZ copy_1_qword + MOVL (SI)(R14*1), R15 + MOVL R15, (BX)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, R11 + JZ copy_1_test + MOVQ (SI)(R14*1), R15 + MOVQ R15, (BX)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JGE copy_all_from_history + XORQ R11, R11 + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(R11*1), R12 + MOVB R12, (BX)(R11*1) + ADDQ $0x01, R11 + +copy_4_word: + 
TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(R11*1), R12 + MOVW R12, (BX)(R11*1) + ADDQ $0x02, R11 + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(R11*1), R12 + MOVL R12, (BX)(R11*1) + ADDQ $0x04, R11 + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(R11*1), R12 + MOVQ R12, (BX)(R11*1) + ADDQ $0x08, R11 + JMP copy_4_test + +copy_4: + MOVUPS (R14)(R11*1), X0 + MOVUPS X0, (BX)(R11*1) + ADDQ $0x10, R11 + +copy_4_test: + CMPQ R11, R13 + JB copy_4 + ADDQ R13, DI + ADDQ R13, BX + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, R11 + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (BX)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, R11 + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (BX)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, R11 + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (BX)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, R11 + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (BX)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (BX)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, R11 + JB copy_5 + ADDQ R11, BX + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ R12, R12 + +copy_2: + MOVUPS (R11)(R12*1), X0 + MOVUPS X0, (BX)(R12*1) + ADDQ $0x10, R12 + CMPQ R12, R13 + JB copy_2 + ADDQ R13, BX + ADDQ R13, DI + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ R12, R12 + +copy_slow_3: + MOVB (R11)(R12*1), R14 + MOVB R14, (BX)(R12*1) + INCQ R12 + CMPQ R12, R13 + JB copy_slow_3 + ADDQ R13, BX + ADDQ R13, DI + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
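The copy_1*/copy_4*/copy_5* ladders in sequenceDecs_executeSimple_amd64 above avoid a byte loop by peeling the low four bits of the length with single 1/2/4/8-byte moves; the remainder is then an exact multiple of 16, so the MOVUPS loop finishes it without overrun. A Go sketch of the idea (copyPeeled is a hypothetical helper, not the vendored code):

```go
// copyPeeled copies n bytes: 1/2/4/8-byte moves clear the low bits of n,
// then 16-byte chunks (MOVUPS in the assembly) copy the rest exactly.
func copyPeeled(dst, src []byte, n int) {
	i := 0
	if n&1 != 0 {
		dst[i] = src[i]
		i++
	}
	if n&2 != 0 {
		copy(dst[i:i+2], src[i:i+2])
		i += 2
	}
	if n&4 != 0 {
		copy(dst[i:i+4], src[i:i+4])
		i += 4
	}
	if n&8 != 0 {
		copy(dst[i:i+8], src[i:i+8])
		i += 8
	}
	for i < n { // n-i is now a multiple of 16
		copy(dst[i:i+16], src[i:i+16])
		i += 16
	}
}
```

Note the contrast with the plain copy_2 match loops, which skip the peeling and round the copy up to 16 bytes, apparently relying on spare output capacity; the _safe_ variants near the end of this file add the peeling there as well, so the final sequences cannot write past the usable end of the destination.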
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, DI + +sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R8 + +sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R9 + +sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + 
MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, AX + JZ copy_1_word + MOVB (R11)(R14*1), R15 + MOVB R15, (R10)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, AX + JZ copy_1_dword + MOVW (R11)(R14*1), R15 + MOVW R15, (R10)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, AX + JZ copy_1_qword + MOVL (R11)(R14*1), R15 + MOVL R15, (R10)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, AX + JZ copy_1_test + MOVQ (R11)(R14*1), R15 + MOVQ R15, (R10)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JGE copy_all_from_history + XORQ AX, AX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(AX*1), CL + MOVB CL, (R10)(AX*1) + ADDQ $0x01, AX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(AX*1), CX + MOVW CX, (R10)(AX*1) + ADDQ $0x02, AX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(AX*1), CX + MOVL CX, (R10)(AX*1) + ADDQ $0x04, AX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(AX*1), CX + MOVQ CX, (R10)(AX*1) + ADDQ $0x08, AX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(AX*1), X0 + MOVUPS X0, (R10)(AX*1) + ADDQ $0x10, AX + +copy_4_test: + CMPQ AX, R13 + JB copy_4 + ADDQ R13, R12 + 
ADDQ R13, R10 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, AX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R10)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, AX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R10)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, AX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R10)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, AX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R10)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R10)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, AX + JB copy_5 + ADDQ AX, R10 + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ CX, CX + +copy_2: + MOVUPS (AX)(CX*1), X0 + MOVUPS X0, (R10)(CX*1) + ADDQ $0x10, CX + CMPQ CX, R13 + JB copy_2 + ADDQ R13, R10 + ADDQ R13, R12 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ CX, CX + +copy_slow_3: + MOVB (AX)(CX*1), R14 + MOVB R14, (R10)(CX*1) + INCQ CX + CMPQ CX, R13 + JB copy_slow_3 + ADDQ R13, R10 + ADDQ R13, R12 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for 
the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, R8, R8 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_adjust_end + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_end + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, CX + JZ copy_1_word + MOVB (R10)(R14*1), R15 + MOVB R15, (R9)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, CX + JZ copy_1_dword + MOVW (R10)(R14*1), R15 + MOVW R15, (R9)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, CX + JZ copy_1_qword + MOVL (R10)(R14*1), R15 + MOVL R15, (R9)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, CX + JZ copy_1_test + MOVQ (R10)(R14*1), R15 + MOVQ R15, (R9)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JGE copy_all_from_history + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(CX*1), R12 + MOVB R12, (R9)(CX*1) + ADDQ $0x01, CX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(CX*1), R12 + MOVW R12, (R9)(CX*1) + ADDQ $0x02, CX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(CX*1), R12 + MOVL R12, (R9)(CX*1) + ADDQ $0x04, CX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(CX*1), R12 + MOVQ R12, (R9)(CX*1) + ADDQ $0x08, CX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(CX*1), X0 + MOVUPS X0, (R9)(CX*1) + ADDQ $0x10, CX + +copy_4_test: + CMPQ CX, R13 + JB copy_4 + ADDQ R13, R11 + ADDQ R13, R9 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, CX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R9)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, CX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R9)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, CX + JZ copy_5_qword + MOVL (R14)(R15*1), 
BP + MOVL BP, (R9)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, CX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R9)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R9)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, CX + JB copy_5 + ADDQ CX, R9 + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ R12, R12 + +copy_2: + MOVUPS (CX)(R12*1), X0 + MOVUPS X0, (R9)(R12*1) + ADDQ $0x10, R12 + CMPQ R12, R13 + JB copy_2 + ADDQ R13, R9 + ADDQ R13, R11 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ R12, R12 + +copy_slow_3: + MOVB (CX)(R12*1), R14 + MOVB R14, (R9)(R12*1) + INCQ R12 + CMPQ R12, R13 + JB copy_slow_3 + ADDQ R13, R9 + ADDQ R13, R11 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
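All of these TEXT bodies report status through ret+24(FP) with the same small code set, spelled out by the trailing "Return with ..." comments; the Go wrapper presumably maps them back to errors. For reference (the constant names here are illustrative, not from the package):

```go
// Status codes written to ret+24(FP) by the decode/decodeSync functions,
// per the "Return with ..." comments in the assembly.
const (
	seqDecSuccess           = 0 // decoded all sequences
	seqDecErrLenOfsMismatch = 1 // offset 0 with a non-zero match length
	seqDecErrMatchLenTooBig = 2 // match length above 0x20002 (131074)
	seqDecErrMatchOffTooBig = 3 // offset beyond history and window size
	seqDecErrNotEnoughLits  = 4 // literal count exceeds available literals
	seqDecErrNotEnoughSpace = 5 // output would outgrow cap(s.out)
)
```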
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, DI + +sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R8 + +sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R9 + +sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ 
s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, AX + JZ copy_1_word + MOVB (R11)(R14*1), R15 + MOVB R15, (R10)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, AX + JZ copy_1_dword + MOVW (R11)(R14*1), R15 + MOVW R15, (R10)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, AX + JZ copy_1_qword + MOVL (R11)(R14*1), R15 + MOVL R15, (R10)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, AX + JZ copy_1_test + MOVQ (R11)(R14*1), R15 + MOVQ R15, (R10)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JGE copy_all_from_history + XORQ AX, AX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(AX*1), CL + MOVB CL, (R10)(AX*1) + ADDQ $0x01, AX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(AX*1), CX + MOVW CX, (R10)(AX*1) + ADDQ $0x02, AX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(AX*1), CX + MOVL CX, (R10)(AX*1) + ADDQ $0x04, AX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(AX*1), CX 
+ MOVQ CX, (R10)(AX*1) + ADDQ $0x08, AX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(AX*1), X0 + MOVUPS X0, (R10)(AX*1) + ADDQ $0x10, AX + +copy_4_test: + CMPQ AX, R13 + JB copy_4 + ADDQ R13, R12 + ADDQ R13, R10 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, AX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R10)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, AX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R10)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, AX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R10)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, AX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R10)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R10)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, AX + JB copy_5 + ADDQ AX, R10 + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_2_word + MOVB (AX)(CX*1), R14 + MOVB R14, (R10)(CX*1) + ADDQ $0x01, CX + +copy_2_word: + TESTQ $0x00000002, R13 + JZ copy_2_dword + MOVW (AX)(CX*1), R14 + MOVW R14, (R10)(CX*1) + ADDQ $0x02, CX + +copy_2_dword: + TESTQ $0x00000004, R13 + JZ copy_2_qword + MOVL (AX)(CX*1), R14 + MOVL R14, (R10)(CX*1) + ADDQ $0x04, CX + +copy_2_qword: + TESTQ $0x00000008, R13 + JZ copy_2_test + MOVQ (AX)(CX*1), R14 + MOVQ R14, (R10)(CX*1) + ADDQ $0x08, CX + JMP copy_2_test + +copy_2: + MOVUPS (AX)(CX*1), X0 + MOVUPS X0, (R10)(CX*1) + ADDQ $0x10, CX + +copy_2_test: + CMPQ CX, R13 + JB copy_2 + ADDQ R13, R10 + ADDQ R13, R12 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ CX, CX + +copy_slow_3: + MOVB (AX)(CX*1), R14 + MOVB R14, (R10)(CX*1) + INCQ CX + CMPQ CX, R13 + JB copy_slow_3 + ADDQ R13, R10 + ADDQ R13, R12 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE 
+TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R13 + MOVQ $0x00001010, CX + 
BEXTRQ CX, R8, R8 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, CX + JZ copy_1_word + MOVB (R10)(R14*1), R15 + MOVB R15, (R9)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, CX + JZ copy_1_dword + MOVW (R10)(R14*1), R15 + MOVW R15, (R9)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, CX + JZ copy_1_qword + MOVL (R10)(R14*1), R15 + MOVL R15, (R9)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, CX + JZ copy_1_test + MOVQ (R10)(R14*1), R15 + MOVQ R15, (R9)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JGE copy_all_from_history + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(CX*1), R12 + MOVB R12, (R9)(CX*1) + ADDQ $0x01, CX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ 
copy_4_dword + MOVW (R14)(CX*1), R12 + MOVW R12, (R9)(CX*1) + ADDQ $0x02, CX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(CX*1), R12 + MOVL R12, (R9)(CX*1) + ADDQ $0x04, CX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(CX*1), R12 + MOVQ R12, (R9)(CX*1) + ADDQ $0x08, CX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(CX*1), X0 + MOVUPS X0, (R9)(CX*1) + ADDQ $0x10, CX + +copy_4_test: + CMPQ CX, R13 + JB copy_4 + ADDQ R13, R11 + ADDQ R13, R9 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, CX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R9)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, CX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R9)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, CX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R9)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, CX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R9)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R9)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, CX + JB copy_5 + ADDQ CX, R9 + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ R12, R12 + TESTQ $0x00000001, R13 + JZ copy_2_word + MOVB (CX)(R12*1), R14 + MOVB R14, (R9)(R12*1) + ADDQ $0x01, R12 + +copy_2_word: + TESTQ $0x00000002, R13 + JZ copy_2_dword + MOVW (CX)(R12*1), R14 + MOVW R14, (R9)(R12*1) + ADDQ $0x02, R12 + +copy_2_dword: + TESTQ $0x00000004, R13 + JZ copy_2_qword + MOVL (CX)(R12*1), R14 + MOVL R14, (R9)(R12*1) + ADDQ $0x04, R12 + +copy_2_qword: + TESTQ $0x00000008, R13 + JZ copy_2_test + MOVQ (CX)(R12*1), R14 + MOVQ R14, (R9)(R12*1) + ADDQ $0x08, R12 + JMP copy_2_test + +copy_2: + MOVUPS (CX)(R12*1), X0 + MOVUPS X0, (R9)(R12*1) + ADDQ $0x10, R12 + +copy_2_test: + CMPQ R12, R13 + JB copy_2 + ADDQ R13, R9 + ADDQ R13, R11 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ R12, R12 + +copy_slow_3: + MOVB (CX)(R12*1), R14 + MOVB R14, (R9)(R12*1) + INCQ R12 + CMPQ R12, R13 + JB copy_slow_3 + ADDQ R13, R9 + ADDQ R13, R11 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ 
ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 00000000000..c3452bc3a9e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. 
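The inlined block above is zstd's repeat-offset rule: an offset coded with more than one extra bit is a literal offset that rotates into a three-slot history, while small codes select a recent offset, shifted by one when the sequence carries no literals. A minimal, self-contained sketch distilled from that code (the function name and plain-int history are illustrative, not part of the vendored API):

    package main

    import "fmt"

    // adjustOffset mirrors the repeat-offset handling inlined in decode().
    // moB is the number of extra offset bits read for this sequence.
    func adjustOffset(prev *[3]int, mo, ll int, moB uint8) int {
        if moB > 1 {
            // A real offset: rotate it into the history.
            prev[2], prev[1], prev[0] = prev[1], prev[0], mo
            return mo
        }
        if ll == 0 {
            // With zero literals, the repeat codes shift by one.
            mo++
        }
        if mo == 0 {
            return prev[0] // Repeated_Offset1; history unchanged.
        }
        var temp int
        if mo == 3 {
            temp = prev[0] - 1 // Repeated_Offset1 minus one byte.
        } else {
            temp = prev[mo]
        }
        if temp == 0 {
            temp = 1 // zero is invalid; guard against corrupt input.
        }
        if mo != 1 {
            prev[2] = prev[1]
        }
        prev[1] = prev[0]
        prev[0] = temp
        return temp
    }

    func main() {
        hist := [3]int{1, 4, 8}
        // Code 1 with literals present selects Repeated_Offset2 (4) and
        // swaps it to the front of the history.
        fmt.Println(adjustOffset(&hist, 1, 5, 1), hist) // 4 [4 1 8]
    }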
+ if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at a time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added.
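A note on why the byte loop that follows (and the byte-wise `copy_slow_3` loop in the assembly earlier) is necessary: when the match length exceeds the offset, the source bytes past the first `mo` have not been produced yet, so a bulk memmove-style `copy` would read unwritten bytes. A forward byte-by-byte copy instead propagates the pattern as it goes. A minimal standalone illustration (names are hypothetical):

    package main

    import "fmt"

    // lzCopy writes length bytes at position t, each read from offset
    // bytes earlier in the same buffer. When length > offset this
    // deliberately re-reads bytes written earlier in the same loop,
    // replicating the pattern, which a single copy() would not do.
    func lzCopy(out []byte, t, offset, length int) {
        for i := 0; i < length; i++ {
            out[t+i] = out[t+i-offset]
        }
    }

    func main() {
        out := make([]byte, 8)
        copy(out, "ab")
        lzCopy(out, 2, 2, 6)
        fmt.Println(string(out)) // abababab
    }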
+ for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 967f29b3120..b53f606a18a 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -20,34 +20,49 @@ const ZipMethodPKWare = 20 var zipReaderPool sync.Pool -// newZipReader cannot be used since we would leak goroutines... -func newZipReader(r io.Reader) io.ReadCloser { - dec, ok := zipReaderPool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) + if err != nil { + panic(err) + } + dec = d } - dec = d + return &pooledZipReader{dec: dec, pool: pool} } - return &pooledZipReader{dec: dec} } type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - dec *Decoder + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder } func (r *pooledZipReader) Read(p []byte) (n int, err error) { r.mu.Lock() defer r.mu.Unlock() if r.dec == nil { - return 0, errors.New("Read after Close") + return 0, errors.New("read after close or EOF") } dec, err := r.dec.Read(p) - + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } return dec, err } @@ -57,7 +72,7 @@ func (r *pooledZipReader) Close() error { var err error if r.dec != nil { err = r.dec.Reset(nil) - zipReaderPool.Put(r.dec) + r.pool.Put(r.dec) r.dec = nil } return err @@ -111,12 +126,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { // ZipDecompressor returns a decompressor that can be registered with zip libraries. // See ZipCompressor for example. -func ZipDecompressor() func(r io.Reader) io.ReadCloser { - return func(r io.Reader) io.ReadCloser { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) - } - return d.IOReadCloser() - } +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) } diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index ef1d49a009c..c1c90b4a072 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -39,6 +39,9 @@ const zstdMinMatch = 3 // Reset the buffer offset when reaching this. const bufferReset = math.MaxInt32 - MaxWindowSize +// fcsUnknown is used for unknown frame content size. 
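Returning to the zip.go hunk above: ZipDecompressor now accepts decoder options (with concurrency always pinned to 1), so callers can adjust the decompression window when registering it with archive/zip. A sketch of how a consumer might wire it up; the file name and window size are arbitrary, and ZipMethodWinZip is the WinZip zstd method ID defined alongside ZipMethodPKWare in the same file:

    package main

    import (
        "archive/zip"
        "fmt"
        "os"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        f, err := os.Open("archive.zip") // hypothetical input
        if err != nil {
            panic(err)
        }
        defer f.Close()

        st, err := f.Stat()
        if err != nil {
            panic(err)
        }

        zr, err := zip.NewReader(f, st.Size())
        if err != nil {
            panic(err)
        }

        // Register the pooled zstd decompressor with a 64 MiB window cap.
        zr.RegisterDecompressor(zstd.ZipMethodWinZip,
            zstd.ZipDecompressor(zstd.WithDecoderMaxWindow(64<<20)))

        fmt.Println("entries:", len(zr.File))
    }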
+const fcsUnknown = math.MaxUint64 + var ( // ErrReservedBlockType is returned when a reserved block type is found. // Typically this indicates wrong or corrupted input. @@ -52,6 +55,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -75,6 +82,10 @@ var ( // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go index 45ddad236f3..242f82cc72a 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go @@ -4,7 +4,6 @@ package mountinfo import ( - "fmt" "os" "path/filepath" @@ -33,13 +32,13 @@ func mountedByStat(path string) (bool, error) { func normalizePath(path string) (realPath string, err error) { if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) + return "", err } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) + return "", err } if _, err := os.Stat(realPath); err != nil { - return "", fmt.Errorf("failed to stat target of %q: %w", path, err) + return "", err } return realPath, nil } diff --git a/vendor/github.com/openshift/imagebuilder/.travis.yml b/vendor/github.com/openshift/imagebuilder/.travis.yml index 97b530b4fd7..ff6afee5a0f 100644 --- a/vendor/github.com/openshift/imagebuilder/.travis.yml +++ b/vendor/github.com/openshift/imagebuilder/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - "1.9" - - "1.10" + - "1.16" + - "1.17" install: diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go index df52699046b..71dc41ea560 100644 --- a/vendor/github.com/openshift/imagebuilder/builder.go +++ b/vendor/github.com/openshift/imagebuilder/builder.go @@ -44,6 +44,7 @@ type Run struct { type Executor interface { Preserve(path string) error EnsureContainerPath(path string) error + EnsureContainerPathAs(path, user string, mode *os.FileMode) error Copy(excludes []string, copies ...Copy) error Run(run Run, config docker.Config) error UnrecognizedInstruction(step *Step) error @@ -61,6 +62,15 @@ func (logExecutor) EnsureContainerPath(path string) error { return nil } +func (logExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + if mode != nil { + log.Printf("ENSURE %s AS %q with MODE=%q", path, user, *mode) + } else { + log.Printf("ENSURE %s AS %q", path, user) + } + return nil +} + func (logExecutor) Copy(excludes []string, copies ...Copy) error { for _, c := range copies { log.Printf("COPY %v -> %s (from:%s download:%t), 
chown: %s, chmod %s", c.Src, c.Dest, c.From, c.Download, c.Chown, c.Chmod) @@ -88,6 +98,10 @@ func (noopExecutor) EnsureContainerPath(path string) error { return nil } +func (noopExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + return nil +} + func (noopExecutor) Copy(excludes []string, copies ...Copy) error { return nil } @@ -303,6 +317,9 @@ type Builder struct { PendingCopies []Copy Warnings []string + // Raw platform string specified with `FROM --platform` of the stage + // It's up to the implementation or client to parse and use this field + Platform string } func NewBuilder(args map[string]string) *Builder { @@ -375,7 +392,7 @@ func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error { } if len(b.RunConfig.WorkingDir) > 0 { - if err := exec.EnsureContainerPath(b.RunConfig.WorkingDir); err != nil { + if err := exec.EnsureContainerPathAs(b.RunConfig.WorkingDir, b.RunConfig.User, nil); err != nil { return err } } @@ -470,7 +487,7 @@ func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error { b.RunConfig.Env = nil // Check to see if we have a default PATH, note that windows won't - // have one as its set by HCS + // have one as it's set by HCS if runtime.GOOS != "windows" && !hasEnvName(b.Env, "PATH") { b.RunConfig.Env = append(b.RunConfig.Env, "PATH="+defaultPathEnv) } diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go index 0d82136e704..c437e4c2382 100644 --- a/vendor/github.com/openshift/imagebuilder/dispatchers.go +++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go @@ -234,6 +234,22 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri return fmt.Errorf("Windows does not support FROM scratch") } } + defaultArgs := envMapAsSlice(builtinBuildArgs) + userArgs := mergeEnv(envMapAsSlice(b.Args), b.Env) + userArgs = mergeEnv(defaultArgs, userArgs) + for _, a := range flagArgs { + arg, err := ProcessWord(a, userArgs) + if err != nil { + return err + } + switch { + case strings.HasPrefix(arg, "--platform="): + platformString := strings.TrimPrefix(arg, "--platform=") + b.Platform = platformString + default: + return fmt.Errorf("FROM only supports the --platform flag") + } + } b.RunConfig.Image = name // TODO: handle onbuild return nil @@ -573,65 +589,63 @@ var targetArgs = []string{"TARGETOS", "TARGETARCH", "TARGETVARIANT"} // to builder using the --build-arg flag for expansion/substitution or passing to 'run'. // Dockerfile author may optionally set a default value of this variable. func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { - if len(args) != 1 { - return fmt.Errorf("ARG requires exactly one argument definition") - } - var ( name string value string hasDefault bool ) - arg := args[0] - // 'arg' can just be a name or name-value pair. Note that this is different - // from 'env' that handles the split of name and value at the parser level. - // The reason for doing it differently for 'arg' is that we support just - // defining an arg and not assign it a value (while 'env' always expects a - // name-value pair). If possible, it will be good to harmonize the two.
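The new EnsureContainerPathAs hook above lets WORKDIR creation honor the active user and an optional mode instead of always defaulting. A minimal sketch of what an implementation might look like for an executor operating on a local filesystem root; this is purely illustrative, since real executors such as buildah create the path inside the container and resolve `user` against the image's /etc/passwd:

    package main

    import (
        "log"
        "os"
        "path/filepath"
    )

    // ensureContainerPathAs is a hypothetical host-side analogue of the new
    // Executor method: create the directory with the requested mode (falling
    // back to 0755) on behalf of the given user. UID/GID resolution for the
    // container user is out of scope here.
    func ensureContainerPathAs(root, path, user string, mode *os.FileMode) error {
        m := os.FileMode(0o755)
        if mode != nil {
            m = *mode
        }
        if err := os.MkdirAll(filepath.Join(root, path), m); err != nil {
            return err
        }
        log.Printf("ENSURE %s AS %q with MODE=%o", path, user, m)
        return nil
    }

    func main() {
        mode := os.FileMode(0o700)
        if err := ensureContainerPathAs("/tmp/rootfs", "/work", "builder", &mode); err != nil {
            log.Fatal(err)
        }
    }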
- if strings.Contains(arg, "=") { - parts := strings.SplitN(arg, "=", 2) - name = parts[0] - value = parts[1] - hasDefault = true - if name == "TARGETPLATFORM" { - p, err := platforms.Parse(value) - if err != nil { - return fmt.Errorf("error parsing TARGETPLATFORM argument") - } - for _, val := range targetArgs { - b.AllowedArgs[val] = true - } - b.Args["TARGETPLATFORM"] = p.OS + "/" + p.Architecture - b.Args["TARGETOS"] = p.OS - b.Args["TARGETARCH"] = p.Architecture - b.Args["TARGETVARIANT"] = p.Variant - if p.Variant != "" { - b.Args["TARGETPLATFORM"] = b.Args["TARGETPLATFORM"] + "/" + p.Variant + for _, argument := range args { + arg := argument + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + name = parts[0] + value = parts[1] + hasDefault = true + if name == "TARGETPLATFORM" { + p, err := platforms.Parse(value) + if err != nil { + return fmt.Errorf("error parsing TARGETPLATFORM argument") + } + for _, val := range targetArgs { + b.AllowedArgs[val] = true + } + b.Args["TARGETPLATFORM"] = p.OS + "/" + p.Architecture + b.Args["TARGETOS"] = p.OS + b.Args["TARGETARCH"] = p.Architecture + b.Args["TARGETVARIANT"] = p.Variant + if p.Variant != "" { + b.Args["TARGETPLATFORM"] = b.Args["TARGETPLATFORM"] + "/" + p.Variant + } } + } else if val, ok := builtinBuildArgs[arg]; ok { + name = arg + value = val + hasDefault = true + } else { + name = arg + hasDefault = false } - } else if val, ok := builtinBuildArgs[arg]; ok { - name = arg - value = val - hasDefault = true - } else { - name = arg - hasDefault = false - } - // add the arg to allowed list of build-time args from this step on. - b.AllowedArgs[name] = true + // add the arg to allowed list of build-time args from this step on. + b.AllowedArgs[name] = true - // If there is still no default value, a value can be assigned from the heading args - if val, ok := b.HeadingArgs[name]; ok && !hasDefault { - b.Args[name] = val - } + // If there is still no default value, a value can be assigned from the heading args + if val, ok := b.HeadingArgs[name]; ok && !hasDefault { + b.Args[name] = val + } - // If there is a default value associated with this arg then add it to the - // b.buildArgs, later default values for the same arg override earlier ones. - // The args passed to builder (UserArgs) override the default value of 'arg' - // Don't add them here as they were already set in NewBuilder. - if _, ok := b.UserArgs[name]; !ok && hasDefault { - b.Args[name] = value + // If there is a default value associated with this arg then add it to the + // b.buildArgs, later default values for the same arg override earlier ones. + // The args passed to builder (UserArgs) override the default value of 'arg' + // Don't add them here as they were already set in NewBuilder. 
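The TARGETPLATFORM branch above fans a single platform string out into TARGETOS, TARGETARCH, and TARGETVARIANT. The same derivation in isolation, using containerd's platform parser as the vendored code does (the import path is assumed from the upstream package; the helper name is illustrative):

    package main

    import (
        "fmt"

        "github.com/containerd/containerd/platforms"
    )

    // targetArgs derives the TARGET* build args from a platform string such
    // as "linux/arm64/v8", mirroring the dispatcher logic above.
    func targetArgs(value string) (map[string]string, error) {
        p, err := platforms.Parse(value)
        if err != nil {
            return nil, err
        }
        args := map[string]string{
            "TARGETPLATFORM": p.OS + "/" + p.Architecture,
            "TARGETOS":       p.OS,
            "TARGETARCH":     p.Architecture,
            "TARGETVARIANT":  p.Variant,
        }
        if p.Variant != "" {
            args["TARGETPLATFORM"] += "/" + p.Variant
        }
        return args, nil
    }

    func main() {
        args, err := targetArgs("linux/arm64/v8")
        if err != nil {
            panic(err)
        }
        fmt.Println(args) // map[TARGETARCH:arm64 TARGETOS:linux TARGETPLATFORM:linux/arm64/v8 TARGETVARIANT:v8]
    }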
+ if _, ok := b.UserArgs[name]; !ok && hasDefault { + b.Args[name] = value + } } return nil diff --git a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec index 79d16ec398d..143bfc355e7 100644 --- a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec +++ b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec @@ -12,7 +12,7 @@ # %global golang_version 1.8.1 -%{!?version: %global version 1.2.2-dev} +%{!?version: %global version 1.2.4-dev} %{!?release: %global release 1} %global package_name imagebuilder %global product_name Container Image Builder diff --git a/vendor/github.com/openshift/imagebuilder/internals.go b/vendor/github.com/openshift/imagebuilder/internals.go index f33dc70bbf7..d4b95390381 100644 --- a/vendor/github.com/openshift/imagebuilder/internals.go +++ b/vendor/github.com/openshift/imagebuilder/internals.go @@ -52,7 +52,7 @@ func hasSlash(input string) bool { // makeAbsolute ensures that the provided path is absolute. func makeAbsolute(dest, workingDir string) string { - // Twiddle the destination when its a relative path - meaning, make it + // Twiddle the destination when it's a relative path - meaning, make it // relative to the WORKINGDIR if dest == "." { if !hasSlash(workingDir) { diff --git a/vendor/github.com/openshift/imagebuilder/vendor.conf b/vendor/github.com/openshift/imagebuilder/vendor.conf deleted file mode 100644 index 8074ce80a12..00000000000 --- a/vendor/github.com/openshift/imagebuilder/vendor.conf +++ /dev/null @@ -1,25 +0,0 @@ -github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 -github.com/containerd/containerd v1.3.0 -github.com/containers/storage v1.2 -github.com/docker/docker b68221c37ee597950364788204546f9c9d0e46a1 -github.com/docker/go-connections 97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e -github.com/docker/go-units 2fb04c6466a548a03cb009c5569ee1ab1e35398e -github.com/fsouza/go-dockerclient openshift-4.0 https://github.com/openshift/go-dockerclient.git -github.com/gogo/protobuf c5a62797aee0054613cc578653a16c6237fef080 -github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e -github.com/Microsoft/go-winio 1a8911d1ed007260465c3bfbbc785ac6915a0bb8 -github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 -github.com/opencontainers/go-digest ac19fd6e7483ff933754af248d80be865e543d22 -github.com/opencontainers/image-spec 243ea084a44451d27322fed02b682d99e2af3ba9 -github.com/opencontainers/runc 923a8f8a9a07aceada5fc48c4d37e905d9b019b5 -github.com/pkg/errors 27936f6d90f9c8e1145f11ed52ffffbfdb9e0af7 -github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac -github.com/sirupsen/logrus d7b6bf5e4d26448fd977d07d745a2a66097ddecb -golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 -golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 -golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba -k8s.io/klog 8e90cee79f823779174776412c13478955131846 -google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 -github.com/golang/protobuf v1.2.0 -google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 - diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go index 30572ea87ff..6a6acfd9ab3 100644 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go @@ -56,6 +56,9 @@ func (v *GVariant) native() *C.GVariant { 
} func (v *GVariant) Ptr() unsafe.Pointer { + if v == nil { + return nil + } return v.ptr } diff --git a/vendor/github.com/mtrmac/gpgme/.appveyor.yml b/vendor/github.com/proglottis/gpgme/.appveyor.yml similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.appveyor.yml rename to vendor/github.com/proglottis/gpgme/.appveyor.yml diff --git a/vendor/github.com/mtrmac/gpgme/.gitignore b/vendor/github.com/proglottis/gpgme/.gitignore similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.gitignore rename to vendor/github.com/proglottis/gpgme/.gitignore diff --git a/vendor/github.com/mtrmac/gpgme/.travis.yml b/vendor/github.com/proglottis/gpgme/.travis.yml similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.travis.yml rename to vendor/github.com/proglottis/gpgme/.travis.yml diff --git a/vendor/github.com/mtrmac/gpgme/LICENSE b/vendor/github.com/proglottis/gpgme/LICENSE similarity index 100% rename from vendor/github.com/mtrmac/gpgme/LICENSE rename to vendor/github.com/proglottis/gpgme/LICENSE diff --git a/vendor/github.com/mtrmac/gpgme/README.md b/vendor/github.com/proglottis/gpgme/README.md similarity index 100% rename from vendor/github.com/mtrmac/gpgme/README.md rename to vendor/github.com/proglottis/gpgme/README.md diff --git a/vendor/github.com/mtrmac/gpgme/callbacks.go b/vendor/github.com/proglottis/gpgme/callbacks.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/callbacks.go rename to vendor/github.com/proglottis/gpgme/callbacks.go diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/proglottis/gpgme/data.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/data.go rename to vendor/github.com/proglottis/gpgme/data.go diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/proglottis/gpgme/go_gpgme.c similarity index 100% rename from vendor/github.com/mtrmac/gpgme/go_gpgme.c rename to vendor/github.com/proglottis/gpgme/go_gpgme.c diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/proglottis/gpgme/go_gpgme.h similarity index 92% rename from vendor/github.com/mtrmac/gpgme/go_gpgme.h rename to vendor/github.com/proglottis/gpgme/go_gpgme.h index d4826ab368e..eb3a4ba88b1 100644 --- a/vendor/github.com/mtrmac/gpgme/go_gpgme.h +++ b/vendor/github.com/proglottis/gpgme/go_gpgme.h @@ -6,11 +6,6 @@ #include -/* GPGME_VERSION_NUMBER was introduced in 1.4.0 */ -#if !defined(GPGME_VERSION_NUMBER) || GPGME_VERSION_NUMBER < 0x010402 -typedef off_t gpgme_off_t; /* Introduced in 1.4.2 */ -#endif - extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size); extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go similarity index 96% rename from vendor/github.com/mtrmac/gpgme/gpgme.go rename to vendor/github.com/proglottis/gpgme/gpgme.go index c19b9aebc5c..9833057a663 100644 --- a/vendor/github.com/mtrmac/gpgme/gpgme.go +++ b/vendor/github.com/proglottis/gpgme/gpgme.go @@ -53,13 +53,13 @@ const ( type PinEntryMode int -// const ( // Unavailable in 1.3.2 -// PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT -// PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK -// PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL -// PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR -// PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK -// ) +const ( + 
PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT + PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK + PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL + PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR + PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK +) type EncryptFlag uint @@ -348,19 +348,17 @@ func (c *Context) KeyListMode() KeyListMode { return res } -// Unavailable in 1.3.2: -// func (c *Context) SetPinEntryMode(m PinEntryMode) error { -// err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) -// runtime.KeepAlive(c) -// return err -// } +func (c *Context) SetPinEntryMode(m PinEntryMode) error { + err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) + runtime.KeepAlive(c) + return err +} -// Unavailable in 1.3.2: -// func (c *Context) PinEntryMode() PinEntryMode { -// res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) -// runtime.KeepAlive(c) -// return res -// } +func (c *Context) PinEntryMode() PinEntryMode { + res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) + runtime.KeepAlive(c) + return res +} func (c *Context) SetCallback(callback Callback) error { var err error diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info.go b/vendor/github.com/proglottis/gpgme/unset_agent_info.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/unset_agent_info.go rename to vendor/github.com/proglottis/gpgme/unset_agent_info.go diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go b/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go rename to vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go diff --git a/vendor/github.com/sylabs/sif/v2/LICENSE.md b/vendor/github.com/sylabs/sif/v2/LICENSE.md new file mode 100644 index 00000000000..dea3e409e11 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/LICENSE.md @@ -0,0 +1,29 @@ +# LICENSE + +Copyright (c) 2018-2022, Sylabs Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
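With the minimum GPGME version raised, the pinentry-mode API above is compiled in unconditionally rather than left commented out. A short usage sketch; loopback mode is the common choice for non-interactive signing, and error handling is trimmed to the essentials:

    package main

    import (
        "fmt"

        "github.com/proglottis/gpgme"
    )

    func main() {
        ctx, err := gpgme.New()
        if err != nil {
            panic(err)
        }

        // Route passphrase prompts through the loopback pinentry so the
        // caller can supply credentials programmatically.
        if err := ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
            panic(err)
        }
        fmt.Println("loopback active:", ctx.PinEntryMode() == gpgme.PinEntryLoopback)
    }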
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go new file mode 100644 index 00000000000..d6f667378a2 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go @@ -0,0 +1,72 @@ +// Copyright (c) 2021-2022, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +var ( + hdrArchUnknown archType = [...]byte{'0', '0', '\x00'} + hdrArch386 archType = [...]byte{'0', '1', '\x00'} + hdrArchAMD64 archType = [...]byte{'0', '2', '\x00'} + hdrArchARM archType = [...]byte{'0', '3', '\x00'} + hdrArchARM64 archType = [...]byte{'0', '4', '\x00'} + hdrArchPPC64 archType = [...]byte{'0', '5', '\x00'} + hdrArchPPC64le archType = [...]byte{'0', '6', '\x00'} + hdrArchMIPS archType = [...]byte{'0', '7', '\x00'} + hdrArchMIPSle archType = [...]byte{'0', '8', '\x00'} + hdrArchMIPS64 archType = [...]byte{'0', '9', '\x00'} + hdrArchMIPS64le archType = [...]byte{'1', '0', '\x00'} + hdrArchS390x archType = [...]byte{'1', '1', '\x00'} + hdrArchRISCV64 archType = [...]byte{'1', '2', '\x00'} +) + +type archType [3]byte + +// getSIFArch returns the archType corresponding to go runtime arch. +func getSIFArch(arch string) archType { + archMap := map[string]archType{ + "386": hdrArch386, + "amd64": hdrArchAMD64, + "arm": hdrArchARM, + "arm64": hdrArchARM64, + "ppc64": hdrArchPPC64, + "ppc64le": hdrArchPPC64le, + "mips": hdrArchMIPS, + "mipsle": hdrArchMIPSle, + "mips64": hdrArchMIPS64, + "mips64le": hdrArchMIPS64le, + "s390x": hdrArchS390x, + "riscv64": hdrArchRISCV64, + } + + t, ok := archMap[arch] + if !ok { + return hdrArchUnknown + } + return t +} + +// GoArch returns the go runtime arch corresponding to t. +func (t archType) GoArch() string { + archMap := map[archType]string{ + hdrArch386: "386", + hdrArchAMD64: "amd64", + hdrArchARM: "arm", + hdrArchARM64: "arm64", + hdrArchPPC64: "ppc64", + hdrArchPPC64le: "ppc64le", + hdrArchMIPS: "mips", + hdrArchMIPSle: "mipsle", + hdrArchMIPS64: "mips64", + hdrArchMIPS64le: "mips64le", + hdrArchS390x: "s390x", + hdrArchRISCV64: "riscv64", + } + + arch, ok := archMap[t] + if !ok { + arch = "unknown" + } + return arch +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go new file mode 100644 index 00000000000..d706fb1a59c --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go @@ -0,0 +1,103 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "io" +) + +// A Buffer is a variable-sized buffer of bytes that implements the sif.ReadWriter interface. The +// zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte + pos int64 +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial contents. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +var errNegativeOffset = errors.New("negative offset") + +// ReadAt implements the io.ReaderAt interface. 
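The SIF header stores the architecture as two ASCII digits plus a NUL terminator rather than a Go string, which is what the two maps in arch.go translate to and from. A tiny self-contained illustration of that encoding (the names below are local to the example, not the package's unexported types):

    package main

    import "fmt"

    // archType mimics the 3-byte on-disk field: two ASCII digits + NUL.
    type archType [3]byte

    var amd64Code = archType{'0', '2', '\x00'}

    func main() {
        // What actually lands in the header for an amd64 image:
        fmt.Printf("% x\n", amd64Code) // 30 32 00
        // And the reverse mapping that GoArch() performs:
        codes := map[archType]string{amd64Code: "amd64"}
        fmt.Println(codes[amd64Code]) // amd64
    }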
+func (b *Buffer) ReadAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, errNegativeOffset + } + + if off >= int64(len(b.buf)) { + return 0, io.EOF + } + + n = copy(p, b.buf[off:]) + if n < len(p) { + err = io.EOF + } + return n, err +} + +var errNegativePosition = errors.New("negative position") + +// Write implements the io.Writer interface. +func (b *Buffer) Write(p []byte) (n int, err error) { + if b.pos < 0 { + return 0, errNegativePosition + } + + if have, need := int64(len(b.buf))-b.pos, int64(len(p)); have < need { + b.buf = append(b.buf, make([]byte, need-have)...) + } + + n = copy(b.buf[b.pos:], p) + b.pos += int64(n) + return n, nil +} + +var errInvalidWhence = errors.New("invalid whence") + +// Seek implements the io.Seeker interface. +func (b *Buffer) Seek(offset int64, whence int) (int64, error) { + var abs int64 + + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = b.pos + offset + case io.SeekEnd: + abs = int64(len(b.buf)) + offset + default: + return 0, errInvalidWhence + } + + if abs < 0 { + return 0, errNegativePosition + } + + b.pos = abs + return abs, nil +} + +var errTruncateRange = errors.New("truncation out of range") + +// Truncate discards all but the first n bytes from the buffer. +func (b *Buffer) Truncate(n int64) error { + if n < 0 || n > int64(len(b.buf)) { + return errTruncateRange + } + + b.buf = b.buf[:n] + return nil +} + +// Bytes returns the contents of the buffer. The slice is valid for use only until the next buffer +// modification (that is, only until the next call to a method like ReadAt, Write, or Truncate). +func (b *Buffer) Bytes() []byte { return b.buf } + +// Len returns the number of bytes in the buffer. +func (b *Buffer) Len() int64 { return int64(len(b.buf)) } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go new file mode 100644 index 00000000000..e65bdb7476d --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -0,0 +1,680 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/google/uuid" +) + +// nextAligned finds the next offset that satisfies alignment. +func nextAligned(offset int64, alignment int) int64 { + align64 := uint64(alignment) + offset64 := uint64(offset) + + if align64 != 0 && offset64%align64 != 0 { + offset64 = (offset64 & ^(align64 - 1)) + align64 + } + + return int64(offset64) +} + +// writeDataObjectAt writes the data object described by di to ws, using time t, recording details +// in d. The object is written at the first position that satisfies the alignment requirements +// described by di following offsetUnaligned. 
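Before the writer below, it is worth seeing nextAligned in action: it rounds an offset up to the next multiple of the alignment using a bitmask, which only behaves correctly for power-of-two alignments (presumably the only values callers pass; the excerpt shows no explicit check). A standalone check, with the function copied verbatim from create.go above:

    package main

    import "fmt"

    // nextAligned finds the next offset that satisfies alignment.
    func nextAligned(offset int64, alignment int) int64 {
        align64 := uint64(alignment)
        offset64 := uint64(offset)

        if align64 != 0 && offset64%align64 != 0 {
            offset64 = (offset64 & ^(align64 - 1)) + align64
        }

        return int64(offset64)
    }

    func main() {
        fmt.Println(nextAligned(0, 8))       // 0: already aligned
        fmt.Println(nextAligned(5, 8))       // 8: rounded up
        fmt.Println(nextAligned(4097, 4096)) // 8192
        fmt.Println(nextAligned(7, 0))       // 7: zero alignment is a no-op
    }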
+func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll + offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart) + if err != nil { + return err + } + + n, err := io.Copy(ws, di.r) + if err != nil { + return err + } + + if err := di.fillDescriptor(t, d); err != nil { + return err + } + d.Used = true + d.Offset = offset + d.Size = n + d.SizeWithPadding = offset - offsetUnaligned + n + + return nil +} + +var ( + errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image") + errPrimaryPartition = errors.New("image already contains a primary partition") +) + +// writeDataObject writes the data object described by di to f, using time t, recording details in +// the descriptor at index i. +func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) error { + if i >= len(f.rds) { + return errInsufficientCapacity + } + + // If this is a primary partition, verify there isn't another primary partition, and update the + // architecture in the global header. + if p, ok := di.opts.extra.(partition); ok && p.Parttype == PartPrimSys { + if ds, err := f.GetDescriptors(WithPartitionType(PartPrimSys)); err == nil && len(ds) > 0 { + return errPrimaryPartition + } + + f.h.Arch = p.Arch + } + + d := &f.rds[i] + d.ID = uint32(i) + 1 + + if err := writeDataObjectAt(f.rw, f.h.DataOffset+f.h.DataSize, di, t, d); err != nil { + return err + } + + // Update minimum object ID map. + if minID, ok := f.minIDs[d.GroupID]; !ok || d.ID < minID { + f.minIDs[d.GroupID] = d.ID + } + + f.h.DescriptorsFree-- + f.h.DataSize += d.SizeWithPadding + + return nil +} + +// writeDescriptors writes the descriptors in f to backing storage. +func (f *FileImage) writeDescriptors() error { + if _, err := f.rw.Seek(f.h.DescriptorsOffset, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.rds) +} + +// writeHeader writes the global header in f to backing storage. +func (f *FileImage) writeHeader() error { + if _, err := f.rw.Seek(0, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.h) +} + +// createOpts accumulates container creation options. +type createOpts struct { + launchScript [hdrLaunchLen]byte + id uuid.UUID + descriptorsOffset int64 + descriptorCapacity int64 + dis []DescriptorInput + t time.Time + closeOnUnload bool +} + +// CreateOpt are used to specify container creation options. +type CreateOpt func(*createOpts) error + +var errLaunchScriptLen = errors.New("launch script too large") + +// OptCreateWithLaunchScript specifies s as the launch script. +func OptCreateWithLaunchScript(s string) CreateOpt { + return func(co *createOpts) error { + b := []byte(s) + + if len(b) >= len(co.launchScript) { + return errLaunchScriptLen + } + + copy(co.launchScript[:], b) + + return nil + } +} + +// OptCreateDeterministic sets header/descriptor fields to values that support deterministic +// creation of images. +func OptCreateDeterministic() CreateOpt { + return func(co *createOpts) error { + co.id = uuid.Nil + co.t = time.Time{} + return nil + } +} + +// OptCreateWithID specifies id as the unique ID. +func OptCreateWithID(id string) CreateOpt { + return func(co *createOpts) error { + id, err := uuid.Parse(id) + co.id = id + return err + } +} + +// OptCreateWithDescriptorCapacity specifies that the created image should have the capacity for a +// maximum of n descriptors.
+func OptCreateWithDescriptorCapacity(n int64) CreateOpt { + return func(co *createOpts) error { + co.descriptorCapacity = n + return nil + } +} + +// OptCreateWithDescriptors appends dis to the list of descriptors. +func OptCreateWithDescriptors(dis ...DescriptorInput) CreateOpt { + return func(co *createOpts) error { + co.dis = append(co.dis, dis...) + return nil + } +} + +// OptCreateWithTime specifies t as the image creation time. +func OptCreateWithTime(t time.Time) CreateOpt { + return func(co *createOpts) error { + co.t = t + return nil + } +} + +// OptCreateWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. +func OptCreateWithCloseOnUnload(b bool) CreateOpt { + return func(co *createOpts) error { + co.closeOnUnload = b + return nil + } +} + +// createContainer creates a new SIF container file in rw, according to opts. +func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) { + rds := make([]rawDescriptor, co.descriptorCapacity) + rdsSize := int64(binary.Size(rds)) + + h := header{ + LaunchScript: co.launchScript, + Magic: hdrMagic, + Version: CurrentVersion.bytes(), + Arch: hdrArchUnknown, + ID: co.id, + CreatedAt: co.t.Unix(), + ModifiedAt: co.t.Unix(), + DescriptorsFree: co.descriptorCapacity, + DescriptorsTotal: co.descriptorCapacity, + DescriptorsOffset: co.descriptorsOffset, + DescriptorsSize: rdsSize, + DataOffset: co.descriptorsOffset + rdsSize, + } + + f := &FileImage{ + rw: rw, + h: h, + rds: rds, + minIDs: make(map[uint32]uint32), + } + + for i, di := range co.dis { + if err := f.writeDataObject(i, di, co.t); err != nil { + return nil, err + } + } + + if err := f.writeDescriptors(); err != nil { + return nil, err + } + + if err := f.writeHeader(); err != nil { + return nil, err + } + + return f, nil +} + +// CreateContainer creates a new SIF container in rw, according to opts. One or more data objects +// can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptCreateWithCloseOnUnload. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to time.Now(). To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. +func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) { + id, err := uuid.NewRandom() + if err != nil { + return nil, err + } + + co := createOpts{ + id: id, + descriptorsOffset: 4096, + descriptorCapacity: 48, + t: time.Now(), + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&co); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := createContainer(rw, co) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = co.closeOnUnload + return f, nil +} + +// CreateContainerAtPath creates a new SIF container file at path, according to opts. 
One or more +// data objects can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to time.Now(). To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. +func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) { + fp, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := CreateContainer(fp, opts...) + if err != nil { + fp.Close() + os.Remove(fp.Name()) + + return nil, err + } + + f.closeOnUnload = true + return f, nil +} + +func zeroData(fimg *FileImage, descr *rawDescriptor) error { + // first, move to data object offset + if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil { + return err + } + + var zero [4096]byte + n := descr.Size + upbound := int64(4096) + for { + if n < 4096 { + upbound = n + } + + if _, err := fimg.rw.Write(zero[:upbound]); err != nil { + return err + } + n -= 4096 + if n <= 0 { + break + } + } + + return nil +} + +func resetDescriptor(fimg *FileImage, index int) error { + // If we remove the primary partition, set the global header Arch field to HdrArchUnknown + // to indicate that the SIF file doesn't include a primary partition and no dependency + // on any architecture exists. + if fimg.rds[index].isPartitionOfType(PartPrimSys) { + fimg.h.Arch = hdrArchUnknown + } + + offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0])) + + // first, move to descriptor offset + if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil { + return err + } + + var emptyDesc rawDescriptor + return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc) +} + +// addOpts accumulates object add options. +type addOpts struct { + t time.Time +} + +// AddOpt are used to specify object add options. +type AddOpt func(*addOpts) error + +// OptAddDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptAddDeterministic() AddOpt { + return func(ao *addOpts) error { + ao.t = time.Time{} + return nil + } +} + +// OptAddWithTime specifies t as the image modification time. +func OptAddWithTime(t time.Time) AddOpt { + return func(ao *addOpts) error { + ao.t = t + return nil + } +} + +// AddObject adds a new data object and its descriptor into the specified SIF file. +// +// By default, the image modification time is set to the current time. To override this, consider +// using OptAddDeterministic or OptAddWithTime. +func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error { + ao := addOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&ao); err != nil { + return fmt.Errorf("%w", err) + } + } + + // Find an unused descriptor. 
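Taken together, the creation and add paths compose as follows. A hedged end-to-end sketch: NewBuffer, CreateContainer, the OptCreate*/OptAdd* options, and AddObject are all defined in this package above, while NewDescriptorInput and the DataGeneric data type are assumed from the rest of the sif v2 API, which this excerpt does not show:

    package main

    import (
        "fmt"
        "strings"

        "github.com/sylabs/sif/v2/pkg/sif"
    )

    func main() {
        // Build an image in memory; CreateContainerAtPath works the same
        // way against a file on disk.
        buf := sif.NewBuffer(nil)

        f, err := sif.CreateContainer(buf,
            sif.OptCreateDeterministic(),           // nil UUID, zero timestamps
            sif.OptCreateWithDescriptorCapacity(8), // default is 48
        )
        if err != nil {
            panic(err)
        }
        defer f.UnloadContainer()

        // NewDescriptorInput and DataGeneric are assumed from the wider
        // sif v2 API; this excerpt only shows the option constructors.
        di, err := sif.NewDescriptorInput(sif.DataGeneric,
            strings.NewReader("hello"),
            sif.OptObjectName("greeting"),
        )
        if err != nil {
            panic(err)
        }

        if err := f.AddObject(di, sif.OptAddDeterministic()); err != nil {
            panic(err)
        }
        fmt.Println("image size:", buf.Len())
    }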
+ i := 0 + for _, rd := range f.rds { + if !rd.Used { + break + } + i++ + } + + if err := f.writeDataObject(i, di, ao.t); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = ao.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// isLast returns true if the data object associated with d is the last in f. +func (f *FileImage) isLast(d *rawDescriptor) bool { + isLast := true + + end := d.Offset + d.Size + f.WithDescriptors(func(d Descriptor) bool { + isLast = d.Offset()+d.Size() <= end + return !isLast + }) + + return isLast +} + +// truncateAt truncates f at the start of the padded data object described by d. +func (f *FileImage) truncateAt(d *rawDescriptor) error { + start := d.Offset + d.Size - d.SizeWithPadding + + if err := f.rw.Truncate(start); err != nil { + return err + } + + return nil +} + +// deleteOpts accumulates object deletion options. +type deleteOpts struct { + zero bool + compact bool + t time.Time +} + +// DeleteOpt are used to specify object deletion options. +type DeleteOpt func(*deleteOpts) error + +// OptDeleteZero specifies whether the deleted object should be zeroed. +func OptDeleteZero(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.zero = b + return nil + } +} + +// OptDeleteCompact specifies whether the image should be compacted following object deletion. +func OptDeleteCompact(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.compact = b + return nil + } +} + +// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptDeleteDeterministic() DeleteOpt { + return func(do *deleteOpts) error { + do.t = time.Time{} + return nil + } +} + +// OptDeleteWithTime specifies t as the image modification time. +func OptDeleteWithTime(t time.Time) DeleteOpt { + return func(do *deleteOpts) error { + do.t = t + return nil + } +} + +var errCompactNotImplemented = errors.New("compact not implemented for non-last object") + +// DeleteObject deletes the data object with id, according to opts. +// +// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following +// object deletion, use OptDeleteCompact. +// +// By default, the image modification time is set to time.Now(). To override this, consider using +// OptDeleteDeterministic or OptDeleteWithTime. +func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error { + do := deleteOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&do); err != nil { + return fmt.Errorf("%w", err) + } + } + + d, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if do.compact && !f.isLast(d) { + return fmt.Errorf("%w", errCompactNotImplemented) + } + + if do.zero { + if err := zeroData(f, d); err != nil { + return fmt.Errorf("%w", err) + } + } + + if do.compact { + if err := f.truncateAt(d); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.DataSize -= d.SizeWithPadding + } + + f.h.DescriptorsFree++ + f.h.ModifiedAt = do.t.Unix() + + index := 0 + for i, od := range f.rds { + if od.ID == id { + index = i + break + } + } + + if err := resetDescriptor(f, index); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// setOpts accumulates object set options.
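DeleteObject's options compose the same way; note from errCompactNotImplemented above that compaction is only supported when the object is the last one in the file. A short sketch continuing from the FileImage f created earlier, where the added object received ID 1:

    // Zero the object's data region and, because object 1 is the last one
    // in the file, reclaim its space as well.
    if err := f.DeleteObject(1, sif.OptDeleteZero(true), sif.OptDeleteCompact(true)); err != nil {
        // errCompactNotImplemented surfaces here when the object isn't last.
        panic(err)
    }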
+type setOpts struct { + t time.Time +} + +// SetOpt are used to specify object set options. +type SetOpt func(*setOpts) error + +// OptSetDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptSetDeterministic() SetOpt { + return func(so *setOpts) error { + so.t = time.Time{} + return nil + } +} + +// OptSetWithTime specifies t as the image/object modification time. +func OptSetWithTime(t time.Time) SetOpt { + return func(so *setOpts) error { + so.t = t + return nil + } +} + +var ( + errNotPartition = errors.New("data object not a partition") + errNotSystem = errors.New("data object not a system partition") +) + +// SetPrimPart sets the specified system partition to be the primary one. +// +// By default, the image/object modification times are set to time.Now(). To override this, +// consider using OptSetDeterministic or OptSetWithTime. +func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { + so := setOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + descr, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if descr.DataType != DataPartition { + return fmt.Errorf("%w", errNotPartition) + } + + fs, pt, arch, err := descr.getPartitionMetadata() + if err != nil { + return fmt.Errorf("%w", err) + } + + // if already primary system partition, nothing to do + if pt == PartPrimSys { + return nil + } + + if pt != PartSystem { + return fmt.Errorf("%w", errNotSystem) + } + + olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys)) + if err != nil && !errors.Is(err, ErrObjectNotFound) { + return fmt.Errorf("%w", err) + } + + f.h.Arch = getSIFArch(arch) + + extra := partition{ + Fstype: fs, + Parttype: PartPrimSys, + } + copy(extra.Arch[:], arch) + + if err := descr.setExtra(extra); err != nil { + return fmt.Errorf("%w", err) + } + + if olddescr != nil { + oldfs, _, oldarch, err := olddescr.getPartitionMetadata() + if err != nil { + return fmt.Errorf("%w", err) + } + + oldextra := partition{ + Fstype: oldfs, + Parttype: PartSystem, + Arch: getSIFArch(oldarch), + } + + if err := olddescr.setExtra(oldextra); err != nil { + return fmt.Errorf("%w", err) + } + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go new file mode 100644 index 00000000000..da7a6a7c7be --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go @@ -0,0 +1,267 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "bytes" + "crypto" + "encoding/binary" + "errors" + "fmt" + "io" + "strings" + "time" +) + +// rawDescriptor represents an on-disk object descriptor. 
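SetPrimPart rounds out the mutators: it promotes one system partition to primary, refreshes the header architecture from the partition's metadata, and demotes any previous primary back to a plain system partition. Continuing the same sketch, assuming the image holds a system partition with object ID 2:

    // Promote data object 2 to primary system partition; any existing
    // primary is demoted to PartSystem and the header arch is updated.
    if err := f.SetPrimPart(2, sif.OptSetDeterministic()); err != nil {
        panic(err)
    }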
+type rawDescriptor struct { + DataType DataType + Used bool + ID uint32 + GroupID uint32 + LinkedID uint32 + Offset int64 + Size int64 + SizeWithPadding int64 + + CreatedAt int64 + ModifiedAt int64 + UID int64 // Deprecated: UID exists for historical compatibility and should not be used. + GID int64 // Deprecated: GID exists for historical compatibility and should not be used. + Name [descrNameLen]byte + Extra [descrMaxPrivLen]byte +} + +// partition represents the SIF partition data object descriptor. +type partition struct { + Fstype FSType + Parttype PartType + Arch archType +} + +// signature represents the SIF signature data object descriptor. +type signature struct { + Hashtype hashType + Entity [descrEntityLen]byte +} + +// cryptoMessage represents the SIF crypto message object descriptor. +type cryptoMessage struct { + Formattype FormatType + Messagetype MessageType +} + +var errNameTooLarge = errors.New("name value too large") + +// setName encodes name into the name field of d. +func (d *rawDescriptor) setName(name string) error { + if len(name) > len(d.Name) { + return errNameTooLarge + } + + for i := copy(d.Name[:], name); i < len(d.Name); i++ { + d.Name[i] = 0 + } + + return nil +} + +var errExtraTooLarge = errors.New("extra value too large") + +// setExtra encodes v into the extra field of d. +func (d *rawDescriptor) setExtra(v interface{}) error { + if v == nil { + return nil + } + + if binary.Size(v) > len(d.Extra) { + return errExtraTooLarge + } + + b := new(bytes.Buffer) + if err := binary.Write(b, binary.LittleEndian, v); err != nil { + return err + } + + for i := copy(d.Extra[:], b.Bytes()); i < len(d.Extra); i++ { + d.Extra[i] = 0 + } + + return nil +} + +// getPartitionMetadata gets metadata for a partition data object. +func (d rawDescriptor) getPartitionMetadata() (fs FSType, pt PartType, arch string, err error) { + if got, want := d.DataType, DataPartition; got != want { + return 0, 0, "", &unexpectedDataTypeError{got, []DataType{want}} + } + + var p partition + + b := bytes.NewReader(d.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &p); err != nil { + return 0, 0, "", fmt.Errorf("%w", err) + } + + return p.Fstype, p.Parttype, p.Arch.GoArch(), nil +} + +// isPartitionOfType returns true if d is a partition data object of type pt. +func (d rawDescriptor) isPartitionOfType(pt PartType) bool { + _, t, _, err := d.getPartitionMetadata() + if err != nil { + return false + } + return t == pt +} + +// Descriptor represents the SIF descriptor type. +type Descriptor struct { + r io.ReaderAt // Backing storage. + + raw rawDescriptor // Raw descriptor from image. + + relativeID uint32 // ID relative to minimum ID of object group. +} + +// DataType returns the type of data object. +func (d Descriptor) DataType() DataType { return d.raw.DataType } + +// ID returns the data object ID of d. +func (d Descriptor) ID() uint32 { return d.raw.ID } + +// GroupID returns the data object group ID of d, or zero if d is not part of a data object +// group. +func (d Descriptor) GroupID() uint32 { return d.raw.GroupID &^ descrGroupMask } + +// LinkedID returns the object/group ID d is linked to, or zero if d does not contain a linked +// ID. If isGroup is true, the returned id is an object group ID. Otherwise, the returned id is a +// data object ID. +func (d Descriptor) LinkedID() (id uint32, isGroup bool) { + return d.raw.LinkedID &^ descrGroupMask, d.raw.LinkedID&descrGroupMask == descrGroupMask +} + +// Offset returns the offset of the data object. 
+func (d Descriptor) Offset() int64 { return d.raw.Offset } + +// Size returns the data object size. +func (d Descriptor) Size() int64 { return d.raw.Size } + +// CreatedAt returns the creation time of the data object. +func (d Descriptor) CreatedAt() time.Time { return time.Unix(d.raw.CreatedAt, 0) } + +// ModifiedAt returns the modification time of the data object. +func (d Descriptor) ModifiedAt() time.Time { return time.Unix(d.raw.ModifiedAt, 0) } + +// Name returns the name of the data object. +func (d Descriptor) Name() string { return strings.TrimRight(string(d.raw.Name[:]), "\000") } + +// PartitionMetadata gets metadata for a partition data object. +func (d Descriptor) PartitionMetadata() (fs FSType, pt PartType, arch string, err error) { + return d.raw.getPartitionMetadata() +} + +var errHashUnsupported = errors.New("hash algorithm unsupported") + +// getHashType converts ht into a crypto.Hash. +func getHashType(ht hashType) (crypto.Hash, error) { + switch ht { + case hashSHA256: + return crypto.SHA256, nil + case hashSHA384: + return crypto.SHA384, nil + case hashSHA512: + return crypto.SHA512, nil + case hashBLAKE2S: + return crypto.BLAKE2s_256, nil + case hashBLAKE2B: + return crypto.BLAKE2b_256, nil + } + return 0, errHashUnsupported +} + +// SignatureMetadata gets metadata for a signature data object. +func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) { + if got, want := d.raw.DataType, DataSignature; got != want { + return ht, fp, &unexpectedDataTypeError{got, []DataType{want}} + } + + var s signature + + b := bytes.NewReader(d.raw.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &s); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + if ht, err = getHashType(s.Hashtype); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + fp = make([]byte, 20) + copy(fp, s.Entity[:]) + + return ht, fp, nil +} + +// CryptoMessageMetadata gets metadata for a crypto message data object. +func (d Descriptor) CryptoMessageMetadata() (FormatType, MessageType, error) { + if got, want := d.raw.DataType, DataCryptoMessage; got != want { + return 0, 0, &unexpectedDataTypeError{got, []DataType{want}} + } + + var m cryptoMessage + + b := bytes.NewReader(d.raw.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &m); err != nil { + return 0, 0, fmt.Errorf("%w", err) + } + + return m.Formattype, m.Messagetype, nil +} + +// GetData returns the data object associated with descriptor d. +func (d Descriptor) GetData() ([]byte, error) { + b := make([]byte, d.raw.Size) + if _, err := io.ReadFull(d.GetReader(), b); err != nil { + return nil, err + } + return b, nil +} + +// GetReader returns a io.Reader that reads the data object associated with descriptor d. +func (d Descriptor) GetReader() io.Reader { + return io.NewSectionReader(d.r, d.raw.Offset, d.raw.Size) +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from d. +func (d Descriptor) GetIntegrityReader() io.Reader { + fields := []interface{}{ + d.raw.DataType, + d.raw.Used, + d.relativeID, + d.raw.LinkedID, + d.raw.Size, + d.raw.CreatedAt, + d.raw.UID, + d.raw.GID, + } + + // Encode endian-sensitive fields. + data := bytes.Buffer{} + for _, f := range fields { + if err := binary.Write(&data, binary.LittleEndian, f); err != nil { + panic(err) // (*bytes.Buffer).Write() is documented as always returning a nil error. 
+ } + } + + return io.MultiReader( + &data, + bytes.NewReader(d.raw.Name[:]), + bytes.NewReader(d.raw.Extra[:]), + ) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go new file mode 100644 index 00000000000..c55cf51f9de --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go @@ -0,0 +1,300 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "crypto" + "errors" + "fmt" + "io" + "os" + "time" +) + +// descriptorOpts accumulates data object options. +type descriptorOpts struct { + groupID uint32 + linkID uint32 + alignment int + name string + extra interface{} + t time.Time +} + +// DescriptorInputOpt are used to specify data object options. +type DescriptorInputOpt func(DataType, *descriptorOpts) error + +// OptNoGroup specifies the data object is not contained within a data object group. +func OptNoGroup() DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.groupID = 0 + return nil + } +} + +// OptGroupID specifies groupID as data object group ID. +func OptGroupID(groupID uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if groupID == 0 { + return ErrInvalidGroupID + } + opts.groupID = groupID + return nil + } +} + +// OptLinkedID specifies that the data object is linked to the data object with the specified ID. +func OptLinkedID(id uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if id == 0 { + return ErrInvalidObjectID + } + opts.linkID = id + return nil + } +} + +// OptLinkedGroupID specifies that the data object is linked to the data object group with the +// specified groupID. +func OptLinkedGroupID(groupID uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if groupID == 0 { + return ErrInvalidGroupID + } + opts.linkID = groupID | descrGroupMask + return nil + } +} + +// OptObjectAlignment specifies n as the data alignment requirement. +func OptObjectAlignment(n int) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.alignment = n + return nil + } +} + +// OptObjectName specifies name as the data object name. +func OptObjectName(name string) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.name = name + return nil + } +} + +// OptObjectTime specifies t as the data object creation time. +func OptObjectTime(t time.Time) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.t = t + return nil + } +} + +type unexpectedDataTypeError struct { + got DataType + want []DataType +} + +func (e *unexpectedDataTypeError) Error() string { + return fmt.Sprintf("unexpected data type %v, expected one of: %v", e.got, e.want) +} + +func (e *unexpectedDataTypeError) Is(target error) bool { + //nolint:errorlint // don't compare wrapped errors in Is() + t, ok := target.(*unexpectedDataTypeError) + if !ok { + return false + } + + if len(t.want) > 0 { + // Use a map to check that the "want" errors in e and t contain the same values, ignoring + // any ordering differences. + acc := make(map[DataType]int, len(t.want)) + + // Increment counter for each data type in e. 
+		for _, dt := range e.want {
+			if _, ok := acc[dt]; !ok {
+				acc[dt] = 0
+			}
+			acc[dt]++
+		}
+
+		// Decrement counter for each data type in t.
+		for _, dt := range t.want {
+			if _, ok := acc[dt]; !ok {
+				return false
+			}
+			acc[dt]--
+		}
+
+		// If the "want" errors in e and t are equivalent, all counters should be zero.
+		for _, n := range acc {
+			if n != 0 {
+				return false
+			}
+		}
+	}
+
+	return (e.got == t.got || t.got == 0)
+}
+
+// OptCryptoMessageMetadata sets metadata for a crypto message data object. The format type is set
+// to ft, and the message type is set to mt.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptCryptoMessageMetadata(ft FormatType, mt MessageType) DescriptorInputOpt {
+	return func(t DataType, opts *descriptorOpts) error {
+		if got, want := t, DataCryptoMessage; got != want {
+			return &unexpectedDataTypeError{got, []DataType{want}}
+		}
+
+		m := cryptoMessage{
+			Formattype:  ft,
+			Messagetype: mt,
+		}
+
+		opts.extra = m
+		return nil
+	}
+}
+
+var errUnknownArchitecture = errors.New("unknown architecture")
+
+// OptPartitionMetadata sets metadata for a partition data object. The filesystem type is set to
+// fs, the partition type is set to pt, and the CPU architecture is set to arch. The value of arch
+// should be the architecture as represented by the Go runtime.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptPartitionMetadata(fs FSType, pt PartType, arch string) DescriptorInputOpt {
+	return func(t DataType, opts *descriptorOpts) error {
+		if got, want := t, DataPartition; got != want {
+			return &unexpectedDataTypeError{got, []DataType{want}}
+		}
+
+		sifarch := getSIFArch(arch)
+		if sifarch == hdrArchUnknown {
+			return fmt.Errorf("%w: %v", errUnknownArchitecture, arch)
+		}
+
+		p := partition{
+			Fstype:   fs,
+			Parttype: pt,
+			Arch:     sifarch,
+		}
+
+		opts.extra = p
+		return nil
+	}
+}
+
+// sifHashType converts h into a HashType.
+func sifHashType(h crypto.Hash) hashType {
+	switch h {
+	case crypto.SHA256:
+		return hashSHA256
+	case crypto.SHA384:
+		return hashSHA384
+	case crypto.SHA512:
+		return hashSHA512
+	case crypto.BLAKE2s_256:
+		return hashBLAKE2S
+	case crypto.BLAKE2b_256:
+		return hashBLAKE2B
+	}
+	return 0
+}
+
+// OptSignatureMetadata sets metadata for a signature data object. The hash type is set to ht, and
+// the signing entity fingerprint is set to fp.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptSignatureMetadata(ht crypto.Hash, fp []byte) DescriptorInputOpt {
+	return func(t DataType, opts *descriptorOpts) error {
+		if got, want := t, DataSignature; got != want {
+			return &unexpectedDataTypeError{got, []DataType{want}}
+		}
+
+		s := signature{
+			Hashtype: sifHashType(ht),
+		}
+		copy(s.Entity[:], fp)
+
+		opts.extra = s
+		return nil
+	}
+}
+
+// DescriptorInput describes a new data object.
+type DescriptorInput struct {
+	dt   DataType
+	r    io.Reader
+	opts descriptorOpts
+}
+
+// DefaultObjectGroup is the default group that data objects are placed in.
+const DefaultObjectGroup = 1
+
+// NewDescriptorInput returns a DescriptorInput representing a data object of type t, with contents
+// read from r, configured according to opts.
+//
+// It is possible (and often necessary) to store additional metadata related to certain types of
+// data objects.
Consider supplying options such as OptCryptoMessageMetadata, OptPartitionMetadata, +// and OptSignatureMetadata for this purpose. +// +// By default, the data object will be placed in the default data object group (1). To override +// this behavior, use OptNoGroup or OptGroupID. To link this data object, use OptLinkedID or +// OptLinkedGroupID. +// +// By default, the data object will be aligned according to the system's memory page size. To +// override this behavior, consider using OptObjectAlignment. +// +// By default, no name is set for data object. To set a name, use OptObjectName. +// +// When creating a new image, data object creation/modification times are set to the image creation +// time. When modifying an existing image, the data object creation/modification time is set to the +// image modification time. To override this behavior, consider using OptObjectTime. +func NewDescriptorInput(t DataType, r io.Reader, opts ...DescriptorInputOpt) (DescriptorInput, error) { + dopts := descriptorOpts{ + groupID: DefaultObjectGroup, + alignment: os.Getpagesize(), + } + + for _, opt := range opts { + if err := opt(t, &dopts); err != nil { + return DescriptorInput{}, fmt.Errorf("%w", err) + } + } + + di := DescriptorInput{ + dt: t, + r: r, + opts: dopts, + } + + return di, nil +} + +// fillDescriptor fills d according to di. If di does not explicitly specify a time value, use t. +func (di DescriptorInput) fillDescriptor(t time.Time, d *rawDescriptor) error { + d.DataType = di.dt + d.GroupID = di.opts.groupID | descrGroupMask + d.LinkedID = di.opts.linkID + + if !di.opts.t.IsZero() { + t = di.opts.t + } + d.CreatedAt = t.Unix() + d.ModifiedAt = t.Unix() + + d.UID = 0 + d.GID = 0 + + if err := d.setName(di.opts.name); err != nil { + return err + } + + return d.setExtra(di.opts.extra) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go new file mode 100644 index 00000000000..75266e1945c --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go @@ -0,0 +1,174 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" +) + +var ( + errInvalidMagic = errors.New("invalid SIF magic") + errIncompatibleVersion = errors.New("incompatible SIF version") +) + +// isValidSif looks at key fields from the global header to assess SIF validity. +func isValidSif(f *FileImage) error { + if f.h.Magic != hdrMagic { + return errInvalidMagic + } + + if f.h.Version != CurrentVersion.bytes() { + return errIncompatibleVersion + } + + return nil +} + +// populateMinIDs populates the minIDs field of f. +func (f *FileImage) populateMinIDs() { + f.minIDs = make(map[uint32]uint32) + f.WithDescriptors(func(d Descriptor) bool { + if minID, ok := f.minIDs[d.raw.GroupID]; !ok || d.ID() < minID { + f.minIDs[d.raw.GroupID] = d.ID() + } + return false + }) +} + +// loadContainer loads a SIF image from rw. +func loadContainer(rw ReadWriter) (*FileImage, error) { + f := FileImage{rw: rw} + + // Read global header. 
+ err := binary.Read( + io.NewSectionReader(rw, 0, int64(binary.Size(f.h))), + binary.LittleEndian, + &f.h, + ) + if err != nil { + return nil, fmt.Errorf("reading global header: %w", err) + } + + if err := isValidSif(&f); err != nil { + return nil, err + } + + // Read descriptors. + f.rds = make([]rawDescriptor, f.h.DescriptorsTotal) + err = binary.Read( + io.NewSectionReader(rw, f.h.DescriptorsOffset, f.h.DescriptorsSize), + binary.LittleEndian, + &f.rds, + ) + if err != nil { + return nil, fmt.Errorf("reading descriptors: %w", err) + } + + f.populateMinIDs() + + return &f, nil +} + +// loadOpts accumulates container loading options. +type loadOpts struct { + flag int + closeOnUnload bool +} + +// LoadOpt are used to specify container loading options. +type LoadOpt func(*loadOpts) error + +// OptLoadWithFlag specifies flag (os.O_RDONLY etc.) to be used when opening the container file. +func OptLoadWithFlag(flag int) LoadOpt { + return func(lo *loadOpts) error { + lo.flag = flag + return nil + } +} + +// OptLoadWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. +func OptLoadWithCloseOnUnload(b bool) LoadOpt { + return func(lo *loadOpts) error { + lo.closeOnUnload = b + return nil + } +} + +// LoadContainerFromPath loads a new SIF container from path, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the file is opened for read and write access. To change this behavior, consider +// using OptLoadWithFlag. +func LoadContainerFromPath(path string, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + flag: os.O_RDWR, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + fp, err := os.OpenFile(path, lo.flag, 0) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := loadContainer(fp) + if err != nil { + fp.Close() + + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = true + return f, nil +} + +// LoadContainer loads a new SIF container from rw, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptLoadWithCloseOnUnload. +func LoadContainer(rw ReadWriter, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := loadContainer(rw) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = lo.closeOnUnload + return f, nil +} + +// UnloadContainer unloads f, releasing associated resources. +func (f *FileImage) UnloadContainer() error { + if c, ok := f.rw.(io.Closer); ok && f.closeOnUnload { + if err := c.Close(); err != nil { + return fmt.Errorf("%w", err) + } + } + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go new file mode 100644 index 00000000000..635d6e89cfb --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go @@ -0,0 +1,210 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. 
Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "fmt" +) + +// ErrNoObjects is the error returned when an image contains no data objects. +var ErrNoObjects = errors.New("no objects in image") + +// ErrObjectNotFound is the error returned when a data object is not found. +var ErrObjectNotFound = errors.New("object not found") + +// ErrMultipleObjectsFound is the error returned when multiple data objects are found. +var ErrMultipleObjectsFound = errors.New("multiple objects found") + +// ErrInvalidObjectID is the error returned when an invalid object ID is supplied. +var ErrInvalidObjectID = errors.New("invalid object ID") + +// ErrInvalidGroupID is the error returned when an invalid group ID is supplied. +var ErrInvalidGroupID = errors.New("invalid group ID") + +// DescriptorSelectorFunc returns true if d matches, and false otherwise. +type DescriptorSelectorFunc func(d Descriptor) (bool, error) + +// WithDataType selects descriptors that have data type dt. +func WithDataType(dt DataType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.DataType() == dt, nil + } +} + +// WithID selects descriptors with a matching ID. +func WithID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + return d.ID() == id, nil + } +} + +// WithNoGroup selects descriptors that are not contained within an object group. +func WithNoGroup() DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.GroupID() == 0, nil + } +} + +// WithGroupID returns a selector func that selects descriptors with a matching groupID. +func WithGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + return d.GroupID() == groupID, nil + } +} + +// WithLinkedID selects descriptors that are linked to the data object with specified ID. +func WithLinkedID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + linkedID, isGroup := d.LinkedID() + return !isGroup && linkedID == id, nil + } +} + +// WithLinkedGroupID selects descriptors that are linked to the data object group with specified +// ID. +func WithLinkedGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + linkedID, isGroup := d.LinkedID() + return isGroup && linkedID == groupID, nil + } +} + +// WithPartitionType selects descriptors containing a partition of type pt. +func WithPartitionType(pt PartType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.raw.isPartitionOfType(pt), nil + } +} + +// descriptorFromRaw populates a Descriptor from rd. +func (f *FileImage) descriptorFromRaw(rd *rawDescriptor) Descriptor { + return Descriptor{ + raw: *rd, + r: f.rw, + relativeID: rd.ID - f.minIDs[rd.GroupID], + } +} + +// GetDescriptors returns a slice of in-use descriptors for which all selector funcs return true. +// If the image contains no data objects, an error wrapping ErrNoObjects is returned. 
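Putting the selectors above together, a sketch using GetDescriptors (defined just below); the path is a placeholder, the image is opened read-only via OptLoadWithFlag from load.go, and DataSignature is a constant from sif.go later in this diff.

package example

import (
	"fmt"
	"os"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func listSignatures(path string) error {
	f, err := sif.LoadContainerFromPath(path, sif.OptLoadWithFlag(os.O_RDONLY))
	if err != nil {
		return err
	}
	defer func() { _ = f.UnloadContainer() }()

	// Selectors combine with AND semantics (multiSelectorFunc, below):
	// signature objects that are linked to data object group 1.
	ds, err := f.GetDescriptors(
		sif.WithDataType(sif.DataSignature),
		sif.WithLinkedGroupID(1),
	)
	if err != nil {
		return err
	}

	for _, d := range ds {
		fmt.Println(d.ID(), d.Name())
	}

	return nil
}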
+func (f *FileImage) GetDescriptors(fns ...DescriptorSelectorFunc) ([]Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return nil, fmt.Errorf("%w", ErrNoObjects) + } + + var ds []Descriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(d *rawDescriptor) error { + ds = append(ds, f.descriptorFromRaw(d)) + return nil + }) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + return ds, nil +} + +// getDescriptor returns a pointer to the in-use descriptor selected by fns. If no descriptor is +// selected by fns, ErrObjectNotFound is returned. If multiple descriptors are selected by fns, +// ErrMultipleObjectsFound is returned. +func (f *FileImage) getDescriptor(fns ...DescriptorSelectorFunc) (*rawDescriptor, error) { + var d *rawDescriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(found *rawDescriptor) error { + if d != nil { + return ErrMultipleObjectsFound + } + d = found + return nil + }) + + if err == nil && d == nil { + err = ErrObjectNotFound + } + + return d, err +} + +// GetDescriptor returns the in-use descriptor selected by fns. If the image contains no data +// objects, an error wrapping ErrNoObjects is returned. If no descriptor is selected by fns, an +// error wrapping ErrObjectNotFound is returned. If multiple descriptors are selected by fns, an +// error wrapping ErrMultipleObjectsFound is returned. +func (f *FileImage) GetDescriptor(fns ...DescriptorSelectorFunc) (Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return Descriptor{}, fmt.Errorf("%w", ErrNoObjects) + } + + d, err := f.getDescriptor(fns...) + if err != nil { + return Descriptor{}, fmt.Errorf("%w", err) + } + + return f.descriptorFromRaw(d), nil +} + +// multiSelectorFunc returns a DescriptorSelectorFunc that selects a descriptor iff all of fns +// select the descriptor. +func multiSelectorFunc(fns ...DescriptorSelectorFunc) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + for _, fn := range fns { + if ok, err := fn(d); !ok || err != nil { + return ok, err + } + } + return true, nil + } +} + +// withDescriptors calls onMatchFn with each in-use descriptor in f for which selectFn returns +// true. If selectFn or onMatchFn return a non-nil error, the iteration halts, and the error is +// returned to the caller. +func (f *FileImage) withDescriptors(selectFn DescriptorSelectorFunc, onMatchFn func(*rawDescriptor) error) error { + for i, d := range f.rds { + if !d.Used { + continue + } + + if ok, err := selectFn(f.descriptorFromRaw(&f.rds[i])); err != nil { + return err + } else if !ok { + continue + } + + if err := onMatchFn(&f.rds[i]); err != nil { + return err + } + } + + return nil +} + +var errAbort = errors.New("abort") + +// abortOnMatch is a semantic convenience function that always returns a non-nil error, which can +// be used as a no-op matchFn. +func abortOnMatch(*rawDescriptor) error { return errAbort } + +// WithDescriptors calls fn with each in-use descriptor in f, until fn returns true. +func (f *FileImage) WithDescriptors(fn func(d Descriptor) bool) { + selectFn := func(d Descriptor) (bool, error) { + return fn(d), nil + } + _ = f.withDescriptors(selectFn, abortOnMatch) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go new file mode 100644 index 00000000000..704acee4a0e --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go @@ -0,0 +1,364 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. 
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +// Package sif implements data structures and routines to create +// and access SIF files. +// +// Layout of a SIF file (example): +// +// .================================================. +// | GLOBAL HEADER: Sifheader | +// | - launch: "#!/usr/bin/env..." | +// | - magic: "SIF_MAGIC" | +// | - version: "1" | +// | - arch: "4" | +// | - uuid: b2659d4e-bd50-4ea5-bd17-eec5e54f918e | +// | - ctime: 1504657553 | +// | - mtime: 1504657653 | +// | - ndescr: 3 | +// | - descroff: 120 | --. +// | - descrlen: 432 | | +// | - dataoff: 4096 | | +// | - datalen: 619362 | | +// |------------------------------------------------| <-' +// | DESCR[0]: Sifdeffile | +// | - Sifcommon | +// | - datatype: DATA_DEFFILE | +// | - id: 1 | +// | - groupid: 1 | +// | - link: NONE | +// | - fileoff: 4096 | --. +// | - filelen: 222 | | +// |------------------------------------------------| <-----. +// | DESCR[1]: Sifpartition | | | +// | - Sifcommon | | | +// | - datatype: DATA_PARTITION | | | +// | - id: 2 | | | +// | - groupid: 1 | | | +// | - link: NONE | | | +// | - fileoff: 4318 | ----. | +// | - filelen: 618496 | | | | +// | - fstype: Squashfs | | | | +// | - parttype: System | | | | +// | - content: Linux | | | | +// |------------------------------------------------| | | | +// | DESCR[2]: Sifsignature | | | | +// | - Sifcommon | | | | +// | - datatype: DATA_SIGNATURE | | | | +// | - id: 3 | | | | +// | - groupid: NONE | | | | +// | - link: 2 | ------' +// | - fileoff: 622814 | ------. +// | - filelen: 644 | | | | +// | - hashtype: SHA384 | | | | +// | - entity: @ | | | | +// |------------------------------------------------| <-' | | +// | Definition file data | | | +// | . | | | +// | . | | | +// | . | | | +// |------------------------------------------------| <---' | +// | File system partition image | | +// | . | | +// | . | | +// | . | | +// |------------------------------------------------| <-----' +// | Signed verification data | +// | . | +// | . | +// | . | +// `================================================' +// +package sif + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/google/uuid" +) + +// SIF header constants and quantities. +const ( + hdrLaunchLen = 32 // len("#!/usr/bin/env... ") + hdrMagicLen = 10 // len("SIF_MAGIC") + hdrVersionLen = 3 // len("99") +) + +var hdrMagic = [...]byte{'S', 'I', 'F', '_', 'M', 'A', 'G', 'I', 'C', '\x00'} + +// SpecVersion specifies a SIF specification version. +type SpecVersion uint8 + +func (v SpecVersion) String() string { return fmt.Sprintf("%02d", v) } + +// bytes returns the value of b, formatted for direct inclusion in a SIF header. +func (v SpecVersion) bytes() [hdrVersionLen]byte { + var b [3]byte + copy(b[:], fmt.Sprintf("%02d", v)) + return b +} + +// SIF specification versions. +const ( + version01 SpecVersion = iota + 1 +) + +// CurrentVersion specifies the current SIF specification version. 
+const CurrentVersion = version01 + +const ( + descrGroupMask = 0xf0000000 // groups start at that offset + descrEntityLen = 256 // len("Joe Bloe ...") + descrNameLen = 128 // descriptor name (string identifier) + descrMaxPrivLen = 384 // size reserved for descriptor specific data +) + +// DataType represents the different SIF data object types stored in the image. +type DataType int32 + +// List of supported SIF data types. +const ( + DataDeffile DataType = iota + 0x4001 // definition file data object + DataEnvVar // environment variables data object + DataLabels // JSON labels data object + DataPartition // file system data object + DataSignature // signing/verification data object + DataGenericJSON // generic JSON meta-data + DataGeneric // generic / raw data + DataCryptoMessage // cryptographic message data object +) + +// String returns a human-readable representation of t. +func (t DataType) String() string { + switch t { + case DataDeffile: + return "Def.FILE" + case DataEnvVar: + return "Env.Vars" + case DataLabels: + return "JSON.Labels" + case DataPartition: + return "FS" + case DataSignature: + return "Signature" + case DataGenericJSON: + return "JSON.Generic" + case DataGeneric: + return "Generic/Raw" + case DataCryptoMessage: + return "Cryptographic Message" + } + return "Unknown" +} + +// FSType represents the different SIF file system types found in partition data objects. +type FSType int32 + +// List of supported file systems. +const ( + FsSquash FSType = iota + 1 // Squashfs file system, RDONLY + FsExt3 // EXT3 file system, RDWR (deprecated) + FsImmuObj // immutable data object archive + FsRaw // raw data + FsEncryptedSquashfs // Encrypted Squashfs file system, RDONLY +) + +// String returns a human-readable representation of t. +func (t FSType) String() string { + switch t { + case FsSquash: + return "Squashfs" + case FsExt3: + return "Ext3" + case FsImmuObj: + return "Archive" + case FsRaw: + return "Raw" + case FsEncryptedSquashfs: + return "Encrypted squashfs" + } + return "Unknown" +} + +// PartType represents the different SIF container partition types (system and data). +type PartType int32 + +// List of supported partition types. +const ( + PartSystem PartType = iota + 1 // partition hosts an operating system + PartPrimSys // partition hosts the primary operating system + PartData // partition hosts data only + PartOverlay // partition hosts an overlay +) + +// String returns a human-readable representation of t. +func (t PartType) String() string { + switch t { + case PartSystem: + return "System" + case PartPrimSys: + return "*System" + case PartData: + return "Data" + case PartOverlay: + return "Overlay" + } + return "Unknown" +} + +// hashType represents the different SIF hashing function types used to fingerprint data objects. +type hashType int32 + +// List of supported hash functions. +const ( + hashSHA256 hashType = iota + 1 + hashSHA384 + hashSHA512 + hashBLAKE2S + hashBLAKE2B +) + +// FormatType represents the different formats used to store cryptographic message objects. +type FormatType int32 + +// List of supported cryptographic message formats. +const ( + FormatOpenPGP FormatType = iota + 1 + FormatPEM +) + +// String returns a human-readable representation of t. +func (t FormatType) String() string { + switch t { + case FormatOpenPGP: + return "OpenPGP" + case FormatPEM: + return "PEM" + } + return "Unknown" +} + +// MessageType represents the different messages stored within cryptographic message objects. 
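The String methods above exist mostly for listings like the following sketch; f is assumed to be a loaded *sif.FileImage, WithDescriptors comes from select.go earlier in this diff, and the MessageType declaration continues below this aside.

package example

import (
	"fmt"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func listObjects(f *sif.FileImage) {
	// Print every in-use data object; the %v verb hits the String
	// methods defined above.
	f.WithDescriptors(func(d sif.Descriptor) bool {
		fmt.Printf("%d\t%v\t%s\n", d.ID(), d.DataType(), d.Name())
		return false // false keeps the iteration going
	})
}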
+type MessageType int32 + +// List of supported cryptographic message formats. +const ( + // openPGP formatted messages. + MessageClearSignature MessageType = 0x100 + + // PEM formatted messages. + MessageRSAOAEP MessageType = 0x200 +) + +// String returns a human-readable representation of t. +func (t MessageType) String() string { + switch t { + case MessageClearSignature: + return "Clear Signature" + case MessageRSAOAEP: + return "RSA-OAEP" + } + return "Unknown" +} + +// header describes a loaded SIF file. +type header struct { + LaunchScript [hdrLaunchLen]byte + + Magic [hdrMagicLen]byte + Version [hdrVersionLen]byte + Arch archType + ID uuid.UUID + + CreatedAt int64 + ModifiedAt int64 + + DescriptorsFree int64 + DescriptorsTotal int64 + DescriptorsOffset int64 + DescriptorsSize int64 + DataOffset int64 + DataSize int64 +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from h. +func (h header) GetIntegrityReader() io.Reader { + return io.MultiReader( + bytes.NewReader(h.LaunchScript[:]), + bytes.NewReader(h.Magic[:]), + bytes.NewReader(h.Version[:]), + bytes.NewReader(h.ID[:]), + ) +} + +// ReadWriter describes the interface required to read and write SIF images. +type ReadWriter interface { + io.ReaderAt + io.WriteSeeker + Truncate(int64) error +} + +// FileImage describes the representation of a SIF file in memory. +type FileImage struct { + rw ReadWriter // Backing storage for image. + + h header // Raw global header from image. + rds []rawDescriptor // Raw descriptors from image. + + closeOnUnload bool // Close rw on Unload. + minIDs map[uint32]uint32 // Minimum object IDs for each group ID. +} + +// LaunchScript returns the image launch script. +func (f *FileImage) LaunchScript() string { + return string(bytes.TrimRight(f.h.LaunchScript[:], "\x00")) +} + +// Version returns the SIF specification version of the image. +func (f *FileImage) Version() string { + return string(bytes.TrimRight(f.h.Version[:], "\x00")) +} + +// PrimaryArch returns the primary CPU architecture of the image, or "unknown" if the primary CPU +// architecture cannot be determined. +func (f *FileImage) PrimaryArch() string { return f.h.Arch.GoArch() } + +// ID returns the ID of the image. +func (f *FileImage) ID() string { return f.h.ID.String() } + +// CreatedAt returns the creation time of the image. +func (f *FileImage) CreatedAt() time.Time { return time.Unix(f.h.CreatedAt, 0) } + +// ModifiedAt returns the last modification time of the image. +func (f *FileImage) ModifiedAt() time.Time { return time.Unix(f.h.ModifiedAt, 0) } + +// DescriptorsFree returns the number of free descriptors in the image. +func (f *FileImage) DescriptorsFree() int64 { return f.h.DescriptorsFree } + +// DescriptorsTotal returns the total number of descriptors in the image. +func (f *FileImage) DescriptorsTotal() int64 { return f.h.DescriptorsTotal } + +// DescriptorsOffset returns the offset (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsOffset() int64 { return f.h.DescriptorsOffset } + +// DescriptorsSize returns the size (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsSize() int64 { return f.h.DescriptorsSize } + +// DataOffset returns the offset (in bytes) of the data section in the image. +func (f *FileImage) DataOffset() int64 { return f.h.DataOffset } + +// DataSize returns the size (in bytes) of the data section in the image. 
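A short sketch exercising the header accessors above (the DataSize body continues below); f is again an assumed *sif.FileImage.

package example

import (
	"fmt"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func headerSummary(f *sif.FileImage) {
	fmt.Println("id:      ", f.ID())
	fmt.Println("version: ", f.Version())
	fmt.Println("arch:    ", f.PrimaryArch())
	fmt.Println("created: ", f.CreatedAt().UTC())
	fmt.Println("in use:  ", f.DescriptorsTotal()-f.DescriptorsFree(), "descriptors")
}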
+func (f *FileImage) DataSize() int64 { return f.h.DataSize } + +// GetHeaderIntegrityReader returns an io.Reader that reads the integrity-protected fields from the +// header of the image. +func (f *FileImage) GetHeaderIntegrityReader() io.Reader { + return f.h.GetIntegrityReader() +} diff --git a/vendor/github.com/vbauerster/mpb/v7/.travis.yml b/vendor/github.com/vbauerster/mpb/v7/.travis.yml deleted file mode 100644 index 9a203a67d2a..00000000000 --- a/vendor/github.com/vbauerster/mpb/v7/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le - -go: - - 1.14.x - -script: - - go test -race ./... - - for i in _examples/*/; do go build $i/*.go || exit 1; done diff --git a/vendor/github.com/vbauerster/mpb/v7/README.md b/vendor/github.com/vbauerster/mpb/v7/README.md index 90d4fe639ca..413f9e1dbf0 100644 --- a/vendor/github.com/vbauerster/mpb/v7/README.md +++ b/vendor/github.com/vbauerster/mpb/v7/README.md @@ -1,8 +1,7 @@ # Multi Progress Bar [![GoDoc](https://pkg.go.dev/badge/github.com/vbauerster/mpb)](https://pkg.go.dev/github.com/vbauerster/mpb/v7) -[![Build Status](https://travis-ci.org/vbauerster/mpb.svg?branch=master)](https://travis-ci.org/vbauerster/mpb) -[![Go Report Card](https://goreportcard.com/badge/github.com/vbauerster/mpb)](https://goreportcard.com/report/github.com/vbauerster/mpb) +[![Test status](https://github.com/vbauerster/mpb/actions/workflows/test.yml/badge.svg)](https://github.com/vbauerster/mpb/actions/workflows/test.yml) [![Donate with PayPal](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.me/vbauerster) **mpb** is a Go lib for rendering progress bars in terminal applications. @@ -37,10 +36,10 @@ func main() { total := 100 name := "Single Bar:" - // adding a single bar, which will inherit container's width - bar := p.Add(int64(total), - // progress bar filler with customized style - mpb.NewBarFiller(mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟")), + // create a single bar, which will inherit container's width + bar := p.New(int64(total), + // BarFillerBuilder with custom style + mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"), mpb.PrependDecorators( // display our name with one space on the right decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), @@ -66,7 +65,7 @@ func main() { ```go var wg sync.WaitGroup - // passed &wg will be accounted at p.Wait() call + // passed wg will be accounted at p.Wait() call p := mpb.New(mpb.WithWaitGroup(&wg)) total, numBars := 100, 3 wg.Add(numBars) @@ -104,7 +103,7 @@ func main() { } }() } - // Waiting for passed &wg and for all bars to complete and flush + // wait for passed wg and for all bars to complete and flush p.Wait() ``` diff --git a/vendor/github.com/vbauerster/mpb/v7/bar.go b/vendor/github.com/vbauerster/mpb/v7/bar.go index dabe1a47546..4991f4f15b4 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar.go @@ -5,9 +5,9 @@ import ( "context" "fmt" "io" - "log" "runtime/debug" "strings" + "sync" "time" "github.com/acarl005/stripansi" @@ -17,32 +17,21 @@ import ( // Bar represents a progress bar. 
type Bar struct { - priority int // used by heap - index int // used by heap - - toShutdown bool - toDrop bool - noPop bool - hasEwmaDecorators bool - operateState chan func(*bState) - frameCh chan *frame - - // cancel is called either by user or on complete event - cancel func() - // done is closed after cacheState is assigned - done chan struct{} - // cacheState is populated, right after close(b.done) - cacheState *bState - + index int // used by heap + priority int // used by heap + hasEwma bool + frameCh chan *renderFrame + operateState chan func(*bState) + done chan struct{} container *Progress - dlogger *log.Logger + bs *bState + cancel func() recoveredPanic interface{} } type extenderFunc func(in io.Reader, reqWidth int, st decor.Statistics) (out io.Reader, lines int) -// bState is actual bar state. It gets passed to *Bar.serve(...) monitor -// goroutine. +// bState is actual bar's state. type bState struct { id int priority int @@ -50,11 +39,10 @@ type bState struct { total int64 current int64 refill int64 - lastN int64 - iterated bool + lastIncrement int64 trimSpace bool completed bool - completeFlushed bool + aborted bool triggerComplete bool dropOnComplete bool noPop bool @@ -63,36 +51,33 @@ type bState struct { averageDecorators []decor.AverageDecorator ewmaDecorators []decor.EwmaDecorator shutdownListeners []decor.ShutdownListener - bufP, bufB, bufA *bytes.Buffer + buffers [3]*bytes.Buffer filler BarFiller middleware func(BarFiller) BarFiller extender extenderFunc + debugOut io.Writer - // runningBar is a key for *pState.parkedBars - runningBar *Bar - - debugOut io.Writer + afterBar *Bar // key for (*pState).queueBars + sync bool } -type frame struct { - reader io.Reader - lines int +type renderFrame struct { + reader io.Reader + lines int + shutdown bool } func newBar(container *Progress, bs *bState) *Bar { - logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id) ctx, cancel := context.WithCancel(container.ctx) bar := &Bar{ - container: container, priority: bs.priority, - toDrop: bs.dropOnComplete, - noPop: bs.noPop, + hasEwma: len(bs.ewmaDecorators) != 0, + frameCh: make(chan *renderFrame, 1), operateState: make(chan func(*bState)), - frameCh: make(chan *frame, 1), done: make(chan struct{}), + container: container, cancel: cancel, - dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile), } go bar.serve(ctx, bs) @@ -100,12 +85,20 @@ func newBar(container *Progress, bs *bState) *Bar { } // ProxyReader wraps r with metrics required for progress tracking. -// Panics if r is nil. +// If r is 'unknown total/size' reader it's mandatory to call +// (*Bar).SetTotal(-1, true) method after (Reader).Read returns io.EOF. +// Panics if r is nil. If bar is already completed or aborted, returns +// nil. func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser { if r == nil { panic("expected non nil io.Reader") } - return newProxyReader(r, b) + select { + case <-b.done: + return nil + default: + return b.newProxyReader(r) + } } // ID returs id of the bar. @@ -115,18 +108,18 @@ func (b *Bar) ID() int { case b.operateState <- func(s *bState) { result <- s.id }: return <-result case <-b.done: - return b.cacheState.id + return b.bs.id } } -// Current returns bar's current number, in other words sum of all increments. +// Current returns bar's current value, in other words sum of all increments. 
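The ProxyReader contract above is easy to get wrong for unknown-length streams, so a hedged sketch; p, dst and src are assumptions, the bar is built with the Progress.New API shown in the README hunk, and the Current body continues below this aside.

package example

import (
	"io"

	"github.com/vbauerster/mpb/v7"
)

func copyWithBar(p *mpb.Progress, dst io.Writer, src io.Reader) error {
	// Unknown total: construct with total <= 0 and complete it manually.
	bar := p.New(-1, mpb.SpinnerStyle())

	// ProxyReader returns nil if the bar is already completed or aborted.
	proxy := bar.ProxyReader(src)
	defer proxy.Close()

	if _, err := io.Copy(dst, proxy); err != nil {
		return err
	}

	// Mandatory for 'unknown total/size' readers, per the doc comment above.
	bar.SetTotal(-1, true)
	return nil
}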
func (b *Bar) Current() int64 { result := make(chan int64) select { case b.operateState <- func(s *bState) { result <- s.current }: return <-result case <-b.done: - return b.cacheState.current + return b.bs.current } } @@ -145,7 +138,7 @@ func (b *Bar) SetRefill(amount int64) { // TraverseDecorators traverses all available decorators and calls cb func on each. func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { - done := make(chan struct{}) + sync := make(chan struct{}) select { case b.operateState <- func(s *bState) { for _, decorators := range [...][]decor.Decorator{ @@ -156,28 +149,56 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { cb(extractBaseDecorator(d)) } } - close(done) + close(sync) + }: + <-sync + case <-b.done: + } +} + +// EnableTriggerComplete enables triggering complete event. It's +// effective only for bar which was constructed with `total <= 0` and +// after total has been set with (*Bar).SetTotal(int64, false). If bar +// has been incremented to the total, complete event is triggered right +// away. +func (b *Bar) EnableTriggerComplete() { + select { + case b.operateState <- func(s *bState) { + if s.triggerComplete || s.total <= 0 { + return + } + if s.current >= s.total { + s.current = s.total + s.completed = true + go b.forceRefresh() + } else { + s.triggerComplete = true + } }: - <-done case <-b.done: } } -// SetTotal sets total dynamically. -// If total is less than or equal to zero it takes progress' current value. -func (b *Bar) SetTotal(total int64, triggerComplete bool) { +// SetTotal sets total to an arbitrary value. It's effective only for +// bar which was constructed with `total <= 0`. Setting total to negative +// value is equivalent to (*Bar).SetTotal((*Bar).Current(), bool). +// If triggerCompleteNow is true, total value is set to current and +// complete event is triggered right away. +func (b *Bar) SetTotal(total int64, triggerCompleteNow bool) { select { case b.operateState <- func(s *bState) { - s.triggerComplete = triggerComplete - if total <= 0 { + if s.triggerComplete { + return + } + if total < 0 { s.total = s.current } else { s.total = total } - if s.triggerComplete && !s.completed { + if triggerCompleteNow { s.current = s.total s.completed = true - go b.forceRefreshIfLastUncompleted() + go b.forceRefresh() } }: case <-b.done: @@ -189,13 +210,12 @@ func (b *Bar) SetTotal(total int64, triggerComplete bool) { func (b *Bar) SetCurrent(current int64) { select { case b.operateState <- func(s *bState) { - s.iterated = true - s.lastN = current - s.current + s.lastIncrement = current - s.current s.current = current if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.forceRefreshIfLastUncompleted() + go b.forceRefresh() } }: case <-b.done: @@ -214,15 +234,17 @@ func (b *Bar) IncrBy(n int) { // IncrInt64 increments progress by amount of n. 
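A sketch of the dynamic-total flow that SetTotal and EnableTriggerComplete above describe; p and the work items are assumptions, and the IncrInt64 body continues below.

package example

import "github.com/vbauerster/mpb/v7"

func indexAll(p *mpb.Progress, items [][]byte) {
	// total <= 0: the complete event is not armed at construction time.
	bar := p.New(0, mpb.BarStyle())

	for _, it := range items {
		bar.IncrBy(len(it))
	}

	// Freeze the total without completing, then arm the complete event;
	// it fires immediately here because current >= total already holds.
	bar.SetTotal(bar.Current(), false)
	bar.EnableTriggerComplete()
}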
func (b *Bar) IncrInt64(n int64) { + if n <= 0 { + return + } select { case b.operateState <- func(s *bState) { - s.iterated = true - s.lastN = n + s.lastIncrement = n s.current += n if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.forceRefreshIfLastUncompleted() + go b.forceRefresh() } }: case <-b.done: @@ -236,10 +258,18 @@ func (b *Bar) IncrInt64(n int64) { func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { select { case b.operateState <- func(s *bState) { - ewmaIterationUpdate(false, s, dur) + if s.lastIncrement > 0 { + s.decoratorEwmaUpdate(dur) + s.lastIncrement = 0 + } else { + panic("increment required before ewma iteration update") + } }: case <-b.done: - ewmaIterationUpdate(true, b.cacheState, dur) + if b.bs.lastIncrement > 0 { + b.bs.decoratorEwmaUpdate(dur) + b.bs.lastIncrement = 0 + } } } @@ -249,9 +279,7 @@ func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { func (b *Bar) DecoratorAverageAdjust(start time.Time) { select { case b.operateState <- func(s *bState) { - for _, d := range s.averageDecorators { - d.AverageAdjust(start) - } + s.decoratorAverageAdjust(start) }: case <-b.done: } @@ -266,43 +294,33 @@ func (b *Bar) SetPriority(priority int) { // Abort interrupts bar's running goroutine. Abort won't be engaged // if bar is already in complete state. If drop is true bar will be -// removed as well. +// removed as well. To make sure that bar has been removed call +// (*Bar).Wait method. func (b *Bar) Abort(drop bool) { - done := make(chan struct{}) select { case b.operateState <- func(s *bState) { - if s.completed == true { - close(done) + if s.completed || s.aborted { return } - // container must be run during lifetime of this inner goroutine - // we control this by done channel declared above - go func() { - if drop { - b.container.dropBar(b) - } else { - var uncompleted int - b.container.traverseBars(func(bar *Bar) bool { - if b != bar && !bar.Completed() { - uncompleted++ - return false - } - return true - }) - if uncompleted == 0 { - b.container.refreshCh <- time.Now() - } - } - close(done) // release hold of Abort - }() - b.cancel() + s.aborted = true + s.dropOnComplete = drop + go b.forceRefresh() }: - // guarantee: container is alive during lifetime of this hold - <-done case <-b.done: } } +// Aborted reports whether the bar is in aborted state. +func (b *Bar) Aborted() bool { + result := make(chan bool) + select { + case b.operateState <- func(s *bState) { result <- s.aborted }: + return <-result + case <-b.done: + return b.bs.aborted + } +} + // Completed reports whether the bar is in completed state. func (b *Bar) Completed() bool { result := make(chan bool) @@ -310,22 +328,28 @@ func (b *Bar) Completed() bool { case b.operateState <- func(s *bState) { result <- s.completed }: return <-result case <-b.done: - return true + return b.bs.completed } } -func (b *Bar) serve(ctx context.Context, s *bState) { +// Wait blocks until bar is completed or aborted. 
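Because DecoratorEwmaUpdate above now panics unless an increment preceded it, read-loop ordering matters; a sketch with bar and r as assumptions (the Wait body continues below).

package example

import (
	"io"
	"time"

	"github.com/vbauerster/mpb/v7"
)

func readWithEwma(bar *mpb.Bar, r io.Reader) error {
	buf := make([]byte, 32*1024)
	for {
		start := time.Now()
		n, err := r.Read(buf)
		if n > 0 {
			// Increment first: IncrInt64 (above) ignores non-positive n,
			// and an EWMA update without a prior increment panics.
			bar.IncrBy(n)
			bar.DecoratorEwmaUpdate(time.Since(start))
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}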
+func (b *Bar) Wait() { + <-b.done +} + +func (b *Bar) serve(ctx context.Context, bs *bState) { defer b.container.bwg.Done() + if bs.afterBar != nil && bs.sync { + bs.afterBar.Wait() + } for { select { case op := <-b.operateState: - op(s) + op(bs) case <-ctx.Done(): - // Notifying decorators about shutdown event - for _, sl := range s.shutdownListeners { - sl.Shutdown() - } - b.cacheState = s + bs.aborted = !bs.completed + bs.decoratorShutdownNotify() + b.bs = bs close(b.done) return } @@ -335,76 +359,62 @@ func (b *Bar) serve(ctx context.Context, s *bState) { func (b *Bar) render(tw int) { select { case b.operateState <- func(s *bState) { + var reader io.Reader + var lines int stat := newStatistics(tw, s) defer func() { // recovering if user defined decorator panics for example if p := recover(); p != nil { - if b.recoveredPanic == nil { - s.extender = makePanicExtender(p) - b.toShutdown = !b.toShutdown - b.recoveredPanic = p + if s.debugOut != nil { + fmt.Fprintln(s.debugOut, p) + _, _ = s.debugOut.Write(debug.Stack()) } - reader, lines := s.extender(nil, s.reqWidth, stat) - b.frameCh <- &frame{reader, lines + 1} - b.dlogger.Println(p) + s.aborted = !s.completed + s.extender = makePanicExtender(p) + reader, lines = s.extender(nil, s.reqWidth, stat) + b.recoveredPanic = p } - s.completeFlushed = s.completed + frame := renderFrame{ + reader: reader, + lines: lines + 1, + shutdown: s.completed || s.aborted, + } + if frame.shutdown { + b.cancel() + } + b.frameCh <- &frame }() - reader, lines := s.extender(s.draw(stat), s.reqWidth, stat) - b.toShutdown = s.completed && !s.completeFlushed - b.frameCh <- &frame{reader, lines + 1} + if b.recoveredPanic == nil { + reader = s.draw(stat) + } + reader, lines = s.extender(reader, s.reqWidth, stat) }: case <-b.done: - s := b.cacheState - stat := newStatistics(tw, s) - var r io.Reader + var reader io.Reader + var lines int + stat, s := newStatistics(tw, b.bs), b.bs if b.recoveredPanic == nil { - r = s.draw(stat) - } - reader, lines := s.extender(r, s.reqWidth, stat) - b.frameCh <- &frame{reader, lines + 1} - } -} - -func (b *Bar) subscribeDecorators() { - var averageDecorators []decor.AverageDecorator - var ewmaDecorators []decor.EwmaDecorator - var shutdownListeners []decor.ShutdownListener - b.TraverseDecorators(func(d decor.Decorator) { - if d, ok := d.(decor.AverageDecorator); ok { - averageDecorators = append(averageDecorators, d) + reader = s.draw(stat) } - if d, ok := d.(decor.EwmaDecorator); ok { - ewmaDecorators = append(ewmaDecorators, d) + reader, lines = s.extender(reader, s.reqWidth, stat) + b.frameCh <- &renderFrame{ + reader: reader, + lines: lines + 1, } - if d, ok := d.(decor.ShutdownListener); ok { - shutdownListeners = append(shutdownListeners, d) - } - }) - b.hasEwmaDecorators = len(ewmaDecorators) != 0 - select { - case b.operateState <- func(s *bState) { - s.averageDecorators = averageDecorators - s.ewmaDecorators = ewmaDecorators - s.shutdownListeners = shutdownListeners - }: - case <-b.done: } } -func (b *Bar) forceRefreshIfLastUncompleted() { - var uncompleted int +func (b *Bar) forceRefresh() { + var anyOtherRunning bool b.container.traverseBars(func(bar *Bar) bool { - if b != bar && !bar.Completed() { - uncompleted++ - return false - } - return true + anyOtherRunning = b != bar && bar.isRunning() + return !anyOtherRunning }) - if uncompleted == 0 { + if !anyOtherRunning { for { select { case b.container.refreshCh <- time.Now(): + time.Sleep(prr) case <-b.done: return } @@ -412,51 +422,64 @@ func (b *Bar) 
forceRefreshIfLastUncompleted() { } } +func (b *Bar) isRunning() bool { + result := make(chan bool) + select { + case b.operateState <- func(s *bState) { + result <- !s.completed && !s.aborted + }: + return <-result + case <-b.done: + return false + } +} + func (b *Bar) wSyncTable() [][]chan int { result := make(chan [][]chan int) select { case b.operateState <- func(s *bState) { result <- s.wSyncTable() }: return <-result case <-b.done: - return b.cacheState.wSyncTable() + return b.bs.wSyncTable() } } func (s *bState) draw(stat decor.Statistics) io.Reader { + bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2] nlr := strings.NewReader("\n") tw := stat.AvailableWidth for _, d := range s.pDecorators { str := d.Decor(stat) stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) - s.bufP.WriteString(str) + bufP.WriteString(str) } if stat.AvailableWidth < 1 { - trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…")) - s.bufP.Reset() + trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), tw, "…")) + bufP.Reset() return io.MultiReader(trunc, nlr) } if !s.trimSpace && stat.AvailableWidth > 1 { stat.AvailableWidth -= 2 - s.bufB.WriteByte(' ') - defer s.bufB.WriteByte(' ') + bufB.WriteByte(' ') + defer bufB.WriteByte(' ') } tw = stat.AvailableWidth for _, d := range s.aDecorators { str := d.Decor(stat) stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) - s.bufA.WriteString(str) + bufA.WriteString(str) } if stat.AvailableWidth < 1 { - trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…")) - s.bufA.Reset() - return io.MultiReader(s.bufP, s.bufB, trunc, nlr) + trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), tw, "…")) + bufA.Reset() + return io.MultiReader(bufP, bufB, trunc, nlr) } - s.filler.Fill(s.bufB, s.reqWidth, stat) + s.filler.Fill(bufB, s.reqWidth, stat) - return io.MultiReader(s.bufP, s.bufB, s.bufA, nlr) + return io.MultiReader(bufP, bufB, bufA, nlr) } func (s *bState) wSyncTable() [][]chan int { @@ -481,14 +504,86 @@ func (s *bState) wSyncTable() [][]chan int { return table } +func (s *bState) subscribeDecorators() { + for _, decorators := range [...][]decor.Decorator{ + s.pDecorators, + s.aDecorators, + } { + for _, d := range decorators { + d = extractBaseDecorator(d) + if d, ok := d.(decor.AverageDecorator); ok { + s.averageDecorators = append(s.averageDecorators, d) + } + if d, ok := d.(decor.EwmaDecorator); ok { + s.ewmaDecorators = append(s.ewmaDecorators, d) + } + if d, ok := d.(decor.ShutdownListener); ok { + s.shutdownListeners = append(s.shutdownListeners, d) + } + } + } +} + +func (s bState) decoratorEwmaUpdate(dur time.Duration) { + wg := new(sync.WaitGroup) + for i := 0; i < len(s.ewmaDecorators); i++ { + switch d := s.ewmaDecorators[i]; i { + case len(s.ewmaDecorators) - 1: + d.EwmaUpdate(s.lastIncrement, dur) + default: + wg.Add(1) + go func() { + d.EwmaUpdate(s.lastIncrement, dur) + wg.Done() + }() + } + } + wg.Wait() +} + +func (s bState) decoratorAverageAdjust(start time.Time) { + wg := new(sync.WaitGroup) + for i := 0; i < len(s.averageDecorators); i++ { + switch d := s.averageDecorators[i]; i { + case len(s.averageDecorators) - 1: + d.AverageAdjust(start) + default: + wg.Add(1) + go func() { + d.AverageAdjust(start) + wg.Done() + }() + } + } + wg.Wait() +} + +func (s bState) decoratorShutdownNotify() { + wg := new(sync.WaitGroup) + for i := 0; i < len(s.shutdownListeners); i++ { + switch d := 
s.shutdownListeners[i]; i { + case len(s.shutdownListeners) - 1: + d.Shutdown() + default: + wg.Add(1) + go func() { + d.Shutdown() + wg.Done() + }() + } + } + wg.Wait() +} + func newStatistics(tw int, s *bState) decor.Statistics { return decor.Statistics{ - ID: s.id, AvailableWidth: tw, + ID: s.id, Total: s.total, Current: s.current, Refill: s.refill, - Completed: s.completeFlushed, + Completed: s.completed, + Aborted: s.aborted, } } @@ -499,27 +594,13 @@ func extractBaseDecorator(d decor.Decorator) decor.Decorator { return d } -func ewmaIterationUpdate(done bool, s *bState, dur time.Duration) { - if !done && !s.iterated { - panic("increment required before ewma iteration update") - } else { - s.iterated = false - } - for _, d := range s.ewmaDecorators { - d.EwmaUpdate(s.lastN, dur) - } -} - func makePanicExtender(p interface{}) extenderFunc { pstr := fmt.Sprint(p) - stack := debug.Stack() - stackLines := bytes.Count(stack, []byte("\n")) return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) { mr := io.MultiReader( strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")), - strings.NewReader(fmt.Sprintf("\n%#v\n", st)), - bytes.NewReader(stack), + strings.NewReader("\n"), ) - return mr, stackLines + 1 + return mr, 0 } } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go index a69087c474e..81177fc2eea 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go @@ -9,31 +9,42 @@ import ( // BarFiller interface. // Bar (without decorators) renders itself by calling BarFiller's Fill method. // -// reqWidth is requested width, set by `func WithWidth(int) ContainerOption`. +// reqWidth is requested width set by `func WithWidth(int) ContainerOption`. // If not set, it defaults to terminal width. // -// Default implementations can be obtained via: -// -// func NewBarFiller(BarStyle()) BarFiller -// func NewBarFiller(SpinnerStyle()) BarFiller -// type BarFiller interface { Fill(w io.Writer, reqWidth int, stat decor.Statistics) } // BarFillerBuilder interface. +// Default implementations are: +// +// BarStyle() +// SpinnerStyle() +// NopStyle() +// type BarFillerBuilder interface { Build() BarFiller } -// BarFillerFunc is function type adapter to convert function into BarFiller. +// BarFillerFunc is function type adapter to convert compatible function +// into BarFiller interface. type BarFillerFunc func(w io.Writer, reqWidth int, stat decor.Statistics) func (f BarFillerFunc) Fill(w io.Writer, reqWidth int, stat decor.Statistics) { f(w, reqWidth, stat) } +// BarFillerBuilderFunc is function type adapter to convert compatible +// function into BarFillerBuilder interface. +type BarFillerBuilderFunc func() BarFiller + +func (f BarFillerBuilderFunc) Build() BarFiller { + return f() +} + // NewBarFiller constructs a BarFiller from provided BarFillerBuilder. +// Deprecated. Prefer using `*Progress.New(...)` directly. 
func NewBarFiller(b BarFillerBuilder) BarFiller { return b.Build() } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go index 80b2104555d..d8bf90a4a94 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go @@ -32,13 +32,13 @@ type BarStyleComposer interface { } type bFiller struct { + rev bool components [components]*component tip struct { count uint onComplete *component frames []*component } - flush func(dst io.Writer, filling, padding [][]byte) } type component struct { @@ -113,14 +113,7 @@ func (s *barStyle) Reverse() BarStyleComposer { } func (s *barStyle) Build() BarFiller { - bf := new(bFiller) - if s.rev { - bf.flush = func(dst io.Writer, filling, padding [][]byte) { - flush(dst, padding, filling) - } - } else { - bf.flush = flush - } + bf := &bFiller{rev: s.rev} bf.components[iLbound] = &component{ width: runewidth.StringWidth(stripansi.Strip(s.lbound)), bytes: []byte(s.lbound), @@ -164,8 +157,8 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { return } - w.Write(s.components[iLbound].bytes) - defer w.Write(s.components[iRbound].bytes) + mustWrite(w, s.components[iLbound].bytes) + defer mustWrite(w, s.components[iRbound].bytes) if width == 0 { return @@ -236,14 +229,25 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { } } - s.flush(w, filling, padding) + if s.rev { + flush(w, padding, filling) + } else { + flush(w, filling, padding) + } } -func flush(dst io.Writer, filling, padding [][]byte) { +func flush(w io.Writer, filling, padding [][]byte) { for i := len(filling) - 1; i >= 0; i-- { - dst.Write(filling[i]) + mustWrite(w, filling[i]) } for i := 0; i < len(padding); i++ { - dst.Write(padding[i]) + mustWrite(w, padding[i]) + } +} + +func mustWrite(w io.Writer, p []byte) { + _, err := w.Write(p) + if err != nil { + panic(err) } } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go new file mode 100644 index 00000000000..1a7086fecb6 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go @@ -0,0 +1,14 @@ +package mpb + +import ( + "io" + + "github.com/vbauerster/mpb/v7/decor" +) + +// NopStyle provides BarFillerBuilder which builds NOP BarFiller. 
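One use of the NOP filler introduced above is a decorators-only status line; a sketch with illustrative decorator choices (the NopStyle body follows this aside).

package example

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func statusLine(p *mpb.Progress, total int64) *mpb.Bar {
	// No filler is rendered; only the decorators are drawn.
	return p.New(total, mpb.NopStyle(),
		mpb.PrependDecorators(decor.Name("uploading")),
		mpb.AppendDecorators(decor.Percentage()),
	)
}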
+func NopStyle() BarFillerBuilder { + return BarFillerBuilderFunc(func() BarFiller { + return BarFillerFunc(func(io.Writer, int, decor.Statistics) {}) + }) +} diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go index 58ae1c53283..d38525efc94 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go @@ -73,15 +73,19 @@ func (s *sFiller) Fill(w io.Writer, width int, stat decor.Statistics) { return } + var err error rest := width - frameWidth switch s.position { case positionLeft: - io.WriteString(w, frame+strings.Repeat(" ", rest)) + _, err = io.WriteString(w, frame+strings.Repeat(" ", rest)) case positionRight: - io.WriteString(w, strings.Repeat(" ", rest)+frame) + _, err = io.WriteString(w, strings.Repeat(" ", rest)+frame) default: str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2) - io.WriteString(w, str) + _, err = io.WriteString(w, str) + } + if err != nil { + panic(err) } s.count++ } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_option.go b/vendor/github.com/vbauerster/mpb/v7/bar_option.go index 46b7de0bf56..8599f0a57b9 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_option.go @@ -5,12 +5,20 @@ import ( "io" "github.com/vbauerster/mpb/v7/decor" - "github.com/vbauerster/mpb/v7/internal" ) // BarOption is a func option to alter default behavior of a bar. type BarOption func(*bState) +func skipNil(decorators []decor.Decorator) (filtered []decor.Decorator) { + for _, d := range decorators { + if d != nil { + filtered = append(filtered, d) + } + } + return +} + func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) { type mergeWrapper interface { MergeUnwrap() []decor.Decorator @@ -26,14 +34,14 @@ func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Deco // AppendDecorators let you inject decorators to the bar's right side. func AppendDecorators(decorators ...decor.Decorator) BarOption { return func(s *bState) { - s.addDecorators(&s.aDecorators, decorators...) + s.addDecorators(&s.aDecorators, skipNil(decorators)...) } } // PrependDecorators let you inject decorators to the bar's left side. func PrependDecorators(decorators ...decor.Decorator) BarOption { return func(s *bState) { - s.addDecorators(&s.pDecorators, decorators...) + s.addDecorators(&s.pDecorators, skipNil(decorators)...) } } @@ -51,14 +59,17 @@ func BarWidth(width int) BarOption { } } -// BarQueueAfter queues this (being constructed) bar to relplace -// runningBar after it has been completed. -func BarQueueAfter(runningBar *Bar) BarOption { - if runningBar == nil { +// BarQueueAfter puts this (being constructed) bar into the queue. +// When argument bar completes or aborts queued bar replaces its place. +// If sync is true queued bar is suspended until argument bar completes +// or aborts. 
+func BarQueueAfter(bar *Bar, sync bool) BarOption { + if bar == nil { return nil } return func(s *bState) { - s.runningBar = runningBar + s.afterBar = bar + s.sync = sync } } @@ -81,7 +92,10 @@ func BarFillerOnComplete(message string) BarOption { return BarFillerMiddleware(func(base BarFiller) BarFiller { return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) { if st.Completed { - io.WriteString(w, message) + _, err := io.WriteString(w, message) + if err != nil { + panic(err) + } } else { base.Fill(w, reqWidth, st) } @@ -138,9 +152,12 @@ func BarNoPop() BarOption { } } -// BarOptional will invoke provided option only when pick is true. -func BarOptional(option BarOption, pick bool) BarOption { - return BarOptOn(option, internal.Predicate(pick)) +// BarOptional will invoke provided option only when cond is true. +func BarOptional(option BarOption, cond bool) BarOption { + if cond { + return option + } + return nil } // BarOptOn will invoke provided option only when higher order predicate diff --git a/vendor/github.com/vbauerster/mpb/v7/container_option.go b/vendor/github.com/vbauerster/mpb/v7/container_option.go index a858c3c51dd..e523a175938 100644 --- a/vendor/github.com/vbauerster/mpb/v7/container_option.go +++ b/vendor/github.com/vbauerster/mpb/v7/container_option.go @@ -5,8 +5,6 @@ import ( "io/ioutil" "sync" "time" - - "github.com/vbauerster/mpb/v7/internal" ) // ContainerOption is a func option to alter default behavior of a bar @@ -101,9 +99,12 @@ func PopCompletedMode() ContainerOption { } } -// ContainerOptional will invoke provided option only when pick is true. -func ContainerOptional(option ContainerOption, pick bool) ContainerOption { - return ContainerOptOn(option, internal.Predicate(pick)) +// ContainerOptional will invoke provided option only when cond is true. +func ContainerOptional(option ContainerOption, cond bool) ContainerOption { + if cond { + return option + } + return nil } // ContainerOptOn will invoke provided option only when higher order diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go index 925c8b1dcf1..fac15b3bce9 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go +++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go @@ -11,7 +11,7 @@ import ( // ErrNotTTY not a TeleTYpewriter error. var ErrNotTTY = errors.New("not a terminal") -// http://ascii-table.com/ansi-escape-sequences.php +// https://github.com/dylanaraps/pure-sh-bible#cursor-movement const ( escOpen = "\x1b[" cuuAndEd = "A\x1b[J" @@ -76,9 +76,9 @@ func (w *Writer) GetWidth() (int, error) { return tw, err } -func (w *Writer) ansiCuuAndEd() (err error) { +func (w *Writer) ansiCuuAndEd() error { buf := make([]byte, 8) buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10) - _, err = w.out.Write(append(buf, cuuAndEd...)) - return + _, err := w.out.Write(append(buf, cuuAndEd...)) + return err } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go index e81fae367ee..aad7709c062 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go @@ -47,12 +47,13 @@ const ( // Statistics consists of progress related statistics, that Decorator // may need. type Statistics struct { - ID int AvailableWidth int + ID int Total int64 Current int64 Refill int64 Completed bool + Aborted bool } // Decorator interface. 
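Reviewer note on the API changes above: `NewBarFiller(BarStyle())` is deprecated in favor of the builder-based `(*Progress).New`, `BarQueueAfter` gains a `sync` argument, and `decor.Statistics` now carries an `Aborted` flag alongside `Completed`. A minimal sketch of what updated call sites look like against the vendored v7.4.1 API (bar names, totals, and sleep intervals are illustrative, not taken from this diff):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	p := mpb.New(mpb.WithWidth(64))

	// Builder-based constructor replaces the deprecated
	// p.Add(total, NewBarFiller(BarStyle()), ...) form.
	download := p.New(100,
		mpb.BarStyle(),
		mpb.PrependDecorators(decor.Name("download ")),
		mpb.AppendDecorators(decor.Percentage()),
	)

	// Queued bar: takes download's place once download completes or
	// aborts; with sync == true it stays suspended until that event.
	extract := p.New(100,
		mpb.SpinnerStyle(),
		mpb.BarQueueAfter(download, true),
		mpb.PrependDecorators(decor.Name("extract ")),
		mpb.AppendDecorators(decor.Percentage()),
	)

	for i := 0; i < 100; i++ {
		download.Increment()
		time.Sleep(5 * time.Millisecond)
	}
	for i := 0; i < 100; i++ {
		extract.Increment()
		time.Sleep(5 * time.Millisecond)
	}
	p.Wait()
}
```

Passing `false` for `sync` queues the replacement without suspending it, which matches the old single-argument `BarQueueAfter(runningBar)` behavior this hunk replaces.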
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go index e41406a64da..84767115515 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go @@ -17,6 +17,9 @@ import ( // +----+--------+---------+--------+ // func Merge(decorator Decorator, placeholders ...WC) Decorator { + if decorator == nil { + return nil + } if _, ok := decorator.Sync(); !ok || len(placeholders) == 0 { return decorator } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go new file mode 100644 index 00000000000..10ff67009cc --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go @@ -0,0 +1,41 @@ +package decor + +// OnAbort returns decorator, which wraps provided decorator with sole +// purpose to display provided message on abort event. It has no effect +// if bar.Abort(drop bool) is called with true argument. +// +// `decorator` Decorator to wrap +// +// `message` message to display on abort event +// +func OnAbort(decorator Decorator, message string) Decorator { + if decorator == nil { + return nil + } + d := &onAbortWrapper{ + Decorator: decorator, + msg: message, + } + if md, ok := decorator.(*mergeDecorator); ok { + d.Decorator, md.Decorator = md.Decorator, d + return md + } + return d +} + +type onAbortWrapper struct { + Decorator + msg string +} + +func (d *onAbortWrapper) Decor(s Statistics) string { + if s.Aborted { + wc := d.GetConf() + return wc.FormatMsg(d.msg) + } + return d.Decorator.Decor(s) +} + +func (d *onAbortWrapper) Base() Decorator { + return d.Decorator +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go index f46b19abaa9..2ada2b31b16 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go @@ -1,6 +1,6 @@ package decor -// OnComplete returns decorator, which wraps provided decorator, with +// OnComplete returns decorator, which wraps provided decorator with // sole purpose to display provided message on complete event. // // `decorator` Decorator to wrap @@ -8,6 +8,9 @@ package decor // `message` message to display on complete event // func OnComplete(decorator Decorator, message string) Decorator { + if decorator == nil { + return nil + } d := &onCompleteWrapper{ Decorator: decorator, msg: message, diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go new file mode 100644 index 00000000000..a9db0653aee --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go @@ -0,0 +1,27 @@ +package decor + +// OnPredicate returns decorator if predicate evaluates to true. +// +// `decorator` Decorator +// +// `predicate` func() bool +// +func OnPredicate(decorator Decorator, predicate func() bool) Decorator { + if predicate() { + return decorator + } + return nil +} + +// OnCondition returns decorator if condition is true. 
+// +// `decorator` Decorator +// +// `cond` bool +// +func OnCondition(decorator Decorator, cond bool) Decorator { + if cond { + return decorator + } + return nil +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go new file mode 100644 index 00000000000..c6a34384e63 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go @@ -0,0 +1,10 @@ +package decor + +import "io" + +func mustWriteString(w io.Writer, s string) { + _, err := io.WriteString(w, s) + if err != nil { + panic(err) + } +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go index 2b0a7a9562c..e72668993ee 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go @@ -2,7 +2,6 @@ package decor import ( "fmt" - "io" "strconv" "github.com/vbauerster/mpb/v7/internal" @@ -24,12 +23,11 @@ func (s percentageType) Format(st fmt.State, verb rune) { } } - io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64)) - + mustWriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64)) if st.Flag(' ') { - io.WriteString(st, " ") + mustWriteString(st, " ") } - io.WriteString(st, "%") + mustWriteString(st, "%") } // Percentage returns percentage decorator. It's a wrapper of NewPercentage. diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go index e4b9740584b..09ecc23f807 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go @@ -2,8 +2,6 @@ package decor import ( "fmt" - "io" - "math" "strconv" ) @@ -47,16 +45,15 @@ func (self SizeB1024) Format(st fmt.State, verb rune) { unit = _iMiB case self < _iTiB: unit = _iGiB - case self <= math.MaxInt64: + default: unit = _iTiB } - io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) - + mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) if st.Flag(' ') { - io.WriteString(st, " ") + mustWriteString(st, " ") } - io.WriteString(st, unit.String()) + mustWriteString(st, unit.String()) } const ( @@ -96,14 +93,13 @@ func (self SizeB1000) Format(st fmt.State, verb rune) { unit = _MB case self < _TB: unit = _GB - case self <= math.MaxInt64: + default: unit = _TB } - io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) - + mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) if st.Flag(' ') { - io.WriteString(st, " ") + mustWriteString(st, " ") } - io.WriteString(st, unit.String()) + mustWriteString(st, unit.String()) } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go index 634edabfdc0..f052352fc40 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go @@ -2,7 +2,6 @@ package decor import ( "fmt" - "io" "math" "time" @@ -24,7 +23,7 @@ type speedFormatter struct { func (self *speedFormatter) Format(st fmt.State, verb rune) { self.Formatter.Format(st, verb) - io.WriteString(st, "/s") + mustWriteString(st, "/s") } // EwmaSpeed exponential-weighted-moving-average based speed decorator. 
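Reviewer note on the decorator hunks above: `decor.Merge`, `decor.OnComplete`, and the new `decor.OnAbort` now tolerate a nil decorator, and `AppendDecorators`/`PrependDecorators` filter nil entries via `skipNil`, so conditional decorators built with the new `decor.OnCondition`/`decor.OnPredicate` compose without explicit guards. A minimal sketch under those assumptions (the `verbose` toggle is hypothetical):

```go
package main

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	verbose := false // hypothetical toggle, e.g. from a CLI flag

	p := mpb.New()
	bar := p.New(64,
		mpb.BarStyle(),
		mpb.AppendDecorators(
			// OnAbort shows "aborted" on the abort event, OnComplete shows
			// "done" on completion; otherwise the wrapped Percentage renders.
			decor.OnAbort(decor.OnComplete(decor.Percentage(), "done"), "aborted"),
			// With verbose == false this evaluates to nil, which the new
			// skipNil filter in AppendDecorators silently drops.
			decor.OnCondition(decor.Name(" [verbose]"), verbose),
		),
	)

	for i := 0; i < 64; i++ {
		bar.Increment()
	}
	p.Wait()
}
```

This is also why the internal `Predicate` helper could be deleted in the next hunk: `BarOptional`/`ContainerOptional` now return the option or nil directly instead of wrapping a bool in a closure.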
diff --git a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go b/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go deleted file mode 100644 index 1e4dd24d9da..00000000000 --- a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go +++ /dev/null @@ -1,6 +0,0 @@ -package internal - -// Predicate helper for internal use. -func Predicate(pick bool) func() bool { - return func() bool { return pick } -} diff --git a/vendor/github.com/vbauerster/mpb/v7/progress.go b/vendor/github.com/vbauerster/mpb/v7/progress.go index c60c6569406..1d9a53e5c9d 100644 --- a/vendor/github.com/vbauerster/mpb/v7/progress.go +++ b/vendor/github.com/vbauerster/mpb/v7/progress.go @@ -6,8 +6,6 @@ import ( "context" "fmt" "io" - "io/ioutil" - "log" "math" "os" "sync" @@ -18,12 +16,10 @@ import ( ) const ( - // default RefreshRate - prr = 150 * time.Millisecond + prr = 150 * time.Millisecond // default RefreshRate ) -// Progress represents a container that renders one or more progress -// bars. +// Progress represents a container that renders one or more progress bars. type Progress struct { ctx context.Context uwg *sync.WaitGroup @@ -33,17 +29,14 @@ type Progress struct { done chan struct{} refreshCh chan time.Time once sync.Once - dlogger *log.Logger } -// pState holds bars in its priorityQueue. It gets passed to -// *Progress.serve(...) monitor goroutine. +// pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine. type pState struct { - bHeap priorityQueue - heapUpdated bool - pMatrix map[int][]chan int - aMatrix map[int][]chan int - barShutdownQueue []*Bar + bHeap priorityQueue + heapUpdated bool + pMatrix map[int][]chan int + aMatrix map[int][]chan int // following are provided/overrided by user idCount int @@ -55,27 +48,26 @@ type pState struct { externalRefresh <-chan interface{} renderDelay <-chan struct{} shutdownNotifier chan struct{} - parkedBars map[*Bar]*Bar + queueBars map[*Bar]*Bar output io.Writer debugOut io.Writer } // New creates new Progress container instance. It's not possible to -// reuse instance after *Progress.Wait() method has been called. +// reuse instance after (*Progress).Wait method has been called. func New(options ...ContainerOption) *Progress { return NewWithContext(context.Background(), options...) } // NewWithContext creates new Progress container instance with provided -// context. It's not possible to reuse instance after *Progress.Wait() +// context. It's not possible to reuse instance after (*Progress).Wait // method has been called. func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { s := &pState{ - bHeap: priorityQueue{}, - rr: prr, - parkedBars: make(map[*Bar]*Bar), - output: os.Stdout, - debugOut: ioutil.Discard, + bHeap: priorityQueue{}, + rr: prr, + queueBars: make(map[*Bar]*Bar), + output: os.Stdout, } for _, opt := range options { @@ -91,7 +83,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { bwg: new(sync.WaitGroup), operateState: make(chan func(*pState)), done: make(chan struct{}), - dlogger: log.New(s.debugOut, "[mpb] ", log.Lshortfile), } p.cwg.Add(1) @@ -99,25 +90,27 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { return p } -// AddBar creates a bar with default bar filler. Different filler can -// be chosen and applied via `*Progress.Add(...) *Bar` method. +// AddBar creates a bar with default bar filler. 
func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { - return p.Add(total, NewBarFiller(BarStyle()), options...) + return p.New(total, BarStyle(), options...) } -// AddSpinner creates a bar with default spinner filler. Different -// filler can be chosen and applied via `*Progress.Add(...) *Bar` -// method. +// AddSpinner creates a bar with default spinner filler. func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { - return p.Add(total, NewBarFiller(SpinnerStyle()), options...) + return p.New(total, SpinnerStyle(), options...) +} + +// New creates a bar with provided BarFillerBuilder. +func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar { + return p.Add(total, builder.Build(), options...) } // Add creates a bar which renders itself by provided filler. -// If `total <= 0` trigger complete event is disabled until reset with *bar.SetTotal(int64, bool). -// Panics if *Progress instance is done, i.e. called after *Progress.Wait(). +// If `total <= 0` triggering complete event by increment methods is disabled. +// Panics if *Progress instance is done, i.e. called after (*Progress).Wait(). func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar { if filler == nil { - filler = BarFillerFunc(func(io.Writer, int, decor.Statistics) {}) + filler = NopStyle().Build() } p.bwg.Add(1) result := make(chan *Bar) @@ -125,9 +118,8 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar case p.operateState <- func(ps *pState) { bs := ps.makeBarState(total, filler, options...) bar := newBar(p, bs) - if bs.runningBar != nil { - bs.runningBar.noPop = true - ps.parkedBars[bs.runningBar] = bar + if bs.afterBar != nil { + ps.queueBars[bs.afterBar] = bar } else { heap.Push(&ps.bHeap, bar) ps.heapUpdated = true @@ -136,7 +128,6 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar result <- bar }: bar := <-result - bar.subscribeDecorators() return bar case <-p.done: p.bwg.Done() @@ -144,21 +135,8 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar } } -func (p *Progress) dropBar(b *Bar) { - select { - case p.operateState <- func(s *pState) { - if b.index < 0 { - return - } - heap.Remove(&s.bHeap, b.index) - s.heapUpdated = true - }: - case <-p.done: - } -} - func (p *Progress) traverseBars(cb func(b *Bar) bool) { - done := make(chan struct{}) + sync := make(chan struct{}) select { case p.operateState <- func(s *pState) { for i := 0; i < s.bHeap.Len(); i++ { @@ -167,9 +145,9 @@ func (p *Progress) traverseBars(cb func(b *Bar) bool) { break } } - close(done) + close(sync) }: - <-done + <-sync case <-p.done: } } @@ -203,8 +181,8 @@ func (p *Progress) BarCount() int { // After this method has been called, there is no way to reuse *Progress // instance. 
func (p *Progress) Wait() { + // wait for user wg, if any if p.uwg != nil { - // wait for user wg p.uwg.Wait() } @@ -232,12 +210,26 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) { op(s) case <-p.refreshCh: if err := s.render(cw); err != nil { - p.dlogger.Println(err) + if s.debugOut != nil { + _, e := fmt.Fprintln(s.debugOut, err) + if e != nil { + panic(err) + } + } else { + panic(err) + } } case <-s.shutdownNotifier: for s.heapUpdated { if err := s.render(cw); err != nil { - p.dlogger.Println(err) + if s.debugOut != nil { + _, e := fmt.Fprintln(s.debugOut, err) + if e != nil { + panic(err) + } + } else { + panic(err) + } } } return @@ -245,6 +237,64 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) { } } +func (s *pState) render(cw *cwriter.Writer) error { + if s.heapUpdated { + s.updateSyncMatrix() + s.heapUpdated = false + } + syncWidth(s.pMatrix) + syncWidth(s.aMatrix) + + tw, err := cw.GetWidth() + if err != nil { + tw = s.reqWidth + } + for i := 0; i < s.bHeap.Len(); i++ { + bar := s.bHeap[i] + go bar.render(tw) + } + + return s.flush(cw) +} + +func (s *pState) flush(cw *cwriter.Writer) error { + var lines int + pool := make([]*Bar, 0, s.bHeap.Len()) + for s.bHeap.Len() > 0 { + b := heap.Pop(&s.bHeap).(*Bar) + frame := <-b.frameCh + lines += frame.lines + _, err := cw.ReadFrom(frame.reader) + if err != nil { + return err + } + if frame.shutdown { + b.Wait() // waiting for b.done, so it's safe to read b.bs + var toDrop bool + if qb, ok := s.queueBars[b]; ok { + delete(s.queueBars, b) + qb.priority = b.priority + pool = append(pool, qb) + toDrop = true + } else if s.popCompleted && !b.bs.noPop { + lines -= frame.lines + toDrop = true + } + if toDrop || b.bs.dropOnComplete { + s.heapUpdated = true + continue + } + } + pool = append(pool, b) + } + + for _, b := range pool { + heap.Push(&s.bHeap, b) + } + + return cw.Flush(lines) +} + func (s *pState) newTicker(done <-chan struct{}) chan time.Time { ch := make(chan time.Time) if s.shutdownNotifier == nil { @@ -283,75 +333,6 @@ func (s *pState) newTicker(done <-chan struct{}) chan time.Time { return ch } -func (s *pState) render(cw *cwriter.Writer) error { - if s.heapUpdated { - s.updateSyncMatrix() - s.heapUpdated = false - } - syncWidth(s.pMatrix) - syncWidth(s.aMatrix) - - tw, err := cw.GetWidth() - if err != nil { - tw = s.reqWidth - } - for i := 0; i < s.bHeap.Len(); i++ { - bar := s.bHeap[i] - go bar.render(tw) - } - - return s.flush(cw) -} - -func (s *pState) flush(cw *cwriter.Writer) error { - var totalLines int - bm := make(map[*Bar]int, s.bHeap.Len()) - for s.bHeap.Len() > 0 { - b := heap.Pop(&s.bHeap).(*Bar) - frame := <-b.frameCh - cw.ReadFrom(frame.reader) - if b.toShutdown { - if b.recoveredPanic != nil { - s.barShutdownQueue = append(s.barShutdownQueue, b) - b.toShutdown = false - } else { - // shutdown at next flush - // this ensures no bar ends up with less than 100% rendered - defer func() { - s.barShutdownQueue = append(s.barShutdownQueue, b) - }() - } - } - bm[b] = frame.lines - totalLines += frame.lines - } - - for _, b := range s.barShutdownQueue { - if parkedBar := s.parkedBars[b]; parkedBar != nil { - parkedBar.priority = b.priority - heap.Push(&s.bHeap, parkedBar) - delete(s.parkedBars, b) - b.toDrop = true - } - if s.popCompleted && !b.noPop { - totalLines -= bm[b] - b.toDrop = true - } - if b.toDrop { - delete(bm, b) - s.heapUpdated = true - } - b.cancel() - } - s.barShutdownQueue = s.barShutdownQueue[0:0] - - for b := range bm { - heap.Push(&s.bHeap, b) - } - - return cw.Flush(totalLines) 
-} - func (s *pState) updateSyncMatrix() { s.pMatrix = make(map[int][]chan int) s.aMatrix = make(map[int][]chan int) @@ -400,9 +381,11 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio bs.priority = -(math.MaxInt32 - s.idCount) } - bs.bufP = bytes.NewBuffer(make([]byte, 0, 128)) - bs.bufB = bytes.NewBuffer(make([]byte, 0, 256)) - bs.bufA = bytes.NewBuffer(make([]byte, 0, 128)) + for i := 0; i < len(bs.buffers); i++ { + bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512)) + } + + bs.subscribeDecorators() return bs } @@ -413,7 +396,7 @@ func syncWidth(matrix map[int][]chan int) { } } -var maxWidthDistributor = func(column []chan int) { +func maxWidthDistributor(column []chan int) { var maxWidth int for _, ch := range column { if w := <-ch; w > maxWidth { diff --git a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go index 316f438d76b..b0dd89d458c 100644 --- a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go +++ b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go @@ -11,73 +11,63 @@ type proxyReader struct { bar *Bar } -func (x *proxyReader) Read(p []byte) (int, error) { +func (x proxyReader) Read(p []byte) (int, error) { n, err := x.ReadCloser.Read(p) x.bar.IncrBy(n) - if err == io.EOF { - go x.bar.SetTotal(0, true) - } return n, err } type proxyWriterTo struct { - io.ReadCloser // *proxyReader - wt io.WriterTo - bar *Bar + proxyReader + wt io.WriterTo } -func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) { +func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) { n, err := x.wt.WriteTo(w) x.bar.IncrInt64(n) - if err == io.EOF { - go x.bar.SetTotal(0, true) - } return n, err } type ewmaProxyReader struct { - io.ReadCloser // *proxyReader - bar *Bar - iT time.Time + proxyReader } -func (x *ewmaProxyReader) Read(p []byte) (int, error) { - n, err := x.ReadCloser.Read(p) +func (x ewmaProxyReader) Read(p []byte) (int, error) { + start := time.Now() + n, err := x.proxyReader.Read(p) if n > 0 { - x.bar.DecoratorEwmaUpdate(time.Since(x.iT)) - x.iT = time.Now() + x.bar.DecoratorEwmaUpdate(time.Since(start)) } return n, err } type ewmaProxyWriterTo struct { - io.ReadCloser // *ewmaProxyReader - wt io.WriterTo // *proxyWriterTo - bar *Bar - iT time.Time + ewmaProxyReader + wt proxyWriterTo } -func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { +func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { + start := time.Now() n, err := x.wt.WriteTo(w) if n > 0 { - x.bar.DecoratorEwmaUpdate(time.Since(x.iT)) - x.iT = time.Now() + x.bar.DecoratorEwmaUpdate(time.Since(start)) } return n, err } -func newProxyReader(r io.Reader, bar *Bar) io.ReadCloser { - rc := toReadCloser(r) - rc = &proxyReader{rc, bar} - - if wt, isWriterTo := r.(io.WriterTo); bar.hasEwmaDecorators { - now := time.Now() - rc = &ewmaProxyReader{rc, bar, now} - if isWriterTo { - rc = &ewmaProxyWriterTo{rc, wt, bar, now} +func (b *Bar) newProxyReader(r io.Reader) (rc io.ReadCloser) { + pr := proxyReader{toReadCloser(r), b} + if wt, ok := r.(io.WriterTo); ok { + pw := proxyWriterTo{pr, wt} + if b.hasEwma { + rc = ewmaProxyWriterTo{ewmaProxyReader{pr}, pw} + } else { + rc = pw } - } else if isWriterTo { - rc = &proxyWriterTo{rc, wt, bar} + } else if b.hasEwma { + rc = ewmaProxyReader{pr} + } else { + rc = pr } return rc } diff --git a/vendor/modules.txt b/vendor/modules.txt index cb19532f807..e131ca63553 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -52,7 +52,7 @@ 
github.com/Microsoft/hcsshim/internal/vmcompute github.com/Microsoft/hcsshim/internal/wclayer github.com/Microsoft/hcsshim/internal/winapi github.com/Microsoft/hcsshim/osversion -# github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 +# github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5 ## explicit; go 1.13 github.com/ProtonMail/go-crypto/bitcurves github.com/ProtonMail/go-crypto/brainpool @@ -178,8 +178,8 @@ github.com/containerd/cgroups/stats/v1 # github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console -# github.com/containerd/containerd v1.5.8 -## explicit; go 1.16 +# github.com/containerd/containerd v1.6.4 +## explicit; go 1.17 github.com/containerd/containerd/api/services/ttrpc/events/v1 github.com/containerd/containerd/api/types github.com/containerd/containerd/api/types/task @@ -187,6 +187,8 @@ github.com/containerd/containerd/cio github.com/containerd/containerd/defaults github.com/containerd/containerd/errdefs github.com/containerd/containerd/events +github.com/containerd/containerd/events/exchange +github.com/containerd/containerd/filters github.com/containerd/containerd/identifiers github.com/containerd/containerd/log github.com/containerd/containerd/namespaces @@ -195,9 +197,11 @@ github.com/containerd/containerd/pkg/cri/io github.com/containerd/containerd/pkg/cri/util github.com/containerd/containerd/pkg/dialer github.com/containerd/containerd/pkg/ioutil +github.com/containerd/containerd/pkg/shutdown github.com/containerd/containerd/pkg/ttrpcutil github.com/containerd/containerd/pkg/userns github.com/containerd/containerd/platforms +github.com/containerd/containerd/plugin github.com/containerd/containerd/reference/docker github.com/containerd/containerd/runtime/v2/shim github.com/containerd/containerd/runtime/v2/task @@ -213,7 +217,7 @@ github.com/containerd/fifo # github.com/containerd/go-runc v1.0.0 ## explicit; go 1.13 github.com/containerd/go-runc -# github.com/containerd/stargz-snapshotter/estargz v0.10.1 +# github.com/containerd/stargz-snapshotter/estargz v0.11.4 ## explicit; go 1.16 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil @@ -238,8 +242,8 @@ github.com/containernetworking/cni/pkg/version # github.com/containernetworking/plugins v1.1.1 ## explicit; go 1.17 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.23.1 -## explicit; go 1.13 +# github.com/containers/buildah v1.26.1 +## explicit; go 1.16 github.com/containers/buildah github.com/containers/buildah/bind github.com/containers/buildah/chroot @@ -247,6 +251,9 @@ github.com/containers/buildah/copier github.com/containers/buildah/define github.com/containers/buildah/docker github.com/containers/buildah/imagebuildah +github.com/containers/buildah/internal +github.com/containers/buildah/internal/parse +github.com/containers/buildah/internal/util github.com/containers/buildah/pkg/blobcache github.com/containers/buildah/pkg/chrootuser github.com/containers/buildah/pkg/overlay @@ -254,18 +261,26 @@ github.com/containers/buildah/pkg/parse github.com/containers/buildah/pkg/rusage github.com/containers/buildah/pkg/sshagent github.com/containers/buildah/util -# github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f -## explicit; go 1.15 +# github.com/containers/common v0.48.0 +## explicit; go 1.16 github.com/containers/common/libimage github.com/containers/common/libimage/manifests +github.com/containers/common/libnetwork/cni 
+github.com/containers/common/libnetwork/etchosts +github.com/containers/common/libnetwork/internal/util +github.com/containers/common/libnetwork/netavark +github.com/containers/common/libnetwork/network +github.com/containers/common/libnetwork/types +github.com/containers/common/libnetwork/util github.com/containers/common/pkg/apparmor github.com/containers/common/pkg/apparmor/internal/supported github.com/containers/common/pkg/capabilities github.com/containers/common/pkg/cgroupv2 github.com/containers/common/pkg/chown github.com/containers/common/pkg/config -github.com/containers/common/pkg/defaultnet +github.com/containers/common/pkg/download github.com/containers/common/pkg/filters +github.com/containers/common/pkg/machine github.com/containers/common/pkg/manifests github.com/containers/common/pkg/parse github.com/containers/common/pkg/retry @@ -280,6 +295,7 @@ github.com/containers/common/pkg/supplemented github.com/containers/common/pkg/sysinfo github.com/containers/common/pkg/timetype github.com/containers/common/pkg/umask +github.com/containers/common/pkg/util github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible ## explicit @@ -288,8 +304,8 @@ github.com/containers/conmon/runner/config ## explicit; go 1.18 github.com/containers/conmon-rs/internal/proto github.com/containers/conmon-rs/pkg/client -# github.com/containers/image/v5 v5.17.0 -## explicit; go 1.13 +# github.com/containers/image/v5 v5.21.1 +## explicit; go 1.16 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -302,14 +318,15 @@ github.com/containers/image/v5/docker/reference github.com/containers/image/v5/docker/tarfile github.com/containers/image/v5/image github.com/containers/image/v5/internal/blobinfocache +github.com/containers/image/v5/internal/imagedestination +github.com/containers/image/v5/internal/imagesource github.com/containers/image/v5/internal/iolimits -github.com/containers/image/v5/internal/pkg/keyctl github.com/containers/image/v5/internal/pkg/platform +github.com/containers/image/v5/internal/private github.com/containers/image/v5/internal/putblobdigest github.com/containers/image/v5/internal/rootless github.com/containers/image/v5/internal/streamdigest github.com/containers/image/v5/internal/tmpdir -github.com/containers/image/v5/internal/types github.com/containers/image/v5/internal/uploadreader github.com/containers/image/v5/manifest github.com/containers/image/v5/oci/archive @@ -317,6 +334,7 @@ github.com/containers/image/v5/oci/internal github.com/containers/image/v5/oci/layout github.com/containers/image/v5/openshift github.com/containers/image/v5/ostree +github.com/containers/image/v5/pkg/blobcache github.com/containers/image/v5/pkg/blobinfocache github.com/containers/image/v5/pkg/blobinfocache/boltdb github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize @@ -330,6 +348,7 @@ github.com/containers/image/v5/pkg/shortnames github.com/containers/image/v5/pkg/strslice github.com/containers/image/v5/pkg/sysregistriesv2 github.com/containers/image/v5/pkg/tlsclientconfig +github.com/containers/image/v5/sif github.com/containers/image/v5/signature github.com/containers/image/v5/storage github.com/containers/image/v5/tarball @@ -337,7 +356,7 @@ github.com/containers/image/v5/transports github.com/containers/image/v5/transports/alltransports github.com/containers/image/v5/types github.com/containers/image/v5/version -# github.com/containers/libtrust 
v0.0.0-20190913040956-14b96171aa3b +# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a ## explicit github.com/containers/libtrust # github.com/containers/ocicrypt v1.1.5 @@ -421,8 +440,8 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.37.0 -## explicit; go 1.14 +# github.com/containers/storage v1.40.2 +## explicit; go 1.16 github.com/containers/storage github.com/containers/storage/drivers github.com/containers/storage/drivers/aufs @@ -512,7 +531,7 @@ github.com/docker/distribution/registry/client/auth/challenge github.com/docker/distribution/registry/client/transport github.com/docker/distribution/registry/storage/cache github.com/docker/distribution/registry/storage/cache/memory -# github.com/docker/docker v20.10.12+incompatible +# github.com/docker/docker v20.10.14+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -548,11 +567,14 @@ github.com/docker/docker/pkg/system ## explicit; go 1.13 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials -# github.com/docker/go-connections v0.4.0 -## explicit +# github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 +## explicit; go 1.13 github.com/docker/go-connections/nat github.com/docker/go-connections/sockets github.com/docker/go-connections/tlsconfig +# github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c +## explicit +github.com/docker/go-events # github.com/docker/go-metrics v0.0.1 ## explicit; go 1.11 github.com/docker/go-metrics @@ -586,8 +608,8 @@ github.com/fatih/color # github.com/fsnotify/fsnotify v1.5.4 ## explicit; go 1.16 github.com/fsnotify/fsnotify -# github.com/fsouza/go-dockerclient v1.7.4 -## explicit; go 1.16 +# github.com/fsouza/go-dockerclient v1.7.11 +## explicit; go 1.17 github.com/fsouza/go-dockerclient # github.com/ghodss/yaml v1.0.0 ## explicit @@ -807,8 +829,8 @@ github.com/ishidawataru/sctp # github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 ## explicit github.com/jbenet/go-context/io -# github.com/jinzhu/copier v0.3.2 -## explicit; go 1.15 +# github.com/jinzhu/copier v0.3.5 +## explicit; go 1.13 github.com/jinzhu/copier # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 @@ -822,12 +844,13 @@ github.com/json-iterator/go # github.com/kevinburke/ssh_config v1.1.0 ## explicit github.com/kevinburke/ssh_config -# github.com/klauspost/compress v1.14.2 -## explicit; go 1.15 +# github.com/klauspost/compress v1.15.2 +## explicit; go 1.16 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash @@ -886,7 +909,7 @@ github.com/moby/spdystream/spdy # github.com/moby/sys/mount v0.2.0 ## explicit; go 1.14 github.com/moby/sys/mount -# github.com/moby/sys/mountinfo v0.6.0 +# github.com/moby/sys/mountinfo v0.6.1 ## explicit; go 1.16 github.com/moby/sys/mountinfo # github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 @@ -902,9 +925,6 @@ github.com/modern-go/reflect2 # github.com/morikuni/aec v1.0.0 ## explicit github.com/morikuni/aec -# github.com/mtrmac/gpgme v0.1.2 -## explicit; go 1.11 -github.com/mtrmac/gpgme # github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg @@ -914,8 +934,6 @@ github.com/nozzle/throttler # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/onsi/ginkgo v1.16.5 -## explicit; go 1.16 # github.com/onsi/ginkgo/v2 v2.1.4 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 @@ -990,14 +1008,14 @@ github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 -## explicit +# github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805 +## explicit; go 1.16 github.com/openshift/imagebuilder github.com/openshift/imagebuilder/dockerfile/command github.com/openshift/imagebuilder/dockerfile/parser github.com/openshift/imagebuilder/signal github.com/openshift/imagebuilder/strslice -# github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 +# github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f ## explicit github.com/ostreedev/ostree-go/pkg/glibobject github.com/ostreedev/ostree-go/pkg/otbuiltin @@ -1021,6 +1039,9 @@ github.com/pmezard/go-difflib/difflib # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c ## explicit; go 1.14 github.com/power-devops/perfstat +# github.com/proglottis/gpgme v0.1.1 +## explicit; go 1.11 +github.com/proglottis/gpgme # github.com/prometheus/client_golang v1.12.2 ## explicit; go 1.13 github.com/prometheus/client_golang/prometheus @@ -1105,6 +1126,9 @@ github.com/stefanberger/go-pkcs11uri # github.com/stretchr/testify v1.7.2 ## explicit; go 1.13 github.com/stretchr/testify/assert +# github.com/sylabs/sif/v2 v2.7.0 +## explicit; go 1.17 +github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit github.com/syndtr/gocapability/capability @@ -1125,7 +1149,7 @@ github.com/urfave/cli/v2 github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v7 v7.1.5 +# github.com/vbauerster/mpb/v7 v7.4.1 ## explicit; go 1.14 github.com/vbauerster/mpb/v7 github.com/vbauerster/mpb/v7/cwriter