diff --git a/go.mod b/go.mod index a63ba7294bf..3185f571d2b 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/container-orchestrated-devices/container-device-interface v0.4.0 github.com/containerd/cgroups v1.0.4 - github.com/containerd/containerd v1.5.8 + github.com/containerd/containerd v1.6.1 github.com/containerd/cri-containerd v1.19.0 github.com/containerd/fifo v1.0.0 github.com/containerd/ttrpc v1.1.0 @@ -19,10 +19,10 @@ require ( github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f github.com/containers/conmon v2.0.20+incompatible github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c - github.com/containers/image/v5 v5.17.0 + github.com/containers/image/v5 v5.21.1 github.com/containers/ocicrypt v1.1.5 github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d - github.com/containers/storage v1.37.0 + github.com/containers/storage v1.40.0 github.com/coreos/go-systemd/v22 v22.3.2 github.com/cpuguy83/go-md2man v1.0.10 github.com/creack/pty v1.1.18 @@ -86,7 +86,7 @@ require ( capnproto.org/go/capnp/v3 v3.0.0-20220504220541-7329fad9c27b // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/hcsshim v0.9.2 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/VividCortex/ewma v1.2.0 // indirect @@ -104,16 +104,17 @@ require ( github.com/cilium/ebpf v0.7.0 // indirect github.com/containerd/console v1.0.3 // indirect github.com/containerd/go-runc v1.0.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.10.1 // indirect - github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b // indirect + github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect + github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect github.com/containers/psgo v1.7.1 // indirect github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/disiqueira/gotree/v3 v3.0.2 // indirect - github.com/docker/docker v20.10.12+incompatible // indirect + github.com/docker/docker v20.10.14+incompatible // indirect github.com/docker/docker-credential-helpers v0.6.4 // indirect github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc // indirect github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 // indirect @@ -158,7 +159,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kevinburke/ssh_config v1.1.0 // indirect - github.com/klauspost/compress v1.14.2 // indirect + github.com/klauspost/compress v1.15.2 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/lithammer/dedent v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -175,22 +176,22 @@ require ( github.com/mmarkdown/mmark v2.0.40+incompatible // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/mount v0.2.0 // indirect - github.com/moby/sys/mountinfo v0.6.0 // 
indirect + github.com/moby/sys/mountinfo v0.6.1 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/mtrmac/gpgme v0.1.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onsi/ginkgo v1.16.5 // indirect github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 // indirect - github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 // indirect + github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/proglottis/gpgme v0.1.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect @@ -207,10 +208,11 @@ require ( github.com/spiegel-im-spiegel/errs v1.0.2 // indirect github.com/spiegel-im-spiegel/go-cvss v0.4.0 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect + github.com/sylabs/sif/v2 v2.7.0 // indirect github.com/tchap/go-patricia v2.3.0+incompatible // indirect github.com/ulikunitz/xz v0.5.10 // indirect github.com/vbatts/tar-split v0.11.2 // indirect - github.com/vbauerster/mpb/v7 v7.1.5 // indirect + github.com/vbauerster/mpb/v7 v7.4.1 // indirect github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect github.com/xanzy/go-gitlab v0.68.0 // indirect github.com/xanzy/ssh-agent v0.3.0 // indirect diff --git a/go.sum b/go.sum index af323ecd782..77901bb24e0 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ 4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= capnproto.org/go/capnp/v3 v3.0.0-20220504220541-7329fad9c27b h1:Dyvj8g9LBROQ6vC1GyfOPrH7GHvZAjMCIGwclTTsFaE= @@ -64,6 +65,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo= github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod 
h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -124,6 +126,7 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= @@ -146,8 +149,9 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5 h1:cSHEbLj0GZeHM1mWG84qEnGFojNEQ83W7cwaPRjcwXU= +github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= @@ -312,6 +316,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -339,16 +344,18 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.8 h1:NmkCC1/QxyZFBny8JogwLpOy2f+VEbO/f6bV2Mqtwuw= github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= +github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o= +github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= 
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA= +github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= github.com/containerd/cri-containerd v1.19.0 h1:PcTvvl+SHaekCMQZFQkYjn1RKlYrK6khYbuhOeF68k0= github.com/containerd/cri-containerd v1.19.0/go.mod h1:wxbGdReWGCalzGOEpifoHeYCK4xAgnj4o/4bVB+9voU= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= @@ -360,6 +367,8 @@ github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= @@ -370,6 +379,7 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= @@ -377,8 +387,8 @@ github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201217071531-2b97b5837 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU= 
github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0= -github.com/containerd/stargz-snapshotter/estargz v0.10.1 h1:hd1EoVjI2Ax8Cr64tdYqnJ4i4pZU49FkEf5kU8KxQng= -github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0= +github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk= +github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -418,15 +428,17 @@ github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c h1:x6gYpFEXrL github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c/go.mod h1:YogXfQJ+PfB6Dsc/tZDoTazDBdWiAUYnyElblkEtALo= github.com/containers/image/v5 v5.10.4/go.mod h1:SgIbWEedCNBbn2FI5cH0/jed1Ecy2s8XK5zTxvJTzII= github.com/containers/image/v5 v5.16.0/go.mod h1:XgTpfAPLRGOd1XYyCU5cISFr777bLmOerCSpt/v7+Q4= -github.com/containers/image/v5 v5.17.0 h1:KS5pro80CCsSp5qDBTMmSAWQo+xcBX19zUPExmYX2OQ= -github.com/containers/image/v5 v5.17.0/go.mod h1:GnYVusVRFPMMTAAUkrcS8NNSpBp8oyrjOUe04AAmRr4= -github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= +github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q= +github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU= +github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ= github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc= github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d h1:cztBXICnZeM0Cz/7IzbLbMs8jYCnJmil94NJszFYNXA= @@ -437,8 +449,9 @@ github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3E github.com/containers/storage v1.24.8/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ= github.com/containers/storage v1.35.0/go.mod h1:qzYhasQP2/V9D9XdO+vRwkHBhsBO0oznMLzzRDQ8s20= github.com/containers/storage v1.36.0/go.mod h1:vbd3SKVQNHdmU5qQI6hTEcKPxnZkGqydG4f6uwrI5a8= -github.com/containers/storage v1.37.0 h1:HVhDsur6sx889ZIZ1d1kEiOzv3gsr5q0diX2VZmOdSg= github.com/containers/storage 
v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4= +github.com/containers/storage v1.40.0 h1:erKY3ZVgp2F8+9jldwkJKJezrToNYs1YH/gqbPuwHes= +github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs= github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -512,15 +525,16 @@ github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.14+incompatible h1:+T9/PRYWNDo5SZl5qS1r9Mo/0Q8AwxKKPtu9S1yxM0w= +github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= @@ -1042,8 +1056,9 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= -github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw= +github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= 
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -1200,9 +1215,12 @@ github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+S github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo= github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.1 h1:+H/KnGEAGRpTrEAqNVQ2AM3SiwMgJUt/TXj+Z8cmCIc= +github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= @@ -1225,7 +1243,6 @@ github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1: github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s= github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -1301,7 +1318,9 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20200206005212-79b036d80240/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4= github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg= github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1335,7 +1354,6 @@ github.com/opencontainers/selinux v1.8.0/go.mod 
h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3 github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= -github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= @@ -1344,8 +1362,9 @@ github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656/go.mod h1 github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/orangecms/go-framebuffer v0.0.0-20200613202404-a0700d90c330/go.mod h1:3Myb/UszJY32F2G7yGkUtcW/ejHpjlGfYLim7cv2uKA= -github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= @@ -1384,6 +1403,8 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/proglottis/gpgme v0.1.1 h1:72xI0pt/hy7pqsRxk32KExITkXp+RZErRizsA+up/lQ= +github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -1478,6 +1499,8 @@ github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang 
v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= @@ -1576,9 +1599,12 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk= +github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ= github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1605,6 +1631,7 @@ github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2Ogf github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/twitchtv/twirp v5.8.0+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= github.com/u-root/gobusybox/src v0.0.0-20220120221406-4e2fbb8f41bf/go.mod h1:n6S68wL67GT3KLZWmLzrJCr1rMLQ6xzA9rv2k46kF2Q= github.com/u-root/iscsinl v0.1.1-0.20210528121423-84c32645822a/go.mod h1:RWIgJWqm9/0gjBZ0Hl8iR6MVGzZ+yAda2uqqLmetE2I= @@ -1647,8 +1674,8 @@ github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8J github.com/vbauerster/mpb/v6 v6.0.4/go.mod h1:a/+JT57gqh6Du0Ay5jSR+uBMfXGdlR7VQlGP52fJxLM= github.com/vbauerster/mpb/v7 v7.1.3/go.mod h1:X5GlohZw2fIpypMXWaKart+HGSAjpz49skxkDk+ZL7c= github.com/vbauerster/mpb/v7 v7.1.4/go.mod h1:4zulrZfvshMOnd2APiHgWS9Yrw08AzZVRr9G11tkpcQ= -github.com/vbauerster/mpb/v7 v7.1.5 h1:vtUEUfQHmNeJETyF4AcRCOV6RC4wqFwNORy52UMXPbQ= -github.com/vbauerster/mpb/v7 v7.1.5/go.mod h1:4M8+qAoQqV60WDNktBM5k05i1iTrXE7rjKOHEVkVlec= +github.com/vbauerster/mpb/v7 v7.4.1 h1:NhLMWQ3gNg2KJR8oeA9lO8Xvq+eNPmixDmB6JEQOUdA= +github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo= github.com/vdemeester/k8s-pkg-credentialprovider v1.18.1-0.20201019120933-f1d16962a4db/go.mod h1:grWy0bkr1XO6hqbaaCKaPXqkBVlMGHYG6PGykktwbJc= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= @@ -1747,6 +1774,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 
h1:giGm8w67Ja7amYNfYMdm go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 h1:VQbUHoJqytHHSJ1OZodPH9tvZZSVzUHjPHpkO85sT6k= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= @@ -1807,6 +1835,7 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1929,10 +1958,11 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2099,12 +2129,10 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210925032602-92d5a993a665/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2115,6 +2143,7 @@ golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2356,7 +2385,6 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= @@ -2453,6 +2481,7 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= @@ -2500,6 +2529,7 @@ k8s.io/system-validators v1.7.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0V k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210305010621-2afb4311ab10/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils 
v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go index 0d2eb45b8e1..0952d8c7392 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -74,6 +74,15 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) } + // We set CompressionNone as the preferred compression algorithm because + // of compression side channel attacks, then append the configured + // DefaultCompressionAlgo if any is set (to signal support for cases + // where the application knows that using compression is safe). + selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)} + if config.Compression() != packet.CompressionNone { + selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) + } + // And for DefaultMode. selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())} if config.AEAD().Mode() != packet.AEADModeEAX { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index c3fedf7a250..44c6ebf0ddd 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -37,15 +37,17 @@ type Identity struct { Name string // by convention, has the form "Full Name (comment) " UserId *packet.UserId SelfSignature *packet.Signature - Signatures []*packet.Signature + Revocations []*packet.Signature + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures } // A Subkey is an additional public key in an Entity. Subkeys can be used for // encryption. type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature + Revocations []*packet.Signature } // A Key identifies a specific public key in an Entity. This is either the @@ -55,6 +57,7 @@ type Key struct { PublicKey *packet.PublicKey PrivateKey *packet.PrivateKey SelfSignature *packet.Signature + Revocations []*packet.Signature } // A KeyRing provides access to public and private keys. @@ -71,28 +74,58 @@ type KeyRing interface { DecryptionKeys() []Key } -// PrimaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. +// PrimaryIdentity returns an Identity, preferring non-revoked identities, +// identities marked as primary, or the latest-created identity, in that order. 
func (e *Entity) PrimaryIdentity() *Identity { - var firstIdentity *Identity + var primaryIdentity *Identity for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident + if shouldPreferIdentity(primaryIdentity, ident) { + primaryIdentity = ident } } - return firstIdentity + return primaryIdentity +} + +func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { + if (existingId == nil) { + return true + } + + if (len(existingId.Revocations) > len(potentialNewId.Revocations)) { + return true + } + + if (len(existingId.Revocations) < len(potentialNewId.Revocations)) { + return false + } + + if existingId.SelfSignature == nil { + return true + } + + if (existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && + !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId)) { + return false + } + + if (!(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) && + potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) { + return true + } + + return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime) } // EncryptionKey returns the best candidate Key for encrypting a message to the // given Entity. func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { - // Fail to find any encryption key if the primary key has expired. + // Fail to find any encryption key if the... i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked return Key{}, false } @@ -104,6 +137,8 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { subkey.Sig.FlagEncryptCommunications && subkey.PublicKey.PubKeyAlgo.CanEncrypt() && !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { candidateSubkey = i maxTime = subkey.Sig.CreationTime @@ -112,17 +147,16 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { if candidateSubkey != -1 { subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } // If we don't have any candidate subkeys for encryption and // the primary key doesn't have any usage metadata then we // assume that the primary key is ok. Or, if the primary key is // marked as ok to encrypt with, then we can obviously use it. - // Also, check expiry again just to be safe. 
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !primaryKeyExpired { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + e.PrimaryKey.PubKeyAlgo.CanEncrypt() { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true } return Key{}, false @@ -137,10 +171,13 @@ func (e *Entity) SigningKey(now time.Time) (Key, bool) { // SigningKeyById return the Key for signing a message with this // Entity and keyID. func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { - // Fail to find any signing key if the primary key has expired. + // Fail to find any signing key if the... i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked return Key{}, false } @@ -152,8 +189,10 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { subkey.Sig.FlagSign && subkey.PublicKey.PubKeyAlgo.CanSign() && !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) && - (id == 0 || subkey.PrivateKey.KeyId == id) { + (id == 0 || subkey.PublicKey.KeyId == id) { candidateSubkey = idx maxTime = subkey.Sig.CreationTime } @@ -161,22 +200,63 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { if candidateSubkey != -1 { subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } // If we have no candidate subkey then we assume that it's ok to sign // with the primary key. Or, if the primary key is marked as ok to - // sign with, then we can use it. Also, check expiry again just to be safe. + // sign with, then we can use it. if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - e.PrimaryKey.PubKeyAlgo.CanSign() && !primaryKeyExpired && - (id == 0 || e.PrivateKey.KeyId == id) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + e.PrimaryKey.PubKeyAlgo.CanSign() && + (id == 0 || e.PrimaryKey.KeyId == id) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true } // No keys with a valid Signing Flag or no keys matched the id passed in return Key{}, false } +func revoked(revocations []*packet.Signature, now time.Time) bool { + for _, revocation := range revocations { + if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised { + // If the key is compromised, the key is considered revoked even before the revocation date. + return true + } + if !revocation.SigExpired(now) { + return true + } + } + return false +} + +// Revoked returns whether the entity has any direct key revocation signatures. +// Note that third-party revocation signatures are not supported. +// Note also that Identity and Subkey revocation should be checked separately. +func (e *Entity) Revoked(now time.Time) bool { + return revoked(e.Revocations, now) +} + +// Revoked returns whether the identity has been revoked by a self-signature. 
+// Note that third-party revocation signatures are not supported. +func (i *Identity) Revoked(now time.Time) bool { + return revoked(i.Revocations, now) +} + +// Revoked returns whether the subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (s *Subkey) Revoked(now time.Time) bool { + return revoked(s.Revocations, now) +} + +// Revoked returns whether the key or subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +// Note also that Identity revocation should be checked separately. +// Normally, it's not necessary to call this function, except on keys returned by +// KeysById or KeysByIdUsage. +func (key *Key) Revoked(now time.Time) bool { + return revoked(key.Revocations, now) +} + // An EntityList contains one or more Entities. type EntityList []*Entity @@ -184,21 +264,14 @@ type EntityList []*Entity func (el EntityList) KeysById(id uint64) (keys []Key) { for _, e := range el { if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + ident := e.PrimaryIdentity() + selfSig := ident.SelfSignature + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) } for _, subKey := range e.Subkeys { if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) } } } @@ -210,15 +283,7 @@ func (el EntityList) KeysById(id uint64) (keys []Key) { // the bitwise-OR of packet.KeyFlag* values. 
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { + if key.SelfSignature != nil && key.SelfSignature.FlagsValid && requiredUsage != 0 { var usage byte if key.SelfSignature.FlagCertify { usage |= packet.KeyFlagCertify @@ -247,7 +312,7 @@ func (el EntityList) DecryptionKeys() (keys []Key) { for _, e := range el { for _, subKey := range e.Subkeys { if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) } } } @@ -442,11 +507,22 @@ func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { break } - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if sig.SigType != packet.SigTypeGenericCert && + sig.SigType != packet.SigTypePersonaCert && + sig.SigType != packet.SigTypeCasualCert && + sig.SigType != packet.SigTypePositiveCert && + sig.SigType != packet.SigTypeCertificationRevocation { + return errors.StructuralError("user ID signature with wrong type") + } + + + if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { return errors.StructuralError("user ID self-signature invalid: " + err.Error()) } - if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + if sig.SigType == packet.SigTypeCertificationRevocation { + identity.Revocations = append(identity.Revocations, sig) + } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { identity.SelfSignature = sig } identity.Signatures = append(identity.Signatures, sig) @@ -488,9 +564,9 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p switch sig.SigType { case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig + subKey.Revocations = append(subKey.Revocations, sig) case packet.SigTypeSubkeyBinding: - if shouldReplaceSubkeySig(subKey.Sig, sig) { + if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) { subKey.Sig = sig } } @@ -505,22 +581,6 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p return nil } -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - // SerializePrivate serializes an Entity, including private key material, but // excluding signatures from other entities, to the given Writer. // Identities and subkeys are re-signed in case they changed since NewEntry. 
@@ -549,20 +609,37 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo if err != nil { return } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { return } if reSign { + if ident.SelfSignature == nil { + return goerrors.New("openpgp: can't re-sign identity without valid self-signature") + } err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) if err != nil { return } } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return + for _, revocation := range ident.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + if ident.SelfSignature != nil { + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } } } for _, subkey := range e.Subkeys { @@ -583,6 +660,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo } } } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } err = subkey.Sig.Serialize(w) if err != nil { return @@ -598,6 +681,12 @@ func (e *Entity) Serialize(w io.Writer) error { if err != nil { return err } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -615,6 +704,12 @@ func (e *Entity) Serialize(w io.Writer) error { if err != nil { return err } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } err = subkey.Sig.Serialize(w) if err != nil { return err @@ -659,14 +754,13 @@ func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Co // specified reason code and text (RFC4880 section-5.2.3.23). // If config is nil, sensible defaults will be used. 
func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { - reasonCode := uint8(reason) revSig := &packet.Signature{ Version: e.PrimaryKey.Version, CreationTime: config.Now(), SigType: packet.SigTypeKeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, Hash: config.Hash(), - RevocationReason: &reasonCode, + RevocationReason: &reason, RevocationReasonText: reasonText, IssuerKeyId: &e.PrimaryKey.KeyId, } @@ -686,22 +780,21 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea return errors.InvalidArgumentError("given subkey is not associated with this key") } - reasonCode := uint8(reason) revSig := &packet.Signature{ Version: e.PrimaryKey.Version, CreationTime: config.Now(), SigType: packet.SigTypeSubkeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, Hash: config.Hash(), - RevocationReason: &reasonCode, + RevocationReason: &reason, RevocationReasonText: reasonText, IssuerKeyId: &e.PrimaryKey.KeyId, } - if err := revSig.RevokeKey(sk.PublicKey, e.PrivateKey, config); err != nil { + if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil { return err } - sk.Sig = revSig + sk.Revocations = append(sk.Revocations, revSig) return nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go index 21d17c2f8be..a2219e64d1c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -138,6 +138,69 @@ heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB =IKnw -----END PGP PUBLIC KEY BLOCK-----` +const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: OpenPGP.js v4.10.10 +Comment: https://openpgpjs.org + +xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q +lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN +91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO +XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb +naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX +8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB +AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK +ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS +gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT +6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ +q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy ++I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY +bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j +SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk +kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu +Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC +GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW +IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1 +mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG +LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK +zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/ +BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ +aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD +rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm +yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2 +UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb 
+YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt +amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM +u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/ +Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS +gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB +cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N +nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN +kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF +I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb +hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z +FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw= +=+2T8 +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll +JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+ +iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0 +yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e +/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI +q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V +xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw +ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB +B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh +BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u +8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA +mcG3/TwG5GSQUUPkrDsGDA== +=mFWy +-----END PGP PUBLIC KEY BLOCK----- +` + const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR @@ -304,33 +367,34 @@ X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w -bGU+wsEUBBMBCgBIBYJfiWKKBYkBX+/UBQsJCAcCCRD7/MgqAV5zMAYVCgkICwIE -FgIDAQIXgAIbAwIeARYhBNGmbhojsYLJmA94jPv8yCoBXnMwAAAf0wv/a++a3DkL -CttbK4LRIiSry2wb97mxYQoWYzvkKYD1IP/KiamRwzjBKuRSE0qZ2uonQDRGc+zl -1H9dwkDLL4T9uJngCCPCBgFW/hOFPvF+WYEKHtOzunqx6KwDHkpdH+hpzfFzIhDo -aXiVnDGvJ3H/bVTKq1m2KPXO2ckkXPQXJX9Fx6kPHdvcq+3ZI75IzbD/ue5lBcsy -OKLzVu+KLxzlzGui6v1V0fTvU/uqvHlvUxcDAqMnIsDUPakjK2RDeQ38qtN6+PQ5 -/dT7vx2Wtzesqn2eDbDf5uRfSgmp2hJLJniAKjMCBVAJiOgPb0LXUIcwCGxaiWOA -g5ZvNWjX5bZ/FxqpLpOE9OReI5YY7ns7zqP4thLYYWe0Qdp9a2ezVrgzgrh/SLla -d73x/S9TrmLtYGGlbVUByJW+GXjW2Tt6iaa/WDFzx8NvZ/wzIAdGSEfLcvS+JBSP -2ppdY5Ac/2dK3PzYABkHvB/rhXIwlXnrFDU9efRHZfFqQqGauA64wfdIzsDNBF2l -nPIBDADWML9cbGMrp12CtF9b2P6z9TTT74S8iyBOzaSvdGDQY/sUtZXRg21HWamX -nn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3ofneYXoG+zeKc4dC86wa1TR2q9vW+RMX -SO4uImA+Uzula/6k1DogDf28qhCxMwG/i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6 -rrd5y2AObaifV7wIhEJnvqgFXDN2RXGjLeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA -0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/ -wGlQ01rh827KVZW4lXvqsge+wtnWlszcselGATyzqOK9LdHPdZGzROZYI2e8c+pa -LNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5nTU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV -8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwz -j8sxH48AEQEAAcLA9gQYAQoAIBYhBNGmbhojsYLJmA94jPv8yCoBXnMwBQJdpZzy -AhsMAAoJEPv8yCoBXnMw6f8L/26C34dkjBffTzMj5Bdzm8MtF67OYneJ4TQMw7+4 -1IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F66h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZ -QanYmtSxcVV2PL9+QEiNN3tzluhaWO//rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zp -f3u0k14itcv6alKY8+rLZvO1wIIeRZLmU0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn 
-3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzRLV9EXkbhIMez0deCVdeo+wFFklh8/5VK -2b0vk/+wqMJxfpa1lHvJLobzOP9fvrswsr92MA2+k901WeISR7qEzcI0Fdg8AyFA -ExaEK6VyjP7SXGLwvfisw34OxuZr3qmx1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWi -f9RSK4xjzRTe56iPeiSJJOIciMP9i2ldI+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj -5KjhX2PVNEJd3XZRzaXZE2aAMQ== -=522n +bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe +ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY +495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ +mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t +yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG +Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN +/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm +pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR +eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH +DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05 +ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ +UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT +74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3 +ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/ +i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj +LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/ +iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc +selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n +TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+ +Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk +jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6 +6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO// +rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm +U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR +LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw +sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx +1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld +I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=AmgT -----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go index fe0da9d3685..65203813778 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -395,6 +395,7 @@ const ( SigTypeDirectSignature = 0x1F SigTypeKeyRevocation = 0x20 SigTypeSubkeyRevocation = 0x28 + SigTypeCertificationRevocation = 0x30 ) // PublicKeyAlgorithm represents the different public key system specified for diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go index 28971c1ada2..297dc40ba8c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -735,13 +735,13 @@ func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { } // VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, -// made by the passed in signingKey. 
-func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signingKey *PublicKey) (err error) {
-	h, err := keyRevocationHash(pk, sig.Hash)
+// made by this public key, of the signed subkey.
+func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) {
+	h, err := keySignatureHash(pk, signed, sig.Hash)
 	if err != nil {
 		return err
 	}
-	return signingKey.VerifySignature(h, sig)
+	return pk.VerifySignature(h, sig)
 }
 
 // userIdSignatureHash returns a Hash of the message that needs to be signed
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
index 6b1704e4292..86602d02904 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
@@ -28,6 +28,10 @@ const (
 	KeyFlagSign
 	KeyFlagEncryptCommunications
 	KeyFlagEncryptStorage
+	KeyFlagSplitKey
+	KeyFlagAuthenticate
+	_
+	KeyFlagGroupKey
 )
 
 // Signature represents a signature. See RFC 4880, section 5.2.
@@ -68,14 +72,19 @@ type Signature struct {
 	IssuerFingerprint []byte
 	IsPrimaryId       *bool
 
+	// PolicyURI can be set to the URI of a document that describes the
+	// policy under which the signature was issued. See RFC 4880, section
+	// 5.2.3.20 for details.
+	PolicyURI string
+
 	// FlagsValid is set if any flags were given. See RFC 4880, section
 	// 5.2.3.21 for details.
-	FlagsValid                                                           bool
-	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool
+	FlagsValid                                                                                                         bool
+	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool
 
 	// RevocationReason is set if this signature has been revoked.
 	// See RFC 4880, section 5.2.3.23 for details.
- RevocationReason *uint8 + RevocationReason *ReasonForRevocation RevocationReasonText string // In a self-signature, these flags are set there is a features subpacket @@ -218,6 +227,7 @@ const ( prefHashAlgosSubpacket signatureSubpacketType = 21 prefCompressionSubpacket signatureSubpacketType = 22 primaryUserIdSubpacket signatureSubpacketType = 25 + policyUriSubpacket signatureSubpacketType = 26 keyFlagsSubpacket signatureSubpacketType = 27 reasonForRevocationSubpacket signatureSubpacketType = 29 featuresSubpacket signatureSubpacketType = 30 @@ -368,6 +378,15 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r if subpacket[0]&KeyFlagEncryptStorage != 0 { sig.FlagEncryptStorage = true } + if subpacket[0]&KeyFlagSplitKey != 0 { + sig.FlagSplitKey = true + } + if subpacket[0]&KeyFlagAuthenticate != 0 { + sig.FlagAuthenticate = true + } + if subpacket[0]&KeyFlagGroupKey != 0 { + sig.FlagGroupKey = true + } case reasonForRevocationSubpacket: // Reason For Revocation, section 5.2.3.23 if !isHashed { @@ -377,8 +396,8 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r err = errors.StructuralError("empty revocation reason subpacket") return } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] + sig.RevocationReason = new(ReasonForRevocation) + *sig.RevocationReason = ReasonForRevocation(subpacket[0]) sig.RevocationReasonText = string(subpacket[1:]) case featuresSubpacket: // Features subpacket, section 5.2.3.24 specifies a very general @@ -416,6 +435,12 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) } + case policyUriSubpacket: + // Policy URI, section 5.2.3.20 + if !isHashed { + return + } + sig.PolicyURI = string(subpacket) case issuerFingerprintSubpacket: v, l := subpacket[0], len(subpacket[1:]) if v == 5 && l != 32 || v != 5 && l != 20 { @@ -507,6 +532,9 @@ func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { if subpacket.hashed == hashed { n := serializeSubpacketLength(to, len(subpacket.contents)+1) to[n] = byte(subpacket.subpacketType) + if subpacket.isCritical { + to[n] |= 0x80 + } to = to[1+n:] n = copy(to, subpacket.contents) to = to[n:] @@ -714,6 +742,14 @@ func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config return sig.Sign(h, priv, config) } +// RevokeSubkey computes a subkey revocation signature of pub using priv. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error { + // Identical to a subkey binding signature + return sig.SignKey(pub, priv, config) +} + // Serialize marshals sig to w. Sign, SignUserId or SignKey must have been // called first. func (sig *Signature) Serialize(w io.Writer) (err error) { @@ -826,7 +862,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp } if sig.IssuerFingerprint != nil { contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) 
- subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, true, contents}) + subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents}) } if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { sigLifetime := make([]byte, 4) @@ -850,6 +886,15 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.FlagEncryptStorage { flags |= KeyFlagEncryptStorage } + if sig.FlagSplitKey { + flags |= KeyFlagSplitKey + } + if sig.FlagAuthenticate { + flags |= KeyFlagAuthenticate + } + if sig.FlagGroupKey { + flags |= KeyFlagGroupKey + } subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) } @@ -892,6 +937,10 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) } + if len(sig.PolicyURI) > 0 { + subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) + } + if len(sig.PreferredAEAD) > 0 { subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD}) } @@ -899,7 +948,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. if sig.RevocationReason != nil { subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, - append([]uint8{*sig.RevocationReason}, []uint8(sig.RevocationReasonText)...)}) + append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) } // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. 
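Taken together, the keys.go and signature.go hunks above change revocation in three ways: the revocation signature uses the primary key's own algorithm instead of hard-coded RSA, the reason code becomes a typed packet.ReasonForRevocation, and subkey revocations accumulate in Subkey.Revocations rather than overwriting sk.Sig. A minimal sketch of the resulting flow, assuming the vendored go-crypto API exactly as shown in this diff (packet.KeyCompromised is one of its ReasonForRevocation constants; error handling is shortened for brevity):

```go
// Sketch only: exercises the revised revocation API introduced above.
package main

import (
	"bytes"
	"log"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	e, err := openpgp.NewEntity("Alice", "", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}

	// RevokeSubkey now signs with the primary key's own algorithm and
	// appends to sk.Revocations instead of clobbering sk.Sig.
	sk := &e.Subkeys[0]
	if err := e.RevokeSubkey(sk, packet.KeyCompromised, "subkey material leaked", nil); err != nil {
		log.Fatal(err)
	}

	// Verification now runs against the primary key, over the revoked subkey.
	revSig := sk.Revocations[len(sk.Revocations)-1]
	if err := e.PrimaryKey.VerifySubkeyRevocationSignature(revSig, sk.PublicKey); err != nil {
		log.Fatal(err)
	}

	// Serialize emits the collected key, identity, and subkey revocations.
	var buf bytes.Buffer
	if err := e.Serialize(&buf); err != nil {
		log.Fatal(err)
	}
	log.Printf("serialized %d bytes", buf.Len())
}
```

Keeping revocations in their own slice is what lets serializePrivate and Serialize, patched earlier in this diff, emit both the original binding signature and any revocations side by side.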
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go index d6bea7d4acc..614fbafd5e1 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -156,5 +156,12 @@ func parseUserId(id string) (name, comment, email string) { name = strings.TrimSpace(id[n.start:n.end]) comment = strings.TrimSpace(id[c.start:c.end]) email = strings.TrimSpace(id[e.start:e.end]) + + // RFC 2822 3.4: alternate simple form of a mailbox + if email == "" && strings.ContainsRune(name, '@') { + email = name + name = "" + } + return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index a649ebbab39..acb7cc6e29f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -370,11 +370,25 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // If signature KeyID matches if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { - scr.md.Signature = sig - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - if scr.md.SignatureError == nil && scr.md.Signature.SigExpired(scr.config.Now()) { - scr.md.SignatureError = errors.ErrSignatureExpired + key := scr.md.SignedBy + signatureError := key.PublicKey.VerifySignature(scr.h, sig) + if signatureError == nil { + now := scr.config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + signatureError = errors.ErrKeyRevoked + } + if sig.SigExpired(now) { + signatureError = errors.ErrSignatureExpired + } + if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { + signatureError = errors.ErrKeyExpired + } } + scr.md.Signature = sig + scr.md.SignatureError = signatureError } else { scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) } @@ -483,10 +497,16 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, err = key.PublicKey.VerifySignature(h, sig) if err == nil { now := config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + return key.Entity, errors.ErrKeyRevoked + } if sig.SigExpired(now) { return key.Entity, errors.ErrSignatureExpired } - if key.PublicKey.KeyExpired(key.SelfSignature, now) { + if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { return key.Entity, errors.ErrKeyExpired } return key.Entity, nil diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index bb74b13872a..23a24f20305 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -208,6 +208,15 @@ func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signe return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) } +// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. 
The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) +} + // writeAndSign writes the data as a payload package and, optionally, signs // it. hints contains optional information, that is also encrypted, // that aids the recipients in processing the message. The resulting @@ -358,7 +367,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En var ok bool encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") } sig := to[i].PrimaryIdentity().SelfSignature @@ -416,7 +425,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } var payload io.WriteCloser - if aeadSupported { + if config.AEAD() != nil && aeadSupported { payload, err = packet.SerializeAEADEncrypted(dataWriter, symKey, cipher, mode, config) if err != nil { return diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go index b7f86da8695..d3d9839d639 100644 --- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go @@ -14,5 +14,5 @@ limitations under the License. */ -// Package events defines the event pushing and subscription service. +// Package events defines the ttrpc event service. package events diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s b/vendor/github.com/containerd/containerd/api/types/task/doc.go similarity index 91% rename from vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s rename to vendor/github.com/containerd/containerd/api/types/task/doc.go index c073fa4ad7c..e10c7a46993 100644 --- a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s +++ b/vendor/github.com/containerd/containerd/api/types/task/doc.go @@ -13,3 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ + +// Package task defines the task service. 
+package task diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index 8b600673fe3..5606cc88a94 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,15 +21,14 @@ package cio import ( "context" + "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" "syscall" "github.com/containerd/fifo" - "github.com/pkg/errors" ) // NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root @@ -38,7 +38,7 @@ func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) { return nil, err } } - dir, err := ioutil.TempDir(root, "") + dir, err := os.MkdirTemp(root, "") if err != nil { return nil, err } @@ -112,7 +112,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { if fifos.Stdin != "" { if f.Stdin, retErr = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stdin fifo") + return f, fmt.Errorf("failed to open stdin fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdin != nil { @@ -122,7 +122,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if fifos.Stdout != "" { if f.Stdout, retErr = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stdout fifo") + return f, fmt.Errorf("failed to open stdout fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdout != nil { @@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if !fifos.Terminal && fifos.Stderr != "" { if f.Stderr, retErr = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stderr fifo") + return f, fmt.Errorf("failed to open stderr fifo: %w", retErr) } } return f, nil diff --git a/vendor/github.com/containerd/containerd/cio/io_windows.go b/vendor/github.com/containerd/containerd/cio/io_windows.go index ded475788f6..f3d736a6d42 100644 --- a/vendor/github.com/containerd/containerd/cio/io_windows.go +++ b/vendor/github.com/containerd/containerd/cio/io_windows.go @@ -23,7 +23,6 @@ import ( winio "github.com/Microsoft/go-winio" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) const pipeRoot = `\\.\pipe` @@ -54,7 +53,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdin != "" { l, err := winio.ListenPipe(fifos.Stdin, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin) + return nil, fmt.Errorf("failed to create stdin pipe %s: %w", fifos.Stdin, err) } cios.closers = append(cios.closers, l) @@ -77,7 +76,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdout != "" { l, err := winio.ListenPipe(fifos.Stdout, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout) + return nil, fmt.Errorf("failed to create stdout pipe %s: %w", fifos.Stdout, err) } cios.closers = append(cios.closers, l) @@ -100,7 +99,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stderr != "" { l, err := winio.ListenPipe(fifos.Stderr, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to 
create stderr pipe %s", fifos.Stderr) + return nil, fmt.Errorf("failed to create stderr pipe %s: %w", fifos.Stderr, err) } cios.closers = append(cios.closers, l) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go b/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go new file mode 100644 index 00000000000..1391884cde7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package defaults + +const ( + // DefaultRootDir is the default location used by containerd to store + // persistent data + DefaultRootDir = "/var/lib/containerd" + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = "/var/run/containerd" + // DefaultAddress is the default unix socket address + DefaultAddress = "/var/run/containerd/containerd.sock" + // DefaultDebugAddress is the default unix socket address for pprof data + DefaultDebugAddress = "/var/run/containerd/debug.sock" + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. + DefaultFIFODir = "/var/run/containerd/fifo" + // DefaultRuntime would be a multiple of choices, thus empty + DefaultRuntime = "" + // DefaultConfigDir is the default location for config files. + DefaultConfigDir = "/etc/containerd" +) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 6b69cd06b9b..8e2619a381a 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index a80700075f9..9f4bed8b077 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go index 05a35228ca4..87622559708 100644 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -17,7 +17,7 @@ // Package errdefs defines the common errors used throughout containerd // packages. // -// Use with errors.Wrap and error.Wrapf to add context to an error. +// Use with fmt.Errorf to add context to an error. // // To detect an error class, use the IsXXX functions to tell whether an error // is of a certain type. 
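The errdefs changes here and below drop github.com/pkg/errors in favor of stdlib error wrapping. A small sketch of the intended calling pattern, assuming only errdefs.ErrNotFound and errdefs.IsNotFound from the vendored package; findImage is a hypothetical stand-in for any store lookup:

```go
// Sketch only: wrapping and classifying errdefs errors with fmt.Errorf("%w").
package main

import (
	"errors"
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

// findImage is a hypothetical lookup that tags its failure with a
// well-known errdefs sentinel via %w.
func findImage(name string) error {
	return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound)
}

func main() {
	err := findImage("docker.io/library/busybox:latest")

	// errors.Is sees through the fmt.Errorf("%w") wrapping...
	fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true

	// ...and so do the errdefs classifiers used throughout containerd.
	fmt.Println(errdefs.IsNotFound(err)) // true
}
```

Because the %w chain preserves the sentinel, errors.Is-based classification keeps working after the migration without any pkg/errors Cause unwinding.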
@@ -28,8 +28,7 @@ package errdefs import ( "context" - - "github.com/pkg/errors" + "errors" ) // Definitions of common error types used throughout containerd. All containerd diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index 209f63bd0fc..7a9b33e05af 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -18,9 +18,9 @@ package errdefs import ( "context" + "fmt" "strings" - "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -68,9 +68,9 @@ func ToGRPC(err error) error { // ToGRPCf maps the error to grpc error codes, assembling the formatting string // and combining it with the target error string. // -// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(errors.Wrapf(err, format, args...)) + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) } // FromGRPC returns the underlying error from a grpc service based on the grpc error code @@ -104,9 +104,9 @@ func FromGRPC(err error) error { msg := rebaseMessage(cls, err) if msg != "" { - err = errors.Wrap(cls, msg) + err = fmt.Errorf("%s: %w", msg, cls) } else { - err = errors.WithStack(cls) + err = cls } return err diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go new file mode 100644 index 00000000000..a1f385d7abd --- /dev/null +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -0,0 +1,251 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package exchange + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/typeurl" + goevents "github.com/docker/go-events" + "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" +) + +// Exchange broadcasts events +type Exchange struct { + broadcaster *goevents.Broadcaster +} + +// NewExchange returns a new event Exchange +func NewExchange() *Exchange { + return &Exchange{ + broadcaster: goevents.NewBroadcaster(), + } +} + +var _ events.Publisher = &Exchange{} +var _ events.Forwarder = &Exchange{} +var _ events.Subscriber = &Exchange{} + +// Forward accepts an envelope to be directly distributed on the exchange. +// +// This is useful when an event is forwarded on behalf of another namespace or +// when the event is propagated on behalf of another publisher. 
+func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) { + if err := validateEnvelope(envelope); err != nil { + return err + } + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error forwarding event") + } else { + logger.Debug("event forwarded") + } + }() + + return e.broadcaster.Write(envelope) +} + +// Publish packages and sends an event. The caller will be considered the +// initial publisher of the event. This means the timestamp will be calculated +// at this point and this method may read from the calling context. +func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) { + var ( + namespace string + encoded *types.Any + envelope events.Envelope + ) + + namespace, err = namespaces.NamespaceRequired(ctx) + if err != nil { + return fmt.Errorf("failed publishing event: %w", err) + } + if err := validateTopic(topic); err != nil { + return fmt.Errorf("envelope topic %q: %w", topic, err) + } + + encoded, err = typeurl.MarshalAny(event) + if err != nil { + return err + } + + envelope.Timestamp = time.Now().UTC() + envelope.Namespace = namespace + envelope.Topic = topic + envelope.Event = encoded + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error publishing event") + } else { + logger.Debug("event published") + } + }() + + return e.broadcaster.Write(&envelope) +} + +// Subscribe to events on the exchange. Events are sent through the returned +// channel ch. If an error is encountered, it will be sent on channel errs and +// errs will be closed. To end the subscription, cancel the provided context. +// +// Zero or more filters may be provided as strings. Only events that match +// *any* of the provided filters will be sent on the channel. The filters use +// the standard containerd filters package syntax. +func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) { + var ( + evch = make(chan *events.Envelope) + errq = make(chan error, 1) + channel = goevents.NewChannel(0) + queue = goevents.NewQueue(channel) + dst goevents.Sink = queue + ) + + closeAll := func() { + channel.Close() + queue.Close() + e.broadcaster.Remove(dst) + close(errq) + } + + ch = evch + errs = errq + + if len(fs) > 0 { + filter, err := filters.ParseAll(fs...) + if err != nil { + errq <- fmt.Errorf("failed parsing subscription filters: %w", err) + closeAll() + return + } + + dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool { + return filter.Match(adapt(gev)) + })) + } + + e.broadcaster.Add(dst) + + go func() { + defer closeAll() + + var err error + loop: + for { + select { + case ev := <-channel.C: + env, ok := ev.(*events.Envelope) + if !ok { + // TODO(stevvooe): For the most part, we are well protected + // from this condition. Both Forward and Publish protect + // from this. 
+ err = fmt.Errorf("invalid envelope encountered %#v; please file a bug", ev) + break + } + + select { + case evch <- env: + case <-ctx.Done(): + break loop + } + case <-ctx.Done(): + break loop + } + } + + if err == nil { + if cerr := ctx.Err(); cerr != context.Canceled { + err = cerr + } + } + + errq <- err + }() + + return +} + +func validateTopic(topic string) error { + if topic == "" { + return fmt.Errorf("must not be empty: %w", errdefs.ErrInvalidArgument) + } + + if topic[0] != '/' { + return fmt.Errorf("must start with '/': %w", errdefs.ErrInvalidArgument) + } + + if len(topic) == 1 { + return fmt.Errorf("must have at least one component: %w", errdefs.ErrInvalidArgument) + } + + components := strings.Split(topic[1:], "/") + for _, component := range components { + if err := identifiers.Validate(component); err != nil { + return fmt.Errorf("failed validation on component %q: %w", component, err) + } + } + + return nil +} + +func validateEnvelope(envelope *events.Envelope) error { + if err := identifiers.Validate(envelope.Namespace); err != nil { + return fmt.Errorf("event envelope has invalid namespace: %w", err) + } + + if err := validateTopic(envelope.Topic); err != nil { + return fmt.Errorf("envelope topic %q: %w", envelope.Topic, err) + } + + if envelope.Timestamp.IsZero() { + return fmt.Errorf("timestamp must be set on forwarded event: %w", errdefs.ErrInvalidArgument) + } + + return nil +} + +func adapt(ev interface{}) filters.Adaptor { + if adaptor, ok := ev.(filters.Adaptor); ok { + return adaptor + } + + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + return "", false + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go new file mode 100644 index 00000000000..5a9c559c1e0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/adaptor.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +// Adaptor specifies the mapping of fieldpaths to a type. For the given field +// path, the value and whether it is present should be returned. The mapping of +// the fieldpath to a field is deferred to the adaptor implementation, but +// should generally follow protobuf field path/mask semantics. +type Adaptor interface { + Field(fieldpath []string) (value string, present bool) +} + +// AdapterFunc allows implementation specific matching of fieldpaths +type AdapterFunc func(fieldpath []string) (string, bool) + +// Field returns the field name and true if it exists +func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { + return fn(fieldpath) +} diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go new file mode 100644 index 00000000000..cf09d8d9e4f --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -0,0 +1,179 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package filters defines a syntax and parser that can be used for the
+// filtration of items across the containerd API. The core is built on the
+// concept of protobuf field paths, with quoting. Several operators allow the
+// user to flexibly select items based on field presence, equality, inequality
+// and regular expressions. Flexible adaptors support working with any type.
+//
+// The syntax is fairly familiar, if you've used container ecosystem
+// projects. At the core, we base it on the concept of protobuf field
+// paths, augmenting with the ability to quote portions of the field path
+// to match arbitrary labels. These "selectors" come in the following
+// syntax:
+//
+// ```
+// <fieldpath>[<operator><value>]
+// ```
+//
+// A basic example is as follows:
+//
+// ```
+// name==foo
+// ```
+//
+// This would match all objects that have a field `name` with the value
+// `foo`. If we only want to test if the field is present, we can omit the
+// operator. This is most useful for matching labels in containerd. The
+// following will match objects that have the field "labels" and have the
+// label "foo" defined:
+//
+// ```
+// labels.foo
+// ```
+//
+// We also allow for quoting of parts of the field path to allow matching
+// of arbitrary items:
+//
+// ```
+// labels."very complex label"==something
+// ```
+//
+// We also define `!=` and `~=` as operators. The `!=` will match all
+// objects that don't match the value for a field and `~=` will compile the
+// target value as a regular expression and match the field value against that.
+//
+// Selectors can be combined using a comma, such that the resulting
+// selector will require all selectors are matched for the object to match.
+// The following example will match objects that are named `foo` and have
+// the label `bar`:
+//
+// ```
+// name==foo,labels.bar
+// ```
+//
+package filters
+
+import (
+	"regexp"
+
+	"github.com/containerd/containerd/log"
+)
+
+// Filter matches specific resources based on the provided filter
+type Filter interface {
+	Match(adaptor Adaptor) bool
+}
+
+// FilterFunc is a function that handles matching with an adaptor
+type FilterFunc func(Adaptor) bool
+
+// Match matches the FilterFunc returning true if the object matches the filter
+func (fn FilterFunc) Match(adaptor Adaptor) bool {
+	return fn(adaptor)
+}
+
+// Always is a filter that always returns true for any type of object
+var Always FilterFunc = func(adaptor Adaptor) bool {
+	return true
+}
+
+// Any allows multiple filters to be matched against the object
+type Any []Filter
+
+// Match returns true if any of the provided filters are true
+func (m Any) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if m.Match(adaptor) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// All allows multiple filters to be matched against the object
+type All []Filter
+
+// Match only returns true if all filters match the object
+func (m All) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if !m.Match(adaptor) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type operator int
+
+const (
+	operatorPresent = iota
+	operatorEqual
+	operatorNotEqual
+	operatorMatches
+)
+
+func (op operator) String() string {
+	switch op {
+	case operatorPresent:
+		return "?"
+	case operatorEqual:
+		return "=="
+	case operatorNotEqual:
+		return "!="
+	case operatorMatches:
+		return "~="
+	}
+
+	return "unknown"
+}
+
+type selector struct {
+	fieldpath []string
+	operator  operator
+	value     string
+	re        *regexp.Regexp
+}
+
+func (m selector) Match(adaptor Adaptor) bool {
+	value, present := adaptor.Field(m.fieldpath)
+
+	switch m.operator {
+	case operatorPresent:
+		return present
+	case operatorEqual:
+		return present && value == m.value
+	case operatorNotEqual:
+		return value != m.value
+	case operatorMatches:
+		if m.re == nil {
+			r, err := regexp.Compile(m.value)
+			if err != nil {
+				log.L.Errorf("error compiling regexp %q", m.value)
+				return false
+			}
+
+			m.re = r
+		}
+
+		return m.re.MatchString(value)
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go
new file mode 100644
index 00000000000..49182d7b7bd
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/parser.go
@@ -0,0 +1,291 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package filters
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/containerd/containerd/errdefs"
+)
+
+/*
+Parse the strings into a filter that may be used with an adaptor.
+
+The filter is made up of zero or more selectors.
+
+The format is a comma separated list of expressions, in the form of
+`<fieldpath><operator><value>`, known as selectors. All selectors must match the
+target object for the filter to be true.
+
+We define the operators "==" for equality, "!=" for not equal and "~=" for a
+regular expression. If the operator and value are not present, the matcher will
+test for the presence of a value, as defined by the target object.
+
+The formal grammar is as follows:
+
+selectors := selector ("," selector)*
+selector := fieldpath (operator value)
+fieldpath := field ('.' field)*
+field := quoted | [A-Za-z] [A-Za-z0-9_]+
+operator := "==" | "!=" | "~="
+value := quoted | [^\s,]+
+quoted := <go string syntax>
+
+*/
+func Parse(s string) (Filter, error) {
+	// special case empty to match all
+	if s == "" {
+		return Always, nil
+	}
+
+	p := parser{input: s}
+	return p.parse()
+}
+
+// ParseAll parses each filter in ss and returns a filter that will return true
+// if any filter matches the expression.
+//
+// If no filters are provided, the filter will match anything.
+func ParseAll(ss ...string) (Filter, error) {
+	if len(ss) == 0 {
+		return Always, nil
+	}
+
+	var fs []Filter
+	for _, s := range ss {
+		f, err := Parse(s)
+		if err != nil {
+			return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument)
+		}
+
+		fs = append(fs, f)
+	}
+
+	return Any(fs), nil
+}
+
+type parser struct {
+	input   string
+	scanner scanner
+}
+
+func (p *parser) parse() (Filter, error) {
+	p.scanner.init(p.input)
+
+	ss, err := p.selectors()
+	if err != nil {
+		return nil, fmt.Errorf("filters: %w", err)
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selectors() (Filter, error) {
+	s, err := p.selector()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := All{s}
+
+loop:
+	for {
+		tok := p.scanner.peek()
+		switch tok {
+		case ',':
+			pos, tok, _ := p.scanner.scan()
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a separator")
+			}
+
+			s, err := p.selector()
+			if err != nil {
+				return nil, err
+			}
+
+			ss = append(ss, s)
+		case tokenEOF:
+			break loop
+		default:
+			return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
+		}
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selector() (selector, error) {
+	fieldpath, err := p.fieldpath()
+	if err != nil {
+		return selector{}, err
+	}
+
+	switch p.scanner.peek() {
+	case ',', tokenSeparator, tokenEOF:
+		return selector{
+			fieldpath: fieldpath,
+			operator:  operatorPresent,
+		}, nil
+	}
+
+	op, err := p.operator()
+	if err != nil {
+		return selector{}, err
+	}
+
+	var allowAltQuotes bool
+	if op == operatorMatches {
+		allowAltQuotes = true
+	}
+
+	value, err := p.value(allowAltQuotes)
+	if err != nil {
+		if err == io.EOF {
+			return selector{}, io.ErrUnexpectedEOF
+		}
+		return selector{}, err
+	}
+
+	return selector{
+		fieldpath: fieldpath,
+		value:     value,
+		operator:  op,
+	}, nil
+}
+
+func (p *parser) fieldpath() ([]string, error) {
+	f, err := p.field()
+	if err != nil {
+		return nil, err
+	}
+
+	fs := []string{f}
+loop:
+	for {
+		tok := p.scanner.peek() // lookahead to consume field separator
+
+		switch tok {
+		case '.':
+			pos, tok, _ := p.scanner.scan() // consume separator
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a field separator (`.`)")
+			}
+
+			f, err := p.field()
+			if err != nil {
+				return nil, err
+			}
+
+			fs = append(fs, f)
+		default:
+			// let the layer above handle the other bad cases.
+ break loop + } + } + + return fs, nil +} + +func (p *parser) field() (string, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, false) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected field or quoted") +} + +func (p *parser) operator() (operator, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenOperator: + switch s { + case "==": + return operatorEqual, nil + case "!=": + return operatorNotEqual, nil + case "~=": + return operatorMatches, nil + default: + return 0, p.mkerr(pos, "unsupported operator %q", s) + } + case tokenIllegal: + return 0, p.mkerr(pos, p.scanner.err) + } + + return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) +} + +func (p *parser) value(allowAltQuotes bool) (string, error) { + pos, tok, s := p.scanner.scan() + + switch tok { + case tokenValue, tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, allowAltQuotes) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected value or quoted") +} + +func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { + if !allowAlts && s[0] != '\'' && s[0] != '"' { + return "", p.mkerr(pos, "invalid quote encountered") + } + + uq, err := unquote(s) + if err != nil { + return "", p.mkerr(pos, "unquoting failed: %v", err) + } + + return uq, nil +} + +type parseError struct { + input string + pos int + msg string +} + +func (pe parseError) Error() string { + if pe.pos < len(pe.input) { + before := pe.input[:pe.pos] + location := pe.input[pe.pos : pe.pos+1] // need to handle end + after := pe.input[pe.pos+1:] + + return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) + } + + return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) +} + +func (p *parser) mkerr(pos int, format string, args ...interface{}) error { + return fmt.Errorf("parse error: %w", parseError{ + input: p.input, + pos: pos, + msg: fmt.Sprintf(format, args...), + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go new file mode 100644 index 00000000000..b76aab9b4a7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -0,0 +1,252 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "errors" + "unicode/utf8" +) + +// NOTE(stevvooe): Most of this code in this file is copied from the stdlib +// strconv package and modified to be able to handle quoting with `/` and `|` +// as delimiters. The copyright is held by the Go authors. + +var errQuoteSyntax = errors.New("quote syntax error") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s. 
+// It returns four values: +// +// 1) value, the decoded Unicode code point or byte value; +// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3) tail, the remainder of the string after the character; and +// 4) an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// +// This is from Go strconv package, modified to support `|` and `/` as double +// quotes for use with regular expressions. +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): + err = errQuoteSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = errQuoteSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errQuoteSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errQuoteSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errQuoteSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = errQuoteSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errQuoteSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errQuoteSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"', '|', '/': + if c != quote { + err = errQuoteSyntax + return + } + value = rune(c) + default: + err = errQuoteSyntax + return + } + tail = s + return +} + +// unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +// +// This is modified from the standard library to support `|` and `/` as quote +// characters for use with regular expressions. +func unquote(s string) (string, error) { + n := len(s) + if n < 2 { + return "", errQuoteSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", errQuoteSyntax + } + s = s[1 : n-1] + + if quote == '`' { + if contains(s, '`') { + return "", errQuoteSyntax + } + if contains(s, '\r') { + // -1 because we know there is at least one \r to remove. 
+ buf := make([]byte, 0, len(s)-1) + for i := 0; i < len(s); i++ { + if s[i] != '\r' { + buf = append(buf, s[i]) + } + } + return string(buf), nil + } + return s, nil + } + if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { + return "", errQuoteSyntax + } + if contains(s, '\n') { + return "", errQuoteSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + switch quote { + case '"', '/', '|': // pipe and slash are treated like double quote + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", errQuoteSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go new file mode 100644 index 00000000000..6a485467b8a --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/scanner.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package filters + +import ( + "unicode" + "unicode/utf8" +) + +const ( + tokenEOF = -(iota + 1) + tokenQuoted + tokenValue + tokenField + tokenSeparator + tokenOperator + tokenIllegal +) + +type token rune + +func (t token) String() string { + switch t { + case tokenEOF: + return "EOF" + case tokenQuoted: + return "Quoted" + case tokenValue: + return "Value" + case tokenField: + return "Field" + case tokenSeparator: + return "Separator" + case tokenOperator: + return "Operator" + case tokenIllegal: + return "Illegal" + } + + return string(t) +} + +func (t token) GoString() string { + return "token" + t.String() +} + +type scanner struct { + input string + pos int + ppos int // bounds the current rune in the string + value bool + err string +} + +func (s *scanner) init(input string) { + s.input = input + s.pos = 0 + s.ppos = 0 +} + +func (s *scanner) next() rune { + if s.pos >= len(s.input) { + return tokenEOF + } + s.pos = s.ppos + + r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) + s.ppos += w + if r == utf8.RuneError { + if w > 0 { + s.error("rune error") + return tokenIllegal + } + return tokenEOF + } + + if r == 0 { + s.error("unexpected null") + return tokenIllegal + } + + return r +} + +func (s *scanner) peek() rune { + pos := s.pos + ppos := s.ppos + ch := s.next() + s.pos = pos + s.ppos = ppos + return ch +} + +func (s *scanner) scan() (nextp int, tk token, text string) { + var ( + ch = s.next() + pos = s.pos + ) + +chomp: + switch { + case ch == tokenEOF: + case ch == tokenIllegal: + case isQuoteRune(ch): + if !s.scanQuoted(ch) { + return pos, tokenIllegal, s.input[pos:s.ppos] + } + return pos, tokenQuoted, s.input[pos:s.ppos] + case isSeparatorRune(ch): + s.value = false + return pos, tokenSeparator, s.input[pos:s.ppos] + case isOperatorRune(ch): + s.scanOperator() + s.value = true + return pos, tokenOperator, s.input[pos:s.ppos] + case unicode.IsSpace(ch): + // chomp + ch = s.next() + pos = s.pos + goto chomp + case s.value: + s.scanValue() + s.value = false + return pos, tokenValue, s.input[pos:s.ppos] + case isFieldRune(ch): + s.scanField() + return pos, tokenField, s.input[pos:s.ppos] + } + + return s.pos, token(ch), "" +} + +func (s *scanner) scanField() { + for { + ch := s.peek() + if !isFieldRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanOperator() { + for { + ch := s.peek() + switch ch { + case '=', '!', '~': + s.next() + default: + return + } + } +} + +func (s *scanner) scanValue() { + for { + ch := s.peek() + if !isValueRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanQuoted(quote rune) bool { + var illegal bool + ch := s.next() // read character after quote + for ch != quote { + if ch == '\n' || ch < 0 { + s.error("quoted literal not terminated") + return false + } + if ch == '\\' { + var legal bool + ch, legal = s.scanEscape(quote) + if !legal { + illegal = true + } + } else { + ch = s.next() + } + } + return !illegal +} + +func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { + ch = s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + // nothing to do + ch = s.next() + legal = true + case '0', '1', '2', '3', '4', '5', '6', '7': + ch, legal = s.scanDigits(ch, 8, 3) + case 'x': + ch, legal = s.scanDigits(s.next(), 16, 2) + case 'u': + ch, legal = s.scanDigits(s.next(), 16, 4) + case 'U': + ch, legal = s.scanDigits(s.next(), 16, 8) + default: + s.error("illegal escape sequence") + } + return +} + +func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.error("illegal numeric escape sequence") + return ch, false + } + return ch, true +} + +func (s *scanner) error(msg string) { + if s.err == "" { + s.err = msg + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func isFieldRune(r rune) bool { + return (r == '_' || isAlphaRune(r) || isDigitRune(r)) +} + +func isAlphaRune(r rune) bool { + return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' +} + +func isDigitRune(r rune) bool { + return r >= '0' && r <= '9' +} + +func isOperatorRune(r rune) bool { + switch r { + case '=', '!', '~': + return true + } + + return false +} + +func isQuoteRune(r rune) bool { + switch r { + case '/', '|', '"': // maybe add single quoting? + return true + } + + return false +} + +func isSeparatorRune(r rune) bool { + switch r { + case ',', '.': + return true + } + + return false +} + +func isValueRune(r rune) bool { + return r != ',' && !unicode.IsSpace(r) && + (unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsNumber(r) || + unicode.IsGraphic(r) || + unicode.IsPunct(r)) +} diff --git a/vendor/github.com/containerd/containerd/identifiers/validate.go b/vendor/github.com/containerd/containerd/identifiers/validate.go index f52317b491b..cbd3a52ba9e 100644 --- a/vendor/github.com/containerd/containerd/identifiers/validate.go +++ b/vendor/github.com/containerd/containerd/identifiers/validate.go @@ -25,10 +25,10 @@ package identifiers import ( + "fmt" "regexp" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" ) const ( @@ -51,15 +51,15 @@ var ( // In general identifiers that pass this validation should be safe for use as filesystem path components. func Validate(s string) error { if len(s) == 0 { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty") + return fmt.Errorf("identifier must not be empty: %w", errdefs.ErrInvalidArgument) } if len(s) > maxLength { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength) + return fmt.Errorf("identifier %q greater than maximum length (%d characters): %w", s, maxLength, errdefs.ErrInvalidArgument) } if !identifierRe.MatchString(s) { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe) + return fmt.Errorf("identifier %q must match %v: %w", s, identifierRe, errdefs.ErrInvalidArgument) } return nil } diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 37b6a7d1c8a..0db9562b82b 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -52,7 +52,8 @@ const ( // WithLogger returns a new context with the provided logger. Use in // combination with logger.WithField(s) for great effect. func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) + e := logger.WithContext(ctx) + return context.WithValue(ctx, loggerKey{}, e) } // GetLogger retrieves the current logger from the context. 
If no logger is @@ -61,7 +62,7 @@ func GetLogger(ctx context.Context) *logrus.Entry { logger := ctx.Value(loggerKey{}) if logger == nil { - return L + return L.WithContext(ctx) } return logger.(*logrus.Entry) diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go index b53c9012c1b..e5e23fe4301 100644 --- a/vendor/github.com/containerd/containerd/namespaces/context.go +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -18,11 +18,11 @@ package namespaces import ( "context" + "fmt" "os" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/identifiers" - "github.com/pkg/errors" ) const ( @@ -69,10 +69,10 @@ func Namespace(ctx context.Context) (string, bool) { func NamespaceRequired(ctx context.Context) (string, error) { namespace, ok := Namespace(ctx) if !ok || namespace == "" { - return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") + return "", fmt.Errorf("namespace is required: %w", errdefs.ErrFailedPrecondition) } if err := identifiers.Validate(namespace); err != nil { - return "", errors.Wrap(err, "namespace validation") + return "", fmt.Errorf("namespace validation: %w", err) } return namespace, nil } diff --git a/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go b/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go index b382215025d..176a0e6673a 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go @@ -16,11 +16,11 @@ package constants -// TODO(random-liu): Merge annotations package into this package. - const ( // K8sContainerdNamespace is the namespace we use to connect containerd. K8sContainerdNamespace = "k8s.io" - // CRIVersion is the CRI version supported by the CRI plugin. - CRIVersion = "v1alpha2" + // CRIVersion is the latest CRI version supported by the CRI plugin. + CRIVersion = "v1" + // CRIVersionAlpha is the alpha version of CRI supported by the CRI plugin. + CRIVersionAlpha = "v1alpha2" ) diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go index 59d41411f23..2cdc97e1de5 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go @@ -25,7 +25,7 @@ import ( "github.com/containerd/containerd/cio" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // AttachOptions specifies how to attach to a container. diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go index 2780b958ab5..f0f90840160 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go index dcc9fe6eb94..b6c9c12028f 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -19,13 +17,13 @@ package io import ( + "fmt" "io" "net" "os" "sync" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -62,7 +60,7 @@ func openPipe(ctx context.Context, fn string, flag int, perm os.FileMode) (io.Re func (p *pipe) Write(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Write(b) } @@ -70,7 +68,7 @@ func (p *pipe) Write(b []byte) (int, error) { func (p *pipe) Read(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Read(b) } diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go b/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go index 27721e71724..674a89fdff8 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go @@ -21,11 +21,10 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "time" "github.com/sirupsen/logrus" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cioutil "github.com/containerd/containerd/pkg/ioutil" ) @@ -43,7 +42,7 @@ const ( // NewDiscardLogger creates logger which discards all the input. func NewDiscardLogger() io.WriteCloser { - return cioutil.NewNopWriteCloser(ioutil.Discard) + return cioutil.NewNopWriteCloser(io.Discard) } // NewCRILogger returns a write closer which redirect container log into diff --git a/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go b/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go index d0e0bf37efa..cf027eaf8c0 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go @@ -18,8 +18,8 @@ package util import ( "encoding/json" - - "github.com/pkg/errors" + "errors" + "fmt" ) // DeepCopy makes a deep copy from src into dst. 
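The recurring mechanical change across these vendored files is the move from github.com/pkg/errors wrapping to Go 1.13 error wrapping with fmt.Errorf and the %w verb, as in the DeepCopy hunk that follows. A minimal, self-contained sketch (the sentinel and function names are illustrative, not taken from this patch) showing that %w preserves sentinel matching the same way errors.Wrapf did:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for a sentinel such as errdefs.ErrNotFound.
var errNotFound = errors.New("not found")

func lookup(key string) error {
	// Wrapping with %w records errNotFound in the error chain, so
	// callers can still detect the condition with errors.Is.
	return fmt.Errorf("lookup %q: %w", key, errNotFound)
}

func main() {
	fmt.Println(errors.Is(lookup("example"), errNotFound)) // true
}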
@@ -32,11 +32,11 @@ func DeepCopy(dst interface{}, src interface{}) error { } bytes, err := json.Marshal(src) if err != nil { - return errors.Wrap(err, "unable to marshal src") + return fmt.Errorf("unable to marshal src: %w", err) } err = json.Unmarshal(bytes, dst) if err != nil { - return errors.Wrap(err, "unable to unmarshal into dst") + return fmt.Errorf("unable to unmarshal into dst: %w", err) } return nil } diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go index aa604baab92..74c303b944f 100644 --- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go +++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go @@ -18,10 +18,9 @@ package dialer import ( "context" + "fmt" "net" "time" - - "github.com/pkg/errors" ) type dialResult struct { @@ -74,6 +73,6 @@ func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) { dr.c.Close() } }() - return nil, errors.Errorf("dial %s: timeout", address) + return nil, fmt.Errorf("dial %s: timeout", address) } } diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go index e7d19583395..b4304ffbf1f 100644 --- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go +++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go new file mode 100644 index 00000000000..bc1af75abb7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go @@ -0,0 +1,109 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shutdown + +import ( + "context" + "errors" + "sync" + "time" + + "golang.org/x/sync/errgroup" +) + +// ErrShutdown is the error condition when a context has been fully shutdown +var ErrShutdown = errors.New("shutdown") + +// Service is used to facilitate shutdown through callback +// registration and shutdown initiation +type Service interface { + // Shutdown initiates shutdown + Shutdown() + // RegisterCallback registers functions to be called on shutdown and before + // the shutdown channel is closed. A callback error will propagate to the + // context error + RegisterCallback(func(context.Context) error) +} + +// WithShutdown returns a context which is similar to a cancel context, but +// with callbacks which can propagate to the context error. Unlike a cancel +// context, the shutdown context cannot be canceled from the parent context. +// However, future child contexts will be canceled upon shutdown.
+func WithShutdown(ctx context.Context) (context.Context, Service) { + ss := &shutdownService{ + Context: ctx, + doneC: make(chan struct{}), + timeout: 30 * time.Second, + } + return ss, ss +} + +type shutdownService struct { + context.Context + + mu sync.Mutex + isShutdown bool + callbacks []func(context.Context) error + doneC chan struct{} + err error + timeout time.Duration +} + +func (s *shutdownService) Shutdown() { + s.mu.Lock() + defer s.mu.Unlock() + if s.isShutdown { + return + } + s.isShutdown = true + + go func(callbacks []func(context.Context) error) { + ctx, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + grp, ctx := errgroup.WithContext(ctx) + for i := range callbacks { + fn := callbacks[i] + grp.Go(func() error { return fn(ctx) }) + } + err := grp.Wait() + if err == nil { + err = ErrShutdown + } + s.mu.Lock() + s.err = err + close(s.doneC) + s.mu.Unlock() + }(s.callbacks) +} + +func (s *shutdownService) Done() <-chan struct{} { + return s.doneC +} + +func (s *shutdownService) Err() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.err +} +func (s *shutdownService) RegisterCallback(fn func(context.Context) error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.callbacks == nil { + s.callbacks = []func(context.Context) error{} + } + s.callbacks = append(s.callbacks, fn) +} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go index 8b4d925d28e..f05ab7aa9f9 100644 --- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go @@ -17,13 +17,14 @@ package ttrpcutil import ( + "errors" + "fmt" "sync" "time" v1 "github.com/containerd/containerd/api/services/ttrpc/events/v1" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/ttrpc" - "github.com/pkg/errors" ) const ttrpcDialTimeout = 5 * time.Second @@ -43,7 +44,7 @@ func NewClient(address string, opts ...ttrpc.ClientOpts) (*Client, error) { connector := func() (*ttrpc.Client, error) { conn, err := dialer.Dialer(address, ttrpcDialTimeout) if err != nil { - return nil, errors.Wrap(err, "failed to connect") + return nil, fmt.Errorf("failed to connect: %w", err) } client := ttrpc.NewClient(conn, opts...) 
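A short usage sketch for the shutdown package added above, written only against the API vendored in this file (the sketch itself is not part of the patch). Done() unblocks once every registered callback has finished, and Err() then reports either a callback error or shutdown.ErrShutdown:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/pkg/shutdown"
)

func main() {
	ctx, svc := shutdown.WithShutdown(context.Background())

	// Callbacks run concurrently under a 30-second timeout; a non-nil
	// error from any callback replaces shutdown.ErrShutdown as ctx.Err().
	svc.RegisterCallback(func(context.Context) error {
		fmt.Println("flushing state")
		return nil
	})

	svc.Shutdown()
	<-ctx.Done()
	fmt.Println(ctx.Err()) // "shutdown" when all callbacks succeed
}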
diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go index aab756fd2a6..4f8d7dd2d59 100644 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go index c7657e1869b..3913ef66373 100644 --- a/vendor/github.com/containerd/containerd/platforms/compare.go +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -38,12 +38,22 @@ func platformVector(platform specs.Platform) []specs.Platform { switch platform.Architecture { case "amd64": + if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { + for amd64Version--; amd64Version >= 1; amd64Version-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(amd64Version), + }) + } + } vector = append(vector, specs.Platform{ Architecture: "386", OS: platform.OS, OSVersion: platform.OSVersion, OSFeatures: platform.OSFeatures, - Variant: platform.Variant, }) case "arm": if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index 4a7177e3138..046e0356d19 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -18,6 +18,7 @@ package platforms import ( "bufio" + "fmt" "os" "runtime" "strings" @@ -25,7 +26,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) // Present the ARM instruction set architecture, eg: v7, v8 @@ -48,7 +48,7 @@ func cpuVariant() string { // by ourselves. We can just parse these information from /proc/cpuinfo func getCPUInfo(pattern string) (info string, err error) { if !isLinuxOS(runtime.GOOS) { - return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) + return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented) } cpuinfo, err := os.Open("/proc/cpuinfo") @@ -75,7 +75,7 @@ func getCPUInfo(pattern string) (info string, err error) { return "", err } - return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) + return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound) } func getCPUVariant() string { diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go index 6ede94061eb..dbe9957ca9d 100644 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -38,7 +38,7 @@ func isLinuxOS(os string) bool { // The OS value should be normalized before calling this function. 
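The platforms/compare.go hunk above teaches platformVector about x86-64 micro-architecture levels, so a request for, say, linux/amd64/v3 also matches v2 and v1 images before falling back to 386. A rough standalone sketch of the resulting fallback order (an illustrative reimplementation over plain strings, not the vendored function):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// amd64Vector mimics the new variant handling: start from the requested
// level, count down to v1, then append the 32-bit fallback.
func amd64Vector(variant string) []string {
	vector := []string{"amd64/" + variant}
	if v, err := strconv.Atoi(strings.TrimPrefix(variant, "v")); err == nil && v > 1 {
		for v--; v >= 1; v-- {
			vector = append(vector, "amd64/v"+strconv.Itoa(v))
		}
	}
	return append(vector, "386")
}

func main() {
	fmt.Println(amd64Vector("v3")) // [amd64/v3 amd64/v2 amd64/v1 386]
}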
func isKnownOS(os string) bool { switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": return true } return false @@ -60,7 +60,7 @@ func isArmArch(arch string) bool { // The arch value should be normalized before being passed to this function. func isKnownArch(arch string) bool { switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": return true } return false @@ -86,9 +86,11 @@ func normalizeArch(arch, variant string) (string, string) { case "i386": arch = "386" variant = "" - case "x86_64", "x86-64": + case "x86_64", "x86-64", "amd64": arch = "amd64" - variant = "" + if variant == "v1" { + variant = "" + } case "aarch64", "arm64": arch = "arm64" switch variant { diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go index cb77fbc9f7b..cfa3ff34a19 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -16,27 +16,11 @@ package platforms -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - // DefaultString returns the default string specifier for the platform. func DefaultString() string { return Format(DefaultSpec()) } -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - // DefaultStrict returns strict form of Default. func DefaultStrict() MatchComparer { return OnlyStrict(DefaultSpec()) diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go new file mode 100644 index 00000000000..e249fe48d38 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go @@ -0,0 +1,45 @@ +//go:build darwin +// +build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. 
+func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + // darwin runtime also supports Linux binary via runu/LKL + OS: "linux", + Architecture: runtime.GOARCH, + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go index e8a7d5ffa0d..49690f1b3e7 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. @@ -18,6 +19,22 @@ package platforms +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + // Default returns the default matcher for the platform. func Default() MatchComparer { return Only(DefaultSpec()) diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go index 0c380e3b7c1..c1aaf72ca8e 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -29,6 +27,18 @@ import ( "golang.org/x/sys/windows" ) +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + major, minor, build := windows.RtlGetNtVersionNumbers() + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + // The Variant field will be empty if arch != ARM. 
+ Variant: cpuVariant(), + } +} + type matchComparer struct { defaults Matcher osVersionPrefix string diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 088bdea0508..8f955d036df 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -107,6 +107,8 @@ package platforms import ( + "fmt" + "path" "regexp" "runtime" "strconv" @@ -114,7 +116,6 @@ import ( "github.com/containerd/containerd/errdefs" specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) var ( @@ -166,14 +167,14 @@ func (m *matcher) String() string { func Parse(specifier string) (specs.Platform, error) { if strings.Contains(specifier, "*") { // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) + return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument) } parts := strings.Split(specifier, "/") for _, part := range parts { if !specifierRe.MatchString(part) { - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) + return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument) } } @@ -205,7 +206,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) + return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument) case 2: // In this case, we treat as a regular os/arch pair. We don't care // about whether or not we know of the platform. @@ -227,7 +228,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) + return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument) } // MustParse is like Parse but panics if the specifier cannot be parsed. @@ -246,20 +247,7 @@ func Format(platform specs.Platform) string { return "unknown" } - return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) -} - -func joinNotEmpty(s ...string) string { - var ss []string - for _, s := range s { - if s == "" { - continue - } - - ss = append(ss, s) - } - - return strings.Join(ss, "/") + return path.Join(platform.OS, platform.Architecture, platform.Variant) } // Normalize validates and translates the platform to the canonical value.
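A sketch of the user-visible effect of the normalizeArch and Format changes above, assuming the package's public Parse/Format behave as the vendored code shows (the sketch is not part of the patch):

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// "amd64" is now an explicit alias and a redundant "v1" variant is
	// dropped, so both spellings normalize to the same platform.
	fmt.Println(platforms.Format(platforms.MustParse("linux/x86_64/v1"))) // linux/amd64
	// Non-default micro-architecture variants are preserved.
	fmt.Println(platforms.Format(platforms.MustParse("linux/amd64/v3"))) // linux/amd64/v3
}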
@@ -269,10 +257,5 @@ func joinNotEmpty(s ...string) string { func Normalize(platform specs.Platform) specs.Platform { platform.OS = normalizeOS(platform.OS) platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) - - // these fields are deprecated, remove them - platform.OSFeatures = nil - platform.OSVersion = "" - return platform } diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go new file mode 100644 index 00000000000..dcb533c8a74 --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/context.go @@ -0,0 +1,171 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events/exchange" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// InitContext is used for plugin initialization +type InitContext struct { + Context context.Context + Root string + State string + Config interface{} + Address string + TTRPCAddress string + + // deprecated: will be removed in 2.0, use plugin.EventType + Events *exchange.Exchange + + Meta *Meta // plugins can fill in metadata at init. + + plugins *Set +} + +// NewContext returns a new plugin InitContext +func NewContext(ctx context.Context, r *Registration, plugins *Set, root, state string) *InitContext { + return &InitContext{ + Context: ctx, + Root: filepath.Join(root, r.URI()), + State: filepath.Join(state, r.URI()), + Meta: &Meta{ + Exports: map[string]string{}, + }, + plugins: plugins, + } +} + +// Get returns the first plugin by its type +func (i *InitContext) Get(t Type) (interface{}, error) { + return i.plugins.Get(t) +} + +// Meta contains information gathered from the registration and initialization +// process. +type Meta struct { + Platforms []ocispec.Platform // platforms supported by plugin + Exports map[string]string // values exported by plugin + Capabilities []string // feature switches for plugin +} + +// Plugin represents an initialized plugin, used with an init context. +type Plugin struct { + Registration *Registration // registration, as initialized + Config interface{} // config, as initialized + Meta *Meta + + instance interface{} + err error // will be set if there was an error initializing the plugin +} + +// Err returns the errors during initialization. +// returns nil if no error was encountered +func (p *Plugin) Err() error { + return p.err +} + +// Instance returns the instance and any initialization error of the plugin +func (p *Plugin) Instance() (interface{}, error) { + return p.instance, p.err +} + +// Set defines a plugin collection, used with InitContext. +// +// This maintains ordering and unique indexing over the set. +// +// After iteratively instantiating plugins, this set should represent the +// ordered initialization set of plugins for a containerd instance.
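The InitContext and Set machinery above (the Set implementation continues below) pairs with the registration API added in plugin.go later in this patch. A minimal sketch of registering and enumerating a plugin, using only identifiers defined in this vendored package (the sketch is not part of the patch):

package main

import (
	"fmt"

	"github.com/containerd/containerd/plugin"
)

func init() {
	// A trivial internal plugin; InitFn receives the InitContext and
	// returns the instance that ends up stored in the plugin Set.
	plugin.Register(&plugin.Registration{
		Type: plugin.InternalPlugin,
		ID:   "example",
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			return "example-instance", nil
		},
	})
}

func main() {
	// Graph returns registrations in dependency order; the filter can
	// mark individual registrations as disabled.
	for _, r := range plugin.Graph(func(*plugin.Registration) bool { return false }) {
		fmt.Println(r.URI()) // io.containerd.internal.v1.example
	}
}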
+type Set struct { + ordered []*Plugin // order of initialization + byTypeAndID map[Type]map[string]*Plugin +} + +// NewPluginSet returns an initialized plugin set +func NewPluginSet() *Set { + return &Set{ + byTypeAndID: make(map[Type]map[string]*Plugin), + } +} + +// Add a plugin to the set +func (ps *Set) Add(p *Plugin) error { + if byID, typeok := ps.byTypeAndID[p.Registration.Type]; !typeok { + ps.byTypeAndID[p.Registration.Type] = map[string]*Plugin{ + p.Registration.ID: p, + } + } else if _, idok := byID[p.Registration.ID]; !idok { + byID[p.Registration.ID] = p + } else { + return fmt.Errorf("plugin %v already initialized: %w", p.Registration.URI(), errdefs.ErrAlreadyExists) + } + + ps.ordered = append(ps.ordered, p) + return nil +} + +// Get returns the first plugin by its type +func (ps *Set) Get(t Type) (interface{}, error) { + for _, v := range ps.byTypeAndID[t] { + return v.Instance() + } + return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound) +} + +// GetAll returns all initialized plugins +func (ps *Set) GetAll() []*Plugin { + return ps.ordered +} + +// Plugins returns the plugin set +func (i *InitContext) Plugins() *Set { + return i.plugins +} + +// GetAll plugins in the set +func (i *InitContext) GetAll() []*Plugin { + return i.plugins.GetAll() +} + +// GetByID returns the plugin of the given type and ID +func (i *InitContext) GetByID(t Type, id string) (interface{}, error) { + ps, err := i.GetByType(t) + if err != nil { + return nil, err + } + p, ok := ps[id] + if !ok { + return nil, fmt.Errorf("no %s plugins with id %s: %w", t, id, errdefs.ErrNotFound) + } + return p.Instance() +} + +// GetByType returns all plugins with the specific type. +func (i *InitContext) GetByType(t Type) (map[string]*Plugin, error) { + p, ok := i.plugins.byTypeAndID[t] + if !ok { + return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound) + } + + return p, nil +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go new file mode 100644 index 00000000000..eb38c127157 --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -0,0 +1,223 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "errors" + "fmt" + "sync" +) + +var ( + // ErrNoType is returned when no type is specified + ErrNoType = errors.New("plugin: no type") + // ErrNoPluginID is returned when no id is specified + ErrNoPluginID = errors.New("plugin: no id") + // ErrIDRegistered is returned when a duplicate id is already registered + ErrIDRegistered = errors.New("plugin: id already registered") + // ErrSkipPlugin is used when a plugin is not initialized and should not be loaded, + // this allows the plugin loader to differentiate between a plugin which is configured + // not to load and one that fails to load. + ErrSkipPlugin = errors.New("skip plugin") + + // ErrInvalidRequires will be thrown if the requirements for a plugin are + // defined in an invalid manner. + ErrInvalidRequires = errors.New("invalid requires") +) + +// IsSkipPlugin returns true if the error is skipping the plugin +func IsSkipPlugin(err error) bool { + return errors.Is(err, ErrSkipPlugin) } + +// Type is the type of the plugin +type Type string + +func (t Type) String() string { return string(t) } + +const ( + // InternalPlugin implements an internal plugin to containerd + InternalPlugin Type = "io.containerd.internal.v1" + // RuntimePlugin implements a runtime + RuntimePlugin Type = "io.containerd.runtime.v1" + // RuntimePluginV2 implements a runtime v2 + RuntimePluginV2 Type = "io.containerd.runtime.v2" + // ServicePlugin implements an internal service + ServicePlugin Type = "io.containerd.service.v1" + // GRPCPlugin implements a grpc service + GRPCPlugin Type = "io.containerd.grpc.v1" + // TTRPCPlugin implements a ttrpc shim service + TTRPCPlugin Type = "io.containerd.ttrpc.v1" + // SnapshotPlugin implements a snapshotter + SnapshotPlugin Type = "io.containerd.snapshotter.v1" + // TaskMonitorPlugin implements a task monitor + TaskMonitorPlugin Type = "io.containerd.monitor.v1" + // DiffPlugin implements a differ + DiffPlugin Type = "io.containerd.differ.v1" + // MetadataPlugin implements a metadata store + MetadataPlugin Type = "io.containerd.metadata.v1" + // ContentPlugin implements a content store + ContentPlugin Type = "io.containerd.content.v1" + // GCPlugin implements garbage collection policy + GCPlugin Type = "io.containerd.gc.v1" + // EventPlugin implements event handling + EventPlugin Type = "io.containerd.event.v1" + // TracingProcessorPlugin implements an open telemetry span processor + TracingProcessorPlugin Type = "io.containerd.tracing.processor.v1" +) + +const ( + // RuntimeLinuxV1 is the legacy linux runtime + RuntimeLinuxV1 = "io.containerd.runtime.v1.linux" + // RuntimeRuncV1 is the runc runtime that supports a single container + RuntimeRuncV1 = "io.containerd.runc.v1" + // RuntimeRuncV2 is the runc runtime that supports multiple containers per shim + RuntimeRuncV2 = "io.containerd.runc.v2" +) + +// Registration contains information for registering a plugin +type Registration struct { + // Type of the plugin + Type Type + // ID of the plugin + ID string + // Config specific to the plugin + Config interface{} + // Requires is a list of plugins that the registered plugin requires to be available + Requires []Type + + // InitFn is called when initializing a plugin. The registration and + // context are passed in. The init function may modify the registration to + // add exports, capabilities and platform support declarations.
+ InitFn func(*InitContext) (interface{}, error) + // Disable the plugin from loading + Disable bool +} + +// Init the registered plugin +func (r *Registration) Init(ic *InitContext) *Plugin { + p, err := r.InitFn(ic) + return &Plugin{ + Registration: r, + Config: ic.Config, + Meta: ic.Meta, + instance: p, + err: err, + } +} + +// URI returns the full plugin URI +func (r *Registration) URI() string { + return fmt.Sprintf("%s.%s", r.Type, r.ID) +} + +var register = struct { + sync.RWMutex + r []*Registration +}{} + +// Load loads all plugins at the provided path into containerd +func Load(path string) (err error) { + defer func() { + if v := recover(); v != nil { + rerr, ok := v.(error) + if !ok { + rerr = fmt.Errorf("%s", v) + } + err = rerr + } + }() + return loadPlugins(path) +} + +// Register allows plugins to register +func Register(r *Registration) { + register.Lock() + defer register.Unlock() + + if r.Type == "" { + panic(ErrNoType) + } + if r.ID == "" { + panic(ErrNoPluginID) + } + if err := checkUnique(r); err != nil { + panic(err) + } + + for _, requires := range r.Requires { + if requires == "*" && len(r.Requires) != 1 { + panic(ErrInvalidRequires) + } + } + + register.r = append(register.r, r) +} + +func checkUnique(r *Registration) error { + for _, registered := range register.r { + if r.URI() == registered.URI() { + return fmt.Errorf("%s: %w", r.URI(), ErrIDRegistered) + } + } + return nil +} + +// DisableFilter filters out disabled plugins +type DisableFilter func(r *Registration) bool + +// Graph returns an ordered list of registered plugins for initialization. +// Plugins in disableList specified by id will be disabled. +func Graph(filter DisableFilter) (ordered []*Registration) { + register.RLock() + defer register.RUnlock() + + for _, r := range register.r { + if filter(r) { + r.Disable = true + } + } + + added := map[*Registration]bool{} + for _, r := range register.r { + if r.Disable { + continue + } + children(r, added, &ordered) + if !added[r] { + ordered = append(ordered, r) + added[r] = true + } + } + return ordered +} + +func children(reg *Registration, added map[*Registration]bool, ordered *[]*Registration) { + for _, t := range reg.Requires { + for _, r := range register.r { + if !r.Disable && + r.URI() != reg.URI() && + (t == "*" || r.Type == t) { + children(r, added, ordered) + if !added[r] { + *ordered = append(*ordered, r) + added[r] = true + } + } + } + } +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go new file mode 100644 index 00000000000..0df0669d29a --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go @@ -0,0 +1,63 @@ +//go:build go1.8 && !windows && amd64 && !static_build && !gccgo +// +build go1.8,!windows,amd64,!static_build,!gccgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package plugin + +import ( + "fmt" + "path/filepath" + "plugin" + "runtime" +) + +// loadPlugins loads all plugins for the OS and Arch +// that containerd is built for inside the provided path +func loadPlugins(path string) error { + abs, err := filepath.Abs(path) + if err != nil { + return err + } + pattern := filepath.Join(abs, fmt.Sprintf( + "*-%s-%s.%s", + runtime.GOOS, + runtime.GOARCH, + getLibExt(), + )) + libs, err := filepath.Glob(pattern) + if err != nil { + return err + } + for _, lib := range libs { + if _, err := plugin.Open(lib); err != nil { + return err + } + } + return nil +} + +// getLibExt returns a platform specific lib extension for +// the platform that containerd is running on +func getLibExt() string { + switch runtime.GOOS { + case "windows": + return "dll" + default: + return "so" + } +} diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go similarity index 67% rename from vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go rename to vendor/github.com/containerd/containerd/plugin/plugin_other.go index 6e40a9c7d7f..a2883bbbadf 100644 --- a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go @@ -1,3 +1,6 @@ +//go:build !go1.8 || windows || !amd64 || static_build || gccgo +// +build !go1.8 windows !amd64 static_build gccgo + /* Copyright The containerd Authors. @@ -14,17 +17,9 @@ limitations under the License. */ -package sys - -import ( - _ "unsafe" // required for go:linkname. -) - -//go:linkname beforeFork syscall.runtime_BeforeFork -func beforeFork() - -//go:linkname afterFork syscall.runtime_AfterFork -func afterFork() +package plugin -//go:linkname afterForkInChild syscall.runtime_AfterForkInChild -func afterForkInChild() +func loadPlugins(path string) error { + // plugins not supported until 1.8 + return nil +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/reference.go b/vendor/github.com/containerd/containerd/reference/docker/reference.go index 0998639b030..6fa97dfdca9 100644 --- a/vendor/github.com/containerd/containerd/reference/docker/reference.go +++ b/vendor/github.com/containerd/containerd/reference/docker/reference.go @@ -338,11 +338,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { // TrimNamed removes any tag or digest from the named reference. 
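The TrimNamed fix (rewritten body below) preserves the registry domain for references that implement only Named rather than the internal namedRepository interface. A hedged usage sketch against this vendored package's exported API (the sketch is not part of the patch):

package main

import (
	"fmt"

	"github.com/containerd/containerd/reference/docker"
)

func main() {
	ref, err := docker.ParseNormalizedNamed("docker.io/library/ubuntu:latest")
	if err != nil {
		panic(err)
	}
	// The tag is dropped while the domain and path survive.
	fmt.Println(docker.TrimNamed(ref).Name()) // docker.io/library/ubuntu
}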
func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, + repo := repository{} + if r, ok := ref.(namedRepository); ok { + repo.domain, repo.path = r.Domain(), r.Path() + } else { + repo.domain, repo.path = splitDomain(ref.Name()) } + return repo } func getBestReferenceType(ref reference) Reference { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go index c14aacca99b..59e522cede4 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go @@ -18,6 +18,7 @@ package shim import ( "context" + "errors" "flag" "fmt" "io" @@ -30,21 +31,15 @@ import ( "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/shutdown" + "github.com/containerd/containerd/plugin" shimapi "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/containerd/version" "github.com/containerd/ttrpc" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// Client for a shim server -type Client struct { - service shimapi.TaskService - context context.Context - signals chan os.Signal -} - // Publisher for events type Publisher interface { events.Publisher @@ -53,22 +48,37 @@ type Publisher interface { // StartOpts describes shim start configuration received from containerd type StartOpts struct { - ID string + ID string // TODO(2.0): Remove ID, passed directly to start for call symmetry ContainerdBinary string Address string TTRPCAddress string } +type StopStatus struct { + Pid int + ExitStatus int + ExitedAt time.Time +} + // Init func for the creation of a shim server +// TODO(2.0): Remove init function type Init func(context.Context, string, Publisher, func()) (Shim, error) // Shim server interface +// TODO(2.0): Remove unified shim interface type Shim interface { shimapi.TaskService Cleanup(ctx context.Context) (*shimapi.DeleteResponse, error) StartShim(ctx context.Context, opts StartOpts) (string, error) } +// Manager is the interface which manages the shim process +type Manager interface { + Name() string + Start(ctx context.Context, id string, opts StartOpts) (string, error) + Stop(ctx context.Context, id string) (StopStatus, error) +} + // OptsKey is the context key for the Opts value. 
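The new Manager interface above replaces the monolithic Shim interface as the entry point for shim v2 binaries; RunManager, added further down, drives it. An illustrative skeleton (not a functional shim; the runtime name and socket address are invented for the example):

package main

import (
	"context"
	"time"

	"github.com/containerd/containerd/runtime/v2/shim"
)

type nopManager struct{}

func (nopManager) Name() string { return "io.containerd.example.v1" }

// Start would launch the runtime for id and return its ttrpc socket address.
func (nopManager) Start(ctx context.Context, id string, opts shim.StartOpts) (string, error) {
	return "unix:///run/example.sock", nil // hypothetical address
}

// Stop would tear the task down and report how it exited.
func (nopManager) Stop(ctx context.Context, id string) (shim.StopStatus, error) {
	return shim.StopStatus{Pid: 0, ExitStatus: 0, ExitedAt: time.Now()}, nil
}

func main() {
	// RunManager parses the standard shim flags and serves the ttrpc API.
	shim.RunManager(context.Background(), nopManager{})
}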
type OptsKey struct{} @@ -91,10 +101,23 @@ type Config struct { NoSetupLogger bool } +type ttrpcService interface { + RegisterTTRPC(*ttrpc.Server) error +} + +type taskService struct { + shimapi.TaskService +} + +func (t taskService) RegisterTTRPC(server *ttrpc.Server) error { + shimapi.RegisterTaskService(server, t.TaskService) + return nil +} + var ( debugFlag bool versionFlag bool - idFlag string + id string namespaceFlag string socketFlag string bundlePath string @@ -111,7 +134,7 @@ func parseFlags() { flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") flag.BoolVar(&versionFlag, "v", false, "show the shim version and exit") flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") - flag.StringVar(&idFlag, "id", "", "id of the task") + flag.StringVar(&id, "id", "", "id of the task") flag.StringVar(&socketFlag, "socket", "", "socket path to serve") flag.StringVar(&bundlePath, "bundle", "", "path to the bundle if not workdir") @@ -136,35 +159,85 @@ func setRuntime() { } } -func setLogger(ctx context.Context, id string) error { - logrus.SetFormatter(&logrus.TextFormatter{ +func setLogger(ctx context.Context, id string) (context.Context, error) { + l := log.G(ctx) + l.Logger.SetFormatter(&logrus.TextFormatter{ TimestampFormat: log.RFC3339NanoFixed, FullTimestamp: true, }) if debugFlag { - logrus.SetLevel(logrus.DebugLevel) + l.Logger.SetLevel(logrus.DebugLevel) } f, err := openLog(ctx, id) if err != nil { - return err + return ctx, err } - logrus.SetOutput(f) - return nil + l.Logger.SetOutput(f) + return log.WithLogger(ctx, l), nil } // Run initializes and runs a shim server -func Run(id string, initFunc Init, opts ...BinaryOpts) { +// TODO(2.0): Remove function +func Run(name string, initFunc Init, opts ...BinaryOpts) { + var config Config + for _, o := range opts { + o(&config) + } + + ctx := context.Background() + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", name)) + + if err := run(ctx, nil, initFunc, name, config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", name, err) + os.Exit(1) + } +} + +// TODO(2.0): Remove this type +type shimToManager struct { + shim Shim + name string +} + +func (stm shimToManager) Name() string { + return stm.name +} + +func (stm shimToManager) Start(ctx context.Context, id string, opts StartOpts) (string, error) { + opts.ID = id + return stm.shim.StartShim(ctx, opts) +} + +func (stm shimToManager) Stop(ctx context.Context, id string) (StopStatus, error) { + // shim must already have id + dr, err := stm.shim.Cleanup(ctx) + if err != nil { + return StopStatus{}, err + } + return StopStatus{ + Pid: int(dr.Pid), + ExitStatus: int(dr.ExitStatus), + ExitedAt: dr.ExitedAt, + }, nil +} + +// RunManager initialzes and runs a shim server +// TODO(2.0): Rename to Run +func RunManager(ctx context.Context, manager Manager, opts ...BinaryOpts) { var config Config for _, o := range opts { o(&config) } - if err := run(id, initFunc, config); err != nil { - fmt.Fprintf(os.Stderr, "%s: %s\n", id, err) + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", manager.Name())) + + if err := run(ctx, manager, nil, "", config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", manager.Name(), err) os.Exit(1) } } -func run(id string, initFunc Init, config Config) error { +func run(ctx context.Context, manager Manager, initFunc Init, name string, config Config) error { parseFlags() if versionFlag { fmt.Printf("%s:\n", os.Args[0]) @@ -199,27 +272,49 @@ func run(id string, initFunc Init, config Config) error { } defer 
publisher.Close() - ctx := namespaces.WithNamespace(context.Background(), namespaceFlag) + ctx = namespaces.WithNamespace(ctx, namespaceFlag) ctx = context.WithValue(ctx, OptsKey{}, Opts{BundlePath: bundlePath, Debug: debugFlag}) - ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", id)) - ctx, cancel := context.WithCancel(ctx) - service, err := initFunc(ctx, idFlag, publisher, cancel) - if err != nil { - return err + ctx, sd := shutdown.WithShutdown(ctx) + defer sd.Shutdown() + + if manager == nil { + service, err := initFunc(ctx, id, publisher, sd.Shutdown) + if err != nil { + return err + } + plugin.Register(&plugin.Registration{ + Type: plugin.TTRPCPlugin, + ID: "task", + Requires: []plugin.Type{ + plugin.EventPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return taskService{service}, nil + }, + }) + manager = shimToManager{ + shim: service, + name: name, + } } + // Handle explicit actions switch action { case "delete": - logger := logrus.WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(logrus.Fields{ "pid": os.Getpid(), "namespace": namespaceFlag, }) - go handleSignals(ctx, logger, signals) - response, err := service.Cleanup(ctx) + go reap(ctx, logger, signals) + ss, err := manager.Stop(ctx, id) if err != nil { return err } - data, err := proto.Marshal(response) + data, err := proto.Marshal(&shimapi.DeleteResponse{ + Pid: uint32(ss.Pid), + ExitStatus: uint32(ss.ExitStatus), + ExitedAt: ss.ExitedAt, + }) if err != nil { return err } @@ -229,12 +324,12 @@ func run(id string, initFunc Init, config Config) error { return nil case "start": opts := StartOpts{ - ID: idFlag, ContainerdBinary: containerdBinaryFlag, Address: addressFlag, TTRPCAddress: ttrpcAddress, } - address, err := service.StartShim(ctx, opts) + + address, err := manager.Start(ctx, id, opts) if err != nil { return err } @@ -242,46 +337,120 @@ func run(id string, initFunc Init, config Config) error { return err } return nil - default: - if !config.NoSetupLogger { - if err := setLogger(ctx, idFlag); err != nil { - return err - } + } + + if !config.NoSetupLogger { + ctx, err = setLogger(ctx, id) + if err != nil { + return err } - client := NewShimClient(ctx, service, signals) - if err := client.Serve(); err != nil { - if err != context.Canceled { - return err + } + + plugin.Register(&plugin.Registration{ + Type: plugin.InternalPlugin, + ID: "shutdown", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return sd, nil + }, + }) + + // Register event plugin + plugin.Register(&plugin.Registration{ + Type: plugin.EventPlugin, + ID: "publisher", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return publisher, nil + }, + }) + + var ( + initialized = plugin.NewPluginSet() + ttrpcServices = []ttrpcService{} + ) + plugins := plugin.Graph(func(*plugin.Registration) bool { return false }) + for _, p := range plugins { + id := p.URI() + log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id) + + initContext := plugin.NewContext( + ctx, + p, + initialized, + // NOTE: Root is empty since the shim does not support persistent storage, + // shim plugins should make use state directory for writing files to disk. + // The state directory will be destroyed when the shim if cleaned up or + // on reboot + "", + bundlePath, + ) + initContext.Address = addressFlag + initContext.TTRPCAddress = ttrpcAddress + + // load the plugin specific configuration if it is provided + //TODO: Read configuration passed into shim, or from state directory? 
+ //if p.Config != nil { + // pc, err := config.Decode(p) + // if err != nil { + // return nil, err + // } + // initContext.Config = pc + //} + + result := p.Init(initContext) + if err := initialized.Add(result); err != nil { + return fmt.Errorf("could not add plugin result to plugin set: %w", err) + } + + instance, err := result.Instance() + if err != nil { + if plugin.IsSkipPlugin(err) { + log.G(ctx).WithError(err).WithField("type", p.Type).Infof("skip loading plugin %q...", id) + } else { + log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id) } + continue + } + + if src, ok := instance.(ttrpcService); ok { + logrus.WithField("id", id).Debug("registering ttrpc service") + ttrpcServices = append(ttrpcServices, src) } + } + + server, err := newServer() + if err != nil { + return fmt.Errorf("failed creating server: %w", err) + } - // NOTE: If the shim server is down(like oom killer), the address - // socket might be leaking. - if address, err := ReadAddress("address"); err == nil { - _ = RemoveSocket(address) + for _, srv := range ttrpcServices { + if err := srv.RegisterTTRPC(server); err != nil { + return fmt.Errorf("failed to register service: %w", err) } + } - select { - case <-publisher.Done(): - return nil - case <-time.After(5 * time.Second): - return errors.New("publisher not closed") + if err := serve(ctx, server, signals, sd.Shutdown); err != nil { + if err != shutdown.ErrShutdown { + return err } } -} -// NewShimClient creates a new shim server client -func NewShimClient(ctx context.Context, svc shimapi.TaskService, signals chan os.Signal) *Client { - s := &Client{ - service: svc, - context: ctx, - signals: signals, + // NOTE: If the shim server is down(like oom killer), the address + // socket might be leaking. + if address, err := ReadAddress("address"); err == nil { + _ = RemoveSocket(address) + } + + select { + case <-publisher.Done(): + return nil + case <-time.After(5 * time.Second): + return errors.New("publisher not closed") } - return s } -// Serve the shim server -func (s *Client) Serve() error { +// serve serves the ttrpc API over a unix socket in the current working directory +// and blocks until the context is canceled +func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, shutdown func()) error { dump := make(chan os.Signal, 32) setupDumpStacks(dump) @@ -289,18 +458,19 @@ func (s *Client) Serve() error { if err != nil { return err } - server, err := newServer() - if err != nil { - return errors.Wrap(err, "failed creating server") - } - - logrus.Debug("registering ttrpc server") - shimapi.RegisterTaskService(server, s.service) - if err := serve(s.context, server, socketFlag); err != nil { + l, err := serveListener(socketFlag) + if err != nil { return err } - logger := logrus.WithFields(logrus.Fields{ + go func() { + defer l.Close() + if err := server.Serve(ctx, l); err != nil && + !strings.Contains(err.Error(), "use of closed network connection") { + log.G(ctx).WithError(err).Fatal("containerd-shim: ttrpc server failure") + } + }() + logger := log.G(ctx).WithFields(logrus.Fields{ "pid": os.Getpid(), "path": path, "namespace": namespaceFlag, @@ -310,24 +480,9 @@ func (s *Client) Serve() error { dumpStacks(logger) } }() - return handleSignals(s.context, logger, s.signals) -} -// serve serves the ttrpc API over a unix socket at the provided path -// this function does not block -func serve(ctx context.Context, server *ttrpc.Server, path string) error { - l, err := serveListener(path) - if err != nil { - return err - } - go func() { - 
defer l.Close() - if err := server.Serve(ctx, l); err != nil && - !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Fatal("containerd-shim: ttrpc server failure") - } - }() - return nil + go handleExitSignals(ctx, logger, shutdown) + return reap(ctx, logger, signals) } func dumpStacks(logger *logrus.Entry) { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go index 314b45cb548..fe833df01e9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go @@ -1,5 +1,3 @@ -// +build darwin - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go index 4bc636280d0..fe833df01e9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go index a61b6420892..e2dab0931ea 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,6 +21,7 @@ package shim import ( "context" + "fmt" "io" "net" "os" @@ -28,7 +30,6 @@ import ( "github.com/containerd/containerd/sys/reaper" "github.com/containerd/fifo" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -59,7 +60,7 @@ func serveListener(path string) (net.Listener, error) { path = "[inherited from parent]" } else { if len(path) > socketPathLimit { - return nil, errors.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) + return nil, fmt.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) } l, err = net.Listen("unix", path) } @@ -70,7 +71,7 @@ func serveListener(path string) (net.Listener, error) { return l, nil } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { logger.Info("starting signal loop") for { @@ -78,6 +79,8 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si case <-ctx.Done(): return ctx.Err() case s := <-signals: + // Exit signals are handled separately from this loop + // They get registered with this channel so that we can ignore such signals for short-running actions (e.g. 
`delete`) switch s { case unix.SIGCHLD: if err := reaper.Reap(); err != nil { @@ -89,6 +92,22 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si } } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { + ch := make(chan os.Signal, 32) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + + for { + select { + case s := <-ch: + logger.WithField("signal", s).Debugf("Caught exit signal") + cancel() + return + case <-ctx.Done(): + return + } + } +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return fifo.OpenFifoDup2(ctx, "log", unix.O_WRONLY, 0700, int(os.Stderr.Fd())) } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go index 7339eb2a2e4..4b098ab1630 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -20,12 +18,12 @@ package shim import ( "context" + "errors" "io" "net" "os" "github.com/containerd/ttrpc" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -48,10 +46,13 @@ func serveListener(path string) (net.Listener, error) { return nil, errors.New("not supported") } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { return errors.New("not supported") } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return nil, errors.New("not supported") } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go index 52bfaa9ce71..28ac9d1e799 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go @@ -19,26 +19,32 @@ package shim import ( "bytes" "context" + "errors" "fmt" - "io/ioutil" "net" "os" - "os/exec" "path/filepath" "strings" - "sync" "time" "github.com/containerd/containerd/namespaces" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) -var runtimePaths sync.Map +type CommandConfig struct { + Runtime string + Address string + TTRPCAddress string + Path string + SchedCore bool + Args []string + Opts *types.Any +} // Command returns the shim command with the provided args and configuration -func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAddress, path string, opts *types.Any, cmdArgs ...string) (*exec.Cmd, error) { +func Command(ctx context.Context, config *CommandConfig) (*exec.Cmd, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -49,67 +55,23 @@ func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAdd } args := []string{ "-namespace", ns, - "-address", containerdAddress, + "-address", config.Address, "-publish-binary", self, } - args = append(args, cmdArgs...) 
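Annotation: the util.go rewrite in progress here collapses Command's positional parameters into the new CommandConfig struct, and the deleted block just below also drops the cached binary-path lookup, so the resolved shim path is now passed in by the caller via Runtime. A caller-side sketch under those assumptions (socket paths and the shim binary location are illustrative, not prescribed by the diff):

    package launcher

    import (
        "context"

        "github.com/containerd/containerd/namespaces"
        "github.com/containerd/containerd/runtime/v2/shim"
    )

    func startShim(ctx context.Context, bundle string) error {
        // Command requires a namespace on the context (NamespaceRequired).
        ctx = namespaces.WithNamespace(ctx, "k8s.io")
        cmd, err := shim.Command(ctx, &shim.CommandConfig{
            Runtime:      "/usr/local/bin/containerd-shim-runc-v2", // hypothetical; path resolution is now the caller's job
            Address:      "/run/containerd/containerd.sock",
            TTRPCAddress: "/run/containerd/containerd.sock.ttrpc",
            Path:         bundle,
            SchedCore:    true, // exports SCHED_CORE=1 to the shim process
            Args:         []string{"start"},
        })
        if err != nil {
            return err
        }
        return cmd.Start()
    }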
- name := BinaryName(runtime) - if name == "" { - return nil, fmt.Errorf("invalid runtime name %s, correct runtime name should format like io.containerd.runc.v1", runtime) - } - - var cmdPath string - cmdPathI, cmdPathFound := runtimePaths.Load(name) - if cmdPathFound { - cmdPath = cmdPathI.(string) - } else { - var lerr error - binaryPath := BinaryPath(runtime) - if _, serr := os.Stat(binaryPath); serr == nil { - cmdPath = binaryPath - } - - if cmdPath == "" { - if cmdPath, lerr = exec.LookPath(name); lerr != nil { - if eerr, ok := lerr.(*exec.Error); ok { - if eerr.Err == exec.ErrNotFound { - // LookPath only finds current directory matches based on - // the callers current directory but the caller is not - // likely in the same directory as the containerd - // executables. Instead match the calling binaries path - // (containerd) and see if they are side by side. If so - // execute the shim found there. - testPath := filepath.Join(filepath.Dir(self), name) - if _, serr := os.Stat(testPath); serr == nil { - cmdPath = testPath - } - if cmdPath == "" { - return nil, errors.Wrapf(os.ErrNotExist, "runtime %q binary not installed %q", runtime, name) - } - } - } - } - } - cmdPath, err = filepath.Abs(cmdPath) - if err != nil { - return nil, err - } - if cmdPathI, cmdPathFound = runtimePaths.LoadOrStore(name, cmdPath); cmdPathFound { - // We didn't store cmdPath we loaded an already cached value. Use it. - cmdPath = cmdPathI.(string) - } - } - - cmd := exec.Command(cmdPath, args...) - cmd.Dir = path + args = append(args, config.Args...) + cmd := exec.CommandContext(ctx, config.Runtime, args...) + cmd.Dir = config.Path cmd.Env = append( os.Environ(), "GOMAXPROCS=2", - fmt.Sprintf("%s=%s", ttrpcAddressEnv, containerdTTRPCAddress), + fmt.Sprintf("%s=%s", ttrpcAddressEnv, config.TTRPCAddress), ) + if config.SchedCore { + cmd.Env = append(cmd.Env, "SCHED_CORE=1") + } cmd.SysProcAttr = getSysProcAttr() - if opts != nil { - d, err := proto.Marshal(opts) + if config.Opts != nil { + d, err := proto.Marshal(config.Opts) if err != nil { return nil, err } @@ -196,7 +158,7 @@ func ReadAddress(path string) (string, error) { if err != nil { return "", err } - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go index f956b0986bd..4e2309a8069 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -29,10 +30,9 @@ import ( "syscall" "time" + "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/sys" - "github.com/pkg/errors" ) const ( @@ -53,16 +53,16 @@ func AdjustOOMScore(pid int) error { parent := os.Getppid() score, err := sys.GetOOMScoreAdj(parent) if err != nil { - return errors.Wrap(err, "get parent OOM score") + return fmt.Errorf("get parent OOM score: %w", err) } shimScore := score + 1 if err := sys.AdjustOOMScore(pid, shimScore); err != nil { - return errors.Wrap(err, "set shim OOM score") + return fmt.Errorf("set shim OOM score: %w", err) } return nil } -const socketRoot = "/run/containerd" +const socketRoot = defaults.DefaultStateDir // SocketAddress returns a socket address func SocketAddress(ctx context.Context, socketPath, 
id string) (string, error) { @@ -76,7 +76,7 @@ func SocketAddress(ctx context.Context, socketPath, id string) (string, error) { // AnonDialer returns a dialer for a socket func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { - return dialer.Dialer(socket(address).path(), timeout) + return net.DialTimeout("unix", socket(address).path(), timeout) } // AnonReconnectDialer returns a dialer for an existing socket on reconnection @@ -90,19 +90,25 @@ func NewSocket(address string) (*net.UnixListener, error) { sock = socket(address) path = sock.path() ) - if !sock.isAbstract() { + + isAbstract := sock.isAbstract() + + if !isAbstract { if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil { - return nil, errors.Wrapf(err, "%s", path) + return nil, fmt.Errorf("%s: %w", path, err) } } l, err := net.Listen("unix", path) if err != nil { return nil, err } - if err := os.Chmod(path, 0600); err != nil { - os.Remove(sock.path()) - l.Close() - return nil, err + + if !isAbstract { + if err := os.Chmod(path, 0600); err != nil { + os.Remove(sock.path()) + l.Close() + return nil, err + } } return l.(*net.UnixListener), nil } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go index 325c290043f..b9042841b1a 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go @@ -18,13 +18,13 @@ package shim import ( "context" + "fmt" "net" "os" "syscall" "time" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" ) const shimBinaryFormat = "containerd-shim-%s-%s.exe" @@ -40,9 +40,9 @@ func AnonReconnectDialer(address string, timeout time.Duration) (net.Conn, error c, err := winio.DialPipeContext(ctx, address) if os.IsNotExist(err) { - return nil, errors.Wrap(os.ErrNotExist, "npipe not found on reconnect") + return nil, fmt.Errorf("npipe not found on reconnect: %w", os.ErrNotExist) } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } else if err != nil { return nil, err } @@ -65,14 +65,14 @@ func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { if os.IsNotExist(err) { select { case <-serveTimer.C: - return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout") + return nil, fmt.Errorf("pipe not found before timeout: %w", os.ErrNotExist) default: // Wait 10ms for the shim to serve and try again. 
time.Sleep(10 * time.Millisecond) continue } } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } return nil, err } diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/sys/epoll.go index 28d6c2cabc6..73a57013ff1 100644 --- a/vendor/github.com/containerd/containerd/sys/epoll.go +++ b/vendor/github.com/containerd/containerd/sys/epoll.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/sys/fds.go index db3cf702f49..a71a9cd7e93 100644 --- a/vendor/github.com/containerd/containerd/sys/fds.go +++ b/vendor/github.com/containerd/containerd/sys/fds.go @@ -1,3 +1,4 @@ +//go:build !windows && !darwin // +build !windows,!darwin /* @@ -19,14 +20,14 @@ package sys import ( - "io/ioutil" + "os" "path/filepath" "strconv" ) // GetOpenFds returns the number of open fds for the process provided by pid func GetOpenFds(pid int) (int, error) { - dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) + dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) if err != nil { return -1, err } diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go index d8329af9fbc..805a7a736fa 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go index a9198ef399a..87ebacc2006 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
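Annotation: a recurring change in these files (and throughout the rest of this diff) is replacing github.com/pkg/errors with stdlib wrapping via fmt.Errorf and the %w verb. %w keeps the cause in the chain, so errors.Is and errors.As still match; note that the legacy os.IsNotExist helper does not unwrap, which is why inspection sites are expected to use errors.Is. A small self-contained illustration:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func main() {
        // Equivalent of the removed errors.Wrap(os.ErrNotExist, "npipe not found on reconnect"):
        wrapped := fmt.Errorf("npipe not found on reconnect: %w", os.ErrNotExist)

        fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: %w preserves the chain
        fmt.Println(os.IsNotExist(wrapped))             // false: the legacy helper does not unwrap
    }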
@@ -19,6 +17,7 @@ package sys import ( + "fmt" "os" "path/filepath" "regexp" @@ -29,7 +28,6 @@ import ( "unsafe" "github.com/Microsoft/hcsshim" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -270,7 +268,7 @@ func ForceRemoveAll(path string) error { snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { if err := cleanupWCOWLayers(snapshotDir); err != nil { - return errors.Wrapf(err, "failed to cleanup WCOW layers in %s", snapshotDir) + return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err) } } @@ -280,12 +278,22 @@ func ForceRemoveAll(path string) error { func cleanupWCOWLayers(root string) error { // See snapshots/windows/windows.go getSnapshotDir() var layerNums []int + var rmLayerNums []int if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if path != root && info.IsDir() { - if layerNum, err := strconv.Atoi(filepath.Base(path)); err == nil { - layerNums = append(layerNums, layerNum) + name := filepath.Base(path) + if strings.HasPrefix(name, "rm-") { + layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-")) + if err != nil { + return err + } + rmLayerNums = append(rmLayerNums, layerNum) } else { - return err + layerNum, err := strconv.Atoi(name) + if err != nil { + return err + } + layerNums = append(layerNums, layerNum) } return filepath.SkipDir } @@ -295,8 +303,14 @@ func cleanupWCOWLayers(root string) error { return err } - sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) + sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums))) + for _, rmLayerNum := range rmLayerNums { + if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil { + return err + } + } + sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) for _, layerNum := range layerNums { if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { return err @@ -311,19 +325,20 @@ func cleanupWCOWLayer(layerPath string) error { HomeDir: filepath.Dir(layerPath), } - // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared. + // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated. + // ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared. 
if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { - if hcserror, ok := err.(*hcsshim.HcsError); !ok || hcserror.Err != windows.ERROR_DEV_NOT_EXIST { - return errors.Wrapf(err, "failed to unprepare %s", layerPath) + if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) { + return fmt.Errorf("failed to unprepare %s: %w", layerPath, err) } } if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to deactivate %s", layerPath) + return fmt.Errorf("failed to deactivate %s: %w", layerPath, err) } if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to destroy %s", layerPath) + return fmt.Errorf("failed to destroy %s: %w", layerPath, err) } return nil diff --git a/vendor/github.com/containerd/containerd/sys/mount_linux.go b/vendor/github.com/containerd/containerd/sys/mount_linux.go deleted file mode 100644 index a21045529ae..00000000000 --- a/vendor/github.com/containerd/containerd/sys/mount_linux.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "runtime" - "syscall" - "unsafe" - - "github.com/containerd/containerd/log" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -// FMountat performs mount from the provided directory. 
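Annotation: the cleanupWCOWLayers rewrite just above now makes two passes over the snapshots directory: entries already renamed with an "rm-" prefix (snapshots whose removal was in flight) are destroyed first, then the regular numbered layers, each group in descending numeric order so the newest layers are torn down first. A standalone sketch of that ordering logic, mirroring the hunk:

    package main

    import (
        "fmt"
        "sort"
        "strconv"
        "strings"
    )

    // orderForCleanup partitions snapshot directory names the way the new
    // cleanup pass does: "rm-" entries first, then the rest, each group in
    // reverse numeric order. Non-numeric names are an error, as in the hunk.
    func orderForCleanup(names []string) ([]string, error) {
        var rmNums, nums []int
        for _, name := range names {
            if strings.HasPrefix(name, "rm-") {
                n, err := strconv.Atoi(strings.TrimPrefix(name, "rm-"))
                if err != nil {
                    return nil, err
                }
                rmNums = append(rmNums, n)
            } else {
                n, err := strconv.Atoi(name)
                if err != nil {
                    return nil, err
                }
                nums = append(nums, n)
            }
        }
        sort.Sort(sort.Reverse(sort.IntSlice(rmNums)))
        sort.Sort(sort.Reverse(sort.IntSlice(nums)))
        var ordered []string
        for _, n := range rmNums {
            ordered = append(ordered, "rm-"+strconv.Itoa(n))
        }
        for _, n := range nums {
            ordered = append(ordered, strconv.Itoa(n))
        }
        return ordered, nil
    }

    func main() {
        order, _ := orderForCleanup([]string{"1", "rm-3", "2", "rm-1"})
        fmt.Println(order) // [rm-3 rm-1 2 1]
    }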
-func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error { - var ( - sourceP, targetP, fstypeP, dataP *byte - pid uintptr - err error - errno, status syscall.Errno - ) - - sourceP, err = syscall.BytePtrFromString(source) - if err != nil { - return err - } - - targetP, err = syscall.BytePtrFromString(target) - if err != nil { - return err - } - - fstypeP, err = syscall.BytePtrFromString(fstype) - if err != nil { - return err - } - - if data != "" { - dataP, err = syscall.BytePtrFromString(data) - if err != nil { - return err - } - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - var pipefds [2]int - if err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil { - return errors.Wrap(err, "failed to open pipe") - } - - defer func() { - // close both ends of the pipe in a deferred function, since open file - // descriptor table is shared with child - syscall.Close(pipefds[0]) - syscall.Close(pipefds[1]) - }() - - pid, errno = forkAndMountat(dirfd, - uintptr(unsafe.Pointer(sourceP)), - uintptr(unsafe.Pointer(targetP)), - uintptr(unsafe.Pointer(fstypeP)), - flags, - uintptr(unsafe.Pointer(dataP)), - pipefds[1], - ) - - if errno != 0 { - return errors.Wrap(errno, "failed to fork thread") - } - - defer func() { - _, err := unix.Wait4(int(pid), nil, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), nil, 0, nil) - } - - if err != nil { - log.L.WithError(err).Debugf("failed to find pid=%d process", pid) - } - }() - - _, _, errno = syscall.RawSyscall(syscall.SYS_READ, - uintptr(pipefds[0]), - uintptr(unsafe.Pointer(&status)), - unsafe.Sizeof(status)) - if errno != 0 { - return errors.Wrap(errno, "failed to read pipe") - } - - if status != 0 { - return errors.Wrap(status, "failed to mount") - } - - return nil -} - -// forkAndMountat will fork thread, change working dir and mount. -// -// precondition: the runtime OS thread must be locked. -func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr, pipefd int) (pid uintptr, errno syscall.Errno) { - - // block signal during clone - beforeFork() - - // the cloned thread shares the open file descriptor, but the thread - // never be reused by runtime. - pid, _, errno = syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|syscall.CLONE_FILES, 0, 0, 0, 0, 0) - if errno != 0 || pid != 0 { - // restore all signals - afterFork() - return - } - - // restore all signals - afterForkInChild() - - // change working dir - _, _, errno = syscall.RawSyscall(syscall.SYS_FCHDIR, dirfd, 0, 0) - if errno != 0 { - goto childerr - } - _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, source, target, fstype, flags, data, 0) - -childerr: - _, _, errno = syscall.RawSyscall(syscall.SYS_WRITE, uintptr(pipefd), uintptr(unsafe.Pointer(&errno)), unsafe.Sizeof(errno)) - syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) - panic("unreachable") -} diff --git a/vendor/github.com/containerd/containerd/sys/oom_linux.go b/vendor/github.com/containerd/containerd/sys/oom_linux.go index 82a347c6f72..bb2a3eafb4f 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_linux.go +++ b/vendor/github.com/containerd/containerd/sys/oom_linux.go @@ -18,7 +18,6 @@ package sys import ( "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -69,7 +68,7 @@ func SetOOMScore(pid, score int) error { // no oom score is set, or a sore is set to 0. 
func GetOOMScoreAdj(pid int) (int, error) { path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go index f5d7e9786b8..fa0db5a10e0 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go +++ b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go index 0033178dfa7..6c4f13b9088 100644 --- a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go +++ b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,12 +20,14 @@ package reaper import ( - "os/exec" + "errors" + "fmt" "sync" + "syscall" "time" runc "github.com/containerd/go-runc" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -115,6 +118,38 @@ func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) { return -1, ErrNoSuchProcess } +// WaitTimeout is used to skip the blocked command and kill the left process. +func (m *Monitor) WaitTimeout(c *exec.Cmd, ec chan runc.Exit, timeout time.Duration) (int, error) { + type exitStatusWrapper struct { + status int + err error + } + + // capacity can make sure that the following goroutine will not be + // blocked if there is no receiver when timeout. + waitCh := make(chan *exitStatusWrapper, 1) + go func() { + defer close(waitCh) + + status, err := m.Wait(c, ec) + waitCh <- &exitStatusWrapper{ + status: status, + err: err, + } + }() + + timer := time.NewTimer(timeout) + defer timer.Stop() + + select { + case <-timer.C: + syscall.Kill(c.Process.Pid, syscall.SIGKILL) + return 0, fmt.Errorf("timeout %v for cmd(pid=%d): %s, %s", timeout, c.Process.Pid, c.Path, c.Args) + case res := <-waitCh: + return res.status, res.err + } +} + // Subscribe to process exit changes func (m *Monitor) Subscribe() chan runc.Exit { c := make(chan runc.Exit, bufferSize) diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go index b67cc1fa3a7..367e19cad84 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,11 +20,11 @@ package sys import ( + "fmt" "net" "os" "path/filepath" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -31,7 +32,7 @@ import ( func CreateUnixSocket(path string) (net.Listener, error) { // BSDs have a 104 limit if len(path) > 104 { - return nil, errors.Errorf("%q: unix socket path too long (> 104)", path) + return nil, fmt.Errorf("%q: unix socket path too long (> 104)", path) } if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil { return nil, err diff --git a/vendor/github.com/containerd/containerd/sys/socket_windows.go b/vendor/github.com/containerd/containerd/sys/socket_windows.go index 3ee7679b49b..1ae12bc5114 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_windows.go +++ b/vendor/github.com/containerd/containerd/sys/socket_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
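Annotation: the new Monitor.WaitTimeout added above bounds how long the shim will wait on a command: the wait runs in a goroutine that reports through a buffered channel (so it can never block after a timeout), and the child is SIGKILLed if the timer fires first. The same shape in a free-standing, Unix-only sketch (stdlib only; not the reaper API itself):

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    // waitTimeout mirrors the WaitTimeout pattern: capacity-1 channel so the
    // waiting goroutine can always deliver its result and exit, even when
    // nobody is left to receive it after a timeout.
    func waitTimeout(c *exec.Cmd, timeout time.Duration) error {
        waitCh := make(chan error, 1)
        go func() { waitCh <- c.Wait() }()

        timer := time.NewTimer(timeout)
        defer timer.Stop()

        select {
        case <-timer.C:
            syscall.Kill(c.Process.Pid, syscall.SIGKILL)
            return fmt.Errorf("timeout %v for cmd(pid=%d): %s", timeout, c.Process.Pid, c.Path)
        case err := <-waitCh:
            return err
        }
    }

    func main() {
        cmd := exec.Command("sleep", "10")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        fmt.Println(waitTimeout(cmd, time.Second)) // times out and kills sleep
    }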
diff --git a/vendor/github.com/containerd/containerd/sys/stat_bsd.go b/vendor/github.com/containerd/containerd/sys/stat_bsd.go deleted file mode 100644 index 4f03cd6cb0f..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_bsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build darwin freebsd netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the access time from a stat struct -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atimespec -} - -// StatCtime returns the created time from a stat struct -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctimespec -} - -// StatMtime returns the modified time from a stat struct -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtimespec -} - -// StatATimeAsTime returns the access time as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/sys/stat_openbsd.go b/vendor/github.com/containerd/containerd/sys/stat_openbsd.go deleted file mode 100644 index ec3b9df69aa..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_openbsd.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - // The int64 conversions ensure the line compiles for 32-bit systems as well. - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/sys/stat_unix.go b/vendor/github.com/containerd/containerd/sys/stat_unix.go deleted file mode 100644 index 21a666dff86..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_unix.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build linux solaris - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index b8200307f3a..a92784ef819 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.5.8+unknown" + Version = "1.6.1+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 708b2668990..0da3efe4c21 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -26,9 +26,10 @@ import ( "archive/tar" "bytes" "compress/gzip" + "context" + "errors" "fmt" "io" - "io/ioutil" "os" "path" "runtime" @@ -38,7 +39,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -48,6 +48,7 @@ type options struct { prioritizedFiles []string missedPrioritizedFiles *[]string compression Compression + ctx context.Context } type Option func(o *options) error @@ -104,6 +105,14 @@ func WithCompression(compression Compression) Option { } } +// WithContext specifies a context that can be used for clean canceleration. +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.ctx = ctx + return nil + } +} + // Blob is an eStargz blob. 
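Annotation: the WithContext option added above gives estargz.Build a cancellation hook; the next hunk wires it up so that a canceled context removes Build's temporary files mid-conversion, with CleanupAll made idempotent via sync.Once to tolerate the resulting race. A caller-side sketch, assuming a hypothetical layer.tar input:

    package main

    import (
        "context"
        "io"
        "os"
        "time"

        "github.com/containerd/stargz-snapshotter/estargz"
    )

    func main() {
        f, err := os.Open("layer.tar") // hypothetical input layer tarball
        if err != nil {
            panic(err)
        }
        defer f.Close()
        fi, err := f.Stat()
        if err != nil {
            panic(err)
        }

        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        // If ctx is canceled while Build is running, the new option lets it
        // clean up its temp files instead of leaving them behind.
        blob, err := estargz.Build(
            io.NewSectionReader(f, 0, fi.Size()),
            estargz.WithContext(ctx),
        )
        if err != nil {
            panic(err)
        }
        defer blob.Close()
        _, _ = io.Copy(os.Stdout, blob)
    }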
type Blob struct { io.ReadCloser @@ -139,12 +148,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) } layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() defer func() { if rErr != nil { if err := layerFiles.CleanupAll(); err != nil { - rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err) + rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) } } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } }() tarBlob, err := decompressBlob(tarBlob, layerFiles) if err != nil { @@ -307,7 +333,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri // Import tar file. intar, err := importTar(in) if err != nil { - return nil, errors.Wrap(err, "failed to sort") + return nil, fmt.Errorf("failed to sort: %w", err) } // Sort the tar file respecting to the prioritized files list. @@ -318,7 +344,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri *missedPrioritized = append(*missedPrioritized, l) continue // allow not found } - return nil, errors.Wrap(err, "failed to sort tar entries") + return nil, fmt.Errorf("failed to sort tar entries: %w", err) } } if len(prioritized) == 0 { @@ -371,7 +397,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) { tf := &tarFile{} pw, err := newCountReader(in) if err != nil { - return nil, errors.Wrap(err, "failed to make position watcher") + return nil, fmt.Errorf("failed to make position watcher: %w", err) } tr := tar.NewReader(pw) @@ -383,7 +409,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) { if err == io.EOF { break } else { - return nil, errors.Wrap(err, "failed to parse tar file") + return nil, fmt.Errorf("failed to parse tar file, %w", err) } } switch cleanEntryName(h.Name) { @@ -420,7 +446,7 @@ func moveRec(name string, in *tarFile, out *tarFile) error { _, okIn := in.get(name) _, okOut := out.get(name) if !okIn && !okOut { - return errors.Wrapf(errNotFound, "file: %q", name) + return fmt.Errorf("file: %q: %w", name, errNotFound) } parent, _ := path.Split(strings.TrimSuffix(name, "/")) @@ -506,12 +532,13 @@ func newTempFiles() *tempFiles { } type tempFiles struct { - files []*os.File - filesMu sync.Mutex + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once } func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { - f, err := ioutil.TempFile(dir, pattern) + f, err := os.CreateTemp(dir, pattern) if err != nil { return nil, err } @@ -521,7 +548,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { return f, nil } -func (tf *tempFiles) CleanupAll() error { +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { tf.filesMu.Lock() defer tf.filesMu.Unlock() var allErr []error diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index e56319545e6..921e59ec6ef 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -27,10 +27,10 @@ import ( "bytes" "compress/gzip" "crypto/sha256" + 
"errors" "fmt" "hash" "io" - "io/ioutil" "os" "path" "sort" @@ -40,7 +40,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/vbatts/tar-split/archive/tar" ) @@ -107,7 +106,7 @@ type Telemetry struct { } // Open opens a stargz file for reading. -// The behaviour is configurable using options. +// The behavior is configurable using options. // // Note that each entry name is normalized as the path that is relative to root. func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { @@ -385,8 +384,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) { if e.Digest != "" { d, err := digest.Parse(e.Digest) if err != nil { - return nil, errors.Wrapf(err, - "failed to parse regular file digest %q", e.Digest) + return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err) } regDigestMap[e.Offset] = d } else { @@ -401,8 +399,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) { if e.ChunkDigest != "" { d, err := digest.Parse(e.ChunkDigest) if err != nil { - return nil, errors.Wrapf(err, - "failed to parse chunk digest %q", e.ChunkDigest) + return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err) } chunkDigestMap[e.Offset] = d } else { @@ -581,7 +578,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) } return io.ReadFull(dr, p) @@ -647,7 +644,7 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { } blobPayloadSize, _, _, err := c.ParseFooter(footer) if err != nil { - return nil, errors.Wrapf(err, "failed to parse footer") + return nil, fmt.Errorf("failed to parse footer: %w", err) } return c.Reader(io.LimitReader(sr, blobPayloadSize)) } @@ -935,7 +932,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } } } - remainDest := ioutil.Discard + remainDest := io.Discard if lossless { remainDest = dst // Preserve the remaining bytes in lossless mode } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index 7330849cb89..591d7a62e11 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -34,7 +34,6 @@ import ( "strconv" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type gzipCompression struct { @@ -150,7 +149,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t } tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset") + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) } return tocOffset, tocOffset, 0, nil } @@ -179,7 +178,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff } zr, err := gzip.NewReader(bytes.NewReader(p)) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader") + return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) } defer zr.Close() extra := zr.Header.Extra @@ -191,7 +190,7 @@ func (gz 
*LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff } tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset") + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) } return tocOffset, tocOffset, 0, nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 9224e456dde..8f27dfb3ea2 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -28,9 +28,9 @@ import ( "compress/gzip" "crypto/sha256" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "os" "reflect" "sort" @@ -41,7 +41,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // TestingController is Compression with some helper methods necessary for testing. @@ -287,11 +286,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return false } - aFile, err := ioutil.ReadAll(aTar) + aFile, err := io.ReadAll(aTar) if err != nil { t.Fatal("failed to read tar payload of A") } - bFile, err := ioutil.ReadAll(bTar) + bFile, err := io.ReadAll(bTar) if err != nil { t.Fatal("failed to read tar payload of B") } @@ -1062,18 +1061,18 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT fSize := controller.FooterSize() footer := make([]byte, fSize) if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil { - return nil, 0, errors.Wrap(err, "error reading footer") + return nil, 0, fmt.Errorf("error reading footer: %w", err) } _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) if err != nil { - return nil, 0, errors.Wrapf(err, "failed to parse footer") + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) } // Decode the TOC JSON tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) decodedJTOC, _, err = controller.ParseTOC(tocReader) if err != nil { - return nil, 0, errors.Wrap(err, "failed to parse TOC") + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) } return decodedJTOC, tocOffset, nil } diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index e1649ba8e18..d28cc4a3ffd 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "reflect" "strings" @@ -15,8 +14,10 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagesource" "github.com/containers/image/v5/internal/pkg/platform" - internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" "github.com/containers/image/v5/pkg/compression" @@ -31,7 +32,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb/v7" - "github.com/vbauerster/mpb/v7/decor" "golang.org/x/sync/semaphore" 
"golang.org/x/term" ) @@ -63,8 +63,8 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - dest types.ImageDestination - rawSource types.ImageSource + dest private.ImageDestination + rawSource private.ImageSource reportWriter io.Writer progressOutput io.Writer progressInterval time.Duration @@ -80,13 +80,13 @@ type copier struct { // imageCopier tracks state specific to a single image (possibly an item of a manifest list) type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src types.Image - diffIDsAreNeeded bool - canModifyManifest bool - canSubstituteBlobs bool - ociEncryptLayers *[]int + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + ociEncryptLayers *[]int } const ( @@ -122,17 +122,23 @@ type ImageListSelection int // Options allows supplying non-default configuration modifying the behavior of CopyImage. type Options struct { - RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. - SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. + SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + SignPassphrase string // Passphare to use when signing with the key ID from `SignBy`. + SignIdentity reference.Named // Identify to use when signing, defaults to the docker reference of the destination ReportWriter io.Writer SourceCtx *types.SystemContext DestinationCtx *types.SystemContext ProgressInterval time.Duration // time to wait between reports to signal the progress channel Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + + // Preserve digests, and fail if we cannot. + PreserveDigests bool // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type ForceManifestMIMEType string ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself + // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted. // The encryption options is derived from the construction of EncryptConfig object. 
// Note: During initial encryption process of a layer, the resultant digest is not known @@ -191,26 +197,28 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, return nil, err } - reportWriter := ioutil.Discard + reportWriter := io.Discard if options.ReportWriter != nil { reportWriter = options.ReportWriter } - dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) if err != nil { return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef)) } + dest := imagedestination.FromPublic(publicDest) defer func() { if err := dest.Close(); err != nil { retErr = errors.Wrapf(retErr, " (dest: %v)", err) } }() - rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) if err != nil { return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef)) } + rawSource := imagesource.FromPublic(publicRawSource) defer func() { if err := rawSource.Close(); err != nil { retErr = errors.Wrapf(retErr, " (src: %v)", err) @@ -222,7 +230,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // createProgressBar() will print a single line instead. progressOutput := reportWriter if !isTTY(reportWriter) { - progressOutput = ioutil.Discard + progressOutput = io.Discard } c := &copier{ @@ -410,7 +418,36 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference())) } } - canModifyManifestList := (len(sigs) == 0) + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, errors.Wrapf(err, "computing digest of source image's manifest") + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyOneImage. + cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } // Determine if we'll need to convert the manifest list to a different format. 
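Annotation: the block above extends the digested-destination guard to the manifest-list path: when the destination reference pins a digest, the source's manifest list must already match it byte-for-byte, because nothing may be rewritten. Roughly, as a standalone check built from the same containers/image calls the hunk uses (error texts simplified):

    package copycheck

    import (
        "fmt"

        "github.com/containers/image/v5/docker/reference"
        "github.com/containers/image/v5/manifest"
    )

    // checkDigestedDestination mirrors the new guard: a tag-only reference pins
    // nothing, but a digested one must match the bytes we are about to copy.
    func checkDigestedDestination(named reference.Named, manifestBytes []byte) error {
        digested, ok := named.(reference.Digested)
        if !ok {
            return nil // no digest pinned by the destination name
        }
        matches, err := manifest.MatchesDigest(manifestBytes, digested.Digest())
        if err != nil {
            return fmt.Errorf("computing digest of source image's manifest: %w", err)
        }
        if !matches {
            return fmt.Errorf("digest of source image's manifest would not match destination reference %s", named.String())
        }
        return nil
    }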
forceListMIMEType := options.ForceManifestMIMEType @@ -425,8 +462,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur return nil, errors.Wrapf(err, "determining manifest list type to write to destination") } if selectedListType != originalList.MIMEType() { - if !canModifyManifestList { - return nil, errors.Errorf("manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType) + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) } } @@ -510,8 +547,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // If we can't just use the original value, but we have to change it, flag an error. if !bytes.Equal(attemptedManifestList, originalManifestList) { - if !canModifyManifestList { - return nil, errors.Errorf(" manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType) + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) } logrus.Debugf("Manifest list has been updated") } else { @@ -536,7 +573,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Sign the manifest list. if options.SignBy != "" { - newSig, err := c.createSignature(manifestList, options.SignBy) + newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase, options.SignIdentity) if err != nil { return nil, err } @@ -629,13 +666,27 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } } + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. + cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + ic := imageCopier{ c: c, manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, src: src, // diffIDsAreNeeded is computed later - canModifyManifest: len(sigs) == 0 && !destIsDigestedReference, - ociEncryptLayers: options.OciEncryptLayers, + cannotModifyManifestReason: cannotModifyManifestReason, + ociEncryptLayers: options.OciEncryptLayers, } // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: @@ -643,7 +694,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, // and we would reuse and sign it. 
- ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" + ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" if err := ic.updateEmbeddedDockerReference(); err != nil { return nil, "", "", err @@ -660,8 +711,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) - // If encrypted and decryption keys provided, we should try to decrypt - ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal if options.OptimizeDestinationImageAlreadyExists { @@ -710,10 +759,10 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. // So if we are here, we will definitely be trying to convert the manifest. - // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, + // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. - if !ic.canModifyManifest { - return nil, "", "", errors.Wrap(err, "Writing manifest failed (and converting it is not possible, image is signed or the destination specifies a digest)") + if ic.cannotModifyManifestReason != "" { + return nil, "", "", errors.Wrapf(err, "Writing manifest failed and we cannot try conversions: %q", cannotModifyManifestReason) } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. @@ -744,7 +793,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } if options.SignBy != "" { - newSig, err := c.createSignature(manifestBytes, options.SignBy) + newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase, options.SignIdentity) if err != nil { return nil, "", "", err } @@ -813,9 +862,9 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error { return nil // No reference embedded in the manifest, or it matches destRef already. } - if !ic.canModifyManifest { - return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which is not possible (image is signed or the destination specifies a digest)", - transports.ImageName(ic.c.dest.Reference()), destRef.String()) + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", + transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason) } ic.manifestUpdates.EmbeddedDockerReference = destRef return nil @@ -833,7 +882,7 @@ func isTTY(w io.Writer) bool { return false } -// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". 
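Annotation: the substitution gate above captures why blob substitution is coupled to manifest mutability: swapping in an equivalent blob (say, a differently compressed copy) changes a layer digest inside the manifest, which changes the manifest bytes, which breaks any signature or pinned digest computed over the original. A tiny illustration with go-digest (the manifest bodies here are fabricated stand-ins):

    package main

    import (
        "fmt"

        "github.com/opencontainers/go-digest"
    )

    func main() {
        original := []byte(`{"layers":[{"digest":"sha256:aaa"}]}`)
        fmt.Println(digest.FromBytes(original)) // what a signature or pinned reference covers

        // Substituting one layer rewrites the manifest, hence its digest:
        substituted := []byte(`{"layers":[{"digest":"sha256:bbb"}]}`)
        fmt.Println(digest.FromBytes(substituted)) // no longer matches the original
    }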
func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfos := ic.src.LayerInfos() numLayers := len(srcInfos) @@ -844,8 +893,8 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfosUpdated := false // If we only need to check authorization, no updates required. if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { - if !ic.canModifyManifest { - return errors.Errorf("Copying this image requires changing layer representation, which is not possible (image is signed or the destination specifies a digest)") + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) } srcInfos = updatedSrcInfos srcInfosUpdated = true @@ -975,8 +1024,8 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { pendingImage := ic.src if !ic.noPendingManifestUpdates() { - if !ic.canModifyManifest { - return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") + if ic.cannotModifyManifestReason != "" { + return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) } if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. @@ -1011,96 +1060,12 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc instanceDigest = &manifestDigest } if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { - return nil, "", errors.Wrapf(err, "writing manifest %q", string(man)) + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", errors.Wrapf(err, "writing manifest") } return man, manifestDigest, nil } -// newProgressPool creates a *mpb.Progress. -// The caller must eventually call pool.Wait() after the pool will no longer be updated. -// NOTE: Every progress bar created within the progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. -func (c *copier) newProgressPool() *mpb.Progress { - return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) -} - -// customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar -func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorator { - producer := func(filler interface{}) decor.DecorFunc { - return func(s decor.Statistics) string { - if s.Total == 0 { - pairFmt := "%.1f / %.1f (skipped: %.1f)" - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) - } - pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" - percentage := 100.0 * float64(s.Refill) / float64(s.Total) - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) - } - } - return decor.Any(producer(filler), wcc...) -} - -// createProgressBar creates a mpb.Bar in pool. 
Note that if the copier's reportWriter -// is ioutil.Discard, the progress bar's output will be discarded -// NOTE: Every progress bar created within a progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called. -func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { - // shortDigestLen is the length of the digest used for blobs. - const shortDigestLen = 12 - - prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) - // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column. - maxPrefixLen := len("Copying blob ") + shortDigestLen - if len(prefix) > maxPrefixLen { - prefix = prefix[:maxPrefixLen] - } - - // onComplete will replace prefix once the bar/spinner has completed - onComplete = prefix + " " + onComplete - - // Use a normal progress bar when we know the size (i.e., size > 0). - // Otherwise, use a spinner to indicate that something's happening. - var bar *mpb.Bar - sstyle := mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft() - if info.Size > 0 { - if partial { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - customPartialBlobCounter(sstyle.Build()), - ), - ) - } else { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), - ), - ) - } - } else { - bar = pool.Add(0, - sstyle.Build(), - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - ) - } - if c.progressOutput == ioutil.Discard { - c.Printf("Copying %s %s\n", kind, info.Digest) - } - return bar -} - // copyConfig copies config.json, if any, from src to dest. 
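Annotation: in the copyConfig rewrite that follows, the semaphore guard stays at the top while the ConfigBlob fetch moves inside the progress-bar scope, so a failed read aborts the bar cleanly. The concurrency cap itself is the weighted-semaphore pattern from golang.org/x/sync, already imported by this file; a minimal sketch (a width of 3 is arbitrary here):

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/sync/semaphore"
    )

    func main() {
        ctx := context.Background()
        sem := semaphore.NewWeighted(3) // at most 3 blob copies in flight

        for i := 0; i < 6; i++ {
            i := i
            if err := sem.Acquire(ctx, 1); err != nil {
                panic(err)
            }
            go func() {
                defer sem.Release(1)
                time.Sleep(100 * time.Millisecond) // stand-in for one blob copy
                fmt.Println("copied blob", i)
            }()
        }
        // Acquiring the full width waits for all in-flight copies to finish.
        if err := sem.Acquire(ctx, 3); err != nil {
            panic(err)
        }
    }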
func (c *copier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() @@ -1111,22 +1076,23 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { } defer c.concurrentBlobCopiesSemaphore.Release(1) - configBlob, err := src.ConfigBlob(ctx) - if err != nil { - return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) - } - destInfo, err := func() (types.BlobInfo, error) { // A scope for defer progressPool := c.newProgressPool() defer progressPool.Wait() bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done") defer bar.Abort(false) + configBlob, err := src.ConfigBlob(ctx) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) + } + destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false) if err != nil { return types.BlobInfo{}, err } - bar.SetTotal(int64(len(configBlob)), true) + + bar.mark100PercentComplete() return destInfo, nil }() if err != nil { @@ -1171,48 +1137,37 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" - // Diffs are needed if we are encrypting an image or trying to decrypt an image - diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) - - // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. - if !diffIDIsNeeded { + diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" + // When encrypting or decrypting, only use the simple code path. We might be able to optimize more + // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again), + // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not. + encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) + canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting + + // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable. + if canAvoidProcessingCompleteLayer { // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. // Fixing that will probably require passing more information to TryReusingBlob() than the current version of // the ImageDestination interface lets us pass in. - var ( - blobInfo types.BlobInfo - reused bool - err error - ) - // Note: the storage destination optimizes the committing of - // layers which requires passing the index of the layer. - // Hence, we need to special case and cast.
- dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions) - if ok { - options := internalTypes.TryReusingBlobOptions{ - Cache: ic.c.blobInfoCache, - CanSubstitute: ic.canSubstituteBlobs, - SrcRef: srcRef, - EmptyLayer: emptyLayer, - } - options.LayerIndex = &layerIndex - reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options) - } else { - reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) - } - + reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ + Cache: ic.c.blobInfoCache, + CanSubstitute: ic.canSubstituteBlobs, + EmptyLayer: emptyLayer, + LayerIndex: &layerIndex, + SrcRef: srcRef, + }) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest) } if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) func() { // A scope for defer - bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists") + bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists") defer bar.Abort(false) - bar.SetTotal(0, true) + bar.mark100PercentComplete() }() // Throw an event that the layer has been skipped @@ -1243,9 +1198,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // of the source file are not known yet and must be fetched. // Attempt a partial only when the source allows to retrieve a blob partially and // the destination has support for it. - imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable) - imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial) - if okSource && okDest && !diffIDIsNeeded { + if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() { if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") hideProgressBar := true @@ -1253,33 +1206,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to bar.Abort(hideProgressBar) }() - progress := make(chan int64) - terminate := make(chan interface{}) - - defer close(terminate) - defer close(progress) - - proxy := imageSourceSeekableProxy{ - source: imgSource, - progress: progress, + proxy := blobChunkAccessorProxy{ + wrapped: ic.c.rawSource, + bar: bar, } - go func() { - for { - select { - case written := <-progress: - bar.IncrInt64(written) - case <-terminate: - return - } - } - }() - - bar.SetTotal(srcInfo.Size, false) - info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache) + info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { - bar.SetRefill(srcInfo.Size - bar.Current()) - bar.SetCurrent(srcInfo.Size) - bar.SetTotal(srcInfo.Size, true) + if srcInfo.Size != -1 { + bar.SetRefill(srcInfo.Size - bar.Current()) + } + bar.mark100PercentComplete() hideProgressBar = false logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) return true, info @@ -1292,16 +1228,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } // Fallback: copy the layer, computing the diffID if we need to do so - srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest) - } - defer 
srcStream.Close() - return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") defer bar.Abort(false) + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest) + } + defer srcStream.Close() + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) if err != nil { return types.BlobInfo{}, "", err @@ -1317,14 +1253,22 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID") } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process - // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. - ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + // Don’t record any associations that involve encrypted data. This is a bit crude; + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } diffID = diffIDResult.digest } } - bar.SetTotal(srcInfo.Size, true) + bar.mark100PercentComplete() return blobInfo, diffID, nil }() } @@ -1334,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
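// An illustrative sketch (not verbatim from this patch) of how the caller consumes the
// channel returned below; diffIDChan, diffIDResult, and its err/digest fields are the
// real names used above, while srcInfoForStream is a placeholder:
//
//	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, srcInfoForStream,
//	    diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
//	if diffIDIsNeeded {
//	    diffIDResult := <-diffIDChan // blocks until the copy pipeline has digested the whole stream
//	    if diffIDResult.err != nil { /* fail */ }
//	}
//
// This is why copyLayerFromStream must arrange its pipeWriter.CloseWithError cleanup
// before returning, as the comment on its return statement notes.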
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { + diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -1359,7 +1303,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -1411,7 +1355,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { if isConfig { // This is guaranteed by the caller, but set it here to be explicit. canModifyBlob = false } @@ -1431,6 +1375,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } var destStream io.Reader = digestingReader + // === Update progress bars + destStream = bar.ProxyReader(destStream) + // === Decrypt the stream, if required. var decrypted bool if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil { @@ -1465,9 +1412,6 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name()) } - // === Update progress bars - destStream = bar.ProxyReader(destStream) - // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. if getOriginalLayerCopyWriter != nil { @@ -1615,24 +1559,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } // === Finally, send the layer stream to dest. - var uploadedInfo types.BlobInfo - // Note: the storage destination optimizes the committing of layers - // which requires passing the index of the layer. Hence, we need to - // special case and cast. 
- dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions) - if ok { - options := internalTypes.PutBlobOptions{ - Cache: c.blobInfoCache, - IsConfig: isConfig, - EmptyLayer: emptyLayer, - } - if !isConfig { - options.LayerIndex = &layerIndex - } - uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) - } else { - uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig) + options := private.PutBlobOptions{ + Cache: c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, } + if !isConfig { + options.LayerIndex = &layerIndex + } + uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "writing blob") } @@ -1664,7 +1599,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // sent there if we are not already at EOF. if getOriginalLayerCopyWriter != nil { logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") - _, err := io.Copy(ioutil.Discard, originalLayerReader) + _, err := io.Copy(io.Discard, originalLayerReader) if err != nil { return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest) } @@ -1677,19 +1612,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) } if digestingReader.validationSucceeded { - // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: - // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader - // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob - // (because inputInfo.Digest == "", this must have been computed afresh). - switch compressionOperation { - case types.PreserveOriginal: - break // Do nothing, we have only one digest and we might not have even verified it. - case types.Compress: - c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) - case types.Decompress: - c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) - default: - return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) + // Don’t record any associations that involve encrypted data. This is a bit crude; + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encrypted && !decrypted { + // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: + // srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader + // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob + // (because inputInfo.Digest == "", this must have been computed afresh). + switch compressionOperation { + case types.PreserveOriginal: + break // Do nothing, we have only one digest and we might not have even verified it.
+ case types.Compress: + c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + case types.Decompress: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + default: + return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) + } } if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression { c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName) diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index b97edbf08b7..86ec8863aea 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -79,10 +79,10 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } - if !ic.canModifyManifest { - // We could also drop the !ic.canModifyManifest check and have the caller + if ic.cannotModifyManifestReason != "" { + // We could also drop this check and have the caller // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” + // messages. But it is nice to hide the “if we can't modify, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go new file mode 100644 index 00000000000..585d860570d --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -0,0 +1,148 @@ +package copy + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" + "github.com/vbauerster/mpb/v7" + "github.com/vbauerster/mpb/v7/decor" +) + +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. +func (c *copier) newProgressPool() *mpb.Progress { + return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) +} + +// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar +func customPartialBlobDecorFunc(s decor.Statistics) string { + if s.Total == 0 { + pairFmt := "%.1f / %.1f (skipped: %.1f)" + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) + } + pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" + percentage := 100.0 * float64(s.Refill) / float64(s.Total) + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) +} + +// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods. +type progressBar struct { + *mpb.Bar + originalSize int64 // or -1 if unknown +} + +// createProgressBar creates a progressBar in pool. 
Note that if the copier's reportWriter +// is io.Discard, the progress bar's output will be discarded +// +// NOTE: Every progress bar created within a progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called. +// +// As a convention, most users of progress bars should call mark100PercentComplete on full success; +// by convention, we don't leave progress bars in partial state when fully done +// (even if we copied much less data than anticipated). +func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar { + // shortDigestLen is the length of the digest used for blobs. + const shortDigestLen = 12 + + prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) + // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column. + maxPrefixLen := len("Copying blob ") + shortDigestLen + if len(prefix) > maxPrefixLen { + prefix = prefix[:maxPrefixLen] + } + + // onComplete will replace prefix once the bar/spinner has completed + onComplete = prefix + " " + onComplete + + // Use a normal progress bar when we know the size (i.e., size > 0). + // Otherwise, use a spinner to indicate that something's happening. + var bar *mpb.Bar + if info.Size > 0 { + if partial { + bar = pool.AddBar(info.Size, + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.Any(customPartialBlobDecorFunc), + ), + ) + } else { + bar = pool.AddBar(info.Size, + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), + ), + ) + } + } else { + bar = pool.New(0, + mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(), + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + ) + } + if c.progressOutput == io.Discard { + c.Printf("Copying %s %s\n", kind, info.Digest) + } + return &progressBar{ + Bar: bar, + originalSize: info.Size, + } +} + +// mark100PercentComplete marks the progress bar as 100% complete; +// it may do so by advancing the current state if it is below the known total. +func (bar *progressBar) mark100PercentComplete() { + if bar.originalSize > 0 { + // We can't call bar.SetTotal even if we wanted to; the total cannot be changed + // after a progress bar is created with a definite total. + bar.SetCurrent(bar.originalSize) // This triggers the completion condition. + } else { + // -1 = unknown size + // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known + // size (possible at least in theory), in mpb, zero-sized progress bars are treated + // as unknown size, in particular they are not configured to be marked as + // complete on bar.Current() reaching bar.total (because that would happen already + // when creating the progress bar). + // That means that we are both _allowed_ to call SetTotal, and we _have to_. + bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete. + } +} + +// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar +// with the number of received bytes.
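+//
+// Usage, as in the copyLayer hunk earlier in this patch:
+//
+//	proxy := blobChunkAccessorProxy{
+//	    wrapped: ic.c.rawSource,
+//	    bar:     bar,
+//	}
+//	info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
+//
+// This replaces the earlier progress/terminate channel pair and its helper
+// goroutine with a plain synchronous wrapper.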
+type blobChunkAccessorProxy struct { + wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor + bar *progressBar // A progress bar updated with the number of bytes read so far +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must not overlap and must be sorted by their offset. +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + total += int64(c.Length) + } + s.bar.IncrInt64(total) + } + return rc, errs, err +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go similarity index 68% rename from vendor/github.com/containers/image/v5/copy/progress_reader.go rename to vendor/github.com/containers/image/v5/copy/progress_channel.go index 42f490d326a..d5e9e09bda9 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_reader.go +++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go @@ -1,15 +1,13 @@ package copy import ( - "context" "io" "time" - internalTypes "github.com/containers/image/v5/internal/types" "github.com/containers/image/v5/types" ) -// progressReader is a reader that reports its progress on an interval. +// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval. type progressReader struct { source io.Reader channel chan<- types.ProgressProperties @@ -79,26 +77,3 @@ func (r *progressReader) Read(p []byte) (int, error) { } return n, err } - -// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes -// are received. -type imageSourceSeekableProxy struct { - // source is the seekable input to read from. - source internalTypes.ImageSourceSeekable - // progress is the chan where the total number of bytes read so far are reported. - progress chan int64 -} - -// GetBlobAt reads from the ImageSourceSeekable and report how many bytes were received -// to the progress chan. -func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks) - if err == nil { - total := int64(0) - for _, c := range chunks { - total += int64(c.Length) - } - s.progress <- total - } - return rc, errs, err -} diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go index 61612a4d3d7..aa42674bcdc 100644 --- a/vendor/github.com/containers/image/v5/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -1,13 +1,14 @@ package copy import ( + "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports" "github.com/pkg/errors" ) // createSignature creates a new signature of manifest using keyIdentity.
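// (The doc comment above predates this change: the rewritten function below
// additionally takes a passphrase and an optional signing identity. A sketch of
// the new call shape, with illustrative argument names:
//
//	sig, err := c.createSignature(manifest, keyIdentity, passphrase, identity)
//
// A nil identity falls back to the destination's Docker reference; a name-only
// identity, without tag or digest, is rejected via reference.IsNameOnly.)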
-func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { +func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) ([]byte, error) { mech, err := signature.NewGPGSigningMechanism() if err != nil { return nil, errors.Wrap(err, "initializing GPG") @@ -17,13 +18,19 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, e return nil, errors.Wrap(err, "Signing not supported") } - dockerReference := c.dest.Reference().DockerReference() - if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + if identity != nil { + if reference.IsNameOnly(identity) { + return nil, errors.Errorf("Sign identity must be a fully specified reference %s", identity) + } + } else { + identity = c.dest.Reference().DockerReference() + if identity == nil { + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } } c.Printf("Signing manifest\n") - newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) + newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase}) if err != nil { return nil, errors.Wrap(err, "creating signature") } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index ea20e7c5e41..3b135e68e50 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -3,7 +3,6 @@ package directory import ( "context" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -62,7 +61,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath()) } if versionExists { - contents, err := ioutil.ReadFile(d.ref.versionPath()) + contents, err := os.ReadFile(d.ref.versionPath()) if err != nil { return nil, err } @@ -86,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag } } // create version file - err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644) + err = os.WriteFile(d.ref.versionPath(), []byte(version), 0644) if err != nil { return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath()) } @@ -149,7 +148,7 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") + blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob") if err != nil { return types.BlobInfo{}, err } @@ -232,7 +231,7 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
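// (Throughout the vendored files below, deprecated io/ioutil helpers are replaced
// by the os and io equivalents that have been the recommended spellings since
// Go 1.16; illustrative pairs, not lines from this patch:
//
//	data, err := os.ReadFile(path)        // was ioutil.ReadFile(path)
//	err = os.WriteFile(path, data, 0644)  // was ioutil.WriteFile(path, data, 0644)
//	f, err := os.CreateTemp(dir, "blob-") // was ioutil.TempFile(dir, "blob-")
//
// os.ReadDir returns []fs.DirEntry rather than ioutil.ReadDir's []fs.FileInfo,
// but the call sites here only count entries or use their names, so the
// substitution is safe.)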
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error { - return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) + return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) } // PutSignatures writes a set of signatures to the destination. @@ -240,7 +239,7 @@ func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { for i, sig := range signatures { - if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { + if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { return err } } @@ -272,7 +271,7 @@ func pathExists(path string) (bool, error) { // returns true if directory is empty func isDirEmpty(path string) (bool, error) { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return false, err } @@ -281,7 +280,7 @@ func isDirEmpty(path string) (bool, error) { // deletes the contents of a directory func removeDirContents(path string) error { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return err } diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go index ad9129d4012..8b509112aa8 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_src.go +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -3,7 +3,6 @@ package directory import ( "context" "io" - "io/ioutil" "os" "github.com/containers/image/v5/manifest" @@ -37,7 +36,7 @@ func (s *dirImageSource) Close() error { // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest)) + m, err := os.ReadFile(s.ref.manifestPath(instanceDigest)) if err != nil { return nil, "", err } @@ -71,7 +70,7 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { signatures := [][]byte{} for i := 0; ; i++ { - signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest)) + signature, err := os.ReadFile(s.ref.signaturePath(i, instanceDigest)) if err != nil { if os.IsNotExist(err) { break diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 3fe9a11d003..f537de72da3 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -1,13 +1,11 @@ package docker import ( - "bytes" "context" "crypto/tls" "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -463,7 +461,11 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea return nil, err } - url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + url, err := url.Parse(urlString) + if err != nil { + return nil, err + } return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope) } @@ -500,7 +502,7 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat // makeRequest should generally be preferred. // In case of an HTTP 429 status code in the response, it may automatically retry a few times. // TODO(runcom): too many arguments here, use a struct -func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { +func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { delay := backoffInitialDelay attempts := 0 for { @@ -518,7 +520,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url if delay > backoffMaxDelay { delay = backoffMaxDelay } - logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url, delay.Seconds()) + logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url.Redacted(), delay.Seconds()) select { case <-ctx.Done(): return nil, ctx.Err() @@ -533,12 +535,12 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // Note that no exponential back off is performed when receiving an http 429 status code. 
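// (Passing *url.URL instead of a string through this file lets every debug log
// use (*url.URL).Redacted(), which masks an embedded password; a hypothetical
// example of the difference:
//
//	u, _ := url.Parse("https://user:secret@registry.example.com/v2/")
//	logrus.Debugf("GET %s", u.String())   // GET https://user:secret@registry.example.com/v2/
//	logrus.Debugf("GET %s", u.Redacted()) // GET https://user:xxxxx@registry.example.com/v2/
//
// so credentials supplied inside a registry URL no longer leak into logs.)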
-func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, method, url, stream) +func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, url.String(), stream) if err != nil { return nil, err } - if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. + if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it. req.ContentLength = streamLen } req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") @@ -553,7 +555,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, return nil, err } } - logrus.Debugf("%s %s", method, url) + logrus.Debugf("%s %s", method, url.Redacted()) res, err := c.client.Do(req) if err != nil { return nil, err @@ -650,10 +652,10 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall params.Add("refresh_token", c.auth.IdentityToken) params.Add("client_id", "containers/image") - authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode())) + authReq.Body = io.NopCloser(strings.NewReader(params.Encode())) authReq.Header.Add("User-Agent", c.userAgent) authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") - logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { return nil, err @@ -705,13 +707,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, } authReq.Header.Add("User-Agent", c.userAgent) - logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { return nil, err } defer res.Body.Close() - if err := httpResponseToError(res, "Requesting bear token"); err != nil { + if err := httpResponseToError(res, "Requesting bearer token"); err != nil { return nil, err } tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) @@ -735,14 +737,17 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { c.client = &http.Client{Transport: tr} ping := func(scheme string) error { - url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) + url, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)) + if err != nil { + return err + } resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) return err } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return httpResponseToError(resp, "") } @@ -762,14 +767,17 @@ 
func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { - url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) + url, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)) + if err != nil { + return false + } resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) return false } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return false } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 56c39c141d7..d02100cf80b 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -7,7 +7,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -182,7 +181,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, // This error text should never be user-visible, we terminate only after makeRequestToResolvedURL // returns, so there isn’t a way for the error text to be provided to any of our callers. defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) if err != nil { logrus.Debugf("Error uploading layer chunked %v", err) return nil, err @@ -207,7 +206,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, locationQuery := uploadLocation.Query() locationQuery.Set("digest", blobDigest.String()) uploadLocation.RawQuery = locationQuery.Encode() - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) if err != nil { return types.BlobInfo{}, err } @@ -257,9 +256,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc "from": {reference.Path(srcRepo)}, }.Encode(), } - mountPath := u.String() - logrus.Debugf("Trying to mount %s", mountPath) - res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope) + logrus.Debugf("Trying to mount %s", u.Redacted()) + res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope) if err != nil { return err } @@ -276,8 +274,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc if err != nil { return errors.Wrap(err, "determining upload URL after a mount attempt") } - logrus.Debugf("... 
started an upload instead of mounting, trying to cancel at %s", uploadLocation.String()) - res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope) + logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted()) + res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope) if err != nil { logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err) } else { @@ -464,6 +462,18 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst } return err } + // An HTTP server may not be a registry at all, and just return 200 OK to everything + // (in particular that can fairly easily happen after tearing down a website and + // replacing it with a global 302 redirect to a new website, completely ignoring the + // path in the request); in that case we could “succeed” uploading a whole image. + // With docker/distribution we could rely on a Docker-Content-Digest header being present + // (because docker/distribution/registry/client has been failing uploads if it was missing), + // but that has been defined as explicitly optional by + // https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers + // So, just note the missing header in a debug log. + if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 { + logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry") + } return nil } @@ -581,16 +591,16 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) if err != nil { return err } - err = ioutil.WriteFile(url.Path, signature, 0644) + err = os.WriteFile(url.Path, signature, 0644) if err != nil { return err } return nil case "http", "https": - return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.Redacted()) default: - return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) + return errors.Errorf("Unsupported scheme when writing signature to %s", url.Redacted()) } } @@ -608,9 +618,9 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error return false, err case "http", "https": - return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported.
Configure a sigstore-staging: location", url.Scheme, url.Redacted()) default: - return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String()) + return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted()) } } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 314e9b394ee..085a3afcc5c 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" @@ -16,7 +15,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" - internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" @@ -147,6 +146,11 @@ func (s *dockerImageSource) Close() error { return nil } +// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. +func (s *dockerImageSource) SupportsGetBlobAt() bool { + return true +} + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() // to read the image's layers. @@ -248,13 +252,14 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") } for _, u := range urls { - if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { + url, err := url.Parse(u) + if err != nil || (url.Scheme != "http" && url.Scheme != "https") { continue // unsupported url. skip this url. } // NOTE: we must not authenticate on additional URLs as those // can be abused to leak credentials or tokens. Please // refer to CVE-2020-15157 for more information. 
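// (Background, to our understanding: CVE-2020-15157 involved manifests whose
// layers carry attacker-chosen external "urls"; a client that authenticated
// against those hosts would leak its registry credentials or bearer token to
// them, which is why the fetch below always uses noAuth.)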
- resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, u, nil, nil, -1, noAuth, nil) + resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err == nil { if resp.StatusCode != http.StatusOK { err = errors.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) @@ -288,7 +293,7 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool { } // splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks -func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) { +func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) { defer close(streams) defer close(errs) currentOffset := uint64(0) @@ -302,7 +307,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, break } toSkip := c.Offset - currentOffset - if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil { + if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil { errs <- err break } @@ -310,7 +315,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, } s := signalCloseReader{ closed: make(chan interface{}), - stream: ioutil.NopCloser(io.LimitReader(body, int64(c.Length))), + stream: io.NopCloser(io.LimitReader(body, int64(c.Length))), consumeStream: true, } streams <- s @@ -322,7 +327,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, } // handle206Response reads a 206 response and sends each part as a separate ReadCloser to the streams chan. -func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) { +func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) { defer close(streams) defer close(errs) if !strings.HasPrefix(mediaType, "multipart/") { @@ -357,9 +362,12 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } } -// GetBlobAt returns a stream for the specified blob. +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. // The specified chunks must not overlap and must be sorted by their offset. -func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk.
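+//
+// An illustrative consumption pattern honoring those requirements (a sketch,
+// not code from this patch; dst is a placeholder writer):
+//
+//	streams, errs, err := s.GetBlobAt(ctx, info, chunks)
+//	if err != nil { return err }
+//	for stream := range streams {
+//	    _, copyErr := io.Copy(dst, stream) // fully consume before taking the next chunk
+//	    stream.Close()
+//	    if copyErr != nil { return copyErr }
+//	}
+//	if chErr := <-errs; chErr != nil { return chErr } // yields nil once errs is closed without a value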
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { headers := make(map[string][]string) var rangeVals []string @@ -401,7 +409,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, return streams, errs, nil case http.StatusBadRequest: res.Body.Close() - return nil, nil, internalTypes.BadPartialRequestError{Status: res.Status} + return nil, nil, private.BadPartialRequestError{Status: res.Status} default: err := httpResponseToError(res, "Error fetching partial blob") if err == nil { @@ -506,7 +514,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( switch url.Scheme { case "file": logrus.Debugf("Reading %s", url.Path) - sig, err := ioutil.ReadFile(url.Path) + sig, err := os.ReadFile(url.Path) if err != nil { if os.IsNotExist(err) { return nil, true, nil @@ -516,7 +524,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( return sig, false, nil case "http", "https": - logrus.Debugf("GET %s", url) + logrus.Debugf("GET %s", url.Redacted()) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil) if err != nil { return nil, false, err @@ -529,7 +537,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( if res.StatusCode == http.StatusNotFound { return nil, true, nil } else if res.StatusCode != http.StatusOK { - return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) } sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) if err != nil { @@ -538,7 +546,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( return sig, false, nil default: - return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) + return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.Redacted()) } } @@ -602,8 +610,11 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) } - digest := get.Header.Get("Docker-Content-Digest") - deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest) + manifestDigest, err := manifest.Digest(manifestBody) + if err != nil { + return fmt.Errorf("computing manifest digest: %w", err) + } + deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest) // When retrieving the digest from a registry >= 2.3 use the following header: // "Accept": "application/vnd.docker.distribution.manifest.v2+json" @@ -621,11 +632,6 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) } - manifestDigest, err := manifest.Digest(manifestBody) - if err != nil { - return err - } - for i := 0; ; i++ { url := signatureStorageURL(c.signatureBase, manifestDigest, i) missing, err := c.deleteOneSignature(url) @@ -756,7 +762,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) { func (s signalCloseReader) Close() error { defer close(s.closed) if s.consumeStream { - if _, err := io.Copy(ioutil.Discard, s.stream); err != nil { + if _, err := 
io.Copy(ioutil.Discard, s.stream); err != nil { + if _, err := io.Copy(io.Discard, s.stream); err != nil { s.stream.Close() return err } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go index 6164ceb66ed..c77c002d15e 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -4,7 +4,6 @@ import ( "archive/tar" "encoding/json" "io" - "io/ioutil" "os" "path" @@ -53,7 +52,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { // The caller should call .Close() on the returned archive when done. func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) { // Save inputStream to a temporary file - tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") + tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") if err != nil { return nil, errors.Wrap(err, "creating temporary file") } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go index b8d84d2452a..8e9be17c189 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go @@ -6,7 +6,6 @@ import ( "context" "encoding/json" "io" - "io/ioutil" "os" "path" "sync" @@ -170,7 +169,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif uncompressedSize := h.Size if isCompressed { - uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) + uncompressedSize, err = io.Copy(io.Discard, uncompressedStream) if err != nil { return nil, errors.Wrapf(err, "reading %s to find its size", layerPath) } @@ -263,7 +262,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B } if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. - return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil } if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go index 515e59327d3..d0a3f1be069 100644 --- a/vendor/github.com/containers/image/v5/docker/lookaside.go +++ b/vendor/github.com/containers/image/v5/docker/lookaside.go @@ -2,7 +2,6 @@ package docker import ( "fmt" - "io/ioutil" "net/url" "os" "path" @@ -82,7 +81,7 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, } else { // returns default directory if no sigstore specified in configuration file url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID()) - logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String()) + logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted()) } // NOTE: Keep this in sync with docs/signature-protocols.md! // FIXME? Restrict to explicitly supported schemes?
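// (For context, an illustrative registries.d entry of the shape that
// loadAndMergeConfig below parses; the paths and host are hypothetical:
//
//	default-docker:
//	    sigstore-staging: file:///var/lib/containers/sigstore
//	docker:
//	    registry.example.com:
//	        sigstore: https://sigstore.example.com
//
// YAML files in the configuration directory are read and merged into a single
// registryConfiguration.)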
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
index 6164ceb66ed..c77c002d15e 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 
@@ -53,7 +52,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
 // The caller should call .Close() on the returned archive when done.
 func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
 	// Save inputStream to a temporary file
-	tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
+	tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
 	if err != nil {
 		return nil, errors.Wrap(err, "creating temporary file")
 	}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
index b8d84d2452a..8e9be17c189 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
@@ -6,7 +6,6 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"sync"
@@ -170,7 +169,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
 		uncompressedSize := h.Size
 		if isCompressed {
-			uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
+			uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
 			if err != nil {
 				return nil, errors.Wrapf(err, "reading %s to find its size", layerPath)
 			}
@@ -263,7 +262,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
 	}
 	if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
-		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+		return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
 	}
 	if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go
index 515e59327d3..d0a3f1be069 100644
--- a/vendor/github.com/containers/image/v5/docker/lookaside.go
+++ b/vendor/github.com/containers/image/v5/docker/lookaside.go
@@ -2,7 +2,6 @@ package docker
 
 import (
 	"fmt"
-	"io/ioutil"
 	"net/url"
 	"os"
 	"path"
@@ -82,7 +81,7 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference,
 	} else {
 		// returns default directory if no sigstore specified in configuration file
 		url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID())
-		logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String())
+		logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted())
 	}
 	// NOTE: Keep this in sync with docs/signature-protocols.md!
 	// FIXME? Restrict to explicitly supported schemes?
@@ -146,7 +145,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 			continue
 		}
 		configPath := filepath.Join(dirPath, configName)
-		configBytes, err := ioutil.ReadFile(configPath)
+		configBytes, err := os.ReadFile(configPath)
 		if err != nil {
 			return nil, err
 		}
diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go
new file mode 100644
index 00000000000..7b15871f7b6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go
@@ -0,0 +1,6 @@
+package reference
+
+// Return true if the specified string fully matches `IdentifierRegexp`.
+func IsFullIdentifier(s string) bool {
+	return anchoredIdentifierRegexp.MatchString(s)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
new file mode 100644
index 00000000000..82734a6cdc3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
@@ -0,0 +1,69 @@
+package imagedestination
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+)
+
+// FromPublic(dest) returns an object that provides the private.ImageDestination API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageDestination, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageDestination.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(dest types.ImageDestination) private.ImageDestination {
+	if dest2, ok := dest.(private.ImageDestination); ok {
+		return dest2
+	}
+	return &wrapped{ImageDestination: dest}
+}
+
+// wrapped provides the private.ImageDestination operations
+// for a destination that only implements types.ImageDestination
+type wrapped struct {
+	types.ImageDestination
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (w *wrapped) SupportsPutBlobPartial() bool {
+	return false
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+	return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
+	return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", w.Reference().Transport().Name())
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+	return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
+}
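FromPublic is the compatibility shim that lets every types.ImageDestination be driven through the new private API. A sketch of the intended call pattern, not taken from this diff: imagedestination.FromPublic and the private.* names are defined in the patch above, but putLayer and its parameters are hypothetical glue, and since these are internal packages the snippet only compiles inside the containers/image module itself:

package main

import (
	"context"
	"io"

	"github.com/containers/image/v5/internal/imagedestination"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

// putLayer tries the partial path when the destination advertises it,
// falling back to a full upload otherwise. (A real caller would also fill
// the EmptyLayer/LayerIndex fields of PutBlobOptions.)
func putLayer(ctx context.Context, publicDest types.ImageDestination, chunkAccessor private.BlobChunkAccessor,
	stream io.Reader, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
	dest := imagedestination.FromPublic(publicDest) // never fails; wraps if needed
	if dest.SupportsPutBlobPartial() {
		if info, err := dest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache); err == nil {
			return info, nil
		}
		// Per the doc comment above, PutBlobPartial may fail even when
		// supported; fall back to PutBlobWithOptions in that case.
	}
	return dest.PutBlobWithOptions(ctx, stream, srcInfo, private.PutBlobOptions{Cache: cache})
}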
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
new file mode 100644
index 00000000000..fe1be8d9eab
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
@@ -0,0 +1,47 @@
+package imagesource
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+)
+
+// FromPublic(src) returns an object that provides the private.ImageSource API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageSource, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageSource.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(src types.ImageSource) private.ImageSource {
+	if src2, ok := src.(private.ImageSource); ok {
+		return src2
+	}
+	return &wrapped{ImageSource: src}
+}
+
+// wrapped provides the private.ImageSource operations
+// for a source that only implements types.ImageSource
+type wrapped struct {
+	types.ImageSource
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (w *wrapped) SupportsGetBlobAt() bool {
+	return false
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must be not overlapping and sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", w.Reference().Transport().Name())
+}
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
index 3fed1995cb8..49fa410e91f 100644
--- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
+++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
@@ -2,7 +2,6 @@ package iolimits
 
 import (
 	"io"
-	"io/ioutil"
 
 	"github.com/pkg/errors"
 )
@@ -47,7 +46,7 @@ const (
 func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
 	limitedReader := io.LimitReader(reader, int64(limit+1))
 
-	res, err := ioutil.ReadAll(limitedReader)
+	res, err := io.ReadAll(limitedReader)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
deleted file mode 100644
index bf6cc87d421..00000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
-	"golang.org/x/sys/unix"
-)
-
-// Key represents a single key linked to one or more kernel keyrings.
-type Key struct {
-	Name string
-
-	id, ring keyID
-	size     int
-}
-
-// ID returns the 32-bit kernel identifier for a specific key
-func (k *Key) ID() int32 {
-	return int32(k.id)
-}
-
-// Get the key's value as a byte slice
-func (k *Key) Get() ([]byte, error) {
-	var (
-		b        []byte
-		err      error
-		sizeRead int
-	)
-
-	if k.size == 0 {
-		k.size = 512
-	}
-
-	size := k.size
-
-	b = make([]byte, int(size))
-	sizeRead = size + 1
-	for sizeRead > size {
-		r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size)
-		if err != nil {
-			return nil, err
-		}
-
-		if sizeRead = int(r1); sizeRead > size {
-			b = make([]byte, sizeRead)
-			size = sizeRead
-			sizeRead = size + 1
-		} else {
-			k.size = sizeRead
-		}
-	}
-	return b[:k.size], err
-}
-
-// Unlink a key from the keyring it was loaded from (or added to). If the key
-// is not linked to any other keyrings, it is destroyed.
-func (k *Key) Unlink() error {
-	_, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0)
-	return err
-}
-
-// Describe returns a string describing the attributes of a specified key
-func (k *Key) Describe() (string, error) {
-	keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id))
-	if err != nil {
-		return "", err
-	}
-	return keyAttr, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
deleted file mode 100644
index 5eaad615c7c..00000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)
-package keyctl
-
-import (
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// Keyring is the basic interface to a linux keyctl keyring.
-type Keyring interface {
-	ID
-	Add(string, []byte) (*Key, error)
-	Search(string) (*Key, error)
-}
-
-type keyring struct {
-	id keyID
-}
-
-// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have.
-type ID interface {
-	ID() int32
-}
-
-// Add a new key to a keyring. The key can be searched for later by name.
-func (kr *keyring) Add(name string, key []byte) (*Key, error) {
-	r, err := unix.AddKey("user", name, key, int(kr.id))
-	if err == nil {
-		key := &Key{Name: name, id: keyID(r), ring: kr.id}
-		return key, nil
-	}
-	return nil, err
-}
-
-// Search for a key by name, this also searches child keyrings linked to this
-// one. The key, if found, is linked to the top keyring that Search() was called
-// from.
-func (kr *keyring) Search(name string) (*Key, error) {
-	id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0)
-	if err == nil {
-		return &Key{Name: name, id: keyID(id), ring: kr.id}, nil
-	}
-	return nil, err
-}
-
-// ID returns the 32-bit kernel identifier of a keyring
-func (kr *keyring) ID() int32 {
-	return int32(kr.id)
-}
-
-// SessionKeyring returns the current login session keyring
-func SessionKeyring() (Keyring, error) {
-	return newKeyring(unix.KEY_SPEC_SESSION_KEYRING)
-}
-
-// UserKeyring returns the keyring specific to the current user.
-func UserKeyring() (Keyring, error) {
-	return newKeyring(unix.KEY_SPEC_USER_KEYRING)
-}
-
-// Unlink an object from a keyring
-func Unlink(parent Keyring, child ID) error {
-	_, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0)
-	return err
-}
-
-// Link a key into a keyring
-func Link(parent Keyring, child ID) error {
-	_, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0)
-	return err
-}
-
-// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it
-func ReadUserKeyring() ([]*Key, error) {
-	var (
-		b        []byte
-		err      error
-		sizeRead int
-	)
-	krSize := 4
-	size := krSize
-	b = make([]byte, size)
-	sizeRead = size + 1
-	for sizeRead > size {
-		r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size)
-		if err != nil {
-			return nil, err
-		}
-
-		if sizeRead = int(r1); sizeRead > size {
-			b = make([]byte, sizeRead)
-			size = sizeRead
-			sizeRead = size + 1
-		} else {
-			krSize = sizeRead
-		}
-	}
-	keyIDs := getKeyIDsFromByte(b[:krSize])
-	return keyIDs, err
-}
-
-func getKeyIDsFromByte(byteKeyIDs []byte) []*Key {
-	idSize := 4
-	var keys []*Key
-	for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize {
-		tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx]))
-		keys = append(keys, &Key{id: keyID(tempID)})
-	}
-	return keys
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
deleted file mode 100644
index 5f4d2157ae9..00000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
-	"golang.org/x/sys/unix"
-)
-
-// KeyPerm represents in-kernel access control permission to keys and keyrings
-// as a 32-bit integer broken up into four permission sets, one per byte.
-// In MSB order, the perms are: Processor, User, Group, Other.
-type KeyPerm uint32
-
-const (
-	// PermOtherAll sets all permission for Other
-	PermOtherAll KeyPerm = 0x3f << (8 * iota)
-	// PermGroupAll sets all permission for Group
-	PermGroupAll
-	// PermUserAll sets all permission for User
-	PermUserAll
-	// PermProcessAll sets all permission for Processor
-	PermProcessAll
-)
-
-// SetPerm sets the permissions on a key or keyring.
-func SetPerm(k ID, p KeyPerm) error {
-	err := unix.KeyctlSetperm(int(k.ID()), uint32(p))
-	return err
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
deleted file mode 100644
index f61666e42c2..00000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
-	"golang.org/x/sys/unix"
-)
-
-type keyID int32
-
-func newKeyring(id keyID) (*keyring, error) {
-	r1, err := unix.KeyctlGetKeyringID(int(id), true)
-	if err != nil {
-		return nil, err
-	}
-
-	if id < 0 {
-		r1 = int(id)
-	}
-	return &keyring{id: keyID(r1)}, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go
new file mode 100644
index 00000000000..65788651fb5
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/private/private.go
@@ -0,0 +1,106 @@
+package private
+
+import (
+	"context"
+	"io"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/types"
+)
+
+// ImageSource is an internal extension to the types.ImageSource interface.
+type ImageSource interface {
+	types.ImageSource
+
+	// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+	SupportsGetBlobAt() bool
+	// BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt().
+	BlobChunkAccessor
+}
+
+// ImageDestination is an internal extension to the types.ImageDestination
+// interface.
+type ImageDestination interface {
+	types.ImageDestination
+
+	// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+	SupportsPutBlobPartial() bool
+
+	// PutBlobWithOptions writes contents of stream and returns data representing the result.
+	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+	// inputInfo.Size is the expected length of stream, if known.
+	// inputInfo.MediaType describes the blob format, if known.
+	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+	// to any other readers for download using the supplied digest.
+	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+	PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)
+
+	// PutBlobPartial attempts to create a blob using the data that is already present
+	// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+	// It is available only if SupportsPutBlobPartial().
+	// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+	// should fall back to PutBlobWithOptions.
+	PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error)
+
+	// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+	// info.Digest must not be empty.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+	// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+	// reflected in the manifest that will be written.
+	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+	TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
+}
+
+// PutBlobOptions are used in PutBlobWithOptions.
+type PutBlobOptions struct {
+	Cache    types.BlobInfoCache // Cache to optionally update with the uploaded blob.
+	IsConfig bool                // True if the blob is a config
+
+	// The following fields are new to internal/private. Users of internal/private MUST fill them in,
+	// but they also must expect that they will be ignored by types.ImageDestination transports.
+
+	EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+	LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+}
+
+// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
+type TryReusingBlobOptions struct {
+	Cache types.BlobInfoCache // Cache to use and/or update.
+	// If true, it is allowed to use an equivalent of the desired blob;
+	// in that case the returned info may not match the input.
+	CanSubstitute bool
+
+	// The following fields are new to internal/private. Users of internal/private MUST fill them in,
+	// but they also must expect that they will be ignored by types.ImageDestination transports.
+
+	EmptyLayer bool            // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+	LayerIndex *int            // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+	SrcRef     reference.Named // A reference to the source image that contains the input blob.
+}
+
+// ImageSourceChunk is a portion of a blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceChunk struct {
+	Offset uint64
+	Length uint64
+}
+
+// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
+type BlobChunkAccessor interface {
+	// GetBlobAt returns a sequential channel of readers that contain data for the requested
+	// blob chunks, and a channel that might get a single error value.
+	// The specified chunks must be not overlapping and sorted by their offset.
+	// The readers must be fully consumed, in the order they are returned, before blocking
+	// to read the next chunk.
+	GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
+
+// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request.
+type BadPartialRequestError struct {
+	Status string
+}
+
+func (e BadPartialRequestError) Error() string {
+	return e.Status
+}
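The BlobChunkAccessor contract defined above is easy to get wrong on the consumer side: the readers arrive on a sequential channel and each one must be drained, in order, before receiving the next. A hypothetical consumer sketch, which additionally assumes the source closes both channels when it is done (the channel-closing behavior is not spelled out in the interface comment):

package main

import (
	"context"
	"io"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

// readChunks fetches the given (non-overlapping, offset-sorted) chunks and
// returns their contents in order.
func readChunks(ctx context.Context, src private.BlobChunkAccessor, info types.BlobInfo, chunks []private.ImageSourceChunk) ([][]byte, error) {
	streams, errs, err := src.GetBlobAt(ctx, info, chunks)
	if err != nil {
		return nil, err
	}
	var out [][]byte
	for r := range streams { // assumed closed by the source after the last chunk
		data, err := io.ReadAll(r) // fully consume before the next receive
		r.Close()
		if err != nil {
			return nil, err
		}
		out = append(out, data)
	}
	if err := <-errs; err != nil { // at most one error value is sent
		return nil, err
	}
	return out, nil
}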
diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
index 306220585b6..84bb656ac71 100644
--- a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
+++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
@@ -3,7 +3,6 @@ package streamdigest
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 
 	"github.com/containers/image/v5/internal/putblobdigest"
@@ -16,7 +15,7 @@ import (
 // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
 // If an error occurs, inputInfo is not modified.
 func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
-	diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
+	diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
 	if err != nil {
 		return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
 	}
diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go
deleted file mode 100644
index 388f8cf3b49..00000000000
--- a/vendor/github.com/containers/image/v5/internal/types/types.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package types
-
-import (
-	"context"
-	"io"
-
-	"github.com/containers/image/v5/docker/reference"
-	publicTypes "github.com/containers/image/v5/types"
-)
-
-// ImageDestinationWithOptions is an internal extension to the ImageDestination
-// interface.
-type ImageDestinationWithOptions interface {
-	publicTypes.ImageDestination
-
-	// PutBlobWithOptions is a wrapper around PutBlob. If
-	// options.LayerIndex is set, the blob will be committed directly.
-	// Either by the calling goroutine or by another goroutine already
-	// committing layers.
-	//
-	// Please note that TryReusingBlobWithOptions and PutBlobWithOptions
-	// *must* be used the together. Mixing the two with non "WithOptions"
-	// functions is not supported.
-	PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error)
-
-	// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
-	// options.LayerIndex is set, the reused blob will be recoreded as
-	// already pulled.
-	//
-	// Please note that TryReusingBlobWithOptions and PutBlobWithOptions
-	// *must* be used the together. Mixing the two with non "WithOptions"
-	// functions is not supported.
-	TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error)
-}
-
-// PutBlobOptions are used in PutBlobWithOptions.
-type PutBlobOptions struct {
-	// Cache to look up blob infos.
-	Cache publicTypes.BlobInfoCache
-	// Denotes whether the blob is a config or not.
-	IsConfig bool
-	// Indicates an empty layer.
-	EmptyLayer bool
-	// The corresponding index in the layer slice.
-	LayerIndex *int
-}
-
-// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
-type TryReusingBlobOptions struct {
-	// Cache to look up blob infos.
-	Cache publicTypes.BlobInfoCache
-	// Use an equivalent of the desired blob.
-	CanSubstitute bool
-	// Indicates an empty layer.
-	EmptyLayer bool
-	// The corresponding index in the layer slice.
-	LayerIndex *int
-	// The reference of the image that contains the target blob.
-	SrcRef reference.Named
-}
-
-// ImageSourceChunk is a portion of a blob.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageSourceChunk struct {
-	Offset uint64
-	Length uint64
-}
-
-// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageSourceSeekable interface {
-	// GetBlobAt returns a stream for the specified blob.
-	// The specified chunks must be not overlapping and sorted by their offset.
-	GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
-}
-
-// ImageDestinationPartial is a service to store a blob by requesting the missing chunks to a ImageSourceSeekable.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageDestinationPartial interface {
-	// PutBlobPartial writes contents of stream and returns data representing the result.
-	PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo publicTypes.BlobInfo, cache publicTypes.BlobInfoCache) (publicTypes.BlobInfo, error)
-}
-
-// BadPartialRequestError is returned by ImageSourceSeekable.GetBlobAt on an invalid request.
-type BadPartialRequestError struct {
-	Status string
-}
-
-func (e BadPartialRequestError) Error() string {
-	return e.Status
-}
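Most of the churn in this vendor bump is the mechanical retirement of the deprecated io/ioutil package in favor of its Go 1.16 os/io equivalents. For reference, the mapping used throughout these files (a plain Go package that compiles as written; the package name is arbitrary):

package ioutilmigration

import (
	"io"
	"os"
)

// Each left-hand side below is the replacement used in this diff for the
// commented-out io/ioutil API.
var (
	_ = os.ReadFile   // was ioutil.ReadFile
	_ = os.WriteFile  // was ioutil.WriteFile
	_ = os.ReadDir    // was ioutil.ReadDir (returns []os.DirEntry, not []os.FileInfo)
	_ = os.CreateTemp // was ioutil.TempFile
	_ = os.MkdirTemp  // was ioutil.TempDir
	_ = io.ReadAll    // was ioutil.ReadAll
	_ = io.NopCloser  // was ioutil.NopCloser
	_ = io.Discard    // was ioutil.Discard
)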
 		return err
 	}
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
index 511cdcc37e5..20955ab7ff4 100644
--- a/vendor/github.com/containers/image/v5/manifest/common.go
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -51,7 +51,7 @@ const (
 // other than the ones the caller specifically allows.
 // expectedMIMEType is used only for diagnostics.
 // NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
-// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambigous
+// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
 // data that just isn’t what was expected, as opposed to actually ambiguous data.
 func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
 	allowed allowedManifestFields) error {
@@ -71,7 +71,7 @@ func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
 		Manifests interface{} `json:"manifests"`
 	}{}
 	if err := json.Unmarshal(manifest, &detectedFields); err != nil {
-		// The caller was supposed to already validate version numbers, so this shold not happen;
+		// The caller was supposed to already validate version numbers, so this should not happen;
 		// let’s not bother with making this error “nice”.
 		return err
 	}
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
index 4b644f2539a..2e3e5da15ea 100644
--- a/vendor/github.com/containers/image/v5/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -110,7 +110,8 @@ func GuessMIMEType(manifest []byte) string {
 	}
 
 	switch meta.MediaType {
-	case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type.
+	case DockerV2Schema2MediaType, DockerV2ListMediaType,
+		imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
 		return meta.MediaType
 	}
 	// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
@@ -121,9 +122,9 @@ func GuessMIMEType(manifest []byte) string {
 		}
 		return DockerV2Schema1MediaType
 	case 2:
-		// best effort to understand if this is an OCI image since mediaType
-		// isn't in the manifest for OCI anymore
-		// for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+		// Best effort to understand if this is an OCI image since mediaType
+		// wasn't in the manifest for OCI image-spec < 1.0.2.
+		// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
 		ociMan := struct {
 			Config struct {
 				MediaType string `json:"mediaType"`
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index c4616b96500..5892184df19 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -66,6 +66,7 @@ func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descript
 	return &OCI1{
 		imgspecv1.Manifest{
 			Versioned: specs.Versioned{SchemaVersion: 2},
+			MediaType: imgspecv1.MediaTypeImageManifest,
 			Config:    config,
 			Layers:    layers,
 		},
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
index 5bec43ff998..c4f11e09c56 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -119,6 +119,7 @@ func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[
 	index := OCI1Index{
 		imgspecv1.Index{
 			Versioned:   imgspec.Versioned{SchemaVersion: 2},
+			MediaType:   imgspecv1.MediaTypeImageIndex,
 			Manifests:   make([]imgspecv1.Descriptor, len(components)),
 			Annotations: dupStringStringMap(annotations),
 		},
@@ -195,6 +196,7 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
 	index := OCI1Index{
 		Index: imgspecv1.Index{
 			Versioned:   imgspec.Versioned{SchemaVersion: 2},
+			MediaType:   imgspecv1.MediaTypeImageIndex,
 			Manifests:   []imgspecv1.Descriptor{},
 			Annotations: make(map[string]string),
 		},
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
index 54d325d34df..4fa9127659b 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -3,7 +3,6 @@ package archive
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strings"
 
@@ -161,7 +160,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
 // createOCIRef creates the oci reference of the image
 // If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
 func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
-	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
+	dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
 	if err != nil {
 		return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory")
 	}
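The GuessMIMEType and OCI1*FromComponents hunks above reflect OCI image-spec 1.0.2 re-introducing the top-level mediaType field for manifests and indexes: newly serialized manifests now carry it, and sniffing can trust it directly instead of falling through to the config/layers heuristics. Rough usage of the public API (the JSON literal is a made-up minimal document):

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	raw := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json"}`)
	// With the patched switch, the explicit OCI mediaType is recognized up front.
	fmt.Println(manifest.GuessMIMEType(raw) == imgspecv1.MediaTypeImageManifest) // true
}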
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index d0ee7263527..77e8fd87637 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -112,7 +111,7 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
 
 // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
 func (d *ociImageDestination) HasThreadSafePutBlob() bool {
-	return false
+	return true
 }
 
 // PutBlob writes contents of stream and returns data representing the result.
@@ -124,7 +123,7 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
+	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -238,7 +237,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
 		return err
 	}
-	if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
+	if err := os.WriteFile(blobPath, m, 0644); err != nil {
 		return err
 	}
 
@@ -309,14 +308,14 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
-	if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+	if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
 		return err
 	}
 	indexJSON, err := json.Marshal(d.index)
 	if err != nil {
 		return err
 	}
-	return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+	return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
 }
 
 func ensureDirectoryExists(path string) error {
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
index 9d8ab689ba4..8973f461c9a 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -3,7 +3,6 @@ package layout
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -93,7 +92,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
 		return nil, "", err
 	}
 
-	m, err := ioutil.ReadFile(manifestPath)
+	m, err := os.ReadFile(manifestPath)
 	if err != nil {
 		return nil, "", err
 	}
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index 4ffbced6bd6..a6473ae68f4 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -5,7 +5,6 @@ import (
 	"crypto/x509"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
@@ -625,7 +624,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
 // loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
 // LoadFromFile takes a filename and deserializes the contents into Config object
 func loadFromFile(filename string) (*clientcmdConfig, error) {
-	kubeconfigBytes, err := ioutil.ReadFile(filename)
+	kubeconfigBytes, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, err
 	}
@@ -1013,7 +1012,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
 		return data, nil
 	}
 	if len(file) > 0 {
-		fileData, err := ioutil.ReadFile(file)
+		fileData, err := os.ReadFile(file)
 		if err != nil {
 			return []byte{}, err
 		}
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index c7c6cf6945a..67612d800c2 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -95,7 +95,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
 		req.Header.Set("Content-Type", "application/json")
 	}
 
-	logrus.Debugf("%s %s", method, url.String())
+	logrus.Debugf("%s %s", method, url.Redacted())
 	res, err := c.httpClient.Do(req)
 	if err != nil {
 		return nil, err
 	}
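The fixFiles hunks in the ostree diff below show the one place where the ioutil.ReadDir → os.ReadDir move is not purely mechanical: os.ReadDir returns lightweight os.DirEntry values, so the full FileInfo has to be fetched explicitly via entry.Info() wherever mode bits are needed. A self-contained sketch of the same pattern (walkDir is a hypothetical name, not from the diff):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// walkDir lists a directory the post-Go-1.16 way: cheap type checks come from
// entry.Type(), and a stat via entry.Info() happens only when mode bits are needed.
func walkDir(dir string) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		fullpath := filepath.Join(dir, entry.Name())
		if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
			continue // no extra stat needed for the type check
		}
		info, err := entry.Info() // explicit stat, as in the fixFiles hunk below
		if err != nil {
			return err
		}
		fmt.Printf("%s %v\n", fullpath, info.Mode())
	}
	return nil
}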
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index 3eb2a2cba22..011118fa52b 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -10,7 +10,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -148,7 +147,7 @@ func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
+	tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -180,20 +179,24 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 }
 
 func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
-	entries, err := ioutil.ReadDir(dir)
+	entries, err := os.ReadDir(dir)
 	if err != nil {
 		return err
 	}
 
-	for _, info := range entries {
-		fullpath := filepath.Join(dir, info.Name())
-		if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+	for _, entry := range entries {
+		fullpath := filepath.Join(dir, entry.Name())
+		if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
 			if err := os.Remove(fullpath); err != nil {
 				return err
 			}
 			continue
 		}
 
+		info, err := entry.Info()
+		if err != nil {
+			return err
+		}
+
 		if selinuxHnd != nil {
 			relPath, err := filepath.Rel(root, fullpath)
 			if err != nil {
@@ -223,7 +226,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
 			}
 		}
 
-		if info.IsDir() {
+		if entry.IsDir() {
 			if usermode {
 				if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
 					return err
@@ -233,7 +236,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
 			if err != nil {
 				return err
 			}
-		} else if usermode && (info.Mode().IsRegular()) {
+		} else if usermode && (entry.Type().IsRegular()) {
 			if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
 				return err
 			}
@@ -405,7 +408,7 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [
 	}
 	d.digest = digest
 
-	return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
+	return os.WriteFile(manifestPath, manifestBlob, 0644)
 }
 
 // PutSignatures writes signatures to the destination.
@@ -423,7 +426,7 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
 	for i, sig := range signatures {
 		signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
-		if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
+		if err := os.WriteFile(signaturePath, sig, 0644); err != nil {
 			return err
 		}
 	}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
index d30c764a630..1e1f2be03cd 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -9,7 +9,6 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"strconv"
 	"strings"
 	"unsafe"
@@ -369,7 +368,7 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
 	}
 	defer sigReader.Close()
 
-	sig, err := ioutil.ReadAll(sigReader)
+	sig, err := io.ReadAll(sigReader)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index c28e8179296..34c90dd77e4 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -5,7 +5,6 @@ import (
 	"compress/bzip2"
 	"fmt"
 	"io"
-	"io/ioutil"
 
 	"github.com/containers/image/v5/pkg/compression/internal"
 	"github.com/containers/image/v5/pkg/compression/types"
@@ -65,7 +64,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
 
 // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
 func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
-	return ioutil.NopCloser(bzip2.NewReader(r)), nil
+	return io.NopCloser(bzip2.NewReader(r)), nil
 }
 
 // XzDecompressor is a DecompressorFunc for the xz compression algorithm.
@@ -74,7 +73,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
 	if err != nil {
 		return nil, err
 	}
-	return ioutil.NopCloser(r), nil
+	return io.NopCloser(r), nil
 }
 
 // gzipCompressor is a CompressorFunc for the gzip compression algorithm.
@@ -161,7 +160,7 @@ func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
 			return nil, false, errors.Wrapf(err, "initializing decompression")
 		}
 	} else {
-		res = ioutil.NopCloser(stream)
+		res = io.NopCloser(stream)
 	}
 	return res, decompressor != nil, nil
 }
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index 63f5bd53e1b..d0bdd08e9a7 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -4,7 +4,6 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -15,6 +14,7 @@ import (
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/homedir"
+	"github.com/containers/storage/pkg/ioutils"
 	helperclient "github.com/docker/docker-credential-helpers/client"
 	"github.com/docker/docker-credential-helpers/credentials"
 	"github.com/hashicorp/go-multierror"
@@ -54,8 +54,8 @@ var (
 
 // SetCredentials stores the username and password in a location
 // appropriate for sys and the users’ configuration.
-// A valid key can be either a registry hostname or additionally a namespace if
-// the AuthenticationFileHelper is being unsed.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
 // Returns a human-redable description of the location that was updated.
 // NOTE: The return value is only intended to be read by humans; its form is not an API,
 // it may change (or new forms can be added) any time.
@@ -128,11 +128,15 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
 	// possible sources, and then call `GetCredentials` on them. That
 	// prevents us from having to reverse engineer the logic in
 	// `GetCredentials`.
-	allRegistries := make(map[string]bool)
-	addRegistry := func(s string) {
-		allRegistries[s] = true
+	allKeys := make(map[string]bool)
+	addKey := func(s string) {
+		allKeys[s] = true
 	}
 
+	// To use GetCredentials, we must at least convert the URL forms into host names.
+	// While we're at it, we’ll also canonicalize docker.io to the standard format.
+	normalizedDockerIORegistry := normalizeRegistry("docker.io")
+
 	helpers, err := sysregistriesv2.CredentialHelpers(sys)
 	if err != nil {
 		return nil, err
 	}
@@ -151,10 +155,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
 			// direct mapping to a registry, so we can just
 			// walk the map.
 			for registry := range auths.CredHelpers {
-				addRegistry(registry)
+				addKey(registry)
 			}
-			for registry := range auths.AuthConfigs {
-				addRegistry(registry)
+			for key := range auths.AuthConfigs {
+				key := normalizeAuthFileKey(key, path.legacyFormat)
+				if key == normalizedDockerIORegistry {
+					key = "docker.io"
+				}
+				addKey(key)
 			}
 		}
 		// External helpers.
@@ -166,7 +174,7 @@
 			switch errors.Cause(err) {
 			case nil:
 				for registry := range creds {
-					addRegistry(registry)
+					addKey(registry)
 				}
 			case exec.ErrNotFound:
 				// It's okay if the helper doesn't exist.
@@ -179,17 +187,15 @@
 	// Now use `GetCredentials` to the specific auth configs for each
 	// previously listed registry.
 	authConfigs := make(map[string]types.DockerAuthConfig)
-	for registry := range allRegistries {
-		authConf, err := GetCredentials(sys, registry)
+	for key := range allKeys {
+		authConf, err := GetCredentials(sys, key)
 		if err != nil {
-			if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
-				// Ignore if the credentials could not be found (anymore).
-				continue
-			}
 			// Note: we rely on the logging in `GetCredentials`.
 			return nil, err
 		}
-		authConfigs[registry] = authConf
+		if authConf != (types.DockerAuthConfig{}) {
+			authConfigs[key] = authConf
+		}
 	}
 
 	return authConfigs, nil
@@ -230,16 +236,14 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
 	return paths
 }
 
-// GetCredentials returns the registry credentials stored in the
-// registry-specific credential helpers or in the default global credentials
-// helpers with falling back to using either auth.json
-// file or .docker/config.json, including support for OAuth2 and IdentityToken.
+// GetCredentials returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
 // If an entry is not found, an empty struct is returned.
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
 //
-// GetCredentialsForRef should almost always be used in favor of this API to
-// allow different credentials for different repositories on the same registry.
-func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
-	return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get())
+// GetCredentialsForRef should almost always be used in favor of this API.
+func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) {
+	return getCredentialsWithHomeDir(sys, key, homedir.Get())
 }
 
 // GetCredentialsForRef returns the registry credentials necessary for
@@ -247,35 +251,39 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth
 // appropriate for sys and the users’ configuration.
 // If an entry is not found, an empty struct is returned.
 func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) {
-	return getCredentialsWithHomeDir(sys, ref, reference.Domain(ref), homedir.Get())
+	return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get())
 }
 
 // getCredentialsWithHomeDir is an internal implementation detail of
 // GetCredentialsForRef and GetCredentials. It exists only to allow testing it
 // with an artificial home directory.
-func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, registry, homeDir string) (types.DockerAuthConfig, error) {
-	// consistency check of the ref and registry arguments
-	if ref != nil && reference.Domain(ref) != registry {
-		return types.DockerAuthConfig{}, errors.Errorf(
-			"internal error: provided reference domain %q name does not match registry %q",
-			reference.Domain(ref), registry,
-		)
+func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) {
+	_, err := validateKey(key)
+	if err != nil {
+		return types.DockerAuthConfig{}, err
 	}
 
 	if sys != nil && sys.DockerAuthConfig != nil {
-		logrus.Debugf("Returning credentials for %s from DockerAuthConfig", registry)
+		logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key)
 		return *sys.DockerAuthConfig, nil
 	}
 
+	var registry string // We compute this once because it is used in several places.
+	if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 {
+		registry = key[:firstSlash]
+	} else {
+		registry = key
+	}
+
 	// Anonymous function to query credentials from auth files.
 	getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
 		for _, path := range getAuthFilePaths(sys, homeDir) {
-			authConfig, err := findAuthentication(ref, registry, path.path, path.legacyFormat)
+			authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat)
 			if err != nil {
 				return types.DockerAuthConfig{}, "", err
 			}
 
-			if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" {
+			if authConfig != (types.DockerAuthConfig{}) {
 				return authConfig, path.path, nil
 			}
 		}
@@ -291,57 +299,61 @@
 	for _, helper := range helpers {
 		var (
 			creds          types.DockerAuthConfig
+			helperKey      string
 			credHelperPath string
 			err            error
 		)
 		switch helper {
 		// Special-case the built-in helper for auth files.
 		case sysregistriesv2.AuthenticationFileHelper:
+			helperKey = key
 			creds, credHelperPath, err = getCredentialsFromAuthFiles()
 		// External helpers.
 		default:
+			// This intentionally uses "registry", not "key"; we don't support namespaced
+			// credentials in helpers, but a "registry" is a valid parent of "key".
+			helperKey = registry
 			creds, err = getAuthFromCredHelper(helper, registry)
 		}
 		if err != nil {
-			logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", registry, helper, err)
+			logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
 			multiErr = multierror.Append(multiErr, err)
 			continue
 		}
-		if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 {
-			continue
-		}
-		msg := fmt.Sprintf("Found credentials for %s in credential helper %s", registry, helper)
-		if credHelperPath != "" {
-			msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
+		if creds != (types.DockerAuthConfig{}) {
+			msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper)
+			if credHelperPath != "" {
+				msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
+			}
+			logrus.Debug(msg)
+			return creds, nil
 		}
-		logrus.Debug(msg)
-		return creds, nil
 	}
 
 	if multiErr != nil {
 		return types.DockerAuthConfig{}, multiErr
 	}
 
-	logrus.Debugf("No credentials for %s found", registry)
+	logrus.Debugf("No credentials for %s found", key)
 	return types.DockerAuthConfig{}, nil
 }
 
-// GetAuthentication returns the registry credentials stored in the
-// registry-specific credential helpers or in the default global credentials
-// helpers with falling back to using either auth.json file or
-// .docker/config.json
+// GetAuthentication returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
 //
 // Deprecated: This API only has support for username and password. To get the
 // support for oauth2 in container registry authentication, we added the new
-// GetCredentials API. The new API should be used and this API is kept to
+// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to
 // maintain backward compatibility.
-func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
-	return getAuthenticationWithHomeDir(sys, registry, homedir.Get())
+func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) {
+	return getAuthenticationWithHomeDir(sys, key, homedir.Get())
 }
 
 // getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
 // it exists only to allow testing it with an artificial home directory.
-func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) {
-	auth, err := getCredentialsWithHomeDir(sys, nil, registry, homeDir)
+func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
+	auth, err := getCredentialsWithHomeDir(sys, key, homeDir)
 	if err != nil {
 		return "", "", err
 	}
@@ -353,8 +365,8 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir st
 
 // RemoveAuthentication removes credentials for `key` from all possible
 // sources such as credential helpers and auth files.
-// A valid key can be either a registry hostname or additionally a namespace if
-// the AuthenticationFileHelper is being unsed.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
 func RemoveAuthentication(sys *types.SystemContext, key string) error {
 	isNamespaced, err := validateKey(key)
 	if err != nil {
@@ -531,7 +543,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e
 func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
 	var auths dockerConfigFile
 
-	raw, err := ioutil.ReadFile(path)
+	raw, err := os.ReadFile(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			auths.AuthConfigs = map[string]dockerAuthConfig{}
@@ -593,7 +605,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
 		return "", errors.Wrapf(err, "marshaling JSON %q", path)
 	}
 
-	if err = ioutil.WriteFile(path, newData, 0600); err != nil {
+	if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
 		return "", errors.Wrapf(err, "writing to file %q", path)
 	}
 }
@@ -606,6 +618,10 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig,
 	p := helperclient.NewShellProgramFunc(helperName)
 	creds, err := helperclient.Get(p, registry)
 	if err != nil {
+		if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
+			logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper)
+			err = nil
+		}
 		return types.DockerAuthConfig{}, err
 	}
 
@@ -639,26 +655,28 @@ func deleteAuthFromCredHelper(credHelper, registry string) error {
 	return helperclient.Erase(p, registry)
 }
 
-// findAuthentication looks for auth of registry in path. If ref is
-// not nil, then it will be taken into account when looking up the
-// authentication credentials.
-func findAuthentication(ref reference.Named, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
+// findCredentialsInFile looks for credentials matching "key"
+// (which is "registry" or a namespace in "registry") in "path".
+func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
 	auths, err := readJSONFile(path, legacyFormat)
 	if err != nil {
 		return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path)
 	}
 
 	// First try cred helpers. They should always be normalized.
+	// This intentionally uses "registry", not "key"; we don't support namespaced
+	// credentials in helpers.
 	if ch, exists := auths.CredHelpers[registry]; exists {
+		logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path)
 		return getAuthFromCredHelper(ch, registry)
 	}
 
-	// Support for different paths in auth.
+	// Support sub-registry namespaces in auth.
 	// (This is not a feature of ~/.docker/config.json; we support it even for
 	// those files as an extension.)
 	var keys []string
-	if !legacyFormat && ref != nil {
-		keys = authKeysForRef(ref)
+	if !legacyFormat {
+		keys = authKeysForKey(key)
 	} else {
 		keys = []string{registry}
 	}
@@ -686,26 +704,28 @@
 		}
 	}
 
+	// Only log this if we found nothing; getCredentialsWithHomeDir logs the
+	// source of found data.
+	logrus.Debugf("No credentials matching %s found in %s", key, path)
 	return types.DockerAuthConfig{}, nil
 }
 
-// authKeysForRef returns the valid paths for a provided reference. For example,
-// when given a reference "quay.io/repo/ns/image:tag", then it would return
+// authKeysForKey returns the keys matching a provided auth file key, in order
+// from the best match to worst. For example,
+// when given a repository key "quay.io/repo/ns/image", it returns
 // - quay.io/repo/ns/image
 // - quay.io/repo/ns
 // - quay.io/repo
 // - quay.io
-func authKeysForRef(ref reference.Named) (res []string) {
-	name := ref.Name()
-
+func authKeysForKey(key string) (res []string) {
 	for {
-		res = append(res, name)
+		res = append(res, key)
 
-		lastSlash := strings.LastIndex(name, "/")
+		lastSlash := strings.LastIndex(key, "/")
 		if lastSlash == -1 {
 			break
 		}
-		name = name[:lastSlash]
+		key = key[:lastSlash]
 	}
 
 	return res
@@ -759,11 +779,24 @@ func normalizeRegistry(registry string) string {
 
 // validateKey verifies that the input key does not have a prefix that is not
 // allowed and returns an indicator if the key is namespaced.
-func validateKey(key string) (isNamespaced bool, err error) {
+func validateKey(key string) (bool, error) {
 	if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
-		return isNamespaced, errors.Errorf("key %s contains http[s]:// prefix", key)
+		return false, errors.Errorf("key %s contains http[s]:// prefix", key)
 	}
 
+	// Ideally this should only accept explicitly valid keys, compare
+	// validateIdentityRemappingPrefix. For now, just reject values that look
+	// like tagged or digested values.
+	if strings.ContainsRune(key, '@') {
+		return false, fmt.Errorf(`key %s contains a '@' character`, key)
+	}
+
+	firstSlash := strings.IndexRune(key, '/')
+	isNamespaced := firstSlash != -1
+	// Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo.
+	if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') {
+		return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key)
+	}
 	// check if the provided key contains one or more subpaths.
-	return strings.ContainsRune(key, '/'), nil
+	return isNamespaced, nil
 }
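The credential-key rework above replaces reference-based lookup with plain string keys, and authKeysForKey defines the precedence for namespaced credentials. Reimplemented standalone to show the resulting order (the loop logic is copied from the hunk above; the package wrapper is mine):

package main

import (
	"fmt"
	"strings"
)

// authKeys mirrors authKeysForKey: most-specific key first, registry host last.
func authKeys(key string) []string {
	var res []string
	for {
		res = append(res, key)
		lastSlash := strings.LastIndex(key, "/")
		if lastSlash == -1 {
			break
		}
		key = key[:lastSlash]
	}
	return res
}

func main() {
	fmt.Println(authKeys("quay.io/repo/ns/image"))
	// Output: [quay.io/repo/ns/image quay.io/repo/ns quay.io/repo quay.io]
}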
- -const keyDescribePrefix = "container-registry-login:" //nolint:deadcode,unused - -func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return "", "", err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return "", "", err - } - authData, err := key.Get() - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(authData), "\x00", 2) - if len(parts) != 2 { - return "", "", nil - } - return parts[0], parts[1], nil -} - -func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused - userkeyring, err := keyctl.UserKeyring() - - if err != nil { - return err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return err - } - return key.Unlink() -} - -func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused - keys, err := keyctl.ReadUserKeyring() - if err != nil { - return err - } - - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return err - } - - for _, k := range keys { - keyAttr, err := k.Describe() - if err != nil { - return err - } - // split string "type;uid;gid;perm;description" - keyAttrs := strings.SplitN(keyAttr, ";", 5) - if len(keyAttrs) < 5 { - return errors.Errorf("Key attributes of %d are not available", k.ID()) - } - keyDescribe := keyAttrs[4] - if strings.HasPrefix(keyDescribe, keyDescribePrefix) { - err := keyctl.Unlink(userkeyring, k) - if err != nil { - return errors.Wrapf(err, "unlinking key %d", k.ID()) - } - logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr) - } - } - return nil -} - -func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused - keyring, err := keyctl.SessionKeyring() - if err != nil { - return err - } - id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password))) - if err != nil { - return err - } - - // sets all permission(view,read,write,search,link,set attribute) for current user - // it enables the user to search the key after it linked to user keyring and unlinked from session keyring - err = keyctl.SetPerm(id, keyctl.PermUserAll) - if err != nil { - return err - } - // link the key to userKeyring - userKeyring, err := keyctl.UserKeyring() - if err != nil { - return errors.Wrapf(err, "getting user keyring") - } - err = keyctl.Link(userKeyring, id) - if err != nil { - return errors.Wrapf(err, "linking the key to user keyring") - } - // unlink the key from session keyring - err = keyctl.Unlink(keyring, id) - if err != nil { - return errors.Wrapf(err, "unlinking the key from session keyring") - } - return nil -} - -func genDescription(registry string) string { //nolint:deadcode,unused - return fmt.Sprintf("%s%s", keyDescribePrefix, registry) -} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go deleted file mode 100644 index d9827d8edbc..00000000000 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !linux && (!386 || !amd64) -// +build !linux -// +build !386 !amd64 - -package config - -func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused - return "", "", ErrNotSupported -} - -func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused - return ErrNotSupported -} - -func 
setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused - return ErrNotSupported -} - -func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused - return ErrNotSupported -} diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index fb0a15b993e..46c10ff631a 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -118,6 +118,7 @@ type Resolved struct { } func (r *Resolved) addCandidate(named reference.Named) { + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r}) } @@ -138,6 +139,8 @@ const ( rationaleUSR // Resolved value has been selected by the user (via the prompt). rationaleUserSelection + // Resolved value has been enforced to use Docker Hub (via SystemContext). + rationaleEnforcedDockerHub ) // Description returns a human-readable description about the resolution @@ -152,6 +155,8 @@ func (r *Resolved) Description() string { return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription) case rationaleUSR: return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription) + case rationaleEnforcedDockerHub: + return fmt.Sprintf("Resolving %q to docker.io (%s)", r.userInput, r.originDescription) case rationaleUserSelection, rationaleNone: fallthrough default: @@ -265,8 +270,20 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { return nil, err } if !isShort { // no short name - named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed + resolved.addCandidate(shortRef) + return resolved, nil + } + + // Resolve to docker.io only if enforced by the caller (e.g., Podman's + // Docker-compatible REST API). + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, errors.Wrapf(err, "cannot normalize input: %q", name) + } resolved.addCandidate(named) + resolved.rationale = rationaleEnforcedDockerHub + resolved.originDescription = "enforced by caller" return resolved, nil } @@ -295,9 +312,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { return nil, err } } - // Make sure to add ":latest" if needed - namedAlias = reference.TagNameOnly(namedAlias) - resolved.addCandidate(namedAlias) resolved.rationale = rationaleAlias resolved.originDescription = aliasOriginDescription @@ -325,9 +339,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { if err != nil { return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) } - // Make sure to add ":latest" if needed - named = reference.TagNameOnly(named) - resolved.addCandidate(named) } @@ -412,6 +423,23 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e var candidates []reference.Named + // Complete the candidates with the specified registries. 
+ completeCandidates := func(registries []string) ([]reference.Named, error) { + for _, reg := range registries { + named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) + if err != nil { + return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + } + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + candidates = append(candidates, named) + } + return candidates, nil + } + + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + return completeCandidates([]string{"docker.io"}) + } + // Strip off the tag to normalize the short name for looking it up in // the config files. isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef) @@ -434,9 +462,7 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e return nil, err } } - // Make sure to add ":latest" if needed - namedAlias = reference.TagNameOnly(namedAlias) - + namedAlias = reference.TagNameOnly(namedAlias) // Make sure to add ":latest" if needed candidates = append(candidates, namedAlias) } @@ -447,16 +473,5 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e } // Note that "localhost" has precedence over the unqualified-search registries. - for _, reg := range append([]string{"localhost"}, unqualifiedSearchRegistries...) { - named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) - if err != nil { - return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) - } - // Make sure to add ":latest" if needed - named = reference.TagNameOnly(named) - - candidates = append(candidates, named) - } - - return candidates, nil + return completeCandidates(append([]string{"localhost"}, unqualifiedSearchRegistries...)) } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 7122e869fe6..6909ea0a60f 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -13,6 +13,7 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/lockfile" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // defaultShortNameMode is the default mode of registries.conf files if the @@ -315,11 +316,14 @@ func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAlia func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) { conf := shortNameAliasConf{} - _, err := toml.DecodeFile(confPath, &conf) + meta, err := toml.DecodeFile(confPath, &conf) if err != nil && !os.IsNotExist(err) { // It's okay if the config doesn't exist. Other errors are not. return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath) } + if keys := meta.Undecoded(); len(keys) > 0 { + logrus.Debugf("Failed to decode keys %q from %q", keys, confPath) + } // Even if we don’t always need the cache, doing so validates the machine-generated config. The // file could still be corrupted by another process or user. 
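The meta.Undecoded() logging added above is a cheap way to surface short-name alias keys that the TOML decoder silently ignored (typos, unknown options). A short sketch of the same pattern with github.com/BurntSushi/toml; the config struct and file path here are hypothetical stand-ins:

```go
package main

import (
	"log"

	"github.com/BurntSushi/toml"
)

// demoConf is a hypothetical stand-in for shortNameAliasConf.
type demoConf struct {
	Aliases map[string]string `toml:"aliases"`
}

func main() {
	var conf demoConf
	meta, err := toml.DecodeFile("/etc/containers/registries.conf.d/000-shortnames.conf", &conf)
	if err != nil {
		log.Fatal(err)
	}
	// Undecoded() lists every key present in the file that did not map
	// onto a field of demoConf -- typically a typo or an unknown option.
	if keys := meta.Undecoded(); len(keys) > 0 {
		log.Printf("ignored configuration keys: %q", keys)
	}
}
```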
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index c8a603c4ef0..c1753c8457a 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -2,6 +2,7 @@ package sysregistriesv2
 import (
 	"fmt"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -43,6 +44,16 @@ const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
 // helper.
 const AuthenticationFileHelper = "containers-auth.json"
 
+const (
+	// configuration values for "pull-from-mirror"
+	// mirrors will be used for both digest pulls and tag pulls
+	MirrorAll = "all"
+	// mirrors will only be used for digest pulls
+	MirrorByDigestOnly = "digest-only"
+	// mirrors will only be used for tag pulls
+	MirrorByTagOnly = "tag-only"
+)
+
 // Endpoint describes a remote location of a registry.
 type Endpoint struct {
 	// The endpoint's remote location. Can be empty iff Prefix contains
 	// If true, certs verification will be skipped and HTTP (non-TLS)
 	// connections will be allowed.
 	Insecure bool `toml:"insecure,omitempty"`
+	// PullFromMirror restricts the use of this mirror for image pulls.
+	// Set to "all", "digest-only", or "tag-only".
+	// If "digest-only", mirrors will only be used for digest pulls. Pulling images by
+	// tag can potentially yield different images, depending on which endpoint
+	// we pull from. Restricting mirrors to pulls by digest avoids that issue.
+	// If "tag-only", mirrors will only be used for tag pulls. A mirror that is more
+	// up-to-date, and therefore less likely to be out of sync when tags move, need
+	// not be unnecessarily used for digest references.
+	// If unset or "all" (the default), mirrors will be used for both digest pulls
+	// and tag pulls, unless mirror-by-digest-only is set for the primary registry.
+	// This can only be set in a registry's Mirror field, not in the registry's primary Endpoint.
+	// This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry.
+	PullFromMirror string `toml:"pull-from-mirror,omitempty"`
 }
 
 // userRegistriesFile is the path to the per user registry configuration file.
@@ -115,7 +138,7 @@ type Registry struct {
 	Blocked bool `toml:"blocked,omitempty"`
 	// If true, mirrors will only be used for digest pulls. Pulling images by
 	// tag can potentially yield different images, depending on which endpoint
-	// we pull from. Forcing digest-pulls for mirrors avoids that issue.
+	// we pull from. Restricting mirrors to pulls by digest avoids that issue.
 	MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
 }
 
@@ -130,17 +153,29 @@ type PullSource struct
 // reference.
 func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
 	var endpoints []Endpoint
-
+	_, isDigested := ref.(reference.Canonical)
 	if r.MirrorByDigestOnly {
-		// Only use mirrors when the reference is a digest one.
-		if _, isDigested := ref.(reference.Canonical); isDigested {
-			endpoints = append(r.Mirrors, r.Endpoint)
-		} else {
-			endpoints = []Endpoint{r.Endpoint}
+		// Only use mirrors when the reference is a digested one.
+		if isDigested {
+			endpoints = append(endpoints, r.Mirrors...)
+		}
 	} else {
-		endpoints = append(r.Mirrors, r.Endpoint)
+		for _, mirror := range r.Mirrors {
+			// Skip the mirror if a per-mirror setting exists but the
+			// reference does not match the restriction.
+			switch mirror.PullFromMirror {
+			case MirrorByDigestOnly:
+				if !isDigested {
+					continue
+				}
+			case MirrorByTagOnly:
+				if isDigested {
+					continue
+				}
+			}
+			endpoints = append(endpoints, mirror)
+		}
 	}
+	endpoints = append(endpoints, r.Endpoint)
 
 	sources := []PullSource{}
 	for _, ep := range endpoints {
@@ -374,6 +409,10 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
 		}
 	}
 
+	// Validate that pull-from-mirror is not applied to a primary registry.
+	if reg.PullFromMirror != "" {
+		return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix)
+	}
 	// make sure mirrors are valid
 	for _, mir := range reg.Mirrors {
 		mir.Location, err = parseLocation(mir.Location)
@@ -387,6 +426,14 @@
 		if mir.Location == "" {
 			return &InvalidRegistries{s: "invalid condition: mirror location is unset"}
 		}
+
+		if reg.MirrorByDigestOnly && mir.PullFromMirror != "" {
+			return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for per-mirror (%q) at the same time", reg.Prefix, mir.Location)}
+		}
+		if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll &&
+			mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly {
+			return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)}
+		}
 	}
 	if reg.Location == "" {
 		regMap[reg.Prefix] = append(regMap[reg.Prefix], reg)
@@ -597,17 +644,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
 		dirPaths = append(dirPaths, wrapper.userConfigDirPath)
 	}
 	for _, dirPath := range dirPaths {
-		err := filepath.Walk(dirPath,
+		err := filepath.WalkDir(dirPath,
 			// WalkFunc to read additional configs
-			func(path string, info os.FileInfo, err error) error {
+			func(path string, d fs.DirEntry, err error) error {
 				switch {
 				case err != nil:
 					// return error (could be a permission problem)
 					return err
-				case info == nil:
+				case d == nil:
 					// this should only happen when err != nil but let's be sure
 					return nil
-				case info.IsDir():
+				case d.IsDir():
 					if path != dirPath {
 						// make sure to not recurse into sub-directories
 						return filepath.SkipDir
@@ -877,10 +924,13 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
 	// Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
 	var combinedTOML tomlConfig
-	_, err := toml.DecodeFile(path, &combinedTOML)
+	meta, err := toml.DecodeFile(path, &combinedTOML)
 	if err != nil {
 		return nil, err
 	}
+	if keys := meta.Undecoded(); len(keys) > 0 {
+		logrus.Debugf("Failed to decode keys %q from %q", keys, path)
+	}
 	if combinedTOML.V1RegistriesConf.Nonempty() {
 		// Enforce the v2 format if requested.
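To make the new mirror-selection rules concrete, here is a standalone sketch of the ordering PullSourcesFromReference produces (simplified stand-in types, not the vendored ones): filtered mirrors first, the primary registry last.

```go
package main

import "fmt"

type endpoint struct {
	location       string
	pullFromMirror string // "", "all", "digest-only", or "tag-only"
}

// orderPullSources mirrors the logic in the hunk above: mirrors that
// match the reference type are tried first, the primary endpoint last.
func orderPullSources(mirrors []endpoint, primary endpoint, isDigested bool) []endpoint {
	var out []endpoint
	for _, m := range mirrors {
		switch m.pullFromMirror {
		case "digest-only":
			if !isDigested {
				continue
			}
		case "tag-only":
			if isDigested {
				continue
			}
		}
		out = append(out, m)
	}
	return append(out, primary)
}

func main() {
	mirrors := []endpoint{
		{location: "mirror-a.example.com", pullFromMirror: "digest-only"},
		{location: "mirror-b.example.com", pullFromMirror: "tag-only"},
		{location: "mirror-c.example.com"}, // unset: used for both
	}
	primary := endpoint{location: "registry.example.com"}

	for _, ep := range orderPullSources(mirrors, primary, true) {
		fmt.Println("digest pull:", ep.location) // mirror-a, mirror-c, primary
	}
	for _, ep := range orderPullSources(mirrors, primary, false) {
		fmt.Println("tag pull:   ", ep.location) // mirror-b, mirror-c, primary
	}
}
```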
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 7e2142b1f58..c766417d0ec 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -2,7 +2,6 @@ package tlsclientconfig import ( "crypto/tls" - "io/ioutil" "net" "net/http" "os" @@ -19,7 +18,7 @@ import ( // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc func SetupCertificates(dir string, tlsc *tls.Config) error { logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) + fs, err := os.ReadDir(dir) if err != nil { if os.IsNotExist(err) { return nil @@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { fullPath := filepath.Join(dir, f.Name()) if strings.HasSuffix(f.Name(), ".crt") { logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) + data, err := os.ReadFile(fullPath) if err != nil { if os.IsNotExist(err) { // Dangling symbolic link? @@ -81,7 +80,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { return nil } -func hasFile(files []os.FileInfo, name string) bool { +func hasFile(files []os.DirEntry, name string) bool { for _, f := range files { if f.Name() == name { return true diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go new file mode 100644 index 00000000000..70758ad4399 --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/load.go @@ -0,0 +1,210 @@ +package sif + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/sylabs/sif/v2/pkg/sif" +) + +// injectedScriptTargetPath is the path injectedScript should be written to in the created image. +const injectedScriptTargetPath = "/podman/runscript" + +// parseDefFile parses a SIF definition file from reader, +// and returns non-trivial contents of the %environment and %runscript sections. +func parseDefFile(reader io.Reader) ([]string, []string, error) { + type parserState int + const ( + parsingOther parserState = iota + parsingEnvironment + parsingRunscript + ) + + environment := []string{} + runscript := []string{} + + state := parsingOther + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + s := strings.TrimSpace(scanner.Text()) + switch { + case s == `%environment`: + state = parsingEnvironment + case s == `%runscript`: + state = parsingRunscript + case strings.HasPrefix(s, "%"): + state = parsingOther + case state == parsingEnvironment: + if s != "" && !strings.HasPrefix(s, "#") { + environment = append(environment, s) + } + case state == parsingRunscript: + runscript = append(runscript, s) + default: // parsingOther: ignore the line + } + } + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err) + } + return environment, runscript, nil +} + +// generateInjectedScript generates a shell script based on +// SIF definition file %environment and %runscript data, and returns it. 
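A quick usage sketch for parseDefFile above, written as a test in the same package so the unexported function is reachable; the definition-file contents are made up:

```go
package sif

import (
	"fmt"
	"strings"
	"testing"
)

// TestParseDefFile shows what parseDefFile extracts from a small
// Singularity definition file: only the %environment and %runscript
// sections, with comments and blank %environment lines dropped.
func TestParseDefFile(t *testing.T) {
	const defFile = `Bootstrap: library
From: alpine:3.15

%environment
    export LC_ALL=C

%runscript
    echo "hello from SIF"
`
	env, run, err := parseDefFile(strings.NewReader(defFile))
	if err != nil {
		t.Fatal(err)
	}
	fmt.Printf("env: %q\n", env) // env: ["export LC_ALL=C"]
	fmt.Printf("run: %q\n", run) // run: ["echo \"hello from SIF\""]
}
```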
+func generateInjectedScript(environment []string, runscript []string) []byte { + script := fmt.Sprintf("#!/bin/bash\n"+ + "%s\n"+ + "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n")) + return []byte(script) +} + +// processDefFile finds sif.DataDeffile in sifImage, if any, +// and returns: +// - the command to run +// - contents of a script to inject as injectedScriptTargetPath, or nil +func processDefFile(sifImage *sif.FileImage) (string, []byte, error) { + var environment, runscript []string + + desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile)) + if err == nil { + environment, runscript, err = parseDefFile(desc.GetReader()) + if err != nil { + return "", nil, err + } + } + + var command string + var injectedScript []byte + if len(environment) == 0 && len(runscript) == 0 { + command = "bash" + injectedScript = nil + } else { + injectedScript = generateInjectedScript(environment, runscript) + command = injectedScriptTargetPath + } + + return command, injectedScript, nil +} + +func writeInjectedScript(extractedRootPath string, injectedScript []byte) error { + if injectedScript == nil { + return nil + } + filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath) + parentDirPath := filepath.Dir(filePath) + if err := os.MkdirAll(parentDirPath, 0755); err != nil { + return fmt.Errorf("creating %s: %w", parentDirPath, err) + } + if err := os.WriteFile(filePath, injectedScript, 0755); err != nil { + return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err) + } + return nil +} + +// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath. +// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use, +// if necessary. +func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error { + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + defer os.RemoveAll(extractedRootPath) + + // Almost everything in extractedRootPath comes from squashFSPath. + conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./", + extractedRootPath, squashFSPath, extractedRootPath, tarPath) + script := "#!/bin/sh\n" + conversionCommand + "\n" + if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil { + return err + } + defer os.Remove(scriptPath) + + // On top of squashFSPath, we only add injectedScript, if necessary. + if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil { + return err + } + + logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand) + cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("converting image: %w, output: %s", err, string(output)) + } + logrus.Debugf("... finished converting squashfs to tar") + return nil +} + +// convertSIFToElements processes sifImage and creates/returns +// the relevant elements for constructing an OCI-like image: +// - A path to a tar file containing a root filesystem, +// - A command to run. +// The returned tar file path is inside tempDir, which can be assumed to be empty +// at start, and is exclusively used by the current process (i.e. it is safe +// to use hard-coded relative paths within it). 
+func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) { + // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive, + // so we can just hard-code a set of unique values here. + // We create and/or manage cleanup of these two paths. + squashFSPath := filepath.Join(tempDir, "rootfs.squashfs") + tarPath := filepath.Join(tempDir, "rootfs.tar") + // We only allocate these paths, the user is responsible for cleaning them up. + extractedRootPath := filepath.Join(tempDir, "rootfs") + scriptPath := filepath.Join(tempDir, "script") + + succeeded := false + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need + // to run both creation and consumption of extractedRootPath in the same fakeroot context. + // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2 + // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time. + // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser + // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy. + defer os.Remove(squashFSPath) + defer func() { + if !succeeded { + os.Remove(tarPath) + } + }() + + command, injectedScript, err := processDefFile(sifImage) + if err != nil { + return "", nil, err + } + + rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys)) + if err != nil { + return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err) + } + // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4 + // has an -o option that allows extracting a squashfs from the SIF file directly, + // but that version is not currently available in RHEL 8. + logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath) + if err := func() error { // A scope for defer + f, err := os.Create(squashFSPath) + if err != nil { + return err + } + defer f.Close() + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil { + return err + } + return nil + }(); err != nil { + return "", nil, err + } + logrus.Debugf("... 
finished creating a temporary squashfs image")
+
+	if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
+		return "", nil, err
+	}
+	succeeded = true
+	return tarPath, []string{command}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
new file mode 100644
index 00000000000..ccf1259660e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -0,0 +1,216 @@
+package sif
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/containers/image/v5/internal/tmpdir"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	imgspecs "github.com/opencontainers/image-spec/specs-go"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+	"github.com/sylabs/sif/v2/pkg/sif"
+)
+
+type sifImageSource struct {
+	ref          sifReference
+	workDir      string
+	layerDigest  digest.Digest
+	layerSize    int64
+	layerFile    string
+	config       []byte
+	configDigest digest.Digest
+	manifest     []byte
+}
+
+// getBlobInfo returns the digest and size of the provided file.
+func getBlobInfo(path string) (digest.Digest, int64, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
+	}
+	defer f.Close()
+
+	// TODO: Instead of writing the tar file to disk, and reading
+	// it here again, stream the tar file to a pipe and
+	// compute the digest while writing it to disk.
+	logrus.Debugf("Computing a digest of the SIF conversion output...")
+	digester := digest.Canonical.Digester()
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	size, err := io.Copy(digester.Hash(), f)
+	if err != nil {
+		return "", -1, fmt.Errorf("reading %q: %w", path, err)
+	}
+	digest := digester.Digest()
+	logrus.Debugf("... finished computing the digest of the SIF conversion output")
+
+	return digest, size, nil
+}
+
+// newImageSource returns an ImageSource for reading from an existing SIF image.
+// newImageSource extracts SIF objects and saves them in a temp directory.
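getBlobInfo above is a direct application of the github.com/opencontainers/go-digest API. A minimal sketch of the same hash-while-reading pattern on an arbitrary file (the path in main is just an example):

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/opencontainers/go-digest"
)

// fileDigest returns the canonical (sha256) digest and size of a file,
// the same pattern getBlobInfo uses for the converted layer tarball.
func fileDigest(path string) (digest.Digest, int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", -1, err
	}
	defer f.Close()

	digester := digest.Canonical.Digester()
	// io.Copy feeds the file through the digester's hash.Hash.
	size, err := io.Copy(digester.Hash(), f)
	if err != nil {
		return "", -1, err
	}
	return digester.Digest(), size, nil
}

func main() {
	d, n, err := fileDigest("/etc/hostname") // any readable file works
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s (%d bytes)\n", d, n)
}
```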
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) { + sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY)) + if err != nil { + return nil, fmt.Errorf("loading SIF file: %w", err) + } + defer func() { + _ = sifImg.UnloadContainer() + }() + + workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") + if err != nil { + return nil, fmt.Errorf("creating temp directory: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + os.RemoveAll(workDir) + } + }() + + layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir) + if err != nil { + return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err) + } + + layerDigest, layerSize, err := getBlobInfo(layerPath) + if err != nil { + return nil, fmt.Errorf("gathering blob information: %w", err) + } + + created := sifImg.ModifiedAt() + config := imgspecv1.Image{ + Created: &created, + Architecture: sifImg.PrimaryArch(), + OS: "linux", + Config: imgspecv1.ImageConfig{ + Cmd: commandLine, + }, + RootFS: imgspecv1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{layerDigest}, + }, + History: []imgspecv1.History{ + { + Created: &created, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator), + Comment: "imported from SIF, uuid: " + sifImg.ID(), + }, + { + Created: &created, + CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]", + EmptyLayer: true, + }, + }, + } + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err) + } + configDigest := digest.Canonical.FromBytes(configBytes) + + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, + Config: imgspecv1.Descriptor{ + Digest: configDigest, + Size: int64(len(configBytes)), + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: []imgspecv1.Descriptor{{ + Digest: layerDigest, + Size: layerSize, + MediaType: imgspecv1.MediaTypeImageLayer, + }}, + } + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err) + } + + succeeded = true + return &sifImageSource{ + ref: ref, + workDir: workDir, + layerDigest: layerDigest, + layerSize: layerSize, + layerFile: layerPath, + config: configBytes, + configDigest: configDigest, + manifest: manifestBytes, + }, nil +} + +// Reference returns the reference used to set up this source. +func (s *sifImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *sifImageSource) Close() error { + return os.RemoveAll(s.workDir) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *sifImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + switch info.Digest { + case s.configDigest: + return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil + case s.layerDigest: + reader, err := os.Open(s.layerFile) + if err != nil { + return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err) + } + return reader, s.layerSize, nil + default: + return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String()) + } +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.New("manifest lists are not supported by the sif transport") + } + return s.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by the sif transport") + } + return nil, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go new file mode 100644 index 00000000000..18d894bc35c --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -0,0 +1,164 @@ +package sif + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for SIF images. 
+var Transport = sifTransport{} + +type sifTransport struct{} + +func (t sifTransport) Name() string { + return "sif" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// sifReference is an ImageReference for SIF images. +type sifReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + file string // As specified by the user. May be relative, contain symlinks, etc. + resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no sif.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// sifTransport.ParseReference; callers who intentionally deal with SIF files +// can use sif.NewReference. + +// NewReference returns an image file reference for a specified path. +func NewReference(file string) (types.ImageReference, error) { + // We do not expose an API supplying the resolvedFile; we could, but recomputing it + // is generally cheap enough that we prefer being confident about the properties of resolvedFile. + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + return sifReference{file: file, resolvedFile: resolved}, nil +} + +func (ref sifReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref sifReference) StringWithinTransport() string { + return ref.file +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref sifReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref sifReference) PolicyConfigurationIdentity() string { + return ref.resolvedFile +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref sifReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by sifTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
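Once registered, the transport is reachable through the generic reference parser. A hedged usage sketch; the SIF path is hypothetical, and this assumes the sif transport is linked in via the alltransports package as in current containers/image releases:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	ctx := context.Background()

	// "sif:" + a filesystem path, as parsed by sifTransport.ParseReference.
	ref, err := alltransports.ParseImageName("sif:/tmp/alpine.sif")
	if err != nil {
		panic(err)
	}

	src, err := ref.NewImageSource(ctx, nil) // runs the SIF -> tar conversion
	if err != nil {
		panic(err)
	}
	defer src.Close() // removes the temporary work directory

	manifest, mimeType, err := src.GetManifest(ctx, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n%s\n", mimeType, manifest)
}
```

Note that the conversion shells out to fakeroot, unsquashfs, and tar, so those tools must be installed for the source to be usable.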
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return nil, errors.New(`"sif:" locations can only be read from, not written to`)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.New("Deleting images not implemented for sif: images")
+}
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
index 07fdd42a9af..8e9ce0dd236 100644
--- a/vendor/github.com/containers/image/v5/signature/docker.go
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -3,22 +3,46 @@ package signature
 import (
+	"errors"
 	"fmt"
+	"strings"
 
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/manifest"
 	"github.com/opencontainers/go-digest"
 )
 
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+	// Passphrase to use when signing with the key identity.
+	Passphrase string
+}
+
-// SignDockerManifest returns a signature for manifest as the specified dockerReference,
-// using mech and keyIdentity.
-func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
 	manifestDigest, err := manifest.Digest(m)
 	if err != nil {
 		return nil, err
 	}
 	sig := newUntrustedSignature(manifestDigest, dockerReference)
-	return sig.sign(mech, keyIdentity)
+
+	var passphrase string
+	if options != nil {
+		passphrase = options.Passphrase
+		// The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
+		if strings.Contains(passphrase, "\n") {
+			return nil, errors.New("invalid passphrase: must not contain a line break")
+		}
+	}
+
+	return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
 }
 
 // VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go
index 2c08c231ea8..249b5a1fe46 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism.go
@@ -6,16 +6,19 @@
 import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"strings"
 
-	"golang.org/x/crypto/openpgp"
+	// This code is used only to parse the data in an explicitly-untrusted
+	// code path, where cryptography is not relevant. For now, continue to
+	// use this frozen deprecated implementation. When mechanism_openpgp.go
+	// migrates to another implementation, this should migrate as well.
+	//lint:ignore SA1019 See above
+	"golang.org/x/crypto/openpgp" //nolint:staticcheck
 )
 
 // SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// Each mechanism should eventually be closed by calling Close(). -// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to -// eliminate ambiguities, support CA signatures and perhaps other key properties) type SigningMechanism interface { // Close removes resources associated with the mechanism, if any. Close() error @@ -34,6 +37,15 @@ type SigningMechanism interface { UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) } +// signingMechanismWithPassphrase is an internal extension of SigningMechanism. +type signingMechanismWithPassphrase interface { + SigningMechanism + + // Sign creates a (non-detached) signature of input using keyIdentity and passphrase. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) +} + // SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. type SigningNotSupportedError string @@ -70,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents if !md.IsSigned { return nil, "", errors.New("The input is not a signature") } - content, err := ioutil.ReadAll(md.UnverifiedBody) + content, err := io.ReadAll(md.UnverifiedBody) if err != nil { // Coverage: An error during reading the body can happen only if // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go index 6ae74d430d6..4c7968417ed 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -5,11 +5,11 @@ package signature import ( "bytes" + "errors" "fmt" - "io/ioutil" "os" - "github.com/mtrmac/gpgme" + "github.com/proglottis/gpgme" ) // A GPG/OpenPGP signing mechanism, implemented using gpgme. @@ -20,7 +20,7 @@ type gpgmeSigningMechanism struct { // newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. // The caller must call .Close() on the returned SigningMechanism. -func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { ctx, err := newGPGMEContext(optionalDir) if err != nil { return nil, err @@ -35,8 +35,8 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er // recognizes _only_ public keys from the supplied blob, and returns the identities // of these keys. // The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") +func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { + dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-") if err != nil { return nil, nil, err } @@ -117,9 +117,9 @@ func (m *gpgmeSigningMechanism) SupportsSigning() error { return nil } -// Sign creates a (non-detached) signature of input using keyIdentity. +// Sign creates a (non-detached) signature of input using keyIdentity and passphrase. // Fails with a SigningNotSupportedError if the mechanism does not support signing. 
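A usage sketch for the SignOptions plumbing introduced in signature/docker.go above; the key fingerprint, manifest path, and passphrase are placeholders:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/signature"
)

func main() {
	manifest, err := os.ReadFile("manifest.json") // hypothetical manifest blob
	if err != nil {
		panic(err)
	}

	mech, err := signature.NewGPGSigningMechanism() // gpgme-backed by default
	if err != nil {
		panic(err)
	}
	defer mech.Close()

	// With a non-empty Passphrase, the gpgme mechanism switches to
	// loopback pinentry and answers the prompt via a callback, so no
	// interactive agent prompt is needed.
	sig, err := signature.SignDockerManifestWithOptions(
		manifest, "docker.io/example/image:latest", mech,
		"0123456789ABCDEF", // placeholder key identity
		&signature.SignOptions{Passphrase: "secret"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("signature: %d bytes\n", len(sig))
}
```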
-func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { +func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { key, err := m.ctx.GetKey(keyIdentity, true) if err != nil { return nil, err @@ -133,12 +133,38 @@ func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, if err != nil { return nil, err } + + if passphrase != "" { + // Callback to write the passphrase to the specified file descriptor. + callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error { + if prevWasBad { + return errors.New("bad passphrase") + } + _, err := gpgmeFD.WriteString(passphrase + "\n") + return err + } + if err := m.ctx.SetCallback(callback); err != nil { + return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err) + } + + // Loopback mode will use the callback instead of prompting the user. + if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil { + return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err) + } + } + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { return nil, err } return sigBuffer.Bytes(), nil } +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + // Verify parses unverifiedSignature and returns the content and the signer's identity func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { signedBuffer := bytes.Buffer{} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go index 0a09788f989..63cb7788bbb 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -7,14 +7,21 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "os" "path" "strings" "time" "github.com/containers/storage/pkg/homedir" - "golang.org/x/crypto/openpgp" + // This is a fallback code; the primary recommendation is to use the gpgme mechanism + // implementation, which is out-of-process and more appropriate for handling long-term private key material + // than any Go implementation. + // For this verify-only fallback, we haven't reviewed any of the + // existing alternatives to choose; so, for now, continue to + // use this frozen deprecated implementation. + //lint:ignore SA1019 See above + "golang.org/x/crypto/openpgp" //nolint:staticcheck ) // A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. @@ -24,7 +31,7 @@ type openpgpSigningMechanism struct { // newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. // The caller must call .Close() on the returned SigningMechanism. 
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { m := &openpgpSigningMechanism{ keyring: openpgp.EntityList{}, } @@ -37,7 +44,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er } } - pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) + pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg")) if err != nil { if !os.IsNotExist(err) { return nil, err @@ -55,7 +62,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er // recognizes _only_ public keys from the supplied blob, and returns the identities // of these keys. // The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { +func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { m := &openpgpSigningMechanism{ keyring: openpgp.EntityList{}, } @@ -104,10 +111,16 @@ func (m *openpgpSigningMechanism) SupportsSigning() error { // Sign creates a (non-detached) signature of input using keyIdentity. // Fails with a SigningNotSupportedError if the mechanism does not support signing. -func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { +func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") } +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + // Verify parses unverifiedSignature and returns the content and the signer's identity func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) @@ -117,7 +130,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [ if !md.IsSigned { return nil, "", errors.New("not signed") } - content, err := ioutil.ReadAll(md.UnverifiedBody) + content, err := io.ReadAll(md.UnverifiedBody) if err != nil { // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted // (and possibly also signed, but it _must_ be encrypted) and the signing diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index 82fbb68cb14..bb91cae8c16 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -16,7 +16,6 @@ package signature import ( "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -80,7 +79,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri // NewPolicyFromFile returns a policy configured in the specified file. 
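As a small aside, NewPolicyFromFile (whose definition follows) is typically paired with NewPolicyContext; a minimal loading sketch, with the conventional default policy path:

```go
package main

import (
	"log"

	"github.com/containers/image/v5/signature"
)

func main() {
	// /etc/containers/policy.json is the conventional default; adjust as needed.
	policy, err := signature.NewPolicyFromFile("/etc/containers/policy.json")
	if err != nil {
		log.Fatal(err)
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	// A PolicyContext is what image copies and verifications consume;
	// it must be destroyed when done.
	defer func() {
		_ = pc.Destroy()
	}()
	log.Println("policy loaded")
}
```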
func NewPolicyFromFile(fileName string) (*Policy, error) { - contents, err := ioutil.ReadFile(fileName) + contents, err := os.ReadFile(fileName) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index 26cca4759e0..65e8259732b 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -5,7 +5,7 @@ package signature import ( "context" "fmt" - "io/ioutil" + "os" "strings" "github.com/containers/image/v5/manifest" @@ -33,7 +33,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types if pr.KeyData != nil { data = pr.KeyData } else { - d, err := ioutil.ReadFile(pr.KeyPath) + d, err := os.ReadFile(pr.KeyPath) if err != nil { return sarRejected, nil, err } diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go index 09f4f85e0a8..05bf8229e7d 100644 --- a/vendor/github.com/containers/image/v5/signature/signature.go +++ b/vendor/github.com/containers/image/v5/signature/signature.go @@ -190,12 +190,20 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { // of the system just because it is a private key — actually the presence of a private key // on the system increases the likelihood of an a successful attack on that private key // on that particular system.) -func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) { +func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) { json, err := json.Marshal(s) if err != nil { return nil, err } + if newMech, ok := mech.(signingMechanismWithPassphrase); ok { + return newMech.SignWithPassphrase(json, keyIdentity, passphrase) + } + + if passphrase != "" { + return nil, errors.New("signing mechanism does not support passphrases") + } + return mech.Sign(json, keyIdentity) } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index 7329ef6eee0..8071e3b32fa 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -10,7 +10,6 @@ import ( stderrors "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" @@ -18,9 +17,9 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/tmpdir" - internalTypes "github.com/containers/image/v5/internal/types" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/types" @@ -155,7 +154,7 @@ func (s *storageImageSource) HasThreadSafeGetBlob() bool { // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
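Stepping back to the signature.sign change above: the mech.(signingMechanismWithPassphrase) assertion follows the standard Go idiom for extending an interface without breaking existing implementations: optionally upgrade via a type assertion, and fall back when the capability is absent. A generic sketch of the idiom, with hypothetical names:

```go
package main

import (
	"errors"
	"fmt"
)

type signer interface {
	Sign(input []byte) ([]byte, error)
}

// passphraseSigner is the optional extension, analogous to
// signingMechanismWithPassphrase above.
type passphraseSigner interface {
	signer
	SignWithPassphrase(input []byte, passphrase string) ([]byte, error)
}

func sign(m signer, input []byte, passphrase string) ([]byte, error) {
	// Prefer the extended capability when the concrete type provides it.
	if pm, ok := m.(passphraseSigner); ok {
		return pm.SignWithPassphrase(input, passphrase)
	}
	// Fall back to the base interface, but only when no passphrase is
	// required -- the same guard the vendored code applies.
	if passphrase != "" {
		return nil, errors.New("signing mechanism does not support passphrases")
	}
	return m.Sign(input)
}

type dummy struct{}

func (dummy) Sign(in []byte) ([]byte, error) { return in, nil }

func main() {
	out, err := sign(dummy{}, []byte("payload"), "")
	fmt.Println(string(out), err) // payload <nil>
	_, err = sign(dummy{}, []byte("payload"), "pw")
	fmt.Println(err) // signing mechanism does not support passphrases
}
```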
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { if info.Digest == image.GzippedEmptyLayerDigest { - return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil + return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil } // NOTE: the blob is first written to a temporary file and subsequently @@ -167,7 +166,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c } defer rc.Close() - tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "") + tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "") if err != nil { return nil, 0, err } @@ -210,7 +209,7 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC } r := bytes.NewReader(b) logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil + return io.NopCloser(r), int64(r.Len()), "", nil } // Step through the list of matching layers. Tests may want to verify that if we have multiple layers // which claim to have the same contents, that we actually do have multiple layers, otherwise we could @@ -395,7 +394,7 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest * // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until // it's time to Commit() the image func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { - directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") + directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") if err != nil { return nil, errors.Wrapf(err, "creating a temporary directory") } @@ -446,15 +445,20 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) } -// PutBlobWithOptions is a wrapper around PutBlob. If options.LayerIndex is -// set, the blob will be committed directly. Either by the calling goroutine -// or by another goroutine already committing layers. -// -// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be -// used the together. Mixing the two with non "WithOptions" functions is not -// supported. -func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options internalTypes.PutBlobOptions) (types.BlobInfo, error) { - info, err := s.PutBlob(ctx, stream, blobinfo, options.Cache, options.IsConfig) +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (s *storageImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. 
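A large share of the churn in this file (and the signature/tarball files around it) is mechanical: Go 1.16 deprecated `io/ioutil` and moved each helper into `os` or `io`. The full mapping used in this bump, as a runnable sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	// ioutil.TempDir -> os.MkdirTemp
	dir, err := os.MkdirTemp("", "storage")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// ioutil.TempFile -> os.CreateTemp
	f, err := os.CreateTemp(dir, "blob-")
	if err != nil {
		panic(err)
	}
	if _, err := f.Write([]byte("layer data")); err != nil {
		panic(err)
	}
	f.Close()

	// ioutil.ReadFile -> os.ReadFile
	data, err := os.ReadFile(f.Name())
	if err != nil {
		panic(err)
	}

	// ioutil.NopCloser -> io.NopCloser, ioutil.ReadAll -> io.ReadAll
	rc := io.NopCloser(bytes.NewReader(data))
	all, _ := io.ReadAll(rc)
	rc.Close()

	// ioutil.Discard -> io.Discard
	n, _ := io.Copy(io.Discard, bytes.NewReader(all))
	fmt.Println("round-tripped", n, "bytes")
}
```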
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options) if err != nil { return info, err } @@ -466,11 +470,6 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) } -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (s *storageImageDestination) HasThreadSafePutBlob() bool { - return true -} - // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. @@ -480,6 +479,15 @@ func (s *storageImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + return s.PutBlobWithOptions(ctx, stream, blobinfo, private.PutBlobOptions{ + Cache: cache, + IsConfig: isConfig, + }) +} + +// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file. +// The caller must arrange the blob to be eventually committed using s.commitLayer(). +func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) { // Stores a layer or data blob in our temporary directory, checking that any information // in the blobinfo matches the incoming data. errorBlobInfo := types.BlobInfo{ @@ -534,7 +542,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, s.lock.Unlock() // This is safe because we have just computed diffID, and blobDigest was either computed // by us, or validated by the caller (usually copy.digestingReader). - cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) + options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) return types.BlobInfo{ Digest: blobDigest, Size: blobSize, @@ -542,80 +550,40 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, }, nil } -// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If -// options.LayerIndex is set, the reused blob will be recoreded as already -// pulled. -// -// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be -// used the together. Mixing the two with the non "WithOptions" functions -// is not supported.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) { - reused, info, err := s.tryReusingBlobWithSrcRef(ctx, blobinfo, options.Cache, options.CanSubstitute, options.SrcRef) - if err != nil || !reused || options.LayerIndex == nil { - return reused, info, err - } - - return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) -} - -// tryReusingBlobWithSrcRef is a wrapper around TryReusingBlob. -// If ref is provided, this function first tries to get layer from Additional Layer Store. -func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool, ref reference.Named) (bool, types.BlobInfo, error) { - // lock the entire method as it executes fairly quickly - s.lock.Lock() - defer s.lock.Unlock() - - if ref != nil { - // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String()) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) - } else if err == nil { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() - s.blobAdditionalLayer[blobinfo.Digest] = aLayer - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: aLayer.CompressedSize(), - MediaType: blobinfo.MediaType, - }, nil - } - } - - return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute) -} - type zstdFetcher struct { - stream internalTypes.ImageSourceSeekable - ctx context.Context - blobInfo types.BlobInfo + chunkAccessor private.BlobChunkAccessor + ctx context.Context + blobInfo types.BlobInfo } -// GetBlobAt converts from chunked.GetBlobAt to ImageSourceSeekable.GetBlobAt. +// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - var newChunks []internalTypes.ImageSourceChunk + var newChunks []private.ImageSourceChunk for _, v := range chunks { - i := internalTypes.ImageSourceChunk{ + i := private.ImageSourceChunk{ Offset: v.Offset, Length: v.Length, } newChunks = append(newChunks, i) } - rc, errs, err := f.stream.GetBlobAt(f.ctx, f.blobInfo, newChunks) - if _, ok := err.(internalTypes.BadPartialRequestError); ok { + rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks) + if _, ok := err.(private.BadPartialRequestError); ok { err = chunked.ErrBadRequest{} } return rc, errs, err } -// PutBlobPartial attempts to create a blob using the data that is already present at the destination storage. stream is accessed -// in a non-sequential way to retrieve the missing chunks. -func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream internalTypes.ImageSourceSeekable, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). 
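`zstdFetcher` is a pure adapter: `pkg/chunked` wants a `GetBlobAt([]chunked.ImageSourceChunk)`, while the destination holds a `private.BlobChunkAccessor`, so the fetcher converts the chunk slice on the way in and maps `BadPartialRequestError` to `chunked.ErrBadRequest` on the way out. A generic sketch of that double translation; every name below is invented:

```go
package main

import (
	"errors"
	"fmt"
)

// The library being called defines its own chunk and error types...
type libChunk struct{ Offset, Length uint64 }

var errLibBadRequest = errors.New("lib: bad request")

// ...while the data source speaks a structurally identical but distinct dialect.
type srcChunk struct{ Offset, Length uint64 }

var errSrcBadPartialRequest = errors.New("src: bad partial request")

type source func([]srcChunk) error

// fetcher adapts source to the library's shape, converting chunk types on the
// way in and error types on the way out, the same two translations
// zstdFetcher.GetBlobAt performs above.
type fetcher struct{ s source }

func (f fetcher) GetChunks(chunks []libChunk) error {
	converted := make([]srcChunk, 0, len(chunks))
	for _, c := range chunks {
		converted = append(converted, srcChunk(c)) // identical fields: direct conversion
	}
	err := f.s(converted)
	if errors.Is(err, errSrcBadPartialRequest) {
		return errLibBadRequest
	}
	return err
}

func main() {
	f := fetcher{s: func(cs []srcChunk) error { return errSrcBadPartialRequest }}
	fmt.Println(f.GetChunks([]libChunk{{Offset: 0, Length: 512}}))
}
```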
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { fetcher := zstdFetcher{ - stream: stream, - ctx: ctx, - blobInfo: srcInfo, + chunkAccessor: chunkAccessor, + ctx: ctx, + blobInfo: srcInfo, } differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) @@ -640,6 +608,22 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int return srcInfo, nil } +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options) + if err != nil || !reused || options.LayerIndex == nil { + return reused, info, err + } + + return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) +} + // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. @@ -650,16 +634,36 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + return s.TryReusingBlobWithOptions(ctx, blobinfo, private.TryReusingBlobOptions{ + Cache: cache, + CanSubstitute: canSubstitute, + }) +} + +// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata. +// The caller must arrange the blob to be eventually committed using s.commitLayer(). +func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { // lock the entire method as it executes fairly quickly s.lock.Lock() defer s.lock.Unlock() - return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute) -} + if options.SrcRef != nil { + // Check if we have the layer in the underlying additional layer store.
+ aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String()) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) + } else if err == nil { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() + s.blobAdditionalLayer[blobinfo.Digest] = aLayer + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: aLayer.CompressedSize(), + MediaType: blobinfo.MediaType, + }, nil + } + } -// tryReusingBlobLocked implements a core functionality of TryReusingBlob. -// This must be called with a lock being held on storageImageDestination. -func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if blobinfo.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) } @@ -708,9 +712,9 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob // Does the blob correspond to a known DiffID which we already have available? // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the - // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size. - if canSubstitute || blobinfo.Size != -1 { - if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. + if options.CanSubstitute || blobinfo.Size != -1 { + if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest) @@ -720,8 +724,8 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest return true, blobinfo, nil } - if !canSubstitute { - return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) + if !options.CanSubstitute { + return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo) } s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest return true, types.BlobInfo{ @@ -786,7 +790,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er } // Assume it's a file, since we're only calling this from a place that expects to read files. 
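The refactor above leaves `PutBlob` and `TryReusingBlob` as thin shims that pack their positional arguments into an options struct and call the `WithOptions` variants, so there is exactly one implementation path. A self-contained sketch of that API-evolution idiom, with invented names:

```go
package main

import (
	"context"
	"fmt"
)

// PutOptions gathers what used to be positional parameters; new fields can be
// added later without touching every caller.
type PutOptions struct {
	IsConfig   bool
	LayerIndex *int // nil means "not a layer in a known position"
}

type dest struct{}

// PutWithOptions is the single real implementation.
func (d *dest) PutWithOptions(ctx context.Context, blob []byte, opts PutOptions) error {
	if opts.LayerIndex != nil {
		fmt.Println("queueing layer", *opts.LayerIndex)
	}
	return nil
}

// Put is the legacy signature, kept as a thin shim, the same relationship
// PutBlob has to PutBlobWithOptions above.
func (d *dest) Put(ctx context.Context, blob []byte, isConfig bool) error {
	return d.PutWithOptions(ctx, blob, PutOptions{IsConfig: isConfig})
}

func main() {
	idx := 0
	d := &dest{}
	_ = d.Put(context.Background(), []byte("config"), true)
	_ = d.PutWithOptions(context.Background(), []byte("layer"), PutOptions{LayerIndex: &idx})
}
```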
if filename, ok := s.filenames[info.Digest]; ok { - contents, err2 := ioutil.ReadFile(filename) + contents, err2 := os.ReadFile(filename) if err2 != nil { return nil, errors.Wrapf(err2, `reading blob from file %q`, filename) } @@ -1131,7 +1135,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t delete(dataBlobs, layerBlob.Digest) } for blob := range dataBlobs { - v, err := ioutil.ReadFile(s.filenames[blob]) + v, err := os.ReadFile(s.filenames[blob]) if err != nil { return errors.Wrapf(err, "copying non-layer blob %q to image", blob) } @@ -1192,21 +1196,13 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } logrus.Debugf("saved image metadata %q", string(metadata)) } - // Set the reference's name on the image. We don't need to worry about avoiding duplicate - // values because SetNames() will deduplicate the list that we pass to it. - if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { - names := []string{} - if name != nil { - names = append(names, name.String()) + // Adds the reference's name on the image. We don't need to worry about avoiding duplicate + // values because AddNames() will deduplicate the list that we pass to it. + if name := s.imageRef.DockerReference(); name != nil { + if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil { + return errors.Wrapf(err, "adding names %v to image %q", name, img.ID) } - if len(oldNames) > 0 { - names = append(names, oldNames...) - } - if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { - logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) - return errors.Wrapf(err, "setting names %v on image %q", names, img.ID) - } - logrus.Debugf("set names of image %q to %v", img.ID, names) + logrus.Debugf("added name %q to image %q", name, img.ID) } commitSucceeded = true @@ -1261,6 +1257,11 @@ func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { return true // Yes, we want the unmodified manifest } +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (s *storageImageDestination) SupportsPutBlobPartial() bool { + return true +} + // PutSignatures records the image's signatures for committing as a single data blob. func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { sizes := []int{} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 694ad17bd1a..aedfdf5de65 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "runtime" "strings" @@ -87,7 +86,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System uncompressed = nil } // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - n, err := io.Copy(ioutil.Discard, reader) + n, err := io.Copy(io.Discard, reader) if err != nil { return nil, fmt.Errorf("error reading %q: %v", filename, err) } @@ -217,14 +216,14 @@ func (is *tarballImageSource) HasThreadSafeGetBlob() bool { func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { // We should only be asked about things in the manifest. Maybe the configuration blob. 
if blobinfo.Digest == is.configID { - return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil } // Maybe one of the layer blobs. for i := range is.blobIDs { if blobinfo.Digest == is.blobIDs[i] { // We want to read that layer: open the file or memory block and hand it back. if is.filenames[i] == "-" { - return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil } reader, err := os.Open(is.filenames[i]) if err != nil { diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go index d407c657fae..63d835530b5 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -3,7 +3,7 @@ package tarball import ( "errors" "fmt" - "io/ioutil" + "io" "os" "strings" @@ -36,7 +36,7 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc filenames := strings.Split(reference, separator) for _, filename := range filenames { if filename == "-" { - stdin, err = ioutil.ReadAll(os.Stdin) + stdin, err = io.ReadAll(os.Stdin) if err != nil { return nil, fmt.Errorf("error buffering stdin: %v", err) } diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 2110a091d28..0bae8b2599d 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -12,7 +12,9 @@ import ( _ "github.com/containers/image/v5/oci/archive" _ "github.com/containers/image/v5/oci/layout" _ "github.com/containers/image/v5/openshift" + _ "github.com/containers/image/v5/sif" _ "github.com/containers/image/v5/tarball" + // The ostree transport is registered by ostree*.go // The storage transport is registered by storage*.go "github.com/containers/image/v5/transports" diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index c98a6c6fda0..dcff8caf76b 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -561,6 +561,11 @@ type SystemContext struct { UserShortNameAliasConfPath string // If set, short-name resolution in pkg/shortnames must follow the specified mode ShortNameMode *ShortNameMode + // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and + // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce + // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this + // specific context. 
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool // If not "", overrides the default path for the authentication file, but only new format files AuthFilePath string // if not "", overrides the default path for the authentication file, but with the legacy format; diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index ffb2a4ce259..6295b549305 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 17 + VersionMinor = 21 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md new file mode 100644 index 00000000000..83b061c70b2 --- /dev/null +++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The libtrust Project Community Code of Conduct + +The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md new file mode 100644 index 00000000000..fab2c41e89b --- /dev/null +++ b/vendor/github.com/containers/libtrust/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the libtrust Project + +The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects. 
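For orientation, these constants are what downstream tools report as the library version; the rest of version.go (outside this hunk) composes them into a single string, roughly like the following sketch. The composition shown here is an assumption, not quoted from the file:

```go
package main

import "fmt"

const (
	VersionMajor = 5
	VersionMinor = 21
	VersionPatch = 1
	VersionDev   = "" // non-empty only on development branches
)

func main() {
	// Prints 5.21.1; a dev build would render as e.g. 5.22.0-dev.
	version := fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
	fmt.Println(version)
}
```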
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index d080d790c1a..fd3d310548d 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,17 +17,17 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-34" - PRIOR_FEDORA_NAME: "fedora-33" + FEDORA_NAME: "fedora-35" + PRIOR_FEDORA_NAME: "fedora-34" UBUNTU_NAME: "ubuntu-2104" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - _BUILT_IMAGE_SUFFIX: "c6431352024203264" - FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}" - UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}" + IMAGE_SUFFIX: "c4512539143831552" + FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" + UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" #### #### Command variables to help avoid duplication @@ -117,7 +117,7 @@ lint_task: env: CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" container: - image: golang:1.15 + image: golang:1.16 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -132,7 +132,7 @@ lint_task: meta_task: container: - image: "quay.io/libpod/imgts:${_BUILT_IMAGE_SUFFIX}" + image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}" cpu: 1 memory: 1 @@ -154,7 +154,7 @@ meta_task: vendor_task: container: - image: golang:1.15 + image: golang:1.16 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -172,6 +172,6 @@ success_task: - meta - vendor container: - image: golang:1.15 + image: golang:1.16 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index dbc1f7c9987..2c1e4a18566 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -51,6 +51,9 @@ sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*. containers-storage: $(sources) ## build using gc on the host $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage +codespell: + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L flate,uint,iff,od,ERRO -w + binary local-binary: containers-storage local-gccgo: ## build using gccgo on the host @@ -66,44 +69,44 @@ local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd done cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ docs: install.tools ## build the docs on the host $(MAKE) -C docs docs gccgo: ## build using gccgo using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ test: local-binary ## build the binaries and run the tests using VMs - $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration + $(RUNINVM) $(MAKE) local-binary local-cross local-test-unit local-test-integration local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges) @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... 
| grep -v ^$(PACKAGE)/vendor) test-unit: local-binary ## run the unit tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) @cd tests; ./test_runner.bash test-integration: local-binary ## run the integration tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ local-validate: ## validate DCO and gofmt on the host @./hack/git-validation.sh @./hack/gofmt.sh validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ install.tools: - make -C tests/tools + $(MAKE) -C tests/tools $(FFJSON): - make -C tests/tools + $(MAKE) -C tests/tools install.docs: docs - make -C docs install + $(MAKE) -C docs install install: install.docs diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index bf50e910e62..32b7211cb61 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.37.0 +1.40.0 diff --git a/vendor/github.com/containers/storage/Vagrantfile b/vendor/github.com/containers/storage/Vagrantfile deleted file mode 100644 index c82c1f81bd3..00000000000 --- a/vendor/github.com/containers/storage/Vagrantfile +++ /dev/null @@ -1,25 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -# -# The fedora/28-cloud-base and debian/jessie64 boxes are also available for -# the "virtualbox" provider. Set the VAGRANT_PROVIDER environment variable to -# "virtualbox" to use them instead. -# -Vagrant.configure("2") do |config| - config.vm.define "fedora" do |c| - c.vm.box = "fedora/28-cloud-base" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end - config.vm.define "debian" do |c| - c.vm.box = "debian/jessie64" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end -end diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index b4f773f2b73..a8b20f03a01 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -84,8 +84,17 @@ type ContainerStore interface { // SetNames updates the list of names associated with the container // with the specified ID. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the container with + // the specified id. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the container with + // the specified id. + RemoveNames(id string, names []string) error + // Get retrieves information about a container given an ID or name. Get(id string) (*Container, error) @@ -324,6 +333,12 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". 
You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) } } + if err := hasOverlappingRanges(options.UIDMap); err != nil { + return nil, err + } + if err := hasOverlappingRanges(options.GIDMap); err != nil { + return nil, err + } if err == nil { container = &Container{ ID: id, @@ -371,22 +386,40 @@ func (r *containerStore) removeName(container *Container, name string) { container.Names = stringSliceWithoutValue(container.Names, name) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. func (r *containerStore) SetNames(id string, names []string) error { - names = dedupeNames(names) - if container, ok := r.lookup(id); ok { - for _, name := range container.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherContainer, ok := r.byname[name]; ok { - r.removeName(otherContainer, name) - } - r.byname[name] = container + return r.updateNames(id, names, setNames) +} + +func (r *containerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *containerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + oldNames := container.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) } - container.Names = names - return r.Save() + r.byname[name] = container } - return ErrContainerUnknown + container.Names = names + return r.Save() } func (r *containerStore) Delete(id string) error { diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index a566fbffa0f..e66613c098a 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -26,6 +27,7 @@ import ( "bufio" "fmt" "io" + "io/fs" "io/ioutil" "os" "os/exec" @@ -649,11 +651,11 @@ func (a *Driver) mounted(mountpoint string) (bool, error) { // Cleanup aufs and unmount all mountpoints func (a *Driver) Cleanup() error { var dirs []string - if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error { if err != nil { return err } - if !info.IsDir() { + if !d.IsDir() { return nil } dirs = append(dirs, path) diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 3903b1dddd9..339aa0d3809 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package btrfs @@ -16,6 +17,7 @@ import "C" import ( "fmt" + "io/fs" "io/ioutil" "math" "os" @@ -256,7 +258,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { var args C.struct_btrfs_ioctl_vol_args // walk the btrfs subvolumes - walkSubvolumes := func(p string, f os.FileInfo, err error) error { + walkSubvolumes := func(p string, d fs.DirEntry, err error) 
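`SetNames` is deprecated because replacing the whole name list is a read-modify-write race: two concurrent callers can silently drop each other's names. `AddNames`/`RemoveNames` instead express a delta that the store applies under its own lock, as `updateNames` does above. A standalone sketch of the delta semantics, where `applyDelta` is an invented stand-in for the unexported helper the diff routes through:

```go
package main

import "fmt"

type op int

const (
	addNames op = iota
	removeNames
)

// applyDelta mirrors the updateNames split above: instead of callers computing
// a full replacement list (racy), the store applies an add/remove delta.
func applyDelta(current, delta []string, o op) []string {
	seen := make(map[string]bool, len(current))
	out := make([]string, 0, len(current)+len(delta))
	for _, n := range current {
		seen[n] = true
		out = append(out, n)
	}
	switch o {
	case addNames:
		for _, n := range delta {
			if !seen[n] { // deduplicate, as AddNames promises
				seen[n] = true
				out = append(out, n)
			}
		}
	case removeNames:
		drop := make(map[string]bool, len(delta))
		for _, n := range delta {
			drop[n] = true
		}
		kept := out[:0]
		for _, n := range out {
			if !drop[n] {
				kept = append(kept, n)
			}
		}
		out = kept
	}
	return out
}

func main() {
	names := []string{"app:latest"}
	names = applyDelta(names, []string{"app:v1", "app:latest"}, addNames)
	names = applyDelta(names, []string{"app:latest"}, removeNames)
	fmt.Println(names) // [app:v1]
}
```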
error { if err != nil { if os.IsNotExist(err) && p != fullPath { // missing most likely because the path was a subvolume that got removed in the previous iteration @@ -267,20 +269,20 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { } // we want to check children only so skip itself // it will be removed after the filepath walk anyways - if f.IsDir() && p != fullPath { + if d.IsDir() && p != fullPath { sv, err := isSubvolume(p) if err != nil { return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) } if sv { - if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { + if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil { return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) } } } return nil } - if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil { return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) } diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index 63bfd2d136f..2db6764c91b 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -50,11 +50,14 @@ func chownByMapsMain() { if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { toHost = nil } + + chowner := newLChowner() + chown := func(path string, info os.FileInfo, _ error) error { if path == "." { return nil } - return platformLChown(path, info, toHost, toContainer) + return chowner.LChown(path, info, toHost, toContainer) } if err := pwalk.Walk(".", chown); err != nil { fmt.Fprintf(os.Stderr, "error during chown: %v", err) diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go index 0387adfc123..c598b936d64 100644 --- a/vendor/github.com/containers/storage/drivers/chown_unix.go +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package graphdriver @@ -6,17 +7,50 @@ import ( "errors" "fmt" "os" + "sync" "syscall" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]bool +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]bool), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { st, ok := info.Sys().(*syscall.Stat_t) if !ok { return nil } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + c.mutex.Lock() + _, found := c.inodes[i] + if !found { + c.inodes[i] = true + } + c.mutex.Unlock() + + if found { + return nil + } + // Map an on-disk UID/GID pair from host to container // using the first map, then back to the host using the // second map. Skip that first step if they're 0, to @@ -42,7 +76,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools. 
UID: uid, GID: gid, } - mappedPair, err := toHost.ToHost(pair) + mappedPair, err := toHost.ToHostOverflow(pair) if err != nil { return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) } @@ -50,7 +84,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools. } if uid != int(st.Uid) || gid != int(st.Gid) { cap, err := system.Lgetxattr(path, "security.capability") - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("%s: %v", os.Args[0], err) } diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go index 31bd5bb52dd..1845a4e086c 100644 --- a/vendor/github.com/containers/storage/drivers/chown_windows.go +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package graphdriver @@ -9,6 +10,13 @@ import ( "github.com/containers/storage/pkg/idtools" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type platformChowner struct { +} + +func newLChowner() *platformChowner { + return &platformChowner{} +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { return &os.PathError{"lchown", path, syscall.EWINDOWS} } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index c5168bfdd25..e604b7e3186 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devmapper @@ -6,6 +7,7 @@ import ( "bufio" "fmt" "io" + "io/fs" "io/ioutil" "os" "os/exec" @@ -419,40 +421,35 @@ func (devices *DeviceSet) constructDeviceIDMap() { } } -func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { +func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { // Skip some of the meta files which are not device files. - if strings.HasSuffix(finfo.Name(), ".migrated") { + if strings.HasSuffix(name, ".migrated") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if strings.HasPrefix(finfo.Name(), ".") { + if strings.HasPrefix(name, ".") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == deviceSetMetaFile { + if name == deviceSetMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == transactionMetaFile { + if name == transactionMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } logrus.Debugf("devmapper: Loading data for file %s", path) - hash := finfo.Name() - if hash == base { - hash = "" - } - // Include deleted devices also as cleanup delete device logic // will go through it and see if there are any deleted devices. 
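The new `platformChowner` in chown_unix.go keeps a set of (device, inode) pairs so that a file reachable through several hard links is processed once; later links are skipped rather than re-chowned. A Linux-only sketch of just that dedup step, with the ownership mapping and xattr handling elided:

```go
//go:build linux

package main

import (
	"fmt"
	"os"
	"sync"
	"syscall"
)

type inodeKey struct{ dev, ino uint64 }

type chowner struct {
	mu   sync.Mutex
	seen map[inodeKey]bool
}

func newChowner() *chowner { return &chowner{seen: map[inodeKey]bool{}} }

// visit returns false if this inode was already handled: the same
// first-link-wins check LChown performs above before chowning.
func (c *chowner) visit(info os.FileInfo) bool {
	st, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return true // no raw stat available, no dedup possible; process it
	}
	key := inodeKey{dev: uint64(st.Dev), ino: uint64(st.Ino)}
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.seen[key] {
		return false
	}
	c.seen[key] = true
	return true
}

func main() {
	c := newChowner()
	fi, err := os.Lstat("/etc") // any existing path works for the demo
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("first visit:", c.visit(fi))  // true
	fmt.Println("second visit:", c.visit(fi)) // false
}
```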
- if _, err := devices.lookupDevice(hash); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + if _, err := devices.lookupDevice(name); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", name, err) } return nil @@ -462,21 +459,21 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { logrus.Debug("devmapper: loadDeviceFilesOnStart()") defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - var scan = func(path string, info os.FileInfo, err error) error { + var scan = func(path string, d fs.DirEntry, err error) error { if err != nil { logrus.Debugf("devmapper: Can't walk the file %s", path) return nil } // Skip any directories - if info.IsDir() { + if d.IsDir() { return nil } - return devices.deviceFileWalkFunction(path, info) + return devices.deviceFileWalkFunction(path, d.Name()) } - return filepath.Walk(devices.metadataDir(), scan) + return filepath.WalkDir(devices.metadataDir(), scan) } // Should be called with devices.Lock() held. diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go index e1320ee07f6..143cccf92ec 100644 --- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -2,15 +2,43 @@ package graphdriver import ( "golang.org/x/sys/unix" + + "github.com/containers/storage/pkg/mount" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) ) var ( // Slice of drivers that should be used in an order priority = []string{ "zfs", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", } ) +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on FreeBSD. 
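aufs, btrfs, and devmapper all switch from `filepath.Walk` to `filepath.WalkDir` in these hunks. The practical difference (Go 1.16+): the callback receives a lazy `fs.DirEntry` instead of an eagerly populated `os.FileInfo`, so the walk avoids one lstat per entry unless the callback explicitly asks via `d.Info()`. A minimal before/after sketch:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	root := "."

	// Old style: filepath.Walk stats every entry up front.
	_ = filepath.Walk(root, func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() { // info was populated whether we needed it or not
			return nil
		}
		return nil
	})

	// New style: WalkDir defers the stat; call d.Info() only when needed.
	_ = filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() { // answered from the directory read, no extra lstat
			return nil
		}
		info, err := d.Info() // the lstat happens here, on demand
		if err != nil {
			return err
		}
		fmt.Println(path, info.Size())
		return nil
	})
}
```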
+func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { var buf unix.Statfs_t diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index a534630df08..b7e681ace45 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -138,6 +138,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p if parent != "" { options := MountOpts{ MountLabel: mountLabel, + Options: []string{"ro"}, } parentFs, err = driver.Get(parent, options) if err != nil { diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 44b3515a854..48fb7a550fa 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -11,6 +12,7 @@ import ( "syscall" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/system" @@ -218,3 +220,55 @@ func doesVolatile(d string) (bool, error) { }() return true, nil } + +// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of +// an idmapped lower layer. +func supportsIdmappedLowerLayers(home string) (bool, error) { + layerDir, err := ioutil.TempDir(home, "compat") + if err != nil { + return false, err + } + defer func() { + _ = os.RemoveAll(layerDir) + }() + + mergedDir := filepath.Join(layerDir, "merged") + lowerDir := filepath.Join(layerDir, "lower") + lowerMappedDir := filepath.Join(layerDir, "lower-mapped") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + + _ = idtools.MkdirAs(mergedDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0700, 0, 0) + _ = idtools.MkdirAs(workDir, 0700, 0, 0) + + idmap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: 0, + Size: 1, + }, + } + pid, cleanupFunc, err := createUsernsProcess(idmap, idmap) + if err != nil { + return false, err + } + defer cleanupFunc() + + if err := createIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil { + return false, errors.Wrapf(err, "create mapped mount") + } + defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH) + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir) + flags := uintptr(0) + if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil { + return false, err + } + defer func() { + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + }() + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_115.go b/vendor/github.com/containers/storage/drivers/overlay/check_115.go deleted file mode 100644 index 9ad1b863d8d..00000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/check_115.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !go1.16 - -package overlay - -import ( - "os" - "path/filepath" - "strings" -
"github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/system" -) - -func scanForMountProgramIndicators(home string) (detected bool, err error) { - err = filepath.Walk(home, func(path string, info os.FileInfo, err error) error { - if detected { - return filepath.SkipDir - } - if err != nil { - return err - } - basename := filepath.Base(path) - if strings.HasPrefix(basename, archive.WhiteoutPrefix) { - detected = true - return filepath.SkipDir - } - if info.IsDir() { - xattrs, err := system.Llistxattr(path) - if err != nil { - return err - } - for _, xattr := range xattrs { - if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") { - detected = true - return filepath.SkipDir - } - } - } - return nil - }) - return detected, err -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go new file mode 100644 index 00000000000..34fdc5790f1 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go @@ -0,0 +1,160 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +type attr struct { + attrSet uint64 + attrClr uint64 + propagation uint64 + userNs uint64 +} + +const ( + // _MOUNT_ATTR_IDMAP - Idmap mount to @userns_fd in struct mount_attr + _MOUNT_ATTR_IDMAP = 0x00100000 //nolint:golint + + // _OPEN_TREE_CLONE - Clone the source path mount + _OPEN_TREE_CLONE = 0x00000001 //nolint:golint + + // _MOVE_MOUNT_F_EMPTY_PATH - Move the path referenced by the fd + _MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 //nolint:golint +) + +// openTree is a wrapper for the open_tree syscall +func openTree(path string, flags int) (fd int, err error) { + var _p0 *byte + + if _p0, err = syscall.BytePtrFromString(path); err != nil { + return 0, err + } + + r, _, e1 := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)), + uintptr(flags), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return int(r), nil +} + +// moveMount is a wrapper for the the move_mount syscall. +func moveMount(fdTree int, target string) (err error) { + var _p0, _p1 *byte + + empty := "" + + if _p0, err = syscall.BytePtrFromString(target); err != nil { + return err + } + if _p1, err = syscall.BytePtrFromString(empty); err != nil { + return err + } + + flags := _MOVE_MOUNT_F_EMPTY_PATH + + _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT), + uintptr(fdTree), uintptr(unsafe.Pointer(_p1)), + 0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = e1 + } + return +} + +// mountSetAttr is a wrapper for the mount_setattr syscall +func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) { + var _p0 *byte + + if _p0, err = syscall.BytePtrFromString(path); err != nil { + return err + } + + _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)), + uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0) + if e1 != 0 { + err = e1 + } + return +} + +// createIDMappedMount creates a IDMapped bind mount from SOURCE to TARGET using the user namespace +// for the PID process. 
+func createIDMappedMount(source, target string, pid int) error { + path := fmt.Sprintf("/proc/%d/ns/user", pid) + userNsFile, err := os.Open(path) + if err != nil { + return errors.Wrapf(err, "unable to get user ns file descriptor for %q", path) + } + + var attr attr + attr.attrSet = _MOUNT_ATTR_IDMAP + attr.attrClr = 0 + attr.propagation = 0 + attr.userNs = uint64(userNsFile.Fd()) + + defer userNsFile.Close() + + targetDirFd, err := openTree(source, _OPEN_TREE_CLONE|unix.AT_RECURSIVE) + if err != nil { + return err + } + defer unix.Close(targetDirFd) + + if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, + &attr, uint(unsafe.Sizeof(attr))); err != nil { + return err + } + if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) { + return err + } + return moveMount(targetDirFd, target) +} + +// createUsernsProcess forks the current process and creates a user namespace using the specified +// mappings. It returns the pid of the new process. +func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) { + pid, _, err := syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0) + if err != 0 { + return -1, nil, err + } + if pid == 0 { + _ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0) + // just wait for the SIGKILL + for { + syscall.Syscall6(uintptr(unix.SYS_PAUSE), 0, 0, 0, 0, 0, 0) + } + } + cleanupFunc := func() { + unix.Kill(int(pid), unix.SIGKILL) + _, _ = unix.Wait4(int(pid), nil, 0, nil) + } + writeMappings := func(fname string, idmap []idtools.IDMap) error { + mappings := "" + for _, m := range idmap { + mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600) + } + if err := writeMappings("uid_map", uidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + if err := writeMappings("gid_map", gidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + + return int(pid), cleanupFunc, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 1efe7316d3e..09d24ae8b0d 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -25,7 +26,6 @@ import ( "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" @@ -38,7 +38,6 @@ import ( "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" "golang.org/x/sys/unix" ) @@ -119,7 +118,8 @@ type Driver struct { supportsDType bool supportsVolatile *bool usingMetacopy bool - locker *locker.Locker + + supportsIDMappedMounts *bool } type additionalLayerStore struct { @@ -155,6 +155,15 @@ func hasMetacopyOption(opts []string) bool { return false } +func stripOption(opts []string, option string) []string { + for i, s := range opts { + if s == option { + return stripOption(append(opts[:i], opts[i+1:]...), option) + } + } + return opts +} + func hasVolatileOption(opts []string) bool { for _, s := range 
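Taken together, the probe flow is: fork a child whose only purpose is to own a fresh user namespace (`createUsernsProcess`), idmap a directory through that namespace (`createIDMappedMount`), then attempt the overlay mount. One quirk visible in the vendored text: `openTree` assigns `e1` to `err` but then returns a nil error, so callers cannot observe open_tree failures directly. Below is a rough, root-only sketch of how a test file sitting in the same package might drive the two helpers; the test body and skip conditions are assumptions, only the two function names and the `idtools.IDMap` shape come from the diff:

```go
//go:build linux

package overlay // hypothetical idmapped_utils_test.go next to the new file

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/containers/storage/pkg/idtools"
	"golang.org/x/sys/unix"
)

// TestIDMappedLower exercises the probe path: map root to itself in a fresh
// user namespace, bind a lower dir through that namespace, then clean up.
// Requires root and a kernel with mount_setattr (>= 5.12); skips otherwise.
func TestIDMappedLower(t *testing.T) {
	if os.Geteuid() != 0 {
		t.Skip("needs root")
	}
	lower := t.TempDir()
	mapped := filepath.Join(t.TempDir(), "mapped") // must not exist yet; the helper mkdirs it

	idmap := []idtools.IDMap{{ContainerID: 0, HostID: 0, Size: 1}}
	pid, cleanup, err := createUsernsProcess(idmap, idmap)
	if err != nil {
		t.Fatal(err)
	}
	defer cleanup() // kills and reaps the namespace-holder child

	if err := createIDMappedMount(lower, mapped, pid); err != nil {
		t.Skipf("idmapped mounts unsupported here: %v", err)
	}
	defer unix.Unmount(mapped, unix.MNT_DETACH)
}
```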
opts { if s == "volatile" { @@ -195,6 +204,26 @@ func checkSupportVolatile(home, runhome string) (bool, error) { return usingVolatile, nil } +// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of an +// idmapped lower layer. +func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) { + feature := "idmapped-lower-dir" + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if overlayCacheResult { + logrus.Debugf("Cached value indicated that overlay is supported") + return true, nil + } + logrus.Debugf("Cached value indicated that overlay is not supported") + return false, errors.New(overlayCacheText) + } + supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home) + if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil { + return false, errors.Wrap(err2, "recording overlay idmapped mounts support status") + } + return supportsIDMappedMounts, err +} + func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) { var supportsDType bool @@ -282,6 +311,31 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) backingFs = fsName } + runhome := filepath.Join(options.RunRoot, filepath.Base(home)) + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if opts.mountProgram == "" { + if supported, err := SupportsNativeOverlay(home, runhome); err != nil { + return nil, err + } else if !supported { + if path, err := exec.LookPath("fuse-overlayfs"); err == nil { + opts.mountProgram = path + } + } + } + if opts.mountProgram != "" { if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil { m := os.FileMode(0700) @@ -306,20 +360,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } } - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return nil, err - } - - // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { - return nil, err - } - runhome := filepath.Join(options.RunRoot, filepath.Base(home)) - if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { - return nil, err - } - var usingMetacopy bool var supportsDType bool var supportsVolatile *bool @@ -380,7 +420,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) supportsDType: supportsDType, usingMetacopy: usingMetacopy, supportsVolatile: supportsVolatile, - locker: locker.New(), options: *opts, } @@ -559,14 +598,11 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) ( return err } -func SupportsNativeOverlay(graphroot, rundir string) (bool, error) { - if os.Geteuid() != 0 || graphroot == "" || rundir == "" { +func SupportsNativeOverlay(home, runhome string) (bool, error) { + if os.Geteuid() != 0 || home == "" || runhome == "" { return false, nil } - home := filepath.Join(graphroot, "overlay") - runhome := filepath.Join(rundir, "overlay") - var contents string flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home)) if err == nil { @@ -881,11 +917,18 @@
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err != nil { return err } + + idPair := idtools.IDPair{ + UID: rootUID, + GID: rootGID, + } + // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(path.Join(d.home, linkDir), 0700, idPair); err != nil { return err } - if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0700, idPair); err != nil { return err } if parent != "" { @@ -896,14 +939,26 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable rootUID = int(st.UID()) rootGID = int(st.GID()) } - if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + + if _, err := system.Lstat(dir); err == nil { + logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir) + // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir, + // so that we can’t end up with two symlinks in linkDir pointing to the same layer. + if err := d.Remove(id); err != nil { + return errors.Wrapf(err, "removing a pre-existing layer directory %q", dir) + } + } + + if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { - os.RemoveAll(dir) + if err2 := os.RemoveAll(dir); err2 != nil { + logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) + } } }() @@ -1039,17 +1094,22 @@ func (d *Driver) getLower(parent string) (string, error) { } func (d *Driver) dir(id string) string { + p, _ := d.dir2(id) + return p +} + +func (d *Driver) dir2(id string) (string, bool) { newpath := path.Join(d.home, id) if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { - return l + return l, true } } } - return newpath + return newpath, false } func (d *Driver) getLowerDirs(id string) ([]string, error) { @@ -1122,9 +1182,6 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { @@ -1145,6 +1202,9 @@ func (d *Driver) Remove(id string) error { // under each layer has a symlink created for it under the linkDir. If the symlink does not // exist, it creates them func (d *Driver) recreateSymlinks() error { + // We have at most 3 corrective actions per layer, so 10 iterations is plenty. 
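
As the comment above says, the recreateSymlinks repair loop is a bounded fix-point iteration: keep making passes while a pass changed something, and give up after a fixed number of rounds so that mutually conflicting corrective actions cannot livelock. The generic shape of the pattern (a sketch, not the patch's code):

package fixpoint

import "fmt"

// converge repeats pass() until it reports no progress, erroring out after
// maxIterations rounds instead of looping forever.
func converge(maxIterations int, pass func() (madeProgress bool, err error)) error {
	for i := 0; i < maxIterations; i++ {
		progress, err := pass()
		if err != nil {
			return err
		}
		if !progress {
			return nil // reached a fixed point
		}
	}
	return fmt.Errorf("no fixed point after %d iterations, giving up", maxIterations)
}
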
+ const maxIterations = 10 + // List all the directories under the home directory dirs, err := ioutil.ReadDir(d.home) if err != nil { @@ -1162,6 +1222,7 @@ func (d *Driver) recreateSymlinks() error { // Keep looping as long as we take some corrective action in each iteration var errs *multierror.Error madeProgress := true + iterations := 0 for madeProgress { errs = nil madeProgress = false @@ -1175,7 +1236,7 @@ func (d *Driver) recreateSymlinks() error { // Read the "link" file under each layer to get the name of the symlink data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) if err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir)) + errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir.Name())) continue } linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) @@ -1212,7 +1273,12 @@ func (d *Driver) recreateSymlinks() error { if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" { errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target)) // force the link to be recreated on the next pass - os.Remove(filepath.Join(linksDir, link.Name())) + if err := os.Remove(filepath.Join(linksDir, link.Name())); err != nil { + if !os.IsNotExist(err) { + errs = multierror.Append(errs, errors.Wrapf(err, "removing link %q", link)) + } // else don’t report any error, but also don’t set madeProgress. + continue + } madeProgress = true continue } @@ -1222,6 +1288,8 @@ func (d *Driver) recreateSymlinks() error { linkFile := filepath.Join(d.dir(targetID), "link") data, err := ioutil.ReadFile(linkFile) if err != nil || string(data) != link.Name() { + // NOTE: If two or more links point to the same target, we will update linkFile + // with every value of link.Name(), and set madeProgress = true every time. if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID)) continue @@ -1229,6 +1297,11 @@ func (d *Driver) recreateSymlinks() error { madeProgress = true } } + iterations++ + if iterations >= maxIterations { + errs = multierror.Append(errs, fmt.Errorf("Reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) + break + } } if errs != nil { return errs.ErrorOrNil() @@ -1242,18 +1315,20 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) + dir, inAdditionalStore := d.dir2(id) if _, err := os.Stat(dir); err != nil { return "", err } - readWrite := true + readWrite := !inAdditionalStore if !d.SupportsShifting() || options.DisableShifting { disableShifting = true } + logLevel := logrus.WarnLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } optsList := options.Options if len(optsList) == 0 { optsList = strings.Split(d.options.mountOptions, ",") @@ -1262,16 +1337,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // options otherwise the kernel refuses to follow the metacopy xattr. 
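
A second change threaded through get() above: dir2 now reports whether the layer was found in an additional image store, and such layers are only ever mounted read-only (readWrite := !inAdditionalStore). A minimal sketch of the lookup, with illustrative names:

package storedemo

import (
	"os"
	"path"
)

// resolveLayerDir returns the layer path and whether it came from an
// additional (read-only) image store rather than the driver's own home.
func resolveLayerDir(home, driverName, id string, additionalStores []string) (string, bool) {
	newpath := path.Join(home, id)
	if _, err := os.Stat(newpath); err != nil {
		for _, p := range additionalStores {
			l := path.Join(p, driverName, id)
			if _, err := os.Stat(l); err == nil {
				return l, true
			}
		}
	}
	return newpath, false
}
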
if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) { if d.usingMetacopy { + logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally") optsList = append(optsList, "metacopy=on") - } else { - logLevel := logrus.WarnLevel - if unshare.IsRootless() { - logLevel = logrus.DebugLevel - } - logrus.StandardLogger().Logf(logLevel, "Ignoring metacopy option from storage.conf, not supported with booted kernel") } } } + if !d.usingMetacopy { + if hasMetacopyOption(optsList) { + logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel") + } + optsList = stripOption(optsList, "metacopy=on") + } + for _, o := range optsList { if o == "ro" { readWrite = false @@ -1322,7 +1399,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } for err == nil { absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) - relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN))) diffN++ st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) if err == nil && !permsKnown { @@ -1416,29 +1493,74 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO workdir := path.Join(dir, "work") - var opts string - if readWrite { - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) - } else { - opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) - } - if len(optsList) > 0 { - opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts) - } - if d.options.mountProgram == "" && unshare.IsRootless() { - opts = fmt.Sprintf("%s,userxattr", opts) + optsList = append(optsList, "userxattr") } - // If "volatile" is not supported by the file system, just ignore the request - if options.Volatile && !hasVolatileOption(strings.Split(opts, ",")) { + if options.Volatile && !hasVolatileOption(optsList) { supported, err := d.getSupportsVolatile() if err != nil { return "", err } + // If "volatile" is not supported by the file system, just ignore the request if supported { - opts = fmt.Sprintf("%s,volatile", opts) + optsList = append(optsList, "volatile") + } + } + + if d.supportsIDmappedMounts() && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 { + var newAbsDir []string + mappedRoot := filepath.Join(d.home, id, "mapped") + if err := os.MkdirAll(mappedRoot, 0700); err != nil { + return "", err } + + pid, cleanupFunc, err := createUsernsProcess(options.UidMaps, options.GidMaps) + if err != nil { + return "", err + } + defer cleanupFunc() + + idMappedMounts := make(map[string]string) + + // rewrite the lower dirs to their idmapped mount. + c := 0 + for _, absLower := range absLowers { + mappedMountSrc := getMappedMountRoot(absLower) + + root, found := idMappedMounts[mappedMountSrc] + if !found { + root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c)) + c++ + if err := createIDMappedMount(mappedMountSrc, root, int(pid)); err != nil { + return "", errors.Wrapf(err, "create mapped mount for %q on %q", mappedMountSrc, root) + } + idMappedMounts[mappedMountSrc] = root + + // overlay takes a reference on the mount, so it is safe to unmount + // the mapped idmounts as soon as the final overlay file system is mounted. 
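
stripOption, added near the top of this file, removes every occurrence of an option rather than just the first; the recursion re-scans the slice after each removal. It is what guarantees metacopy=on never reaches the kernel when d.usingMetacopy is false, even if the option came in through the global mountOptions. A quick check of the semantics:

package main

import "fmt"

func stripOption(opts []string, option string) []string {
	for i, s := range opts {
		if s == option {
			return stripOption(append(opts[:i], opts[i+1:]...), option)
		}
	}
	return opts
}

func main() {
	opts := []string{"metacopy=on", "ro", "metacopy=on", "volatile"}
	fmt.Println(stripOption(opts, "metacopy=on")) // [ro volatile]
}
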
+ defer unix.Unmount(root, unix.MNT_DETACH) + } + + // relative path to the layer through the id mapped mount + rel, err := filepath.Rel(mappedMountSrc, absLower) + if err != nil { + return "", err + } + + newAbsDir = append(newAbsDir, filepath.Join(root, rel)) + } + absLowers = newAbsDir + } + + var opts string + if readWrite { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + } else { + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + } + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) } mountData := label.FormatMountLabel(opts, options.MountLabel) @@ -1447,10 +1569,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO pageSize := unix.Getpagesize() - // Use relative paths and mountFrom when the mount data has exceeded - // the page size. The mount syscall fails if the mount data cannot - // fit within a page and relative links make the mount data much - // smaller at the expense of requiring a fork exec to chroot. if d.options.mountProgram != "" { mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { if !disableShifting { @@ -1476,18 +1594,26 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } return nil } - } else if len(mountData) > pageSize { + } else if len(mountData) >= pageSize { + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. + workdir = path.Join(id, "work") //FIXME: We need to figure out to get this to work with additional stores if readWrite { diffDir := path.Join(id, "diff") opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir) } else { - opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":")) + opts = fmt.Sprintf("lowerdir=%s", strings.Join(relLowers, ":")) + } + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) } mountData = label.FormatMountLabel(opts, options.MountLabel) - if len(mountData) > pageSize { - return "", fmt.Errorf("cannot mount layer, mount label %q too large %d > page size %d", options.MountLabel, len(mountData), pageSize) + if len(mountData) >= pageSize { + return "", fmt.Errorf("cannot mount layer, mount label %q too large %d >= page size %d", options.MountLabel, len(mountData), pageSize) } mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) @@ -1513,8 +1639,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Put unmounts the mount path created for the give id. func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return err @@ -1529,6 +1653,18 @@ func (d *Driver) Put(id string) error { unmounted := false + mappedRoot := filepath.Join(d.home, id, "mapped") + // It should not happen, but cleanup any mapped mount if it was leaked. + if _, err := os.Stat(mappedRoot); err == nil { + mounts, err := ioutil.ReadDir(mappedRoot) + if err == nil { + // Go through all of the mapped mounts. 
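
Backing up to the idmapped-mount branch of get() above: every distinct parent directory among the lowers is idmapped-mounted exactly once under <layer>/mapped/<n>, and each lowerdir entry is then re-rooted through that mount via filepath.Rel. Deduplicating per parent keeps both the number of mounts and the length of the final lowerdir= string down. The rewrite in isolation, with mount creation stubbed out (sketch):

package main

import (
	"fmt"
	"path/filepath"
)

func rewriteLowers(mappedRoot string, lowers []string, mountRoot func(string) string) ([]string, error) {
	idMapped := map[string]string{} // source parent -> idmapped mount point
	c := 0
	var out []string
	for _, lower := range lowers {
		src := mountRoot(lower)
		root, ok := idMapped[src]
		if !ok {
			root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
			c++
			// real code: createIDMappedMount(src, root, pid)
			idMapped[src] = root
		}
		rel, err := filepath.Rel(src, lower)
		if err != nil {
			return nil, err
		}
		out = append(out, filepath.Join(root, rel))
	}
	return out, nil
}

func main() {
	lowers := []string{"/store/overlay/l/AAA", "/store/overlay/l/BBB"}
	out, _ := rewriteLowers("/store/overlay/<id>/mapped", lowers, filepath.Dir)
	fmt.Println(out) // both lowers share idmapped mount 0
}
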
+ for _, m := range mounts { + _ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH) + } + } + } + if d.options.mountProgram != "" { // Attempt to unmount the FUSE mount using either fusermount or fusermount3. // If they fail, fallback to unix.Unmount @@ -1606,11 +1742,24 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat { return whiteoutFormat } -type fileGetNilCloser struct { - storage.FileGetter +type overlayFileGetter struct { + diffDirs []string } -func (f fileGetNilCloser) Close() error { +func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) { + for _, d := range g.diffDirs { + f, err := os.Open(filepath.Join(d, path)) + if err == nil { + return f, nil + } + } + if len(g.diffDirs) > 0 { + return os.Open(filepath.Join(g.diffDirs[0], path)) + } + return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist) +} + +func (g *overlayFileGetter) Close() error { return nil } @@ -1619,13 +1768,18 @@ func (d *Driver) getStagingDir() string { } // DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. Used for direct access for tar-split. +// contains files for the layer differences, either for this layer, or one of our +// lowers if we're just a template directory. Used for direct access for tar-split. func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { p, err := d.getDiffPath(id) if err != nil { return nil, err } - return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil + paths, err := d.getLowerDiffPaths(id) + if err != nil { + return nil, err + } + return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil } // CleanupStagingDirectory cleanups the staging directory. @@ -1900,12 +2054,31 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp return nil } +// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with +// overlay lower layers. +func (d *Driver) supportsIDmappedMounts() bool { + if d.supportsIDMappedMounts != nil { + return *d.supportsIDMappedMounts + } + + supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome) + d.supportsIDMappedMounts = &supportsIDMappedMounts + if err == nil { + return supportsIDMappedMounts + } + logrus.Debugf("Check for idmapped mounts support %v", err) + return false +} + // SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS func (d *Driver) SupportsShifting() bool { if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" { return true } - return d.options.mountProgram != "" + if d.options.mountProgram != "" { + return true + } + return d.supportsIDmappedMounts() } // dumbJoin is more or less a dumber version of filepath.Join, but one which @@ -2074,3 +2247,15 @@ func redirectDiffIfAdditionalLayer(diffPath string) (string, error) { } return diffPath, nil } + +// getMappedMountRoot is a heuristic that calculates the parent directory where +// the idmapped mount should be applied. +// It is useful to minimize the number of idmapped mounts and at the same time use +// a common path as long as possible to reduce the length of the mount data argument. 
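
overlayFileGetter, defined above, resolves a tar-split path against the stack of diff directories, first hit wins; on a complete miss it deliberately re-opens against the top layer so the caller receives that layer's *os.PathError. A tiny usage sketch (paths made up):

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

type overlayFileGetter struct{ diffDirs []string }

func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
	for _, d := range g.diffDirs {
		if f, err := os.Open(filepath.Join(d, path)); err == nil {
			return f, nil
		}
	}
	if len(g.diffDirs) > 0 {
		// miss everywhere: retry the top layer purely for its error value
		return os.Open(filepath.Join(g.diffDirs[0], path))
	}
	return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist)
}

func main() {
	g := &overlayFileGetter{diffDirs: []string{"/layers/top/diff", "/layers/lower/diff"}}
	if f, err := g.Get("etc/hostname"); err == nil {
		defer f.Close()
		_, _ = io.Copy(os.Stdout, f)
	}
}
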
+func getMappedMountRoot(path string) string { + dirName := filepath.Dir(path) + if filepath.Base(dirName) == linkDir { + return filepath.Dir(dirName) + } + return dirName +} diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go index c748468e5cb..4623e7f4648 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -1,4 +1,4 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris package register diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index e034bf152c9..f29dc8f8556 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -344,7 +344,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { return errors.Wrap(err, "error creating zfs mount") } defer func() { - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + if err := detachUnmount(mountpoint); err != nil { logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) } }() @@ -483,7 +483,7 @@ func (d *Driver) Put(id string) error { logger.Debugf(`unmount("%s")`, mountpoint) - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + if err := detachUnmount(mountpoint); err != nil { logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) } if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go index bf690515984..61a2ed871a4 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -2,7 +2,6 @@ package zfs import ( "fmt" - "strings" "github.com/containers/storage/drivers" "github.com/pkg/errors" @@ -26,14 +25,10 @@ func checkRootdirFs(rootdir string) error { } func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return id[:maxlen] + "-" + suffix[1] - } + return id +} - return id[:maxlen] +func detachUnmount(mountpoint string) error { + // FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH + return unix.Unmount(mountpoint, unix.MNT_FORCE) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go index edcb1da36b7..44c68f394ec 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -4,6 +4,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func checkRootdirFs(rootDir string) error { @@ -27,3 +28,7 @@ func checkRootdirFs(rootDir string) error { func getMountpoint(id string) string { return id } + +func detachUnmount(mountpoint string) error { + return unix.Unmount(mountpoint, unix.MNT_DETACH) +} diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go index 5fc810b89d4..de6e377541c 100644 --- 
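
Stepping back to getMappedMountRoot at the top of this hunk: for a path under the overlay driver's symlink farm ("l", the linkDir constant), it climbs one level higher so that sibling lowers reachable through linkDir can all share a single idmapped mount. Expected behavior:

package main

import (
	"fmt"
	"path/filepath"
)

const linkDir = "l"

func getMappedMountRoot(path string) string {
	dirName := filepath.Dir(path)
	if filepath.Base(dirName) == linkDir {
		return filepath.Dir(dirName)
	}
	return dirName
}

func main() {
	fmt.Println(getMappedMountRoot("/store/overlay/l/ABC"))     // /store/overlay
	fmt.Println(getMappedMountRoot("/store/overlay/<id>/diff")) // /store/overlay/<id>
}
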
a/vendor/github.com/containers/storage/errors.go +++ b/vendor/github.com/containers/storage/errors.go @@ -1,6 +1,8 @@ package storage import ( + "errors" + "github.com/containers/storage/types" ) @@ -55,4 +57,9 @@ var ( ErrStoreIsReadOnly = types.ErrStoreIsReadOnly // ErrNotSupported is returned when the requested functionality is not supported. ErrNotSupported = types.ErrNotSupported + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = types.ErrInvalidMappings + // ErrInvalidNameOperation is returned when updateName is called with invalid operation. + // Internal error + errInvalidUpdateNameOperation = errors.New("invalid update name operation") ) diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go index f870b9ceed3..0a06a43235f 100644 --- a/vendor/github.com/containers/storage/idset.go +++ b/vendor/github.com/containers/storage/idset.go @@ -1,6 +1,9 @@ package storage import ( + "fmt" + "strings" + "github.com/containers/storage/pkg/idtools" "github.com/google/go-intervals/intervalset" "github.com/pkg/errors" @@ -218,3 +221,45 @@ func maxInt(a, b int) int { } return a } + +func hasOverlappingRanges(mappings []idtools.IDMap) error { + hostIntervals := intervalset.Empty() + containerIntervals := intervalset.Empty() + + var conflicts []string + + for _, m := range mappings { + c := interval{start: m.ContainerID, end: m.ContainerID + m.Size} + h := interval{start: m.HostID, end: m.HostID + m.Size} + + added := false + overlaps := false + + containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + added = true + } + containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c})) + + hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps && !added { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + } + hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h})) + } + + if conflicts != nil { + if len(conflicts) == 1 { + return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mapping %s conflicts with other mappings", conflicts[0]) + } + return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mappings %s conflict with other mappings", strings.Join(conflicts, ", ")) + } + return nil +} diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index bca25a65b8c..a4c3ed22c75 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -136,8 +136,19 @@ type ImageStore interface { // SetNames replaces the list of names associated with an image with the // supplied values. The values are expected to be valid normalized // named image references. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the image with + // the specified id. The values are expected to be valid normalized + // named image references. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the image with + // the specified id. 
The values are expected to be valid normalized + // named image references. + RemoveNames(id string, names []string) error + // Delete removes the record of the image. Delete(id string) error @@ -425,37 +436,36 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c if created.IsZero() { created = time.Now().UTC() } - if err == nil { - image = &Image{ - ID: id, - Digest: searchableDigest, - Digests: nil, - Names: names, - TopLayer: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: created, - Flags: make(map[string]interface{}), - } - err := image.recomputeDigests() - if err != nil { - return nil, errors.Wrapf(err, "error validating digests for new image") - } - r.images = append(r.images, image) - r.idindex.Add(id) - r.byid[id] = image - for _, name := range names { - r.byname[name] = image - } - for _, digest := range image.Digests { - list := r.bydigest[digest] - r.bydigest[digest] = append(list, image) - } - err = r.Save() - image = copyImage(image) + + image = &Image{ + ID: id, + Digest: searchableDigest, + Digests: nil, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: created, + Flags: make(map[string]interface{}), + } + err = image.recomputeDigests() + if err != nil { + return nil, errors.Wrapf(err, "error validating digests for new image") + } + r.images = append(r.images, image) + r.idindex.Add(id) + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + for _, digest := range image.Digests { + list := r.bydigest[digest] + r.bydigest[digest] = append(list, image) } + err = r.Save() + image = copyImage(image) return image, err } @@ -506,26 +516,44 @@ func (i *Image) addNameToHistory(name string) { i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...)) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
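
The hasOverlappingRanges helper added to idset.go above rejects ID mappings whose container ranges or host ranges intersect, reporting each conflicting triple in the familiar container:host:size form. Its bookkeeping relies on go-intervals, but the property being enforced is easy to restate standalone (an O(n²) sketch for illustration, not the library code):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

// overlaps reports whether the container (or host) ranges of two mappings intersect.
func overlaps(a, b idtools.IDMap, host bool) bool {
	as, bs := a.ContainerID, b.ContainerID
	if host {
		as, bs = a.HostID, b.HostID
	}
	return as < bs+b.Size && bs < as+a.Size
}

func main() {
	maps := []idtools.IDMap{
		{ContainerID: 0, HostID: 100000, Size: 65536},
		{ContainerID: 1000, HostID: 200000, Size: 1}, // container range falls inside the first
	}
	for i := range maps {
		for j := i + 1; j < len(maps); j++ {
			if overlaps(maps[i], maps[j], false) || overlaps(maps[i], maps[j], true) {
				fmt.Printf("conflict: %d:%d:%d\n", maps[j].ContainerID, maps[j].HostID, maps[j].Size)
			}
		}
	}
}
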
func (r *imageStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *imageStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *imageStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) } - names = dedupeNames(names) - if image, ok := r.lookup(id); ok { - for _, name := range image.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherImage, ok := r.byname[name]; ok { - r.removeName(otherImage, name) - } - r.byname[name] = image - image.addNameToHistory(name) + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + oldNames := image.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, ok := r.byname[name]; ok { + r.removeName(otherImage, name) } - image.Names = names - return r.Save() + r.byname[name] = image + image.addNameToHistory(name) } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + image.Names = names + return r.Save() } func (r *imageStore) Delete(id string) error { diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index fbf6ad3621f..bba8d7588f7 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -23,6 +23,7 @@ import ( "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/tarlog" "github.com/containers/storage/pkg/truncindex" + multierror "github.com/hashicorp/go-multierror" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux/label" @@ -220,8 +221,17 @@ type LayerStore interface { // SetNames replaces the list of names associated with a layer with the // supplied values. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the layer with the + // specified id. + AddNames(id string, names []string) error + + // RemoveNames remove the supplied values from the list of names associated with the layer with the + // specified id. + RemoveNames(id string, names []string) error + // Delete deletes a layer with the specified name or ID. 
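
updateNames delegates the actual list surgery to applyNameOperation with an updateNameOperation value; both are defined in a part of the patch not shown in this hunk (the errInvalidUpdateNameOperation sentinel added to errors.go above belongs to it). A plausible reconstruction of their semantics, matching how updateNames and the three wrappers use them — treat this as a sketch, not the patch's exact code:

package namesop

import "errors"

type updateNameOperation int

const (
	setNames updateNameOperation = iota
	addNames
	removeNames
)

var errInvalidUpdateNameOperation = errors.New("invalid update name operation")

func applyNameOperation(oldNames, args []string, op updateNameOperation) ([]string, error) {
	switch op {
	case setNames:
		return dedupeNames(args), nil
	case addNames:
		return dedupeNames(append(append([]string{}, oldNames...), args...)), nil
	case removeNames:
		drop := map[string]bool{}
		for _, n := range args {
			drop[n] = true
		}
		var keep []string
		for _, n := range oldNames {
			if !drop[n] {
				keep = append(keep, n)
			}
		}
		return dedupeNames(keep), nil
	default:
		return nil, errInvalidUpdateNameOperation
	}
}

// dedupeNames keeps first occurrences, mirroring the store's existing helper.
func dedupeNames(names []string) []string {
	seen := map[string]bool{}
	var out []string
	for _, n := range names {
		if !seen[n] {
			seen[n] = true
			out = append(out, n)
		}
	}
	return out
}
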
Delete(id string) error @@ -398,14 +408,13 @@ func (r *layerStore) Load() error { if layer.Flags == nil { layer.Flags = make(map[string]interface{}) } - if cleanup, ok := layer.Flags[incompleteFlag]; ok { - if b, ok := cleanup.(bool); ok && b { - err = r.deleteInternal(layer.ID) - if err != nil { - break - } - shouldSave = true + if layerHasIncompleteFlag(layer) { + logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID) + err = r.deleteInternal(layer.ID) + if err != nil { + break } + shouldSave = true } } } @@ -674,7 +683,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) } if layer.UncompressedDigest != "" { - r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID) + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) } if err := r.Save(); err != nil { r.driver.Remove(id) @@ -683,11 +692,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s return copyLayer(layer), nil } -func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { +func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) { if !r.IsReadWrite() { return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) } - size = -1 if err := os.MkdirAll(r.rundir, 0700); err != nil { return nil, -1, err } @@ -716,12 +724,32 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab parent = parentLayer.ID } var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings + var ( + templateMetadata string + templateCompressedDigest digest.Digest + templateCompressedSize int64 + templateUncompressedDigest digest.Digest + templateUncompressedSize int64 + templateCompressionType archive.Compression + templateUIDs, templateGIDs []uint32 + templateTSdata []byte + ) if moreOptions.TemplateLayer != "" { + var tserr error templateLayer, ok := r.lookup(moreOptions.TemplateLayer) if !ok { return nil, -1, ErrLayerUnknown } + templateMetadata = templateLayer.Metadata templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap) + templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize + templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize + templateCompressionType = templateLayer.CompressionType + templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...) 
+ templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID)) + if tserr != nil && !os.IsNotExist(tserr) { + return nil, -1, tserr + } } else { templateIDMappings = &idtools.IDMappings{} } @@ -733,6 +761,60 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab if mountLabel != "" { label.ReserveLabel(mountLabel) } + + // Before actually creating the layer, make a persistent record of it with incompleteFlag, + // so that future processes have a chance to delete it. + layer := &Layer{ + ID: id, + Parent: parent, + Names: names, + MountLabel: mountLabel, + Metadata: templateMetadata, + Created: time.Now().UTC(), + CompressedDigest: templateCompressedDigest, + CompressedSize: templateCompressedSize, + UncompressedDigest: templateUncompressedDigest, + UncompressedSize: templateUncompressedSize, + CompressionType: templateCompressionType, + UIDs: templateUIDs, + GIDs: templateGIDs, + Flags: make(map[string]interface{}), + UIDMap: copyIDMap(moreOptions.UIDMap), + GIDMap: copyIDMap(moreOptions.GIDMap), + BigDataNames: []string{}, + } + r.layers = append(r.layers, layer) + r.idindex.Add(id) + r.byid[id] = layer + for _, name := range names { + r.byname[name] = layer + } + for flag, value := range flags { + layer.Flags[flag] = value + } + layer.Flags[incompleteFlag] = true + + succeeded := false + cleanupFailureContext := "" + defer func() { + if !succeeded { + // On any error, try both removing the driver's data as well + // as the in-memory layer record. + if err2 := r.Delete(layer.ID); err2 != nil { + if cleanupFailureContext == "" { + cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site" + } + logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2) + } + } + }() + + err := r.Save() + if err != nil { + cleanupFailureContext = "saving incomplete layer metadata" + return nil, -1, err + } + idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap) opts := drivers.CreateOpts{ MountLabel: mountLabel, @@ -740,89 +822,67 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab IDMappings: idMappings, } if moreOptions.TemplateLayer != "" { - if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) - } - return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer) + if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { + cleanupFailureContext = "creating a layer from template" + return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) } oldMappings = templateIDMappings } else { if writeable { - if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) - } - return nil, -1, errors.Wrapf(err, "error creating read-write layer") + if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-write layer" + return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) } } else { - if err = r.driver.Create(id, parent, &opts); err != 
nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) - } - return nil, -1, errors.Wrapf(err, "error creating layer") + if err := r.driver.Create(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-only layer" + return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) } } oldMappings = parentMappings } if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { - if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. - r.driver.Remove(id) + if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { + cleanupFailureContext = "in UpdateLayerIDMap" return nil, -1, err } } - if err == nil { - layer = &Layer{ - ID: id, - Parent: parent, - Names: names, - MountLabel: mountLabel, - Created: time.Now().UTC(), - Flags: make(map[string]interface{}), - UIDMap: copyIDMap(moreOptions.UIDMap), - GIDMap: copyIDMap(moreOptions.GIDMap), - BigDataNames: []string{}, - } - r.layers = append(r.layers, layer) - r.idindex.Add(id) - r.byid[id] = layer - for _, name := range names { - r.byname[name] = layer - } - for flag, value := range flags { - layer.Flags[flag] = value - } - if diff != nil { - layer.Flags[incompleteFlag] = true - err = r.Save() - if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. - r.driver.Remove(id) - return nil, -1, err - } - size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) - if err != nil { - if r.Delete(layer.ID) != nil { - // Either a driver error or an error saving. - // We now have a layer that's been marked for - // deletion but which we failed to remove. - } - return nil, -1, err - } - delete(layer.Flags, incompleteFlag) + if len(templateTSdata) > 0 { + if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil { + cleanupFailureContext = "creating tar-split parent directory for a copy from template" + return nil, -1, err + } + if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil { + cleanupFailureContext = "creating a tar-split copy from template" + return nil, -1, err } - err = r.Save() + } + + var size int64 = -1 + if diff != nil { + size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. 
- r.driver.Remove(id) + cleanupFailureContext = "applying layer diff" return nil, -1, err } - layer = copyLayer(layer) + } else { + // applyDiffWithOptions in the `diff != nil` case handles this bit for us + if layer.CompressedDigest != "" { + r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) + } } + delete(layer.Flags, incompleteFlag) + err = r.Save() + if err != nil { + cleanupFailureContext = "saving finished layer metadata" + return nil, -1, err + } + + layer = copyLayer(layer) + succeeded = true return layer, size, err } @@ -854,7 +914,6 @@ func (r *layerStore) Mounted(id string) (int, error) { } func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) { - // check whether options include ro option hasReadOnlyOpt := func(opts []string) bool { for _, item := range opts { @@ -1031,25 +1090,43 @@ func (r *layerStore) removeName(layer *Layer, name string) { layer.Names = stringSliceWithoutValue(layer.Names, name) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. func (r *layerStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *layerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *layerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) } - names = dedupeNames(names) - if layer, ok := r.lookup(id); ok { - for _, name := range layer.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherLayer, ok := r.byname[name]; ok { - r.removeName(otherLayer, name) - } - r.byname[name] = layer + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + oldNames := layer.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) } - layer.Names = names - return r.Save() + r.byname[name] = layer } - return ErrLayerUnknown + layer.Names = names + return r.Save() } func (r *layerStore) datadir(id string) string { @@ -1148,6 +1225,17 @@ func (r *layerStore) tspath(id string) string { return filepath.Join(r.layerdir, id+tarSplitSuffix) } +// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true +func layerHasIncompleteFlag(layer *Layer) bool { + // layer.Flags[…] is defined to succeed and return ok == false if Flags == nil + if flagValue, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := flagValue.(bool); ok && b { + return true + } + } + return false +} + func (r *layerStore) deleteInternal(id string) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) @@ -1156,6 +1244,18 @@ func (r *layerStore) deleteInternal(id string) error { if !ok { return ErrLayerUnknown } + // Ensure that if we are interrupted, the layer will be cleaned up. 
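
Together with the deleteInternal change just below, the rewritten Put gives layer creation a commit-point discipline: the layer record is persisted with incompleteFlag before any fallible work (driver creation, tar-split copy, applying the diff), cleanupFailureContext labels whichever step failed, and one deferred cleanup keyed off succeeded unwinds everything on any early return. The shape of the pattern (sketch, names generic):

package commitpoint

import "log"

// createWithRecovery persists an "incomplete" record before any fallible
// work, runs the steps, and clears the flag only at the very end; a single
// deferred cleanup covers every early return. record(true) marks intent,
// record(false) is the commit point.
func createWithRecovery(record func(incomplete bool) error, steps []func() error, remove func() error) (err error) {
	if err = record(true); err != nil {
		return err
	}
	succeeded := false
	defer func() {
		if !succeeded {
			if err2 := remove(); err2 != nil {
				log.Printf("while recovering from a failure, error removing the record: %v", err2)
			}
		}
	}()
	for _, step := range steps {
		if err = step(); err != nil {
			return err
		}
	}
	if err = record(false); err != nil {
		return err
	}
	succeeded = true
	return nil
}
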
+ if !layerHasIncompleteFlag(layer) { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + layer.Flags[incompleteFlag] = true + if err := r.Save(); err != nil { + return err + } + } + // We never unset incompleteFlag; below, we remove the entire object from r.layers. + id = layer.ID err := r.driver.Remove(id) if err != nil { @@ -1463,34 +1563,48 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, } return maybeCompressReadCloser(diff) } - defer tsfile.Close() decompressor, err := pgzip.NewReader(tsfile) if err != nil { - return nil, err - } - defer decompressor.Close() - - tsbytes, err := ioutil.ReadAll(decompressor) - if err != nil { + if e := tsfile.Close(); e != nil { + logrus.Debug(e) + } return nil, err } - metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes)) + metadata = storage.NewJSONUnpacker(decompressor) fgetter, err := r.newFileGetter(to) if err != nil { - return nil, err + errs := multierror.Append(nil, errors.Wrapf(err, "creating file-getter")) + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + return nil, errs.ErrorOrNil() } tarstream := asm.NewOutputTarStream(fgetter, metadata) rc := ioutils.NewReadCloserWrapper(tarstream, func() error { - err1 := tarstream.Close() - err2 := fgetter.Close() - if err2 == nil { - return err1 + var errs *multierror.Error + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + if err := tarstream.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing reconstructed tarstream")) + } + if err := fgetter.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing file-getter")) + } + if errs != nil { + return errs.ErrorOrNil() } - return err2 + return nil }) return maybeCompressReadCloser(rc) } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 76544ff289b..d4f129ee634 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -7,6 +7,7 @@ import ( "compress/bzip2" "fmt" "io" + "io/fs" "io/ioutil" "os" "path/filepath" @@ -77,6 +78,10 @@ const ( containersOverrideXattr = "user.containers.override_stat" ) +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, +} + // Archiver allows the reuse of most utility functions of this package with a // pluggable Untar function. 
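
Back in layers.go, the rewritten Diff() no longer slurps the whole tar-split stream into memory via ioutil.ReadAll: it streams through the pgzip decompressor, which means four nested resources (decompressor, tsfile, tarstream, fgetter) must all be closed, with every failure reported through go-multierror instead of the old first-error-wins logic. The idiom, reduced to its essentials (sketch):

package closedemo

import (
	"io"

	multierror "github.com/hashicorp/go-multierror"
	"github.com/pkg/errors"
)

// closeAll closes resources in the given order and aggregates every
// failure instead of keeping only the first one. names and closers are
// parallel slices.
func closeAll(names []string, closers []io.Closer) error {
	var errs *multierror.Error
	for i, c := range closers {
		if err := c.Close(); err != nil {
			errs = multierror.Append(errs, errors.Wrapf(err, "closing %s", names[i]))
		}
	}
	return errs.ErrorOrNil()
}
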
To facilitate the passing of specific id mappings // for untar, an archiver can be created with maps which will then be passed to @@ -507,6 +512,10 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } } + if fi.Mode()&os.ModeSocket != 0 { + logrus.Warnf("archive: skipping %q since it is a socket", path) + return nil + } hdr, err := FileInfoHeader(name, fi, link) if err != nil { @@ -743,6 +752,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L var errs []string for key, value := range hdr.Xattrs { + if _, found := xattrsToIgnore[key]; found { + continue + } if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { // We ignore errors here because not all graphdrivers support @@ -852,14 +864,14 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) rebaseName := options.RebaseNames[include] walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil @@ -892,7 +904,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. - if !f.IsDir() { + if !d.IsDir() { return nil } @@ -962,7 +974,10 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) buffer := make([]byte, 1<<20) + doChown := !options.NoLchown if options.ForceMask != nil { + // if ForceMask is in place, make sure lchown is disabled. 
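
Several call sites in the archive package move from filepath.Walk to filepath.WalkDir (Go 1.16+), which hands the callback a fs.DirEntry instead of a pre-stat'ed os.FileInfo, saving one lstat per directory entry; d.Info() is called lazily only where full metadata is actually needed. For example:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	_ = filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() { // cheap: no extra syscall
			return nil
		}
		info, err := d.Info() // stat only when size/mode are really needed
		if err != nil {
			return err
		}
		fmt.Println(path, info.Size())
		return nil
	})
}
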
+ doChown = false uid, gid, mode, err := GetFileOwner(dest) if err == nil { value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) @@ -1067,7 +1082,7 @@ loop: chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 2f548b661ce..51fbd9a2197 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -36,7 +36,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi // we just rename the file and make it normal dir, filename := filepath.Split(hdr.Name) hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 + hdr.Mode = 0 hdr.Typeflag = tar.TypeReg hdr.Size = 0 } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go index bbbd8c9de87..8769f2291b6 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -1,9 +1,11 @@ +//go:build !linux // +build !linux package archive import ( "fmt" + "io/fs" "os" "path/filepath" "runtime" @@ -41,7 +43,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { root := newRootFileInfo(idMappings) - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { if err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index 14ffad5c0d4..ca8832fe421 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -4,6 +4,7 @@ import ( "archive/tar" "fmt" "io" + "io/fs" "io/ioutil" "os" "path/filepath" @@ -134,7 +135,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if err != nil { return 0, err } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { if os.IsNotExist(err) { err = nil // parent was deleted diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go new file mode 100644 index 00000000000..b8b278a1329 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -0,0 +1,627 @@ +package chunked + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + "unsafe" + + storage "github.com/containers/storage" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/ioutils" + jsoniter "github.com/json-iterator/go" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + 
"github.com/sirupsen/logrus" +) + +const ( + cacheKey = "chunked-manifest-cache" + cacheVersion = 1 +) + +type metadata struct { + tagLen int + digestLen int + tags []byte + vdata []byte +} + +type layer struct { + id string + metadata *metadata + target string +} + +type layersCache struct { + layers []layer + refs int + store storage.Store + mutex sync.RWMutex + created time.Time +} + +var cacheMutex sync.Mutex +var cache *layersCache + +func (c *layersCache) release() { + cacheMutex.Lock() + defer cacheMutex.Unlock() + + c.refs-- + if c.refs == 0 { + cache = nil + } +} + +func getLayersCacheRef(store storage.Store) *layersCache { + cacheMutex.Lock() + defer cacheMutex.Unlock() + if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 { + cache.refs++ + return cache + } + cache := &layersCache{ + store: store, + refs: 1, + created: time.Now(), + } + return cache +} + +func getLayersCache(store storage.Store) (*layersCache, error) { + c := getLayersCacheRef(store) + + if err := c.load(); err != nil { + c.release() + return nil, err + } + return c, nil +} + +func (c *layersCache) load() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + allLayers, err := c.store.Layers() + if err != nil { + return err + } + existingLayers := make(map[string]string) + for _, r := range c.layers { + existingLayers[r.id] = r.target + } + + currentLayers := make(map[string]string) + for _, r := range allLayers { + currentLayers[r.ID] = r.ID + if _, found := existingLayers[r.ID]; found { + continue + } + + bigData, err := c.store.LayerBigData(r.ID, cacheKey) + // if the cache areadly exists, read and use it + if err == nil { + defer bigData.Close() + metadata, err := readMetadataFromCache(bigData) + if err == nil { + c.addLayer(r.ID, metadata) + continue + } + logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err) + } else if errors.Cause(err) != os.ErrNotExist { + return err + } + + // otherwise create it from the layer TOC. + manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey) + if err != nil { + continue + } + defer manifestReader.Close() + + manifest, err := ioutil.ReadAll(manifestReader) + if err != nil { + return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) + } + + metadata, err := writeCache(manifest, r.ID, c.store) + if err == nil { + c.addLayer(r.ID, metadata) + } + } + + var newLayers []layer + for _, l := range c.layers { + if _, found := currentLayers[l.id]; found { + newLayers = append(newLayers, l) + } + } + c.layers = newLayers + + return nil +} + +// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file +// is usable for deduplication with hardlinks. +// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs. 
+func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { + digester := digest.Canonical.Digester() + + modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode) + hash := digester.Hash() + + if _, err := hash.Write([]byte(f.Digest)); err != nil { + return "", err + } + + if _, err := hash.Write([]byte(modeString)); err != nil { + return "", err + } + + if len(f.Xattrs) > 0 { + keys := make([]string, 0, len(f.Xattrs)) + for k := range f.Xattrs { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + if _, err := hash.Write([]byte(k)); err != nil { + return "", err + } + if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil { + return "", err + } + } + } + return string(digester.Digest()), nil +} + +// generateFileLocation generates a file location in the form $OFFSET@$PATH +func generateFileLocation(path string, offset uint64) []byte { + return []byte(fmt.Sprintf("%d@%s", offset, path)) +} + +// generateTag generates a tag in the form $DIGEST$OFFSET@LEN. +// the [OFFSET; LEN] points to the variable length data where the file locations +// are stored. $DIGEST has length digestLen stored in the metadata file header. +func generateTag(digest string, offset, len uint64) string { + return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len) +} + +type setBigData interface { + // SetLayerBigData stores a (possibly large) chunk of named data + SetLayerBigData(id, key string, data io.Reader) error +} + +// writeCache write a cache for the layer ID. +// It generates a sorted list of digests with their offset to the path location and offset. +// The same cache is used to lookup files, chunks and candidates for deduplication with hard links. +// There are 3 kind of digests stored: +// - digest(file.payload)) +// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) +// - digest(i) for each i in chunks(file payload) +func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) { + var vdata bytes.Buffer + tagLen := 0 + digestLen := 0 + var tagsBuffer bytes.Buffer + + toc, err := prepareMetadata(manifest) + if err != nil { + return nil, err + } + + var tags []string + for _, k := range toc { + if k.Digest != "" { + location := generateFileLocation(k.Name, 0) + + off := uint64(vdata.Len()) + l := uint64(len(location)) + + d := generateTag(k.Digest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + fp, err := calculateHardLinkFingerprint(k) + if err != nil { + return nil, err + } + d = generateTag(fp, off, l) + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + + digestLen = len(k.Digest) + } + if k.ChunkDigest != "" { + location := generateFileLocation(k.Name, uint64(k.ChunkOffset)) + off := uint64(vdata.Len()) + l := uint64(len(location)) + d := generateTag(k.ChunkDigest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + digestLen = len(k.ChunkDigest) + } + } + + sort.Strings(tags) + + for _, t := range tags { + if _, err := tagsBuffer.Write([]byte(t)); err != nil { + return nil, err + } + } + + pipeReader, pipeWriter := io.Pipe() + errChan := make(chan error, 1) + go 
func() { + defer pipeWriter.Close() + defer close(errChan) + + // version + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil { + errChan <- err + return + } + + // len of a tag + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil { + errChan <- err + return + } + + // len of a digest + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil { + errChan <- err + return + } + + // tags length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil { + errChan <- err + return + } + + // vdata length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil { + errChan <- err + return + } + + // tags + if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil { + errChan <- err + return + } + + // variable length data + if _, err := pipeWriter.Write(vdata.Bytes()); err != nil { + errChan <- err + return + } + + errChan <- nil + }() + defer pipeReader.Close() + + counter := ioutils.NewWriteCounter(ioutil.Discard) + + r := io.TeeReader(pipeReader, counter) + + if err := dest.SetLayerBigData(id, cacheKey, r); err != nil { + return nil, err + } + + if err := <-errChan; err != nil { + return nil, err + } + + logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count) + + return &metadata{ + digestLen: digestLen, + tagLen: tagLen, + tags: tagsBuffer.Bytes(), + vdata: vdata.Bytes(), + }, nil +} + +func readMetadataFromCache(bigData io.Reader) (*metadata, error) { + var version, tagLen, digestLen, tagsLen, vdataLen uint64 + if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil { + return nil, err + } + if version != cacheVersion { + return nil, nil + } + if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil { + return nil, err + } + + tags := make([]byte, tagsLen) + if _, err := bigData.Read(tags); err != nil { + return nil, err + } + + vdata := make([]byte, vdataLen) + if _, err := bigData.Read(vdata); err != nil { + return nil, err + } + + return &metadata{ + tagLen: int(tagLen), + digestLen: int(digestLen), + tags: tags, + vdata: vdata, + }, nil +} + +func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) { + toc, err := unmarshalToc(manifest) + if err != nil { + // ignore errors here. They might be caused by a different manifest format. 
+ return nil, nil + } + + var r []*internal.FileMetadata + chunkSeen := make(map[string]bool) + for i := range toc.Entries { + d := toc.Entries[i].Digest + if d != "" { + r = append(r, &toc.Entries[i]) + continue + } + + // chunks do not use hard link dedup so keeping just one candidate is enough + cd := toc.Entries[i].ChunkDigest + if cd != "" && !chunkSeen[cd] { + r = append(r, &toc.Entries[i]) + chunkSeen[cd] = true + } + } + return r, nil +} + +func (c *layersCache) addLayer(id string, metadata *metadata) error { + target, err := c.store.DifferTarget(id) + if err != nil { + return fmt.Errorf("get checkout directory layer %q: %w", id, err) + } + + l := layer{ + id: id, + metadata: metadata, + target: target, + } + c.layers = append(c.layers, l) + return nil +} + +func byteSliceAsString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func findTag(digest string, metadata *metadata) (string, uint64, uint64) { + if len(digest) != metadata.digestLen { + return "", 0, 0 + } + + nElements := len(metadata.tags) / metadata.tagLen + + i := sort.Search(nElements, func(i int) bool { + d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen]) + return strings.Compare(d, digest) >= 0 + }) + if i < nElements { + d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)]) + if digest == d { + startOff := i*metadata.tagLen + metadata.digestLen + parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + off, _ := strconv.ParseInt(parts[0], 10, 64) + len, _ := strconv.ParseInt(parts[1], 10, 64) + return digest, uint64(off), uint64(len) + } + } + return "", 0, 0 +} + +func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) { + if digest == "" { + return "", "", -1, nil + } + + c.mutex.RLock() + defer c.mutex.RUnlock() + + for _, layer := range c.layers { + digest, off, len := findTag(digest, layer.metadata) + if digest != "" { + position := string(layer.metadata.vdata[off : off+len]) + parts := strings.SplitN(position, "@", 2) + offFile, _ := strconv.ParseInt(parts[0], 10, 64) + return layer.target, parts[1], offFile, nil + } + } + + return "", "", -1, nil +} + +// findFileInOtherLayers finds the specified file in other layers. +// file is the file to look for. 
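+// It returns the target directory of the layer that contains the file and the
+// file name inside it, or empty strings when no other layer provides it.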
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) {
+	digest := file.Digest
+	if useHardLinks {
+		var err error
+		digest, err = calculateHardLinkFingerprint(file)
+		if err != nil {
+			return "", "", err
+		}
+	}
+	target, name, off, err := c.findDigestInternal(digest)
+	if off == 0 {
+		return target, name, err
+	}
+	return "", "", nil
+}
+
+func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
+	return c.findDigestInternal(chunk.ChunkDigest)
+}
+
+func unmarshalToc(manifest []byte) (*internal.TOC, error) {
+	var buf bytes.Buffer
+	count := 0
+	var toc internal.TOC
+
+	iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+		if field != "entries" {
+			iter.Skip()
+			continue
+		}
+		for iter.ReadArray() {
+			for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+				switch field {
+				case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
+					count += len(iter.ReadStringAsSlice())
+				case "xattrs":
+					for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+						count += len(iter.ReadStringAsSlice())
+					}
+				default:
+					iter.Skip()
+				}
+			}
+		}
+		break
+	}
+
+	buf.Grow(count)
+
+	getString := func(b []byte) string {
+		from := buf.Len()
+		buf.Write(b)
+		to := buf.Len()
+		return byteSliceAsString(buf.Bytes()[from:to])
+	}
+
+	iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+		if field == "version" {
+			toc.Version = iter.ReadInt()
+			continue
+		}
+		if field != "entries" {
+			iter.Skip()
+			continue
+		}
+		for iter.ReadArray() {
+			var m internal.FileMetadata
+			for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+				switch field {
+				case "type":
+					m.Type = getString(iter.ReadStringAsSlice())
+				case "name":
+					m.Name = getString(iter.ReadStringAsSlice())
+				case "linkName":
+					m.Linkname = getString(iter.ReadStringAsSlice())
+				case "mode":
+					m.Mode = iter.ReadInt64()
+				case "size":
+					m.Size = iter.ReadInt64()
+				case "uid":
+					m.UID = iter.ReadInt()
+				case "gid":
+					m.GID = iter.ReadInt()
+				case "modtime":
+					time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+					if err != nil {
+						return nil, err
+					}
+					m.ModTime = &time
+				case "accesstime":
+					time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+					if err != nil {
+						return nil, err
+					}
+					m.AccessTime = &time
+				case "changetime":
+					time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+					if err != nil {
+						return nil, err
+					}
+					m.ChangeTime = &time
+				case "devMajor":
+					m.Devmajor = iter.ReadInt64()
+				case "devMinor":
+					m.Devminor = iter.ReadInt64()
+				case "digest":
+					m.Digest = getString(iter.ReadStringAsSlice())
+				case "offset":
+					m.Offset = iter.ReadInt64()
+				case "endOffset":
+					m.EndOffset = iter.ReadInt64()
+				case "chunkSize":
+					m.ChunkSize = iter.ReadInt64()
+				case "chunkOffset":
+					m.ChunkOffset = iter.ReadInt64()
+				case "chunkDigest":
+					m.ChunkDigest = getString(iter.ReadStringAsSlice())
+				case "chunkType":
+					m.ChunkType = getString(iter.ReadStringAsSlice())
+				case "xattrs":
+					m.Xattrs = make(map[string]string)
+					for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+						value := iter.ReadStringAsSlice()
+						m.Xattrs[key] = getString(value)
+					}
+				default:
+					iter.Skip()
+				}
+			}
+			toc.Entries = append(toc.Entries, m)
+		}
+		break
+	}
+
+	toc.StringsBuf = buf
+	return &toc, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index 092cf584af3..aeb7cfd4f01 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -5,6 +5,7 @@ package compressor
 // larger software like the graph drivers.
 
 import (
+	"bufio"
 	"encoding/base64"
 	"io"
 	"io/ioutil"
@@ -15,6 +16,189 @@ import (
 	"github.com/vbatts/tar-split/archive/tar"
 )
 
+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+	reader    *bufio.Reader
+	fileOff   int64
+	zeros     int64
+	from      int64
+	threshold int64
+
+	state int
+}
+
+const (
+	holesFinderStateRead = iota
+	holesFinderStateAccumulate
+	holesFinderStateFound
+	holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+	for {
+		switch f.state {
+		// reading the file stream
+		case holesFinderStateRead:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				return 0, b, err
+			}
+
+			if b != 0 {
+				return 0, b, err
+			}
+
+			f.zeros = 1
+			if f.zeros == f.threshold {
+				f.state = holesFinderStateFound
+			} else {
+				f.state = holesFinderStateAccumulate
+			}
+		// accumulating zeros, but still didn't reach the threshold
+		case holesFinderStateAccumulate:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+					continue
+				}
+				return 0, b, err
+			}
+
+			if b == 0 {
+				f.zeros++
+				if f.zeros == f.threshold {
+					f.state = holesFinderStateFound
+				}
+			} else {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+			}
+		// found a hole.  Number of zeros >= threshold
+		case holesFinderStateFound:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+				}
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			if b != 0 {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			f.zeros++
+		// reached EOF.  Flush pending zeros if any.
+		case holesFinderStateEOF:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			return 0, 0, io.EOF
+		}
+	}
+}
+
+type rollingChecksumReader struct {
+	reader      *holesFinder
+	closed      bool
+	rollsum     *RollSum
+	pendingHole int64
+
+	// WrittenOut is the total number of bytes read from
+	// the stream.
+	WrittenOut int64
+
+	// IsLastChunkZeros tells whether the last generated
+	// chunk is a hole (made of consecutive zeros).  If it
+	// is false, then the last chunk is a data chunk
+	// generated by the rolling checksum.
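+	// writeZstdChunkedStream uses it to record the resulting chunk as
+	// internal.ChunkTypeZeros instead of a data chunk.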
+ IsLastChunkZeros bool +} + +func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { + rc.IsLastChunkZeros = false + + if rc.pendingHole > 0 { + toCopy := int64(len(b)) + if rc.pendingHole < toCopy { + toCopy = rc.pendingHole + } + rc.pendingHole -= toCopy + for i := int64(0); i < toCopy; i++ { + b[i] = 0 + } + + rc.WrittenOut += toCopy + + rc.IsLastChunkZeros = true + + // if there are no other zeros left, terminate the chunk + return rc.pendingHole == 0, int(toCopy), nil + } + + if rc.closed { + return false, 0, io.EOF + } + + for i := 0; i < len(b); i++ { + holeLen, n, err := rc.reader.ReadByte() + if err != nil { + if err == io.EOF { + rc.closed = true + if i == 0 { + return false, 0, err + } + return false, i, nil + } + // Report any other error type + return false, -1, err + } + if holeLen > 0 { + for j := int64(0); j < holeLen; j++ { + rc.rollsum.Roll(0) + } + rc.pendingHole = holeLen + return true, i, nil + } + b[i] = n + rc.WrittenOut++ + rc.rollsum.Roll(n) + if rc.rollsum.OnSplitWithBits(RollsumBits) { + return true, i + 1, nil + } + } + return false, len(b), nil +} + +type chunk struct { + ChunkOffset int64 + Offset int64 + Checksum string + ChunkSize int64 + ChunkType string +} + func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { // total written so far. Used to retrieve partial offsets in the file dest := ioutils.NewWriteCounter(destFile) @@ -64,40 +248,78 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r if _, err := zstdWriter.Write(rawBytes); err != nil { return err } - payloadDigester := digest.Canonical.Digester() - payloadChecksum := payloadDigester.Hash() - payloadDest := io.MultiWriter(payloadChecksum, zstdWriter) + payloadDigester := digest.Canonical.Digester() + chunkDigester := digest.Canonical.Digester() // Now handle the payload, if any - var startOffset, endOffset int64 + startOffset := int64(0) + lastOffset := int64(0) + lastChunkOffset := int64(0) + checksum := "" + + chunks := []chunk{} + + hf := &holesFinder{ + threshold: holesThreshold, + reader: bufio.NewReader(tr), + } + + rcReader := &rollingChecksumReader{ + reader: hf, + rollsum: NewRollSum(), + } + + payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) for { - read, errRead := tr.Read(buf) + mustSplit, read, errRead := rcReader.Read(buf) if errRead != nil && errRead != io.EOF { return err } - - // restart the compression only if there is - // a payload. + // restart the compression only if there is a payload. 
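+			// rcReader.Read returns (mustSplit, n, err): n bytes were produced, and
+			// mustSplit is set when the rolling checksum (or a detected run of
+			// zeros) closed a chunk; the zstd frame is then restarted below so
+			// each chunk can be fetched independently later.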
if read > 0 { if startOffset == 0 { startOffset, err = restartCompression() if err != nil { return err } + lastOffset = startOffset + } + + if _, err := payloadDest.Write(buf[:read]); err != nil { + return err } - _, err := payloadDest.Write(buf[:read]) + } + if (mustSplit || errRead == io.EOF) && startOffset > 0 { + off, err := restartCompression() if err != nil { return err } + + chunkSize := rcReader.WrittenOut - lastChunkOffset + if chunkSize > 0 { + chunkType := internal.ChunkTypeData + if rcReader.IsLastChunkZeros { + chunkType = internal.ChunkTypeZeros + } + + chunks = append(chunks, chunk{ + ChunkOffset: lastChunkOffset, + Offset: lastOffset, + Checksum: chunkDigester.Digest().String(), + ChunkSize: chunkSize, + ChunkType: chunkType, + }) + } + + lastOffset = off + lastChunkOffset = rcReader.WrittenOut + chunkDigester = digest.Canonical.Digester() + payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) } if errRead == io.EOF { if startOffset > 0 { - endOffset, err = restartCompression() - if err != nil { - return err - } checksum = payloadDigester.Digest().String() } break @@ -112,30 +334,42 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r for k, v := range hdr.Xattrs { xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v)) } - m := internal.FileMetadata{ - Type: typ, - Name: hdr.Name, - Linkname: hdr.Linkname, - Mode: hdr.Mode, - Size: hdr.Size, - UID: hdr.Uid, - GID: hdr.Gid, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - Devmajor: hdr.Devmajor, - Devminor: hdr.Devminor, - Xattrs: xattrs, - Digest: checksum, - Offset: startOffset, - EndOffset: endOffset, - - // ChunkSize is 0 for the last chunk - ChunkSize: 0, - ChunkOffset: 0, - ChunkDigest: checksum, - } - metadata = append(metadata, m) + entries := []internal.FileMetadata{ + { + Type: typ, + Name: hdr.Name, + Linkname: hdr.Linkname, + Mode: hdr.Mode, + Size: hdr.Size, + UID: hdr.Uid, + GID: hdr.Gid, + ModTime: &hdr.ModTime, + AccessTime: &hdr.AccessTime, + ChangeTime: &hdr.ChangeTime, + Devmajor: hdr.Devmajor, + Devminor: hdr.Devminor, + Xattrs: xattrs, + Digest: checksum, + Offset: startOffset, + EndOffset: lastOffset, + }, + } + for i := 1; i < len(chunks); i++ { + entries = append(entries, internal.FileMetadata{ + Type: internal.TypeChunk, + Name: hdr.Name, + ChunkOffset: chunks[i].ChunkOffset, + }) + } + if len(chunks) > 1 { + for i := range chunks { + entries[i].ChunkSize = chunks[i].ChunkSize + entries[i].Offset = chunks[i].Offset + entries[i].ChunkDigest = chunks[i].Checksum + entries[i].ChunkType = chunks[i].ChunkType + } + } + metadata = append(metadata, entries...) } rawBytes := tr.RawBytes() @@ -212,7 +446,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level // ZstdCompressor is a CompressorFunc for the zstd compression algorithm. func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { if level == nil { - l := 3 + l := 10 level = &l } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 00000000000..f4dfad822e9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,81 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const windowSize = 64 // Roll assumes windowSize is a power of 2 +const charOffset = 31 + +const blobBits = 13 +const blobSize = 1 << blobBits // 8k + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. +func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. 
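+// With n == RollsumBits (16), each input byte ends a chunk with probability
+// 2^-16, so data chunks are about 64KiB on average.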
+func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + rsum := rs.Digest() >> (blobBits + 1) + return blobBits + bits.TrailingZeros32(^rsum) +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index c91c43d85cd..3bb5286d92d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -8,11 +8,11 @@ import ( "archive/tar" "bytes" "encoding/binary" - "encoding/json" "fmt" "io" "time" + jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" ) @@ -20,6 +20,9 @@ import ( type TOC struct { Version int `json:"version"` Entries []FileMetadata `json:"entries"` + + // internal: used by unmarshalToc + StringsBuf bytes.Buffer `json:"-"` } type FileMetadata struct { @@ -27,25 +30,33 @@ type FileMetadata struct { Name string `json:"name"` Linkname string `json:"linkName,omitempty"` Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size"` - UID int `json:"uid"` - GID int `json:"gid"` - ModTime time.Time `json:"modtime"` - AccessTime time.Time `json:"accesstime"` - ChangeTime time.Time `json:"changetime"` - Devmajor int64 `json:"devMajor"` - Devminor int64 `json:"devMinor"` + Size int64 `json:"size,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` + ModTime *time.Time `json:"modtime,omitempty"` + AccessTime *time.Time `json:"accesstime,omitempty"` + ChangeTime *time.Time `json:"changetime,omitempty"` + Devmajor int64 `json:"devMajor,omitempty"` + Devminor int64 `json:"devMinor,omitempty"` Xattrs map[string]string `json:"xattrs,omitempty"` Digest string `json:"digest,omitempty"` Offset int64 `json:"offset,omitempty"` EndOffset int64 `json:"endOffset,omitempty"` - // Currently chunking is not supported. ChunkSize int64 `json:"chunkSize,omitempty"` ChunkOffset int64 `json:"chunkOffset,omitempty"` ChunkDigest string `json:"chunkDigest,omitempty"` + ChunkType string `json:"chunkType,omitempty"` + + // internal: computed by mergeTOCEntries. 
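+	// Chunks collects the per-chunk (TypeChunk) entries that belong to this
+	// file, so a file's chunks can be walked without rescanning the whole TOC.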
+ Chunks []*FileMetadata `json:"-"` } +const ( + ChunkTypeData = "" + ChunkTypeZeros = "zeros" +) + const ( TypeReg = "reg" TypeChunk = "chunk" @@ -123,6 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off Entries: metadata, } + var json = jsoniter.ConfigCompatibleWithStandardLibrary // Generate the manifest manifest, err := json.Marshal(toc) if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 6efc6a4c845..7de20feaaa0 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -4,8 +4,8 @@ import ( archivetar "archive/tar" "context" "encoding/base64" - "encoding/json" "fmt" + "hash" "io" "io/ioutil" "os" @@ -13,6 +13,8 @@ import ( "reflect" "sort" "strings" + "sync" + "sync/atomic" "syscall" "time" @@ -25,6 +27,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" + securejoin "github.com/cyphar/filepath-securejoin" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" @@ -41,24 +44,35 @@ const ( bigDataKey = "zstd-chunked-manifest" fileTypeZstdChunked = iota - fileTypeEstargz = iota + fileTypeEstargz + fileTypeNoCompression + fileTypeHole + + copyGoRoutines = 32 ) type compressedFileType int type chunkedDiffer struct { - stream ImageSourceSeekable - manifest []byte - layersMetadata map[string][]internal.FileMetadata - layersTarget map[string]string - tocOffset int64 - fileType compressedFileType + stream ImageSourceSeekable + manifest []byte + layersCache *layersCache + tocOffset int64 + fileType compressedFileType + + copyBuffer []byte gzipReader *pgzip.Reader + zstdReader *zstd.Decoder + rawReader io.Reader +} + +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, } -func timeToTimespec(time time.Time) (ts unix.Timespec) { - if time.IsZero() { +func timeToTimespec(time *time.Time) (ts unix.Timespec) { + if time == nil || time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 ts.Nsec = ((1 << 30) - 2) @@ -67,11 +81,29 @@ func timeToTimespec(time time.Time) (ts unix.Timespec) { return unix.NsecToTimespec(time.UnixNano()) } +func doHardLink(srcFd int, destDirFd int, destBase string) error { + doLink := func() error { + // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses + // /proc/self/fd doesn't and can be used with rootless. 
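+		// linkat(2) with AT_SYMLINK_FOLLOW resolves the /proc/self/fd magic
+		// link to the underlying open file, so the source does not need to be
+		// reachable by any path.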
+ srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) + return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW) + } + + err := doLink() + + // if the destination exists, unlink it first and try again + if err != nil && os.IsExist(err) { + unix.Unlinkat(destDirFd, destBase, 0) + return doLink() + } + return err +} + func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { src := fmt.Sprintf("/proc/self/fd/%d", srcFd) st, err := os.Stat(src) if err != nil { - return nil, -1, err + return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err) } copyWithFileRange, copyWithFileClone := true, true @@ -83,20 +115,7 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us if err == nil { defer destDir.Close() - doLink := func() error { - // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses - // /proc/self/fd doesn't and can be used with rootless. - srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) - return unix.Linkat(unix.AT_FDCWD, srcPath, int(destDir.Fd()), destBase, unix.AT_SYMLINK_FOLLOW) - } - - err := doLink() - - // if the destination exists, unlink it first and try again - if err != nil && os.IsExist(err) { - unix.Unlinkat(int(destDir.Fd()), destBase, 0) - err = doLink() - } + err := doHardLink(srcFd, int(destDir.Fd()), destBase) if err == nil { return nil, st.Size(), nil } @@ -106,63 +125,15 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us // If the destination file already exists, we shouldn't blow it away dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode) if err != nil { - return nil, -1, err + return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err) } err = driversCopy.CopyRegularToFile(src, dstFile, st, ©WithFileRange, ©WithFileClone) if err != nil { dstFile.Close() - return nil, -1, err - } - return dstFile, st.Size(), err -} - -func prepareOtherLayersCache(layersMetadata map[string][]internal.FileMetadata) map[string]map[string][]*internal.FileMetadata { - maps := make(map[string]map[string][]*internal.FileMetadata) - - for layerID, v := range layersMetadata { - r := make(map[string][]*internal.FileMetadata) - for i := range v { - if v[i].Digest != "" { - r[v[i].Digest] = append(r[v[i].Digest], &v[i]) - } - } - maps[layerID] = r - } - return maps -} - -func getLayersCache(store storage.Store) (map[string][]internal.FileMetadata, map[string]string, error) { - allLayers, err := store.Layers() - if err != nil { - return nil, nil, err - } - - layersMetadata := make(map[string][]internal.FileMetadata) - layersTarget := make(map[string]string) - for _, r := range allLayers { - manifestReader, err := store.LayerBigData(r.ID, bigDataKey) - if err != nil { - continue - } - defer manifestReader.Close() - manifest, err := ioutil.ReadAll(manifestReader) - if err != nil { - return nil, nil, err - } - var toc internal.TOC - if err := json.Unmarshal(manifest, &toc); err != nil { - continue - } - layersMetadata[r.ID] = toc.Entries - target, err := store.DifferTarget(r.ID) - if err != nil { - return nil, nil, err - } - layersTarget[r.ID] = target + return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err) } - - return layersMetadata, layersTarget, nil + return dstFile, st.Size(), nil } // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
@@ -179,67 +150,71 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat
 func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
 	manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
-	layersMetadata, layersTarget, err := getLayersCache(store)
+	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
 	}
 
 	return &chunkedDiffer{
-		stream:         iss,
-		manifest:       manifest,
-		layersMetadata: layersMetadata,
-		layersTarget:   layersTarget,
-		tocOffset:      tocOffset,
-		fileType:       fileTypeZstdChunked,
+		copyBuffer:  makeCopyBuffer(),
+		stream:      iss,
+		manifest:    manifest,
+		layersCache: layersCache,
+		tocOffset:   tocOffset,
+		fileType:    fileTypeZstdChunked,
 	}, nil
 }
 
 func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
 	manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("read estargz manifest: %w", err)
 	}
-	layersMetadata, layersTarget, err := getLayersCache(store)
+	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
 	}
 
 	return &chunkedDiffer{
-		stream:         iss,
-		manifest:       manifest,
-		layersMetadata: layersMetadata,
-		layersTarget:   layersTarget,
-		tocOffset:      tocOffset,
-		fileType:       fileTypeEstargz,
+		copyBuffer:  makeCopyBuffer(),
+		stream:      iss,
+		manifest:    manifest,
+		layersCache: layersCache,
+		tocOffset:   tocOffset,
+		fileType:    fileTypeEstargz,
 	}, nil
 }
 
+func makeCopyBuffer() []byte {
+	return make([]byte, 2<<20)
+}
+
 // copyFileFromOtherLayer copies a file from another layer
 // file is the file to look for.
 // source is the path to the source layer checkout.
-// otherFile contains the metadata for the file.
+// name is the path to the file to copy in source.
 // dirfd is an open file descriptor to the destination root directory.
 // useHardLinks defines whether the deduplication can be performed using hard links.
-func copyFileFromOtherLayer(file *internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
 	srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
 	if err != nil {
-		return false, nil, 0, err
+		return false, nil, 0, fmt.Errorf("open source file: %w", err)
 	}
 	defer unix.Close(srcDirfd)
 
-	srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0)
+	srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
 	if err != nil {
-		return false, nil, 0, err
+		return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err)
 	}
 	defer srcFile.Close()
 
 	dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks)
 	if err != nil {
-		return false, nil, 0, err
+		return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err)
 	}
-	return true, dstFile, written, err
+	return true, dstFile, written, nil
 }
 
 // canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile.
@@ -275,10 +250,6 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo return false } - xattrsToIgnore := map[string]interface{}{ - "security.selinux": true, - } - xattrs := make(map[string]string) for _, x := range listXattrs { v, err := system.Lgetxattr(path, x) @@ -301,45 +272,9 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo return canDedupMetadataWithHardLink(file, &otherFile) } -// findFileInOtherLayers finds the specified file in other layers. -// file is the file to look for. -// dirfd is an open file descriptor to the checkout root directory. -// layersMetadata contains the metadata for each layer in the storage. -// layersTarget maps each layer to its checkout on disk. -// useHardLinks defines whether the deduplication can be performed using hard links. -func findFileInOtherLayers(file *internal.FileMetadata, dirfd int, layersMetadata map[string]map[string][]*internal.FileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) { - // this is ugly, needs to be indexed - for layerID, checksums := range layersMetadata { - source, ok := layersTarget[layerID] - if !ok { - continue - } - files, found := checksums[file.Digest] - if !found { - continue - } - for _, candidate := range files { - // check if it is a valid candidate to dedup file - if useHardLinks && !canDedupMetadataWithHardLink(file, candidate) { - continue - } - - found, dstFile, written, err := copyFileFromOtherLayer(file, source, candidate, dirfd, useHardLinks) - if found && err == nil { - return found, dstFile, written, err - } - } - } - // If hard links deduplication was used and it has failed, try again without hard links. - if useHardLinks { - return findFileInOtherLayers(file, dirfd, layersMetadata, layersTarget, false) - } - return false, nil, 0, nil -} - -func getFileDigest(f *os.File) (digest.Digest, error) { +func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) { digester := digest.Canonical.Digester() - if _, err := io.Copy(digester.Hash(), f); err != nil { + if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil { return "", err } return digester.Digest(), nil @@ -401,7 +336,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di // file is the file to look for. // dirfd is an open fd to the destination checkout. // useHardLinks defines whether the deduplication can be performed using hard links. -func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) { sourceFile := filepath.Clean(filepath.Join("/", file.Name)) if !strings.HasPrefix(sourceFile, "/usr/") { // limit host deduplication to files under /usr. 
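+		// Files under /usr are normally owned by the package manager and are
+		// expected to be stable, which makes them safer candidates for
+		// content-addressed reuse than other host paths.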
@@ -430,7 +365,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool return false, nil, 0, err } - checksum, err := getFileDigest(f) + checksum, err := getFileDigest(f, buf) if err != nil { return false, nil, 0, err } @@ -452,7 +387,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool dstFile.Close() return false, nil, 0, err } - checksum, err = getFileDigest(f) + checksum, err = getFileDigest(f, buf) if err != nil { dstFile.Close() return false, nil, 0, err @@ -464,6 +399,19 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool return true, dstFile, written, nil } +// findFileInOtherLayers finds the specified file in other layers. +// cache is the layers cache to use. +// file is the file to look for. +// dirfd is an open file descriptor to the checkout root directory. +// useHardLinks defines whether the deduplication can be performed using hard links. +func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + target, name, err := cache.findFileInOtherLayers(file, useHardLinks) + if err != nil || name == "" { + return false, nil, 0, err + } + return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks) +} + func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { return nil @@ -490,22 +438,50 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption return nil } -type missingFile struct { - File *internal.FileMetadata +type originFile struct { + Root string + Path string + Offset int64 +} + +type missingFileChunk struct { Gap int64 + Hole bool + + File *internal.FileMetadata + + CompressedSize int64 + UncompressedSize int64 } -func (m missingFile) Length() int64 { - return m.File.EndOffset - m.File.Offset +type missingPart struct { + Hole bool + SourceChunk *ImageSourceChunk + OriginFile *originFile + Chunks []missingFileChunk } -type missingChunk struct { - RawChunk ImageSourceChunk - Files []missingFile +func (o *originFile) OpenFile() (io.ReadCloser, error) { + srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file: %w", err) + } + defer unix.Close(srcDirfd) + + srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file under target rootfs: %w", err) + } + + if _, err := srcFile.Seek(o.Offset, 0); err != nil { + srcFile.Close() + return nil, err + } + return srcFile, nil } // setFileAttrs sets the file attributes for file given metadata -func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { if file == nil || file.Fd() < 0 { return errors.Errorf("invalid file") } @@ -515,209 +491,600 @@ func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetada if err != nil { return err } + + // If it is a symlink, force to use the path if t == tar.TypeSymlink { - return nil + usePath = true } - if err := unix.Fchown(fd, metadata.UID, metadata.GID); err != nil { + baseName := "" + if usePath { + dirName := filepath.Dir(metadata.Name) + if dirName != "" { + parentFd, err := openFileUnderRoot(dirName, dirfd, 
unix.O_PATH|unix.O_DIRECTORY, 0) + if err != nil { + return err + } + defer parentFd.Close() + + dirfd = int(parentFd.Fd()) + } + baseName = filepath.Base(metadata.Name) + } + + doChown := func() error { + if usePath { + return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchown(fd, metadata.UID, metadata.GID) + } + + doSetXattr := func(k string, v []byte) error { + return unix.Fsetxattr(fd, k, v, 0) + } + + doUtimes := func() error { + ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} + if usePath { + return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0) + } + + doChmod := func() error { + if usePath { + return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchmod(fd, uint32(mode)) + } + + if err := doChown(); err != nil { if !options.IgnoreChownErrors { - return err + return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err) } } + canIgnore := func(err error) bool { + return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP) + } + for k, v := range metadata.Xattrs { + if _, found := xattrsToIgnore[k]; found { + continue + } data, err := base64.StdEncoding.DecodeString(v) if err != nil { - return err + return fmt.Errorf("decode xattr %q: %w", v, err) } - if err := unix.Fsetxattr(fd, k, data, 0); err != nil { - return err + if err := doSetXattr(k, data); !canIgnore(err) { + return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err) } } - ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} - if err := unix.UtimesNanoAt(fd, "", ts, 0); err != nil && errors.Is(err, unix.ENOSYS) { - return err + if err := doUtimes(); !canIgnore(err) { + return fmt.Errorf("set utimes for %q: %w", metadata.Name, err) } - if err := unix.Fchmod(fd, uint32(mode)); err != nil { - return err + if err := doChmod(); !canIgnore(err) { + return fmt.Errorf("chmod %q: %w", metadata.Name, err) } return nil } -// openFileUnderRoot safely opens a file under the specified root directory using openat2 -// name is the path to open relative to dirfd. -// dirfd is an open file descriptor to the target checkout directory. -// flags are the flags top pass to the open syscall. -// mode specifies the mode to use for newly created files. -func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { +func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + root := fmt.Sprintf("/proc/self/fd/%d", dirfd) + + targetRoot, err := os.Readlink(root) + if err != nil { + return -1, err + } + + hasNoFollow := (flags & unix.O_NOFOLLOW) != 0 + + fd := -1 + // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the + // last component as the path to openat(). 
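+	// This mirrors O_NOFOLLOW semantics in userspace: the parent chain is
+	// resolved safely with securejoin, while the final component is handed to
+	// openat() unresolved, so a trailing symlink still fails the open.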
+	if hasNoFollow {
+		dirName := filepath.Dir(name)
+		if dirName != "" {
+			newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name))
+			if err != nil {
+				return -1, err
+			}
+			root = newRoot
+		}
+
+		parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+		if err != nil {
+			return -1, err
+		}
+		defer unix.Close(parentDirfd)
+
+		fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode))
+		if err != nil {
+			return -1, err
+		}
+	} else {
+		newPath, err := securejoin.SecureJoin(root, name)
+		if err != nil {
+			return -1, err
+		}
+		fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
+		if err != nil {
+			return -1, err
+		}
+	}
+
+	target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd))
+	if err != nil {
+		unix.Close(fd)
+		return -1, err
+	}
+
+	// Add an additional check to make sure the opened fd is inside the rootfs
+	if !strings.HasPrefix(target, targetRoot) {
+		unix.Close(fd)
+		return -1, fmt.Errorf("error while resolving %q.  It resolves outside the root directory", name)
+	}
+
+	return fd, err
+}
+
+func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
 	how := unix.OpenHow{
 		Flags:   flags,
 		Mode:    uint64(mode & 07777),
 		Resolve: unix.RESOLVE_IN_ROOT,
 	}
+	return unix.Openat2(dirfd, name, &how)
+}
 
-	fd, err := unix.Openat2(dirfd, name, &how)
-	if err != nil {
-		return nil, err
+// skipOpenat2 is set when openat2 is not supported by the underlying kernel, to
+// avoid trying it again.
+var skipOpenat2 int32
+
+// openFileUnderRootRaw tries to open a file using openat2 and, if that is not
+// supported, falls back to a userspace lookup.
+func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+	var fd int
+	var err error
+	if atomic.LoadInt32(&skipOpenat2) > 0 {
+		fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+	} else {
+		fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode)
+		// If the function failed with ENOSYS, switch off the support for openat2
+		// and fall back to using securejoin.
+		if err != nil && errors.Is(err, unix.ENOSYS) {
+			atomic.StoreInt32(&skipOpenat2, 1)
+			fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+		}
 	}
-	return os.NewFile(uintptr(fd), name), nil
+	return fd, err
 }
 
-func (c *chunkedDiffer) createFileFromCompressedStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) (err error) {
-	file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		err2 := file.Close()
-		if err == nil {
-			err = err2
+// openFileUnderRoot safely opens a file under the specified root directory using openat2
+// name is the path to open relative to dirfd.
+// dirfd is an open file descriptor to the target checkout directory.
+// flags are the flags to pass to the open syscall.
+// mode specifies the mode to use for newly created files.
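+// A typical call (illustrative only), with dirfd opened O_PATH on the layer
+// checkout root:
+//
+//	f, err := openFileUnderRoot("etc/shadow", dirfd, unix.O_RDONLY|unix.O_NOFOLLOW, 0)
+//
+// never resolves outside the checkout, even when path components are symlinks.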
+func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + hasCreate := (flags & unix.O_CREAT) != 0 + if errors.Is(err, unix.ENOENT) && hasCreate { + parent := filepath.Dir(name) + if parent != "" { + newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err2 == nil { + defer newDirfd.Close() + fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } } - }() + } + return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) +} - digester := digest.Canonical.Digester() - checksum := digester.Hash() - to := io.MultiWriter(file, checksum) +// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// mode specifies the mode to use for newly created files. +func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + if errors.Is(err, unix.ENOENT) { + parent := filepath.Dir(name) + if parent != "" { + pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode) + if err2 != nil { + return nil, err + } + defer pDir.Close() - switch c.fileType { - case fileTypeZstdChunked: - z, err := zstd.NewReader(reader) - if err != nil { - return err - } - defer z.Close() + baseName := filepath.Base(name) - if _, err := io.Copy(to, io.LimitReader(z, metadata.Size)); err != nil { - return err + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil { + return nil, err + } + + fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } } - if _, err := io.Copy(ioutil.Discard, reader); err != nil { - return err + } + return nil, err +} + +func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) { + switch { + case partCompression == fileTypeHole: + // The entire part is a hole. Do not need to read from a file. + c.rawReader = nil + return fileTypeHole, nil + case mf.Hole: + // Only the missing chunk in the requested part refers to a hole. + // The received data must be discarded. 
+ limitReader := io.LimitReader(from, mf.CompressedSize) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) + return fileTypeHole, err + case partCompression == fileTypeZstdChunked: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.zstdReader == nil { + r, err := zstd.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.zstdReader = r + } else { + if err := c.zstdReader.Reset(c.rawReader); err != nil { + return partCompression, err + } } - case fileTypeEstargz: + case partCompression == fileTypeEstargz: + c.rawReader = io.LimitReader(from, mf.CompressedSize) if c.gzipReader == nil { - r, err := pgzip.NewReader(reader) + r, err := pgzip.NewReader(c.rawReader) if err != nil { - return err + return partCompression, err } c.gzipReader = r } else { - if err := c.gzipReader.Reset(reader); err != nil { - return err + if err := c.gzipReader.Reset(c.rawReader); err != nil { + return partCompression, err } } - defer c.gzipReader.Close() + case partCompression == fileTypeNoCompression: + c.rawReader = io.LimitReader(from, mf.UncompressedSize) + default: + return partCompression, fmt.Errorf("unknown file type %q", c.fileType) + } + return partCompression, nil +} - if _, err := io.Copy(to, io.LimitReader(c.gzipReader, metadata.Size)); err != nil { +// hashHole writes SIZE zeros to the specified hasher +func hashHole(h hash.Hash, size int64, copyBuffer []byte) error { + count := int64(len(copyBuffer)) + if size < count { + count = size + } + for i := int64(0); i < count; i++ { + copyBuffer[i] = 0 + } + for size > 0 { + count = int64(len(copyBuffer)) + if size < count { + count = size + } + if _, err := h.Write(copyBuffer[:count]); err != nil { return err } - if _, err := io.Copy(ioutil.Discard, reader); err != nil { + size -= count + } + return nil +} + +// appendHole creates a hole with the specified size at the open fd. +func appendHole(fd int, size int64) error { + off, err := unix.Seek(fd, size, unix.SEEK_CUR) + if err != nil { + return err + } + // Make sure the file size is changed. It might be the last hole and no other data written afterwards. 
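+	// Seeking past the end does not by itself extend the file, so without the
+	// ftruncate below a trailing hole would be silently dropped.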
+ if err := unix.Ftruncate(fd, off); err != nil { + return err + } + return nil +} + +func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error { + switch compression { + case fileTypeZstdChunked: + defer c.zstdReader.Reset(nil) + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeEstargz: + defer c.gzipReader.Close() + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeNoCompression: + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeHole: + if err := appendHole(int(destFile.file.Fd()), size); err != nil { + return err + } + if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil { return err } default: return fmt.Errorf("unknown file type %q", c.fileType) } + return nil +} + +type destinationFile struct { + dirfd int + file *os.File + digester digest.Digester + hash hash.Hash + to io.Writer + metadata *internal.FileMetadata + options *archive.TarOptions +} + +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) { + file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) + if err != nil { + return nil, err + } + + digester := digest.Canonical.Digester() + hash := digester.Hash() + to := io.MultiWriter(file, hash) + + return &destinationFile{ + file: file, + digester: digester, + hash: hash, + to: to, + metadata: metadata, + options: options, + dirfd: dirfd, + }, nil +} - manifestChecksum, err := digest.Parse(metadata.Digest) +func (d *destinationFile) Close() error { + manifestChecksum, err := digest.Parse(d.metadata.Digest) if err != nil { return err } - if digester.Digest() != manifestChecksum { - return fmt.Errorf("checksum mismatch for %q", dest) + if d.digester.Digest() != manifestChecksum { + return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum) } - return setFileAttrs(file, mode, metadata, options) + + return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) } -func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error { - for mc := 0; ; mc++ { - var part io.ReadCloser - select { - case p := <-streams: - part = p - case err := <-errs: - return err - } - if part == nil { - if mc == len(missingChunks) { - break +func closeDestinationFiles(files chan *destinationFile, errors chan error) { + for f := range files { + errors <- f.Close() + } + close(errors) +} + +func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { + var destFile *destinationFile + + filesToClose := make(chan *destinationFile, 3) + closeFilesErrors := make(chan error, 2) + + go closeDestinationFiles(filesToClose, closeFilesErrors) + defer func() { + close(filesToClose) + for e := range closeFilesErrors { + if e != nil && Err == nil { + Err = e } - return errors.Errorf("invalid stream returned") } - if mc == len(missingChunks) { - part.Close() - return errors.Errorf("too many chunks returned") + }() + + for _, missingPart := range missingParts { + var part 
io.ReadCloser + partCompression := c.fileType + switch { + case missingPart.Hole: + partCompression = fileTypeHole + case missingPart.OriginFile != nil: + var err error + part, err = missingPart.OriginFile.OpenFile() + if err != nil { + return err + } + partCompression = fileTypeNoCompression + case missingPart.SourceChunk != nil: + select { + case p := <-streams: + part = p + case err := <-errs: + return err + } + if part == nil { + return errors.Errorf("invalid stream returned") + } + default: + return errors.Errorf("internal error: missing part misses both local and remote data stream") } - for _, mf := range missingChunks[mc].Files { + for _, mf := range missingPart.Chunks { if mf.Gap > 0 { limitReader := io.LimitReader(part, mf.Gap) - _, err := io.Copy(ioutil.Discard, limitReader) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) if err != nil { - part.Close() - return err + Err = err + goto exit } continue } - limitReader := io.LimitReader(part, mf.Length()) + if mf.File.Name == "" { + Err = errors.Errorf("file name empty") + goto exit + } - if err := c.createFileFromCompressedStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil { - part.Close() - return err + compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf) + if err != nil { + Err = err + goto exit + } + + // Open the new file if it is different that what is already + // opened + if destFile == nil || destFile.metadata.Name != mf.File.Name { + var err error + if destFile != nil { + cleanup: + for { + select { + case err = <-closeFilesErrors: + if err != nil { + Err = err + goto exit + } + default: + break cleanup + } + } + filesToClose <- destFile + } + destFile, err = openDestinationFile(dirfd, mf.File, options) + if err != nil { + Err = err + goto exit + } + } + + if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil { + Err = err + goto exit + } + if c.rawReader != nil { + if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil { + Err = err + goto exit + } + } + } + exit: + if part != nil { + part.Close() + if Err != nil { + break } } - part.Close() } + + if destFile != nil { + return destFile.Close() + } + return nil } -func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk { - if len(missingChunks) <= target { - return missingChunks +func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { + getGap := func(missingParts []missingPart, i int) int { + prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length + return int(missingParts[i].SourceChunk.Offset - prev) + } + getCost := func(missingParts []missingPart, i int) int { + cost := getGap(missingParts, i) + if missingParts[i-1].OriginFile != nil { + cost += int(missingParts[i-1].SourceChunk.Length) + } + if missingParts[i].OriginFile != nil { + cost += int(missingParts[i].SourceChunk.Length) + } + return cost + } + + // simple case: merge chunks from the same file. 
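+	// For example, two adjacent ranges of the same file with gap == 0 collapse
+	// into one range whose length, compressed size and uncompressed size are the
+	// sums of the originals; everything else is left alone here and only merged
+	// later by the cost heuristic below.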
+ newMissingParts := missingParts[0:1] + prevIndex := 0 + for i := 1; i < len(missingParts); i++ { + gap := getGap(missingParts, i) + if gap == 0 && missingParts[prevIndex].OriginFile == nil && + missingParts[i].OriginFile == nil && + !missingParts[prevIndex].Hole && !missingParts[i].Hole && + len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 && + missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name { + missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize + missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize + } else { + newMissingParts = append(newMissingParts, missingParts[i]) + prevIndex++ + } } + missingParts = newMissingParts - getGap := func(missingChunks []missingChunk, i int) int { - prev := missingChunks[i-1].RawChunk.Offset + missingChunks[i-1].RawChunk.Length - return int(missingChunks[i].RawChunk.Offset - prev) + if len(missingParts) <= target { + return missingParts } // this implementation doesn't account for duplicates, so it could merge // more than necessary to reach the specified target. Since target itself // is a heuristic value, it doesn't matter. - var gaps []int - for i := 1; i < len(missingChunks); i++ { - gaps = append(gaps, getGap(missingChunks, i)) + costs := make([]int, len(missingParts)-1) + for i := 1; i < len(missingParts); i++ { + costs[i-1] = getCost(missingParts, i) } - sort.Ints(gaps) + sort.Ints(costs) - toShrink := len(missingChunks) - target - targetValue := gaps[toShrink-1] + toShrink := len(missingParts) - target + if toShrink >= len(costs) { + toShrink = len(costs) - 1 + } + targetValue := costs[toShrink] - newMissingChunks := missingChunks[0:1] - for i := 1; i < len(missingChunks); i++ { - gap := getGap(missingChunks, i) - if gap > targetValue { - newMissingChunks = append(newMissingChunks, missingChunks[i]) + newMissingParts = missingParts[0:1] + for i := 1; i < len(missingParts); i++ { + if getCost(missingParts, i) > targetValue { + newMissingParts = append(newMissingParts, missingParts[i]) } else { - prev := &newMissingChunks[len(newMissingChunks)-1] - prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length + gap := getGap(missingParts, i) + prev := &newMissingParts[len(newMissingParts)-1] + prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + prev.Hole = false + prev.OriginFile = nil if gap > 0 { - gapFile := missingFile{ + gapFile := missingFileChunk{ Gap: int64(gap), } - prev.Files = append(prev.Files, gapFile) + prev.Chunks = append(prev.Chunks, gapFile) } - prev.Files = append(prev.Files, missingChunks[i].Files...) + prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...) } } - return newMissingChunks + return newMissingParts } -func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error { +func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { var chunksToRequest []ImageSourceChunk - for _, c := range missingChunks { - chunksToRequest = append(chunksToRequest, c.RawChunk) + for _, c := range missingParts { + if c.OriginFile == nil && !c.Hole { + chunksToRequest = append(chunksToRequest, *c.SourceChunk) + } } // There are some missing files. Prepare a multirange request for the missing chunks. 
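+	// Only ranges that must come from the registry are requested: parts backed
+	// by a local file (OriginFile) or consisting entirely of zeros (Hole) are
+	// skipped above and satisfied locally.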
@@ -731,32 +1098,32 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChun } if _, ok := err.(ErrBadRequest); ok { - requested := len(missingChunks) + requested := len(missingParts) // If the server cannot handle at least 64 chunks in a single request, just give up. if requested < 64 { return err } // Merge more chunks to request - missingChunks = mergeMissingChunks(missingChunks, requested/2) + missingParts = mergeMissingChunks(missingParts, requested/2) continue } return err } - if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil { + if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil { return err } return nil } -func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { - parent := filepath.Dir(metadata.Name) - base := filepath.Base(metadata.Name) +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { + parent := filepath.Dir(name) + base := filepath.Base(name) parentFd := dirfd if parent != "." { - parentFile, err := openFileUnderRoot(parent, dirfd, unix.O_DIRECTORY|unix.O_PATH|unix.O_RDONLY, 0) + parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0) if err != nil { return err } @@ -766,21 +1133,21 @@ func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opt if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { if !os.IsExist(err) { - return err + return fmt.Errorf("mkdir %q: %w", name, err) } } - file, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_RDONLY, 0) + file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0) if err != nil { return err } defer file.Close() - return setFileAttrs(file, mode, metadata, options) + return setFileAttrs(dirfd, file, mode, metadata, options, false) } func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { - sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_RDONLY, 0) + sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) if err != nil { return err } @@ -789,7 +1156,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." { - f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0) + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) if err != nil { return err } @@ -797,25 +1164,35 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti destDirFd = int(f.Fd()) } - err = unix.Linkat(int(sourceFile.Fd()), "", destDirFd, destBase, unix.AT_EMPTY_PATH) + err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase) if err != nil { - return err + return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) } - newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY, 0) + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0) if err != nil { + // If the target is a symlink, open the file with O_PATH. 
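+		// (O_WRONLY|O_NOFOLLOW on a symlink fails with ELOOP; an O_PATH fd is
+		// still enough for setFileAttrs to adjust ownership and times by path.)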
+ if errors.Is(err, unix.ELOOP) { + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, true) + } return err } defer newFile.Close() - return setFileAttrs(newFile, mode, metadata, options) + return setFileAttrs(dirfd, newFile, mode, metadata, options, false) } func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." { - f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0) + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) if err != nil { return err } @@ -823,7 +1200,10 @@ func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, o destDirFd = int(f.Fd()) } - return unix.Symlinkat(metadata.Linkname, destDirFd, destBase) + if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil { + return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + return nil } type whiteoutHandler struct { @@ -832,13 +1212,16 @@ type whiteoutHandler struct { } func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { - file, err := openFileUnderRoot(path, d.Dirfd, unix.O_RDONLY, 0) + file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0) if err != nil { return err } defer file.Close() - return unix.Fsetxattr(int(file.Fd()), name, value, 0) + if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil { + return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err) + } + return nil } func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { @@ -847,7 +1230,7 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { dirfd := d.Dirfd if dir != "" { - dir, err := openFileUnderRoot(dir, d.Dirfd, unix.O_RDONLY, 0) + dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0) if err != nil { return err } @@ -856,12 +1239,16 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { dirfd = int(dir.Fd()) } - return unix.Mknodat(dirfd, base, mode, dev) + if err := unix.Mknodat(dirfd, base, mode, dev); err != nil { + return fmt.Errorf("mknod %q: %w", path, err) + } + + return nil } func checkChownErr(err error, name string, uid, gid int) error { if errors.Is(err, syscall.EINVAL) { - return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name) + return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) } return err } @@ -899,7 +1286,69 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo return def } +type findAndCopyFileOptions struct { + useHardLinks bool + enableHostDedup bool + ostreeRepos []string + options *archive.TarOptions +} + +func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { + finalizeFile := func(dstFile *os.File) error { + if dstFile != nil { + defer dstFile.Close() + if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { + return err + } + } + return nil + } + + found, 
dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + if copyOptions.enableHostDedup { + found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + } + return false, nil +} + func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) { + defer c.layersCache.release() + defer func() { + if c.zstdReader != nil { + c.zstdReader.Close() + } + }() + bigData := map[string][]byte{ bigDataKey: c.manifest, } @@ -927,14 +1376,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":") // Generate the manifest - var toc internal.TOC - if err := json.Unmarshal(c.manifest, &toc); err != nil { + toc, err := unmarshalToc(c.manifest) + if err != nil { return output, err } whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) - var missingChunks []missingChunk + var missingParts []missingPart mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { @@ -956,17 +1405,61 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0) if err != nil { - return output, err + return output, fmt.Errorf("cannot open %q: %w", dest, err) } defer unix.Close(dirfd) - otherLayersCache := prepareOtherLayersCache(c.layersMetadata) - // hardlinks can point to missing files. 
So create them after all files // are retrieved var hardLinks []hardLinkToCreate - missingChunksSize, totalChunksSize := int64(0), int64(0) + missingPartsSize, totalChunksSize := int64(0), int64(0) + + copyOptions := findAndCopyFileOptions{ + useHardLinks: useHardLinks, + enableHostDedup: enableHostDedup, + ostreeRepos: ostreeRepos, + options: options, + } + + type copyFileJob struct { + njob int + index int + mode os.FileMode + metadata *internal.FileMetadata + + found bool + err error + } + + var wg sync.WaitGroup + + copyResults := make([]copyFileJob, len(mergedEntries)) + + copyFileJobs := make(chan copyFileJob) + defer func() { + if copyFileJobs != nil { + close(copyFileJobs) + } + wg.Wait() + }() + + for i := 0; i < copyGoRoutines; i++ { + wg.Add(1) + jobs := copyFileJobs + + go func() { + defer wg.Done() + for job := range jobs { + found, err := c.findAndCopyFile(dirfd, job.metadata, ©Options, job.mode) + job.err = err + job.found = found + copyResults[job.njob] = job + } + }() + } + + filesToWaitFor := 0 for i, r := range mergedEntries { if options.ForceMask != nil { value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777) @@ -1016,7 +1509,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra return err } defer file.Close() - if err := setFileAttrs(file, mode, &r, options); err != nil { + if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil { return err } return nil @@ -1028,7 +1521,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } case tar.TypeDir: - if err := safeMkdir(dirfd, mode, &r, options); err != nil { + if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { return output, err } continue @@ -1062,74 +1555,95 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra totalChunksSize += r.Size - finalizeFile := func(dstFile *os.File) error { - if dstFile != nil { - defer dstFile.Close() - if err := setFileAttrs(dstFile, mode, &r, options); err != nil { - return err - } + if t == tar.TypeReg { + index := i + njob := filesToWaitFor + job := copyFileJob{ + mode: mode, + metadata: &mergedEntries[index], + index: index, + njob: njob, } - return nil + copyFileJobs <- job + filesToWaitFor++ } + } - found, dstFile, _, err := findFileInOtherLayers(&r, dirfd, otherLayersCache, c.layersTarget, useHardLinks) - if err != nil { - return output, err - } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } - continue - } + close(copyFileJobs) + copyFileJobs = nil - found, dstFile, _, err = findFileInOSTreeRepos(&r, ostreeRepos, dirfd, useHardLinks) - if err != nil { - return output, err + wg.Wait() + + for _, res := range copyResults[:filesToWaitFor] { + if res.err != nil { + return output, res.err } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } + // the file was already copied to its destination + // so nothing left to do. + if res.found { continue } - if enableHostDedup { - found, dstFile, _, err = findFileOnTheHost(&r, dirfd, useHardLinks) - if err != nil { - return output, err - } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } - continue + r := &mergedEntries[res.index] + + missingPartsSize += r.Size + + remainingSize := r.Size + + // the file is missing, attempt to find individual chunks. 
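
The job/result pattern above fans file copies out to `copyGoRoutines` workers while keeping results addressable by job index, so the follow-up pass can walk them in deterministic order. A stripped-down sketch of the same pattern; the names and the stand-in work function are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

type job struct {
	njob int
	name string
}

type result struct {
	njob  int
	found bool
	err   error
}

func main() {
	const workers = 8 // the real code sizes this as copyGoRoutines

	names := []string{"etc/passwd", "usr/bin/true", "var/empty"}
	results := make([]result, len(names))
	jobs := make(chan job)

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range jobs {
				// Stand-in for findAndCopyFile: try to satisfy the file
				// from a local source instead of the network. Each slot of
				// results is written by exactly one worker, so no locking
				// is needed.
				results[j.njob] = result{njob: j.njob, found: len(j.name)%2 == 0}
			}
		}()
	}

	for i, n := range names {
		jobs <- job{njob: i, name: n}
	}
	close(jobs) // lets the workers drain and exit
	wg.Wait()   // results[] is safe to read only after this

	for _, r := range results {
		fmt.Printf("%d found=%v err=%v\n", r.njob, r.found, r.err)
	}
}
```
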
+ for _, chunk := range r.Chunks { + compressedSize := int64(chunk.EndOffset - chunk.Offset) + size := remainingSize + if chunk.ChunkSize > 0 { + size = chunk.ChunkSize } - } + remainingSize = remainingSize - size - missingChunksSize += r.Size - if t == tar.TypeReg { rawChunk := ImageSourceChunk{ - Offset: uint64(r.Offset), - Length: uint64(r.EndOffset - r.Offset), + Offset: uint64(chunk.Offset), + Length: uint64(compressedSize), } - - file := missingFile{ - File: &mergedEntries[i], + file := missingFileChunk{ + File: &mergedEntries[res.index], + CompressedSize: compressedSize, + UncompressedSize: size, } - - missingChunks = append(missingChunks, missingChunk{ - RawChunk: rawChunk, - Files: []missingFile{ + mp := missingPart{ + SourceChunk: &rawChunk, + Chunks: []missingFileChunk{ file, }, - }) + } + + switch chunk.ChunkType { + case internal.ChunkTypeData: + root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk) + if err != nil { + return output, err + } + if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) { + missingPartsSize -= size + mp.OriginFile = &originFile{ + Root: root, + Path: path, + Offset: offset, + } + } + case internal.ChunkTypeZeros: + missingPartsSize -= size + mp.Hole = true + // Mark all chunks belonging to the missing part as holes + for i := range mp.Chunks { + mp.Chunks[i].Hole = true + } + } + missingParts = append(missingParts, mp) } } // There are some missing files. Prepare a multirange request for the missing chunks. - if len(missingChunks) > 0 { - missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks) - if err := c.retrieveMissingFiles(dest, dirfd, missingChunks, options); err != nil { + if len(missingParts) > 0 { + missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) + if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil { return output, err } } @@ -1141,31 +1655,69 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } if totalChunksSize > 0 { - logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingChunksSize, totalChunksSize, float32(missingChunksSize*100.0)/float32(totalChunksSize)) + logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) } return output, nil } +func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { + // ignore the metadata files for the estargz format. + if fileType != fileTypeEstargz { + return false + } + switch e.Name { + // ignore the metadata files for the estargz format. + case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName: + return true + } + return false +} + func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) { - var mergedEntries []internal.FileMetadata - var prevEntry *internal.FileMetadata - for _, entry := range entries { - e := entry + countNextChunks := func(start int) int { + count := 0 + for _, e := range entries[start:] { + if e.Type != TypeChunk { + return count + } + count++ + } + return count + } - // ignore the metadata files for the estargz format. 
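
`ChunkTypeZeros` parts are never fetched over the network; the destination can represent them as holes in a sparse file. A small sketch of how a hole is created on Linux, assuming the target filesystem supports sparse files: seeking past the region and writing afterwards leaves the skipped bytes unallocated, and readers see zeros.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

// writeWithHole writes a, then leaves a hole of holeLen zero bytes, then
// writes b, without ever writing the zeros themselves.
func writeWithHole(path string, a, b []byte, holeLen int64) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	if _, err := f.Write(a); err != nil {
		return err
	}
	// Seeking past EOF and writing afterwards leaves an unallocated hole;
	// readers see zeros, but no blocks are consumed on disk.
	if _, err := f.Seek(holeLen, io.SeekCurrent); err != nil {
		return err
	}
	_, err = f.Write(b)
	return err
}

func main() {
	if err := writeWithHole("/tmp/sparse-demo", []byte("head"), []byte("tail"), 1<<20); err != nil {
		log.Fatal(err)
	}
	fi, err := os.Stat("/tmp/sparse-demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("apparent size:", fi.Size()) // 1 MiB plus 8 bytes of data
}
```
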
- if fileType == fileTypeEstargz && (e.Name == estargz.PrefetchLandmark || e.Name == estargz.NoPrefetchLandmark || e.Name == estargz.TOCTarName) { + size := 0 + for _, entry := range entries { + if mustSkipFile(fileType, entry) { continue } + if entry.Type != TypeChunk { + size++ + } + } + mergedEntries := make([]internal.FileMetadata, size) + m := 0 + for i := 0; i < len(entries); i++ { + e := entries[i] + if mustSkipFile(fileType, e) { + continue + } if e.Type == TypeChunk { - if prevEntry == nil || prevEntry.Type != TypeReg { - return nil, errors.New("chunk type without a regular file") + return nil, fmt.Errorf("chunk type without a regular file") + } + + if e.Type == TypeReg { + nChunks := countNextChunks(i + 1) + + e.Chunks = make([]*internal.FileMetadata, nChunks+1) + for j := 0; j <= nChunks; j++ { + e.Chunks[j] = &entries[i+j] + e.EndOffset = entries[i+j].EndOffset } - prevEntry.EndOffset = e.EndOffset - continue + i += nChunks } - mergedEntries = append(mergedEntries, e) - prevEntry = &e + mergedEntries[m] = e + m++ } // stargz/estargz doesn't store EndOffset so let's calculate it here lastOffset := c.tocOffset @@ -1176,6 +1728,47 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i if mergedEntries[i].Offset != 0 { lastOffset = mergedEntries[i].Offset } + + lastChunkOffset := mergedEntries[i].EndOffset + for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- { + mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset + mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset + lastChunkOffset = mergedEntries[i].Chunks[j].Offset + } } return mergedEntries, nil } + +// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the +// same digest as chunk.ChunkDigest +func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { + parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + if err != nil { + return false + } + defer unix.Close(parentDirfd) + + fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0) + if err != nil { + return false + } + defer fd.Close() + + if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil { + return false + } + + r := io.LimitReader(fd, chunk.ChunkSize) + digester := digest.Canonical.Digester() + + if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil { + return false + } + + digest, err := digest.Parse(chunk.ChunkDigest) + if err != nil { + return false + } + + return digester.Digest() == digest +} diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index e6622cf1466..f6e0cfcfe86 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -12,109 +12,109 @@ type ThinpoolOptionsConfig struct { // grown. This is specified in terms of % of pool size. So a value of // 20 means that when threshold is hit, pool will be grown by 20% of // existing pool size. - AutoExtendPercent string `toml:"autoextend_percent"` + AutoExtendPercent string `toml:"autoextend_percent,omitempty"` // AutoExtendThreshold determines the pool extension threshold in terms // of percentage of pool size. For example, if threshold is 60, that // means when pool is 60% full, threshold has been hit. 
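
`validateChunkChecksum` above combines `io.LimitReader` with the opencontainers/go-digest package to hash exactly one byte range. The same verification pattern in isolation, as a runnable sketch:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/opencontainers/go-digest"
)

// rangeMatchesDigest reports whether file[offset : offset+length] hashes to
// the expected digest string (e.g. "sha256:2cf2...").
func rangeMatchesDigest(path string, offset, length int64, expected string) (bool, error) {
	want, err := digest.Parse(expected)
	if err != nil {
		return false, err // malformed digest strings never validate
	}
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()
	if _, err := f.Seek(offset, io.SeekStart); err != nil {
		return false, err
	}

	digester := digest.Canonical.Digester()
	if _, err := io.Copy(digester.Hash(), io.LimitReader(f, length)); err != nil {
		return false, err
	}
	return digester.Digest() == want, nil
}

func main() {
	f, err := os.CreateTemp("", "chunk")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString("xxhelloxx"); err != nil {
		log.Fatal(err)
	}
	f.Close()

	ok, err := rangeMatchesDigest(f.Name(), 2, 5, digest.FromString("hello").String())
	fmt.Println(ok, err) // true <nil>
}
```
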
-	AutoExtendThreshold string `toml:"autoextend_threshold"`
+	AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"`
 
 	// BaseSize specifies the size to use when creating the base device,
 	// which limits the size of images and containers.
-	BaseSize string `toml:"basesize"`
+	BaseSize string `toml:"basesize,omitempty"`
 
 	// BlockSize specifies a custom blocksize to use for the thin pool.
-	BlockSize string `toml:"blocksize"`
+	BlockSize string `toml:"blocksize,omitempty"`
 
 	// DirectLvmDevice specifies a custom block storage device to use for
 	// the thin pool.
-	DirectLvmDevice string `toml:"directlvm_device"`
+	DirectLvmDevice string `toml:"directlvm_device,omitempty"`
 
 	// DirectLvmDeviceForce wipes device even if device already has a
 	// filesystem
-	DirectLvmDeviceForce string `toml:"directlvm_device_force"`
+	DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"`
 
 	// Fs specifies the filesystem type to use for the base device.
-	Fs string `toml:"fs"`
+	Fs string `toml:"fs,omitempty"`
 
 	// log_level sets the log level of devicemapper.
-	LogLevel string `toml:"log_level"`
+	LogLevel string `toml:"log_level,omitempty"`
 
 	// MetadataSize specifies the size of the metadata for the thinpool.
 	// It will be used with the `pvcreate --metadata` option.
-	MetadataSize string `toml:"metadatasize"`
+	MetadataSize string `toml:"metadatasize,omitempty"`
 
 	// MinFreeSpace specifies the min free space percent in a thin pool
 	// required for new device creation to succeed.
-	MinFreeSpace string `toml:"min_free_space"`
+	MinFreeSpace string `toml:"min_free_space,omitempty"`
 
 	// MkfsArg specifies extra mkfs arguments to be used when creating the
 	// base device.
-	MkfsArg string `toml:"mkfsarg"`
+	MkfsArg string `toml:"mkfsarg,omitempty"`
 
 	// MountOpt specifies extra mount options used when mounting the thin
 	// devices.
-	MountOpt string `toml:"mountopt"`
+	MountOpt string `toml:"mountopt,omitempty"`
 
 	// Size
-	Size string `toml:"size"`
+	Size string `toml:"size,omitempty"`
 
 	// UseDeferredDeletion marks device for deferred deletion
-	UseDeferredDeletion string `toml:"use_deferred_deletion"`
+	UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"`
 
 	// UseDeferredRemoval marks device for deferred removal
-	UseDeferredRemoval string `toml:"use_deferred_removal"`
+	UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"`
 
 	// XfsNoSpaceMaxRetries specifies the maximum number of
 	// retries XFS should attempt to complete IO when ENOSPC (no space)
 	// error is returned by underlying storage device.
-	XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
+	XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"`
 }
 
 type AufsOptionsConfig struct {
 	// MountOpt specifies extra mount options used when mounting
-	MountOpt string `toml:"mountopt"`
+	MountOpt string `toml:"mountopt,omitempty"`
 }
 
 type BtrfsOptionsConfig struct {
 	// MinSpace is the minimal space allocated to the device
-	MinSpace string `toml:"min_space"`
+	MinSpace string `toml:"min_space,omitempty"`
 
 	// Size
-	Size string `toml:"size"`
+	Size string `toml:"size,omitempty"`
 }
 
 type OverlayOptionsConfig struct {
 	// IgnoreChownErrors is a flag for whether chown errors should be
 	// ignored when building an image.
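
The `,omitempty` additions matter when these structs are re-encoded: zero-valued fields are dropped from the generated TOML instead of being written out as empty strings. A sketch of the effect with the BurntSushi/toml encoder (assuming that encoder; the struct and field names here are illustrative):

```go
package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

type options struct {
	Driver   string `toml:"driver"`
	MountOpt string `toml:"mountopt,omitempty"`
	Size     string `toml:"size,omitempty"`
}

func main() {
	// Only non-empty fields reach the output: with omitempty the encoder
	// skips MountOpt and Size while they are "".
	cfg := options{Driver: "overlay"}
	if err := toml.NewEncoder(os.Stdout).Encode(cfg); err != nil {
		log.Fatal(err)
	}
	// Output:
	// driver = "overlay"
}
```
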
- IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // Inodes is used to set a maximum inodes of the container image. - Inodes string `toml:"inodes"` + Inodes string `toml:"inodes,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories - ForceMask string `toml:"force_mask"` + ForceMask string `toml:"force_mask,omitempty"` } type VfsOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` } type ZfsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Name is the File System name of the ZFS File system - Name string `toml:"fsname"` + Name string `toml:"fsname,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` } // OptionsConfig represents the "storage.options" TOML config table. @@ -122,82 +122,82 @@ type OptionsConfig struct { // AdditionalImagesStores is the location of additional read/only // Image stores. Usually used to access Networked File System // for shared image content - AdditionalImageStores []string `toml:"additionalimagestores"` + AdditionalImageStores []string `toml:"additionalimagestores,omitempty"` // AdditionalLayerStores is the location of additional read/only // Layer stores. Usually used to access Networked File System // for shared image content // This API is experimental and can be changed without bumping the // major version number. - AdditionalLayerStores []string `toml:"additionallayerstores"` + AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // RemapUIDs is a list of default UID mappings to use for layers. - RemapUIDs string `toml:"remap-uids"` + RemapUIDs string `toml:"remap-uids,omitempty"` // RemapGIDs is a list of default GID mappings to use for layers. - RemapGIDs string `toml:"remap-gids"` + RemapGIDs string `toml:"remap-gids,omitempty"` // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories. - ForceMask os.FileMode `toml:"force_mask"` + ForceMask os.FileMode `toml:"force_mask,omitempty"` // RemapUser is the name of one or more entries in /etc/subuid which // should be used to set up default UID mappings. - RemapUser string `toml:"remap-user"` + RemapUser string `toml:"remap-user,omitempty"` // RemapGroup is the name of one or more entries in /etc/subgid which // should be used to set up default GID mappings. 
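
`RemapUIDs`/`RemapGIDs` hold comma-separated `container:host:size` triples. A sketch of parsing such a value into idtools-style ranges; `parseRemap` is illustrative, not the library's actual parser:

```go
package main

import (
	"fmt"
	"log"
	"strconv"
	"strings"
)

// IDMap mirrors the idtools type: ContainerID..ContainerID+Size-1 maps to
// HostID..HostID+Size-1.
type IDMap struct {
	ContainerID, HostID, Size int
}

// parseRemap turns a value like "0:100000:65536,65536:200000:65536"
// into a list of IDMap ranges.
func parseRemap(spec string) ([]IDMap, error) {
	var out []IDMap
	for _, triple := range strings.Split(spec, ",") {
		parts := strings.Split(strings.TrimSpace(triple), ":")
		if len(parts) != 3 {
			return nil, fmt.Errorf("invalid mapping %q", triple)
		}
		var nums [3]int
		for i, p := range parts {
			n, err := strconv.Atoi(p)
			if err != nil {
				return nil, fmt.Errorf("invalid mapping %q: %w", triple, err)
			}
			nums[i] = n
		}
		out = append(out, IDMap{ContainerID: nums[0], HostID: nums[1], Size: nums[2]})
	}
	return out, nil
}

func main() {
	maps, err := parseRemap("0:100000:65536,65536:200000:65536")
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range maps {
		fmt.Printf("container %d-%d -> host %d-%d\n",
			m.ContainerID, m.ContainerID+m.Size-1, m.HostID, m.HostID+m.Size-1)
	}
}
```
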
- RemapGroup string `toml:"remap-group"` + RemapGroup string `toml:"remap-group,omitempty"` // RootAutoUsernsUser is the name of one or more entries in /etc/subuid and // /etc/subgid which should be used to set up automatically a userns. - RootAutoUsernsUser string `toml:"root-auto-userns-user"` + RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"` // AutoUsernsMinSize is the minimum size for a user namespace that is // created automatically. - AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"` + AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"` // AutoUsernsMaxSize is the maximum size for a user namespace that is // created automatically. - AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"` + AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"` // Aufs container options to be handed to aufs drivers - Aufs struct{ AufsOptionsConfig } `toml:"aufs"` + Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"` // Btrfs container options to be handed to btrfs drivers - Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"` + Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"` // Thinpool container options to be handed to thinpool drivers - Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"` // Overlay container options to be handed to overlay drivers - Overlay struct{ OverlayOptionsConfig } `toml:"overlay"` + Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"` // Vfs container options to be handed to VFS drivers - Vfs struct{ VfsOptionsConfig } `toml:"vfs"` + Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"` // Zfs container options to be handed to ZFS drivers - Zfs struct{ ZfsOptionsConfig } `toml:"zfs"` + Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // PullOptions specifies options to be handed to pull managers // This API is experimental and can be changed without bumping the major version number. - PullOptions map[string]string `toml:"pull_options"` + PullOptions map[string]string `toml:"pull_options,omitempty"` // DisableVolatile doesn't allow volatile mounts when it is set. 
- DisableVolatile bool `toml:"disable-volatile"` + DisableVolatile bool `toml:"disable-volatile,omitempty"` } // GetGraphDriverOptions returns the driver specific options diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index 8d58d24cac8..36e1bdd5fc8 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -1,8 +1,10 @@ +//go:build linux || darwin || freebsd || solaris // +build linux darwin freebsd solaris package directory import ( + "io/fs" "os" "path/filepath" "syscall" @@ -21,7 +23,7 @@ func Size(dir string) (size int64, err error) { func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Usage() returns the error. // if dir/x disappeared while walking, Usage() ignores dir/x. @@ -31,8 +33,9 @@ func Usage(dir string) (usage *DiskUsage, err error) { return err } - if fileInfo == nil { - return nil + fileInfo, err := entry.Info() + if err != nil { + return err } // Check inode to only count the sizes of files with multiple hard links once. @@ -44,9 +47,8 @@ func Usage(dir string) (usage *DiskUsage, err error) { // inode is not a uint64 on all platforms. Cast it to avoid issues. data[uint64(inode)] = struct{}{} - // Ignore directory sizes - if fileInfo.IsDir() { + if entry.IsDir() { return nil } diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go index a7a81240bc2..482bc51a26e 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -1,8 +1,10 @@ +//go:build windows // +build windows package directory import ( + "io/fs" "os" "path/filepath" ) @@ -19,11 +21,11 @@ func Size(dir string) (size int64, err error) { // Usage walks a directory tree and returns its total size in bytes and the number of inodes. func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Size() returns the error. // if dir/x disappeared while walking, Size() ignores dir/x. - if os.IsNotExist(err) && d != dir { + if os.IsNotExist(err) && path != dir { return nil } return err @@ -32,16 +34,15 @@ func Usage(dir string) (usage *DiskUsage, err error) { usage.InodeCount++ // Ignore directory sizes - if fileInfo == nil { + if d.IsDir() { return nil } - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil + fileInfo, err := d.Info() + if err != nil { + return err } - - usage.Size += s + usage.Size += fileInfo.Size() return nil }) diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go new file mode 100644 index 00000000000..85c5e76c844 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -0,0 +1,52 @@ +package homedir + +import ( + "errors" + "os" + "path/filepath" +) + +// GetConfigHome returns XDG_CONFIG_HOME. 
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetCacheHome returns XDG_CACHE_HOME. +// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetCacheHome() (string, error) { + if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { + return xdgCacheHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CACHE_HOME or HOME") + } + return filepath.Join(home, ".cache"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index 06b53854b93..027db259c19 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -18,18 +18,3 @@ func GetRuntimeDir() (string, error) { func StickRuntimeDirContents(files []string) ([]string, error) { return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") } - -// GetDataHome is unsupported on non-linux system. -func GetDataHome() (string, error) { - return "", errors.New("homedir.GetDataHome() is not supported on this system") -} - -// GetConfigHome is unsupported on non-linux system. -func GetConfigHome() (string, error) { - return "", errors.New("homedir.GetConfigHome() is not supported on this system") -} - -// GetCacheHome is unsupported on non-linux system. -func GetCacheHome() (string, error) { - return "", errors.New("homedir.GetCacheHome() is not supported on this system") -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 2475e351bb5..33177bdf306 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -93,48 +93,3 @@ func stick(f string) error { m |= os.ModeSticky return os.Chmod(f, m) } - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. 
-// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} - -// GetCacheHome returns XDG_CACHE_HOME. -// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetCacheHome() (string, error) { - if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { - return xdgCacheHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CACHE_HOME or HOME") - } - return filepath.Join(home, ".cache"), nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go index 4f2615ed32f..af65f2c03de 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -17,7 +17,12 @@ func Key() string { // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. func Get() string { - return os.Getenv(Key()) + home := os.Getenv(Key()) + if home != "" { + return home + } + home, _ = os.UserHomeDir() + return home } // GetShortcutString returns the string that is shortcut to user's home directory diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 83bc8c34ff4..7a8fec0ce5f 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -3,15 +3,18 @@ package idtools import ( "bufio" "fmt" + "io/ioutil" "os" "os/user" "sort" "strconv" "strings" + "sync" "syscall" "github.com/containers/storage/pkg/system" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // IDMap contains a single entry for user namespace range remapping. An array @@ -82,7 +85,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(uidMap) == 1 && uidMap[0].Size == 1 { uid = uidMap[0].HostID } else { - uid, err = toHost(0, uidMap) + uid, err = RawToHost(0, uidMap) if err != nil { return -1, -1, err } @@ -90,7 +93,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(gidMap) == 1 && gidMap[0].Size == 1 { gid = gidMap[0].HostID } else { - gid, err = toHost(0, gidMap) + gid, err = RawToHost(0, gidMap) if err != nil { return -1, -1, err } @@ -98,10 +101,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { return uid, gid, nil } -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { +// RawToContainer takes an id mapping, and uses it to translate a host ID to +// the remapped ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. 
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToContainer(hostID int, idMap []IDMap) (int, error) {
 	if idMap == nil {
 		return hostID, nil
 	}
@@ -114,10 +121,14 @@
 	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
 }
 
-// toHost takes an id mapping and a remapped ID, and translates the
-// ID to the mapped host ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id #
-func toHost(contID int, idMap []IDMap) (int, error) {
+// RawToHost takes an id mapping and a remapped ID, and translates the ID to
+// the mapped host ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToHost(contID int, idMap []IDMap) (int, error) {
 	if idMap == nil {
 		return contID, nil
 	}
@@ -182,31 +193,87 @@ func (i *IDMappings) RootPair() IDPair {
 }
 
 // ToHost returns the host UID and GID for the container uid, gid.
-// Remapping is only performed if the ids aren't already the remapped root ids
 func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+	var err error
+	var target IDPair
+
+	target.UID, err = RawToHost(pair.UID, i.uids)
+	if err != nil {
+		return target, err
+	}
+
+	target.GID, err = RawToHost(pair.GID, i.gids)
+	return target, err
+}
+
+var (
+	overflowUIDOnce sync.Once
+	overflowGIDOnce sync.Once
+	overflowUID     int
+	overflowGID     int
+)
+
+// getOverflowUID returns the UID mapped to the overflow user
+func getOverflowUID() int {
+	overflowUIDOnce.Do(func() {
+		// 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present
+		overflowUID = 65534
+		if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
+			if tmp, err := strconv.Atoi(string(content)); err == nil {
+				overflowUID = tmp
+			}
+		}
+	})
+	return overflowUID
+}
+
+// getOverflowGID returns the GID mapped to the overflow user
+func getOverflowGID() int {
+	overflowGIDOnce.Do(func() {
+		// 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
+		overflowGID = 65534
+		if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
+			if tmp, err := strconv.Atoi(string(content)); err == nil {
+				overflowGID = tmp
+			}
+		}
+	})
+	return overflowGID
+}
+
+// ToHostOverflow returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids.
+// If the mapping is not possible because the target ID is not mapped into
+// the namespace, then the overflow ID is used.
+func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) { var err error target := i.RootPair() if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.uids) + target.UID, err = RawToHost(pair.UID, i.uids) if err != nil { - return target, err + target.UID = getOverflowUID() + logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID) } } if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.gids) + target.GID, err = RawToHost(pair.GID, i.gids) + if err != nil { + target.GID = getOverflowGID() + logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID) + } } - return target, err + return target, nil } // ToContainer returns the container UID and GID for the host uid and gid func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { - uid, err := toContainer(pair.UID, i.uids) + uid, err := RawToContainer(pair.UID, i.uids) if err != nil { return -1, -1, err } - gid, err := toContainer(pair.GID, i.gids) + gid, err := RawToContainer(pair.GID, i.gids) return uid, gid, err } @@ -293,7 +360,7 @@ func parseSubidFile(path, username string) (ranges, error) { func checkChownErr(err error, name string, uid, gid int) error { if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL { - return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name) + return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate", uid, gid, name) } return err } diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go index db50a62e4c0..6e6e3b22bc9 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -12,11 +12,21 @@ import ( #cgo LDFLAGS: -l subid #include #include +#include const char *Prog = "storage"; +FILE *shadow_logfd = NULL; + struct subid_range get_range(struct subid_range *ranges, int i) { - return ranges[i]; + shadow_logfd = stderr; + return ranges[i]; } + +#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) +# define subid_get_uid_ranges get_subuid_ranges +# define subid_get_gid_ranges get_subgid_ranges +#endif + */ import "C" @@ -32,9 +42,9 @@ func readSubid(username string, isUser bool) (ranges, error) { var nRanges C.int var cRanges *C.struct_subid_range if isUser { - nRanges = C.get_subuid_ranges(cUsername, &cRanges) + nRanges = C.subid_get_uid_ranges(cUsername, &cRanges) } else { - nRanges = C.get_subgid_ranges(cUsername, &cRanges) + nRanges = C.subid_get_gid_ranges(cUsername, &cRanges) } if nRanges < 0 { return nil, errors.New("cannot read subids") diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index 9776b2a1287..7f270c61f82 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -46,6 +46,9 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path + if 
!filepath.IsAbs(dirPath) { + return fmt.Errorf("path: %s should be absolute", dirPath) + } for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000000..3ba99cf9351 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MNT_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MNT_SYNCHRONOUS + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MNT_UPDATE + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MNT_NOATIME + + mntDetach = unix.MNT_FORCE + + NODIRATIME = 0 + NODEV = 0 + DIRSYNC = 0 + MANDLOCK = 0 + BIND = 0 + RBIND = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SLAVE = 0 + RSLAVE = 0 + SHARED = 0 + RSHARED = 0 + RELATIME = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go index 9afd26d4c06..ee0f593a50a 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go index b31cf99d0ff..72ceec3ddad 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -28,14 +28,25 @@ func allocateIOVecs(options []string) []C.struct_iovec { func mount(device, target, mType string, flag uintptr, data string) error { isNullFS := false - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true + options := []string{"fspath", target} + + if data != "" { + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + continue + } + opt := strings.SplitN(x, "=", 2) + options = append(options, opt[0]) + if len(opt) == 2 { + options = append(options, opt[1]) + } else { + options = append(options, "") + } } } - options := []string{"fspath", target} if isNullFS { options = append(options, "fstype", "nullfs", "target", device) } else { diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go new file mode 100644 index 00000000000..6f63ae99170 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go @@ -0,0 +1,37 @@ +// +build freebsd + +package reexec + +import ( + "context" + "os" + "os/exec" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the 
current process's binary. +// Uses sysctl. +func Self() string { + path, err := unix.SysctlArgs("kern.proc.pathname", -1) + if err == nil { + return path + } + return os.Args[0] +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index 372bee7321f..d3dd86d349f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -17,6 +17,7 @@ func Self() string { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index 1ecaa906fed..a56ada2161e 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -1,4 +1,5 @@ -// +build freebsd solaris darwin +//go:build solaris || darwin +// +build solaris darwin package reexec @@ -17,6 +18,7 @@ func Self() string { // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will // be set to "/usr/bin/docker". func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -24,6 +26,7 @@ func Command(args ...string) *exec.Cmd { // CommandContext returns *exec.Cmd which has Path as current binary. func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go index 9d937426854..5b3605f319c 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -9,10 +9,12 @@ import ( // Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } // CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. 
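
`panicIfNotInitialized`, added to every `Command` variant in this series of hunks, enforces the contract that `main()` runs `reexec.Init()` before any library code re-executes the binary. The expected boilerplate looks roughly like this; "storage-helper" is a hypothetical subcommand name chosen for the example:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// When the binary is re-executed with os.Args[0] == "storage-helper",
	// Init() runs this function instead of returning control to main.
	reexec.Register("storage-helper", func() {
		fmt.Println("running in the child process")
		os.Exit(0)
	})
}

func main() {
	// Mandatory boilerplate: without this, any library call that needs to
	// re-exec will now panic instead of misbehaving silently.
	if reexec.Init() {
		return
	}
	fmt.Println("running in the parent process")
}
```
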
func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go index 673ab476abd..d868564767f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -17,6 +17,7 @@ func Self() string { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go index c56671d9192..a1938cd4f34 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -7,7 +7,10 @@ import ( "path/filepath" ) -var registeredInitializers = make(map[string]func()) +var ( + registeredInitializers = make(map[string]func()) + initWasCalled = false +) // Register adds an initialization func under the specified name func Register(name string, initializer func()) { @@ -22,6 +25,7 @@ func Register(name string, initializer func()) { // initialization function was called. func Init() bool { initializer, exists := registeredInitializers[os.Args[0]] + initWasCalled = true if exists { initializer() @@ -30,6 +34,21 @@ func Init() bool { return false } +func panicIfNotInitialized() { + if !initWasCalled { + // The reexec package is used to run subroutines in + // subprocesses which would otherwise have unacceptable side + // effects on the main thread. If you found this error, then + // your program uses a package which needs to do this. 
In + // order for that to work, main() should start with this + // boilerplate, or an equivalent: + // if reexec.Init() { + // return + // } + panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()") + } +} + func naiveSelf() string { name := os.Args[0] if filepath.Base(name) == name { diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go index 10355848bdb..6b47c4e717f 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -13,6 +13,9 @@ const ( // Operation not supported EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + + // Value is too small or too large for maximum size allowed + EOVERFLOW unix.Errno = unix.EOVERFLOW ) // Lgetxattr retrieves the value of the extended attribute identified by attr diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go index bc8b8e3a5fe..3fc27f0b139 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -10,6 +10,9 @@ const ( // Operation not supported EOPNOTSUPP syscall.Errno = syscall.Errno(0) + + // Value is too small or too large for maximum size allowed + EOVERFLOW syscall.Errno = syscall.Errno(0) ) // Lgetxattr is not supported on platforms other than linux. diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index 6d351ce80a9..baeb8f1aab5 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package unshare @@ -9,6 +10,7 @@ import ( "io" "os" "os/exec" + "os/signal" "os/user" "runtime" "strconv" @@ -75,6 +77,28 @@ func getRootlessGID() int { return os.Getegid() } +// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the +// matching file capabilitiy +func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { + info, err := os.Stat(path) + if err != nil { + return false, err + } + + mode := info.Mode() + if mode&modeid == modeid { + return true, nil + } + cap, err := capability.NewFile2(path) + if err != nil { + return false, err + } + if err := cap.Load(); err != nil { + return false, err + } + return cap.Get(capability.EFFECTIVE, capid), nil +} + func (c *Cmd) Start() error { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -214,15 +238,26 @@ func (c *Cmd) Start() error { gidmapSet := false // Set the GID map. if c.UseNewgidmap { - cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) + path, err := exec.LookPath("newgidmap") + if err != nil { + return errors.Wrapf(err, "error finding newgidmap") + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) 
g.Reset() cmd.Stdout = g cmd.Stderr = g - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { gidmapSet = true } else { logrus.Warnf("Error running newgidmap: %v: %s", err, g.String()) + isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID) + if err != nil { + logrus.Warnf("Failed to check for setgid on %s: %v", path, err) + } else { + if !isSetgid { + logrus.Warnf("%s should be setgid or have filecaps setgid", path) + } + } logrus.Warnf("Falling back to single mapping") g.Reset() g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) @@ -261,17 +296,29 @@ func (c *Cmd) Start() error { fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) } uidmapSet := false - // Set the GID map. + // Set the UID map. if c.UseNewuidmap { - cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + path, err := exec.LookPath("newuidmap") + if err != nil { + return errors.Wrapf(err, "error finding newuidmap") + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) u.Reset() cmd.Stdout = u cmd.Stderr = u - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { uidmapSet = true } else { logrus.Warnf("Error running newuidmap: %v: %s", err, u.String()) + isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID) + if err != nil { + logrus.Warnf("Failed to check for setuid on %s: %v", path, err) + } else { + if !isSetuid { + logrus.Warnf("%s should be setuid or have filecaps setuid", path) + } + } + logrus.Warnf("Falling back to single mapping") u.Reset() u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) @@ -484,6 +531,30 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) { // Finish up. logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings) + + // Forward SIGHUP, SIGINT, and SIGTERM to our child process. + interrupted := make(chan os.Signal, 100) + defer func() { + signal.Stop(interrupted) + close(interrupted) + }() + cmd.Hook = func(int) error { + go func() { + for receivedSignal := range interrupted { + cmd.Cmd.Process.Signal(receivedSignal) + } + }() + return nil + } + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + + // Make sure our child process gets SIGKILLed if we exit, for whatever + // reason, before it does. + if cmd.Cmd.SysProcAttr == nil { + cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL + ExecRunnable(cmd, nil) } @@ -501,11 +572,11 @@ func ExecRunnable(cmd Runnable, cleanup func()) { if exitError.ProcessState.Exited() { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { if waitStatus.Exited() { - logrus.Errorf("%v", exitError) + logrus.Debugf("%v", exitError) exit(waitStatus.ExitStatus()) } if waitStatus.Signaled() { - logrus.Errorf("%v", exitError) + logrus.Debugf("%v", exitError) exit(int(waitStatus.Signal()) + 128) } } diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 722750c0cca..c17dd6d37ea 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -1,5 +1,14 @@ # This file is is the configuration file for all tools -# that use the containers/storage library. +# that use the containers/storage library. 
The storage.conf file +# overrides all other storage.conf files. Container engines using the +# container/storage library do not inherit fields from other storage.conf +# files. +# +# Note: The storage.conf file overrides other storage.conf files based on this precedence: +# /usr/containers/storage.conf +# /etc/containers/storage.conf +# $HOME/.config/containers/storage.conf +# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) # See man 5 containers-storage.conf for more information # The "container storage" table contains all of the server options. [storage] @@ -11,8 +20,14 @@ driver = "overlay" runroot = "/run/containers/storage" # Primary Read/Write location of container storage +# When changing the graphroot location on an SELinux system, you must +# ensure the labeling matches the default location's labels with the +# following commands: +# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH +# restorecon -R -v /NEWSTORAGEPATH graphroot = "/var/lib/containers/storage" + # Storage path for rootless users # # rootless_storage_path = "$HOME/.local/share/containers/storage" diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd new file mode 100644 index 00000000000..cc655c62e56 --- /dev/null +++ b/vendor/github.com/containers/storage/storage.conf-freebsd @@ -0,0 +1,205 @@ +# This file is the configuration file for all tools +# that use the containers/storage library. The storage.conf file +# overrides all other storage.conf files. Container engines using the +# container/storage library do not inherit fields from other storage.conf +# files. +# +# Note: The storage.conf file overrides other storage.conf files based on this precedence: +# /usr/containers/storage.conf +# /etc/containers/storage.conf +# $HOME/.config/containers/storage.conf +# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) +# See man 5 containers-storage.conf for more information +# The "container storage" table contains all of the server options. +[storage] + +# Default Storage Driver. Must be set for proper operation. +driver = "zfs" + +# Temporary storage location +runroot = "/var/run/containers/storage" + +# Primary Read/Write location of container storage +graphroot = "/var/db/containers/storage" + + +# Storage path for rootless users +# +# rootless_storage_path = "$HOME/.local/share/containers/storage" + +[storage.options] +# Storage options to be passed to underlying storage drivers + +# AdditionalImageStores is used to pass paths to additional Read/Only image stores +# Must be a comma-separated list. +additionalimagestores = [ +] + +# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of +# a container, to the UIDs/GIDs as they should appear outside of the container, +# and the length of the range of UIDs/GIDs. Additional mapped sets can be +# listed and will be heeded by libraries, but there are limits to the number of +# mappings which the kernel will allow when you later attempt to run a +# container. +# +# remap-uids = 0:1668442479:65536 +# remap-gids = 0:1668442479:65536 + +# Remap-User/Group is a user name which can be used to look up one or more UID/GID +# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting +# with an in-container ID of 0 and then a host-level ID taken from the lowest +# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the +# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID, +# until all of the entries have been used for maps. +# +# remap-user = "containers" +# remap-group = "containers" + +# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID +# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned +# to containers configured to automatically create a user namespace. Containers +# configured to automatically create a user namespace can still overlap with containers +# having an explicit mapping set. +# This setting is ignored when running as rootless. +# root-auto-userns-user = "storage" +# +# Auto-userns-min-size is the minimum size for a user namespace created automatically. +# auto-userns-min-size=1024 +# +# Auto-userns-max-size is the maximum size for a user namespace created automatically. +# auto-userns-max-size=65536 + +[storage.options.overlay] +# ignore_chown_errors can be set to allow a non-privileged user running with +# a single UID within a user namespace to run containers. The user can pull +# and use any image even those with multiple uids. Note multiple UIDs will be +# squashed down to the default uid in the container. These images will have no +# separation between the users in the container. Only supported for the overlay +# and vfs drivers. +#ignore_chown_errors = "false" + +# Inodes is used to set the maximum number of inodes for the container image. +# inodes = "" + +# Path to a helper program to use for mounting the file system instead of mounting it +# directly. +#mount_program = "/usr/bin/fuse-overlayfs" + +# mountopt specifies a comma-separated list of extra mount options +mountopt = "nodev" + +# Set to skip a PRIVATE bind mount on the storage home directory. +# skip_mount_home = "false" + +# Size is used to set a maximum size of the container image. +# size = "" + +# ForceMask specifies the permissions mask that is used for new files and +# directories. +# +# The values "shared" and "private" are accepted. +# Octal permission masks are also accepted. +# +# "": No value specified. +# All files/directories get set with the permissions identified within the +# image. +# "private": it is equivalent to 0700. +# All files/directories get set with 0700 permissions. The owner has rwx +# access to the files. No other users on the system can access the files. +# This setting could be used with network-based home directories. +# "shared": it is equivalent to 0755. +# The owner has rwx access to the files and everyone else can read, access +# and execute them. This setting is useful for sharing containers storage +# with other users. For instance, a store owned by root can be shared with +# rootless users as an additional store. +# NOTE: All files within the image are made readable and executable by any +# user on the system. Even /etc/shadow within your image is now readable by +# any user. +# +# OCTAL: Users can experiment with other OCTAL Permissions. +# +# Note: The force_mask flag is an experimental feature; it could change in the +# future. When "force_mask" is set the original permission mask is stored in +# the "user.containers.override_stat" xattr and the "mount_program" option must +# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the +# extended attribute permissions to processes within containers rather than the +# "force_mask" permissions.
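As the force_mask note above says, the original permissions are preserved in the `user.containers.override_stat` extended attribute. A hedged sketch of reading that xattr back with the same golang.org/x/sys/unix wrappers the library builds on; the path is an illustrative assumption, and the exact value encoding is an implementation detail of containers/storage:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// readOverrideStat fetches the xattr that force_mask stores, which records
// the original ownership and mode of the file. A value larger than the
// buffer would surface as unix.ERANGE here.
func readOverrideStat(path string) (string, error) {
	buf := make([]byte, 64)
	sz, err := unix.Lgetxattr(path, "user.containers.override_stat", buf)
	if err != nil {
		return "", err
	}
	return string(buf[:sz]), nil
}

func main() {
	// Illustrative path; on a real system this would be a file inside a
	// storage root mounted with force_mask set.
	v, err := readOverrideStat("/var/lib/containers/storage/overlay/example")
	if err != nil {
		fmt.Println("no override_stat xattr:", err)
		return
	}
	fmt.Println("original stat:", v)
}
```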
+# +# force_mask = "" + +[storage.options.thinpool] +# Storage Options for thinpool + +# autoextend_percent determines the amount by which pool needs to be +# grown. This is specified in terms of % of pool size. So a value of 20 means +# that when threshold is hit, pool will be grown by 20% of existing +# pool size. +# autoextend_percent = "20" + +# autoextend_threshold determines the pool extension threshold in terms +# of percentage of pool size. For example, if threshold is 60, that means when +# pool is 60% full, threshold has been hit. +# autoextend_threshold = "80" + +# basesize specifies the size to use when creating the base device, which +# limits the size of images and containers. +# basesize = "10G" + +# blocksize specifies a custom blocksize to use for the thin pool. +# blocksize="64k" + +# directlvm_device specifies a custom block storage device to use for the +# thin pool. Required if you setup devicemapper. +# directlvm_device = "" + +# directlvm_device_force wipes device even if device already has a filesystem. +# directlvm_device_force = "True" + +# fs specifies the filesystem type to use for the base device. +# fs="xfs" + +# log_level sets the log level of devicemapper. +# 0: LogLevelSuppress 0 (Default) +# 2: LogLevelFatal +# 3: LogLevelErr +# 4: LogLevelWarn +# 5: LogLevelNotice +# 6: LogLevelInfo +# 7: LogLevelDebug +# log_level = "7" + +# min_free_space specifies the min free space percent in a thin pool require for +# new device creation to succeed. Valid values are from 0% - 99%. +# Value 0% disables +# min_free_space = "10%" + +# mkfsarg specifies extra mkfs arguments to be used when creating the base +# device. +# mkfsarg = "" + +# metadata_size is used to set the `pvcreate --metadatasize` options when +# creating thin devices. Default is 128k +# metadata_size = "" + +# Size is used to set a maximum size of the container image. +# size = "" + +# use_deferred_removal marks devicemapper block device for deferred removal. +# If the thinpool is in use when the driver attempts to remove it, the driver +# tells the kernel to remove it as soon as possible. Note this does not free +# up the disk space, use deferred deletion to fully remove the thinpool. +# use_deferred_removal = "True" + +# use_deferred_deletion marks thinpool device for deferred deletion. +# If the device is busy when the driver attempts to delete it, the driver +# will attempt to delete device every 30 seconds until successful. +# If the program using the driver exits, the driver will continue attempting +# to cleanup the next time the driver is used. Deferred deletion permanently +# deletes the device and all data stored in device will be lost. +# use_deferred_deletion = "True" + +# xfs_nospace_max_retries specifies the maximum number of retries XFS should +# attempt to complete IO when ENOSPC (no space) error is returned by +# underlying storage device. +# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 169c7d1513e..30d3e8715ee 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -31,6 +31,14 @@ import ( "github.com/pkg/errors" ) +type updateNameOperation int + +const ( + setNames updateNameOperation = iota + addNames + removeNames +) + var ( stores []*store storesLock sync.Mutex @@ -368,8 +376,17 @@ type Store interface { // SetNames changes the list of names for a layer, image, or container. 
// Duplicate names are removed from the list automatically. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + AddNames(id string, names []string) error + + // RemoveNames removes the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + RemoveNames(id string, names []string) error + // ListImageBigData retrieves a list of the (possibly large) chunks of // named data associated with an image. ListImageBigData(id string) ([]string, error) @@ -575,10 +592,11 @@ type ContainerOptions struct { // container's layer will inherit settings from the image's top layer // or, if it is not being created based on an image, the Store object. types.IDMappingOptions - LabelOpts []string - Flags map[string]interface{} - MountOpts []string - Volatile bool + LabelOpts []string + Flags map[string]interface{} + MountOpts []string + Volatile bool + StorageOpt map[string]string } type store struct { @@ -646,17 +664,21 @@ func GetStore(options types.StoreOptions) (Store, error) { storesLock.Lock() defer storesLock.Unlock() + // return if BOTH run and graph root are matched, otherwise our run-root can be overridden if the graph is found first for _, s := range stores { - if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { + if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { return s, nil } } - if options.GraphRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified") - } - if options.RunRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified") + // if passed a run-root or graph-root alone, the other should be defaulted only error if we have neither. + switch { + case options.RunRoot == "" && options.GraphRoot == "": + return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified") + case options.GraphRoot == "": + options.GraphRoot = types.Options().GraphRoot + case options.RunRoot == "": + options.RunRoot = types.Options().RunRoot } if err := os.MkdirAll(options.RunRoot, 0700); err != nil { @@ -1384,7 +1406,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.Flags["MountLabel"] = mountLabel } - clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true) + clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), options.StorageOpt, layerOptions, true) if err != nil { return nil, err } @@ -1608,7 +1630,7 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { } } if foundImage { - return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q", key, id) + return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q (consider removing the image to resolve the issue)", key, id) } return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) } @@ -2045,7 +2067,20 @@ func dedupeNames(names []string) []string { return deduped } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
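Before the dispatch implementation that follows, a hedged usage sketch of the new naming API may help. The store roots, driver defaulting, and image ID below are illustrative assumptions; against a real store, the calls return ErrLayerUnknown unless the ID actually exists:

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	// Sketch only: paths are placeholders, and GetStore may need privileges.
	store, err := storage.GetStore(types.StoreOptions{
		RunRoot:   "/tmp/example-run",
		GraphRoot: "/tmp/example-graph",
	})
	if err != nil {
		panic(err)
	}

	id := "example-image-id" // would normally come from a pull or create

	// AddNames/RemoveNames update the name list in one locked operation,
	// avoiding the read-modify-write race of the deprecated SetNames.
	if err := store.AddNames(id, []string{"registry.example.com/app:latest"}); err != nil {
		fmt.Println("add:", err)
	}
	if err := store.RemoveNames(id, []string{"registry.example.com/app:latest"}); err != nil {
		fmt.Println("remove:", err)
	}
}
```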
func (s *store) SetNames(id string, names []string) error { + return s.updateNames(id, names, setNames) +} + +func (s *store) AddNames(id string, names []string) error { + return s.updateNames(id, names, addNames) +} + +func (s *store) RemoveNames(id string, names []string) error { + return s.updateNames(id, names, removeNames) +} + +func (s *store) updateNames(id string, names []string, op updateNameOperation) error { deduped := dedupeNames(names) rlstore, err := s.LayerStore() @@ -2058,7 +2093,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if rlstore.Exists(id) { - return rlstore.SetNames(id, deduped) + switch op { + case setNames: + return rlstore.SetNames(id, deduped) + case removeNames: + return rlstore.RemoveNames(id, deduped) + case addNames: + return rlstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } ristore, err := s.ImageStore() @@ -2071,7 +2115,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if ristore.Exists(id) { - return ristore.SetNames(id, deduped) + switch op { + case setNames: + return ristore.SetNames(id, deduped) + case removeNames: + return ristore.RemoveNames(id, deduped) + case addNames: + return ristore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } // Check is id refers to a RO Store @@ -2109,7 +2162,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if rcstore.Exists(id) { - return rcstore.SetNames(id, deduped) + switch op { + case setNames: + return rcstore.SetNames(id, deduped) + case removeNames: + return rcstore.RemoveNames(id, deduped) + case addNames: + return rcstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } return ErrLayerUnknown } @@ -2370,22 +2432,16 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if err != nil { return nil, err } - childrenByParent := make(map[string]*[]string) + childrenByParent := make(map[string][]string) for _, layer := range layers { - parent := layer.Parent - if list, ok := childrenByParent[parent]; ok { - newList := append(*list, layer.ID) - childrenByParent[parent] = &newList - } else { - childrenByParent[parent] = &([]string{layer.ID}) - } + childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID) } - otherImagesByTopLayer := make(map[string]string) + otherImagesTopLayers := make(map[string]struct{}) for _, img := range images { if img.ID != id { - otherImagesByTopLayer[img.TopLayer] = img.ID + otherImagesTopLayers[img.TopLayer] = struct{}{} for _, layerID := range img.MappedTopLayers { - otherImagesByTopLayer[layerID] = img.ID + otherImagesTopLayers[layerID] = struct{}{} } } } @@ -2395,43 +2451,44 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) } } layer := image.TopLayer - lastRemoved := "" + layersToRemoveMap := make(map[string]struct{}) + layersToRemove = append(layersToRemove, image.MappedTopLayers...) 
+ for _, mappedTopLayer := range image.MappedTopLayers { + layersToRemoveMap[mappedTopLayer] = struct{}{} + } for layer != "" { if rcstore.Exists(layer) { break } - if _, ok := otherImagesByTopLayer[layer]; ok { + if _, used := otherImagesTopLayers[layer]; used { break } parent := "" if l, err := rlstore.Get(layer); err == nil { parent = l.Parent } - hasOtherRefs := func() bool { + hasChildrenNotBeingRemoved := func() bool { layersToCheck := []string{layer} if layer == image.TopLayer { layersToCheck = append(layersToCheck, image.MappedTopLayers...) } for _, layer := range layersToCheck { - if childList, ok := childrenByParent[layer]; ok && childList != nil { - children := *childList - for _, child := range children { - if child != lastRemoved { - return true + if childList := childrenByParent[layer]; len(childList) > 0 { + for _, child := range childList { + if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { + continue } + return true } } } return false } - if hasOtherRefs() { + if hasChildrenNotBeingRemoved() { break } - lastRemoved = layer - if layer == image.TopLayer { - layersToRemove = append(layersToRemove, image.MappedTopLayers...) - } - layersToRemove = append(layersToRemove, lastRemoved) + layersToRemove = append(layersToRemove, layer) + layersToRemoveMap[layer] = struct{}{} layer = parent } } else { @@ -2499,23 +2556,29 @@ func (s *store) DeleteContainer(id string) error { gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) wg.Add(1) go func() { - var err error - for attempts := 0; attempts < 50; attempts++ { - err = os.RemoveAll(gcpath) - if err == nil || !system.IsEBUSY(err) { - break - } - time.Sleep(time.Millisecond * 100) + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(gcpath) + if err == nil { + errChan <- nil + return } - errChan <- err - wg.Done() + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(gcpath) }() rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) wg.Add(1) go func() { - errChan <- os.RemoveAll(rcpath) - wg.Done() + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(rcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(rcpath) }() go func() { @@ -2524,17 +2587,12 @@ func (s *store) DeleteContainer(id string) error { }() var errors []error - for { - select { - case err, ok := <-errChan: - if !ok { - return multierror.Append(nil, errors...).ErrorOrNil() - } - if err != nil { - errors = append(errors, err) - } + for err := range errChan { + if err != nil { + errors = append(errors, err) } } + return multierror.Append(nil, errors...).ErrorOrNil() } } return ErrNotAContainer @@ -2830,10 +2888,33 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro if err != nil { return nil, err } + + // NaiveDiff could cause mounts to happen without a lock, so be safe + // and treat the .Diff operation as a Mount. + s.graphLock.Lock() + defer s.graphLock.Unlock() + + modified, err := s.graphLock.Modified() + if err != nil { + return nil, err + } + + // We need to make sure the home mount is present when the Mount is done. + if modified { + s.graphDriver = nil + s.layerStore = nil + s.graphDriver, err = s.getGraphDriver() + if err != nil { + return nil, err + } + s.lastLoaded = time.Now() + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ store := s store.RLock() if err := store.ReloadIfChanged(); err != nil { + store.Unlock() return nil, err } if store.Exists(to) { diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go index d920d12eb50..ad12ffdbf2d 100644 --- a/vendor/github.com/containers/storage/types/errors.go +++ b/vendor/github.com/containers/storage/types/errors.go @@ -55,4 +55,6 @@ var ( ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") // ErrNotSupported is returned when the requested functionality is not supported. ErrNotSupported = errors.New("not supported") + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = errors.New("invalid mappings specified") ) diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index f9bf7e6b658..a71c6d2efd8 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -3,44 +3,73 @@ package types import ( "fmt" "os" - "os/exec" "path/filepath" "strings" "sync" "time" "github.com/BurntSushi/toml" - "github.com/containers/storage/drivers/overlay" cfg "github.com/containers/storage/pkg/config" "github.com/containers/storage/pkg/idtools" "github.com/sirupsen/logrus" ) // TOML-friendly explicit tables used for conversions. -type tomlConfig struct { +type TomlConfig struct { Storage struct { - Driver string `toml:"driver"` - RunRoot string `toml:"runroot"` - GraphRoot string `toml:"graphroot"` - RootlessStoragePath string `toml:"rootless_storage_path"` - Options cfg.OptionsConfig `toml:"options"` + Driver string `toml:"driver,omitempty"` + RunRoot string `toml:"runroot,omitempty"` + GraphRoot string `toml:"graphroot,omitempty"` + RootlessStoragePath string `toml:"rootless_storage_path,omitempty"` + Options cfg.OptionsConfig `toml:"options,omitempty"` } `toml:"storage"` } +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" +) + // defaultConfigFile path to the system wide storage.conf file var ( - defaultConfigFile = "/etc/containers/storage.conf" - defaultConfigFileSet = false + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false // DefaultStoreOptions is a reasonable default set of options. defaultStoreOptions StoreOptions ) +const ( + overlayDriver = "overlay" + overlay2 = "overlay2" +) + func init() { - defaultStoreOptions.RunRoot = "/run/containers/storage" - defaultStoreOptions.GraphRoot = "/var/lib/containers/storage" + defaultStoreOptions.RunRoot = defaultRunRoot + defaultStoreOptions.GraphRoot = defaultGraphRoot defaultStoreOptions.GraphDriverName = "" - ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + // The DefaultConfigFile(rootless) function returns the path + // of the used storage.conf file, by returning defaultConfigFile + // If override exists containers/storage uses it by default. 
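The assignment that follows implements exactly that precedence: prefer the /etc override when it exists, otherwise read the distribution default. A condensed, standalone sketch of the same decision (constants copied from the diff; the helper name is mine, not the library's):

```go
package main

import (
	"fmt"
	"os"
)

// pickStorageConf mirrors the init logic above: if the override file in
// /etc exists, use it; otherwise fall back to the packaged default.
func pickStorageConf() string {
	const (
		defaultConfigFile  = "/usr/share/containers/storage.conf"
		overrideConfigFile = "/etc/containers/storage.conf"
	)
	if _, err := os.Stat(overrideConfigFile); err == nil {
		return overrideConfigFile
	}
	return defaultConfigFile
}

func main() {
	fmt.Println("would load:", pickStorageConf())
}
```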
+ defaultConfigFile = defaultOverrideConfigFile + ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions) + } else { + if !os.IsNotExist(err) { + logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) + } + ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + } + // reload could set values to empty for run and graph root if config does not contains anything + if defaultStoreOptions.RunRoot == "" { + defaultStoreOptions.RunRoot = defaultRunRoot + } + if defaultStoreOptions.GraphRoot == "" { + defaultStoreOptions.GraphRoot = defaultGraphRoot + } } // defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing. @@ -168,7 +197,6 @@ func isRootlessDriver(driver string) bool { // getRootlessStorageOpts returns the storage opts for containers running as non root func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { var opts StoreOptions - const overlayDriver = "overlay" dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) if err != nil { @@ -190,25 +218,16 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti if driver := os.Getenv("STORAGE_DRIVER"); driver != "" { opts.GraphDriverName = driver } - if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver { - supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime) - if err != nil { - return opts, err - } - if supported { - opts.GraphDriverName = overlayDriver - } else { - if path, err := exec.LookPath("fuse-overlayfs"); err == nil { - opts.GraphDriverName = overlayDriver - opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} - } - } - if opts.GraphDriverName == overlayDriver { - for _, o := range systemOpts.GraphDriverOptions { - if strings.Contains(o, "ignore_chown_errors") { - opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) - break - } + if opts.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + opts.GraphDriverName = overlayDriver + } + + if opts.GraphDriverName == overlayDriver { + for _, o := range systemOpts.GraphDriverOptions { + if strings.Contains(o, "ignore_chown_errors") { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) + break } } } @@ -271,7 +290,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio // ReloadConfigurationFile parses the specified configuration file and overrides // the configuration in storeOptions. 
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { - config := new(tomlConfig) + config := new(TomlConfig) meta, err := toml.DecodeFile(configFile, &config) if err == nil { @@ -286,7 +305,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { } } - // Clear storeOptions of previos settings + // Clear storeOptions of previous settings *storeOptions = StoreOptions{} if config.Storage.Driver != "" { storeOptions.GraphDriverName = config.Storage.Driver @@ -295,6 +314,10 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { config.Storage.Driver = os.Getenv("STORAGE_DRIVER") storeOptions.GraphDriverName = config.Storage.Driver } + if storeOptions.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + storeOptions.GraphDriverName = overlayDriver + } if storeOptions.GraphDriverName == "" { logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile) } @@ -385,3 +408,39 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { func Options() StoreOptions { return defaultStoreOptions } + +// Save overwrites the tomlConfig in storage.conf with the given conf +func Save(conf TomlConfig, rootless bool) error { + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return err + } + + if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil { + return err + } + + f, err := os.Create(configFile) + if err != nil { + return err + } + + return toml.NewEncoder(f).Encode(conf) +} + +// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it +func StorageConfig(rootless bool) (*TomlConfig, error) { + config := new(TomlConfig) + + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return nil, err + } + + _, err = toml.DecodeFile(configFile, &config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 80d56041b06..37d4b79b01b 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -40,3 +40,35 @@ func validateMountOptions(mountOptions []string) error { } return nil } + +func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) { + var result []string + switch op { + case setNames: + // ignore all old names and just return new names + result = opParameters + case removeNames: + // remove given names from old names + result = make([]string, 0, len(oldNames)) + for _, name := range oldNames { + // only keep names in final result which do not intersect with input names + // basically `result = oldNames - opParameters` + nameShouldBeRemoved := false + for _, opName := range opParameters { + if name == opName { + nameShouldBeRemoved = true + } + } + if !nameShouldBeRemoved { + result = append(result, name) + } + } + case addNames: + result = make([]string, 0, len(opParameters)+len(oldNames)) + result = append(result, opParameters...) + result = append(result, oldNames...) 
+ default: + return result, errInvalidUpdateNameOperation + } + return dedupeNames(result), nil +} diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index bada4a8e3c8..b6bca4cef11 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -5755,7 +5755,6 @@ paths: property1: "string" property2: "string" IpcMode: "" - LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 @@ -8607,12 +8606,20 @@ paths: if `tty` was specified as part of creating and starting the exec instance. operationId: "ExecResize" responses: - 201: + 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" diff --git a/vendor/github.com/docker/go-events/.gitignore b/vendor/github.com/docker/go-events/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/docker/go-events/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/docker/go-events/CONTRIBUTING.md b/vendor/github.com/docker/go-events/CONTRIBUTING.md new file mode 100644 index 00000000000..d813af779b0 --- /dev/null +++ b/vendor/github.com/docker/go-events/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing to Docker open source projects + +Want to hack on go-events? Awesome! Here are instructions to get you started. + +go-events is part of the [Docker](https://www.docker.com) project, and +follows the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-events/LICENSE b/vendor/github.com/docker/go-events/LICENSE new file mode 100644 index 00000000000..6d630cf595e --- /dev/null +++ b/vendor/github.com/docker/go-events/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-events/MAINTAINERS b/vendor/github.com/docker/go-events/MAINTAINERS new file mode 100644 index 00000000000..e414d82e964 --- /dev/null +++ b/vendor/github.com/docker/go-events/MAINTAINERS @@ -0,0 +1,46 @@ +# go-events maintainers file +# +# This file describes who runs the docker/go-events project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "aluzzardi", + "lk4d4", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/go-events/README.md b/vendor/github.com/docker/go-events/README.md new file mode 100644 index 00000000000..0acafc279a3 --- /dev/null +++ b/vendor/github.com/docker/go-events/README.md @@ -0,0 +1,117 @@ +# Docker Events Package + +[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) +[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) + +The Docker `events` package implements composable event distribution +for Go. + +Originally created to implement the [notifications in Docker Registry +2](https://github.com/docker/distribution/blob/master/docs/notifications.md), +we've found the pattern to be useful in other applications. This package is +most of the same code with slightly updated interfaces. Much of the internals +have been made available. + +## Usage + +The `events` package centers around a `Sink` type. Events are written with +calls to `Sink.Write(event Event)`. Sinks can be wired up in various +configurations to achieve interesting behavior. + +The canonical example is that employed by the +[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications) +package. Let's say we have a type `httpSink` where we'd like to queue +notifications. As a rule, it should send a single http request and return an +error if it fails: + +```go +func (h *httpSink) Write(event Event) error { + p, err := json.Marshal(event) + if err != nil { + return err + } + body := bytes.NewReader(p) + resp, err := h.client.Post(h.url, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return errors.New("unexpected status") + } + + return nil +} + +// implement (*httpSink).Close() +``` + +With just that, we can start using components from this package. One can call +`(*httpSink).Write` to send events as the body of a post request to a +configured URL. + +### Retries + +HTTP can be unreliable. The first feature we'd like is to have some retry: + +```go +hs := newHTTPSink(/*...*/) +retry := NewRetryingSink(hs, NewBreaker(5, time.Second)) +``` + +We now have a sink that will retry events against the `httpSink` until they +succeed. The retry will back off for one second after 5 consecutive failures +using the breaker strategy. + +### Queues + +This isn't quite enough. We want a sink that doesn't block while we are +waiting for events to be sent. Let's add a `Queue`: + +```go +queue := NewQueue(retry) +``` + +Now, we have an unbounded queue that will work through all events sent with +`(*Queue).Write`. Events can be added asynchronously to the queue without +blocking the current execution path. This is ideal for use in an http request. + +### Broadcast + +It usually turns out that you want to send to more than one listener. We can +use `Broadcaster` to support this: + +```go +var broadcast = NewBroadcaster() // make it available somewhere in your application. +broadcast.Add(queue) // add your queue!
+broadcast.Add(queue2) // and another! +``` + +With the above, we can now call `broadcast.Write` in our http handlers and have +all the events distributed to each queue. Because the events are queued, no +listener blocks another. + +### Extending + +For the most part, the above is sufficient for a lot of applications. However, +extending the above functionality can be done by implementing your own `Sink`. The +behavior and semantics of the sink can be completely dependent on the +application requirements. The interface is provided below for reference: + +```go +type Sink interface { + Write(Event) error + Close() error +} +``` + +Application behavior can be controlled by how `Write` behaves. The examples +above are designed to queue the message and return as quickly as possible. +Other implementations may block until the event is committed to durable +storage. + +## Copyright and license + +Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License, +Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-events/broadcast.go b/vendor/github.com/docker/go-events/broadcast.go new file mode 100644 index 00000000000..5120078dfb8 --- /dev/null +++ b/vendor/github.com/docker/go-events/broadcast.go @@ -0,0 +1,178 @@ +package events + +import ( + "fmt" + "sync" + + "github.com/sirupsen/logrus" +) + +// Broadcaster sends events to multiple, reliable Sinks. The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan Event + adds chan configureRequest + removes chan configureRequest + + shutdown chan struct{} + closed chan struct{} + once sync.Once +} + +// NewBroadcaster returns a Broadcaster that dispatches to the provided sinks. The +// broadcaster behavior will be affected by the properties of the sink. +// Generally, the sink should accept all messages and deal with reliability on +// its own. Queue and RetryingSink are useful here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan Event), + adds: make(chan configureRequest), + removes: make(chan configureRequest), + shutdown: make(chan struct{}), + closed: make(chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts an event to be dispatched to all sinks. This method will never +// fail and should never block (hopefully!). The caller cedes the memory to the +// broadcaster and should not modify it after calling Write. +func (b *Broadcaster) Write(event Event) error { + select { + case b.events <- event: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Add the sink to the broadcaster. +// +// The provided sink must be comparable with equality. Typically, this just +// works with a regular pointer type. +func (b *Broadcaster) Add(sink Sink) error { + return b.configure(b.adds, sink) +} + +// Remove the provided sink.
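A short usage sketch may help at this point: the Broadcaster fans out to any Sink, including this package's own Channel type. The payload string is arbitrary, since Event is an empty interface, and the buffer size is an illustrative choice:

```go
package main

import (
	"fmt"

	events "github.com/docker/go-events"
)

func main() {
	ch := events.NewChannel(10) // buffered Channel sink from this package
	b := events.NewBroadcaster(ch)

	if err := b.Write("hello"); err != nil { // any value works as an Event
		panic(err)
	}
	fmt.Println("got:", <-ch.C)

	_ = b.Remove(ch) // sinks can be detached at runtime
	_ = b.Close()
}
```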
+func (b *Broadcaster) Remove(sink Sink) error { + return b.configure(b.removes, sink) +} + +type configureRequest struct { + sink Sink + response chan error +} + +func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { + response := make(chan error, 1) + + for { + select { + case ch <- configureRequest{ + sink: sink, + response: response}: + ch = nil + case err := <-response: + return err + case <-b.closed: + return ErrSinkClosed + } + } +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + b.once.Do(func() { + close(b.shutdown) + }) + + <-b.closed + return nil +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + defer close(b.closed) + remove := func(target Sink) { + for i, sink := range b.sinks { + if sink == target { + b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) + break + } + } + } + + for { + select { + case event := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(event); err != nil { + if err == ErrSinkClosed { + // remove closed sinks + remove(sink) + continue + } + logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: dropping event") + } + } + case request := <-b.adds: + // while we have to iterate for add/remove, common iteration for + // send is faster against slice. + + var found bool + for _, sink := range b.sinks { + if request.sink == sink { + found = true + break + } + } + + if !found { + b.sinks = append(b.sinks, request.sink) + } + // b.sinks[request.sink] = struct{}{} + request.response <- nil + case request := <-b.removes: + remove(request.sink) + request.response <- nil + case <-b.shutdown: + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil && err != ErrSinkClosed { + logrus.WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: closing sink failed") + } + } + return + } + } +} + +func (b *Broadcaster) String() string { + // Serialize copy of this broadcaster without the sync.Once, to avoid + // a data race. + + b2 := map[string]interface{}{ + "sinks": b.sinks, + "events": b.events, + "adds": b.adds, + "removes": b.removes, + + "shutdown": b.shutdown, + "closed": b.closed, + } + + return fmt.Sprint(b2) +} diff --git a/vendor/github.com/docker/go-events/channel.go b/vendor/github.com/docker/go-events/channel.go new file mode 100644 index 00000000000..802cf51ffec --- /dev/null +++ b/vendor/github.com/docker/go-events/channel.go @@ -0,0 +1,61 @@ +package events + +import ( + "fmt" + "sync" +) + +// Channel provides a sink that can be listened on. The writer and channel +// listener must operate in separate goroutines. +// +// Consumers should listen on Channel.C until Closed is closed. +type Channel struct { + C chan Event + + closed chan struct{} + once sync.Once +} + +// NewChannel returns a channel. If buffer is zero, the channel is +// unbuffered. +func NewChannel(buffer int) *Channel { + return &Channel{ + C: make(chan Event, buffer), + closed: make(chan struct{}), + } +} + +// Done returns a channel that will always proceed once the sink is closed. +func (ch *Channel) Done() chan struct{} { + return ch.closed +} + +// Write the event to the channel. Must be called in a separate goroutine from +// the listener. 
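The Done channel is what makes clean listener shutdown possible: the consumer selects on both C and Done. A hedged end-to-end sketch of the writer/listener split the comment above describes (payload values are illustrative):

```go
package main

import (
	"fmt"
	"sync"

	events "github.com/docker/go-events"
)

func main() {
	ch := events.NewChannel(0) // unbuffered: writer and listener must be separate goroutines

	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // listener
		defer wg.Done()
		for {
			select {
			case e := <-ch.C:
				fmt.Println("event:", e)
			case <-ch.Done(): // closed channel unblocks the listener
				return
			}
		}
	}()

	_ = ch.Write("one") // blocks until the listener receives
	_ = ch.Write("two")
	_ = ch.Close() // signals Done() and stops the listener
	wg.Wait()
}
```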
+func (ch *Channel) Write(event Event) error { + select { + case ch.C <- event: + return nil + case <-ch.closed: + return ErrSinkClosed + } +} + +// Close the channel sink. +func (ch *Channel) Close() error { + ch.once.Do(func() { + close(ch.closed) + }) + + return nil +} + +func (ch *Channel) String() string { + // Serialize a copy of the Channel that doesn't contain the sync.Once, + // to avoid a data race. + ch2 := map[string]interface{}{ + "C": ch.C, + "closed": ch.closed, + } + return fmt.Sprint(ch2) +} diff --git a/vendor/github.com/docker/go-events/errors.go b/vendor/github.com/docker/go-events/errors.go new file mode 100644 index 00000000000..56db7c25103 --- /dev/null +++ b/vendor/github.com/docker/go-events/errors.go @@ -0,0 +1,10 @@ +package events + +import "fmt" + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("events: sink closed") +) diff --git a/vendor/github.com/docker/go-events/event.go b/vendor/github.com/docker/go-events/event.go new file mode 100644 index 00000000000..f0f1d9ea5ff --- /dev/null +++ b/vendor/github.com/docker/go-events/event.go @@ -0,0 +1,15 @@ +package events + +// Event marks items that can be sent as events. +type Event interface{} + +// Sink accepts and sends events. +type Sink interface { + // Write an event to the Sink. If no error is returned, the caller will + // assume that all events have been committed to the sink. If an error is + // received, the caller may retry sending the event. + Write(event Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/vendor/github.com/docker/go-events/filter.go b/vendor/github.com/docker/go-events/filter.go new file mode 100644 index 00000000000..e6c0eb69dd5 --- /dev/null +++ b/vendor/github.com/docker/go-events/filter.go @@ -0,0 +1,52 @@ +package events + +// Matcher matches events. +type Matcher interface { + Match(event Event) bool +} + +// MatcherFunc implements matcher with just a function. +type MatcherFunc func(event Event) bool + +// Match calls the wrapped function. +func (fn MatcherFunc) Match(event Event) bool { + return fn(event) +} + +// Filter provides an event sink that sends only events that are accepted by a +// Matcher. No methods on filter are goroutine safe. +type Filter struct { + dst Sink + matcher Matcher + closed bool +} + +// NewFilter returns a new filter that will send events to dst that return +// true for the Matcher. +func NewFilter(dst Sink, matcher Matcher) Sink { + return &Filter{dst: dst, matcher: matcher} +} + +// Write an event to the filter. +func (f *Filter) Write(event Event) error { + if f.closed { + return ErrSinkClosed + } + + if f.matcher.Match(event) { + return f.dst.Write(event) + } + + return nil +} + +// Close the filter and allow no more events to pass through. +func (f *Filter) Close() error { + // TODO(stevvooe): Not all sinks should have Close.
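+ // Closing more than once is a no-op: only the first call reaches dst.Close().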
+	if f.closed {
+		return nil
+	}
+
+	f.closed = true
+	return f.dst.Close()
+}
diff --git a/vendor/github.com/docker/go-events/queue.go b/vendor/github.com/docker/go-events/queue.go
new file mode 100644
index 00000000000..4bb770afc2e
--- /dev/null
+++ b/vendor/github.com/docker/go-events/queue.go
@@ -0,0 +1,111 @@
+package events
+
+import (
+	"container/list"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Queue accepts all messages into a queue for asynchronous consumption
+// by a sink. It is unbounded and thread safe but the sink must be reliable or
+// events will be dropped.
+type Queue struct {
+	dst    Sink
+	events *list.List
+	cond   *sync.Cond
+	mu     sync.Mutex
+	closed bool
+}
+
+// NewQueue returns a queue to the provided Sink dst.
+func NewQueue(dst Sink) *Queue {
+	eq := Queue{
+		dst:    dst,
+		events: list.New(),
+	}
+
+	eq.cond = sync.NewCond(&eq.mu)
+	go eq.run()
+	return &eq
+}
+
+// Write accepts the events into the queue, only failing if the queue has
+// been closed.
+func (eq *Queue) Write(event Event) error {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	if eq.closed {
+		return ErrSinkClosed
+	}
+
+	eq.events.PushBack(event)
+	eq.cond.Signal() // signal waiters
+
+	return nil
+}
+
+// Close shuts down the event queue, flushing all queued events to the
+// destination sink before returning.
+func (eq *Queue) Close() error {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	if eq.closed {
+		return nil
+	}
+
+	// set closed flag
+	eq.closed = true
+	eq.cond.Signal() // signal flushes queue
+	eq.cond.Wait()   // wait for signal from last flush
+	return eq.dst.Close()
+}
+
+// run is the main goroutine to flush events to the target sink.
+func (eq *Queue) run() {
+	for {
+		event := eq.next()
+
+		if event == nil {
+			return // nil block means event queue is closed.
+		}
+
+		if err := eq.dst.Write(event); err != nil {
+			// TODO(aaronl): Dropping events could be bad depending
+			// on the application. We should have a way of
+			// communicating this condition. However, logging
+			// at a log level above debug may not be appropriate.
+			// Eventually, go-events should not use logrus at all,
+			// and should bubble up conditions like this through
+			// error values.
+			logrus.WithFields(logrus.Fields{
+				"event": event,
+				"sink":  eq.dst,
+			}).WithError(err).Debug("eventqueue: dropped event")
+		}
+	}
+}
+
+// next encompasses the critical section of the run loop. When the queue is
+// empty, it will block on the condition. If new data arrives, it will wake
+// and return an event. When closed, nil is returned.
+func (eq *Queue) next() Event {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	for eq.events.Len() < 1 {
+		if eq.closed {
+			eq.cond.Broadcast()
+			return nil
+		}
+
+		eq.cond.Wait()
+	}
+
+	front := eq.events.Front()
+	block := front.Value.(Event)
+	eq.events.Remove(front)
+
+	return block
+}
diff --git a/vendor/github.com/docker/go-events/retry.go b/vendor/github.com/docker/go-events/retry.go
new file mode 100644
index 00000000000..b7f0a542252
--- /dev/null
+++ b/vendor/github.com/docker/go-events/retry.go
@@ -0,0 +1,260 @@
+package events
+
+import (
+	"fmt"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+// RetryingSink retries the write until success or an ErrSinkClosed is
+// returned. The underlying sink must have a nonzero probability of
+// succeeding, or writes will block. Retry behavior is configured with a
+// RetryStrategy. Concurrent calls to a retrying sink are serialized through
+// the sink, meaning that if one is in-flight, another will not proceed.
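+//
+// A typical construction wraps a destination sink with a retry strategy
+// (sketch; dst and event are placeholders):
+//
+//	rs := NewRetryingSink(dst, NewBreaker(10, 100*time.Millisecond))
+//	defer rs.Close()
+//	_ = rs.Write(event)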
+type RetryingSink struct {
+	sink     Sink
+	strategy RetryStrategy
+	closed   chan struct{}
+	once     sync.Once
+}
+
+// NewRetryingSink returns a sink that will retry writes to a sink, backing
+// off on failure. The provided RetryStrategy (such as a Breaker or an
+// ExponentialBackoff) adjusts the retry behavior.
+func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink {
+	rs := &RetryingSink{
+		sink:     sink,
+		strategy: strategy,
+		closed:   make(chan struct{}),
+	}
+
+	return rs
+}
+
+// Write attempts to flush the events to the downstream sink until it succeeds
+// or the sink is closed.
+func (rs *RetryingSink) Write(event Event) error {
+	logger := logrus.WithField("event", event)
+
+retry:
+	select {
+	case <-rs.closed:
+		return ErrSinkClosed
+	default:
+	}
+
+	if backoff := rs.strategy.Proceed(event); backoff > 0 {
+		select {
+		case <-time.After(backoff):
+			// TODO(stevvooe): This branch holds up the next try. Before, we
+			// would simply break to the "retry" label and then possibly wait
+			// again. However, this requires all retry strategies to have a
+			// large probability of probing the sync for success, rather than
+			// just backing off and sending the request.
+		case <-rs.closed:
+			return ErrSinkClosed
+		}
+	}
+
+	if err := rs.sink.Write(event); err != nil {
+		if err == ErrSinkClosed {
+			// terminal!
+			return err
+		}
+
+		logger := logger.WithError(err) // shadow!!
+
+		if rs.strategy.Failure(event, err) {
+			logger.Errorf("retryingsink: dropped event")
+			return nil
+		}
+
+		logger.Errorf("retryingsink: error writing event, retrying")
+		goto retry
+	}
+
+	rs.strategy.Success(event)
+	return nil
+}
+
+// Close closes the sink and the underlying sink.
+func (rs *RetryingSink) Close() error {
+	rs.once.Do(func() {
+		close(rs.closed)
+	})
+
+	return nil
+}
+
+func (rs *RetryingSink) String() string {
+	// Serialize a copy of the RetryingSink without the sync.Once, to avoid
+	// a data race.
+	rs2 := map[string]interface{}{
+		"sink":     rs.sink,
+		"strategy": rs.strategy,
+		"closed":   rs.closed,
+	}
+	return fmt.Sprint(rs2)
+}
+
+// RetryStrategy defines a strategy for retrying event sink writes.
+//
+// All methods should be goroutine safe.
+type RetryStrategy interface {
+	// Proceed is called before every event send. If proceed returns a
+	// positive, non-zero duration, the retryer will back off by the provided
+	// duration.
+	//
+	// An event is provided, but may be ignored.
+	Proceed(event Event) time.Duration
+
+	// Failure reports a failure to the strategy. If this method returns true,
+	// the event should be dropped.
+	Failure(event Event, err error) bool
+
+	// Success should be called when an event is sent successfully.
+	Success(event Event)
+}
+
+// Breaker implements a circuit breaker retry strategy.
+//
+// The current implementation never drops events.
+type Breaker struct {
+	threshold int
+	recent    int
+	last      time.Time
+	backoff   time.Duration // time after which we retry after failure.
+	mu        sync.Mutex
+}
+
+var _ RetryStrategy = &Breaker{}
+
+// NewBreaker returns a breaker that will back off after the threshold has been
+// tripped. A Breaker is thread safe and may be shared by many goroutines.
+func NewBreaker(threshold int, backoff time.Duration) *Breaker {
+	return &Breaker{
+		threshold: threshold,
+		backoff:   backoff,
+	}
+}
+
+// Proceed checks the failures against the threshold.
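+// It returns 0 while fewer than threshold consecutive failures have been
+// recorded; after that it returns the time remaining until the backoff
+// window (last failure + backoff) has elapsed.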
+func (b *Breaker) Proceed(event Event) time.Duration {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if b.recent < b.threshold {
+		return 0
+	}
+
+	return b.last.Add(b.backoff).Sub(time.Now())
+}
+
+// Success resets the breaker.
+func (b *Breaker) Success(event Event) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	b.recent = 0
+	b.last = time.Time{}
+}
+
+// Failure records the failure and latest failure time.
+func (b *Breaker) Failure(event Event, err error) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	b.recent++
+	b.last = time.Now().UTC()
+	return false // never drop events.
+}
+
+var (
+	// DefaultExponentialBackoffConfig provides a default configuration for
+	// exponential backoff.
+	DefaultExponentialBackoffConfig = ExponentialBackoffConfig{
+		Base:   time.Second,
+		Factor: time.Second,
+		Max:    20 * time.Second,
+	}
+)
+
+// ExponentialBackoffConfig configures backoff parameters.
+//
+// Note that these parameters operate on the upper bound for choosing a random
+// value. For example, at Base=1s, a random value in [0,1s) will be chosen for
+// the backoff value.
+type ExponentialBackoffConfig struct {
+	// Base is the minimum bound for backing off after failure.
+	Base time.Duration
+
+	// Factor sets the amount of time by which the backoff grows with each
+	// failure.
+	Factor time.Duration
+
+	// Max is the absolute maximum bound for a single backoff.
+	Max time.Duration
+}
+
+// ExponentialBackoff implements random backoff with exponentially increasing
+// bounds as the number of consecutive failures increases.
+type ExponentialBackoff struct {
+	failures uint64 // consecutive failure counter (needs to be 64-bit aligned)
+	config   ExponentialBackoffConfig
+}
+
+// NewExponentialBackoff returns an exponential backoff strategy with the
+// desired config. Zero-valued fields fall back to the corresponding defaults.
+func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff {
+	return &ExponentialBackoff{
+		config: config,
+	}
+}
+
+// Proceed returns the next randomly bound exponential backoff time.
+func (b *ExponentialBackoff) Proceed(event Event) time.Duration {
+	return b.backoff(atomic.LoadUint64(&b.failures))
+}
+
+// Success resets the failures counter.
+func (b *ExponentialBackoff) Success(event Event) {
+	atomic.StoreUint64(&b.failures, 0)
+}
+
+// Failure increments the failure counter.
+func (b *ExponentialBackoff) Failure(event Event, err error) bool {
+	atomic.AddUint64(&b.failures, 1)
+	return false
+}
+
+// backoff calculates the amount of time to wait based on the number of
+// consecutive failures.
+func (b *ExponentialBackoff) backoff(failures uint64) time.Duration {
+	if failures <= 0 {
+		// proceed normally when there are no failures.
+		return 0
+	}
+
+	factor := b.config.Factor
+	if factor <= 0 {
+		factor = DefaultExponentialBackoffConfig.Factor
+	}
+
+	backoff := b.config.Base + factor*time.Duration(1<<(failures-1))
+
+	max := b.config.Max
+	if max <= 0 {
+		max = DefaultExponentialBackoffConfig.Max
+	}
+
+	if backoff > max || backoff < 0 {
+		backoff = max
+	}
+
+	// Choose a uniformly distributed value from [0, backoff).
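+	// For example, with the default config (Base=1s, Factor=1s, Max=20s),
+	// a third consecutive failure yields backoff = 1s + 1s*(1<<2) = 5s,
+	// so the wait is drawn uniformly from [0, 5s).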
+ return time.Duration(rand.Int63n(int64(backoff))) +} diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore index b35f8449bf2..d31b3781527 100644 --- a/vendor/github.com/klauspost/compress/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -23,3 +23,10 @@ _testmain.go *.test *.prof /s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index e8ff994f8bc..5b7cf781a34 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -17,6 +17,49 @@ This package provides various compression algorithms. # changelog +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +
+<details><summary>See Details</summary>
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster when running asynchronously, since the goroutine allocation much more effectively splits the workload. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+</details>
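The synchronous behavior described above is opted into through the encoder and decoder concurrency options. A minimal sketch of that usage (error handling trimmed; function name is illustrative):

```go
package main

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// compressSync writes src to dst as zstd without spawning goroutines:
// with concurrency set to 1, the encoder runs synchronously and can be
// pooled and garbage collected once the stream is closed.
func compressSync(dst io.Writer, src io.Reader) error {
	enc, err := zstd.NewWriter(dst, zstd.WithEncoderConcurrency(1))
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, src); err != nil {
		return err
	}
	return enc.Close()
}
```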
+
+* Feb 22, 2022 (v1.14.4)
+	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+	* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+	* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+	* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+	* flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+	* s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+	* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+	* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+	* zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+	* zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+	* flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+	* zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
 * Jan 11, 2022 (v1.14.1)
 	* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
 	* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
@@ -53,6 +96,9 @@ This package provides various compression algorithms.
 	* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
 	* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+<details>
+	<summary>See changes to v1.12.x</summary>
+
 * May 25, 2021 (v1.12.3)
 	* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
 	* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -74,9 +120,10 @@ This package provides various compression algorithms.
 	* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
 	* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
 	* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+</details>
-	<summary>See changes prior to v1.12.1</summary>
+	<summary>See changes to v1.11.x</summary>
 * Mar 26, 2021 (v1.11.13)
 	* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -135,7 +182,7 @@ This package provides various compression algorithms.
-	<summary>See changes prior to v1.11.0</summary>
+	<summary>See changes to v1.10.x</summary>
 * July 8, 2020 (v1.10.11)
 	* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
@@ -297,11 +344,6 @@ This package provides various compression algorithms.
 # deflate usage
-* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
-* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
-* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
-
 The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
 
 | old import | new import | Documentation
@@ -323,6 +365,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers consider using the stateless compress described below.
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
 # Stateless compression
 
 This package offers stateless compression as a special option for gzip/deflate.
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index 0b2e54972cd..d55ea2a7759 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -179,7 +179,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
 // matchlenLong will return the match length between offsets and t in src.
 // It is assumed that s > t, that t >=0 and s < len(src).
 func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
-	if debugDecode {
+	if debugDeflate {
 		if t >= s {
 			panic(fmt.Sprint("t >=s:", t, s))
 		}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index fd49efd75b1..25f6d1108fc 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -8,6 +8,7 @@ import (
 	"encoding/binary"
 	"fmt"
 	"io"
+	"math"
 )
 
 const (
@@ -24,6 +25,10 @@
 	codegenCodeCount = 19
 	badCode          = 255
 
+	// maxPredefinedTokens is the maximum number of tokens
+	// where we check if fixed size is smaller.
+	maxPredefinedTokens = 250
+
 	// bufferFlushSize indicates the buffer size
 	// after which bytes are flushed to the writer.
 	// Should preferably be a multiple of 6, since
@@ -36,8 +41,11 @@
 	bufferSize = bufferFlushSize + 8
 )
 
+// Minimum length code that emits bits.
+const lengthExtraBitsMinCode = 8
+
 // The number of extra bits needed by length code X - LENGTH_CODES_START.
-var lengthExtraBits = [32]int8{
+var lengthExtraBits = [32]uint8{
 	/* 257 */ 0, 0, 0,
 	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
 	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
@@ -51,6 +59,9 @@ var lengthBase = [32]uint8{
 	64, 80, 96, 112, 128, 160, 192, 224, 255,
 }
 
+// Minimum offset code that emits bits.
+const offsetExtraBitsMinCode = 4
+
 // offset code word extra bits.
 var offsetExtraBits = [32]int8{
 	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
@@ -78,10 +89,10 @@ func init() {
 	for i := range offsetCombined[:] {
 		// Don't use extended window values...
- if offsetBase[i] > 0x006000 { + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { continue } - offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i]) + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) } } @@ -97,7 +108,7 @@ type huffmanBitWriter struct { // Data waiting to be written is bytes[0:nbytes] // and then the low nbits of bits. bits uint64 - nbits uint16 + nbits uint8 nbytes uint8 lastHuffMan bool literalEncoding *huffmanEncoder @@ -215,7 +226,7 @@ func (w *huffmanBitWriter) write(b []byte) { _, w.err = w.writer.Write(b) } -func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { w.bits |= uint64(b) << (w.nbits & 63) w.nbits += nb if w.nbits >= 48 { @@ -571,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { // Fixed Huffman baseline. var literalEncoding = fixedLiteralEncoding var offsetEncoding = fixedOffsetEncoding - var size = w.fixedSize(extraBits) + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } // Dynamic Huffman? var numCodegens int @@ -672,19 +686,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b size = reuseSize } - if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) return } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return } // Check if we get a reasonable size decrease. if storable && ssize <= size { @@ -717,19 +733,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) // Store predefined, if we don't get a reasonable improvement. - if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { - // Store bytes, if we don't get an improvement. - if storable && ssize <= preSize { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. 
+ if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) return } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return } if storable && ssize <= size { @@ -833,9 +851,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits, nbits, nbytes := w.bits, w.nbits, w.nbytes for _, t := range tokens { - if t < matchType { + if t < 256 { //w.writeCode(lits[t.literal()]) - c := lits[t.literal()] + c := lits[t] bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { @@ -858,12 +876,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the length length := t.length() - lengthCode := lengthCode(length) + lengthCode := lengthCode(length) & 31 if false { - w.writeCode(lengths[lengthCode&31]) + w.writeCode(lengths[lengthCode]) } else { // inlined - c := lengths[lengthCode&31] + c := lengths[lengthCode] bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { @@ -883,10 +901,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } - extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) - if extraLengthBits > 0 { + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode&31]) + extraLength := int32(length - lengthBase[lengthCode]) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { @@ -907,10 +925,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } // Write the offset offset := t.offset() - offsetCode := offset >> 16 - offset &= matchOffsetOnlyMask + offsetCode := (offset >> 16) & 31 if false { - w.writeCode(offs[offsetCode&31]) + w.writeCode(offs[offsetCode]) } else { // inlined c := offs[offsetCode] @@ -932,11 +949,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } } - offsetComb := offsetCombined[offsetCode] - if offsetComb > 1<<16 { + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63) - nbits += uint16(offsetComb >> 16) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1002,6 +1020,29 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // https://stackoverflow.com/a/25454430 const guessHeaderSizeBits = 70 * 8 histogram(input, w.literalFreq[:numLiterals], fill) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... 
+ w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } w.literalFreq[endBlockMarker] = 1 w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) if fill { @@ -1019,8 +1060,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { estBits += estBits >> w.logNewTablePenalty // Store bytes, if we don't get a reasonable improvement. - ssize, storable := w.storedSize(input) if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -1031,7 +1074,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { if estBits < reuseSize { if debugDeflate { - //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") } // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) @@ -1065,6 +1108,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Go 1.16 LOVES having these on stack. At least 1.5x the speed. bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } // Unroll, write 3 codes/loop. // Fastest number of unrolls. for len(input) > 3 { @@ -1074,13 +1120,16 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) bits >>= (n * 8) & 63 nbits -= n * 8 - nbytes += uint8(n) + nbytes += n } if nbytes >= bufferFlushSize { if w.err != nil { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } @@ -1096,13 +1145,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... for _, t := range input { - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= uint64(c.code) << (nbits & 63) - nbits += c.len - if debugDeflate { - count += int(c.len) - } if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1114,17 +1156,33 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= uint64(c.code) << (nbits & 63) + nbits += c.len + if debugDeflate { + count += int(c.len) + } } // Restore... w.bits, w.nbits, w.nbytes = bits, nbits, nbytes if debugDeflate { - fmt.Println("wrote", count/8, "bytes") + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. + if w.nbits >= 48 { + w.writeOutBits() } + if eof || sync { w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index f35e00261d3..9ab497c275b 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -17,7 +17,8 @@ const ( // hcode is a huffman code with a bit code and bit length. 
type hcode struct { - code, len uint16 + code uint16 + len uint8 } type huffmanEncoder struct { @@ -56,7 +57,7 @@ type levelInfo struct { } // set sets the code and length of an hcode. -func (h *hcode) set(code uint16, length uint16) { +func (h *hcode) set(code uint16, length uint8) { h.len = length h.code = code } @@ -80,7 +81,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { var ch uint16 for ch = 0; ch < literalCount; ch++ { var bits uint16 - var size uint16 + var size uint8 switch { case ch < 144: // size 8, 000110000 .. 10111111 @@ -99,7 +100,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { bits = ch + 192 - 280 size = 8 } - codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + codes[ch] = hcode{code: reverseBits(bits, size), len: size} } return h } @@ -187,14 +188,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // of the level j ancestor. var leafCounts [maxBitsLimit][maxBitsLimit]int32 + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + for level := int32(1); level <= maxBits; level++ { // For every level, the first two items are the first two characters. // We initialize the levels as if we had already figured this out. levels[level] = levelInfo{ level: level, - lastFreq: int32(list[1].freq), - nextCharFreq: int32(list[2].freq), - nextPairFreq: int32(list[0].freq) + int32(list[1].freq), + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, } leafCounts[level][level] = 2 if level == 1 { @@ -205,8 +211,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // We need a total of 2*n - 2 items at top level and have already generated 2. levels[maxBits].needed = 2*n - 4 - level := maxBits - for { + level := uint32(maxBits) + for level < 16 { l := &levels[level] if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { // We've run out of both leafs and pairs. @@ -238,7 +244,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // more values in the level below l.lastFreq = l.nextPairFreq // Take leaf counts from the lower level, except counts[level] remains the same. - copy(leafCounts[level][:level], leafCounts[level-1][:level]) + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } levels[l.level-1].needed = 2 } @@ -296,7 +308,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN sortByLiteral(chunk) for _, node := range chunk { - h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint8(n)} code++ } list = list[0 : len(list)-int(bits)] @@ -309,6 +321,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // maxBits The maximum number of bits to use for any literal. 
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] // Number of non-zero literals count := 0 // Set list to be the set of all non-zero literals and their frequencies @@ -317,11 +330,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list[count] = literalNode{uint16(i), f} count++ } else { - list[count] = literalNode{} - h.codes[i].len = 0 + codes[i].len = 0 } } - list[len(freq)] = literalNode{} + list[count] = literalNode{} list = list[:count] if count <= 2 { diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index d5f62f6a2ca..414c0bea9fa 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -36,6 +36,13 @@ type lengthExtra struct { var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + // Initialize the fixedHuffmanDecoder only once upon first use. var fixedOnce sync.Once var fixedHuffmanDecoder huffmanDecoder @@ -559,221 +566,6 @@ func (f *decompressor) readHuffman() error { return nil } -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBlockGeneric() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
-		nb, b := f.nb, f.b
-		for {
-			for nb < n {
-				c, err := f.r.ReadByte()
-				if err != nil {
-					f.b = b
-					f.nb = nb
-					f.err = noEOF(err)
-					return
-				}
-				f.roffset++
-				b |= uint32(c) << (nb & regSizeMaskUint32)
-				nb += 8
-			}
-			chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
-			n = uint(chunk & huffmanCountMask)
-			if n > huffmanChunkBits {
-				chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
-				n = uint(chunk & huffmanCountMask)
-			}
-			if n <= nb {
-				if n == 0 {
-					f.b = b
-					f.nb = nb
-					if debugDecode {
-						fmt.Println("huffsym: n==0")
-					}
-					f.err = CorruptInputError(f.roffset)
-					return
-				}
-				f.b = b >> (n & regSizeMaskUint32)
-				f.nb = nb - n
-				v = int(chunk >> huffmanValueShift)
-				break
-			}
-		}
-	}
-
-	var n uint // number of bits extra
-	var length int
-	var err error
-	switch {
-	case v < 256:
-		f.dict.writeByte(byte(v))
-		if f.dict.availWrite() == 0 {
-			f.toRead = f.dict.readFlush()
-			f.step = (*decompressor).huffmanBlockGeneric
-			f.stepState = stateInit
-			return
-		}
-		goto readLiteral
-	case v == 256:
-		f.finishBlock()
-		return
-	// otherwise, reference to older data
-	case v < 265:
-		length = v - (257 - 3)
-		n = 0
-	case v < 269:
-		length = v*2 - (265*2 - 11)
-		n = 1
-	case v < 273:
-		length = v*4 - (269*4 - 19)
-		n = 2
-	case v < 277:
-		length = v*8 - (273*8 - 35)
-		n = 3
-	case v < 281:
-		length = v*16 - (277*16 - 67)
-		n = 4
-	case v < 285:
-		length = v*32 - (281*32 - 131)
-		n = 5
-	case v < maxNumLit:
-		length = 258
-		n = 0
-	default:
-		if debugDecode {
-			fmt.Println(v, ">= maxNumLit")
-		}
-		f.err = CorruptInputError(f.roffset)
-		return
-	}
-	if n > 0 {
-		for f.nb < n {
-			if err = f.moreBits(); err != nil {
-				if debugDecode {
-					fmt.Println("morebits n>0:", err)
-				}
-				f.err = err
-				return
-			}
-		}
-		length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
-		f.b >>= n & regSizeMaskUint32
-		f.nb -= n
-	}
-
-	var dist uint32
-	if f.hd == nil {
-		for f.nb < 5 {
-			if err = f.moreBits(); err != nil {
-				if debugDecode {
-					fmt.Println("morebits f.nb<5:", err)
-				}
-				f.err = err
-				return
-			}
-		}
-		dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
-		f.b >>= 5
-		f.nb -= 5
-	} else {
-		sym, err := f.huffSym(f.hd)
-		if err != nil {
-			if debugDecode {
-				fmt.Println("huffsym:", err)
-			}
-			f.err = err
-			return
-		}
-		dist = uint32(sym)
-	}
-
-	switch {
-	case dist < 4:
-		dist++
-	case dist < maxNumDist:
-		nb := uint(dist-2) >> 1
-		// have 1 bit in bottom of dist, need nb more.
-		extra := (dist & 1) << (nb & regSizeMaskUint32)
-		for f.nb < nb {
-			if err = f.moreBits(); err != nil {
-				if debugDecode {
-					fmt.Println("morebits f.nb<nb:", err)
-				}
-				f.err = err
-				return
-			}
-		}
-		extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
-		f.b >>= nb & regSizeMaskUint32
-		f.nb -= nb
-		dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
-	default:
-		if debugDecode {
-			fmt.Println("dist too big:", dist, maxNumDist)
-		}
-		f.err = CorruptInputError(f.roffset)
-		return
-	}
-
-	// No check on length; encoding can be prescient.
-	if dist > uint32(f.dict.histSize()) {
-		if debugDecode {
-			fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
-		}
-		f.err = CorruptInputError(f.roffset)
-		return
-	}
-
-	f.copyLen, f.copyDist = length, int(dist)
-	goto copyHistory
-	}
-
-copyHistory:
-	// Perform a backwards copy according to RFC section 3.2.3.
-	{
-		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
-		if cnt == 0 {
-			cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
-		}
-		f.copyLen -= cnt
-
-		if f.dict.availWrite() == 0 || f.copyLen > 0 {
-			f.toRead = f.dict.readFlush()
-			f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
-			f.stepState = stateDict
-			return
-		}
-		goto readLiteral
-	}
-}
-
 // Copy a single uncompressed data block from input to output.
 func (f *decompressor) dataBlock() {
 	// Uncompressed.
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
index cc6db27925c..8d632cea0f5 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() {
 	)
 	fr := f.r.(*bytes.Buffer)
 
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb := f.nb, f.b
+
 	switch f.stepState {
 	case stateInit:
 		goto readLiteral
@@ -39,41 +44,35 @@ readLiteral:
 		// cases, the chunks slice will be 0 for the invalid sequence, leading it
 		// satisfy the n == 0 check below.
 		n := uint(f.hl.maxRead)
-		// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-		// but is smart enough to keep local variables in registers, so use nb and b,
-		// inline call to moreBits and reassign b,nb back to f on return.
-		nb, b := f.nb, f.b
 		for {
-			for nb < n {
+			for fnb < n {
 				c, err := fr.ReadByte()
 				if err != nil {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					f.err = noEOF(err)
 					return
 				}
 				f.roffset++
-				b |= uint32(c) << (nb & regSizeMaskUint32)
-				nb += 8
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
 			}
-			chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+			chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
 			n = uint(chunk & huffmanCountMask)
 			if n > huffmanChunkBits {
-				chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+				chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
 				n = uint(chunk & huffmanCountMask)
 			}
-			if n <= nb {
+			if n <= fnb {
 				if n == 0 {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					if debugDecode {
 						fmt.Println("huffsym: n==0")
 					}
 					f.err = CorruptInputError(f.roffset)
 					return
 				}
-				f.b = b >> (n & regSizeMaskUint32)
-				f.nb = nb - n
+				fb = fb >> (n & regSizeMaskUint32)
+				fnb = fnb - n
 				v = int(chunk >> huffmanValueShift)
 				break
 			}
@@ -88,10 +87,12 @@ readLiteral:
 			f.toRead = f.dict.readFlush()
 			f.step = (*decompressor).huffmanBytesBuffer
 			f.stepState = stateInit
+			f.b, f.nb = fb, fnb
 			return
 		}
 		goto readLiteral
 	case v == 256:
+		f.b, f.nb = fb, fnb
 		f.finishBlock()
 		return
 	// otherwise, reference to older data
@@ -101,9 +102,10 @@ readLiteral:
 		val := decCodeToLen[(v - 257)]
 		length = int(val.length) + 3
 		n := uint(val.extra)
-		for f.nb < n {
+		for fnb < n {
 			c, err := fr.ReadByte()
 			if err != nil {
+				f.b, f.nb = fb, fnb
 				if debugDecode {
 					fmt.Println("morebits n>0:", err)
 				}
@@ -111,25 +113,27 @@ readLiteral:
 				return
 			}
 			f.roffset++
-			f.b |= uint32(c) << f.nb
-			f.nb += 8
+			fb |= uint32(c) << (fnb & regSizeMaskUint32)
+			fnb += 8
 		}
-		length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
-		f.b >>= n & regSizeMaskUint32
-		f.nb -= n
+		length += int(fb & bitMask32[n])
+		fb >>= n & regSizeMaskUint32
+		fnb -= n
 	default:
 		if debugDecode {
 			fmt.Println(v, ">= maxNumLit")
 		}
 		f.err = CorruptInputError(f.roffset)
+		f.b, f.nb = fb, fnb
 		return
 	}
 
 	var dist uint32
 	if f.hd == nil {
-		for f.nb < 5 {
+		for fnb < 5 {
 			c, err := fr.ReadByte()
 			if err != nil {
+				f.b, f.nb = fb, fnb
 				if debugDecode {
 					fmt.Println("morebits f.nb<5:", err)
 				}
@@ -137,12 +141,12 @@ readLiteral:
 				return
 			}
 			f.roffset++
-			f.b |= uint32(c) << f.nb
-			f.nb += 8
+			fb |= uint32(c) << (fnb & regSizeMaskUint32)
+			fnb += 8
 		}
-		dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
-		f.b >>= 5
-		f.nb -= 5
+		dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+		fb >>= 5
+		fnb -= 5
 	} else {
 		// Since a huffmanDecoder can be empty or be composed of a degenerate tree
 		// with single element, huffSym must error on these two edge cases. In both
@@ -152,38 +156,35 @@ readLiteral:
 		// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 		// but is smart enough to keep local variables in registers, so use nb and b,
 		// inline call to moreBits and reassign b,nb back to f on return.
-		nb, b := f.nb, f.b
 		for {
-			for nb < n {
+			for fnb < n {
 				c, err := fr.ReadByte()
 				if err != nil {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					f.err = noEOF(err)
 					return
 				}
 				f.roffset++
-				b |= uint32(c) << (nb & regSizeMaskUint32)
-				nb += 8
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
 			}
-			chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+			chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
 			n = uint(chunk & huffmanCountMask)
 			if n > huffmanChunkBits {
-				chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+				chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
 				n = uint(chunk & huffmanCountMask)
 			}
-			if n <= nb {
+			if n <= fnb {
 				if n == 0 {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					if debugDecode {
 						fmt.Println("huffsym: n==0")
 					}
 					f.err = CorruptInputError(f.roffset)
 					return
 				}
-				f.b = b >> (n & regSizeMaskUint32)
-				f.nb = nb - n
+				fb = fb >> (n & regSizeMaskUint32)
+				fnb = fnb - n
 				dist = uint32(chunk >> huffmanValueShift)
 				break
 			}
@@ -197,9 +198,10 @@ readLiteral:
 		nb := uint(dist-2) >> 1
 		// have 1 bit in bottom of dist, need nb more.
 		extra := (dist & 1) << (nb & regSizeMaskUint32)
-		for f.nb < nb {
+		for fnb < nb {
 			c, err := fr.ReadByte()
 			if err != nil {
+				f.b, f.nb = fb, fnb
 				if debugDecode {
 					fmt.Println("morebits f.nb<nb:", err)
 				}
 				f.err = err
 				return
 			}
 			f.roffset++
-			f.b |= uint32(c) << f.nb
-			f.nb += 8
+			fb |= uint32(c) << (fnb & regSizeMaskUint32)
+			fnb += 8
 		}
-		extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
-		f.b >>= nb & regSizeMaskUint32
-		f.nb -= nb
+		extra |= fb & bitMask32[nb]
+		fb >>= nb & regSizeMaskUint32
+		fnb -= nb
 		dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+		// slower: dist = bitMask32[nb+1] + 2 + extra
 	default:
+		f.b, f.nb = fb, fnb
 		if debugDecode {
 			fmt.Println("dist too big:", dist, maxNumDist)
 		}
@@ -224,6 +228,7 @@ readLiteral:
 	// No check on length; encoding can be prescient.
 	if dist > uint32(f.dict.histSize()) {
+		f.b, f.nb = fb, fnb
 		if debugDecode {
 			fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
 		}
@@ -248,10 +253,12 @@ copyHistory:
 		f.toRead = f.dict.readFlush()
 		f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
 		f.stepState = stateDict
+		f.b, f.nb = fb, fnb
 		return
 	}
 	goto readLiteral
 }
+	// Not reached
 }
 
 // Decode a single Huffman block from f.
@@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() {
 	)
 	fr := f.r.(*bytes.Reader)
 
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb := f.nb, f.b
+
 	switch f.stepState {
 	case stateInit:
 		goto readLiteral
@@ -283,41 +295,35 @@ readLiteral:
 		// cases, the chunks slice will be 0 for the invalid sequence, leading it
 		// satisfy the n == 0 check below.
 		n := uint(f.hl.maxRead)
-		// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-		// but is smart enough to keep local variables in registers, so use nb and b,
-		// inline call to moreBits and reassign b,nb back to f on return.
-		nb, b := f.nb, f.b
 		for {
-			for nb < n {
+			for fnb < n {
 				c, err := fr.ReadByte()
 				if err != nil {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					f.err = noEOF(err)
 					return
 				}
 				f.roffset++
-				b |= uint32(c) << (nb & regSizeMaskUint32)
-				nb += 8
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
 			}
-			chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+			chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
 			n = uint(chunk & huffmanCountMask)
 			if n > huffmanChunkBits {
-				chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+				chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
 				n = uint(chunk & huffmanCountMask)
 			}
-			if n <= nb {
+			if n <= fnb {
 				if n == 0 {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					if debugDecode {
 						fmt.Println("huffsym: n==0")
 					}
 					f.err = CorruptInputError(f.roffset)
 					return
 				}
-				f.b = b >> (n & regSizeMaskUint32)
-				f.nb = nb - n
+				fb = fb >> (n & regSizeMaskUint32)
+				fnb = fnb - n
 				v = int(chunk >> huffmanValueShift)
 				break
 			}
@@ -332,10 +338,12 @@ readLiteral:
 			f.toRead = f.dict.readFlush()
 			f.step = (*decompressor).huffmanBytesReader
 			f.stepState = stateInit
+			f.b, f.nb = fb, fnb
 			return
 		}
 		goto readLiteral
 	case v == 256:
+		f.b, f.nb = fb, fnb
 		f.finishBlock()
 		return
 	// otherwise, reference to older data
@@ -345,9 +353,10 @@ readLiteral:
 		val := decCodeToLen[(v - 257)]
 		length = int(val.length) + 3
 		n := uint(val.extra)
-		for f.nb < n {
+		for fnb < n {
 			c, err := fr.ReadByte()
 			if err != nil {
+				f.b, f.nb = fb, fnb
 				if debugDecode {
 					fmt.Println("morebits n>0:", err)
 				}
@@ -355,25 +364,27 @@ readLiteral:
 				return
 			}
 			f.roffset++
-			f.b |= uint32(c) << f.nb
-			f.nb += 8
+			fb |= uint32(c) << (fnb & regSizeMaskUint32)
+			fnb += 8
 		}
-		length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
-		f.b >>= n & regSizeMaskUint32
-		f.nb -= n
+		length += int(fb & bitMask32[n])
+		fb >>= n & regSizeMaskUint32
+		fnb -= n
 	default:
 		if debugDecode {
 			fmt.Println(v, ">= maxNumLit")
 		}
 		f.err = CorruptInputError(f.roffset)
+		f.b, f.nb = fb, fnb
 		return
 	}
 
 	var dist uint32
 	if f.hd == nil {
-		for f.nb < 5 {
+		for fnb < 5 {
 			c, err := fr.ReadByte()
 			if err != nil {
+				f.b, f.nb = fb, fnb
 				if debugDecode {
 					fmt.Println("morebits f.nb<5:", err)
 				}
@@ -381,12 +392,12 @@ readLiteral:
 				return
 			}
 			f.roffset++
-			f.b |= uint32(c) << f.nb
-			f.nb += 8
+			fb |= uint32(c) << (fnb & regSizeMaskUint32)
+			fnb += 8
 		}
-		dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
-		f.b >>= 5
-		f.nb -= 5
+		dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+		fb >>= 5
+		fnb -= 5
 	} else {
 		// Since a huffmanDecoder can be empty or be composed of a degenerate tree
 		// with single element, huffSym must error on these two edge cases. In both
@@ -396,38 +407,35 @@ readLiteral:
 		// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 		// but is smart enough to keep local variables in registers, so use nb and b,
 		// inline call to moreBits and reassign b,nb back to f on return.
-		nb, b := f.nb, f.b
 		for {
-			for nb < n {
+			for fnb < n {
 				c, err := fr.ReadByte()
 				if err != nil {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					f.err = noEOF(err)
 					return
 				}
 				f.roffset++
-				b |= uint32(c) << (nb & regSizeMaskUint32)
-				nb += 8
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
 			}
-			chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+			chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
 			n = uint(chunk & huffmanCountMask)
 			if n > huffmanChunkBits {
-				chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+				chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
 				n = uint(chunk & huffmanCountMask)
 			}
-			if n <= nb {
+			if n <= fnb {
 				if n == 0 {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					if debugDecode {
 						fmt.Println("huffsym: n==0")
 					}
 					f.err = CorruptInputError(f.roffset)
 					return
 				}
-				f.b = b >> (n & regSizeMaskUint32)
-				f.nb = nb - n
+				fb = fb >> (n & regSizeMaskUint32)
+				fnb = fnb - n
 				dist = uint32(chunk >> huffmanValueShift)
 				break
 			}
@@ -441,9 +449,10 @@ readLiteral:
 		nb := uint(dist-2) >> 1
 		// have 1 bit in bottom of dist, need nb more.
 		extra := (dist & 1) << (nb & regSizeMaskUint32)
-		for f.nb < nb {
+		for fnb < nb {
 			c, err := fr.ReadByte()
 			if err != nil {
+				f.b, f.nb = fb, fnb
 				if debugDecode {
 					fmt.Println("morebits f.nb<nb:", err)
 				}
 				f.err = err
 				return
 			}
 			f.roffset++
-			f.b |= uint32(c) << f.nb
-			f.nb += 8
+			fb |= uint32(c) << (fnb & regSizeMaskUint32)
+			fnb += 8
 		}
-		extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
-		f.b >>= nb & regSizeMaskUint32
-		f.nb -= nb
+		extra |= fb & bitMask32[nb]
+		fb >>= nb & regSizeMaskUint32
+		fnb -= nb
 		dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+		// slower: dist = bitMask32[nb+1] + 2 + extra
 	default:
+		f.b, f.nb = fb, fnb
 		if debugDecode {
 			fmt.Println("dist too big:", dist, maxNumDist)
 		}
@@ -468,6 +479,7 @@ readLiteral:
 	// No check on length; encoding can be prescient.
 	if dist > uint32(f.dict.histSize()) {
+		f.b, f.nb = fb, fnb
 		if debugDecode {
 			fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
 		}
@@ -492,10 +504,12 @@ copyHistory:
 		f.toRead = f.dict.readFlush()
 		f.step = (*decompressor).huffmanBytesReader // We need to continue this work
 		f.stepState = stateDict
+		f.b, f.nb = fb, fnb
 		return
 	}
 	goto readLiteral
 }
+	// Not reached
 }
 
 // Decode a single Huffman block from f.
@@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() {
 	)
 	fr := f.r.(*bufio.Reader)
 
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb := f.nb, f.b
+
 	switch f.stepState {
 	case stateInit:
 		goto readLiteral
@@ -527,41 +546,35 @@ readLiteral:
 		// cases, the chunks slice will be 0 for the invalid sequence, leading it
 		// satisfy the n == 0 check below.
 		n := uint(f.hl.maxRead)
-		// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-		// but is smart enough to keep local variables in registers, so use nb and b,
-		// inline call to moreBits and reassign b,nb back to f on return.
-		nb, b := f.nb, f.b
 		for {
-			for nb < n {
+			for fnb < n {
 				c, err := fr.ReadByte()
 				if err != nil {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					f.err = noEOF(err)
 					return
 				}
 				f.roffset++
-				b |= uint32(c) << (nb & regSizeMaskUint32)
-				nb += 8
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
 			}
-			chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+			chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
 			n = uint(chunk & huffmanCountMask)
 			if n > huffmanChunkBits {
-				chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+				chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
 				n = uint(chunk & huffmanCountMask)
 			}
-			if n <= nb {
+			if n <= fnb {
 				if n == 0 {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					if debugDecode {
 						fmt.Println("huffsym: n==0")
 					}
 					f.err = CorruptInputError(f.roffset)
 					return
 				}
-				f.b = b >> (n & regSizeMaskUint32)
-				f.nb = nb - n
+				fb = fb >> (n & regSizeMaskUint32)
+				fnb = fnb - n
 				v = int(chunk >> huffmanValueShift)
 				break
 			}
@@ -576,10 +589,12 @@ readLiteral:
 			f.toRead = f.dict.readFlush()
 			f.step = (*decompressor).huffmanBufioReader
 			f.stepState = stateInit
+			f.b, f.nb = fb, fnb
 			return
 		}
 		goto readLiteral
 	case v == 256:
+		f.b, f.nb = fb, fnb
 		f.finishBlock()
 		return
 	// otherwise, reference to older data
@@ -589,9 +604,10 @@ readLiteral:
 		val := decCodeToLen[(v - 257)]
 		length = int(val.length) + 3
 		n := uint(val.extra)
-		for f.nb < n {
+		for fnb < n {
 			c, err := fr.ReadByte()
 			if err != nil {
+				f.b, f.nb = fb, fnb
 				if debugDecode {
 					fmt.Println("morebits n>0:", err)
 				}
@@ -599,25 +615,27 @@ readLiteral:
 				return
 			}
 			f.roffset++
-			f.b |= uint32(c) << f.nb
-			f.nb += 8
+			fb |= uint32(c) << (fnb & regSizeMaskUint32)
+			fnb += 8
 		}
-		length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
-		f.b >>= n & regSizeMaskUint32
-		f.nb -= n
+		length += int(fb & bitMask32[n])
+		fb >>= n & regSizeMaskUint32
+		fnb -= n
 	default:
 		if debugDecode {
 			fmt.Println(v, ">= maxNumLit")
 		}
 		f.err = CorruptInputError(f.roffset)
+		f.b, f.nb = fb, fnb
 		return
 	}
 
 	var dist uint32
 	if f.hd == nil {
-		for f.nb < 5 {
+		for fnb < 5 {
 			c, err := fr.ReadByte()
 			if err != nil {
+				f.b, f.nb = fb, fnb
 				if debugDecode {
 					fmt.Println("morebits f.nb<5:", err)
 				}
@@ -625,12 +643,12 @@ readLiteral:
 				return
 			}
 			f.roffset++
-			f.b |= uint32(c) << f.nb
-			f.nb += 8
+			fb |= uint32(c) << (fnb & regSizeMaskUint32)
+			fnb += 8
 		}
-		dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
-		f.b >>= 5
-		f.nb -= 5
+		dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+		fb >>= 5
+		fnb -= 5
 	} else {
 		// Since a huffmanDecoder can be empty or be composed of a degenerate tree
 		// with single element, huffSym must error on these two edge cases. In both
@@ -640,38 +658,35 @@ readLiteral:
 		// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 		// but is smart enough to keep local variables in registers, so use nb and b,
 		// inline call to moreBits and reassign b,nb back to f on return.
-		nb, b := f.nb, f.b
 		for {
-			for nb < n {
+			for fnb < n {
 				c, err := fr.ReadByte()
 				if err != nil {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					f.err = noEOF(err)
 					return
 				}
 				f.roffset++
-				b |= uint32(c) << (nb & regSizeMaskUint32)
-				nb += 8
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
 			}
-			chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+			chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
 			n = uint(chunk & huffmanCountMask)
 			if n > huffmanChunkBits {
-				chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+				chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
 				n = uint(chunk & huffmanCountMask)
 			}
-			if n <= nb {
+			if n <= fnb {
 				if n == 0 {
-					f.b = b
-					f.nb = nb
+					f.b, f.nb = fb, fnb
 					if debugDecode {
 						fmt.Println("huffsym: n==0")
 					}
 					f.err = CorruptInputError(f.roffset)
 					return
 				}
-				f.b = b >> (n & regSizeMaskUint32)
-				f.nb = nb - n
+				fb = fb >> (n & regSizeMaskUint32)
+				fnb = fnb - n
 				dist = uint32(chunk >> huffmanValueShift)
 				break
 			}
@@ -685,9 +700,10 @@ readLiteral:
 		nb := uint(dist-2) >> 1
 		// have 1 bit in bottom of dist, need nb more.
 		extra := (dist & 1) << (nb & regSizeMaskUint32)
-		for f.nb < nb {
+		for fnb < nb {
 			c, err := fr.ReadByte()
 			if err != nil {
+				f.b, f.nb = fb, fnb
 				if debugDecode {
 					fmt.Println("morebits f.nb<nb:", err)
 				}
 				f.err = err
 				return
 			}
 			f.roffset++
-			f.b |= uint32(c) << f.nb
-			f.nb += 8
+			fb |= uint32(c) << (fnb & regSizeMaskUint32)
+			fnb += 8
 		}
-		extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
-		f.b >>= nb & regSizeMaskUint32
-		f.nb -= nb
+		extra |= fb & bitMask32[nb]
+		fb >>= nb & regSizeMaskUint32
+		fnb -= nb
 		dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+		// slower: dist = bitMask32[nb+1] + 2 + extra
 	default:
+		f.b, f.nb = fb, fnb
 		if debugDecode {
 			fmt.Println("dist too big:", dist, maxNumDist)
 		}
@@ -712,6 +730,7 @@ readLiteral:
 	// No check on length; encoding can be prescient.
 	if dist > uint32(f.dict.histSize()) {
+		f.b, f.nb = fb, fnb
 		if debugDecode {
 			fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
 		}
@@ -736,10 +755,12 @@ copyHistory:
 		f.toRead = f.dict.readFlush()
 		f.step = (*decompressor).huffmanBufioReader // We need to continue this work
 		f.stepState = stateDict
+		f.b, f.nb = fb, fnb
 		return
 	}
 	goto readLiteral
 }
+	// Not reached
 }
 
 // Decode a single Huffman block from f.
@@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() {
 	)
 	fr := f.r.(*strings.Reader)
 
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb := f.nb, f.b
+
 	switch f.stepState {
 	case stateInit:
 		goto readLiteral
@@ -771,41 +797,286 @@ readLiteral:
 		// cases, the chunks slice will be 0 for the invalid sequence, leading it
 		// satisfy the n == 0 check below.
n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanGenericReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below.
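The dist computation above follows RFC 1951 section 3.2.5: distance codes 4 and up carry nb extra bits, and the low bit of the code is folded in as the top extra bit. A hedged standalone version of the same arithmetic (baseDistance is a made-up helper; for example, code 6 gives nb=2 and base 9, covering distances 9-12):

```go
package main

import "fmt"

// baseDistance returns the smallest distance a DEFLATE distance code can
// encode, plus the number of extra bits that follow it in the stream.
// This mirrors dist = 1<<(nb+1) + 1 + extra with extra's top bit taken
// from the low bit of the code.
func baseDistance(code uint32) (base uint32, extraBits uint) {
	if code < 4 {
		return code + 1, 0 // codes 0..3 encode distances 1..4 directly
	}
	nb := uint(code-2) >> 1 // number of extra bits
	top := (code & 1) << nb // code's low bit becomes the top extra bit
	return 1<<(nb+1) + 1 + top, nb
}

func main() {
	fmt.Println(baseDistance(6))  // 9 2   -> distances 9..12
	fmt.Println(baseDistance(29)) // 24577 13 -> distances 24577..32768
}
```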
+ n := uint(f.hl.maxRead) + for { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -818,12 +1089,14 @@ readLiteral: f.dict.writeByte(byte(v)) if f.dict.availWrite() == 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader + f.step = (*decompressor).huffmanGenericReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -833,9 +1106,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -843,25 +1117,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -869,12 +1145,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -884,38 +1160,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return.
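Several hunks above swap the computed mask uint32(1<<n - 1) for a table lookup fb & bitMask32[n], trading a variable shift for an indexed load on the hot path. bitMask32 is presumably defined once in the package along these lines (a sketch, not necessarily the library's literal definition):

```go
package masksketch

// bitMask32[n] == (1<<n)-1 for n in [0,31], so fb & bitMask32[n]
// extracts the low n bits without a variable shift in the hot loop.
var bitMask32 = func() (m [32]uint32) {
	for i := range m {
		m[i] = 1<<uint(i) - 1
	}
	return m
}()

// lowBits is the one-liner the decoders inline directly.
func lowBits(fb uint32, n uint) uint32 { return fb & bitMask32[n] }
```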
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -929,9 +1202,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -956,6 +1232,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -978,12 +1255,14 @@ copyHistory: if f.dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.step = (*decompressor).huffmanGenericReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } func (f *decompressor) huffmanBlockDecoder() func() { @@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() { return f.huffmanBufioReader case *strings.Reader: return f.huffmanStringsReader + case Reader: + return f.huffmanGenericReader default: - return f.huffmanBlockGeneric + return f.huffmanGenericReader } } diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 1e5eea3968a..0f14f8d63b4 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,6 +1,10 @@ package flate -import "fmt" +import ( + "encoding/binary" + "fmt" + "math/bits" +) // fastGen maintains the table for matches, // and the previous byte block for level 2. @@ -116,7 +120,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible.
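huffmanBlockDecoder above picks one of four near-identical bodies so that fr inside each hot loop has a concrete type and fr.ReadByte() is a direct, inlinable call instead of an interface call per byte. A hedged toy illustration of that devirtualization idea (sumBytes is hypothetical):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// sumBytes shows the specialization-by-concrete-type idea in miniature:
// the *bytes.Buffer branch makes ReadByte a direct call the compiler can
// inline, while the fallback pays an interface call on every read.
func sumBytes(r io.Reader) int {
	n := 0
	if fr, ok := r.(*bytes.Buffer); ok {
		for {
			c, err := fr.ReadByte()
			if err != nil {
				return n
			}
			n += int(c)
		}
	}
	buf := make([]byte, 1)
	for {
		m, err := r.Read(buf)
		if m == 1 {
			n += int(buf[0])
		}
		if err != nil {
			return n
		}
	}
}

func main() {
	fmt.Println(sumBytes(bytes.NewBufferString("abc"))) // 294
}
```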
t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + var l = int32(4) + if false { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else { + // inlined: + a := src[s+4:] + b := src[t+4:] + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + l += int32(bits.TrailingZeros64(diff) >> 3) + break + } + l += 8 + a = a[8:] + b = b[8:] + } + if len(a) < 8 { + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + break + } + l++ + } + } + } // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { @@ -125,11 +154,43 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + if false { + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + } else { + // Inlined... + xoffset := uint32(s - t - baseMatchOffset) + xlength := l + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + dst.extraHist[lengthCodes1[uint8(xl)]]++ + dst.offHist[oc]++ + dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset) + dst.n++ + } + } s += l nextEmit = s if nextS >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 234c4389ab3..8603fbd55ad 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -134,7 +134,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index c22b4244a5c..039639f8989 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -5,7 +5,7 @@ import "fmt" // fastEncL3 type fastEncL3 struct { fastGen - table [tableSize]tableEntryPrev + table [1 << 16]tableEntryPrev } // Encode uses a similar algorithm to level 2, will check up to two candidates. @@ -13,6 +13,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { const ( inputMargin = 8 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits ) if debugDeflate && e.cur < 0 { @@ -73,7 +75,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry for { - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) s = nextS nextS = s + 1 + (s-nextEmit)>>skipLog if nextS > sLimit { @@ -141,7 +143,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) @@ -156,7 +166,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // Index first pair after match end.
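The inlined match extension at the start of this hunk compares 8 bytes per iteration: the XOR of two equal words is zero, and the index of the first mismatching byte falls out of the trailing-zero count of the XOR. The same idea as a standalone, hedged function (matchLen here is illustrative, not the library's matchlenLong):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns how many leading bytes of a and b are equal,
// comparing 8 bytes at a time where possible.
func matchLen(a, b []byte) (n int) {
	if len(b) < len(a) {
		a, b = b, a // ensure len(a) <= len(b) for the word loads below
	}
	for len(a) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3 // bit index -> byte index
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	for i := range a { // byte-wise tail
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("deflate-stream"), []byte("deflate-strain"))) // 12
}
```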
if int(t+4) < len(src) && t > 0 { cv := load3232(src, t) - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) e.table[nextHash] = tableEntryPrev{ Prev: e.table[nextHash].Cur, Cur: tableEntry{offset: e.cur + t}, @@ -165,30 +175,31 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { goto emitRemainder } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-3 to s. - x := load6432(src, s-3) - prevHash := hash(uint32(x)) - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 3}, + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 5 { + nextHash := hash4u(load3232(src, i), tableBits) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} } - x >>= 8 - prevHash = hash(uint32(x)) + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 2}, } x >>= 8 - prevHash = hash(uint32(x)) + prevHash = hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 1}, } x >>= 8 - currHash := hash(uint32(x)) + currHash := hash4u(uint32(x), tableBits) candidates := e.table[currHash] cv = uint32(x) e.table[currHash] = tableEntryPrev{ @@ -200,15 +211,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { candidate = candidates.Cur minOffset := e.cur + s - (maxMatchOffset - 4) - if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { - // We only check if value mismatches. - // Offset will always be invalid in other cases. + if candidate.offset > minOffset { + if cv == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } candidate = candidates.Prev if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } + // Match at prev... 
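The level 3 table now keeps two candidates per bucket (Cur and Prev, as probed above and continued just below): lookups try the newest entry first and fall back to the previous one, and inserts demote Cur to Prev. A simplified sketch under those assumptions (probe and insert are made-up helpers; only the field names mirror the diff):

```go
package l3sketch

// tableEntryPrev-style bucket: the two most recent positions that hashed
// to this slot.
type tableEntry struct{ offset int32 }
type tableEntryPrev struct{ Cur, Prev tableEntry }

// insert makes pos the newest candidate and demotes the old one.
func insert(tbl []tableEntryPrev, h uint32, pos int32) {
	tbl[h] = tableEntryPrev{Prev: tbl[h].Cur, Cur: tableEntry{offset: pos}}
}

// probe returns the first candidate, newest first, whose offset is recent
// enough and whose stored 4 bytes (fetched via load) still equal cv.
func probe(tbl []tableEntryPrev, h uint32, cv uint32, minOffset int32,
	load func(int32) uint32) (int32, bool) {
	c := tbl[h]
	if c.Cur.offset > minOffset && load(c.Cur.offset) == cv {
		return c.Cur.offset, true
	}
	if c.Prev.offset > minOffset && load(c.Prev.offset) == cv {
		return c.Prev.offset, true
	}
	return 0, false
}
```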
+ continue } } cv = uint32(x >> 8) diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index e62f0c02b1e..1cbffa1aefe 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -135,7 +135,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 293a3a320b7..4b97576bd38 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -210,7 +210,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index a709977ec49..62888edf3cd 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -243,7 +243,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if false { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 53e89912463..544162a4318 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -249,7 +249,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index 3a9618ee193..d818790c132 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -13,11 +13,10 @@ import ( ) const ( - // From top - // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused - // 8 bits: xlength = length - MIN_MATCH_LENGTH - // 5 bits offsetcode - // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits lengthShift = 22 offsetMask = 1< 258 { // We need to have at least baseMatchLength left over for next loop. 
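The branch continued just below enforces the rule named in the trailing comment: a match token encodes lengths 3..258, so an overlong match is emitted in chunks of at most 258 while never leaving a remainder below baseMatchLength (3), which could not be encoded. A hedged sketch with worked cases (splitMatch is illustrative): 260 splits as 255+5 rather than 258+2.

```go
package main

import "fmt"

const baseMatchLength = 3 // minimum encodable match length in DEFLATE

// splitMatch breaks a total match length into encodable chunks, keeping
// every chunk (including the last) at least baseMatchLength long.
func splitMatch(xlength int32) (chunks []int32) {
	for xlength > 0 {
		xl := xlength
		if xl > 258 {
			if xl > 258+baseMatchLength {
				xl = 258 // remainder stays comfortably >= 3
			} else {
				xl = 258 - baseMatchLength // e.g. 260 -> 255 + 5, never 258 + 2
			}
		}
		xlength -= xl
		chunks = append(chunks, xl)
	}
	return chunks
}

func main() {
	fmt.Println(splitMatch(260)) // [255 5]
	fmt.Println(splitMatch(600)) // [258 258 84]
}
```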
- xl = 258 - baseMatchLength + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } } xlength -= xl xl -= baseMatchLength t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc]++ + t.offHist[oc&31]++ t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } -// The code is never more than 8 bits, but is returned as uint32 for convenience. -func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) } +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } // Returns the offset code corresponding to a specific offset func offsetCode(off uint32) uint32 { diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go new file mode 100644 index 00000000000..ff2c69d60cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/autogen.go @@ -0,0 +1,5 @@ +package huff0 + +//go:generate go run generate.go +//go:generate asmfmt -w decompress_amd64.s +//go:generate asmfmt -w decompress_8b_amd64.s diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index a4979e8868a..451160edda3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -8,115 +8,10 @@ package huff0 import ( "encoding/binary" "errors" + "fmt" "io" ) -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -263,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { return uint16(b.value >> ((64 - n) & 63)) } +// peekTopBits(n) is equvialent to peekBitFast(64 - n) +func (b *bitReaderShifted) peekTopBits(n uint8) uint16 { + return uint16(b.value >> n) +} + func (b *bitReaderShifted) advance(n uint8) { b.bitsRead += n b.value <<= n & 63 @@ -318,10 +225,17 @@ func (b *bitReaderShifted) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 8323dc05389..bc95ac623bd 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. 
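The math.MaxUint16 guards above exist because of the 4X framing written here: three little-endian uint16 stream sizes precede the four concatenated streams, and the fourth size is implicit in whatever remains, so only the first three are stored. A hedged sketch of that layout (frame4X is a made-up helper, not the library's writer):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

// frame4X lays out a 4X block: a 6-byte jump table of three LE uint16
// sizes, then the four streams back to back.
func frame4X(streams [4][]byte) ([]byte, error) {
	out := make([]byte, 6)
	for i := 0; i < 3; i++ {
		if len(streams[i]) > math.MaxUint16 {
			return nil, errors.New("stream size does not fit the jump table")
		}
		binary.LittleEndian.PutUint16(out[i*2:], uint16(len(streams[i])))
	}
	for _, s := range streams {
		out = append(out, s...)
	}
	return out, nil
}

func main() {
	b, _ := frame4X([4][]byte{{1}, {2, 2}, {3}, {4}})
	fmt.Println(b) // [1 0 2 0 1 0 1 2 2 3 4]
}
```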
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 2a06bd1a7e5..04f6529955e 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) @@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} } // Decompress1X will decompress a 1X encoded stream. @@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { dt := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 for br.off >= 8 { @@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { bitsLeft -= nBits dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 switch d.actualTableLog { @@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) 
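The decompress.go changes below thread a *sync.Pool of [4][256]byte scratch tables through the Decoder (via s.decPool), and every return path now calls d.bufs.Put before exiting. The pattern in minimal, hedged form (decPool and withScratch are illustrative; the hand-written decoders avoid defer and call Put explicitly on each return path instead):

```go
package poolsketch

import "sync"

// decPool hands out reusable scratch tables so concurrent decodes do not
// allocate 1 KiB per call.
var decPool = sync.Pool{New: func() interface{} { return new([4][256]byte) }}

// withScratch borrows a table, runs work, and returns the table.
func withScratch(work func(buf *[4][256]byte)) {
	buf := decPool.Get().(*[4][256]byte)
	work(buf)
	decPool.Put(buf) // hand back for the next decoder
}
```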
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } } default: + d.bufs.Put(bufs) return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 const shift = 56 @@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -688,195 +721,10 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. 
- if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -916,12 +764,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. 
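In the rewritten 4X loops below, bufoff grows from 64 (256/4) to 256 because the scratch becomes four independent [256]byte rows, and the flush test becomes off == 0: off is declared uint8 and advanced by 4 per iteration, so it wraps to zero exactly when all 256 slots per stream are full. A tiny self-contained demonstration of that wrap:

```go
package main

import "fmt"

func main() {
	var off uint8 // same type as the decoder's buffer cursor
	flushes := 0
	for i := 0; i < 200; i++ {
		off += 4
		if off == 0 { // uint8 wrapped: 64*4 = 256 slots are full
			flushes++
		}
	}
	fmt.Println(flushes) // 3: wraps after iterations 64, 128 and 192
}
```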
- const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -942,8 +790,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -951,8 +799,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -960,8 +808,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -969,8 +817,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { @@ -987,8 +835,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -996,8 +844,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1005,8 +853,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1014,25 +862,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + 
copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1040,23 +889,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1076,7 +933,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -1084,16 +942,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { v := single[uint8(br.value>>shift)].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -1135,12 +999,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -1150,104 +1014,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. 
const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 
8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1257,21 +1126,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1291,7 +1166,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { } } // end inline... 
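The tail decode above bounds each stream with endsAt so a corrupt stream cannot run into its neighbour's quarter of the output, with remaining() supplying the bit budget. The endsAt arithmetic as a hedged standalone helper (streamBounds is illustrative, not from the diff):

```go
package main

import "fmt"

// streamBounds computes the slice of out that stream i may still write:
// with dstEvery bytes per stream and decoded bytes already flushed in
// lockstep, each stream owes remainBytes more, clamped to len(out) for
// the short final stream. Ending anywhere else signals corruption
// ("short output block" / "stream overrun 4" above).
func streamBounds(lenOut, dstEvery, decoded, i int) (start, endsAt int) {
	remainBytes := dstEvery - decoded/4
	start = dstEvery * i
	endsAt = start + remainBytes
	if endsAt > lenOut {
		endsAt = lenOut
	}
	return start, endsAt
}

func main() {
	fmt.Println(streamBounds(1000, 250, 960, 3)) // 750 760
}
```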
- if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -1299,16 +1175,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { v := single[br.peekByteFast()].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s new file mode 100644 index 00000000000..0d6cb1a962b --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s @@ -0,0 +1,488 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +#define bufoff 256 // see decompress.go, we're using [4][256]byte table + +// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, +// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) +TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8 +#define off R8 +#define buffer DI +#define table SI + +#define br_bits_read R9 +#define br_value R10 +#define br_offset R11 +#define peek_bits R12 +#define exhausted DX + +#define br0 R13 +#define br1 R14 +#define br2 R15 +#define br3 BP + + MOVQ BP, 0(SP) + + XORQ exhausted, exhausted // exhausted = false + XORQ off, off // off = 0 + + MOVBQZX peekBits+32(FP), peek_bits + MOVQ buf+40(FP), buffer + MOVQ tbl+48(FP), table + + MOVQ pbr0+0(FP), br0 + MOVQ pbr1+8(FP), br1 + MOVQ pbr2+16(FP), br2 + MOVQ pbr3+24(FP), br3 + +main_loop: + + // const stream = 0 + // br0.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read + MOVQ bitReaderShifted_value(br0), br_value + MOVQ bitReaderShifted_off(br0), br_offset + + // if b.bitsRead >= 32 { + CMPQ br_bits_read, $32 + JB skip_fill0 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br0), AX + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVQ br_bits_read, CX + SHLQ CL, AX + ORQ AX, br_value + + // exhausted = exhausted || (br0.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill0: + + // val0 := br0.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br0.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val1 := br0.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br0.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two 
writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 0(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br0.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v2
+
+	// br0.advance(uint8(v2.entry))
+	MOVB AH, BL  // BL = uint8(v2.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// val3 := br0.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v3
+
+	// br0.advance(uint8(v3.entry))
+	MOVB AH, BH  // BH = uint8(v3.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CX, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 0+2(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+	MOVQ br_value, bitReaderShifted_value(br0)
+	MOVQ br_offset, bitReaderShifted_off(br0)
+
+	// const stream = 1
+	// br1.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+	MOVQ bitReaderShifted_value(br1), br_value
+	MOVQ bitReaderShifted_off(br1), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB skip_fill1
+
+	SUBQ $32, br_bits_read  // b.bitsRead -= 32
+	SUBQ $4, br_offset  // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br1), AX
+	MOVL 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br1.off < 4)
+	CMPQ br_offset, $4
+	SETLT DL
+	ORB DL, DH
+
+	// }
+skip_fill1:
+
+	// val0 := br1.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v0
+
+	// br1.advance(uint8(v0.entry))
+	MOVB AH, BL  // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// val1 := br1.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v1
+
+	// br1.advance(uint8(v1.entry))
+	MOVB AH, BH  // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CX, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 256(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br1.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v2
+
+	// br1.advance(uint8(v2.entry))
+	MOVB AH, BL  // BL = uint8(v2.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// val3 := br1.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v3
+
+	// br1.advance(uint8(v3.entry))
+	MOVB AH, BH  // BH = uint8(v3.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CX, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 256+2(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+	MOVQ br_value, bitReaderShifted_value(br1)
+	MOVQ br_offset, bitReaderShifted_off(br1)
+
+	// const stream = 2
+	// br2.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+	MOVQ bitReaderShifted_value(br2), br_value
+	MOVQ bitReaderShifted_off(br2), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB skip_fill2
+
+	SUBQ $32, br_bits_read  // b.bitsRead -= 32
+	SUBQ $4, br_offset  // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br2), AX
+	MOVL 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br2.off < 4)
+	CMPQ br_offset, $4
+	SETLT DL
+	ORB DL, DH
+
+	// }
+skip_fill2:
+
+	// val0 := br2.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v0
+
+	// br2.advance(uint8(v0.entry))
+	MOVB AH, BL  // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// val1 := br2.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v1
+
+	// br2.advance(uint8(v1.entry))
+	MOVB AH, BH  // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CX, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 512(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br2.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v2
+
+	// br2.advance(uint8(v2.entry))
+	MOVB AH, BL  // BL = uint8(v2.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// val3 := br2.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v3
+
+	// br2.advance(uint8(v3.entry))
+	MOVB AH, BH  // BH = uint8(v3.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CX, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 512+2(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+	MOVQ br_value, bitReaderShifted_value(br2)
+	MOVQ br_offset, bitReaderShifted_off(br2)
+
+	// const stream = 3
+	// br3.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+	MOVQ bitReaderShifted_value(br3), br_value
+	MOVQ bitReaderShifted_off(br3), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB skip_fill3
+
+	SUBQ $32, br_bits_read  // b.bitsRead -= 32
+	SUBQ $4, br_offset  // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br3), AX
+	MOVL 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br3.off < 4)
+	CMPQ br_offset, $4
+	SETLT DL
+	ORB DL, DH
+
+	// }
+skip_fill3:
+
+	// val0 := br3.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v0
+
+	// br3.advance(uint8(v0.entry))
+	MOVB AH, BL  // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// val1 := br3.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v1
+
+	// br3.advance(uint8(v1.entry))
+	MOVB AH, BH  // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CX, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 768(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br3.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v2
+
+	// br3.advance(uint8(v2.entry))
+	MOVB AH, BL  // BL = uint8(v2.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// val3 := br3.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v3
+
+	// br3.advance(uint8(v3.entry))
+	MOVB AH, BH  // BH = uint8(v3.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CX, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 768+2(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+	MOVQ br_value, bitReaderShifted_value(br3)
+	MOVQ br_offset, bitReaderShifted_off(br3)
+
+	ADDQ $4, off  // off += 4
+
+	TESTB DH, DH  // any br[i].off < 4?
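+	// Editor's note (not in the upstream source): DH accumulates the per-stream
+	// "exhausted" flags set above; a non-zero value means some stream has fewer
+	// than 4 bytes left, so we exit and let the Go caller decode the tail.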
+	JNZ end
+
+	CMPQ off, $bufoff
+	JL main_loop
+
+end:
+	MOVQ 0(SP), BP
+
+	MOVB off, ret+56(FP)
+	RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
new file mode 100644
index 00000000000..6d477a2c11e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
@@ -0,0 +1,197 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+	MOVQ BP, 0(SP)
+
+	XORQ exhausted, exhausted  // exhausted = false
+	XORQ off, off  // off = 0
+
+	MOVBQZX peekBits+32(FP), peek_bits
+	MOVQ buf+40(FP), buffer
+	MOVQ tbl+48(FP), table
+
+	MOVQ pbr0+0(FP), br0
+	MOVQ pbr1+8(FP), br1
+	MOVQ pbr2+16(FP), br2
+	MOVQ pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+	// const stream = {{ var "id" }}
+	// br{{ var "id"}}.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+	MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
+	MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB skip_fill{{ var "id" }}
+
+	SUBQ $32, br_bits_read  // b.bitsRead -= 32
+	SUBQ $4, br_offset  // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
+	MOVL 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br{{ var "id"}}.off < 4)
+	CMPQ br_offset, $4
+	SETLT DL
+	ORB DL, DH
+	// }
+skip_fill{{ var "id" }}:
+
+	// val0 := br{{ var "id"}}.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v0
+
+	// br{{ var "id"}}.advance(uint8(v0.entry))
+	MOVB AH, BL  // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// val1 := br{{ var "id"}}.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v1
+
+	// br{{ var "id"}}.advance(uint8(v1.entry))
+	MOVB AH, BH  // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CX, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br{{ var "id"}}.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v2
+
+	// br{{ var "id"}}.advance(uint8(v2.entry))
+	MOVB AH, BL  // BL = uint8(v2.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// val3 := br{{ var "id"}}.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v3
+
+	// br{{ var "id"}}.advance(uint8(v3.entry))
+	MOVB AH, BH  // BH = uint8(v3.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ CX, br_value  // value <<= n
+	ADDQ CX, br_bits_read  // bits_read += n
+
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+	MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
+	MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+	{{ set "id" "0" }}
+	{{ set "ofs" "0" }}
+	{{ set "bufofs" "0" }} {{/* id * bufoff */}}
+	{{ template "decode_2_values_x86" . }}
+
+	{{ set "id" "1" }}
+	{{ set "ofs" "8" }}
+	{{ set "bufofs" "256" }}
+	{{ template "decode_2_values_x86" . }}
+
+	{{ set "id" "2" }}
+	{{ set "ofs" "16" }}
+	{{ set "bufofs" "512" }}
+	{{ template "decode_2_values_x86" . }}
+
+	{{ set "id" "3" }}
+	{{ set "ofs" "24" }}
+	{{ set "bufofs" "768" }}
+	{{ template "decode_2_values_x86" . }}
+
+	ADDQ $4, off  // off += 4
+
+	TESTB DH, DH  // any br[i].off < 4?
+	JNZ end
+
+	CMPQ off, $bufoff
+	JL main_loop
+end:
+	MOVQ 0(SP), BP
+
+	MOVB off, ret+56(FP)
+	RET
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 00000000000..ce8e93bcd06
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,181 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// that uses an asm implementation of its main loop.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+)
+
+// decompress4x_main_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+//go:noescape
+func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// decompress4x_8b_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+//go:noescape
+func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// fallback8BitSize is the output size below which the pure Go version is faster.
+const fallback8BitSize = 800
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
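+// On success the returned slice is dst grown to its full capacity;
+// decoding must produce exactly cap(dst) bytes or an error is returned.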
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if len(src) < 6+(4*1) {
+		return nil, errors.New("input too small")
+	}
+
+	use8BitTables := d.actualTableLog <= 8
+	if cap(dst) < fallback8BitSize && use8BitTables {
+		return d.decompress4X8bit(dst, src)
+	}
+	var br [4]bitReaderShifted
+	// Decode "jump table"
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	buf := d.buffer()
+	var off uint8
+	var decoded int
+
+	const debug = false
+
+	// see: bitReaderShifted.peekBitsFast()
+	peekBits := uint8((64 - d.actualTableLog) & 63)
+
+	// Decode 2 values from each decoder/loop.
+	const bufoff = 256
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		if use8BitTables {
+			off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+		} else {
+			off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+		}
+		if debug {
+			fmt.Print("DEBUG: ")
+			fmt.Printf("off=%d,", off)
+			for i := 0; i < 4; i++ {
+				fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
+					i, br[i].bitsRead, br[i].value, br[i].off)
+			}
+			fmt.Println("")
+		}
+
+		// off is a uint8: a full pass leaves it at bufoff (256), which wraps to 0.
+		// A non-zero return therefore means a stream ran low and the buffers are
+		// only partially filled.
+		if off != 0 {
+			break
+		}
+
+		if bufoff > dstEvery {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 1")
+		}
+		copy(out, buf[0][:])
+		copy(out[dstEvery:], buf[1][:])
+		copy(out[dstEvery*2:], buf[2][:])
+		copy(out[dstEvery*3:], buf[3][:])
+		out = out[bufoff:]
+		decoded += bufoff * 4
+		// There must be at least 3 buffers left.
+		if len(out) < dstEvery*3 {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 2")
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			br.fill()
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 00000000000..2edad3ea5a4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,506 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+	MOVQ BP, 0(SP)
+
+	XORQ exhausted, exhausted  // exhausted = false
+	XORQ off, off  // off = 0
+
+	MOVBQZX peekBits+32(FP), peek_bits
+	MOVQ buf+40(FP), buffer
+	MOVQ tbl+48(FP), table
+
+	MOVQ pbr0+0(FP), br0
+	MOVQ pbr1+8(FP), br1
+	MOVQ pbr2+16(FP), br2
+	MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+	// const stream = 0
+	// br0.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+	MOVQ bitReaderShifted_value(br0), br_value
+	MOVQ bitReaderShifted_off(br0), br_offset
+
+	// We must have at least 2 * max tablelog left
+	CMPQ br_bits_read, $64-22
+	JBE skip_fill0
+
+	SUBQ $32, br_bits_read  // b.bitsRead -= 32
+	SUBQ $4, br_offset  // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br0), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+	MOVL 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+
+#endif
+
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br0.off < 4)
+	CMPQ br_offset, $4
+	SETLT DL
+	ORB DL, DH
+
+	// }
+skip_fill0:
+
+	// val0 := br0.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+
+#else
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v0
+
+	// br0.advance(uint8(v0.entry))
+	MOVB AH, BL  // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+
+#else
+	// val1 := br0.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v1
+
+	// br0.advance(uint8(v1.entry))
+	MOVB AH, BH  // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 0(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+	MOVQ br_value, bitReaderShifted_value(br0)
+	MOVQ br_offset, bitReaderShifted_off(br0)
+
+	// const stream = 1
+	// br1.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+	MOVQ bitReaderShifted_value(br1), br_value
+	MOVQ bitReaderShifted_off(br1), br_offset
+
+	// We must have at least 2 * max tablelog left
+	CMPQ br_bits_read, $64-22
+	JBE skip_fill1
+
+	SUBQ $32, br_bits_read  // b.bitsRead -= 32
+	SUBQ $4, br_offset  // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br1), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+	MOVL 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+
+#endif
+
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br1.off < 4)
+	CMPQ br_offset, $4
+	SETLT DL
+	ORB DL, DH
+
+	// }
+skip_fill1:
+
+	// val0 := br1.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+
+#else
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v0
+
+	// br1.advance(uint8(v0.entry))
+	MOVB AH, BL  // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+
+#else
+	// val1 := br1.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v1
+
+	// br1.advance(uint8(v1.entry))
+	MOVB AH, BH  // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 256(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+	MOVQ br_value, bitReaderShifted_value(br1)
+	MOVQ br_offset, bitReaderShifted_off(br1)
+
+	// const stream = 2
+	// br2.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+	MOVQ bitReaderShifted_value(br2), br_value
+	MOVQ bitReaderShifted_off(br2), br_offset
+
+	// We must have at least 2 * max tablelog left
+	CMPQ br_bits_read, $64-22
+	JBE skip_fill2
+
+	SUBQ $32, br_bits_read  // b.bitsRead -= 32
+	SUBQ $4, br_offset  // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br2), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+	MOVL 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+
+#endif
+
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br2.off < 4)
+	CMPQ br_offset, $4
+	SETLT DL
+	ORB DL, DH
+
+	// }
+skip_fill2:
+
+	// val0 := br2.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+
+#else
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v0
+
+	// br2.advance(uint8(v0.entry))
+	MOVB AH, BL  // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+
+#else
+	// val1 := br2.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v1
+
+	// br2.advance(uint8(v1.entry))
+	MOVB AH, BH  // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 512(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+	MOVQ br_value, bitReaderShifted_value(br2)
+	MOVQ br_offset, bitReaderShifted_off(br2)
+
+	// const stream = 3
+	// br3.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+	MOVQ bitReaderShifted_value(br3), br_value
+	MOVQ bitReaderShifted_off(br3), br_offset
+
+	// We must have at least 2 * max tablelog left
+	CMPQ br_bits_read, $64-22
+	JBE skip_fill3
+
+	SUBQ $32, br_bits_read  // b.bitsRead -= 32
+	SUBQ $4, br_offset  // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br3), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+	MOVL 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+
+#endif
+
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br3.off < 4)
+	CMPQ br_offset, $4
+	SETLT DL
+	ORB DL, DH
+
+	// }
+skip_fill3:
+
+	// val0 := br3.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+
+#else
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v0
+
+	// br3.advance(uint8(v0.entry))
+	MOVB AH, BL  // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+
+#else
+	// val1 := br3.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+
+#endif
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v1
+
+	// br3.advance(uint8(v1.entry))
+	MOVB AH, BH  // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 768(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+	MOVQ br_value, bitReaderShifted_value(br3)
+	MOVQ br_offset, bitReaderShifted_off(br3)
+
+	ADDQ $2, off  // off += 2
+
+	TESTB DH, DH  // any br[i].off < 4?
+	JNZ end
+
+	CMPQ off, $bufoff
+	JL main_loop
+
+end:
+	MOVQ 0(SP), BP
+
+	MOVB off, ret+56(FP)
+	RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
new file mode 100644
index 00000000000..330d86ae155
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
@@ -0,0 +1,195 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+	MOVQ BP, 0(SP)
+
+	XORQ exhausted, exhausted  // exhausted = false
+	XORQ off, off  // off = 0
+
+	MOVBQZX peekBits+32(FP), peek_bits
+	MOVQ buf+40(FP), buffer
+	MOVQ tbl+48(FP), table
+
+	MOVQ pbr0+0(FP), br0
+	MOVQ pbr1+8(FP), br1
+	MOVQ pbr2+16(FP), br2
+	MOVQ pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+	// const stream = {{ var "id" }}
+	// br{{ var "id"}}.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+	MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
+	MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+	// We must have at least 2 * max tablelog left
+	CMPQ br_bits_read, $64-22
+	JBE skip_fill{{ var "id" }}
+
+	SUBQ $32, br_bits_read  // b.bitsRead -= 32
+	SUBQ $4, br_offset  // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+#else
+	MOVL 0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+#endif
+
+	ORQ AX, br_value
+
+	// exhausted = exhausted || (br{{ var "id"}}.off < 4)
+	CMPQ br_offset, $4
+	SETLT DL
+	ORB DL, DH
+	// }
+skip_fill{{ var "id" }}:
+
+	// val0 := br{{ var "id"}}.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+#else
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+#endif
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v0
+
+	// br{{ var "id"}}.advance(uint8(v0.entry))
+	MOVB AH, BL  // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+
+#ifdef GOAMD64_v3
+	SHRXQ peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
+#else
+	// val1 := br{{ var "id"}}.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX  // AX = (value >> peek_bits) & mask
+#endif
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX  // AX - v1
+
+	// br{{ var "id"}}.advance(uint8(v1.entry))
+	MOVB AH, BH  // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+	MOVBQZX AL, CX
+	SHLXQ AX, br_value, br_value  // value <<= n
+#else
+	MOVBQZX AL, CX
+	SHLQ CL, br_value  // value <<= n
+#endif
+
+	ADDQ CX, br_bits_read  // bits_read += n
+
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
+
+	// update the bitReader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+	MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
+	MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+	{{ set "id" "0" }}
+	{{ set "ofs" "0" }}
+	{{ set "bufofs" "0" }} {{/* id * bufoff */}}
+	{{ template "decode_2_values_x86" . }}
+
+	{{ set "id" "1" }}
+	{{ set "ofs" "8" }}
+	{{ set "bufofs" "256" }}
+	{{ template "decode_2_values_x86" . }}
+
+	{{ set "id" "2" }}
+	{{ set "ofs" "16" }}
+	{{ set "bufofs" "512" }}
+	{{ template "decode_2_values_x86" . }}
+
+	{{ set "id" "3" }}
+	{{ set "ofs" "24" }}
+	{{ set "bufofs" "768" }}
+	{{ template "decode_2_values_x86" . }}
+
+	ADDQ $2, off  // off += 2
+
+	TESTB DH, DH  // any br[i].off < 4?
+	JNZ end
+
+	CMPQ off, $bufoff
+	JL main_loop
+end:
+	MOVQ 0(SP), BP
+
+	MOVB off, ret+56(FP)
+	RET
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
new file mode 100644
index 00000000000..126b4d68a94
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -0,0 +1,193 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm

+// This file contains a generic implementation of Decoder.Decompress4X.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
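+// This generic version is compiled when the amd64 assembly fast path is
+// unavailable (see the build constraints above).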
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if len(src) < 6+(4*1) {
+		return nil, errors.New("input too small")
+	}
+	if use8BitTables && d.actualTableLog <= 8 {
+		return d.decompress4X8bit(dst, src)
+	}
+
+	var br [4]bitReaderShifted
+	// Decode "jump table"
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	buf := d.buffer()
+	var off uint8
+	var decoded int
+
+	// Decode 2 values from each decoder/loop.
+	const bufoff = 256
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			const stream = 0
+			const stream2 = 1
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			val := br[stream].peekBitsFast(d.actualTableLog)
+			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
+			v2 := single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
+			br[stream2].advance(uint8(v2.entry))
+			buf[stream][off] = uint8(v.entry >> 8)
+			buf[stream2][off] = uint8(v2.entry >> 8)
+
+			val = br[stream].peekBitsFast(d.actualTableLog)
+			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
+			v2 = single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
+			br[stream2].advance(uint8(v2.entry))
+			buf[stream][off+1] = uint8(v.entry >> 8)
+			buf[stream2][off+1] = uint8(v2.entry >> 8)
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			val := br[stream].peekBitsFast(d.actualTableLog)
+			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
+			v2 := single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
+			br[stream2].advance(uint8(v2.entry))
+			buf[stream][off] = uint8(v.entry >> 8)
+			buf[stream2][off] = uint8(v2.entry >> 8)
+
+			val = br[stream].peekBitsFast(d.actualTableLog)
+			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
+			v2 = single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
+			br[stream2].advance(uint8(v2.entry))
+			buf[stream][off+1] = uint8(v.entry >> 8)
+			buf[stream2][off+1] = uint8(v2.entry >> 8)
+		}
+
+		off += 2
+
+		// off is uint8; wrapping to 0 means bufoff (256) bytes are buffered per stream.
+		if off == 0 {
+			if bufoff > dstEvery {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			copy(out, buf[0][:])
+			copy(out[dstEvery:], buf[1][:])
+			copy(out[dstEvery*2:], buf[2][:])
+			copy(out[dstEvery*3:], buf[3][:])
+			out = out[bufoff:]
+			decoded += bufoff * 4
+			// There must be at least 3 buffers left.
+			if len(out) < dstEvery*3 {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			br.fill()
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 3ee00ecb470..e8ad17ad08e 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"math"
 	"math/bits"
+	"sync"
 
 	"github.com/klauspost/compress/fse"
 )
@@ -116,6 +117,7 @@ type Scratch struct {
 	nodes      []nodeElt
 	tmpOut     [4][]byte
 	fse        *fse.Scratch
+	decPool    sync.Pool // *[4][256]byte buffers.
 	huffWeight [maxSymbolValue + 1]byte
 }
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
new file mode 100644
index 00000000000..3954c51219b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
@@ -0,0 +1,34 @@
+// Package cpuinfo gives runtime info about the current CPU.
+//
+// This is a very limited module meant for use internally
+// in this project. For a more versatile solution, check
+// https://github.com/klauspost/cpuid.
+package cpuinfo
+
+// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
+func HasBMI1() bool {
+	return hasBMI1
+}
+
+// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
+func HasBMI2() bool {
+	return hasBMI2
+}
+
+// DisableBMI2 will disable BMI2, for testing purposes.
+// Call the returned function to restore the previous state.
+func DisableBMI2() func() {
+	old := hasBMI2
+	hasBMI2 = false
+	return func() {
+		hasBMI2 = old
+	}
+}
+
+// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
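+// Callers can use it to gate code paths that need both instruction sets.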
+func HasBMI() bool {
+	return HasBMI1() && HasBMI2()
+}
+
+var hasBMI1 bool
+var hasBMI2 bool
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
new file mode 100644
index 00000000000..e802579c4f9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
@@ -0,0 +1,11 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package cpuinfo
+
+//go:noescape
+func x86extensions() (bmi1, bmi2 bool)
+
+func init() {
+	hasBMI1, hasBMI2 = x86extensions()
+}
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
new file mode 100644
index 00000000000..4465fbe9e90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
@@ -0,0 +1,36 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+TEXT ·x86extensions(SB), NOSPLIT, $0
+	// 1. determine max EAX value
+	XORQ AX, AX
+	CPUID
+
+	CMPQ AX, $7
+	JB unsupported
+
+	// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
+	MOVQ $7, AX
+	MOVQ $0, CX
+	CPUID
+
+	BTQ $3, BX  // bit 3 = BMI1
+	SETCS AL
+
+	BTQ $8, BX  // bit 8 = BMI2
+	SETCS AH
+
+	MOVB AL, bmi1+0(FP)
+	MOVB AH, bmi2+1(FP)
+	RET
+
+unsupported:
+	XORQ AX, AX
+	MOVB AL, bmi1+0(FP)
+	MOVB AL, bmi2+1(FP)
+	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index c8f0f16fc1e..beb7fa87201 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that
 is in the future. So if you want to limit concurrency for future updates, specify the concurrency
 you would like.
 
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
+which will compress input as each block is completed, blocking on writes until each has completed.
+
 You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
 compression settings can be specified.
 
@@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
 For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
 
 `EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will run on the same goroutine as the caller.
 Encoded blocks can be concatenated and the result will be the combined input stream.
 Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
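+
+As a sketch (not from the upstream docs), a small-buffer round trip with reusable coders could look like this; error handling is elided:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Both are safe for concurrent use and should be reused.
+var encoder, _ = zstd.NewWriter(nil)
+var decoder, _ = zstd.NewReader(nil)
+
+// roundTrip compresses src and decompresses the result.
+// EncodeAll and DecodeAll append to the dst slice they are given (nil here).
+func roundTrip(src []byte) ([]byte, error) {
+	compressed := encoder.EncodeAll(src, nil)
+	return decoder.DecodeAll(compressed, nil)
+}
+```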
@@ -149,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip This package: file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73101992 643 313.87 -silesia.tar zskp 2 211947520 67504318 969 208.38 -silesia.tar zskp 3 211947520 64595893 2007 100.68 -silesia.tar zskp 4 211947520 60995370 8825 22.90 +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 cgo zstd: silesia.tar zstd 1 211947520 73605392 543 371.56 @@ -161,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66 silesia.tar zstd 9 211947520 60212393 5063 39.92 gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80136201 1152 175.45 +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 GOB stream of binary data. Highly compressible. https://files.klauspost.com/compress/gob-stream.7z file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 235022249 3088 590.30 -gob-stream zskp 2 1911399616 205669791 3786 481.34 -gob-stream zskp 3 1911399616 175034659 9636 189.17 -gob-stream zskp 4 1911399616 165609838 50369 36.19 +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 6 1911399616 193632038 6687 272.56 gob-stream zstd 9 1911399616 177620386 16175 112.70 -gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 359753026 5438 335.20 +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 The test data for the Large Text Compression Benchmark is the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. http://mattmahoney.net/dc/textdata.html file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 292243069 12162 78.41 -enwik9 zskp 4 1000000000 262183768 82837 11.51 +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 6 1000000000 295138875 10290 92.68 enwik9 zstd 9 1000000000 278348700 28549 33.40 -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 Highly compressible JSON file. 
https://files.klauspost.com/compress/github-june-2days-2019.json.zst file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 -github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 VM Image, Linux mint with a few installed applications: https://files.klauspost.com/compress/rawstudio-mint14.7z file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 -rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 CSV data: https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66 -nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33 +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 -nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00 +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ``` ## 
Decompressor
@@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error {
 }
 ```
 
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+It is important to use the "Close" function when you no longer need the Reader to stop running goroutines,
+when running with default settings.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data
+as it is being requested only.
 
 For decoding buffers, it could look something like this:
 
@@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd"
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
@@ -303,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) {
 }
 ```
 
 Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
 It will only allow a certain number of concurrent operations to run.
-To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
 
 ### Dictionaries
 
@@ -357,62 +369,48 @@ In this case no unneeded allocations should be made.
 The buffer decoder does everything on the same goroutine and does nothing concurrently.
 It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
 
-The stream decoder operates on
+The stream decoder will create goroutines that:
 
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
 
 So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
 
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
 Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
 
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
-
-
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
 ### Benchmarks
 
-These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
-
 The first two are streaming decodes and the last are smaller inputs.
-
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+ ``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op 
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. ## Zstd inside ZIP files diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 753d17df634..d7cd15ba29d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -7,6 +7,7 @@ package zstd import ( "encoding/binary" "errors" + "fmt" "io" "math/bits" ) @@ -132,6 +133,9 @@ func (b *bitReader) remain() uint { func (b *bitReader) close() error { // Release reference. b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index dc587b2c949..b2bca330182 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,14 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "io/ioutil" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,6 +43,9 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). maxBlockSize = (1 << 21) - 1 @@ -76,16 +84,25 @@ type blockDec struct { // Window size of the block. WindowSize uint64 - history chan *history - input chan struct{} - result chan decodeOutput - err error - decWG sync.WaitGroup + err error + + // Check against this crc + checkCRC []byte // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. localFrame *frameDec + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + // Block is RLE, this is the size. 
RLESize uint32 tmp [4]byte @@ -108,13 +125,8 @@ func (b *blockDec) String() string { func newBlockDec(lowMem bool) *blockDec { b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), + lowMem: lowMem, } - b.decWG.Add(1) - go b.startDecoder() return &b } @@ -132,11 +144,17 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.Type = blockType((bh >> 1) & 3) // find size. cSize := int(bh >> 3) - maxSize := maxBlockSize + maxSize := maxCompressedBlockSizeAlloc switch b.Type { case blockTypeReserved: return ErrReservedBlockType case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } b.RLESize = uint32(cSize) if b.lowMem { maxSize = cSize @@ -147,9 +165,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { println("Data size on stream:", cSize) } b.RLESize = 0 - maxSize = maxCompressedBlockSize + maxSize = maxCompressedBlockSizeAlloc if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + maxSize = int(windowSize) + compressedBlockOverAlloc } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { if debugDecoder { @@ -157,7 +175,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return ErrCompressedSizeTooBig } + // Compressed blocks must be at least 2 bytes: + // one for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("raw block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = 0 // We do not need a destination for raw blocks. maxSize = -1 @@ -168,9 +198,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { // Read block data. if cap(b.dataStorage) < cSize { if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize) + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } if cap(b.dst) <= maxSize { @@ -192,85 +222,14 @@ func (b *blockDec) sendErr(err error) { b.Last = true b.Type = blockTypeReserved b.err = err - b.input <- struct{}{} } // Close will release resources. // Closed blockDec cannot be reset. func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() -} - -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history.
-func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debugDecoder { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debugDecoder { - println("blockDec: Finished block") - } - } } -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -293,14 +252,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -310,30 +278,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -349,7 +305,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -360,7 +316,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
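// The three header bytes pack: 2 bits of literals type, 2 bits of size
// format, then Regenerated_Size and Compressed_Size little-endian,
// 10 bits each, which is what the shift-and-mask below extracts.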
if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -371,7 +327,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -381,7 +337,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -392,13 +348,15 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -406,7 +364,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { @@ -417,7 +375,6 @@ func (b *blockDec) decodeCompressed(hist *history) error { b.literalBuf = make([]byte, litRegenSize) } else { b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - } } } @@ -433,7 +390,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. literals = in[:litCompSize] @@ -441,31 +398,65 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + var err error + // Use our out buffer. 
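+ // Literals can be coded as a single Huffman bitstream or as four
+ // interleaved streams (fourStreams, chosen by the size format above);
+ // Decompress4X and Decompress1X below pick the matching layout.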
+ huff.MaxDecodedSize = maxCompressedBlockSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + case literalsBlockCompressed: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litCompSize] in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { b.literalBuf = make([]byte, 0, litRegenSize) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize) } } - if huff == nil { - huff = &huff0.Scratch{} + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = maxCompressedBlockSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -474,27 +465,61 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -511,8 +536,16 @@ func (b *blockDec) decodeCompressed(hist *history) error { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } - var seqs = &sequenceDecs{} + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -541,6 +574,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -548,34 +584,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec + seq.fse.setRLE(symb) if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } if debugDecoder { - println("Read table ok", "symbolLen:", dec.symbolLen) + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: seq.repeat = true } @@ -585,140 +623,106 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } - } - - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. 
- - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil - } - } - if huff != nil { - hist.huffTree = huff - } if debugDecoder { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } return nil } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) return err } - if debugDecoder { - println("History merged ok") + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) } - br := &bitReader{} - if err := br.init(in); err != nil { - return err + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. 
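+ // decode resolves the FSE-coded sequence section into explicit
+ // (ll, ml, mo) values in b.sequence; nothing is written to the output
+ // here, execution happens later in executeSequences.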
+ err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debugDecoder { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index aab71c6cf85..b80191e4b1e 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f430f58b572..c65ea9795f3 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,9 +5,13 @@ package zstd import ( - "errors" + "bytes" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -22,12 +26,19 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. - stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + } + + frame *frameDec + // Custom dictionaries. // Always uses copies. 
dicts map[uint32]dict @@ -46,7 +57,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. - cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true if r == nil { @@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } @@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error { d.drainOutput() + d.syncStream.br.r = nil if r == nil { d.current.err = ErrDecoderNilInput if len(d.current.b) > 0 { @@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error { } return nil } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - // Remove current block. + d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { @@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() { } d.decoders <- v.d } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. @@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. 
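//
// A minimal usage sketch (illustrative; a nil reader yields a decoder meant
// only for DecodeAll, and compressed is the caller's input buffer):
//
//	dec, _ := zstd.NewReader(nil)
//	defer dec.Close()
//	out, err := dec.DecodeAll(compressed, nil)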
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } @@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -307,33 +328,39 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil } - return dst, nil + return dst, err } if frame.DictionaryID != nil { dict, ok := d.dicts[*frame.DictionaryID] if !ok { return nil, ErrUnknownDictionary } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } frame.history.setDict(&dict) } - if err != nil { - return dst, err - } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if frame.WindowSize > d.o.maxWindowSize { + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + return dst, ErrDecoderSizeExceeded + } if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } + if cap(dst) == 0 { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. @@ -368,33 +395,170 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } if d.current.err != nil { // Keep error state. - return blocking + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok } + //ASYNC: + d.stashDecoder() if blocking { - d.current.decodeOutput = <-d.current.output + d.current.decodeOutput, ok = <-d.current.output } else { select { - case d.current.decodeOutput = <-d.current.output: + case d.current.decodeOutput, ok = <-d.current.output: default: return false } } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } if debugDecoder { - println("got", len(d.current.b), "bytes, error:", d.current.err) + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) } + + if len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { + got := d.current.crc.Sum64() + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(got)) + if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC { + if debugDecoder { + println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + println("CRC ok", tmp[:]) + } + } + } + return true } +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + d.frame.crc.Write(d.current.b) + if d.current.d.Last { + d.current.err = d.frame.checkCRC() + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + // Close will release all resources. 
// It is NOT possible to reuse the decoder after this. func (d *Decoder) Close() { @@ -402,10 +566,10 @@ func (d *Decoder) Close() { return } d.drainOutput() - if d.stream != nil { - close(d.stream) + if d.current.cancel != nil { + d.current.cancel() d.streamWg.Wait() - d.stream = nil + d.current.cancel = nil } if d.decoders != nil { close(d.decoders) @@ -456,100 +620,307 @@ type decodeOutput struct { err error } -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil } -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - // Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. -func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { +// ASYNC: +// Spawn 4 go routines. +// 0: Read frames and decode blocks. +// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree. +// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets. +// 3: Wait for stream history, execute sequences, send stream history. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debugDecoder { - println("got new stream") + br := readerWrapper{r: r} + + var seqPrepare = make(chan *blockDec, d.o.concurrent) + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Prepare blocks... + go func() { + var hist history + var hasErr bool + for block := range seqPrepare { + if hasErr { + if block != nil { + seqDecode <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history") + } + hist.reset() + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + continue + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) + close(seqDecode) + }() + + // Async 2: Decode sequences... 
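+ // Stage 2 owns the sequence decoder state (FSE tables, recent offsets)
+ // across blocks: it consumes prepared blocks in order, decodes the
+ // sequence section, and forwards each block to seqExecute, passing
+ // errored blocks through unchanged so ordering is preserved.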
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history, recent:", block.async.newHist.recentOffsets) + } + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) } - break + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize } - if debugDecoder { - println("starting frame decoder") - } - - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() - - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} + seqExecute <- block + } + close(seqExecute) + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
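+ // Stage 3 owns the frame history buffer: it appends raw/RLE blocks
+ // directly, executes decoded sequences against the history, enforces the
+ // frame content size, and emits decodeOutput in block order.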
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 3: new history") + } + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxBlockSize) } - break decodeStream - default: } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } } } - // All blocks have started decoding, check if there are more frames. 
- println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + }() + +decodeStream: + for { + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + seqPrepare <- dec + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.checkCRC = nil + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + dec.err = err + } + var tmp [4]byte + copy(tmp[:], crc) + dec.checkCRC = tmp[:] + if debugDecoder { + println("found crc to check:", dec.checkCRC) + } + } + err = dec.err + last := dec.Last + seqPrepare <- dec + if err != nil { + break decodeStream + } + if last { + break + } } - } + close(seqPrepare) + wg.Wait() + d.frame.history.b = frameHistCache } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 95cc9b8b81f..fc52ebc403a 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -28,7 +28,10 @@ func (o *decoderOptions) setDefault() { concurrent: runtime.GOMAXPROCS(0), maxWindowSize: MaxWindowSize, } - o.maxDecodedSize = 1 << 63 + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 } // WithDecoderLowmem will set whether to use a lower amount of memory, @@ -37,16 +40,25 @@ func WithDecoderLowmem(b bool) DOption { return func(o *decoderOptions) error { o.lowMem = b; return nil } } -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. +// WithDecoderConcurrency sets the number of created decoders. +// When decoding blocks with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided, GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower.
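+//
+// An illustrative sketch (the reader r and the choice of 1 are assumptions,
+// not defaults):
+//
+//	// Fully synchronous stream decoding, no async goroutines:
+//	dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(1))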
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { - if n <= 0 { + if n < 0 { return errors.New("concurrency must be at least 1") } - o.concurrent = n + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } return nil } } @@ -54,7 +66,7 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// Maximum and default is 1 << 63 bytes. +// Maximum is 1 << 63 bytes. Default is 64GiB. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 5f08a283023..f51ab529a0b 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 7 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index e6e315969b0..dcc987a7cb6 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) { if cap(s.filling) == 0 { s.filling = make([]byte, 0, e.o.blockSize) } - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() } if s.encoder == nil { s.encoder = e.o.encoder() } - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] s.encoder.Reset(e.o.dict, false) s.headerWritten = false s.eofWritten = false @@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
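+ // Only a full-sized block that produced exactly one literal per input
+ // byte skips entropy coding and takes the errIncompressible path below.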
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing them. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + // Move blocks forward. s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 5f2e1d020ee..44d8dbd199a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -76,6 +76,7 @@ func WithEncoderCRC(b bool) EOption { // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 989c79f8c31..509d5cecea5 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -8,23 +8,17 @@ import ( "bytes" "encoding/hex" "errors" - "hash" "io" - "sync" "github.com/klauspost/compress/zstd/internal/xxhash" ) type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 + o decoderOptions + crc *xxhash.Digest WindowSize uint64 - // In order queue of blocks being decoded. - decoding chan *blockDec - // Frame history passed between blocks history history @@ -34,15 +28,10 @@ type frameDec struct { bBuf byteBuf FrameContentSize uint64 - frameDone sync.WaitGroup DictionaryID *uint32 HasCheckSum bool SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool } const ( @@ -208,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { b, err := br.readSmall(fcsSize) if err != nil { @@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error { d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } if debugDecoder { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared.
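+ // Bit 2 of the frame header descriptor is the Content_Checksum_flag;
+ // when set, the frame ends with the low 32 bits of the xxhash64 of the
+ // decoded content.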
d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error { } d.history.windowSize = int(d.WindowSize) if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + d.history.allocFrameBuffer = d.history.windowSize * 2 + // TODO: Maybe use FrameContent size } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + // history contains input - maybe we do something d.rawInput = br return nil @@ -276,49 +272,18 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { if debugDecoder { - printf("decoding new block %p:%p", block, block.data) + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. - d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debugDecoder { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { @@ -340,7 +305,7 @@ func (d *frameDec) checkCRC() error { return err } - if !bytes.Equal(tmp[:], want) { + if !bytes.Equal(tmp[:], want) && !ignoreCRC { if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) } @@ -352,133 +317,28 @@ func (d *frameDec) checkCRC() error { return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 2MB. - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debugDecoder { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. 
-func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. - block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debugDecoder { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debugDecoder { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debugDecoder { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history - } - block = next - } -} - // runDecoder will create a sync decoder that will decode a block of data. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { saved := d.history.b // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. 
crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.FrameContentSize != fcsUnknown { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) @@ -489,22 +349,30 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { break } if uint64(len(d.history.b)) > d.o.maxDecodedSize { err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) err = ErrFrameSizeExceeded break } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { var n int n, err = d.crc.Write(dst[crcStart:]) if err == nil { diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index bb3d4fd6c31..fde4e6b6011 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -5,8 +5,10 @@ package zstd import ( + "encoding/binary" "errors" "fmt" + "io" ) const ( @@ -182,6 +184,29 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { return s.buildDtable() } +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. 
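The `mustReadFrom` helper above works because every field it reads (`dt`, `symbolLen`, `stateTable`, ...) has a size known at compile time, which is the same property the debug dump in `blockdec.go` relies on when it serializes tables with `binary.Write`. Below is a self-contained sketch of that fixed-size round-trip technique; the `table` type is invented for illustration and only stands in for `fseDecoder`:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// table mirrors the idea of fseDecoder's serialized fields: only
// fixed-size data may be passed to binary.Write / binary.Read.
type table struct {
	SymbolLen      uint16
	ActualTableLog uint8
	MaxBits        uint8
	StateTable     [256]uint16
}

func main() {
	in := table{SymbolLen: 12, ActualTableLog: 6, MaxBits: 8}
	in.StateTable[0] = 42

	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, in); err != nil {
		panic(err) // mirrors the fatalErr pattern used above
	}

	var out table
	if err := binary.Read(&buf, binary.LittleEndian, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.SymbolLen, out.StateTable[0]) // 12 42
}
```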
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go new file mode 100644 index 00000000000..7f2210e0530 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fuzz.go @@ -0,0 +1,11 @@ +//go:build ignorecrc +// +build ignorecrc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// ignoreCRC can be used for fuzz testing to ignore CRC values... +const ignoreCRC = true diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go new file mode 100644 index 00000000000..6811c68a893 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go @@ -0,0 +1,11 @@ +//go:build !ignorecrc +// +build !ignorecrc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// ignoreCRC can be used for fuzz testing to ignore CRC values... +const ignoreCRC = false diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32d251..28b40153cc2 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,20 +10,31 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { @@ -35,7 +46,7 @@ func (h *history) reset() { if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { fseDecoderPool.Put(f) } - h.decoders = sequenceDecs{} + h.decoders = sequenceDecs{br: h.decoders.br} if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) @@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +95,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. 
+ discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index bc731e4cb69..e80139dd9c6 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,19 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. -func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -93,12 +99,127 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] return nil } +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at a time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + if true { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + } + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +272,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,51 +297,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) + return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) } - if size > cap(s.out) { + if size > cap(out) { // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. // over-allocating here can create a large amount of GC pressure so we try to keep // it as contained as possible - used := len(s.out) - startSize + used := len(out) - startSize addBytes := 256 + ll + ml + used>>2 // Clamp to max block size. if used+addBytes > maxBlockSize { addBytes = maxBlockSize - used } - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...)
s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -231,26 +350,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. - start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at a time. @@ -264,7 +382,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -291,9 +408,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize) + } + // Add final literals - s.out = append(s.out, s.literals...) - return nil + s.out = append(out, s.literals...) + return br.close() } // update states, at least 27 bits must be available. @@ -457,36 +579,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp }
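The "Overlapping copy" branches above (and their assembly counterparts later in this diff) handle the LZ77 case where the match offset is smaller than the match length: the source range overlaps the bytes being written, so the copy must run byte by byte for the pattern to repeat. A standalone sketch of that case, detached from the decoder's state:

```go
// Sketch of the match-copy step: mo is the match offset back from the end
// of out, ml the match length. When ml > mo the ranges overlap, and the
// byte-serial loop deliberately re-reads bytes it has just written.
package main

import "fmt"

func appendMatch(out []byte, mo, ml int) []byte {
	start := len(out) - mo
	if ml <= mo {
		// No overlap: a single copy is safe.
		return append(out, out[start:start+ml]...)
	}
	// Overlapping copy: extend the slice, then copy one byte at a time.
	// The built-in copy has memmove semantics and would not repeat the pattern.
	out = append(out, make([]byte, ml)...)
	src := out[start : start+ml]
	dst := out[len(out)-ml:]
	for i := range src {
		dst[i] = src[i]
	}
	return out
}

func main() {
	out := appendMatch([]byte("abc"), 2, 6) // repeat "bc" three times
	fmt.Println(string(out))                // abcbcbcbc
}
```

Go's built-in `copy` would reproduce the source bytes once rather than repeating them, which is why the byte-serial loop is what lets a two-byte offset expand into a long run.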
- -// mergeHistory will merge history. -func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 00000000000..4676b09cc18 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,350 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than the output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than the output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
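These `//go:noescape` declarations are the Go-visible entry points of the assembly in seqdec_amd64.s further down. The surrounding Go code fills a context struct, picks a variant based on CPU features, and maps the returned integer code back onto a Go error. A hypothetical, self-contained sketch of that calling convention (the names and bodies below are illustrative stand-ins, not the library's API):

```go
// Sketch of the context-struct + error-code protocol used by the asm routines.
package main

import "fmt"

type asmContext struct {
	iteration int // remaining sequences - 1, counted down by the routine
	litRemain int // literal bytes still available
}

const (
	noError              = 0
	errNotEnoughLiterals = 4
)

// Stand-ins for the //go:noescape assembly routines.
func decodeGeneric(ctx *asmContext) int { ctx.iteration = -1; return noError }
func decodeBMI2(ctx *asmContext) int    { ctx.iteration = -1; return noError }

func hasBMI2() bool { return false } // stand-in for cpuinfo.HasBMI2()

func decode(nSeqs, litRemain int) error {
	ctx := asmContext{iteration: nSeqs - 1, litRemain: litRemain}
	var code int
	if hasBMI2() {
		code = decodeBMI2(&ctx)
	} else {
		code = decodeGeneric(&ctx)
	}
	switch code {
	case noError:
		return nil
	case errNotEnoughLiterals:
		return fmt.Errorf("not enough literals: %d remaining", ctx.litRemain)
	default:
		return fmt.Errorf("decode returned error code %d", code)
	}
}

func main() { fmt.Println(decode(8, 128)) } // <nil>
```

Passing one pointer to a plain struct keeps the assembly ABI simple: every input and output lives at a fixed offset, and the routine can report progress (`iteration`, `litRemain`) even when it bails out with an error code.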
+// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...)
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exceeds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
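The `_56` variants declared above are selected by the `lte56bits` check in `decode()` below. One sequence consumes at most the summed extra bits of the three value codes (`s.maxBits`) plus the three FSE table logs for the state updates; if that worst case fits in 56 bits, presumably the guaranteed fill level of the 64-bit bit buffer, a single refill per sequence suffices, which is why the `_56` assembly bodies contain one fill block where the generic ones contain two. A small illustration of the bound, with made-up numbers:

```go
// Illustrative arithmetic behind the lte56bits dispatch: sumExtraBits
// stands in for s.maxBits, the three logs for the actualTableLog values.
package main

import "fmt"

func fitsIn56(sumExtraBits, llLog, mlLog, ofLog int) bool {
	return sumExtraBits+llLog+mlLog+ofLog <= 56
}

func main() {
	fmt.Println(fitsIn56(9+9+8, 6, 6, 5))    // true: _56 fast path available
	fmt.Println(fitsIn56(16+16+30, 9, 9, 8)) // false: generic variants used
}
```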
+// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + + ok := sequenceDecs_executeSimple_amd64(&ctx) + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 00000000000..01cc23fa8a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,3519 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm +// +build !appengine,!noasm,gc,!noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, DI + +sequenceDecs_decode_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R8 + +sequenceDecs_decode_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R9 + +sequenceDecs_decode_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_adjust_end + 
+sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_adjust_end + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, DI + +sequenceDecs_decode_56_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R8 + +sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R9 + +sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA 
sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, R8, R8 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB 
sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, R8, R8 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + 
+sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, R11 + JZ copy_1_word + MOVB (SI)(R14*1), R15 + MOVB R15, (BX)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, R11 + JZ copy_1_dword + MOVW (SI)(R14*1), R15 + MOVW R15, (BX)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, R11 + JZ copy_1_qword + MOVL (SI)(R14*1), R15 + MOVL R15, (BX)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, R11 + JZ copy_1_test + MOVQ (SI)(R14*1), R15 + MOVQ R15, (BX)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JGE copy_all_from_history + XORQ R11, R11 + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(R11*1), R12 + MOVB R12, (BX)(R11*1) + ADDQ $0x01, R11 + +copy_4_word: + 
TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(R11*1), R12 + MOVW R12, (BX)(R11*1) + ADDQ $0x02, R11 + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(R11*1), R12 + MOVL R12, (BX)(R11*1) + ADDQ $0x04, R11 + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(R11*1), R12 + MOVQ R12, (BX)(R11*1) + ADDQ $0x08, R11 + JMP copy_4_test + +copy_4: + MOVUPS (R14)(R11*1), X0 + MOVUPS X0, (BX)(R11*1) + ADDQ $0x10, R11 + +copy_4_test: + CMPQ R11, R13 + JB copy_4 + ADDQ R13, DI + ADDQ R13, BX + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, R11 + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (BX)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, R11 + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (BX)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, R11 + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (BX)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, R11 + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (BX)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (BX)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, R11 + JB copy_5 + ADDQ R11, BX + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ R12, R12 + +copy_2: + MOVUPS (R11)(R12*1), X0 + MOVUPS X0, (BX)(R12*1) + ADDQ $0x10, R12 + CMPQ R12, R13 + JB copy_2 + ADDQ R13, BX + ADDQ R13, DI + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ R12, R12 + +copy_slow_3: + MOVB (R11)(R12*1), R14 + MOVB R14, (BX)(R12*1) + INCQ R12 + CMPQ R12, R13 + JB copy_slow_3 + ADDQ R13, BX + ADDQ R13, DI + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, DI + +sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R8 + +sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R9 + +sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + 
MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, AX + JZ copy_1_word + MOVB (R11)(R14*1), R15 + MOVB R15, (R10)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, AX + JZ copy_1_dword + MOVW (R11)(R14*1), R15 + MOVW R15, (R10)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, AX + JZ copy_1_qword + MOVL (R11)(R14*1), R15 + MOVL R15, (R10)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, AX + JZ copy_1_test + MOVQ (R11)(R14*1), R15 + MOVQ R15, (R10)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JGE copy_all_from_history + XORQ AX, AX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(AX*1), CL + MOVB CL, (R10)(AX*1) + ADDQ $0x01, AX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(AX*1), CX + MOVW CX, (R10)(AX*1) + ADDQ $0x02, AX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(AX*1), CX + MOVL CX, (R10)(AX*1) + ADDQ $0x04, AX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(AX*1), CX + MOVQ CX, (R10)(AX*1) + ADDQ $0x08, AX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(AX*1), X0 + MOVUPS X0, (R10)(AX*1) + ADDQ $0x10, AX + +copy_4_test: + CMPQ AX, R13 + JB copy_4 + ADDQ R13, R12 + 
ADDQ R13, R10 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, AX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R10)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, AX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R10)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, AX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R10)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, AX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R10)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R10)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, AX + JB copy_5 + ADDQ AX, R10 + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ CX, CX + +copy_2: + MOVUPS (AX)(CX*1), X0 + MOVUPS X0, (R10)(CX*1) + ADDQ $0x10, CX + CMPQ CX, R13 + JB copy_2 + ADDQ R13, R10 + ADDQ R13, R12 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ CX, CX + +copy_slow_3: + MOVB (AX)(CX*1), R14 + MOVB R14, (R10)(CX*1) + INCQ CX + CMPQ CX, R13 + JB copy_slow_3 + ADDQ R13, R10 + ADDQ R13, R12 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for 
the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, R8, R8 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_adjust_end + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_end + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, CX + JZ copy_1_word + MOVB (R10)(R14*1), R15 + MOVB R15, (R9)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, CX + JZ copy_1_dword + MOVW (R10)(R14*1), R15 + MOVW R15, (R9)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, CX + JZ copy_1_qword + MOVL (R10)(R14*1), R15 + MOVL R15, (R9)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, CX + JZ copy_1_test + MOVQ (R10)(R14*1), R15 + MOVQ R15, (R9)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JGE copy_all_from_history + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(CX*1), R12 + MOVB R12, (R9)(CX*1) + ADDQ $0x01, CX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(CX*1), R12 + MOVW R12, (R9)(CX*1) + ADDQ $0x02, CX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(CX*1), R12 + MOVL R12, (R9)(CX*1) + ADDQ $0x04, CX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(CX*1), R12 + MOVQ R12, (R9)(CX*1) + ADDQ $0x08, CX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(CX*1), X0 + MOVUPS X0, (R9)(CX*1) + ADDQ $0x10, CX + +copy_4_test: + CMPQ CX, R13 + JB copy_4 + ADDQ R13, R11 + ADDQ R13, R9 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, CX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R9)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, CX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R9)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, CX + JZ copy_5_qword + MOVL (R14)(R15*1), 
BP + MOVL BP, (R9)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, CX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R9)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R9)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, CX + JB copy_5 + ADDQ CX, R9 + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ R12, R12 + +copy_2: + MOVUPS (CX)(R12*1), X0 + MOVUPS X0, (R9)(R12*1) + ADDQ $0x10, R12 + CMPQ R12, R13 + JB copy_2 + ADDQ R13, R9 + ADDQ R13, R11 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ R12, R12 + +copy_slow_3: + MOVB (CX)(R12*1), R14 + MOVB R14, (R9)(R12*1) + INCQ R12 + CMPQ R12, R13 + JB copy_slow_3 + ADDQ R13, R9 + ADDQ R13, R11 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
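+	// Note on the _safe_ variants: their non-overlapping match copy
+	// (copy_2) finishes the tail with exact byte/word/dword/qword moves,
+	// while the fast variants above round the MOVUPS loop up to a
+	// multiple of 16 bytes and may write past the match end into spare
+	// output capacity. The presumed rationale is that the safe form never
+	// touches memory beyond the requested length.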
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, DI + +sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R8 + +sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R9 + +sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ 
s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, AX + JZ copy_1_word + MOVB (R11)(R14*1), R15 + MOVB R15, (R10)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, AX + JZ copy_1_dword + MOVW (R11)(R14*1), R15 + MOVW R15, (R10)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, AX + JZ copy_1_qword + MOVL (R11)(R14*1), R15 + MOVL R15, (R10)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, AX + JZ copy_1_test + MOVQ (R11)(R14*1), R15 + MOVQ R15, (R10)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JGE copy_all_from_history + XORQ AX, AX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(AX*1), CL + MOVB CL, (R10)(AX*1) + ADDQ $0x01, AX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(AX*1), CX + MOVW CX, (R10)(AX*1) + ADDQ $0x02, AX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(AX*1), CX + MOVL CX, (R10)(AX*1) + ADDQ $0x04, AX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(AX*1), CX 
+ MOVQ CX, (R10)(AX*1) + ADDQ $0x08, AX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(AX*1), X0 + MOVUPS X0, (R10)(AX*1) + ADDQ $0x10, AX + +copy_4_test: + CMPQ AX, R13 + JB copy_4 + ADDQ R13, R12 + ADDQ R13, R10 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, AX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R10)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, AX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R10)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, AX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R10)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, AX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R10)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R10)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, AX + JB copy_5 + ADDQ AX, R10 + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_2_word + MOVB (AX)(CX*1), R14 + MOVB R14, (R10)(CX*1) + ADDQ $0x01, CX + +copy_2_word: + TESTQ $0x00000002, R13 + JZ copy_2_dword + MOVW (AX)(CX*1), R14 + MOVW R14, (R10)(CX*1) + ADDQ $0x02, CX + +copy_2_dword: + TESTQ $0x00000004, R13 + JZ copy_2_qword + MOVL (AX)(CX*1), R14 + MOVL R14, (R10)(CX*1) + ADDQ $0x04, CX + +copy_2_qword: + TESTQ $0x00000008, R13 + JZ copy_2_test + MOVQ (AX)(CX*1), R14 + MOVQ R14, (R10)(CX*1) + ADDQ $0x08, CX + JMP copy_2_test + +copy_2: + MOVUPS (AX)(CX*1), X0 + MOVUPS X0, (R10)(CX*1) + ADDQ $0x10, CX + +copy_2_test: + CMPQ CX, R13 + JB copy_2 + ADDQ R13, R10 + ADDQ R13, R12 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ CX, CX + +copy_slow_3: + MOVB (AX)(CX*1), R14 + MOVB R14, (R10)(CX*1) + INCQ CX + CMPQ CX, R13 + JB copy_slow_3 + ADDQ R13, R10 + ADDQ R13, R12 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE 
+TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R13 + MOVQ $0x00001010, CX + 
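+	// $0x1010 is a BEXTR control word: bits 7:0 give the start bit (16)
+	// and bits 15:8 the field width (16), selecting the 16-bit next-state
+	// base; the $0x0808 constants above likewise extract the 8-bit
+	// extra-bits count at bit 8. This replaces the SHRQ/MOVBQZX/MOVWQZX
+	// sequences of the non-BMI2 functions.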
BEXTRQ CX, R8, R8 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, CX + JZ copy_1_word + MOVB (R10)(R14*1), R15 + MOVB R15, (R9)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, CX + JZ copy_1_dword + MOVW (R10)(R14*1), R15 + MOVW R15, (R9)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, CX + JZ copy_1_qword + MOVL (R10)(R14*1), R15 + MOVL R15, (R9)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, CX + JZ copy_1_test + MOVQ (R10)(R14*1), R15 + MOVQ R15, (R9)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JGE copy_all_from_history + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(CX*1), R12 + MOVB R12, (R9)(CX*1) + ADDQ $0x01, CX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ 
copy_4_dword + MOVW (R14)(CX*1), R12 + MOVW R12, (R9)(CX*1) + ADDQ $0x02, CX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(CX*1), R12 + MOVL R12, (R9)(CX*1) + ADDQ $0x04, CX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(CX*1), R12 + MOVQ R12, (R9)(CX*1) + ADDQ $0x08, CX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(CX*1), X0 + MOVUPS X0, (R9)(CX*1) + ADDQ $0x10, CX + +copy_4_test: + CMPQ CX, R13 + JB copy_4 + ADDQ R13, R11 + ADDQ R13, R9 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, CX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R9)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, CX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R9)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, CX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R9)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, CX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R9)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R9)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, CX + JB copy_5 + ADDQ CX, R9 + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + XORQ R12, R12 + TESTQ $0x00000001, R13 + JZ copy_2_word + MOVB (CX)(R12*1), R14 + MOVB R14, (R9)(R12*1) + ADDQ $0x01, R12 + +copy_2_word: + TESTQ $0x00000002, R13 + JZ copy_2_dword + MOVW (CX)(R12*1), R14 + MOVW R14, (R9)(R12*1) + ADDQ $0x02, R12 + +copy_2_dword: + TESTQ $0x00000004, R13 + JZ copy_2_qword + MOVL (CX)(R12*1), R14 + MOVL R14, (R9)(R12*1) + ADDQ $0x04, R12 + +copy_2_qword: + TESTQ $0x00000008, R13 + JZ copy_2_test + MOVQ (CX)(R12*1), R14 + MOVQ R14, (R9)(R12*1) + ADDQ $0x08, R12 + JMP copy_2_test + +copy_2: + MOVUPS (CX)(R12*1), X0 + MOVUPS X0, (R9)(R12*1) + ADDQ $0x10, R12 + +copy_2_test: + CMPQ R12, R13 + JB copy_2 + ADDQ R13, R9 + ADDQ R13, R11 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + XORQ R12, R12 + +copy_slow_3: + MOVB (CX)(R12*1), R14 + MOVB R14, (R9)(R12*1) + INCQ R12 + CMPQ R12, R13 + JB copy_slow_3 + ADDQ R13, R9 + ADDQ R13, R11 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ 
ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 00000000000..c3452bc3a9e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. 
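+		// These checks run before the sequence is queued: execution may
+		// happen asynchronously (see executeSimple), so a corrupt
+		// sequence, a zero match offset with a non-zero match length, an
+		// oversized match, or a block exceeding maxBlockSize, must be
+		// rejected here rather than during execution.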
+		if mo == 0 && ml > 0 {
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+		}
+		if ml > maxMatchLen {
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+		}
+		s.seqSize += ll + ml
+		if s.seqSize > maxBlockSize {
+			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		}
+		litRemain -= ll
+		if litRemain < 0 {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
+		}
+		seqs[i] = seqVals{
+			ll: ll,
+			ml: ml,
+			mo: mo,
+		}
+		if i == len(seqs)-1 {
+			// This is the last sequence, so we shouldn't update state.
+			break
+		}
+
+		// Manually inlined, ~ 5-20% faster
+		// Update all 3 states at once. Approx 20% faster.
+		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+		if nBits == 0 {
+			llState = llTable[llState.newState()&maxTableMask]
+			mlState = mlTable[mlState.newState()&maxTableMask]
+			ofState = ofTable[ofState.newState()&maxTableMask]
+		} else {
+			bits := br.get32BitsFast(nBits)
+			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits >> (ofState.nbBits() & 31))
+			lowBits &= bitMask[mlState.nbBits()&15]
+			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+		}
+	}
+	s.seqSize += litRemain
+	if s.seqSize > maxBlockSize {
+		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	return err
+}
+
+// executeSimple handles cases when a dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize > cap(s.out) {
+		addBytes := s.seqSize + len(s.out)
+		s.out = append(s.out, make([]byte, addBytes)...)
+		s.out = s.out[:len(s.out)-addBytes]
+	}
+
+	if debugDecoder {
+		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+	}
+
+	var t = len(s.out)
+	out := s.out[:t+s.seqSize]
+
+	for _, seq := range seqs {
+		// Add literals
+		copy(out[t:], s.literals[:seq.ll])
+		t += seq.ll
+		s.literals = s.literals[seq.ll:]
+
+		// Malformed input
+		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+			return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+		}
+
+		// Copy from history.
+		if v := seq.mo - t; v > 0 {
+			// v is the start position in history from end.
+			start := len(hist) - v
+			if seq.ml > v {
+				// Some goes into the current block.
+				// Copy remainder of history
+				copy(out[t:], hist[start:])
+				t += v
+				seq.ml -= v
+			} else {
+				copy(out[t:], hist[start:start+seq.ml])
+				t += seq.ml
+				continue
+			}
+		}
+
+		// We must be in the current buffer now
+		if seq.ml > 0 {
+			start := t - seq.mo
+			if seq.ml <= t-start {
+				// No overlap
+				copy(out[t:], out[start:start+seq.ml])
+				t += seq.ml
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at the time.
+				src := out[start : start+seq.ml]
+				dst := out[t:]
+				dst = dst[:len(src)]
+				t += len(src)
+				// Destination is the space we just added.
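+				// Illustrative case: with seq.mo == 1 and seq.ml == 4, src and
+				// dst overlap with dst one byte ahead, so each iteration reads
+				// the byte the previous iteration just wrote, replicating a
+				// single byte four times. The built-in copy has memmove
+				// semantics and would copy bytes that have not been produced
+				// yet, hence the explicit byte-at-a-time loop.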
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+	}
+	// Add final literals
+	copy(out[t:], s.literals)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+		}
+	}
+	s.out = out
+
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index 967f29b3120..b53f606a18a 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -20,34 +20,49 @@ const ZipMethodPKWare = 20
 
 var zipReaderPool sync.Pool
 
-// newZipReader cannot be used since we would leak goroutines...
-func newZipReader(r io.Reader) io.ReadCloser {
-	dec, ok := zipReaderPool.Get().(*Decoder)
-	if ok {
-		dec.Reset(r)
-	} else {
-		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
-		if err != nil {
-			panic(err)
+// newZipReader creates a pooled zip decompressor.
+func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
+	pool := &zipReaderPool
+	if len(opts) > 0 {
+		opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
+		// Force concurrency 1
+		opts = append(opts, WithDecoderConcurrency(1))
+		// Create our own pool
+		pool = &sync.Pool{}
+	}
+	return func(r io.Reader) io.ReadCloser {
+		dec, ok := pool.Get().(*Decoder)
+		if ok {
+			dec.Reset(r)
+		} else {
+			d, err := NewReader(r, opts...)
+			if err != nil {
+				panic(err)
+			}
+			dec = d
 		}
-		dec = d
+		return &pooledZipReader{dec: dec, pool: pool}
 	}
-	return &pooledZipReader{dec: dec}
 }
 
 type pooledZipReader struct {
-	mu  sync.Mutex // guards Close and Read
-	dec *Decoder
+	mu   sync.Mutex // guards Close and Read
+	pool *sync.Pool
+	dec  *Decoder
 }
 
 func (r *pooledZipReader) Read(p []byte) (n int, err error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	if r.dec == nil {
-		return 0, errors.New("Read after Close")
+		return 0, errors.New("read after close or EOF")
 	}
 	dec, err := r.dec.Read(p)
-
+	if err == io.EOF {
+		r.dec.Reset(nil)
+		r.pool.Put(r.dec)
+		r.dec = nil
+	}
 	return dec, err
 }
 
@@ -57,7 +72,7 @@ func (r *pooledZipReader) Close() error {
 	var err error
 	if r.dec != nil {
 		err = r.dec.Reset(nil)
-		zipReaderPool.Put(r.dec)
+		r.pool.Put(r.dec)
 		r.dec = nil
 	}
 	return err
@@ -111,12 +126,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
 
 // ZipDecompressor returns a decompressor that can be registered with zip libraries.
 // See ZipCompressor for example.
-func ZipDecompressor() func(r io.Reader) io.ReadCloser {
-	return func(r io.Reader) io.ReadCloser {
-		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
-		if err != nil {
-			panic(err)
-		}
-		return d.IOReadCloser()
-	}
+// Options can be specified. WithDecoderConcurrency(1) is forced,
+// and by default a 128MB maximum decompression window is specified.
+// The window size can be overridden if required.
+func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
+	return newZipReader(opts...)
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index ef1d49a009c..c1c90b4a072 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -39,6 +39,9 @@ const zstdMinMatch = 3
 
 // Reset the buffer offset when reaching this.
 const bufferReset = math.MaxInt32 - MaxWindowSize
 
+// fcsUnknown is used for unknown frame content size.
+const fcsUnknown = math.MaxUint64 + var ( // ErrReservedBlockType is returned when a reserved block type is found. // Typically this indicates wrong or corrupted input. @@ -52,6 +55,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -75,6 +82,10 @@ var ( // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go index 45ddad236f3..242f82cc72a 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go @@ -4,7 +4,6 @@ package mountinfo import ( - "fmt" "os" "path/filepath" @@ -33,13 +32,13 @@ func mountedByStat(path string) (bool, error) { func normalizePath(path string) (realPath string, err error) { if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) + return "", err } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) + return "", err } if _, err := os.Stat(realPath); err != nil { - return "", fmt.Errorf("failed to stat target of %q: %w", path, err) + return "", err } return realPath, nil } diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go index 30572ea87ff..6a6acfd9ab3 100644 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go @@ -56,6 +56,9 @@ func (v *GVariant) native() *C.GVariant { } func (v *GVariant) Ptr() unsafe.Pointer { + if v == nil { + return nil + } return v.ptr } diff --git a/vendor/github.com/mtrmac/gpgme/.appveyor.yml b/vendor/github.com/proglottis/gpgme/.appveyor.yml similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.appveyor.yml rename to vendor/github.com/proglottis/gpgme/.appveyor.yml diff --git a/vendor/github.com/mtrmac/gpgme/.gitignore b/vendor/github.com/proglottis/gpgme/.gitignore similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.gitignore rename to vendor/github.com/proglottis/gpgme/.gitignore diff --git a/vendor/github.com/mtrmac/gpgme/.travis.yml b/vendor/github.com/proglottis/gpgme/.travis.yml similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.travis.yml rename to vendor/github.com/proglottis/gpgme/.travis.yml diff --git a/vendor/github.com/mtrmac/gpgme/LICENSE b/vendor/github.com/proglottis/gpgme/LICENSE similarity index 100% rename from vendor/github.com/mtrmac/gpgme/LICENSE rename to vendor/github.com/proglottis/gpgme/LICENSE diff --git 
a/vendor/github.com/mtrmac/gpgme/README.md b/vendor/github.com/proglottis/gpgme/README.md similarity index 100% rename from vendor/github.com/mtrmac/gpgme/README.md rename to vendor/github.com/proglottis/gpgme/README.md diff --git a/vendor/github.com/mtrmac/gpgme/callbacks.go b/vendor/github.com/proglottis/gpgme/callbacks.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/callbacks.go rename to vendor/github.com/proglottis/gpgme/callbacks.go diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/proglottis/gpgme/data.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/data.go rename to vendor/github.com/proglottis/gpgme/data.go diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/proglottis/gpgme/go_gpgme.c similarity index 100% rename from vendor/github.com/mtrmac/gpgme/go_gpgme.c rename to vendor/github.com/proglottis/gpgme/go_gpgme.c diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/proglottis/gpgme/go_gpgme.h similarity index 92% rename from vendor/github.com/mtrmac/gpgme/go_gpgme.h rename to vendor/github.com/proglottis/gpgme/go_gpgme.h index d4826ab368e..eb3a4ba88b1 100644 --- a/vendor/github.com/mtrmac/gpgme/go_gpgme.h +++ b/vendor/github.com/proglottis/gpgme/go_gpgme.h @@ -6,11 +6,6 @@ #include -/* GPGME_VERSION_NUMBER was introduced in 1.4.0 */ -#if !defined(GPGME_VERSION_NUMBER) || GPGME_VERSION_NUMBER < 0x010402 -typedef off_t gpgme_off_t; /* Introduced in 1.4.2 */ -#endif - extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size); extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go similarity index 96% rename from vendor/github.com/mtrmac/gpgme/gpgme.go rename to vendor/github.com/proglottis/gpgme/gpgme.go index c19b9aebc5c..9833057a663 100644 --- a/vendor/github.com/mtrmac/gpgme/gpgme.go +++ b/vendor/github.com/proglottis/gpgme/gpgme.go @@ -53,13 +53,13 @@ const ( type PinEntryMode int -// const ( // Unavailable in 1.3.2 -// PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT -// PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK -// PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL -// PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR -// PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK -// ) +const ( + PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT + PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK + PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL + PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR + PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK +) type EncryptFlag uint @@ -348,19 +348,17 @@ func (c *Context) KeyListMode() KeyListMode { return res } -// Unavailable in 1.3.2: -// func (c *Context) SetPinEntryMode(m PinEntryMode) error { -// err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) -// runtime.KeepAlive(c) -// return err -// } +func (c *Context) SetPinEntryMode(m PinEntryMode) error { + err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) + runtime.KeepAlive(c) + return err +} -// Unavailable in 1.3.2: -// func (c *Context) PinEntryMode() PinEntryMode { -// res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) -// runtime.KeepAlive(c) -// return res -// } +func (c *Context) PinEntryMode() PinEntryMode { + res := 
PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) + runtime.KeepAlive(c) + return res +} func (c *Context) SetCallback(callback Callback) error { var err error diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info.go b/vendor/github.com/proglottis/gpgme/unset_agent_info.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/unset_agent_info.go rename to vendor/github.com/proglottis/gpgme/unset_agent_info.go diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go b/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go rename to vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go diff --git a/vendor/github.com/sylabs/sif/v2/LICENSE.md b/vendor/github.com/sylabs/sif/v2/LICENSE.md new file mode 100644 index 00000000000..dea3e409e11 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/LICENSE.md @@ -0,0 +1,29 @@ +# LICENSE + +Copyright (c) 2018-2022, Sylabs Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go new file mode 100644 index 00000000000..d6f667378a2 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go @@ -0,0 +1,72 @@ +// Copyright (c) 2021-2022, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. 
+ +package sif + +var ( + hdrArchUnknown archType = [...]byte{'0', '0', '\x00'} + hdrArch386 archType = [...]byte{'0', '1', '\x00'} + hdrArchAMD64 archType = [...]byte{'0', '2', '\x00'} + hdrArchARM archType = [...]byte{'0', '3', '\x00'} + hdrArchARM64 archType = [...]byte{'0', '4', '\x00'} + hdrArchPPC64 archType = [...]byte{'0', '5', '\x00'} + hdrArchPPC64le archType = [...]byte{'0', '6', '\x00'} + hdrArchMIPS archType = [...]byte{'0', '7', '\x00'} + hdrArchMIPSle archType = [...]byte{'0', '8', '\x00'} + hdrArchMIPS64 archType = [...]byte{'0', '9', '\x00'} + hdrArchMIPS64le archType = [...]byte{'1', '0', '\x00'} + hdrArchS390x archType = [...]byte{'1', '1', '\x00'} + hdrArchRISCV64 archType = [...]byte{'1', '2', '\x00'} +) + +type archType [3]byte + +// getSIFArch returns the archType corresponding to go runtime arch. +func getSIFArch(arch string) archType { + archMap := map[string]archType{ + "386": hdrArch386, + "amd64": hdrArchAMD64, + "arm": hdrArchARM, + "arm64": hdrArchARM64, + "ppc64": hdrArchPPC64, + "ppc64le": hdrArchPPC64le, + "mips": hdrArchMIPS, + "mipsle": hdrArchMIPSle, + "mips64": hdrArchMIPS64, + "mips64le": hdrArchMIPS64le, + "s390x": hdrArchS390x, + "riscv64": hdrArchRISCV64, + } + + t, ok := archMap[arch] + if !ok { + return hdrArchUnknown + } + return t +} + +// GoArch returns the go runtime arch corresponding to t. +func (t archType) GoArch() string { + archMap := map[archType]string{ + hdrArch386: "386", + hdrArchAMD64: "amd64", + hdrArchARM: "arm", + hdrArchARM64: "arm64", + hdrArchPPC64: "ppc64", + hdrArchPPC64le: "ppc64le", + hdrArchMIPS: "mips", + hdrArchMIPSle: "mipsle", + hdrArchMIPS64: "mips64", + hdrArchMIPS64le: "mips64le", + hdrArchS390x: "s390x", + hdrArchRISCV64: "riscv64", + } + + arch, ok := archMap[t] + if !ok { + arch = "unknown" + } + return arch +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go new file mode 100644 index 00000000000..d706fb1a59c --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go @@ -0,0 +1,103 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "io" +) + +// A Buffer is a variable-sized buffer of bytes that implements the sif.ReadWriter interface. The +// zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte + pos int64 +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial contents. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +var errNegativeOffset = errors.New("negative offset") + +// ReadAt implements the io.ReaderAt interface. +func (b *Buffer) ReadAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, errNegativeOffset + } + + if off >= int64(len(b.buf)) { + return 0, io.EOF + } + + n = copy(p, b.buf[off:]) + if n < len(p) { + err = io.EOF + } + return n, err +} + +var errNegativePosition = errors.New("negative position") + +// Write implements the io.Writer interface. +func (b *Buffer) Write(p []byte) (n int, err error) { + if b.pos < 0 { + return 0, errNegativePosition + } + + if have, need := int64(len(b.buf))-b.pos, int64(len(p)); have < need { + b.buf = append(b.buf, make([]byte, need-have)...) 
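+	// Note: the append above pads with zero bytes, so a Write positioned
+	// past the current end (via Seek) leaves a zero-filled gap.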
+ } + + n = copy(b.buf[b.pos:], p) + b.pos += int64(n) + return n, nil +} + +var errInvalidWhence = errors.New("invalid whence") + +// Seek implements the io.Seeker interface. +func (b *Buffer) Seek(offset int64, whence int) (int64, error) { + var abs int64 + + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = b.pos + offset + case io.SeekEnd: + abs = int64(len(b.buf)) + offset + default: + return 0, errInvalidWhence + } + + if abs < 0 { + return 0, errNegativePosition + } + + b.pos = abs + return abs, nil +} + +var errTruncateRange = errors.New("truncation out of range") + +// Truncate discards all but the first n bytes from the buffer. +func (b *Buffer) Truncate(n int64) error { + if n < 0 || n > int64(len(b.buf)) { + return errTruncateRange + } + + b.buf = b.buf[:n] + return nil +} + +// Bytes returns the contents of the buffer. The slice is valid for use only until the next buffer +// modification (that is, only until the next call to a method like ReadAt, Write, or Truncate). +func (b *Buffer) Bytes() []byte { return b.buf } + +// Len returns the number of bytes in the buffer. +func (b *Buffer) Len() int64 { return int64(len(b.buf)) } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go new file mode 100644 index 00000000000..e65bdb7476d --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -0,0 +1,680 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/google/uuid" +) + +// nextAligned finds the next offset that satisfies alignment. +func nextAligned(offset int64, alignment int) int64 { + align64 := uint64(alignment) + offset64 := uint64(offset) + + if align64 != 0 && offset64%align64 != 0 { + offset64 = (offset64 & ^(align64 - 1)) + align64 + } + + return int64(offset64) +} + +// writeDataObjectAt writes the data object described by di to ws, using time t, recording details +// in d. The object is written at the first position that satisfies the alignment requirements +// described by di following offsetUnaligned. +func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll + offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart) + if err != nil { + return err + } + + n, err := io.Copy(ws, di.r) + if err != nil { + return err + } + + if err := di.fillDescriptor(t, d); err != nil { + return err + } + d.Used = true + d.Offset = offset + d.Size = n + d.SizeWithPadding = offset - offsetUnaligned + n + + return nil +} + +var ( + errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image") + errPrimaryPartition = errors.New("image already contains a primary partition") +) + +// writeDataObject writes the data object described by di to f, using time t, recording details in +// the descriptor at index i. 
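The data-object writer above relies on nextAligned, whose mask arithmetic only rounds correctly when the alignment is a power of two — true for the os.Getpagesize() default applied by NewDescriptorInput, and a reason to prefer power-of-two values with OptObjectAlignment. A standalone sketch of the arithmetic:

```go
package main

import "fmt"

// nextAligned mirrors the helper above: it rounds offset up to the next
// multiple of alignment (alignment must be a power of two).
func nextAligned(offset int64, alignment int) int64 {
	align64 := uint64(alignment)
	offset64 := uint64(offset)
	if align64 != 0 && offset64%align64 != 0 {
		offset64 = (offset64 & ^(align64 - 1)) + align64
	}
	return int64(offset64)
}

func main() {
	fmt.Println(nextAligned(100, 64))    // 128
	fmt.Println(nextAligned(128, 64))    // 128: already aligned
	fmt.Println(nextAligned(4097, 4096)) // 8192
}
```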
+func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) error { + if i >= len(f.rds) { + return errInsufficientCapacity + } + + // If this is a primary partition, verify there isn't another primary partition, and update the + // architecture in the global header. + if p, ok := di.opts.extra.(partition); ok && p.Parttype == PartPrimSys { + if ds, err := f.GetDescriptors(WithPartitionType(PartPrimSys)); err == nil && len(ds) > 0 { + return errPrimaryPartition + } + + f.h.Arch = p.Arch + } + + d := &f.rds[i] + d.ID = uint32(i) + 1 + + if err := writeDataObjectAt(f.rw, f.h.DataOffset+f.h.DataSize, di, t, d); err != nil { + return err + } + + // Update minimum object ID map. + if minID, ok := f.minIDs[d.GroupID]; !ok || d.ID < minID { + f.minIDs[d.GroupID] = d.ID + } + + f.h.DescriptorsFree-- + f.h.DataSize += d.SizeWithPadding + + return nil +} + +// writeDescriptors writes the descriptors in f to backing storage. +func (f *FileImage) writeDescriptors() error { + if _, err := f.rw.Seek(f.h.DescriptorsOffset, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.rds) +} + +// writeHeader writes the the global header in f to backing storage. +func (f *FileImage) writeHeader() error { + if _, err := f.rw.Seek(0, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.h) +} + +// createOpts accumulates container creation options. +type createOpts struct { + launchScript [hdrLaunchLen]byte + id uuid.UUID + descriptorsOffset int64 + descriptorCapacity int64 + dis []DescriptorInput + t time.Time + closeOnUnload bool +} + +// CreateOpt are used to specify container creation options. +type CreateOpt func(*createOpts) error + +var errLaunchScriptLen = errors.New("launch script too large") + +// OptCreateWithLaunchScript specifies s as the launch script. +func OptCreateWithLaunchScript(s string) CreateOpt { + return func(co *createOpts) error { + b := []byte(s) + + if len(b) >= len(co.launchScript) { + return errLaunchScriptLen + } + + copy(co.launchScript[:], b) + + return nil + } +} + +// OptCreateDeterministic sets header/descriptor fields to values that support deterministic +// creation of images. +func OptCreateDeterministic() CreateOpt { + return func(co *createOpts) error { + co.id = uuid.Nil + co.t = time.Time{} + return nil + } +} + +// OptCreateWithID specifies id as the unique ID. +func OptCreateWithID(id string) CreateOpt { + return func(co *createOpts) error { + id, err := uuid.Parse(id) + co.id = id + return err + } +} + +// OptCreateWithDescriptorCapacity specifies that the created image should have the capacity for a +// maximum of n descriptors. +func OptCreateWithDescriptorCapacity(n int64) CreateOpt { + return func(co *createOpts) error { + co.descriptorCapacity = n + return nil + } +} + +// OptCreateWithDescriptors appends dis to the list of descriptors. +func OptCreateWithDescriptors(dis ...DescriptorInput) CreateOpt { + return func(co *createOpts) error { + co.dis = append(co.dis, dis...) + return nil + } +} + +// OptCreateWithTime specifies t as the image creation time. +func OptCreateWithTime(t time.Time) CreateOpt { + return func(co *createOpts) error { + co.t = t + return nil + } +} + +// OptCreateWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. 
+func OptCreateWithCloseOnUnload(b bool) CreateOpt { + return func(co *createOpts) error { + co.closeOnUnload = b + return nil + } +} + +// createContainer creates a new SIF container file in rw, according to opts. +func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) { + rds := make([]rawDescriptor, co.descriptorCapacity) + rdsSize := int64(binary.Size(rds)) + + h := header{ + LaunchScript: co.launchScript, + Magic: hdrMagic, + Version: CurrentVersion.bytes(), + Arch: hdrArchUnknown, + ID: co.id, + CreatedAt: co.t.Unix(), + ModifiedAt: co.t.Unix(), + DescriptorsFree: co.descriptorCapacity, + DescriptorsTotal: co.descriptorCapacity, + DescriptorsOffset: co.descriptorsOffset, + DescriptorsSize: rdsSize, + DataOffset: co.descriptorsOffset + rdsSize, + } + + f := &FileImage{ + rw: rw, + h: h, + rds: rds, + minIDs: make(map[uint32]uint32), + } + + for i, di := range co.dis { + if err := f.writeDataObject(i, di, co.t); err != nil { + return nil, err + } + } + + if err := f.writeDescriptors(); err != nil { + return nil, err + } + + if err := f.writeHeader(); err != nil { + return nil, err + } + + return f, nil +} + +// CreateContainer creates a new SIF container in rw, according to opts. One or more data objects +// can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptCreateWithCloseOnUnload. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to time.Now(). To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. +func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) { + id, err := uuid.NewRandom() + if err != nil { + return nil, err + } + + co := createOpts{ + id: id, + descriptorsOffset: 4096, + descriptorCapacity: 48, + t: time.Now(), + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&co); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := createContainer(rw, co) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = co.closeOnUnload + return f, nil +} + +// CreateContainerAtPath creates a new SIF container file at path, according to opts. One or more +// data objects can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to time.Now(). To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. 
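A hedged sketch of the creation API documented above; the output path, object contents, and option choices are illustrative rather than taken from this patch:

```go
package main

import (
	"bytes"
	"log"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	// Wrap the object contents in a DescriptorInput.
	di, err := sif.NewDescriptorInput(sif.DataDeffile,
		bytes.NewReader([]byte("bootstrap: library\n")),
		sif.OptObjectName("definition"),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Create the image with one data object; OptCreateDeterministic could
	// be added for reproducible (zero ID, zero timestamp) output.
	f, err := sif.CreateContainerAtPath("image.sif",
		sif.OptCreateWithDescriptors(di),
	)
	if err != nil {
		log.Fatal(err)
	}

	if err := f.UnloadContainer(); err != nil {
		log.Fatal(err)
	}
}
```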
+func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) { + fp, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := CreateContainer(fp, opts...) + if err != nil { + fp.Close() + os.Remove(fp.Name()) + + return nil, err + } + + f.closeOnUnload = true + return f, nil +} + +func zeroData(fimg *FileImage, descr *rawDescriptor) error { + // first, move to data object offset + if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil { + return err + } + + var zero [4096]byte + n := descr.Size + upbound := int64(4096) + for { + if n < 4096 { + upbound = n + } + + if _, err := fimg.rw.Write(zero[:upbound]); err != nil { + return err + } + n -= 4096 + if n <= 0 { + break + } + } + + return nil +} + +func resetDescriptor(fimg *FileImage, index int) error { + // If we remove the primary partition, set the global header Arch field to HdrArchUnknown + // to indicate that the SIF file doesn't include a primary partition and no dependency + // on any architecture exists. + if fimg.rds[index].isPartitionOfType(PartPrimSys) { + fimg.h.Arch = hdrArchUnknown + } + + offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0])) + + // first, move to descriptor offset + if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil { + return err + } + + var emptyDesc rawDescriptor + return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc) +} + +// addOpts accumulates object add options. +type addOpts struct { + t time.Time +} + +// AddOpt are used to specify object add options. +type AddOpt func(*addOpts) error + +// OptAddDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptAddDeterministic() AddOpt { + return func(ao *addOpts) error { + ao.t = time.Time{} + return nil + } +} + +// OptAddWithTime specifies t as the image modification time. +func OptAddWithTime(t time.Time) AddOpt { + return func(ao *addOpts) error { + ao.t = t + return nil + } +} + +// AddObject adds a new data object and its descriptor into the specified SIF file. +// +// By default, the image modification time is set to the current time. To override this, consider +// using OptAddDeterministic or OptAddWithTime. +func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error { + ao := addOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&ao); err != nil { + return fmt.Errorf("%w", err) + } + } + + // Find an unused descriptor. + i := 0 + for _, rd := range f.rds { + if !rd.Used { + break + } + i++ + } + + if err := f.writeDataObject(i, di, ao.t); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = ao.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// isLast return true if the data object associated with d is the last in f. +func (f *FileImage) isLast(d *rawDescriptor) bool { + isLast := true + + end := d.Offset + d.Size + f.WithDescriptors(func(d Descriptor) bool { + isLast = d.Offset()+d.Size() <= end + return !isLast + }) + + return isLast +} + +// truncateAt truncates f at the start of the padded data object described by d. 
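AddObject above pairs with DeleteObject below; a hedged sketch, where the image path and object ID are illustrative, and OptDeleteCompact succeeds only when the object is the last one in the file (see errCompactNotImplemented):

```go
package main

import (
	"bytes"
	"log"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	f, err := sif.LoadContainerFromPath("image.sif")
	if err != nil {
		log.Fatal(err)
	}
	defer f.UnloadContainer()

	di, err := sif.NewDescriptorInput(sif.DataGeneric, bytes.NewReader([]byte("payload")))
	if err != nil {
		log.Fatal(err)
	}

	// AddObject fills the first unused descriptor slot and appends the data.
	if err := f.AddObject(di); err != nil {
		log.Fatal(err)
	}

	// Zero the deleted object's data region and truncate the file.
	if err := f.DeleteObject(2, sif.OptDeleteZero(true), sif.OptDeleteCompact(true)); err != nil {
		log.Fatal(err)
	}
}
```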
+func (f *FileImage) truncateAt(d *rawDescriptor) error { + start := d.Offset + d.Size - d.SizeWithPadding + + if err := f.rw.Truncate(start); err != nil { + return err + } + + return nil +} + +// deleteOpts accumulates object deletion options. +type deleteOpts struct { + zero bool + compact bool + t time.Time +} + +// DeleteOpt are used to specify object deletion options. +type DeleteOpt func(*deleteOpts) error + +// OptDeleteZero specifies whether the deleted object should be zeroed. +func OptDeleteZero(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.zero = b + return nil + } +} + +// OptDeleteCompact specifies whether the image should be compacted following object deletion. +func OptDeleteCompact(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.compact = b + return nil + } +} + +// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptDeleteDeterministic() DeleteOpt { + return func(do *deleteOpts) error { + do.t = time.Time{} + return nil + } +} + +// OptDeleteWithTime specifies t as the image modification time. +func OptDeleteWithTime(t time.Time) DeleteOpt { + return func(do *deleteOpts) error { + do.t = t + return nil + } +} + +var errCompactNotImplemented = errors.New("compact not implemented for non-last object") + +// DeleteObject deletes the data object with id, according to opts. +// +// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following +// object deletion, use OptDeleteCompact. +// +// By default, the image modification time is set to time.Now(). To override this, consider using +// OptDeleteDeterministic or OptDeleteWithTime. +func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error { + do := deleteOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&do); err != nil { + return fmt.Errorf("%w", err) + } + } + + d, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if do.compact && !f.isLast(d) { + return fmt.Errorf("%w", errCompactNotImplemented) + } + + if do.zero { + if err := zeroData(f, d); err != nil { + return fmt.Errorf("%w", err) + } + } + + if do.compact { + if err := f.truncateAt(d); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.DataSize -= d.SizeWithPadding + } + + f.h.DescriptorsFree++ + f.h.ModifiedAt = do.t.Unix() + + index := 0 + for i, od := range f.rds { + if od.ID == id { + index = i + break + } + } + + if err := resetDescriptor(f, index); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// setOpts accumulates object set options. +type setOpts struct { + t time.Time +} + +// SetOpt are used to specify object set options. +type SetOpt func(*setOpts) error + +// OptSetDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptSetDeterministic() SetOpt { + return func(so *setOpts) error { + so.t = time.Time{} + return nil + } +} + +// OptSetWithTime specifies t as the image/object modification time. +func OptSetWithTime(t time.Time) SetOpt { + return func(so *setOpts) error { + so.t = t + return nil + } +} + +var ( + errNotPartition = errors.New("data object not a partition") + errNotSystem = errors.New("data object not a system partition") +) + +// SetPrimPart sets the specified system partition to be the primary one. 
+// +// By default, the image/object modification times are set to time.Now(). To override this, +// consider using OptSetDeterministic or OptSetWithTime. +func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { + so := setOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + descr, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if descr.DataType != DataPartition { + return fmt.Errorf("%w", errNotPartition) + } + + fs, pt, arch, err := descr.getPartitionMetadata() + if err != nil { + return fmt.Errorf("%w", err) + } + + // if already primary system partition, nothing to do + if pt == PartPrimSys { + return nil + } + + if pt != PartSystem { + return fmt.Errorf("%w", errNotSystem) + } + + olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys)) + if err != nil && !errors.Is(err, ErrObjectNotFound) { + return fmt.Errorf("%w", err) + } + + f.h.Arch = getSIFArch(arch) + + extra := partition{ + Fstype: fs, + Parttype: PartPrimSys, + } + copy(extra.Arch[:], arch) + + if err := descr.setExtra(extra); err != nil { + return fmt.Errorf("%w", err) + } + + if olddescr != nil { + oldfs, _, oldarch, err := olddescr.getPartitionMetadata() + if err != nil { + return fmt.Errorf("%w", err) + } + + oldextra := partition{ + Fstype: oldfs, + Parttype: PartSystem, + Arch: getSIFArch(oldarch), + } + + if err := olddescr.setExtra(oldextra); err != nil { + return fmt.Errorf("%w", err) + } + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go new file mode 100644 index 00000000000..da7a6a7c7be --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go @@ -0,0 +1,267 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "bytes" + "crypto" + "encoding/binary" + "errors" + "fmt" + "io" + "strings" + "time" +) + +// rawDescriptor represents an on-disk object descriptor. +type rawDescriptor struct { + DataType DataType + Used bool + ID uint32 + GroupID uint32 + LinkedID uint32 + Offset int64 + Size int64 + SizeWithPadding int64 + + CreatedAt int64 + ModifiedAt int64 + UID int64 // Deprecated: UID exists for historical compatibility and should not be used. + GID int64 // Deprecated: GID exists for historical compatibility and should not be used. + Name [descrNameLen]byte + Extra [descrMaxPrivLen]byte +} + +// partition represents the SIF partition data object descriptor. +type partition struct { + Fstype FSType + Parttype PartType + Arch archType +} + +// signature represents the SIF signature data object descriptor. +type signature struct { + Hashtype hashType + Entity [descrEntityLen]byte +} + +// cryptoMessage represents the SIF crypto message object descriptor. 
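SetPrimPart above folds all the bookkeeping — demoting any existing primary partition back to PartSystem and updating the header Arch — into one call. A hedged fragment, where f is a *sif.FileImage and the object ID is illustrative:

```go
// Promote the system partition with object ID 2 to primary; any previous
// primary partition is demoted to a plain system partition automatically.
if err := f.SetPrimPart(2); err != nil {
	log.Fatal(err)
}
```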
+type cryptoMessage struct { + Formattype FormatType + Messagetype MessageType +} + +var errNameTooLarge = errors.New("name value too large") + +// setName encodes name into the name field of d. +func (d *rawDescriptor) setName(name string) error { + if len(name) > len(d.Name) { + return errNameTooLarge + } + + for i := copy(d.Name[:], name); i < len(d.Name); i++ { + d.Name[i] = 0 + } + + return nil +} + +var errExtraTooLarge = errors.New("extra value too large") + +// setExtra encodes v into the extra field of d. +func (d *rawDescriptor) setExtra(v interface{}) error { + if v == nil { + return nil + } + + if binary.Size(v) > len(d.Extra) { + return errExtraTooLarge + } + + b := new(bytes.Buffer) + if err := binary.Write(b, binary.LittleEndian, v); err != nil { + return err + } + + for i := copy(d.Extra[:], b.Bytes()); i < len(d.Extra); i++ { + d.Extra[i] = 0 + } + + return nil +} + +// getPartitionMetadata gets metadata for a partition data object. +func (d rawDescriptor) getPartitionMetadata() (fs FSType, pt PartType, arch string, err error) { + if got, want := d.DataType, DataPartition; got != want { + return 0, 0, "", &unexpectedDataTypeError{got, []DataType{want}} + } + + var p partition + + b := bytes.NewReader(d.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &p); err != nil { + return 0, 0, "", fmt.Errorf("%w", err) + } + + return p.Fstype, p.Parttype, p.Arch.GoArch(), nil +} + +// isPartitionOfType returns true if d is a partition data object of type pt. +func (d rawDescriptor) isPartitionOfType(pt PartType) bool { + _, t, _, err := d.getPartitionMetadata() + if err != nil { + return false + } + return t == pt +} + +// Descriptor represents the SIF descriptor type. +type Descriptor struct { + r io.ReaderAt // Backing storage. + + raw rawDescriptor // Raw descriptor from image. + + relativeID uint32 // ID relative to minimum ID of object group. +} + +// DataType returns the type of data object. +func (d Descriptor) DataType() DataType { return d.raw.DataType } + +// ID returns the data object ID of d. +func (d Descriptor) ID() uint32 { return d.raw.ID } + +// GroupID returns the data object group ID of d, or zero if d is not part of a data object +// group. +func (d Descriptor) GroupID() uint32 { return d.raw.GroupID &^ descrGroupMask } + +// LinkedID returns the object/group ID d is linked to, or zero if d does not contain a linked +// ID. If isGroup is true, the returned id is an object group ID. Otherwise, the returned id is a +// data object ID. +func (d Descriptor) LinkedID() (id uint32, isGroup bool) { + return d.raw.LinkedID &^ descrGroupMask, d.raw.LinkedID&descrGroupMask == descrGroupMask +} + +// Offset returns the offset of the data object. +func (d Descriptor) Offset() int64 { return d.raw.Offset } + +// Size returns the data object size. +func (d Descriptor) Size() int64 { return d.raw.Size } + +// CreatedAt returns the creation time of the data object. +func (d Descriptor) CreatedAt() time.Time { return time.Unix(d.raw.CreatedAt, 0) } + +// ModifiedAt returns the modification time of the data object. +func (d Descriptor) ModifiedAt() time.Time { return time.Unix(d.raw.ModifiedAt, 0) } + +// Name returns the name of the data object. +func (d Descriptor) Name() string { return strings.TrimRight(string(d.raw.Name[:]), "\000") } + +// PartitionMetadata gets metadata for a partition data object. 
+func (d Descriptor) PartitionMetadata() (fs FSType, pt PartType, arch string, err error) { + return d.raw.getPartitionMetadata() +} + +var errHashUnsupported = errors.New("hash algorithm unsupported") + +// getHashType converts ht into a crypto.Hash. +func getHashType(ht hashType) (crypto.Hash, error) { + switch ht { + case hashSHA256: + return crypto.SHA256, nil + case hashSHA384: + return crypto.SHA384, nil + case hashSHA512: + return crypto.SHA512, nil + case hashBLAKE2S: + return crypto.BLAKE2s_256, nil + case hashBLAKE2B: + return crypto.BLAKE2b_256, nil + } + return 0, errHashUnsupported +} + +// SignatureMetadata gets metadata for a signature data object. +func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) { + if got, want := d.raw.DataType, DataSignature; got != want { + return ht, fp, &unexpectedDataTypeError{got, []DataType{want}} + } + + var s signature + + b := bytes.NewReader(d.raw.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &s); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + if ht, err = getHashType(s.Hashtype); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + fp = make([]byte, 20) + copy(fp, s.Entity[:]) + + return ht, fp, nil +} + +// CryptoMessageMetadata gets metadata for a crypto message data object. +func (d Descriptor) CryptoMessageMetadata() (FormatType, MessageType, error) { + if got, want := d.raw.DataType, DataCryptoMessage; got != want { + return 0, 0, &unexpectedDataTypeError{got, []DataType{want}} + } + + var m cryptoMessage + + b := bytes.NewReader(d.raw.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &m); err != nil { + return 0, 0, fmt.Errorf("%w", err) + } + + return m.Formattype, m.Messagetype, nil +} + +// GetData returns the data object associated with descriptor d. +func (d Descriptor) GetData() ([]byte, error) { + b := make([]byte, d.raw.Size) + if _, err := io.ReadFull(d.GetReader(), b); err != nil { + return nil, err + } + return b, nil +} + +// GetReader returns a io.Reader that reads the data object associated with descriptor d. +func (d Descriptor) GetReader() io.Reader { + return io.NewSectionReader(d.r, d.raw.Offset, d.raw.Size) +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from d. +func (d Descriptor) GetIntegrityReader() io.Reader { + fields := []interface{}{ + d.raw.DataType, + d.raw.Used, + d.relativeID, + d.raw.LinkedID, + d.raw.Size, + d.raw.CreatedAt, + d.raw.UID, + d.raw.GID, + } + + // Encode endian-sensitive fields. + data := bytes.Buffer{} + for _, f := range fields { + if err := binary.Write(&data, binary.LittleEndian, f); err != nil { + panic(err) // (*bytes.Buffer).Write() is documented as always returning a nil error. + } + } + + return io.MultiReader( + &data, + bytes.NewReader(d.raw.Name[:]), + bytes.NewReader(d.raw.Extra[:]), + ) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go new file mode 100644 index 00000000000..c55cf51f9de --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go @@ -0,0 +1,300 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "crypto" + "errors" + "fmt" + "io" + "os" + "time" +) + +// descriptorOpts accumulates data object options. 
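The Descriptor accessors above support both whole-object reads and streaming. A hedged fragment, with f a loaded *sif.FileImage, the object ID illustrative, and io, os, and log imported:

```go
d, err := f.GetDescriptor(sif.WithID(1))
if err != nil {
	log.Fatal(err)
}

// Whole object in memory...
b, err := d.GetData()
if err != nil {
	log.Fatal(err)
}
log.Printf("%s: %d bytes", d.Name(), len(b))

// ...or streamed from a section reader over the backing storage.
if _, err := io.Copy(os.Stdout, d.GetReader()); err != nil {
	log.Fatal(err)
}
```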
+type descriptorOpts struct { + groupID uint32 + linkID uint32 + alignment int + name string + extra interface{} + t time.Time +} + +// DescriptorInputOpt are used to specify data object options. +type DescriptorInputOpt func(DataType, *descriptorOpts) error + +// OptNoGroup specifies the data object is not contained within a data object group. +func OptNoGroup() DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.groupID = 0 + return nil + } +} + +// OptGroupID specifies groupID as data object group ID. +func OptGroupID(groupID uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if groupID == 0 { + return ErrInvalidGroupID + } + opts.groupID = groupID + return nil + } +} + +// OptLinkedID specifies that the data object is linked to the data object with the specified ID. +func OptLinkedID(id uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if id == 0 { + return ErrInvalidObjectID + } + opts.linkID = id + return nil + } +} + +// OptLinkedGroupID specifies that the data object is linked to the data object group with the +// specified groupID. +func OptLinkedGroupID(groupID uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if groupID == 0 { + return ErrInvalidGroupID + } + opts.linkID = groupID | descrGroupMask + return nil + } +} + +// OptObjectAlignment specifies n as the data alignment requirement. +func OptObjectAlignment(n int) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.alignment = n + return nil + } +} + +// OptObjectName specifies name as the data object name. +func OptObjectName(name string) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.name = name + return nil + } +} + +// OptObjectTime specifies t as the data object creation time. +func OptObjectTime(t time.Time) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.t = t + return nil + } +} + +type unexpectedDataTypeError struct { + got DataType + want []DataType +} + +func (e *unexpectedDataTypeError) Error() string { + return fmt.Sprintf("unexpected data type %v, expected one of: %v", e.got, e.want) +} + +func (e *unexpectedDataTypeError) Is(target error) bool { + //nolint:errorlint // don't compare wrapped errors in Is() + t, ok := target.(*unexpectedDataTypeError) + if !ok { + return false + } + + if len(t.want) > 0 { + // Use a map to check that the "want" errors in e and t contain the same values, ignoring + // any ordering differences. + acc := make(map[DataType]int, len(t.want)) + + // Increment counter for each data type in e. + for _, dt := range e.want { + if _, ok := acc[dt]; !ok { + acc[dt] = 0 + } + acc[dt]++ + } + + // Decrement counter for each data type in e. + for _, dt := range t.want { + if _, ok := acc[dt]; !ok { + return false + } + acc[dt]-- + } + + // If the "want" errors in e and t are equivalent, all counters should be zero. + for _, n := range acc { + if n != 0 { + return false + } + } + } + + return (e.got == t.got || t.got == 0) +} + +// OptCryptoMessageMetadata sets metadata for a crypto message data object. The format type is set +// to ft, and the message type is set to mt. +// +// If this option is applied to a data object with an incompatible type, an error is returned. 
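The metadata options below attach type-checked extra data at descriptor-input time. A hedged sketch describing a primary system partition (the reader contents are a stand-in for real squashfs bytes):

```go
package main

import (
	"bytes"
	"log"
	"runtime"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	rootfs := bytes.NewReader([]byte{}) // stand-in for a squashfs image

	di, err := sif.NewDescriptorInput(sif.DataPartition, rootfs,
		sif.OptPartitionMetadata(sif.FsSquash, sif.PartPrimSys, runtime.GOARCH),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = di // pass to CreateContainer or AddObject as sketched earlier
}
```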
+func OptCryptoMessageMetadata(ft FormatType, mt MessageType) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + if got, want := t, DataCryptoMessage; got != want { + return &unexpectedDataTypeError{got, []DataType{want}} + } + + m := cryptoMessage{ + Formattype: ft, + Messagetype: mt, + } + + opts.extra = m + return nil + } +} + +var errUnknownArchitcture = errors.New("unknown architecture") + +// OptPartitionMetadata sets metadata for a partition data object. The filesystem type is set to +// fs, the partition type is set to pt, and the CPU architecture is set to arch. The value of arch +// should be the architecture as represented by the Go runtime. +// +// If this option is applied to a data object with an incompatible type, an error is returned. +func OptPartitionMetadata(fs FSType, pt PartType, arch string) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + if got, want := t, DataPartition; got != want { + return &unexpectedDataTypeError{got, []DataType{want}} + } + + sifarch := getSIFArch(arch) + if sifarch == hdrArchUnknown { + return fmt.Errorf("%w: %v", errUnknownArchitcture, arch) + } + + p := partition{ + Fstype: fs, + Parttype: pt, + Arch: sifarch, + } + + opts.extra = p + return nil + } +} + +// sifHashType converts h into a HashType. +func sifHashType(h crypto.Hash) hashType { + switch h { + case crypto.SHA256: + return hashSHA256 + case crypto.SHA384: + return hashSHA384 + case crypto.SHA512: + return hashSHA512 + case crypto.BLAKE2s_256: + return hashBLAKE2S + case crypto.BLAKE2b_256: + return hashBLAKE2B + } + return 0 +} + +// OptSignatureMetadata sets metadata for a signature data object. The hash type is set to ht, and +// the signing entity fingerprint is set to fp. +// +// If this option is applied to a data object with an incompatible type, an error is returned. +func OptSignatureMetadata(ht crypto.Hash, fp []byte) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + if got, want := t, DataSignature; got != want { + return &unexpectedDataTypeError{got, []DataType{want}} + } + + s := signature{ + Hashtype: sifHashType(ht), + } + copy(s.Entity[:], fp) + + opts.extra = s + return nil + } +} + +// DescriptorInput describes a new data object. +type DescriptorInput struct { + dt DataType + r io.Reader + opts descriptorOpts +} + +// DefaultObjectGroup is the default group that data objects are placed in. +const DefaultObjectGroup = 1 + +// NewDescriptorInput returns a DescriptorInput representing a data object of type t, with contents +// read from r, configured according to opts. +// +// It is possible (and often necessary) to store additional metadata related to certain types of +// data objects. Consider supplying options such as OptCryptoMessageMetadata, OptPartitionMetadata, +// and OptSignatureMetadata for this purpose. +// +// By default, the data object will be placed in the default data object group (1). To override +// this behavior, use OptNoGroup or OptGroupID. To link this data object, use OptLinkedID or +// OptLinkedGroupID. +// +// By default, the data object will be aligned according to the system's memory page size. To +// override this behavior, consider using OptObjectAlignment. +// +// By default, no name is set for data object. To set a name, use OptObjectName. +// +// When creating a new image, data object creation/modification times are set to the image creation +// time. 
When modifying an existing image, the data object creation/modification time is set to the +// image modification time. To override this behavior, consider using OptObjectTime. +func NewDescriptorInput(t DataType, r io.Reader, opts ...DescriptorInputOpt) (DescriptorInput, error) { + dopts := descriptorOpts{ + groupID: DefaultObjectGroup, + alignment: os.Getpagesize(), + } + + for _, opt := range opts { + if err := opt(t, &dopts); err != nil { + return DescriptorInput{}, fmt.Errorf("%w", err) + } + } + + di := DescriptorInput{ + dt: t, + r: r, + opts: dopts, + } + + return di, nil +} + +// fillDescriptor fills d according to di. If di does not explicitly specify a time value, use t. +func (di DescriptorInput) fillDescriptor(t time.Time, d *rawDescriptor) error { + d.DataType = di.dt + d.GroupID = di.opts.groupID | descrGroupMask + d.LinkedID = di.opts.linkID + + if !di.opts.t.IsZero() { + t = di.opts.t + } + d.CreatedAt = t.Unix() + d.ModifiedAt = t.Unix() + + d.UID = 0 + d.GID = 0 + + if err := d.setName(di.opts.name); err != nil { + return err + } + + return d.setExtra(di.opts.extra) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go new file mode 100644 index 00000000000..75266e1945c --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go @@ -0,0 +1,174 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" +) + +var ( + errInvalidMagic = errors.New("invalid SIF magic") + errIncompatibleVersion = errors.New("incompatible SIF version") +) + +// isValidSif looks at key fields from the global header to assess SIF validity. +func isValidSif(f *FileImage) error { + if f.h.Magic != hdrMagic { + return errInvalidMagic + } + + if f.h.Version != CurrentVersion.bytes() { + return errIncompatibleVersion + } + + return nil +} + +// populateMinIDs populates the minIDs field of f. +func (f *FileImage) populateMinIDs() { + f.minIDs = make(map[uint32]uint32) + f.WithDescriptors(func(d Descriptor) bool { + if minID, ok := f.minIDs[d.raw.GroupID]; !ok || d.ID() < minID { + f.minIDs[d.raw.GroupID] = d.ID() + } + return false + }) +} + +// loadContainer loads a SIF image from rw. +func loadContainer(rw ReadWriter) (*FileImage, error) { + f := FileImage{rw: rw} + + // Read global header. + err := binary.Read( + io.NewSectionReader(rw, 0, int64(binary.Size(f.h))), + binary.LittleEndian, + &f.h, + ) + if err != nil { + return nil, fmt.Errorf("reading global header: %w", err) + } + + if err := isValidSif(&f); err != nil { + return nil, err + } + + // Read descriptors. + f.rds = make([]rawDescriptor, f.h.DescriptorsTotal) + err = binary.Read( + io.NewSectionReader(rw, f.h.DescriptorsOffset, f.h.DescriptorsSize), + binary.LittleEndian, + &f.rds, + ) + if err != nil { + return nil, fmt.Errorf("reading descriptors: %w", err) + } + + f.populateMinIDs() + + return &f, nil +} + +// loadOpts accumulates container loading options. +type loadOpts struct { + flag int + closeOnUnload bool +} + +// LoadOpt are used to specify container loading options. 
+type LoadOpt func(*loadOpts) error + +// OptLoadWithFlag specifies flag (os.O_RDONLY etc.) to be used when opening the container file. +func OptLoadWithFlag(flag int) LoadOpt { + return func(lo *loadOpts) error { + lo.flag = flag + return nil + } +} + +// OptLoadWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. +func OptLoadWithCloseOnUnload(b bool) LoadOpt { + return func(lo *loadOpts) error { + lo.closeOnUnload = b + return nil + } +} + +// LoadContainerFromPath loads a new SIF container from path, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the file is opened for read and write access. To change this behavior, consider +// using OptLoadWithFlag. +func LoadContainerFromPath(path string, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + flag: os.O_RDWR, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + fp, err := os.OpenFile(path, lo.flag, 0) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := loadContainer(fp) + if err != nil { + fp.Close() + + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = true + return f, nil +} + +// LoadContainer loads a new SIF container from rw, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptLoadWithCloseOnUnload. +func LoadContainer(rw ReadWriter, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := loadContainer(rw) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = lo.closeOnUnload + return f, nil +} + +// UnloadContainer unloads f, releasing associated resources. +func (f *FileImage) UnloadContainer() error { + if c, ok := f.rw.(io.Closer); ok && f.closeOnUnload { + if err := c.Close(); err != nil { + return fmt.Errorf("%w", err) + } + } + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go new file mode 100644 index 00000000000..635d6e89cfb --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go @@ -0,0 +1,210 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "fmt" +) + +// ErrNoObjects is the error returned when an image contains no data objects. +var ErrNoObjects = errors.New("no objects in image") + +// ErrObjectNotFound is the error returned when a data object is not found. +var ErrObjectNotFound = errors.New("object not found") + +// ErrMultipleObjectsFound is the error returned when multiple data objects are found. +var ErrMultipleObjectsFound = errors.New("multiple objects found") + +// ErrInvalidObjectID is the error returned when an invalid object ID is supplied. 
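A hedged sketch of read-only loading with the options above, plus a few of the FileImage accessors defined later in sif.go (the path is illustrative):

```go
package main

import (
	"log"
	"os"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	// O_RDONLY overrides the default read-write open flag.
	f, err := sif.LoadContainerFromPath("image.sif", sif.OptLoadWithFlag(os.O_RDONLY))
	if err != nil {
		log.Fatal(err)
	}
	defer f.UnloadContainer()

	log.Printf("id=%s arch=%s created=%s", f.ID(), f.PrimaryArch(), f.CreatedAt())
}
```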
+var ErrInvalidObjectID = errors.New("invalid object ID") + +// ErrInvalidGroupID is the error returned when an invalid group ID is supplied. +var ErrInvalidGroupID = errors.New("invalid group ID") + +// DescriptorSelectorFunc returns true if d matches, and false otherwise. +type DescriptorSelectorFunc func(d Descriptor) (bool, error) + +// WithDataType selects descriptors that have data type dt. +func WithDataType(dt DataType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.DataType() == dt, nil + } +} + +// WithID selects descriptors with a matching ID. +func WithID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + return d.ID() == id, nil + } +} + +// WithNoGroup selects descriptors that are not contained within an object group. +func WithNoGroup() DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.GroupID() == 0, nil + } +} + +// WithGroupID returns a selector func that selects descriptors with a matching groupID. +func WithGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + return d.GroupID() == groupID, nil + } +} + +// WithLinkedID selects descriptors that are linked to the data object with specified ID. +func WithLinkedID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + linkedID, isGroup := d.LinkedID() + return !isGroup && linkedID == id, nil + } +} + +// WithLinkedGroupID selects descriptors that are linked to the data object group with specified +// ID. +func WithLinkedGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + linkedID, isGroup := d.LinkedID() + return isGroup && linkedID == groupID, nil + } +} + +// WithPartitionType selects descriptors containing a partition of type pt. +func WithPartitionType(pt PartType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.raw.isPartitionOfType(pt), nil + } +} + +// descriptorFromRaw populates a Descriptor from rd. +func (f *FileImage) descriptorFromRaw(rd *rawDescriptor) Descriptor { + return Descriptor{ + raw: *rd, + r: f.rw, + relativeID: rd.ID - f.minIDs[rd.GroupID], + } +} + +// GetDescriptors returns a slice of in-use descriptors for which all selector funcs return true. +// If the image contains no data objects, an error wrapping ErrNoObjects is returned. +func (f *FileImage) GetDescriptors(fns ...DescriptorSelectorFunc) ([]Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return nil, fmt.Errorf("%w", ErrNoObjects) + } + + var ds []Descriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(d *rawDescriptor) error { + ds = append(ds, f.descriptorFromRaw(d)) + return nil + }) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + return ds, nil +} + +// getDescriptor returns a pointer to the in-use descriptor selected by fns. If no descriptor is +// selected by fns, ErrObjectNotFound is returned. If multiple descriptors are selected by fns, +// ErrMultipleObjectsFound is returned. 
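Selector funcs compose with AND semantics (see multiSelectorFunc below). A hedged fragment, with f a loaded *sif.FileImage, errors and log imported, and the group ID illustrative:

```go
// Find all signature objects linked to data object group 1.
ds, err := f.GetDescriptors(
	sif.WithDataType(sif.DataSignature),
	sif.WithLinkedGroupID(1),
)
if errors.Is(err, sif.ErrNoObjects) {
	log.Print("image contains no data objects")
} else if err != nil {
	log.Fatal(err)
}
for _, d := range ds {
	log.Println(d.ID(), d.Name())
}
```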
+func (f *FileImage) getDescriptor(fns ...DescriptorSelectorFunc) (*rawDescriptor, error) { + var d *rawDescriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(found *rawDescriptor) error { + if d != nil { + return ErrMultipleObjectsFound + } + d = found + return nil + }) + + if err == nil && d == nil { + err = ErrObjectNotFound + } + + return d, err +} + +// GetDescriptor returns the in-use descriptor selected by fns. If the image contains no data +// objects, an error wrapping ErrNoObjects is returned. If no descriptor is selected by fns, an +// error wrapping ErrObjectNotFound is returned. If multiple descriptors are selected by fns, an +// error wrapping ErrMultipleObjectsFound is returned. +func (f *FileImage) GetDescriptor(fns ...DescriptorSelectorFunc) (Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return Descriptor{}, fmt.Errorf("%w", ErrNoObjects) + } + + d, err := f.getDescriptor(fns...) + if err != nil { + return Descriptor{}, fmt.Errorf("%w", err) + } + + return f.descriptorFromRaw(d), nil +} + +// multiSelectorFunc returns a DescriptorSelectorFunc that selects a descriptor iff all of fns +// select the descriptor. +func multiSelectorFunc(fns ...DescriptorSelectorFunc) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + for _, fn := range fns { + if ok, err := fn(d); !ok || err != nil { + return ok, err + } + } + return true, nil + } +} + +// withDescriptors calls onMatchFn with each in-use descriptor in f for which selectFn returns +// true. If selectFn or onMatchFn return a non-nil error, the iteration halts, and the error is +// returned to the caller. +func (f *FileImage) withDescriptors(selectFn DescriptorSelectorFunc, onMatchFn func(*rawDescriptor) error) error { + for i, d := range f.rds { + if !d.Used { + continue + } + + if ok, err := selectFn(f.descriptorFromRaw(&f.rds[i])); err != nil { + return err + } else if !ok { + continue + } + + if err := onMatchFn(&f.rds[i]); err != nil { + return err + } + } + + return nil +} + +var errAbort = errors.New("abort") + +// abortOnMatch is a semantic convenience function that always returns a non-nil error, which can +// be used as a no-op matchFn. +func abortOnMatch(*rawDescriptor) error { return errAbort } + +// WithDescriptors calls fn with each in-use descriptor in f, until fn returns true. +func (f *FileImage) WithDescriptors(fn func(d Descriptor) bool) { + selectFn := func(d Descriptor) (bool, error) { + return fn(d), nil + } + _ = f.withDescriptors(selectFn, abortOnMatch) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go new file mode 100644 index 00000000000..704acee4a0e --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go @@ -0,0 +1,364 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +// Package sif implements data structures and routines to create +// and access SIF files. +// +// Layout of a SIF file (example): +// +// .================================================. +// | GLOBAL HEADER: Sifheader | +// | - launch: "#!/usr/bin/env..." 
| +// | - magic: "SIF_MAGIC" | +// | - version: "1" | +// | - arch: "4" | +// | - uuid: b2659d4e-bd50-4ea5-bd17-eec5e54f918e | +// | - ctime: 1504657553 | +// | - mtime: 1504657653 | +// | - ndescr: 3 | +// | - descroff: 120 | --. +// | - descrlen: 432 | | +// | - dataoff: 4096 | | +// | - datalen: 619362 | | +// |------------------------------------------------| <-' +// | DESCR[0]: Sifdeffile | +// | - Sifcommon | +// | - datatype: DATA_DEFFILE | +// | - id: 1 | +// | - groupid: 1 | +// | - link: NONE | +// | - fileoff: 4096 | --. +// | - filelen: 222 | | +// |------------------------------------------------| <-----. +// | DESCR[1]: Sifpartition | | | +// | - Sifcommon | | | +// | - datatype: DATA_PARTITION | | | +// | - id: 2 | | | +// | - groupid: 1 | | | +// | - link: NONE | | | +// | - fileoff: 4318 | ----. | +// | - filelen: 618496 | | | | +// | - fstype: Squashfs | | | | +// | - parttype: System | | | | +// | - content: Linux | | | | +// |------------------------------------------------| | | | +// | DESCR[2]: Sifsignature | | | | +// | - Sifcommon | | | | +// | - datatype: DATA_SIGNATURE | | | | +// | - id: 3 | | | | +// | - groupid: NONE | | | | +// | - link: 2 | ------' +// | - fileoff: 622814 | ------. +// | - filelen: 644 | | | | +// | - hashtype: SHA384 | | | | +// | - entity: @ | | | | +// |------------------------------------------------| <-' | | +// | Definition file data | | | +// | . | | | +// | . | | | +// | . | | | +// |------------------------------------------------| <---' | +// | File system partition image | | +// | . | | +// | . | | +// | . | | +// |------------------------------------------------| <-----' +// | Signed verification data | +// | . | +// | . | +// | . | +// `================================================' +// +package sif + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/google/uuid" +) + +// SIF header constants and quantities. +const ( + hdrLaunchLen = 32 // len("#!/usr/bin/env... ") + hdrMagicLen = 10 // len("SIF_MAGIC") + hdrVersionLen = 3 // len("99") +) + +var hdrMagic = [...]byte{'S', 'I', 'F', '_', 'M', 'A', 'G', 'I', 'C', '\x00'} + +// SpecVersion specifies a SIF specification version. +type SpecVersion uint8 + +func (v SpecVersion) String() string { return fmt.Sprintf("%02d", v) } + +// bytes returns the value of b, formatted for direct inclusion in a SIF header. +func (v SpecVersion) bytes() [hdrVersionLen]byte { + var b [3]byte + copy(b[:], fmt.Sprintf("%02d", v)) + return b +} + +// SIF specification versions. +const ( + version01 SpecVersion = iota + 1 +) + +// CurrentVersion specifies the current SIF specification version. +const CurrentVersion = version01 + +const ( + descrGroupMask = 0xf0000000 // groups start at that offset + descrEntityLen = 256 // len("Joe Bloe ...") + descrNameLen = 128 // descriptor name (string identifier) + descrMaxPrivLen = 384 // size reserved for descriptor specific data +) + +// DataType represents the different SIF data object types stored in the image. +type DataType int32 + +// List of supported SIF data types. +const ( + DataDeffile DataType = iota + 0x4001 // definition file data object + DataEnvVar // environment variables data object + DataLabels // JSON labels data object + DataPartition // file system data object + DataSignature // signing/verification data object + DataGenericJSON // generic JSON meta-data + DataGeneric // generic / raw data + DataCryptoMessage // cryptographic message data object +) + +// String returns a human-readable representation of t. 
+func (t DataType) String() string { + switch t { + case DataDeffile: + return "Def.FILE" + case DataEnvVar: + return "Env.Vars" + case DataLabels: + return "JSON.Labels" + case DataPartition: + return "FS" + case DataSignature: + return "Signature" + case DataGenericJSON: + return "JSON.Generic" + case DataGeneric: + return "Generic/Raw" + case DataCryptoMessage: + return "Cryptographic Message" + } + return "Unknown" +} + +// FSType represents the different SIF file system types found in partition data objects. +type FSType int32 + +// List of supported file systems. +const ( + FsSquash FSType = iota + 1 // Squashfs file system, RDONLY + FsExt3 // EXT3 file system, RDWR (deprecated) + FsImmuObj // immutable data object archive + FsRaw // raw data + FsEncryptedSquashfs // Encrypted Squashfs file system, RDONLY +) + +// String returns a human-readable representation of t. +func (t FSType) String() string { + switch t { + case FsSquash: + return "Squashfs" + case FsExt3: + return "Ext3" + case FsImmuObj: + return "Archive" + case FsRaw: + return "Raw" + case FsEncryptedSquashfs: + return "Encrypted squashfs" + } + return "Unknown" +} + +// PartType represents the different SIF container partition types (system and data). +type PartType int32 + +// List of supported partition types. +const ( + PartSystem PartType = iota + 1 // partition hosts an operating system + PartPrimSys // partition hosts the primary operating system + PartData // partition hosts data only + PartOverlay // partition hosts an overlay +) + +// String returns a human-readable representation of t. +func (t PartType) String() string { + switch t { + case PartSystem: + return "System" + case PartPrimSys: + return "*System" + case PartData: + return "Data" + case PartOverlay: + return "Overlay" + } + return "Unknown" +} + +// hashType represents the different SIF hashing function types used to fingerprint data objects. +type hashType int32 + +// List of supported hash functions. +const ( + hashSHA256 hashType = iota + 1 + hashSHA384 + hashSHA512 + hashBLAKE2S + hashBLAKE2B +) + +// FormatType represents the different formats used to store cryptographic message objects. +type FormatType int32 + +// List of supported cryptographic message formats. +const ( + FormatOpenPGP FormatType = iota + 1 + FormatPEM +) + +// String returns a human-readable representation of t. +func (t FormatType) String() string { + switch t { + case FormatOpenPGP: + return "OpenPGP" + case FormatPEM: + return "PEM" + } + return "Unknown" +} + +// MessageType represents the different messages stored within cryptographic message objects. +type MessageType int32 + +// List of supported cryptographic message formats. +const ( + // openPGP formatted messages. + MessageClearSignature MessageType = 0x100 + + // PEM formatted messages. + MessageRSAOAEP MessageType = 0x200 +) + +// String returns a human-readable representation of t. +func (t MessageType) String() string { + switch t { + case MessageClearSignature: + return "Clear Signature" + case MessageRSAOAEP: + return "RSA-OAEP" + } + return "Unknown" +} + +// header describes a loaded SIF file. +type header struct { + LaunchScript [hdrLaunchLen]byte + + Magic [hdrMagicLen]byte + Version [hdrVersionLen]byte + Arch archType + ID uuid.UUID + + CreatedAt int64 + ModifiedAt int64 + + DescriptorsFree int64 + DescriptorsTotal int64 + DescriptorsOffset int64 + DescriptorsSize int64 + DataOffset int64 + DataSize int64 +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from h. 
+func (h header) GetIntegrityReader() io.Reader { + return io.MultiReader( + bytes.NewReader(h.LaunchScript[:]), + bytes.NewReader(h.Magic[:]), + bytes.NewReader(h.Version[:]), + bytes.NewReader(h.ID[:]), + ) +} + +// ReadWriter describes the interface required to read and write SIF images. +type ReadWriter interface { + io.ReaderAt + io.WriteSeeker + Truncate(int64) error +} + +// FileImage describes the representation of a SIF file in memory. +type FileImage struct { + rw ReadWriter // Backing storage for image. + + h header // Raw global header from image. + rds []rawDescriptor // Raw descriptors from image. + + closeOnUnload bool // Close rw on Unload. + minIDs map[uint32]uint32 // Minimum object IDs for each group ID. +} + +// LaunchScript returns the image launch script. +func (f *FileImage) LaunchScript() string { + return string(bytes.TrimRight(f.h.LaunchScript[:], "\x00")) +} + +// Version returns the SIF specification version of the image. +func (f *FileImage) Version() string { + return string(bytes.TrimRight(f.h.Version[:], "\x00")) +} + +// PrimaryArch returns the primary CPU architecture of the image, or "unknown" if the primary CPU +// architecture cannot be determined. +func (f *FileImage) PrimaryArch() string { return f.h.Arch.GoArch() } + +// ID returns the ID of the image. +func (f *FileImage) ID() string { return f.h.ID.String() } + +// CreatedAt returns the creation time of the image. +func (f *FileImage) CreatedAt() time.Time { return time.Unix(f.h.CreatedAt, 0) } + +// ModifiedAt returns the last modification time of the image. +func (f *FileImage) ModifiedAt() time.Time { return time.Unix(f.h.ModifiedAt, 0) } + +// DescriptorsFree returns the number of free descriptors in the image. +func (f *FileImage) DescriptorsFree() int64 { return f.h.DescriptorsFree } + +// DescriptorsTotal returns the total number of descriptors in the image. +func (f *FileImage) DescriptorsTotal() int64 { return f.h.DescriptorsTotal } + +// DescriptorsOffset returns the offset (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsOffset() int64 { return f.h.DescriptorsOffset } + +// DescriptorsSize returns the size (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsSize() int64 { return f.h.DescriptorsSize } + +// DataOffset returns the offset (in bytes) of the data section in the image. +func (f *FileImage) DataOffset() int64 { return f.h.DataOffset } + +// DataSize returns the size (in bytes) of the data section in the image. +func (f *FileImage) DataSize() int64 { return f.h.DataSize } + +// GetHeaderIntegrityReader returns an io.Reader that reads the integrity-protected fields from the +// header of the image. +func (f *FileImage) GetHeaderIntegrityReader() io.Reader { + return f.h.GetIntegrityReader() +} diff --git a/vendor/github.com/vbauerster/mpb/v7/.travis.yml b/vendor/github.com/vbauerster/mpb/v7/.travis.yml deleted file mode 100644 index 9a203a67d2a..00000000000 --- a/vendor/github.com/vbauerster/mpb/v7/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le - -go: - - 1.14.x - -script: - - go test -race ./... 
- - for i in _examples/*/; do go build $i/*.go || exit 1; done diff --git a/vendor/github.com/vbauerster/mpb/v7/README.md b/vendor/github.com/vbauerster/mpb/v7/README.md index 90d4fe639ca..413f9e1dbf0 100644 --- a/vendor/github.com/vbauerster/mpb/v7/README.md +++ b/vendor/github.com/vbauerster/mpb/v7/README.md @@ -1,8 +1,7 @@ # Multi Progress Bar [![GoDoc](https://pkg.go.dev/badge/github.com/vbauerster/mpb)](https://pkg.go.dev/github.com/vbauerster/mpb/v7) -[![Build Status](https://travis-ci.org/vbauerster/mpb.svg?branch=master)](https://travis-ci.org/vbauerster/mpb) -[![Go Report Card](https://goreportcard.com/badge/github.com/vbauerster/mpb)](https://goreportcard.com/report/github.com/vbauerster/mpb) +[![Test status](https://github.com/vbauerster/mpb/actions/workflows/test.yml/badge.svg)](https://github.com/vbauerster/mpb/actions/workflows/test.yml) [![Donate with PayPal](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.me/vbauerster) **mpb** is a Go lib for rendering progress bars in terminal applications. @@ -37,10 +36,10 @@ func main() { total := 100 name := "Single Bar:" - // adding a single bar, which will inherit container's width - bar := p.Add(int64(total), - // progress bar filler with customized style - mpb.NewBarFiller(mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟")), + // create a single bar, which will inherit container's width + bar := p.New(int64(total), + // BarFillerBuilder with custom style + mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"), mpb.PrependDecorators( // display our name with one space on the right decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), @@ -66,7 +65,7 @@ func main() { ```go var wg sync.WaitGroup - // passed &wg will be accounted at p.Wait() call + // passed wg will be accounted at p.Wait() call p := mpb.New(mpb.WithWaitGroup(&wg)) total, numBars := 100, 3 wg.Add(numBars) @@ -104,7 +103,7 @@ func main() { } }() } - // Waiting for passed &wg and for all bars to complete and flush + // wait for passed wg and for all bars to complete and flush p.Wait() ``` diff --git a/vendor/github.com/vbauerster/mpb/v7/bar.go b/vendor/github.com/vbauerster/mpb/v7/bar.go index dabe1a47546..4991f4f15b4 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar.go @@ -5,9 +5,9 @@ import ( "context" "fmt" "io" - "log" "runtime/debug" "strings" + "sync" "time" "github.com/acarl005/stripansi" @@ -17,32 +17,21 @@ import ( // Bar represents a progress bar. type Bar struct { - priority int // used by heap - index int // used by heap - - toShutdown bool - toDrop bool - noPop bool - hasEwmaDecorators bool - operateState chan func(*bState) - frameCh chan *frame - - // cancel is called either by user or on complete event - cancel func() - // done is closed after cacheState is assigned - done chan struct{} - // cacheState is populated, right after close(b.done) - cacheState *bState - + index int // used by heap + priority int // used by heap + hasEwma bool + frameCh chan *renderFrame + operateState chan func(*bState) + done chan struct{} container *Progress - dlogger *log.Logger + bs *bState + cancel func() recoveredPanic interface{} } type extenderFunc func(in io.Reader, reqWidth int, st decor.Statistics) (out io.Reader, lines int) -// bState is actual bar state. It gets passed to *Bar.serve(...) monitor -// goroutine. +// bState is actual bar's state. 
type bState struct { id int priority int @@ -50,11 +39,10 @@ type bState struct { total int64 current int64 refill int64 - lastN int64 - iterated bool + lastIncrement int64 trimSpace bool completed bool - completeFlushed bool + aborted bool triggerComplete bool dropOnComplete bool noPop bool @@ -63,36 +51,33 @@ type bState struct { averageDecorators []decor.AverageDecorator ewmaDecorators []decor.EwmaDecorator shutdownListeners []decor.ShutdownListener - bufP, bufB, bufA *bytes.Buffer + buffers [3]*bytes.Buffer filler BarFiller middleware func(BarFiller) BarFiller extender extenderFunc + debugOut io.Writer - // runningBar is a key for *pState.parkedBars - runningBar *Bar - - debugOut io.Writer + afterBar *Bar // key for (*pState).queueBars + sync bool } -type frame struct { - reader io.Reader - lines int +type renderFrame struct { + reader io.Reader + lines int + shutdown bool } func newBar(container *Progress, bs *bState) *Bar { - logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id) ctx, cancel := context.WithCancel(container.ctx) bar := &Bar{ - container: container, priority: bs.priority, - toDrop: bs.dropOnComplete, - noPop: bs.noPop, + hasEwma: len(bs.ewmaDecorators) != 0, + frameCh: make(chan *renderFrame, 1), operateState: make(chan func(*bState)), - frameCh: make(chan *frame, 1), done: make(chan struct{}), + container: container, cancel: cancel, - dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile), } go bar.serve(ctx, bs) @@ -100,12 +85,20 @@ func newBar(container *Progress, bs *bState) *Bar { } // ProxyReader wraps r with metrics required for progress tracking. -// Panics if r is nil. +// If r is 'unknown total/size' reader it's mandatory to call +// (*Bar).SetTotal(-1, true) method after (Reader).Read returns io.EOF. +// Panics if r is nil. If bar is already completed or aborted, returns +// nil. func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser { if r == nil { panic("expected non nil io.Reader") } - return newProxyReader(r, b) + select { + case <-b.done: + return nil + default: + return b.newProxyReader(r) + } } // ID returs id of the bar. @@ -115,18 +108,18 @@ func (b *Bar) ID() int { case b.operateState <- func(s *bState) { result <- s.id }: return <-result case <-b.done: - return b.cacheState.id + return b.bs.id } } -// Current returns bar's current number, in other words sum of all increments. +// Current returns bar's current value, in other words sum of all increments. func (b *Bar) Current() int64 { result := make(chan int64) select { case b.operateState <- func(s *bState) { result <- s.current }: return <-result case <-b.done: - return b.cacheState.current + return b.bs.current } } @@ -145,7 +138,7 @@ func (b *Bar) SetRefill(amount int64) { // TraverseDecorators traverses all available decorators and calls cb func on each. func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { - done := make(chan struct{}) + sync := make(chan struct{}) select { case b.operateState <- func(s *bState) { for _, decorators := range [...][]decor.Decorator{ @@ -156,28 +149,56 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { cb(extractBaseDecorator(d)) } } - close(done) + close(sync) + }: + <-sync + case <-b.done: + } +} + +// EnableTriggerComplete enables triggering complete event. It's +// effective only for bar which was constructed with `total <= 0` and +// after total has been set with (*Bar).SetTotal(int64, false). If bar +// has been incremented to the total, complete event is triggered right +// away. 
+func (b *Bar) EnableTriggerComplete() { + select { + case b.operateState <- func(s *bState) { + if s.triggerComplete || s.total <= 0 { + return + } + if s.current >= s.total { + s.current = s.total + s.completed = true + go b.forceRefresh() + } else { + s.triggerComplete = true + } }: - <-done case <-b.done: } } -// SetTotal sets total dynamically. -// If total is less than or equal to zero it takes progress' current value. -func (b *Bar) SetTotal(total int64, triggerComplete bool) { +// SetTotal sets total to an arbitrary value. It's effective only for +// bar which was constructed with `total <= 0`. Setting total to negative +// value is equivalent to (*Bar).SetTotal((*Bar).Current(), bool). +// If triggerCompleteNow is true, total value is set to current and +// complete event is triggered right away. +func (b *Bar) SetTotal(total int64, triggerCompleteNow bool) { select { case b.operateState <- func(s *bState) { - s.triggerComplete = triggerComplete - if total <= 0 { + if s.triggerComplete { + return + } + if total < 0 { s.total = s.current } else { s.total = total } - if s.triggerComplete && !s.completed { + if triggerCompleteNow { s.current = s.total s.completed = true - go b.forceRefreshIfLastUncompleted() + go b.forceRefresh() } }: case <-b.done: @@ -189,13 +210,12 @@ func (b *Bar) SetTotal(total int64, triggerComplete bool) { func (b *Bar) SetCurrent(current int64) { select { case b.operateState <- func(s *bState) { - s.iterated = true - s.lastN = current - s.current + s.lastIncrement = current - s.current s.current = current if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.forceRefreshIfLastUncompleted() + go b.forceRefresh() } }: case <-b.done: @@ -214,15 +234,17 @@ func (b *Bar) IncrBy(n int) { // IncrInt64 increments progress by amount of n. func (b *Bar) IncrInt64(n int64) { + if n <= 0 { + return + } select { case b.operateState <- func(s *bState) { - s.iterated = true - s.lastN = n + s.lastIncrement = n s.current += n if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.forceRefreshIfLastUncompleted() + go b.forceRefresh() } }: case <-b.done: @@ -236,10 +258,18 @@ func (b *Bar) IncrInt64(n int64) { func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { select { case b.operateState <- func(s *bState) { - ewmaIterationUpdate(false, s, dur) + if s.lastIncrement > 0 { + s.decoratorEwmaUpdate(dur) + s.lastIncrement = 0 + } else { + panic("increment required before ewma iteration update") + } }: case <-b.done: - ewmaIterationUpdate(true, b.cacheState, dur) + if b.bs.lastIncrement > 0 { + b.bs.decoratorEwmaUpdate(dur) + b.bs.lastIncrement = 0 + } } } @@ -249,9 +279,7 @@ func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { func (b *Bar) DecoratorAverageAdjust(start time.Time) { select { case b.operateState <- func(s *bState) { - for _, d := range s.averageDecorators { - d.AverageAdjust(start) - } + s.decoratorAverageAdjust(start) }: case <-b.done: } @@ -266,43 +294,33 @@ func (b *Bar) SetPriority(priority int) { // Abort interrupts bar's running goroutine. Abort won't be engaged // if bar is already in complete state. If drop is true bar will be -// removed as well. +// removed as well. To make sure that bar has been removed call +// (*Bar).Wait method. 
func (b *Bar) Abort(drop bool) { - done := make(chan struct{}) select { case b.operateState <- func(s *bState) { - if s.completed == true { - close(done) + if s.completed || s.aborted { return } - // container must be run during lifetime of this inner goroutine - // we control this by done channel declared above - go func() { - if drop { - b.container.dropBar(b) - } else { - var uncompleted int - b.container.traverseBars(func(bar *Bar) bool { - if b != bar && !bar.Completed() { - uncompleted++ - return false - } - return true - }) - if uncompleted == 0 { - b.container.refreshCh <- time.Now() - } - } - close(done) // release hold of Abort - }() - b.cancel() + s.aborted = true + s.dropOnComplete = drop + go b.forceRefresh() }: - // guarantee: container is alive during lifetime of this hold - <-done case <-b.done: } } +// Aborted reports whether the bar is in aborted state. +func (b *Bar) Aborted() bool { + result := make(chan bool) + select { + case b.operateState <- func(s *bState) { result <- s.aborted }: + return <-result + case <-b.done: + return b.bs.aborted + } +} + // Completed reports whether the bar is in completed state. func (b *Bar) Completed() bool { result := make(chan bool) @@ -310,22 +328,28 @@ func (b *Bar) Completed() bool { case b.operateState <- func(s *bState) { result <- s.completed }: return <-result case <-b.done: - return true + return b.bs.completed } } -func (b *Bar) serve(ctx context.Context, s *bState) { +// Wait blocks until bar is completed or aborted. +func (b *Bar) Wait() { + <-b.done +} + +func (b *Bar) serve(ctx context.Context, bs *bState) { defer b.container.bwg.Done() + if bs.afterBar != nil && bs.sync { + bs.afterBar.Wait() + } for { select { case op := <-b.operateState: - op(s) + op(bs) case <-ctx.Done(): - // Notifying decorators about shutdown event - for _, sl := range s.shutdownListeners { - sl.Shutdown() - } - b.cacheState = s + bs.aborted = !bs.completed + bs.decoratorShutdownNotify() + b.bs = bs close(b.done) return } @@ -335,76 +359,62 @@ func (b *Bar) serve(ctx context.Context, s *bState) { func (b *Bar) render(tw int) { select { case b.operateState <- func(s *bState) { + var reader io.Reader + var lines int stat := newStatistics(tw, s) defer func() { // recovering if user defined decorator panics for example if p := recover(); p != nil { - if b.recoveredPanic == nil { - s.extender = makePanicExtender(p) - b.toShutdown = !b.toShutdown - b.recoveredPanic = p + if s.debugOut != nil { + fmt.Fprintln(s.debugOut, p) + _, _ = s.debugOut.Write(debug.Stack()) } - reader, lines := s.extender(nil, s.reqWidth, stat) - b.frameCh <- &frame{reader, lines + 1} - b.dlogger.Println(p) + s.aborted = !s.completed + s.extender = makePanicExtender(p) + reader, lines = s.extender(nil, s.reqWidth, stat) + b.recoveredPanic = p } - s.completeFlushed = s.completed + frame := renderFrame{ + reader: reader, + lines: lines + 1, + shutdown: s.completed || s.aborted, + } + if frame.shutdown { + b.cancel() + } + b.frameCh <- &frame }() - reader, lines := s.extender(s.draw(stat), s.reqWidth, stat) - b.toShutdown = s.completed && !s.completeFlushed - b.frameCh <- &frame{reader, lines + 1} + if b.recoveredPanic == nil { + reader = s.draw(stat) + } + reader, lines = s.extender(reader, s.reqWidth, stat) }: case <-b.done: - s := b.cacheState - stat := newStatistics(tw, s) - var r io.Reader + var reader io.Reader + var lines int + stat, s := newStatistics(tw, b.bs), b.bs if b.recoveredPanic == nil { - r = s.draw(stat) - } - reader, lines := s.extender(r, s.reqWidth, stat) - 
b.frameCh <- &frame{reader, lines + 1} - } -} - -func (b *Bar) subscribeDecorators() { - var averageDecorators []decor.AverageDecorator - var ewmaDecorators []decor.EwmaDecorator - var shutdownListeners []decor.ShutdownListener - b.TraverseDecorators(func(d decor.Decorator) { - if d, ok := d.(decor.AverageDecorator); ok { - averageDecorators = append(averageDecorators, d) + reader = s.draw(stat) } - if d, ok := d.(decor.EwmaDecorator); ok { - ewmaDecorators = append(ewmaDecorators, d) + reader, lines = s.extender(reader, s.reqWidth, stat) + b.frameCh <- &renderFrame{ + reader: reader, + lines: lines + 1, } - if d, ok := d.(decor.ShutdownListener); ok { - shutdownListeners = append(shutdownListeners, d) - } - }) - b.hasEwmaDecorators = len(ewmaDecorators) != 0 - select { - case b.operateState <- func(s *bState) { - s.averageDecorators = averageDecorators - s.ewmaDecorators = ewmaDecorators - s.shutdownListeners = shutdownListeners - }: - case <-b.done: } } -func (b *Bar) forceRefreshIfLastUncompleted() { - var uncompleted int +func (b *Bar) forceRefresh() { + var anyOtherRunning bool b.container.traverseBars(func(bar *Bar) bool { - if b != bar && !bar.Completed() { - uncompleted++ - return false - } - return true + anyOtherRunning = b != bar && bar.isRunning() + return !anyOtherRunning }) - if uncompleted == 0 { + if !anyOtherRunning { for { select { case b.container.refreshCh <- time.Now(): + time.Sleep(prr) case <-b.done: return } @@ -412,51 +422,64 @@ func (b *Bar) forceRefreshIfLastUncompleted() { } } +func (b *Bar) isRunning() bool { + result := make(chan bool) + select { + case b.operateState <- func(s *bState) { + result <- !s.completed && !s.aborted + }: + return <-result + case <-b.done: + return false + } +} + func (b *Bar) wSyncTable() [][]chan int { result := make(chan [][]chan int) select { case b.operateState <- func(s *bState) { result <- s.wSyncTable() }: return <-result case <-b.done: - return b.cacheState.wSyncTable() + return b.bs.wSyncTable() } } func (s *bState) draw(stat decor.Statistics) io.Reader { + bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2] nlr := strings.NewReader("\n") tw := stat.AvailableWidth for _, d := range s.pDecorators { str := d.Decor(stat) stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) - s.bufP.WriteString(str) + bufP.WriteString(str) } if stat.AvailableWidth < 1 { - trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…")) - s.bufP.Reset() + trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), tw, "…")) + bufP.Reset() return io.MultiReader(trunc, nlr) } if !s.trimSpace && stat.AvailableWidth > 1 { stat.AvailableWidth -= 2 - s.bufB.WriteByte(' ') - defer s.bufB.WriteByte(' ') + bufB.WriteByte(' ') + defer bufB.WriteByte(' ') } tw = stat.AvailableWidth for _, d := range s.aDecorators { str := d.Decor(stat) stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) - s.bufA.WriteString(str) + bufA.WriteString(str) } if stat.AvailableWidth < 1 { - trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…")) - s.bufA.Reset() - return io.MultiReader(s.bufP, s.bufB, trunc, nlr) + trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), tw, "…")) + bufA.Reset() + return io.MultiReader(bufP, bufB, trunc, nlr) } - s.filler.Fill(s.bufB, s.reqWidth, stat) + s.filler.Fill(bufB, s.reqWidth, stat) - return io.MultiReader(s.bufP, s.bufB, s.bufA, nlr) + return io.MultiReader(bufP, bufB, bufA, nlr) } func (s *bState) 
wSyncTable() [][]chan int { @@ -481,14 +504,86 @@ func (s *bState) wSyncTable() [][]chan int { return table } +func (s *bState) subscribeDecorators() { + for _, decorators := range [...][]decor.Decorator{ + s.pDecorators, + s.aDecorators, + } { + for _, d := range decorators { + d = extractBaseDecorator(d) + if d, ok := d.(decor.AverageDecorator); ok { + s.averageDecorators = append(s.averageDecorators, d) + } + if d, ok := d.(decor.EwmaDecorator); ok { + s.ewmaDecorators = append(s.ewmaDecorators, d) + } + if d, ok := d.(decor.ShutdownListener); ok { + s.shutdownListeners = append(s.shutdownListeners, d) + } + } + } +} + +func (s bState) decoratorEwmaUpdate(dur time.Duration) { + wg := new(sync.WaitGroup) + for i := 0; i < len(s.ewmaDecorators); i++ { + switch d := s.ewmaDecorators[i]; i { + case len(s.ewmaDecorators) - 1: + d.EwmaUpdate(s.lastIncrement, dur) + default: + wg.Add(1) + go func() { + d.EwmaUpdate(s.lastIncrement, dur) + wg.Done() + }() + } + } + wg.Wait() +} + +func (s bState) decoratorAverageAdjust(start time.Time) { + wg := new(sync.WaitGroup) + for i := 0; i < len(s.averageDecorators); i++ { + switch d := s.averageDecorators[i]; i { + case len(s.averageDecorators) - 1: + d.AverageAdjust(start) + default: + wg.Add(1) + go func() { + d.AverageAdjust(start) + wg.Done() + }() + } + } + wg.Wait() +} + +func (s bState) decoratorShutdownNotify() { + wg := new(sync.WaitGroup) + for i := 0; i < len(s.shutdownListeners); i++ { + switch d := s.shutdownListeners[i]; i { + case len(s.shutdownListeners) - 1: + d.Shutdown() + default: + wg.Add(1) + go func() { + d.Shutdown() + wg.Done() + }() + } + } + wg.Wait() +} + func newStatistics(tw int, s *bState) decor.Statistics { return decor.Statistics{ - ID: s.id, AvailableWidth: tw, + ID: s.id, Total: s.total, Current: s.current, Refill: s.refill, - Completed: s.completeFlushed, + Completed: s.completed, + Aborted: s.aborted, } } @@ -499,27 +594,13 @@ func extractBaseDecorator(d decor.Decorator) decor.Decorator { return d } -func ewmaIterationUpdate(done bool, s *bState, dur time.Duration) { - if !done && !s.iterated { - panic("increment required before ewma iteration update") - } else { - s.iterated = false - } - for _, d := range s.ewmaDecorators { - d.EwmaUpdate(s.lastN, dur) - } -} - func makePanicExtender(p interface{}) extenderFunc { pstr := fmt.Sprint(p) - stack := debug.Stack() - stackLines := bytes.Count(stack, []byte("\n")) return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) { mr := io.MultiReader( strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")), - strings.NewReader(fmt.Sprintf("\n%#v\n", st)), - bytes.NewReader(stack), + strings.NewReader("\n"), ) - return mr, stackLines + 1 + return mr, 0 } } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go index a69087c474e..81177fc2eea 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go @@ -9,31 +9,42 @@ import ( // BarFiller interface. // Bar (without decorators) renders itself by calling BarFiller's Fill method. // -// reqWidth is requested width, set by `func WithWidth(int) ContainerOption`. +// reqWidth is requested width set by `func WithWidth(int) ContainerOption`. // If not set, it defaults to terminal width. 
// -// Default implementations can be obtained via: -// -// func NewBarFiller(BarStyle()) BarFiller -// func NewBarFiller(SpinnerStyle()) BarFiller -// type BarFiller interface { Fill(w io.Writer, reqWidth int, stat decor.Statistics) } // BarFillerBuilder interface. +// Default implementations are: +// +// BarStyle() +// SpinnerStyle() +// NopStyle() +// type BarFillerBuilder interface { Build() BarFiller } -// BarFillerFunc is function type adapter to convert function into BarFiller. +// BarFillerFunc is function type adapter to convert compatible function +// into BarFiller interface. type BarFillerFunc func(w io.Writer, reqWidth int, stat decor.Statistics) func (f BarFillerFunc) Fill(w io.Writer, reqWidth int, stat decor.Statistics) { f(w, reqWidth, stat) } +// BarFillerBuilderFunc is function type adapter to convert compatible +// function into BarFillerBuilder interface. +type BarFillerBuilderFunc func() BarFiller + +func (f BarFillerBuilderFunc) Build() BarFiller { + return f() +} + // NewBarFiller constructs a BarFiller from provided BarFillerBuilder. +// Deprecated. Prefer using `*Progress.New(...)` directly. func NewBarFiller(b BarFillerBuilder) BarFiller { return b.Build() } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go index 80b2104555d..d8bf90a4a94 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go @@ -32,13 +32,13 @@ type BarStyleComposer interface { } type bFiller struct { + rev bool components [components]*component tip struct { count uint onComplete *component frames []*component } - flush func(dst io.Writer, filling, padding [][]byte) } type component struct { @@ -113,14 +113,7 @@ func (s *barStyle) Reverse() BarStyleComposer { } func (s *barStyle) Build() BarFiller { - bf := new(bFiller) - if s.rev { - bf.flush = func(dst io.Writer, filling, padding [][]byte) { - flush(dst, padding, filling) - } - } else { - bf.flush = flush - } + bf := &bFiller{rev: s.rev} bf.components[iLbound] = &component{ width: runewidth.StringWidth(stripansi.Strip(s.lbound)), bytes: []byte(s.lbound), @@ -164,8 +157,8 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { return } - w.Write(s.components[iLbound].bytes) - defer w.Write(s.components[iRbound].bytes) + mustWrite(w, s.components[iLbound].bytes) + defer mustWrite(w, s.components[iRbound].bytes) if width == 0 { return @@ -236,14 +229,25 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { } } - s.flush(w, filling, padding) + if s.rev { + flush(w, padding, filling) + } else { + flush(w, filling, padding) + } } -func flush(dst io.Writer, filling, padding [][]byte) { +func flush(w io.Writer, filling, padding [][]byte) { for i := len(filling) - 1; i >= 0; i-- { - dst.Write(filling[i]) + mustWrite(w, filling[i]) } for i := 0; i < len(padding); i++ { - dst.Write(padding[i]) + mustWrite(w, padding[i]) + } +} + +func mustWrite(w io.Writer, p []byte) { + _, err := w.Write(p) + if err != nil { + panic(err) } } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go new file mode 100644 index 00000000000..1a7086fecb6 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go @@ -0,0 +1,14 @@ +package mpb + +import ( + "io" + + "github.com/vbauerster/mpb/v7/decor" +) + +// NopStyle provides BarFillerBuilder which builds NOP BarFiller. 
+func NopStyle() BarFillerBuilder { + return BarFillerBuilderFunc(func() BarFiller { + return BarFillerFunc(func(io.Writer, int, decor.Statistics) {}) + }) +} diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go index 58ae1c53283..d38525efc94 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go @@ -73,15 +73,19 @@ func (s *sFiller) Fill(w io.Writer, width int, stat decor.Statistics) { return } + var err error rest := width - frameWidth switch s.position { case positionLeft: - io.WriteString(w, frame+strings.Repeat(" ", rest)) + _, err = io.WriteString(w, frame+strings.Repeat(" ", rest)) case positionRight: - io.WriteString(w, strings.Repeat(" ", rest)+frame) + _, err = io.WriteString(w, strings.Repeat(" ", rest)+frame) default: str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2) - io.WriteString(w, str) + _, err = io.WriteString(w, str) + } + if err != nil { + panic(err) } s.count++ } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_option.go b/vendor/github.com/vbauerster/mpb/v7/bar_option.go index 46b7de0bf56..8599f0a57b9 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_option.go @@ -5,12 +5,20 @@ import ( "io" "github.com/vbauerster/mpb/v7/decor" - "github.com/vbauerster/mpb/v7/internal" ) // BarOption is a func option to alter default behavior of a bar. type BarOption func(*bState) +func skipNil(decorators []decor.Decorator) (filtered []decor.Decorator) { + for _, d := range decorators { + if d != nil { + filtered = append(filtered, d) + } + } + return +} + func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) { type mergeWrapper interface { MergeUnwrap() []decor.Decorator @@ -26,14 +34,14 @@ func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Deco // AppendDecorators let you inject decorators to the bar's right side. func AppendDecorators(decorators ...decor.Decorator) BarOption { return func(s *bState) { - s.addDecorators(&s.aDecorators, decorators...) + s.addDecorators(&s.aDecorators, skipNil(decorators)...) } } // PrependDecorators let you inject decorators to the bar's left side. func PrependDecorators(decorators ...decor.Decorator) BarOption { return func(s *bState) { - s.addDecorators(&s.pDecorators, decorators...) + s.addDecorators(&s.pDecorators, skipNil(decorators)...) } } @@ -51,14 +59,17 @@ func BarWidth(width int) BarOption { } } -// BarQueueAfter queues this (being constructed) bar to relplace -// runningBar after it has been completed. -func BarQueueAfter(runningBar *Bar) BarOption { - if runningBar == nil { +// BarQueueAfter puts this (being constructed) bar into the queue. +// When argument bar completes or aborts queued bar replaces its place. +// If sync is true queued bar is suspended until argument bar completes +// or aborts. 
+func BarQueueAfter(bar *Bar, sync bool) BarOption { + if bar == nil { return nil } return func(s *bState) { - s.runningBar = runningBar + s.afterBar = bar + s.sync = sync } } @@ -81,7 +92,10 @@ func BarFillerOnComplete(message string) BarOption { return BarFillerMiddleware(func(base BarFiller) BarFiller { return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) { if st.Completed { - io.WriteString(w, message) + _, err := io.WriteString(w, message) + if err != nil { + panic(err) + } } else { base.Fill(w, reqWidth, st) } @@ -138,9 +152,12 @@ func BarNoPop() BarOption { } } -// BarOptional will invoke provided option only when pick is true. -func BarOptional(option BarOption, pick bool) BarOption { - return BarOptOn(option, internal.Predicate(pick)) +// BarOptional will invoke provided option only when cond is true. +func BarOptional(option BarOption, cond bool) BarOption { + if cond { + return option + } + return nil } // BarOptOn will invoke provided option only when higher order predicate diff --git a/vendor/github.com/vbauerster/mpb/v7/container_option.go b/vendor/github.com/vbauerster/mpb/v7/container_option.go index a858c3c51dd..e523a175938 100644 --- a/vendor/github.com/vbauerster/mpb/v7/container_option.go +++ b/vendor/github.com/vbauerster/mpb/v7/container_option.go @@ -5,8 +5,6 @@ import ( "io/ioutil" "sync" "time" - - "github.com/vbauerster/mpb/v7/internal" ) // ContainerOption is a func option to alter default behavior of a bar @@ -101,9 +99,12 @@ func PopCompletedMode() ContainerOption { } } -// ContainerOptional will invoke provided option only when pick is true. -func ContainerOptional(option ContainerOption, pick bool) ContainerOption { - return ContainerOptOn(option, internal.Predicate(pick)) +// ContainerOptional will invoke provided option only when cond is true. +func ContainerOptional(option ContainerOption, cond bool) ContainerOption { + if cond { + return option + } + return nil } // ContainerOptOn will invoke provided option only when higher order diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go index 925c8b1dcf1..fac15b3bce9 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go +++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go @@ -11,7 +11,7 @@ import ( // ErrNotTTY not a TeleTYpewriter error. var ErrNotTTY = errors.New("not a terminal") -// http://ascii-table.com/ansi-escape-sequences.php +// https://github.com/dylanaraps/pure-sh-bible#cursor-movement const ( escOpen = "\x1b[" cuuAndEd = "A\x1b[J" @@ -76,9 +76,9 @@ func (w *Writer) GetWidth() (int, error) { return tw, err } -func (w *Writer) ansiCuuAndEd() (err error) { +func (w *Writer) ansiCuuAndEd() error { buf := make([]byte, 8) buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10) - _, err = w.out.Write(append(buf, cuuAndEd...)) - return + _, err := w.out.Write(append(buf, cuuAndEd...)) + return err } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go index e81fae367ee..aad7709c062 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go @@ -47,12 +47,13 @@ const ( // Statistics consists of progress related statistics, that Decorator // may need. type Statistics struct { - ID int AvailableWidth int + ID int Total int64 Current int64 Refill int64 Completed bool + Aborted bool } // Decorator interface. 
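The bar.go and decorator.go hunks above replace the old cacheState/toShutdown machinery with an explicit aborted state, a public (*Bar).Wait, and an Aborted field in decor.Statistics. A minimal sketch of how that reworked surface fits together, using only calls shown in this diff (the total, loop count, and sleep are illustrative):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	p := mpb.New()

	// (*Progress).New takes the BarFillerBuilder directly, replacing the
	// now-deprecated p.Add(total, mpb.NewBarFiller(mpb.BarStyle())) form.
	bar := p.New(100, mpb.BarStyle(),
		mpb.PrependDecorators(
			// OnAbort swaps the wrapped decorator's output for a fixed
			// message once Statistics.Aborted is set.
			decor.OnAbort(decor.Percentage(), "aborted"),
		),
	)

	for i := 0; i < 50; i++ {
		bar.IncrBy(1)
		time.Sleep(5 * time.Millisecond)
	}

	bar.Abort(false) // interrupt the bar without dropping it
	bar.Wait()       // block until the abort event has been processed
	_ = bar.Aborted() // reports true after the abort

	p.Wait()
}
```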
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go index e41406a64da..84767115515 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go @@ -17,6 +17,9 @@ import ( // +----+--------+---------+--------+ // func Merge(decorator Decorator, placeholders ...WC) Decorator { + if decorator == nil { + return nil + } if _, ok := decorator.Sync(); !ok || len(placeholders) == 0 { return decorator } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go new file mode 100644 index 00000000000..10ff67009cc --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go @@ -0,0 +1,41 @@ +package decor + +// OnAbort returns decorator, which wraps provided decorator with sole +// purpose to display provided message on abort event. It has no effect +// if bar.Abort(drop bool) is called with true argument. +// +// `decorator` Decorator to wrap +// +// `message` message to display on abort event +// +func OnAbort(decorator Decorator, message string) Decorator { + if decorator == nil { + return nil + } + d := &onAbortWrapper{ + Decorator: decorator, + msg: message, + } + if md, ok := decorator.(*mergeDecorator); ok { + d.Decorator, md.Decorator = md.Decorator, d + return md + } + return d +} + +type onAbortWrapper struct { + Decorator + msg string +} + +func (d *onAbortWrapper) Decor(s Statistics) string { + if s.Aborted { + wc := d.GetConf() + return wc.FormatMsg(d.msg) + } + return d.Decorator.Decor(s) +} + +func (d *onAbortWrapper) Base() Decorator { + return d.Decorator +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go index f46b19abaa9..2ada2b31b16 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go @@ -1,6 +1,6 @@ package decor -// OnComplete returns decorator, which wraps provided decorator, with +// OnComplete returns decorator, which wraps provided decorator with // sole purpose to display provided message on complete event. // // `decorator` Decorator to wrap @@ -8,6 +8,9 @@ package decor // `message` message to display on complete event // func OnComplete(decorator Decorator, message string) Decorator { + if decorator == nil { + return nil + } d := &onCompleteWrapper{ Decorator: decorator, msg: message, diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go new file mode 100644 index 00000000000..a9db0653aee --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go @@ -0,0 +1,27 @@ +package decor + +// OnPredicate returns decorator if predicate evaluates to true. +// +// `decorator` Decorator +// +// `predicate` func() bool +// +func OnPredicate(decorator Decorator, predicate func() bool) Decorator { + if predicate() { + return decorator + } + return nil +} + +// OnCondition returns decorator if condition is true. 
+// +// `decorator` Decorator +// +// `cond` bool +// +func OnCondition(decorator Decorator, cond bool) Decorator { + if cond { + return decorator + } + return nil +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go new file mode 100644 index 00000000000..c6a34384e63 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go @@ -0,0 +1,10 @@ +package decor + +import "io" + +func mustWriteString(w io.Writer, s string) { + _, err := io.WriteString(w, s) + if err != nil { + panic(err) + } +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go index 2b0a7a9562c..e72668993ee 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go @@ -2,7 +2,6 @@ package decor import ( "fmt" - "io" "strconv" "github.com/vbauerster/mpb/v7/internal" @@ -24,12 +23,11 @@ func (s percentageType) Format(st fmt.State, verb rune) { } } - io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64)) - + mustWriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64)) if st.Flag(' ') { - io.WriteString(st, " ") + mustWriteString(st, " ") } - io.WriteString(st, "%") + mustWriteString(st, "%") } // Percentage returns percentage decorator. It's a wrapper of NewPercentage. diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go index e4b9740584b..09ecc23f807 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go @@ -2,8 +2,6 @@ package decor import ( "fmt" - "io" - "math" "strconv" ) @@ -47,16 +45,15 @@ func (self SizeB1024) Format(st fmt.State, verb rune) { unit = _iMiB case self < _iTiB: unit = _iGiB - case self <= math.MaxInt64: + default: unit = _iTiB } - io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) - + mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) if st.Flag(' ') { - io.WriteString(st, " ") + mustWriteString(st, " ") } - io.WriteString(st, unit.String()) + mustWriteString(st, unit.String()) } const ( @@ -96,14 +93,13 @@ func (self SizeB1000) Format(st fmt.State, verb rune) { unit = _MB case self < _TB: unit = _GB - case self <= math.MaxInt64: + default: unit = _TB } - io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) - + mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) if st.Flag(' ') { - io.WriteString(st, " ") + mustWriteString(st, " ") } - io.WriteString(st, unit.String()) + mustWriteString(st, unit.String()) } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go index 634edabfdc0..f052352fc40 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go @@ -2,7 +2,6 @@ package decor import ( "fmt" - "io" "math" "time" @@ -24,7 +23,7 @@ type speedFormatter struct { func (self *speedFormatter) Format(st fmt.State, verb rune) { self.Formatter.Format(st, verb) - io.WriteString(st, "/s") + mustWriteString(st, "/s") } // EwmaSpeed exponential-weighted-moving-average based speed decorator. 
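With internal.Predicate removed, conditionality is now expressed by returning nil: OnCondition/OnPredicate return a nil Decorator, BarOptional returns a nil BarOption, and the new skipNil step in AppendDecorators/PrependDecorators filters nil entries out. A hedged sketch of the pattern this enables (the makeBar helper is hypothetical, and it assumes nil BarOptions are tolerated by the option applier, as the nil returns in these hunks imply):

```go
package main

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

// makeBar passes every conditional piece unconditionally; disabled
// pieces resolve to nil and are skipped downstream.
func makeBar(p *mpb.Progress, total int64, verbose bool) *mpb.Bar {
	return p.New(total, mpb.BarStyle(),
		// BarOptional now simply returns the option or nil, instead of
		// routing through the removed internal.Predicate helper.
		mpb.BarOptional(mpb.BarFillerOnComplete("done"), verbose),
		mpb.AppendDecorators(
			// OnCondition yields nil when verbose is false; the nil is
			// dropped by the skipNil filter added to AppendDecorators.
			decor.OnCondition(decor.Percentage(), verbose),
		),
	)
}
```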
diff --git a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go b/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go deleted file mode 100644 index 1e4dd24d9da..00000000000 --- a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go +++ /dev/null @@ -1,6 +0,0 @@ -package internal - -// Predicate helper for internal use. -func Predicate(pick bool) func() bool { - return func() bool { return pick } -} diff --git a/vendor/github.com/vbauerster/mpb/v7/progress.go b/vendor/github.com/vbauerster/mpb/v7/progress.go index c60c6569406..1d9a53e5c9d 100644 --- a/vendor/github.com/vbauerster/mpb/v7/progress.go +++ b/vendor/github.com/vbauerster/mpb/v7/progress.go @@ -6,8 +6,6 @@ import ( "context" "fmt" "io" - "io/ioutil" - "log" "math" "os" "sync" @@ -18,12 +16,10 @@ import ( ) const ( - // default RefreshRate - prr = 150 * time.Millisecond + prr = 150 * time.Millisecond // default RefreshRate ) -// Progress represents a container that renders one or more progress -// bars. +// Progress represents a container that renders one or more progress bars. type Progress struct { ctx context.Context uwg *sync.WaitGroup @@ -33,17 +29,14 @@ type Progress struct { done chan struct{} refreshCh chan time.Time once sync.Once - dlogger *log.Logger } -// pState holds bars in its priorityQueue. It gets passed to -// *Progress.serve(...) monitor goroutine. +// pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine. type pState struct { - bHeap priorityQueue - heapUpdated bool - pMatrix map[int][]chan int - aMatrix map[int][]chan int - barShutdownQueue []*Bar + bHeap priorityQueue + heapUpdated bool + pMatrix map[int][]chan int + aMatrix map[int][]chan int // following are provided/overrided by user idCount int @@ -55,27 +48,26 @@ type pState struct { externalRefresh <-chan interface{} renderDelay <-chan struct{} shutdownNotifier chan struct{} - parkedBars map[*Bar]*Bar + queueBars map[*Bar]*Bar output io.Writer debugOut io.Writer } // New creates new Progress container instance. It's not possible to -// reuse instance after *Progress.Wait() method has been called. +// reuse instance after (*Progress).Wait method has been called. func New(options ...ContainerOption) *Progress { return NewWithContext(context.Background(), options...) } // NewWithContext creates new Progress container instance with provided -// context. It's not possible to reuse instance after *Progress.Wait() +// context. It's not possible to reuse instance after (*Progress).Wait // method has been called. func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { s := &pState{ - bHeap: priorityQueue{}, - rr: prr, - parkedBars: make(map[*Bar]*Bar), - output: os.Stdout, - debugOut: ioutil.Discard, + bHeap: priorityQueue{}, + rr: prr, + queueBars: make(map[*Bar]*Bar), + output: os.Stdout, } for _, opt := range options { @@ -91,7 +83,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { bwg: new(sync.WaitGroup), operateState: make(chan func(*pState)), done: make(chan struct{}), - dlogger: log.New(s.debugOut, "[mpb] ", log.Lshortfile), } p.cwg.Add(1) @@ -99,25 +90,27 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { return p } -// AddBar creates a bar with default bar filler. Different filler can -// be chosen and applied via `*Progress.Add(...) *Bar` method. +// AddBar creates a bar with default bar filler. 
func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { - return p.Add(total, NewBarFiller(BarStyle()), options...) + return p.New(total, BarStyle(), options...) } -// AddSpinner creates a bar with default spinner filler. Different -// filler can be chosen and applied via `*Progress.Add(...) *Bar` -// method. +// AddSpinner creates a bar with default spinner filler. func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { - return p.Add(total, NewBarFiller(SpinnerStyle()), options...) + return p.New(total, SpinnerStyle(), options...) +} + +// New creates a bar with provided BarFillerBuilder. +func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar { + return p.Add(total, builder.Build(), options...) } // Add creates a bar which renders itself by provided filler. -// If `total <= 0` trigger complete event is disabled until reset with *bar.SetTotal(int64, bool). -// Panics if *Progress instance is done, i.e. called after *Progress.Wait(). +// If `total <= 0` triggering complete event by increment methods is disabled. +// Panics if *Progress instance is done, i.e. called after (*Progress).Wait(). func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar { if filler == nil { - filler = BarFillerFunc(func(io.Writer, int, decor.Statistics) {}) + filler = NopStyle().Build() } p.bwg.Add(1) result := make(chan *Bar) @@ -125,9 +118,8 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar case p.operateState <- func(ps *pState) { bs := ps.makeBarState(total, filler, options...) bar := newBar(p, bs) - if bs.runningBar != nil { - bs.runningBar.noPop = true - ps.parkedBars[bs.runningBar] = bar + if bs.afterBar != nil { + ps.queueBars[bs.afterBar] = bar } else { heap.Push(&ps.bHeap, bar) ps.heapUpdated = true @@ -136,7 +128,6 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar result <- bar }: bar := <-result - bar.subscribeDecorators() return bar case <-p.done: p.bwg.Done() @@ -144,21 +135,8 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar } } -func (p *Progress) dropBar(b *Bar) { - select { - case p.operateState <- func(s *pState) { - if b.index < 0 { - return - } - heap.Remove(&s.bHeap, b.index) - s.heapUpdated = true - }: - case <-p.done: - } -} - func (p *Progress) traverseBars(cb func(b *Bar) bool) { - done := make(chan struct{}) + sync := make(chan struct{}) select { case p.operateState <- func(s *pState) { for i := 0; i < s.bHeap.Len(); i++ { @@ -167,9 +145,9 @@ func (p *Progress) traverseBars(cb func(b *Bar) bool) { break } } - close(done) + close(sync) }: - <-done + <-sync case <-p.done: } } @@ -203,8 +181,8 @@ func (p *Progress) BarCount() int { // After this method has been called, there is no way to reuse *Progress // instance. 
func (p *Progress) Wait() { + // wait for user wg, if any if p.uwg != nil { - // wait for user wg p.uwg.Wait() } @@ -232,12 +210,26 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) { op(s) case <-p.refreshCh: if err := s.render(cw); err != nil { - p.dlogger.Println(err) + if s.debugOut != nil { + _, e := fmt.Fprintln(s.debugOut, err) + if e != nil { + panic(err) + } + } else { + panic(err) + } } case <-s.shutdownNotifier: for s.heapUpdated { if err := s.render(cw); err != nil { - p.dlogger.Println(err) + if s.debugOut != nil { + _, e := fmt.Fprintln(s.debugOut, err) + if e != nil { + panic(err) + } + } else { + panic(err) + } } } return @@ -245,6 +237,64 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) { } } +func (s *pState) render(cw *cwriter.Writer) error { + if s.heapUpdated { + s.updateSyncMatrix() + s.heapUpdated = false + } + syncWidth(s.pMatrix) + syncWidth(s.aMatrix) + + tw, err := cw.GetWidth() + if err != nil { + tw = s.reqWidth + } + for i := 0; i < s.bHeap.Len(); i++ { + bar := s.bHeap[i] + go bar.render(tw) + } + + return s.flush(cw) +} + +func (s *pState) flush(cw *cwriter.Writer) error { + var lines int + pool := make([]*Bar, 0, s.bHeap.Len()) + for s.bHeap.Len() > 0 { + b := heap.Pop(&s.bHeap).(*Bar) + frame := <-b.frameCh + lines += frame.lines + _, err := cw.ReadFrom(frame.reader) + if err != nil { + return err + } + if frame.shutdown { + b.Wait() // waiting for b.done, so it's safe to read b.bs + var toDrop bool + if qb, ok := s.queueBars[b]; ok { + delete(s.queueBars, b) + qb.priority = b.priority + pool = append(pool, qb) + toDrop = true + } else if s.popCompleted && !b.bs.noPop { + lines -= frame.lines + toDrop = true + } + if toDrop || b.bs.dropOnComplete { + s.heapUpdated = true + continue + } + } + pool = append(pool, b) + } + + for _, b := range pool { + heap.Push(&s.bHeap, b) + } + + return cw.Flush(lines) +} + func (s *pState) newTicker(done <-chan struct{}) chan time.Time { ch := make(chan time.Time) if s.shutdownNotifier == nil { @@ -283,75 +333,6 @@ func (s *pState) newTicker(done <-chan struct{}) chan time.Time { return ch } -func (s *pState) render(cw *cwriter.Writer) error { - if s.heapUpdated { - s.updateSyncMatrix() - s.heapUpdated = false - } - syncWidth(s.pMatrix) - syncWidth(s.aMatrix) - - tw, err := cw.GetWidth() - if err != nil { - tw = s.reqWidth - } - for i := 0; i < s.bHeap.Len(); i++ { - bar := s.bHeap[i] - go bar.render(tw) - } - - return s.flush(cw) -} - -func (s *pState) flush(cw *cwriter.Writer) error { - var totalLines int - bm := make(map[*Bar]int, s.bHeap.Len()) - for s.bHeap.Len() > 0 { - b := heap.Pop(&s.bHeap).(*Bar) - frame := <-b.frameCh - cw.ReadFrom(frame.reader) - if b.toShutdown { - if b.recoveredPanic != nil { - s.barShutdownQueue = append(s.barShutdownQueue, b) - b.toShutdown = false - } else { - // shutdown at next flush - // this ensures no bar ends up with less than 100% rendered - defer func() { - s.barShutdownQueue = append(s.barShutdownQueue, b) - }() - } - } - bm[b] = frame.lines - totalLines += frame.lines - } - - for _, b := range s.barShutdownQueue { - if parkedBar := s.parkedBars[b]; parkedBar != nil { - parkedBar.priority = b.priority - heap.Push(&s.bHeap, parkedBar) - delete(s.parkedBars, b) - b.toDrop = true - } - if s.popCompleted && !b.noPop { - totalLines -= bm[b] - b.toDrop = true - } - if b.toDrop { - delete(bm, b) - s.heapUpdated = true - } - b.cancel() - } - s.barShutdownQueue = s.barShutdownQueue[0:0] - - for b := range bm { - heap.Push(&s.bHeap, b) - } - - return cw.Flush(totalLines) 
-} - func (s *pState) updateSyncMatrix() { s.pMatrix = make(map[int][]chan int) s.aMatrix = make(map[int][]chan int) @@ -400,9 +381,11 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio bs.priority = -(math.MaxInt32 - s.idCount) } - bs.bufP = bytes.NewBuffer(make([]byte, 0, 128)) - bs.bufB = bytes.NewBuffer(make([]byte, 0, 256)) - bs.bufA = bytes.NewBuffer(make([]byte, 0, 128)) + for i := 0; i < len(bs.buffers); i++ { + bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512)) + } + + bs.subscribeDecorators() return bs } @@ -413,7 +396,7 @@ func syncWidth(matrix map[int][]chan int) { } } -var maxWidthDistributor = func(column []chan int) { +func maxWidthDistributor(column []chan int) { var maxWidth int for _, ch := range column { if w := <-ch; w > maxWidth { diff --git a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go index 316f438d76b..b0dd89d458c 100644 --- a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go +++ b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go @@ -11,73 +11,63 @@ type proxyReader struct { bar *Bar } -func (x *proxyReader) Read(p []byte) (int, error) { +func (x proxyReader) Read(p []byte) (int, error) { n, err := x.ReadCloser.Read(p) x.bar.IncrBy(n) - if err == io.EOF { - go x.bar.SetTotal(0, true) - } return n, err } type proxyWriterTo struct { - io.ReadCloser // *proxyReader - wt io.WriterTo - bar *Bar + proxyReader + wt io.WriterTo } -func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) { +func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) { n, err := x.wt.WriteTo(w) x.bar.IncrInt64(n) - if err == io.EOF { - go x.bar.SetTotal(0, true) - } return n, err } type ewmaProxyReader struct { - io.ReadCloser // *proxyReader - bar *Bar - iT time.Time + proxyReader } -func (x *ewmaProxyReader) Read(p []byte) (int, error) { - n, err := x.ReadCloser.Read(p) +func (x ewmaProxyReader) Read(p []byte) (int, error) { + start := time.Now() + n, err := x.proxyReader.Read(p) if n > 0 { - x.bar.DecoratorEwmaUpdate(time.Since(x.iT)) - x.iT = time.Now() + x.bar.DecoratorEwmaUpdate(time.Since(start)) } return n, err } type ewmaProxyWriterTo struct { - io.ReadCloser // *ewmaProxyReader - wt io.WriterTo // *proxyWriterTo - bar *Bar - iT time.Time + ewmaProxyReader + wt proxyWriterTo } -func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { +func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { + start := time.Now() n, err := x.wt.WriteTo(w) if n > 0 { - x.bar.DecoratorEwmaUpdate(time.Since(x.iT)) - x.iT = time.Now() + x.bar.DecoratorEwmaUpdate(time.Since(start)) } return n, err } -func newProxyReader(r io.Reader, bar *Bar) io.ReadCloser { - rc := toReadCloser(r) - rc = &proxyReader{rc, bar} - - if wt, isWriterTo := r.(io.WriterTo); bar.hasEwmaDecorators { - now := time.Now() - rc = &ewmaProxyReader{rc, bar, now} - if isWriterTo { - rc = &ewmaProxyWriterTo{rc, wt, bar, now} +func (b *Bar) newProxyReader(r io.Reader) (rc io.ReadCloser) { + pr := proxyReader{toReadCloser(r), b} + if wt, ok := r.(io.WriterTo); ok { + pw := proxyWriterTo{pr, wt} + if b.hasEwma { + rc = ewmaProxyWriterTo{ewmaProxyReader{pr}, pw} + } else { + rc = pw } - } else if isWriterTo { - rc = &proxyWriterTo{rc, wt, bar} + } else if b.hasEwma { + rc = ewmaProxyReader{pr} + } else { + rc = pr } return rc } diff --git a/vendor/modules.txt b/vendor/modules.txt index cb19532f807..9a36b55e469 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -52,7 +52,7 @@ 
github.com/Microsoft/hcsshim/internal/vmcompute github.com/Microsoft/hcsshim/internal/wclayer github.com/Microsoft/hcsshim/internal/winapi github.com/Microsoft/hcsshim/osversion -# github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 +# github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5 ## explicit; go 1.13 github.com/ProtonMail/go-crypto/bitcurves github.com/ProtonMail/go-crypto/brainpool @@ -178,7 +178,7 @@ github.com/containerd/cgroups/stats/v1 # github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console -# github.com/containerd/containerd v1.5.8 +# github.com/containerd/containerd v1.6.1 ## explicit; go 1.16 github.com/containerd/containerd/api/services/ttrpc/events/v1 github.com/containerd/containerd/api/types @@ -187,6 +187,8 @@ github.com/containerd/containerd/cio github.com/containerd/containerd/defaults github.com/containerd/containerd/errdefs github.com/containerd/containerd/events +github.com/containerd/containerd/events/exchange +github.com/containerd/containerd/filters github.com/containerd/containerd/identifiers github.com/containerd/containerd/log github.com/containerd/containerd/namespaces @@ -195,9 +197,11 @@ github.com/containerd/containerd/pkg/cri/io github.com/containerd/containerd/pkg/cri/util github.com/containerd/containerd/pkg/dialer github.com/containerd/containerd/pkg/ioutil +github.com/containerd/containerd/pkg/shutdown github.com/containerd/containerd/pkg/ttrpcutil github.com/containerd/containerd/pkg/userns github.com/containerd/containerd/platforms +github.com/containerd/containerd/plugin github.com/containerd/containerd/reference/docker github.com/containerd/containerd/runtime/v2/shim github.com/containerd/containerd/runtime/v2/task @@ -213,7 +217,7 @@ github.com/containerd/fifo # github.com/containerd/go-runc v1.0.0 ## explicit; go 1.13 github.com/containerd/go-runc -# github.com/containerd/stargz-snapshotter/estargz v0.10.1 +# github.com/containerd/stargz-snapshotter/estargz v0.11.4 ## explicit; go 1.16 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil @@ -288,8 +292,8 @@ github.com/containers/conmon/runner/config ## explicit; go 1.18 github.com/containers/conmon-rs/internal/proto github.com/containers/conmon-rs/pkg/client -# github.com/containers/image/v5 v5.17.0 -## explicit; go 1.13 +# github.com/containers/image/v5 v5.21.1 +## explicit; go 1.16 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -302,14 +306,15 @@ github.com/containers/image/v5/docker/reference github.com/containers/image/v5/docker/tarfile github.com/containers/image/v5/image github.com/containers/image/v5/internal/blobinfocache +github.com/containers/image/v5/internal/imagedestination +github.com/containers/image/v5/internal/imagesource github.com/containers/image/v5/internal/iolimits -github.com/containers/image/v5/internal/pkg/keyctl github.com/containers/image/v5/internal/pkg/platform +github.com/containers/image/v5/internal/private github.com/containers/image/v5/internal/putblobdigest github.com/containers/image/v5/internal/rootless github.com/containers/image/v5/internal/streamdigest github.com/containers/image/v5/internal/tmpdir -github.com/containers/image/v5/internal/types github.com/containers/image/v5/internal/uploadreader github.com/containers/image/v5/manifest github.com/containers/image/v5/oci/archive @@ -330,6 +335,7 @@ github.com/containers/image/v5/pkg/shortnames 
 github.com/containers/image/v5/pkg/strslice
 github.com/containers/image/v5/pkg/sysregistriesv2
 github.com/containers/image/v5/pkg/tlsclientconfig
+github.com/containers/image/v5/sif
 github.com/containers/image/v5/signature
 github.com/containers/image/v5/storage
 github.com/containers/image/v5/tarball
@@ -337,7 +343,7 @@ github.com/containers/image/v5/transports
 github.com/containers/image/v5/transports/alltransports
 github.com/containers/image/v5/types
 github.com/containers/image/v5/version
-# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
+# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a
 ## explicit
 github.com/containers/libtrust
 # github.com/containers/ocicrypt v1.1.5
@@ -421,8 +427,8 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.37.0
-## explicit; go 1.14
+# github.com/containers/storage v1.40.0
+## explicit; go 1.16
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -512,7 +518,7 @@ github.com/docker/distribution/registry/client/auth/challenge
 github.com/docker/distribution/registry/client/transport
 github.com/docker/distribution/registry/storage/cache
 github.com/docker/distribution/registry/storage/cache/memory
-# github.com/docker/docker v20.10.12+incompatible
+# github.com/docker/docker v20.10.14+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -553,6 +559,9 @@ github.com/docker/docker-credential-helpers/credentials
 github.com/docker/go-connections/nat
 github.com/docker/go-connections/sockets
 github.com/docker/go-connections/tlsconfig
+# github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
+## explicit
+github.com/docker/go-events
 # github.com/docker/go-metrics v0.0.1
 ## explicit; go 1.11
 github.com/docker/go-metrics
@@ -822,12 +831,13 @@ github.com/json-iterator/go
 # github.com/kevinburke/ssh_config v1.1.0
 ## explicit
 github.com/kevinburke/ssh_config
-# github.com/klauspost/compress v1.14.2
-## explicit; go 1.15
+# github.com/klauspost/compress v1.15.2
+## explicit; go 1.16
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
+github.com/klauspost/compress/internal/cpuinfo
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
@@ -886,7 +896,7 @@ github.com/moby/spdystream/spdy
 # github.com/moby/sys/mount v0.2.0
 ## explicit; go 1.14
 github.com/moby/sys/mount
-# github.com/moby/sys/mountinfo v0.6.0
+# github.com/moby/sys/mountinfo v0.6.1
 ## explicit; go 1.16
 github.com/moby/sys/mountinfo
 # github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
@@ -902,9 +912,6 @@ github.com/modern-go/reflect2
 # github.com/morikuni/aec v1.0.0
 ## explicit
 github.com/morikuni/aec
-# github.com/mtrmac/gpgme v0.1.2
-## explicit; go 1.11
-github.com/mtrmac/gpgme
 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 ## explicit
 github.com/munnerz/goautoneg
@@ -997,7 +1004,7 @@ github.com/openshift/imagebuilder/dockerfile/command
 github.com/openshift/imagebuilder/dockerfile/parser
 github.com/openshift/imagebuilder/signal
 github.com/openshift/imagebuilder/strslice
-# github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
+# github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f
 ## explicit
 github.com/ostreedev/ostree-go/pkg/glibobject
 github.com/ostreedev/ostree-go/pkg/otbuiltin
@@ -1021,6 +1028,9 @@ github.com/pmezard/go-difflib/difflib
 # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c
 ## explicit; go 1.14
 github.com/power-devops/perfstat
+# github.com/proglottis/gpgme v0.1.1
+## explicit; go 1.11
+github.com/proglottis/gpgme
 # github.com/prometheus/client_golang v1.12.2
 ## explicit; go 1.13
 github.com/prometheus/client_golang/prometheus
@@ -1105,6 +1115,9 @@ github.com/stefanberger/go-pkcs11uri
 # github.com/stretchr/testify v1.7.2
 ## explicit; go 1.13
 github.com/stretchr/testify/assert
+# github.com/sylabs/sif/v2 v2.7.0
+## explicit; go 1.17
+github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 ## explicit
 github.com/syndtr/gocapability/capability
@@ -1125,7 +1138,7 @@ github.com/urfave/cli/v2
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v7 v7.1.5
+# github.com/vbauerster/mpb/v7 v7.4.1
 ## explicit; go 1.14
 github.com/vbauerster/mpb/v7
 github.com/vbauerster/mpb/v7/cwriter
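For context on the mpb v7.1.5 -> v7.4.1 bump recorded above: callers reach the refactored (*Bar).newProxyReader through the public Bar.ProxyReader method. The following is a hedged usage sketch against the vendored v7 public API; the total, width, and random source are invented for illustration, and attaching an EWMA decorator is what makes the bar take the ewmaProxyReader path.

package main

import (
	"io"
	"math/rand"
	"time"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	total := int64(64 * 1024)
	p := mpb.New(mpb.WithWidth(64))
	bar := p.AddBar(total,
		mpb.PrependDecorators(decor.CountersKibiByte("% .2f / % .2f")),
		// An EWMA decorator routes reads through the EWMA proxy types.
		mpb.AppendDecorators(decor.EwmaETA(decor.ET_STYLE_GO, 60)),
	)

	// Wrap any io.Reader; reads through the proxy advance the bar.
	src := io.LimitReader(rand.New(rand.NewSource(time.Now().UnixNano())), total)
	proxy := bar.ProxyReader(src)
	defer proxy.Close()

	_, _ = io.Copy(io.Discard, proxy)
	p.Wait()
}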