diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 164940d1282be..7978b5a2ec959 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -12,7 +12,7 @@ "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {}, "ghcr.io/devcontainers/features/go:1": { - "version": "1.25.7" + "version": "1.25.9" } }, diff --git a/.github/actions/install-go/action.yml b/.github/actions/install-go/action.yml index 58de8937589bc..a4817cdf75c33 100644 --- a/.github/actions/install-go/action.yml +++ b/.github/actions/install-go/action.yml @@ -3,7 +3,7 @@ description: "Reusable action to install Go, so there is one place to bump Go ve inputs: go-version: required: true - default: "1.25.7" + default: "1.25.9" description: "Go version to install" runs: diff --git a/.github/workflows/api-release.yml b/.github/workflows/api-release.yml index 24f554f25c1f1..a9356149bf919 100644 --- a/.github/workflows/api-release.yml +++ b/.github/workflows/api-release.yml @@ -6,7 +6,7 @@ on: name: API Release env: - GO_VERSION: "1.25.7" + GO_VERSION: "1.25.9" permissions: # added using https://github.com/step-security/secure-workflows contents: read diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8f7cefdbe8187..c46a15acd751e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -192,7 +192,7 @@ jobs: strategy: matrix: os: [ubuntu-latest, ubuntu-24.04-arm, macos-latest, windows-latest] - go-version: ["1.25.7", "1.26.0"] + go-version: ["1.25.9", "1.26.2"] exclude: - os: ${{ github.event.repository.private && 'ubuntu-24.04-arm' || '' }} steps: diff --git a/.github/workflows/release/Dockerfile b/.github/workflows/release/Dockerfile index 22cce7c1ff88d..d97ee2db9d535 100644 --- a/.github/workflows/release/Dockerfile +++ b/.github/workflows/release/Dockerfile @@ -14,7 +14,7 @@ ARG UBUNTU_VERSION=22.04 ARG BASE_IMAGE=ubuntu:${UBUNTU_VERSION} -ARG GO_VERSION=1.25.7 +ARG GO_VERSION=1.25.9 ARG 
GO_IMAGE=golang:${GO_VERSION} FROM --platform=$BUILDPLATFORM $GO_IMAGE AS go FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.6.1@sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3 AS xx diff --git a/Vagrantfile b/Vagrantfile index 70cd352798e38..0f82b67331f0a 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -109,7 +109,7 @@ EOF config.vm.provision "install-golang", type: "shell", run: "once" do |sh| sh.upload_path = "/tmp/vagrant-install-golang" sh.env = { - 'GO_VERSION': ENV['GO_VERSION'] || "1.25.7", + 'GO_VERSION': ENV['GO_VERSION'] || "1.25.9", } sh.inline = <<~SHELL #!/usr/bin/env bash @@ -278,6 +278,7 @@ EOF 'GOTESTSUM_JSONFILE': ENV['GOTESTSUM_JSONFILE'], 'GITHUB_WORKSPACE': '', 'CGROUP_DRIVER': ENV['CGROUP_DRIVER'], + 'RUNC_FLAVOR': ENV['RUNC_FLAVOR'] || "runc", } sh.inline = <<~SHELL #!/usr/bin/env bash @@ -306,6 +307,7 @@ EOF 'GOTEST': ENV['GOTEST'] || "go test", 'REPORT_DIR': ENV['REPORT_DIR'], 'CGROUP_DRIVER': ENV['CGROUP_DRIVER'], + 'RUNC_FLAVOR': ENV['RUNC_FLAVOR'] || "runc", } sh.inline = <<~SHELL #!/usr/bin/env bash diff --git a/contrib/Dockerfile.test b/contrib/Dockerfile.test index 1348eb0bce501..8572461d2ee62 100644 --- a/contrib/Dockerfile.test +++ b/contrib/Dockerfile.test @@ -34,7 +34,7 @@ # docker run --privileged --group-add keep-groups -v ./critest_exit_code.txt:/tmp/critest_exit_code.txt containerd-test # ------------------------------------------------------------------------------ -ARG GOLANG_VERSION=1.25.7 +ARG GOLANG_VERSION=1.25.9 ARG GOLANG_IMAGE=golang FROM ${GOLANG_IMAGE}:${GOLANG_VERSION} AS golang diff --git a/contrib/fuzz/oss_fuzz_build.sh b/contrib/fuzz/oss_fuzz_build.sh index 357b2e7d31033..42a1e21a8af4c 100755 --- a/contrib/fuzz/oss_fuzz_build.sh +++ b/contrib/fuzz/oss_fuzz_build.sh @@ -39,11 +39,11 @@ compile_fuzzers() { apt-get update && apt-get install -y wget cd $SRC -wget --quiet https://go.dev/dl/go1.25.7.linux-amd64.tar.gz +wget --quiet https://go.dev/dl/go1.25.9.linux-amd64.tar.gz mkdir temp-go rm -rf 
/root/.go/* -tar -C temp-go/ -xzf go1.25.7.linux-amd64.tar.gz +tar -C temp-go/ -xzf go1.25.9.linux-amd64.tar.gz mv temp-go/go/* /root/.go/ cd $SRC/containerd diff --git a/core/diff/apply/apply.go b/core/diff/apply/apply.go index 9acecea35cc40..67dccf922225d 100644 --- a/core/diff/apply/apply.go +++ b/core/diff/apply/apply.go @@ -18,6 +18,8 @@ package apply import ( "context" + "crypto/rand" + "encoding/base64" "fmt" "io" "time" @@ -25,6 +27,7 @@ import ( "github.com/containerd/containerd/v2/core/content" "github.com/containerd/containerd/v2/core/diff" "github.com/containerd/containerd/v2/core/mount" + "github.com/containerd/errdefs" "github.com/containerd/log" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -33,13 +36,22 @@ import ( // NewFileSystemApplier returns an applier which simply mounts // and applies diff onto the mounted filesystem. func NewFileSystemApplier(cs content.Provider) diff.Applier { + return NewFileSystemApplierWithMountManager(cs, nil) +} + +// NewFileSystemApplierWithMountManager returns an applier which simply mounts and +// applies diff onto the mounted filesystem. +// An optional mount manager can be specified and it will take effect when applying. 
+func NewFileSystemApplierWithMountManager(cs content.Provider, mm mount.Manager) diff.Applier { return &fsApplier{ store: cs, + mount: mm, } } type fsApplier struct { store content.Provider + mount mount.Manager } var emptyDesc = ocispec.Descriptor{} @@ -98,6 +110,23 @@ func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [ r: io.TeeReader(processor, digester.Hash()), } + // The number of `mounts` that need to be parsed by the mount manager + // will be more than 1 in reality; this is needed to work around some + // overlayfs/bind shortcuts in core/diff/apply/apply_linux.go + if s.mount != nil && len(mounts) > 1 { + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + id := fmt.Sprintf("fs-diffapply-%d-%s", t1.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) + info, err := s.mount.Activate(ctx, id, mounts) + if err == nil { + defer s.mount.Deactivate(ctx, id) + mounts = info.System + } else if !errdefs.IsNotImplemented(err) { + return emptyDesc, fmt.Errorf("failed to activate mounts: %w", err) + } + } + if err := apply(ctx, mounts, rc, config.SyncFs); err != nil { return emptyDesc, err } diff --git a/core/unpack/unpacker.go b/core/unpack/unpacker.go index d964580c81c50..a5763edeb807d 100644 --- a/core/unpack/unpacker.go +++ b/core/unpack/unpacker.go @@ -524,6 +524,15 @@ func (u *Unpacker) unpack( case <-fetchC[i-fetchOffset]: } + // In case of parallel unpack, the parent snapshot isn't provided to the snapshotter. + // The overlayfs will return bind mounts for all layers, we need to convert them + // to overlay mounts for the applier to perform whiteout conversion correctly. + // TODO: this is a temporary workaround until #13053 lands. + // See: https://github.com/containerd/containerd/issues/13030 + if i > 0 && parallel && unpack.SnapshotterKey == "overlayfs" { + mounts = bindToOverlay(mounts) + } + diff, err := a.Apply(ctx, desc, mounts, unpack.ApplyOpts...) 
if err != nil { cleanup.Do(ctx, abort) @@ -747,3 +756,23 @@ func uniquePart() string { rand.Read(b[:]) return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) } + +// TODO: this is a temporary workaround until #13053 lands. +func bindToOverlay(mounts []mount.Mount) []mount.Mount { + if len(mounts) != 1 || mounts[0].Type != "bind" { + return mounts + } + + m := mount.Mount{ + Type: "overlay", + Source: "overlay", + } + for _, o := range mounts[0].Options { + if o != "rbind" { + m.Options = append(m.Options, o) + } + } + m.Options = append(m.Options, "upperdir="+mounts[0].Source) + + return []mount.Mount{m} +} diff --git a/core/unpack/unpacker_test.go b/core/unpack/unpacker_test.go index 32581dbd189f9..48636d6f9ff1e 100644 --- a/core/unpack/unpacker_test.go +++ b/core/unpack/unpacker_test.go @@ -19,8 +19,10 @@ package unpack import ( "crypto/rand" "fmt" + "reflect" "testing" + "github.com/containerd/containerd/v2/core/mount" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ) @@ -91,3 +93,87 @@ func BenchmarkUnpackWithChainIDs(b *testing.B) { }) } } + +func TestBindToOverlay(t *testing.T) { + testCases := []struct { + name string + mounts []mount.Mount + expect []mount.Mount + }{ + { + name: "single bind mount", + mounts: []mount.Mount{ + { + Type: "bind", + Source: "/path/to/source", + Options: []string{"ro", "rbind"}, + }, + }, + expect: []mount.Mount{ + { + Type: "overlay", + Source: "overlay", + Options: []string{ + "ro", + "upperdir=/path/to/source", + }, + }, + }, + }, + { + name: "overlay mount", + mounts: []mount.Mount{ + { + Type: "overlay", + Source: "overlay", + Options: []string{ + "lowerdir=/path/to/lower", + "upperdir=/path/to/upper", + }, + }, + }, + expect: []mount.Mount{ + { + Type: "overlay", + Source: "overlay", + Options: []string{ + "lowerdir=/path/to/lower", + "upperdir=/path/to/upper", + }, + }, + }, + }, + { + name: "multiple mounts", + mounts: []mount.Mount{ + { + Type: "bind", 
+ Source: "/path/to/source1", + }, + { + Type: "bind", + Source: "/path/to/source2", + }, + }, + expect: []mount.Mount{ + { + Type: "bind", + Source: "/path/to/source1", + }, + { + Type: "bind", + Source: "/path/to/source2", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := bindToOverlay(tc.mounts) + if !reflect.DeepEqual(result, tc.expect) { + t.Errorf("unexpected result: got %v, want %v", result, tc.expect) + } + }) + } +} diff --git a/go.mod b/go.mod index c939980fd2fd3..648cb3e438178 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( dario.cat/mergo v1.0.2 github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 github.com/Microsoft/go-winio v0.6.2 - github.com/Microsoft/hcsshim v0.14.0-rc.1 + github.com/Microsoft/hcsshim v0.14.1 github.com/checkpoint-restore/checkpointctl v1.4.0 github.com/checkpoint-restore/go-criu/v7 v7.2.0 github.com/containerd/btrfs/v2 v2.0.0 @@ -43,7 +43,7 @@ require ( github.com/google/uuid v1.6.0 github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 github.com/intel/goresctrl v0.10.0 - github.com/klauspost/compress v1.18.1 + github.com/klauspost/compress v1.18.5 github.com/mdlayher/vsock v1.2.1 github.com/moby/locker v1.0.1 github.com/moby/sys/mountinfo v0.7.2 @@ -119,7 +119,7 @@ require ( github.com/mdlayher/socket v0.5.1 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect - github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/spdystream v0.5.1 // indirect github.com/moby/sys/capability v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect diff --git a/go.sum b/go.sum index 35d94fe0b3590..99c5839946ce9 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.14.0-rc.1 h1:qAPXKwGOkVn8LlqgBN8GS0bxZ83hOJpcjxzmlQKxKsQ= -github.com/Microsoft/hcsshim v0.14.0-rc.1/go.mod h1:hTKFGbnDtQb1wHiOWv4v0eN+7boSWAHyK/tNAaYZL0c= +github.com/Microsoft/hcsshim v0.14.1 h1:CMuB3fqQVfPdhyXhUqYdUmPUIOhJkmghCx3dJet8Cqs= +github.com/Microsoft/hcsshim v0.14.1/go.mod h1:VnzvPLyWUhxiPVsJ31P6XadxCcTogTguBFDy/1GR/OM= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -202,8 +202,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/knqyf263/go-plugin v0.9.0 h1:CQs2+lOPIlkZVtcb835ZYDEoyyWJWLbSTWeCs0EwTwI= github.com/knqyf263/go-plugin v0.9.0/go.mod h1:2z5lCO1/pez6qGo8CvCxSlBFSEat4MEp1DrnA+f7w8Q= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -229,8 +229,8 @@ github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPn github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= -github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/spdystream v0.5.1 h1:9sNYeYZUcci9R6/w7KDaFWEWeV4LStVG78Mpyq/Zm/Y= +github.com/moby/spdystream v0.5.1/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk= github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= diff --git a/integration/client/container_linux_test.go b/integration/client/container_linux_test.go index 974f3e64234d6..e2d1354a1bca9 100644 --- a/integration/client/container_linux_test.go +++ b/integration/client/container_linux_test.go @@ -35,6 +35,7 @@ import ( cgroupsv2 "github.com/containerd/cgroups/v3/cgroup2" "github.com/containerd/containerd/api/types/runc/options" "github.com/containerd/errdefs" + "github.com/containerd/platforms" "github.com/stretchr/testify/assert" . "github.com/containerd/containerd/v2/client" @@ -50,6 +51,7 @@ import ( "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/require" + "golang.org/x/sync/semaphore" "golang.org/x/sys/unix" ) @@ -1823,3 +1825,70 @@ func TestIssue10589(t *testing.T) { require.NoError(t, err, "container status") assert.Equal(t, Stopped, status.Status) } + +// TestIssue13030 is a regression test for parallel image unpacking. 
+// The test validates that when multiple layers are unpacked in parallel, +// that whiteout files are properly processed and do not cause files to +// be unexpectedly present in the final rootfs. +// +// https://github.com/containerd/containerd/issues/13030 +func TestIssue13030(t *testing.T) { + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { client.Close() }) + + ctx, cancel := testContext(t) + t.Cleanup(cancel) + + image, err := client.Pull(ctx, + images.Get(images.Whiteout), + WithPlatformMatcher(platforms.Default()), + WithPullUnpack, + WithUnpackOpts([]UnpackOpt{WithUnpackLimiter(semaphore.NewWeighted(3))}), + ) + t.Cleanup(func() { + client.ImageService().Delete(ctx, images.Get(images.Whiteout)) + }) + if err != nil { + t.Fatal(err) + } + + container, err := client.NewContainer(ctx, t.Name(), + WithNewSnapshot(t.Name(), image), + WithNewSpec(oci.WithImageConfig(image), + withProcessArgs("/bin/sh", "-e", "-c", "test ! -e /file-to-delete && test ! -e /dir-to-delete")), + ) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + container.Delete(ctx, WithSnapshotCleanup) + }) + + task, err := container.NewTask(ctx, empty()) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + task.Delete(ctx) + }) + + statusC, err := task.Wait(ctx) + if err != nil { + t.Fatal(err) + } + err = task.Start(ctx) + if err != nil { + t.Fatal(err) + } + status := <-statusC + code, _, err := status.Result() + if err != nil { + t.Fatal(err) + } + if code != 0 { + t.Errorf("expected status 0 from wait but received %d", code) + } +} diff --git a/integration/client/import_test.go b/integration/client/import_test.go index 7a809a3af9dee..3922b5c30d59e 100644 --- a/integration/client/import_test.go +++ b/integration/client/import_test.go @@ -65,6 +65,10 @@ func TestExportAndImport(t *testing.T) { // images remain sane, and that the Garbage Collector won't delete part of its // content. 
func TestExportAndImportMultiLayer(t *testing.T) { + // ghcr.io/containerd/volume-copy-up:2.1 is not available on s390x + if runtime.GOARCH == "s390x" { + t.Skip("test image not available on s390x") + } testExportImport(t, testMultiLayeredImage) } diff --git a/integration/container_cgroup_mount_options_linux_test.go b/integration/container_cgroup_mount_options_linux_test.go new file mode 100644 index 0000000000000..9de39eb4a6aeb --- /dev/null +++ b/integration/container_cgroup_mount_options_linux_test.go @@ -0,0 +1,79 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package integration + +import ( + "os" + "strings" + "testing" + + "github.com/containerd/cgroups/v3" + "github.com/containerd/containerd/v2/core/mount" + "github.com/containerd/containerd/v2/integration/images" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPrivilegedContainerCgroupMountOptions(t *testing.T) { + if f := os.Getenv("RUNC_FLAVOR"); f == "crun" { + t.Skip("Skipping until crun supports cgroup v2 mount options (https://github.com/containers/crun/pull/2040)") + } + if cgroups.Mode() != cgroups.Unified { + t.Skip("Requires cgroup v2") + } + + hostMountBefore, err := mount.Lookup("/sys/fs/cgroup") + require.NoError(t, err) + + if !strings.Contains(hostMountBefore.VFSOptions, "nsdelegate") && !strings.Contains(hostMountBefore.VFSOptions, "memory_recursiveprot") { + t.Skip("requires host cgroup mount to have nsdelegate or memory_recursiveprot") + } + + testImage := images.Get(images.BusyBox) + EnsureImageExists(t, testImage) + + t.Log("Create a sandbox with privileged=true") + sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "privileged-cgroup-mount-test", WithPodSecurityContext(true)) + + t.Log("Create a container with privileged=true") + cnConfig := ContainerConfig("container", testImage, WithCommand("sh", "-c", "sleep 1d"), WithSecurityContext(true)) + cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig) + require.NoError(t, err) + t.Cleanup(func() { + if err := runtimeService.RemoveContainer(cn); err != nil { + t.Logf("failed to remove container %s: %v", cn, err) + } + }) + + t.Log("Start the container") + require.NoError(t, runtimeService.StartContainer(cn)) + t.Cleanup(func() { + if err := runtimeService.StopContainer(cn, 10); err != nil { + t.Logf("failed to stop container %s: %v", cn, err) + } + }) + + hostMountAfter, err := mount.Lookup("/sys/fs/cgroup") + require.NoError(t, err) + + if strings.Contains(hostMountBefore.VFSOptions, "nsdelegate") { + assert.Contains(t, 
hostMountAfter.VFSOptions, "nsdelegate", "nsdelegate should be preserved on the host cgroup mount") + } + if strings.Contains(hostMountBefore.VFSOptions, "memory_recursiveprot") { + assert.Contains(t, hostMountAfter.VFSOptions, "memory_recursiveprot", "memory_recursiveprot should be preserved on the host cgroup mount") + } +} diff --git a/integration/images/image_list.go b/integration/images/image_list.go index 47cbb1b1e1e0c..2d851f68da61a 100644 --- a/integration/images/image_list.go +++ b/integration/images/image_list.go @@ -38,6 +38,7 @@ type ImageList struct { VolumeOwnership string ArgsEscaped string Nginx string + Whiteout string } var ( @@ -57,6 +58,7 @@ func initImages(imageListFile string) { VolumeOwnership: "ghcr.io/containerd/volume-ownership:2.1", ArgsEscaped: "cplatpublic.azurecr.io/args-escaped-test-image-ns:1.0", Nginx: "ghcr.io/containerd/nginx:1.27.0", + Whiteout: "ghcr.io/containerd/whiteout-test:1.0", } if imageListFile != "" { @@ -96,6 +98,8 @@ const ( ArgsEscaped // Nginx image Nginx + // Whiteout image + Whiteout ) func initImageMap(imageList ImageList) map[int]string { @@ -108,6 +112,7 @@ func initImageMap(imageList ImageList) map[int]string { images[VolumeOwnership] = imageList.VolumeOwnership images[ArgsEscaped] = imageList.ArgsEscaped images[Nginx] = imageList.Nginx + images[Whiteout] = imageList.Whiteout return images } diff --git a/internal/cri/instrument/instrumented_service.go b/internal/cri/instrument/instrumented_service.go index 52169c3b89910..3509bd3ce6c5e 100644 --- a/internal/cri/instrument/instrumented_service.go +++ b/internal/cri/instrument/instrumented_service.go @@ -18,7 +18,6 @@ package instrument import ( "context" - "errors" "github.com/containerd/errdefs" "github.com/containerd/errdefs/pkg/errgrpc" @@ -664,5 +663,5 @@ func (in *instrumentedService) RuntimeConfig(ctx context.Context, r *runtime.Run } func (in *instrumentedService) UpdatePodSandboxResources(ctx context.Context, r *runtime.UpdatePodSandboxResourcesRequest) 
(res *runtime.UpdatePodSandboxResourcesResponse, err error) { - return nil, errors.New("not implemented yet") + return nil, errgrpc.ToGRPC(errdefs.ErrNotImplemented) } diff --git a/internal/cri/opts/spec_linux_opts.go b/internal/cri/opts/spec_linux_opts.go index 348e699496def..1818352736189 100644 --- a/internal/cri/opts/spec_linux_opts.go +++ b/internal/cri/opts/spec_linux_opts.go @@ -22,6 +22,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "sort" "strconv" "strings" @@ -70,11 +71,35 @@ func withMounts(osi osinterface.OS, config *runtime.ContainerConfig, extra []*ru if cgroupWritable { mode = "rw" } + + cgroupOptions := []string{"nosuid", "noexec", "nodev", "relatime", mode} + + hasCgroupNS := false + if s.Linux != nil { + hasCgroupNS = slices.ContainsFunc(s.Linux.Namespaces, func(ns runtimespec.LinuxNamespace) bool { + return ns.Type == runtimespec.CgroupNamespace + }) + } + + // If a container shares the host's cgroup namespace, mounting cgroup2 + // inside the container applies the new mount options to the single shared + // cgroup2 VFS superblock. Therefore, explicitly copy these options from + // the host's /sys/fs/cgroup to avoid being stripped. 
+ if !hasCgroupNS { + if mountInfo, err := osi.LookupMount("/sys/fs/cgroup"); err == nil { + for opt := range strings.SplitSeq(mountInfo.VFSOptions, ",") { + if opt == "nsdelegate" || opt == "memory_recursiveprot" { + cgroupOptions = append(cgroupOptions, opt) + } + } + } + } + s.Mounts = append(s.Mounts, runtimespec.Mount{ Source: "cgroup", Destination: "/sys/fs/cgroup", Type: "cgroup", - Options: []string{"nosuid", "noexec", "nodev", "relatime", mode}, + Options: cgroupOptions, }) // Copy all mounts from default mounts, except for diff --git a/internal/cri/opts/spec_linux_test.go b/internal/cri/opts/spec_linux_test.go index 1c9942f80cc34..2d729d1bb8058 100644 --- a/internal/cri/opts/spec_linux_test.go +++ b/internal/cri/opts/spec_linux_test.go @@ -17,10 +17,15 @@ package opts import ( + "context" "testing" + "github.com/containerd/containerd/v2/core/mount" + ostesting "github.com/containerd/containerd/v2/pkg/os/testing" + runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) func TestMergeGids(t *testing.T) { @@ -45,3 +50,73 @@ func TestRestrictOOMScoreAdj(t *testing.T) { require.NoError(t, err) assert.Equal(t, got, current+1) } + +func TestWithMountsCgroupNamespaceOptions(t *testing.T) { + tests := []struct { + name string + hasCgroupNS bool + hostMountOpts string + expectedOpts []string + }{ + { + name: "has cgroupns, should use default options", + hasCgroupNS: true, + hostMountOpts: "rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot", + expectedOpts: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, + }, + { + name: "no cgroupns, with host options present", + hasCgroupNS: false, + hostMountOpts: "rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot", + expectedOpts: []string{"nosuid", "noexec", "nodev", "relatime", "ro", "nsdelegate", "memory_recursiveprot"}, + }, + { + name: "no cgroupns, with host 
missing nsdelegate", + hasCgroupNS: false, + hostMountOpts: "rw,nosuid,nodev,noexec,relatime,memory_recursiveprot", + expectedOpts: []string{"nosuid", "noexec", "nodev", "relatime", "ro", "memory_recursiveprot"}, + }, + { + name: "no cgroupns, with host missing all extra options", + hasCgroupNS: false, + hostMountOpts: "rw,nosuid,nodev,noexec,relatime", + expectedOpts: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeOS := ostesting.NewFakeOS() + fakeOS.LookupMountFn = func(path string) (mount.Info, error) { + if path == "/sys/fs/cgroup" { + return mount.Info{VFSOptions: tt.hostMountOpts}, nil + } + return mount.Info{}, nil + } + + config := &runtime.ContainerConfig{ + Linux: &runtime.LinuxContainerConfig{}, + } + + spec := &runtimespec.Spec{} + if tt.hasCgroupNS { + spec.Linux = &runtimespec.Linux{Namespaces: []runtimespec.LinuxNamespace{{Type: runtimespec.CgroupNamespace}}} + } + + opt := withMounts(fakeOS, config, nil, "", nil, false) + err := opt(context.Background(), nil, nil, spec) + require.NoError(t, err) + + var cgroupMount *runtimespec.Mount + for _, m := range spec.Mounts { + if m.Destination == "/sys/fs/cgroup" { + cgroupMount = &m + break + } + } + + require.NotNil(t, cgroupMount) + assert.ElementsMatch(t, tt.expectedOpts, cgroupMount.Options) + }) + } +} diff --git a/internal/cri/server/container_create.go b/internal/cri/server/container_create.go index 5278568f956cb..bcc5080d5a764 100644 --- a/internal/cri/server/container_create.go +++ b/internal/cri/server/container_create.go @@ -792,6 +792,14 @@ func (c *criService) buildLinuxSpec( } }() + // cgroupns is used for hiding /sys/fs/cgroup from containers. + // For compatibility, cgroupns is not used when running in cgroup v1 mode or in privileged. 
+ // https://github.com/containers/libpod/issues/4363 + // https://github.com/kubernetes/enhancements/blob/0e409b47497e398b369c281074485c8de129694f/keps/sig-node/20191118-cgroups-v2.md#cgroup-namespace + if isUnifiedCgroupsMode() && !securityContext.GetPrivileged() { + specOpts = append(specOpts, oci.WithLinuxNamespace(runtimespec.LinuxNamespace{Type: runtimespec.CgroupNamespace})) + } + var ociSpecOpts oci.SpecOpts if ociRuntime.CgroupWritable { ociSpecOpts = customopts.WithMountsCgroupWritable(c.os, config, extraMounts, mountLabel, runtimeHandler) @@ -930,14 +938,6 @@ func (c *criService) buildLinuxSpec( annotations.DefaultCRIAnnotations(sandboxID, containerName, imageName, sandboxConfig, false)..., ) - // cgroupns is used for hiding /sys/fs/cgroup from containers. - // For compatibility, cgroupns is not used when running in cgroup v1 mode or in privileged. - // https://github.com/containers/libpod/issues/4363 - // https://github.com/kubernetes/enhancements/blob/0e409b47497e398b369c281074485c8de129694f/keps/sig-node/20191118-cgroups-v2.md#cgroup-namespace - if isUnifiedCgroupsMode() && !securityContext.GetPrivileged() { - specOpts = append(specOpts, oci.WithLinuxNamespace(runtimespec.LinuxNamespace{Type: runtimespec.CgroupNamespace})) - } - return specOpts, nil } diff --git a/internal/cri/server/container_create_linux_test.go b/internal/cri/server/container_create_linux_test.go index afd4feaea1c5c..149a9d9dee545 100644 --- a/internal/cri/server/container_create_linux_test.go +++ b/internal/cri/server/container_create_linux_test.go @@ -487,6 +487,52 @@ func TestPrivilegedBindMount(t *testing.T) { } } +func TestCgroupNamespace(t *testing.T) { + testPid := uint32(1234) + c := newTestCRIService() + testSandboxID := "sandbox-id" + testContainerName := "container-name" + containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData() + ociRuntime := config.Runtime{} + + tests := []struct { + desc string + privileged bool + expectCgroupNamespace bool + }{ 
+ { + desc: "non-privileged container should get cgroup namespace", + privileged: false, + expectCgroupNamespace: true, + }, + { + desc: "privileged container should not get cgroup namespace", + privileged: true, + expectCgroupNamespace: false, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + containerConfig.Linux.SecurityContext.Privileged = tt.privileged + sandboxConfig.Linux.SecurityContext.Privileged = tt.privileged + + spec, err := c.buildContainerSpec(currentPlatform, t.Name(), testSandboxID, testPid, "", testContainerName, testImageName, containerConfig, sandboxConfig, imageConfig, nil, ociRuntime, nil) + assert.NoError(t, err) + + hasCgroupNS := false + for _, ns := range spec.Linux.Namespaces { + if ns.Type == runtimespec.CgroupNamespace { + hasCgroupNS = true + break + } + } + + assert.Equal(t, tt.expectCgroupNamespace, hasCgroupNS) + }) + } +} + func TestMountPropagation(t *testing.T) { sharedLookupMountFn := func(string) (mount.Info, error) { diff --git a/pkg/archive/tar_unix.go b/pkg/archive/tar_unix.go index fa611006098f7..684ea5783d950 100644 --- a/pkg/archive/tar_unix.go +++ b/pkg/archive/tar_unix.go @@ -80,7 +80,7 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { return nil, err } // Call chmod to avoid permission mask - if err := os.Chmod(name, perm); err != nil { + if err := f.Chmod(perm); err != nil { f.Close() return nil, err } diff --git a/pkg/oci/spec_opts.go b/pkg/oci/spec_opts.go index 6fc72da6bd17d..c298e4bb29c2c 100644 --- a/pkg/oci/spec_opts.go +++ b/pkg/oci/spec_opts.go @@ -963,7 +963,7 @@ func WithAppendAdditionalGroups(groups ...string) SpecOpts { defer ensureAdditionalGids(s) var ugroups []user.Group - f, groupErr := root.Open("etc/group") + f, groupErr := openUserFile(root, "etc/group") if groupErr == nil { defer f.Close() ugroups, groupErr = user.ParseGroup(f) @@ -1142,7 +1142,7 @@ func UserFromPath(root string, filter func(user.User) bool) (user.User, error) { // 
UserFromFS inspects the user object using /etc/passwd in the specified fs.FS. // filter can be nil. func UserFromFS(root fs.FS, filter func(user.User) bool) (user.User, error) { - f, err := root.Open("etc/passwd") + f, err := openUserFile(root, "etc/passwd") if err != nil { return user.User{}, err } @@ -1174,7 +1174,7 @@ func GIDFromPath(root string, filter func(user.Group) bool) (gid uint32, err err // GIDFromFS inspects the GID using /etc/group in the specified fs.FS. // filter can be nil. func GIDFromFS(root fs.FS, filter func(user.Group) bool) (gid uint32, err error) { - f, err := root.Open("etc/group") + f, err := openUserFile(root, "etc/group") if err != nil { return 0, err } @@ -1191,7 +1191,7 @@ func GIDFromFS(root fs.FS, filter func(user.Group) bool) (gid uint32, err error) } func getSupplementalGroupsFromFS(root fs.FS, filter func(user.Group) bool) ([]uint32, error) { - f, err := root.Open("etc/group") + f, err := openUserFile(root, "etc/group") if err != nil { return []uint32{}, err } @@ -1789,3 +1789,43 @@ func WithWindowsNetworkNamespace(ns string) SpecOpts { return nil } } + +// readLinker defines the ReadLink method locally. +// We keep this shim to ensure compatibility with build environments where +// the standard library's fs.ReadLinkFS interface is not yet available or recognized. +type readLinker interface { + ReadLink(name string) (string, error) +} + +// openUserFile attempts to open a file within the root fs. +// It handles cases where the file is an absolute symlink (e.g., NixOS /etc/passwd -> /nix/store/...), +// which triggers "path escapes from parent" errors in Go 1.24+ due to stricter os.DirFS validation. +func openUserFile(root fs.FS, name string) (fs.File, error) { + f, err := root.Open(name) + if err == nil { + return f, nil + } + + // Check if the FS implements our local ReadLink interface. + // We use a local interface instead of fs.ReadLinkFS to avoid strict dependency + // issues in some build environments. 
+ if lfs, ok := root.(readLinker); ok { + if target, lerr := lfs.ReadLink(name); lerr == nil { + // Use filepath.IsAbs to handle platform-agnostic absolute path checks + if filepath.IsAbs(target) { + // Re-anchor the absolute path to the root. + // e.g. /nix/store/... becomes nix/store/... (relative to root fs) + // We use filepath.Rel to safely strip the leading separator. + rel, rerr := filepath.Rel(string(filepath.Separator), target) + if rerr == nil { + // filepath.Rel might return OS-specific separators (backslashes on Windows). + // fs.Open strictly expects forward slashes, so we convert it. + return root.Open(filepath.ToSlash(rel)) + } + } + } + } + + // Return the original error if we couldn't resolve it + return nil, err +} diff --git a/pkg/oci/spec_test.go b/pkg/oci/spec_test.go index 5e0a48986e93a..dfb7d6d65b143 100644 --- a/pkg/oci/spec_test.go +++ b/pkg/oci/spec_test.go @@ -18,14 +18,19 @@ package oci import ( "context" + "io" + "io/fs" + "os" + "path/filepath" "runtime" "testing" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/containerd/containerd/v2/core/containers" "github.com/containerd/containerd/v2/pkg/namespaces" "github.com/containerd/containerd/v2/pkg/testutil" + "github.com/containerd/continuity/fs/fstest" + "github.com/moby/sys/user" + "github.com/opencontainers/runtime-spec/specs-go" ) func TestGenerateSpec(t *testing.T) { @@ -325,3 +330,105 @@ func TestWithPrivileged(t *testing.T) { t.Error("Did not find mount for cgroupfs") } } + +func TestOpenUserFile_AbsoluteSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("absolute symlink handling is only supported on non-Windows platforms") + } + + expectedContent := []byte("root:x:0:0:root:/root:/bin/bash" + t.Name()) + + root := t.TempDir() + // Use 'continuity' library to create a directory structure simulating NixOS + if err := fstest.Apply( + fstest.CreateDir("/etc", 0o755), + fstest.CreateDir("/nix/store/abcd", 0o755), + 
fstest.CreateFile("/nix/store/abcd/passwd", expectedContent, 0o644), + // /etc/passwd -> /nix/store/abcd/passwd (absolute symlink) + fstest.Symlink("/nix/store/abcd/passwd", "/etc/passwd"), + ).Apply(root); err != nil { + t.Fatal(err) + } + + rootFS := os.DirFS(root) + + // Ensure the FS implements the ReadLink interface. + // If the native os.DirFS doesn't implement it (depending on Go version), + // wrap it in our readLinkFS helper. + if _, ok := rootFS.(readLinker); !ok { + t.Logf("os.DirFS does not implement ReadLink; wrapping to use ReadLink") + rootFS = readLinkFS{root: root, fs: rootFS} + } + + f, err := openUserFile(rootFS, "etc/passwd") + if err != nil { + t.Fatalf("openUserFile failed on absolute symlink: %v", err) + } + defer f.Close() + + content, err := io.ReadAll(f) + if err != nil { + t.Fatal(err) + } + if string(content) != string(expectedContent) { + t.Errorf("expected content %q, got %q", string(expectedContent), string(content)) + } +} + +func TestGroupLookup_AbsoluteSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("absolute symlink handling is only supported on non-Windows platforms") + } + + expectedContent := []byte("dummygroup:x:1001:paulo\n") + + root := t.TempDir() + if err := fstest.Apply( + fstest.CreateDir("/etc", 0o755), + fstest.CreateDir("/nix/store/abcd", 0o755), + fstest.CreateFile("/nix/store/abcd/group", expectedContent, 0o644), + fstest.Symlink("/nix/store/abcd/group", "/etc/group"), + ).Apply(root); err != nil { + t.Fatal(err) + } + + rootFS := os.DirFS(root) + if _, ok := rootFS.(readLinker); !ok { + rootFS = readLinkFS{root: root, fs: rootFS} + } + + gid, err := GIDFromFS(rootFS, func(g user.Group) bool { + return g.Name == "dummygroup" + }) + if err != nil { + t.Fatalf("GIDFromFS failed on absolute symlink: %v", err) + } + if gid != 1001 { + t.Errorf("expected GID 1001, got %d", gid) + } + + gids, err := getSupplementalGroupsFromFS(rootFS, func(g user.Group) bool { + return g.Name == "dummygroup" + }) + if 
err != nil { + t.Fatalf("getSupplementalGroupsFromFS failed on absolute symlink: %v", err) + } + if len(gids) != 1 || gids[0] != 1001 { + t.Errorf("expected supplemental GIDs [1001], got %v", gids) + } +} + +// Helpers for testing ReadLink support +type readLinkFS struct { + root string + fs fs.FS +} + +func (r readLinkFS) Open(name string) (fs.File, error) { + return r.fs.Open(name) +} + +func (r readLinkFS) ReadLink(name string) (string, error) { + // Force link reading using the actual path on disk + return os.Readlink(filepath.Join(r.root, filepath.FromSlash(name))) +} diff --git a/plugins/diff/walking/plugin/plugin.go b/plugins/diff/walking/plugin/plugin.go index 0c5ce67d0b1df..baccf1daf402e 100644 --- a/plugins/diff/walking/plugin/plugin.go +++ b/plugins/diff/walking/plugin/plugin.go @@ -17,9 +17,12 @@ package plugin import ( + "errors" + "github.com/containerd/containerd/v2/core/diff" "github.com/containerd/containerd/v2/core/diff/apply" "github.com/containerd/containerd/v2/core/metadata" + "github.com/containerd/containerd/v2/core/mount" "github.com/containerd/containerd/v2/plugins" "github.com/containerd/containerd/v2/plugins/diff/walking" "github.com/containerd/platforms" @@ -33,6 +36,7 @@ func init() { ID: "walking", Requires: []plugin.Type{ plugins.MetadataPlugin, + plugins.MountManagerPlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { md, err := ic.GetSingle(plugins.MetadataPlugin) @@ -40,12 +44,19 @@ func init() { return nil, err } + var mm mount.Manager + if mountsI, err := ic.GetSingle(plugins.MountManagerPlugin); err == nil { + mm = mountsI.(mount.Manager) + } else if !errors.Is(err, plugin.ErrPluginNotFound) { + return nil, err + } + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) cs := md.(*metadata.DB).ContentStore() return diffPlugin{ Comparer: walking.NewWalkingDiff(cs), - Applier: apply.NewFileSystemApplier(cs), + Applier: apply.NewFileSystemApplierWithMountManager(cs, mm), }, nil }, }) diff --git 
a/releases/v2.2.3.toml b/releases/v2.2.3.toml new file mode 100644 index 0000000000000..863563cc0f040 --- /dev/null +++ b/releases/v2.2.3.toml @@ -0,0 +1,33 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "containerd" +github_repo = "containerd/containerd" +match_deps = "^github.com/(containerd/[a-zA-Z0-9-]+)$" +ignore_deps = [ "github.com/containerd/containerd" ] + +# previous release +previous = "v2.2.2" + +pre_release = false + +preface = """\
+The third patch release for containerd 2.2 contains various fixes
+and updates including a security patch.
+
+### Security Updates
+
+* **spdystream**
+  * [**CVE-2026-35469**](https://github.com/moby/spdystream/security/advisories/GHSA-pc3f-x583-g7j2)
+"""
+
+postface = """\
+### Which file should I download?
+* `containerd-<VERSION>-<OS>-<ARCH>.tar.gz`: ✅Recommended. Dynamically linked with glibc 2.35 (Ubuntu 22.04).
+* `containerd-static-<VERSION>-<OS>-<ARCH>.tar.gz`: Statically linked. Expected to be used on Linux distributions that do not use glibc >= 2.35. Not position-independent.
+
+In addition to containerd, typically you will have to install [runc](https://github.com/opencontainers/runc/releases)
+and [CNI plugins](https://github.com/containernetworking/plugins/releases) from their official sites too.
+
+See also the [Getting Started](https://github.com/containerd/containerd/blob/main/docs/getting-started.md) documentation. 
+""" diff --git a/script/resize-vagrant-root.sh b/script/resize-vagrant-root.sh index 1da8681cc9127..11df94226f7ab 100755 --- a/script/resize-vagrant-root.sh +++ b/script/resize-vagrant-root.sh @@ -23,7 +23,8 @@ df_line=$(df -T / | grep '^/dev/') if [[ "$df_line" =~ ^/dev/([a-z]+)([0-9+]) ]]; then dev="${BASH_REMATCH[1]}" part="${BASH_REMATCH[2]}" - growpart "/dev/$dev" "$part" + # growpart prints "NOCHANGE" when the partition is already at max size + out=$(growpart "/dev/$dev" "$part" 2>&1) || grep -q "^NOCHANGE:" <<< "$out" fstype=$(echo "$df_line" | awk '{print $2}') if [[ "$fstype" = 'btrfs' ]]; then diff --git a/script/setup/prepare_env_windows.ps1 b/script/setup/prepare_env_windows.ps1 index 70eb1e2d91770..7d589bfcdbbdf 100644 --- a/script/setup/prepare_env_windows.ps1 +++ b/script/setup/prepare_env_windows.ps1 @@ -5,7 +5,7 @@ # lived test environment. Set-MpPreference -DisableRealtimeMonitoring:$true -$PACKAGES= @{ mingw = "10.2.0"; git = ""; golang = "1.25.7"; make = ""; nssm = "" } +$PACKAGES= @{ mingw = "10.2.0"; git = ""; golang = "1.25.9"; make = ""; nssm = "" } Write-Host "Downloading chocolatey package" curl.exe -L "https://packages.chocolatey.org/chocolatey.0.10.15.nupkg" -o 'c:\choco.zip' diff --git a/script/setup/runc-version b/script/setup/runc-version index 043ba4f603801..3252f641be89e 100644 --- a/script/setup/runc-version +++ b/script/setup/runc-version @@ -1 +1 @@ -v1.3.4 +v1.3.5 diff --git a/script/setup/runhcs-version b/script/setup/runhcs-version index e2a331bbdf440..64a3b7907b07d 100644 --- a/script/setup/runhcs-version +++ b/script/setup/runhcs-version @@ -1 +1 @@ -v0.14.0-rc.1 +v0.14.1 diff --git a/script/test/cri-integration.sh b/script/test/cri-integration.sh index dec53e8f88ddd..ac70168ad0cca 100755 --- a/script/test/cri-integration.sh +++ b/script/test/cri-integration.sh @@ -45,6 +45,10 @@ CMD="" if [ -n "${sudo}" ]; then CMD+="${sudo} " fi +CMD+="env " +if [ -n "${RUNC_FLAVOR:-}" ]; then + CMD+="RUNC_FLAVOR=${RUNC_FLAVOR} " +fi 
CMD+="${PWD}/bin/cri-integration.test" ${CMD} --test.run="${FOCUS}" --test.v \ diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 4528059ca6815..804a20181677f 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -31,6 +31,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2d" binary: s2d @@ -57,6 +60,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2sx" binary: s2sx @@ -84,6 +90,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm archives: - @@ -91,7 +100,7 @@ archives: name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" format_overrides: - goos: windows - format: zip + formats: ['zip'] files: - unpack/* - s2/LICENSE diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 244ee19c4bfa1..e839fe9c60cc2 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -7,7 +7,7 @@ This package provides various compression algorithms. * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). * [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. 
-* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped/zstd HTTP requests efficiently. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) @@ -27,6 +27,28 @@ Use the links above for more information on each. # changelog +* Feb 9th, 2026 [1.18.4](https://github.com/klauspost/compress/releases/tag/v1.18.4) + * gzhttp: Add zstandard to server handler wrapper https://github.com/klauspost/compress/pull/1121 + * zstd: Add ResetWithOptions to encoder/decoder https://github.com/klauspost/compress/pull/1122 + * gzhttp: preserve qvalue when extra parameters follow in Accept-Encoding by @analytically in https://github.com/klauspost/compress/pull/1116 + +* Jan 16th, 2026 [1.18.3](https://github.com/klauspost/compress/releases/tag/v1.18.3) + * Downstream CVE-2025-61728. See [golang/go#77102](https://github.com/golang/go/issues/77102). 
+ +* Dec 1st, 2025 - [1.18.2](https://github.com/klauspost/compress/releases/tag/v1.18.2) + * flate: Fix invalid encoding on level 9 with single value input in https://github.com/klauspost/compress/pull/1115 + * flate: reduce stateless allocations by @RXamzin in https://github.com/klauspost/compress/pull/1106 + +* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) - RETRACTED + * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 + * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 + * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 + * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 + * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 + * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 + * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 + * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 + * Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 @@ -36,6 +58,9 @@ Use the links above for more information on each. * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 +
+ See changes to v1.17.x + * Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 @@ -102,7 +127,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - + +
See changes to v1.16.x @@ -589,7 +615,7 @@ While the release has been extensively tested, it is recommended to testing when # deflate usage -The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: +The packages are drop-in replacements for standard library [deflate](https://godoc.org/github.com/klauspost/compress/flate), [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip), and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). Simply replace the import path to use them: Typical speed is about 2x of the standard library packages. @@ -600,17 +626,15 @@ Typical speed is about 2x of the standard library packages. | `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | | `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop-in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. 
-The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). +The packages implement the same API as the standard library, so you can use the original godoc documentation: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). Currently there is only minor speedup on decompression (mostly CRC32 calculation). Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers consider using -the stateless compress described below. +the stateless compression described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). @@ -669,3 +693,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv # license This code is licensed under the same conditions as the original Go code. See LICENSE file. + + + + + diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index 99ddd4af97c79..2d6ef64be15b2 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // This file contains the specialisation of Decoder.Decompress4X // and Decoder.Decompress1X that use an asm implementation of thir main loops. 
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go index 908c17de63fc8..610392322240b 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // This file contains a generic implementation of Decoder.Decompress4X. package huff0 diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go index e802579c4f967..b97f9056f4cb9 100644 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package cpuinfo diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index fd35ea1480a0d..0e33aea4422f3 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -78,6 +78,7 @@ func (b *blockEnc) initNewEncode() { b.recentOffsets = [3]uint32{1, 4, 8} b.litEnc.Reuse = huff0.ReusePolicyNone b.coders.setPrev(nil, nil, nil) + b.dictLitEnc = nil } // reset will reset the block for a new encode, but in the same stream, diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 30df5513d5607..c7e500f02a955 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -39,9 +39,6 @@ type Decoder struct { frame *frameDec - // Custom dictionaries. 
- dicts map[uint32]*dict - // streamWg is the waitgroup for all streams streamWg sync.WaitGroup } @@ -101,12 +98,10 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { d.current.err = ErrDecoderNilInput } - // Transfer option dicts. - d.dicts = make(map[uint32]*dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc + // Initialize dict map if needed. + if d.o.dicts == nil { + d.o.dicts = make(map[uint32]*dict) } - d.o.dicts = nil // Create decoders d.decoders = make(chan *blockDec, d.o.concurrent) @@ -238,6 +233,21 @@ func (d *Decoder) Reset(r io.Reader) error { return nil } +// ResetWithOptions will reset the decoder and apply the given options +// for the next stream or DecodeAll operation. +// Options are applied on top of the existing options. +// Some options cannot be changed on reset and will return an error. +func (d *Decoder) ResetWithOptions(r io.Reader, opts ...DOption) error { + d.o.resetOpt = true + defer func() { d.o.resetOpt = false }() + for _, o := range opts { + if err := o(&d.o); err != nil { + return err + } + } + return d.Reset(r) +} + // drainOutput will drain the output until errEndOfStream is sent. 
func (d *Decoder) drainOutput() { if d.current.cancel != nil { @@ -930,7 +940,7 @@ decodeStream: } func (d *Decoder) setDict(frame *frameDec) (err error) { - dict, ok := d.dicts[frame.DictionaryID] + dict, ok := d.o.dicts[frame.DictionaryID] if ok { if debugDecoder { println("setting dict", frame.DictionaryID) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 774c5f00fe420..537627a0789a9 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -20,10 +20,11 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []*dict + dicts map[uint32]*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int + resetOpt bool } func (o *decoderOptions) setDefault() { @@ -42,8 +43,15 @@ func (o *decoderOptions) setDefault() { // WithDecoderLowmem will set whether to use a lower amount of memory, // but possibly have to allocate more while running. +// Cannot be changed with ResetWithOptions. func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } + return func(o *decoderOptions) error { + if o.resetOpt && b != o.lowMem { + return errors.New("WithDecoderLowmem cannot be changed on Reset") + } + o.lowMem = b + return nil + } } // WithDecoderConcurrency sets the number of created decoders. @@ -53,18 +61,23 @@ func WithDecoderLowmem(b bool) DOption { // inflight blocks. // When decoding streams and setting maximum to 1, // no async decoding will be done. +// The value supplied must be at least 0. // When a value of 0 is provided GOMAXPROCS will be used. // By default this will be set to 4 or GOMAXPROCS, whatever is lower. +// Cannot be changed with ResetWithOptions. 
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { if n < 0 { - return errors.New("concurrency must be at least 1") + return errors.New("concurrency must be at least 0") } + newVal := n if n == 0 { - o.concurrent = runtime.GOMAXPROCS(0) - } else { - o.concurrent = n + newVal = runtime.GOMAXPROCS(0) } + if o.resetOpt && newVal != o.concurrent { + return errors.New("WithDecoderConcurrency cannot be changed on Reset") + } + o.concurrent = newVal return nil } } @@ -73,6 +86,7 @@ func WithDecoderConcurrency(n int) DOption { // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. // Maximum is 1 << 63 bytes. Default is 64GiB. +// Can be changed with ResetWithOptions. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { @@ -92,16 +106,20 @@ func WithDecoderMaxMemory(n uint64) DOption { // "zstd --train" from the Zstandard reference implementation. // // If several dictionaries with the same ID are provided, the last one will be used. +// Can be changed with ResetWithOptions. // // [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { + if o.dicts == nil { + o.dicts = make(map[uint32]*dict) + } for _, b := range dicts { d, err := loadDict(b) if err != nil { return err } - o.dicts = append(o.dicts, d) + o.dicts[d.id] = d } return nil } @@ -109,12 +127,16 @@ func WithDecoderDicts(dicts ...[]byte) DOption { // WithDecoderDictRaw registers a dictionary that may be used by the decoder. // The slice content can be arbitrary data. +// Can be changed with ResetWithOptions. 
func WithDecoderDictRaw(id uint32, content []byte) DOption { return func(o *decoderOptions) error { if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) } - o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + if o.dicts == nil { + o.dicts = make(map[uint32]*dict) + } + o.dicts[id] = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} return nil } } @@ -124,6 +146,7 @@ func WithDecoderDictRaw(id uint32, content []byte) DOption { // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. // If WithDecoderMaxMemory is set to a lower value, that will be used. // Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +// Can be changed with ResetWithOptions. func WithDecoderMaxWindow(size uint64) DOption { return func(o *decoderOptions) error { if size < MinWindowSize { @@ -141,6 +164,7 @@ func WithDecoderMaxWindow(size uint64) DOption { // or any size set in WithDecoderMaxMemory. // This can be used to limit decoding to a specific maximum output size. // Disabled by default. +// Can be changed with ResetWithOptions. func WithDecodeAllCapLimit(b bool) DOption { return func(o *decoderOptions) error { o.limitToCap = b @@ -153,17 +177,37 @@ func WithDecodeAllCapLimit(b bool) DOption { // This typically uses less allocations but will have the full decompressed object in memory. // Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. // Default is 128KiB. +// Cannot be changed with ResetWithOptions. func WithDecodeBuffersBelow(size int) DOption { return func(o *decoderOptions) error { + if o.resetOpt && size != o.decodeBufsBelow { + return errors.New("WithDecodeBuffersBelow cannot be changed on Reset") + } o.decodeBufsBelow = size return nil } } // IgnoreChecksum allows to forcibly ignore checksum checking. +// Can be changed with ResetWithOptions. 
func IgnoreChecksum(b bool) DOption { return func(o *decoderOptions) error { o.ignoreChecksum = b return nil } } + +// WithDecoderDictDelete removes dictionaries by ID. +// If no ids are passed, all dictionaries are deleted. +// Should be used with ResetWithOptions. +func WithDecoderDictDelete(ids ...uint32) DOption { + return func(o *decoderOptions) error { + if len(ids) == 0 { + clear(o.dicts) + } + for _, id := range ids { + delete(o.dicts, id) + } + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index c1192ec38f4db..c4de134a7a4d7 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -21,7 +21,7 @@ type fastBase struct { crc *xxhash.Digest tmp [8]byte blk *blockEnc - lastDictID uint32 + lastDict *dict lowMem bool } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c1581cfcb8b4b..851799322bd8c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -479,10 +479,13 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if len(e.dictTable) != len(e.table) { e.dictTable = make([]prevEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -510,13 +513,14 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table + if len(e.dictLongTable) != len(e.longTable) || dictChanged 
{ if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -538,8 +542,8 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id } + e.lastDict = d // Reset table to initial state copy(e.longTable[:], e.dictLongTable) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 85dcd28c32eb2..3305f09248c04 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -1102,10 +1102,13 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -1133,14 +1136,15 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id e.allDirty = true } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1162,9 +1166,9 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id e.allDirty = true } + e.lastDict = d // Reset table to initial state { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go 
b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index cf8cad00dcf74..2fb6da112bcd0 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -1040,15 +1040,18 @@ func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { // ResetDict will reset and set a dictionary if not nil func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { allDirty := e.allDirty + dictChanged := d != e.lastDict e.fastEncoderDict.Reset(d, singleBlock) if d == nil { return } // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]tableEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1065,7 +1068,6 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id allDirty = true } // Reset table to initial state diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 9180a3a58203c..5e104f1a48288 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -805,9 +805,11 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || d != e.lastDict { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } if true { end := e.maxMatchOff + int32(len(d.content)) - 8 @@ -827,7 +829,7 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id + e.lastDict = d e.allDirty = true } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go 
b/vendor/github.com/klauspost/compress/zstd/encoder.go index 8f8223cd3a678..0f2a00a003328 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -131,6 +131,29 @@ func (e *Encoder) Reset(w io.Writer) { s.frameContentSize = 0 } +// ResetWithOptions will re-initialize the writer and apply the given options +// as a new, independent stream. +// Options are applied on top of the existing options. +// Some options cannot be changed on reset and will return an error. +func (e *Encoder) ResetWithOptions(w io.Writer, opts ...EOption) error { + e.o.resetOpt = true + defer func() { e.o.resetOpt = false }() + hadDict := e.o.dict != nil + for _, o := range opts { + if err := o(&e.o); err != nil { + return err + } + } + hasDict := e.o.dict != nil + if hadDict != hasDict { + // Dict presence changed — encoder type must be recreated. + e.state.encoder = nil + e.init = sync.Once{} + } + e.Reset(w) + return nil +} + // ResetContentSize will reset and set a content size for the next stream. // If the bytes written does not match the size given an error will be returned // when calling Close(). @@ -432,6 +455,12 @@ func (e *Encoder) Close() error { if s.encoder == nil { return nil } + if s.w == nil { + if len(s.filling) == 0 && !s.headerWritten && !s.eofWritten && s.nInput == 0 { + return nil + } + return errors.New("zstd: encoder has no writer") + } err := e.nextBlock(true) if err != nil { if errors.Is(s.err, ErrEncoderClosed) { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 20671dcb91d93..e217be0a17ac5 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -14,6 +14,7 @@ type EOption func(*encoderOptions) error // options retains accumulated state of multiple options. 
type encoderOptions struct { + resetOpt bool concurrent int level EncoderLevel single *bool @@ -41,6 +42,7 @@ func (o *encoderOptions) setDefault() { level: SpeedDefault, allLitEntropy: false, lowMem: false, + fullZero: true, } } @@ -71,19 +73,28 @@ func (o encoderOptions) encoder() encoder { // WithEncoderCRC will add CRC value to output. // Output will be 4 bytes larger. +// Can be changed with ResetWithOptions. func WithEncoderCRC(b bool) EOption { return func(o *encoderOptions) error { o.crc = b; return nil } } // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. +// The value supplied must be at least 0. +// When a value of 0 is provided GOMAXPROCS will be used. // For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. +// Cannot be changed with ResetWithOptions. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") + if n < 0 { + return errors.New("concurrency must at least 0") + } + if n == 0 { + n = runtime.GOMAXPROCS(0) + } + if o.resetOpt && n != o.concurrent { + return errors.New("WithEncoderConcurrency cannot be changed on Reset") } o.concurrent = n return nil @@ -95,6 +106,7 @@ func WithEncoderConcurrency(n int) EOption { // A larger value will enable better compression but allocate more memory and, // for above-default values, take considerably longer. // The default value is determined by the compression level and max 8MB. +// Cannot be changed with ResetWithOptions. 
func WithWindowSize(n int) EOption { return func(o *encoderOptions) error { switch { @@ -105,6 +117,9 @@ func WithWindowSize(n int) EOption { case (n & (n - 1)) != 0: return errors.New("window size must be a power of 2") } + if o.resetOpt && n != o.windowSize { + return errors.New("WithWindowSize cannot be changed on Reset") + } o.windowSize = n o.customWindow = true @@ -122,6 +137,7 @@ func WithWindowSize(n int) EOption { // n must be > 0 and <= 1GB, 1<<30 bytes. // The padded area will be filled with data from crypto/rand.Reader. // If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +// Can be changed with ResetWithOptions. func WithEncoderPadding(n int) EOption { return func(o *encoderOptions) error { if n <= 0 { @@ -215,12 +231,16 @@ func (e EncoderLevel) String() string { } // WithEncoderLevel specifies a predefined compression level. +// Cannot be changed with ResetWithOptions. func WithEncoderLevel(l EncoderLevel) EOption { return func(o *encoderOptions) error { switch { case l <= speedNotSet || l >= speedLast: return fmt.Errorf("unknown encoder level") } + if o.resetOpt && l != o.level { + return errors.New("WithEncoderLevel cannot be changed on Reset") + } o.level = l if !o.customWindow { switch o.level { @@ -248,6 +268,7 @@ func WithEncoderLevel(l EncoderLevel) EOption { // WithZeroFrames will encode 0 length input as full frames. // This can be needed for compatibility with zstandard usage, // but is not needed for this package. +// Can be changed with ResetWithOptions. func WithZeroFrames(b bool) EOption { return func(o *encoderOptions) error { o.fullZero = b @@ -259,6 +280,7 @@ func WithZeroFrames(b bool) EOption { // Disabling this will skip incompressible data faster, but in cases with no matches but // skewed character distribution compression is lost. // Default value depends on the compression level selected. +// Can be changed with ResetWithOptions. 
func WithAllLitEntropyCompression(b bool) EOption { return func(o *encoderOptions) error { o.customALEntropy = true @@ -270,6 +292,7 @@ func WithAllLitEntropyCompression(b bool) EOption { // WithNoEntropyCompression will always skip entropy compression of literals. // This can be useful if content has matches, but unlikely to benefit from entropy // compression. Usually the slight speed improvement is not worth enabling this. +// Can be changed with ResetWithOptions. func WithNoEntropyCompression(b bool) EOption { return func(o *encoderOptions) error { o.noEntropy = b @@ -287,6 +310,7 @@ func WithNoEntropyCompression(b bool) EOption { // This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. // If this is not specified, block encodes will automatically choose this based on the input size and the window size. // This setting has no effect on streamed encodes. +// Can be changed with ResetWithOptions. func WithSingleSegment(b bool) EOption { return func(o *encoderOptions) error { o.single = &b @@ -298,8 +322,12 @@ func WithSingleSegment(b bool) EOption { // slower encoding speed. // This will not change the window size which is the primary function for reducing // memory usage. See WithWindowSize. +// Cannot be changed with ResetWithOptions. func WithLowerEncoderMem(b bool) EOption { return func(o *encoderOptions) error { + if o.resetOpt && b != o.lowMem { + return errors.New("WithLowerEncoderMem cannot be changed on Reset") + } o.lowMem = b return nil } @@ -311,6 +339,7 @@ func WithLowerEncoderMem(b bool) EOption { // "zstd --train" from the Zstandard reference implementation. // // The encoder *may* choose to use no dictionary instead for certain payloads. +// Can be changed with ResetWithOptions. 
// // [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { @@ -328,6 +357,7 @@ func WithEncoderDict(dict []byte) EOption { // // The slice content may contain arbitrary data. It will be used as an initial // history. +// Can be changed with ResetWithOptions. func WithEncoderDictRaw(id uint32, content []byte) EOption { return func(o *encoderOptions) error { if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { @@ -337,3 +367,12 @@ func WithEncoderDictRaw(id uint32, content []byte) EOption { return nil } } + +// WithEncoderDictDelete clears the dictionary, so no dictionary will be used. +// Should be used with ResetWithOptions. +func WithEncoderDictDelete() EOption { + return func(o *encoderOptions) error { + o.dict = nil + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go index d04a829b0a0e7..b8c8607b5dfb4 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 8adfebb029798..2138f8091a998 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 0be16cefc7f4a..9576426e6864c 100644 --- 
a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,5 +1,4 @@ //go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go index f41932b7a4f75..1ed18927f95e8 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go index bea1779e973ec..379746c96ca87 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. 
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 1f8c3cec28c84..18c3703ddc952 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 7cec2197cd9c4..516cd9b070137 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/github.com/moby/spdystream/NOTICE b/vendor/github.com/moby/spdystream/NOTICE index b9b11c9ab7b4a..24e2e2aa32402 100644 --- a/vendor/github.com/moby/spdystream/NOTICE +++ b/vendor/github.com/moby/spdystream/NOTICE @@ -3,3 +3,15 @@ Copyright 2014-2021 Docker Inc. This product includes software developed at Docker Inc. (https://www.docker.com/). + +SPDY implementation (spdy/) + +The spdy directory contains code derived from the Go project (golang.org/x/net). + +Copyright 2009-2013 The Go Authors. +Licensed under the BSD 3-Clause License. + +Modifications Copyright 2014-2021 Docker Inc. + +The BSD license text and Go patent grant are included in +spdy/LICENSE and spdy/PATENTS. diff --git a/vendor/github.com/moby/spdystream/connection.go b/vendor/github.com/moby/spdystream/connection.go index 1394d0ad4c22c..69ce4777ea69b 100644 --- a/vendor/github.com/moby/spdystream/connection.go +++ b/vendor/github.com/moby/spdystream/connection.go @@ -224,7 +224,13 @@ type Connection struct { // NewConnection creates a new spdy connection from an existing // network connection. 
func NewConnection(conn net.Conn, server bool) (*Connection, error) { - framer, framerErr := spdy.NewFramer(conn, conn) + return NewConnectionWithOptions(conn, server) +} + +// NewConnectionWithOptions creates a new spdy connection and applies frame +// parsing limits via options. +func NewConnectionWithOptions(conn net.Conn, server bool, opts ...spdy.FramerOption) (*Connection, error) { + framer, framerErr := spdy.NewFramerWithOptions(conn, conn, opts...) if framerErr != nil { return nil, framerErr } @@ -350,6 +356,9 @@ Loop: } else { debugMessage("(%p) EOF received", s) } + if spdyErr, ok := err.(*spdy.Error); ok && spdyErr.Err == spdy.InvalidControlFrame { + _ = s.conn.Close() + } break } var priority uint8 diff --git a/vendor/github.com/moby/spdystream/spdy/LICENSE b/vendor/github.com/moby/spdystream/spdy/LICENSE new file mode 100644 index 0000000000000..6a66aea5eafe0 --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/moby/spdystream/spdy/PATENTS b/vendor/github.com/moby/spdystream/spdy/PATENTS new file mode 100644 index 0000000000000..733099041f84f --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/moby/spdystream/spdy/dictionary.go b/vendor/github.com/moby/spdystream/spdy/dictionary.go index 392232f174633..5a5ff0e14cd3b 100644 --- a/vendor/github.com/moby/spdystream/spdy/dictionary.go +++ b/vendor/github.com/moby/spdystream/spdy/dictionary.go @@ -1,19 +1,3 @@ -/* - Copyright 2014-2021 Docker Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/moby/spdystream/spdy/options.go b/vendor/github.com/moby/spdystream/spdy/options.go new file mode 100644 index 0000000000000..ec03e0b9a913f --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/options.go @@ -0,0 +1,25 @@ +package spdy + +// FramerOption allows callers to customize frame parsing limits. 
+type FramerOption func(*Framer) + +// WithMaxControlFramePayloadSize sets the control-frame payload limit. +func WithMaxControlFramePayloadSize(size uint32) FramerOption { + return func(f *Framer) { + f.maxFrameLength = size + } +} + +// WithMaxHeaderFieldSize sets the per-header name/value size limit. +func WithMaxHeaderFieldSize(size uint32) FramerOption { + return func(f *Framer) { + f.maxHeaderFieldSize = size + } +} + +// WithMaxHeaderCount sets the maximum number of headers in a frame. +func WithMaxHeaderCount(count uint32) FramerOption { + return func(f *Framer) { + f.maxHeaderCount = count + } +} diff --git a/vendor/github.com/moby/spdystream/spdy/read.go b/vendor/github.com/moby/spdystream/spdy/read.go index 75ea045b8e3ae..2abb69433deec 100644 --- a/vendor/github.com/moby/spdystream/spdy/read.go +++ b/vendor/github.com/moby/spdystream/spdy/read.go @@ -1,19 +1,3 @@ -/* - Copyright 2014-2021 Docker Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -24,6 +8,7 @@ import ( "compress/zlib" "encoding/binary" "io" + "io/ioutil" "net/http" "strings" ) @@ -59,6 +44,11 @@ func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { return err } + // Each setting is 8 bytes (4-byte id + 4-byte value). 
+ // Payload is 4 bytes for numSettings + numSettings*8. + if h.length < 4 || numSettings > (h.length-4)/8 { + return &Error{InvalidControlFrame, 0} + } frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) for i := uint32(0); i < numSettings; i++ { if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { @@ -177,8 +167,19 @@ func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) ( if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { return nil, err } + maxControlFramePayload := uint32(MaxDataLength) + if f.maxFrameLength > 0 { + maxControlFramePayload = f.maxFrameLength + } + flags := ControlFlags((length & 0xff000000) >> 24) length &= 0xffffff + if length > maxControlFramePayload { + if _, err := io.CopyN(ioutil.Discard, f.r, int64(length)); err != nil { + return nil, err + } + return nil, &Error{InvalidControlFrame, 0} + } header := ControlFrameHeader{version, frameType, flags, length} cframe, err := newControlFrame(frameType) if err != nil { @@ -190,11 +191,22 @@ func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) ( return cframe, nil } -func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { +func (f *Framer) parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { var numHeaders uint32 if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { return nil, err } + maxHeaders := defaultMaxHeaderCount + if f.maxHeaderCount > 0 { + maxHeaders = f.maxHeaderCount + } + if numHeaders > maxHeaders { + return nil, &Error{InvalidControlFrame, streamId} + } + maxFieldSize := defaultMaxHeaderFieldSize + if f.maxHeaderFieldSize > 0 { + maxFieldSize = f.maxHeaderFieldSize + } var e error h := make(http.Header, int(numHeaders)) for i := 0; i < int(numHeaders); i++ { @@ -202,6 +214,9 @@ func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) if err := binary.Read(r, binary.BigEndian, &length); 
err != nil { return nil, err } + if length > maxFieldSize { + return nil, &Error{InvalidControlFrame, streamId} + } nameBytes := make([]byte, length) if _, err := io.ReadFull(r, nameBytes); err != nil { return nil, err @@ -217,6 +232,9 @@ func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) if err := binary.Read(r, binary.BigEndian, &length); err != nil { return nil, err } + if length > maxFieldSize { + return nil, &Error{InvalidControlFrame, streamId} + } value := make([]byte, length) if _, err := io.ReadFull(r, value); err != nil { return nil, err @@ -256,7 +274,7 @@ func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) } reader = f.headerDecompressor } - frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + frame.Headers, err = f.parseHeaderValueBlock(reader, frame.StreamId) if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { err = &Error{WrongCompressedPayloadSize, 0} } @@ -288,7 +306,7 @@ func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) e } reader = f.headerDecompressor } - frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + frame.Headers, err = f.parseHeaderValueBlock(reader, frame.StreamId) if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { err = &Error{WrongCompressedPayloadSize, 0} } @@ -320,7 +338,7 @@ func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) err } reader = f.headerDecompressor } - frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + frame.Headers, err = f.parseHeaderValueBlock(reader, frame.StreamId) if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { err = &Error{WrongCompressedPayloadSize, 0} } diff --git a/vendor/github.com/moby/spdystream/spdy/types.go b/vendor/github.com/moby/spdystream/spdy/types.go index 
a254a43ab9d60..a5528618ca88d 100644 --- a/vendor/github.com/moby/spdystream/spdy/types.go +++ b/vendor/github.com/moby/spdystream/spdy/types.go @@ -1,23 +1,9 @@ -/* - Copyright 2014-2021 Docker Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Modifications Copyright 2014-2021 Docker Inc. + // Package spdy implements the SPDY protocol (currently SPDY/3), described in // http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. package spdy @@ -63,8 +49,20 @@ const ( ) // MaxDataLength is the maximum number of bytes that can be stored in one frame. +// +// SPDY frame headers encode the payload length using a 24-bit field, +// so the maximum representable size for both data and control frames +// is 2^24-1 bytes. +// +// See the SPDY/3 specification, "Frame Format": +// https://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3-1/ const MaxDataLength = 1<<24 - 1 +const ( + defaultMaxHeaderFieldSize uint32 = 1 << 20 + defaultMaxHeaderCount uint32 = 1000 +) + // headerValueSepator separates multiple header values. const headerValueSeparator = "\x00" @@ -269,6 +267,10 @@ type Framer struct { r io.Reader headerReader io.LimitedReader headerDecompressor io.ReadCloser + + maxFrameLength uint32 // overrides the default frame payload length limit. 
+ maxHeaderFieldSize uint32 // overrides the default per-header name/value length limit. + maxHeaderCount uint32 // overrides the default header count limit. } // NewFramer allocates a new Framer for a given SPDY connection, represented by @@ -276,6 +278,16 @@ type Framer struct { // from/to the Reader and Writer, so the caller should pass in an appropriately // buffered implementation to optimize performance. func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { + return newFramer(w, r) +} + +// NewFramerWithOptions allocates a new Framer for a given SPDY connection and +// applies frame parsing limits via options. +func NewFramerWithOptions(w io.Writer, r io.Reader, opts ...FramerOption) (*Framer, error) { + return newFramer(w, r, opts...) +} + +func newFramer(w io.Writer, r io.Reader, opts ...FramerOption) (*Framer, error) { compressBuf := new(bytes.Buffer) compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) if err != nil { @@ -287,5 +299,10 @@ func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { headerCompressor: compressor, r: r, } + for _, opt := range opts { + if opt != nil { + opt(framer) + } + } return framer, nil } diff --git a/vendor/github.com/moby/spdystream/spdy/write.go b/vendor/github.com/moby/spdystream/spdy/write.go index ab6d91f3b8247..75084d35d9552 100644 --- a/vendor/github.com/moby/spdystream/spdy/write.go +++ b/vendor/github.com/moby/spdystream/spdy/write.go @@ -1,19 +1,3 @@ -/* - Copyright 2014-2021 Docker Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -23,6 +7,7 @@ package spdy import ( "encoding/binary" "io" + "math" "net/http" "strings" ) @@ -63,13 +48,21 @@ func (frame *RstStreamFrame) write(f *Framer) (err error) { func (frame *SettingsFrame) write(f *Framer) (err error) { frame.CFHeader.version = Version frame.CFHeader.frameType = TypeSettings - frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) + payloadLen := len(frame.FlagIdValues)*8 + 4 + if payloadLen > MaxDataLength { + return &Error{InvalidControlFrame, 0} + } + frame.CFHeader.length = uint32(payloadLen) // Serialize frame to Writer. if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { return } - if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { + n := len(frame.FlagIdValues) + if uint64(n) > math.MaxUint32 { + return &Error{InvalidControlFrame, 0} + } + if err = binary.Write(f.w, binary.BigEndian, uint32(n)); err != nil { return } for _, flagIdValue := range frame.FlagIdValues { @@ -170,29 +163,41 @@ func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { n = 0 - if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + numHeaders := len(h) + if numHeaders > math.MaxInt32 { + return n, &Error{InvalidControlFrame, 0} + } + if err = binary.Write(w, binary.BigEndian, uint32(numHeaders)); err != nil { return } - n += 2 + n += 4 for name, values := range h { - if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + nameLen := len(name) + if nameLen > math.MaxInt32 { + return n, &Error{InvalidControlFrame, 0} + } + if err = binary.Write(w, binary.BigEndian, uint32(nameLen)); err != nil { return 
} - n += 2 + n += 4 name = strings.ToLower(name) if _, err = io.WriteString(w, name); err != nil { return } - n += len(name) + n += nameLen v := strings.Join(values, headerValueSeparator) - if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + vLen := len(v) + if vLen > math.MaxInt32 { + return n, &Error{InvalidControlFrame, 0} + } + if err = binary.Write(w, binary.BigEndian, uint32(vLen)); err != nil { return } - n += 2 + n += 4 if _, err = io.WriteString(w, v); err != nil { return } - n += len(v) + n += vLen } return } @@ -216,7 +221,11 @@ func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { // Set ControlFrameHeader. frame.CFHeader.version = Version frame.CFHeader.frameType = TypeSynStream - frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) + hLen := len(f.headerBuf.Bytes()) + 10 + if hLen > MaxDataLength { + return &Error{InvalidControlFrame, 0} + } + frame.CFHeader.length = uint32(hLen) // Serialize frame to Writer. if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { @@ -260,7 +269,11 @@ func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { // Set ControlFrameHeader. frame.CFHeader.version = Version frame.CFHeader.frameType = TypeSynReply - frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + hLen := len(f.headerBuf.Bytes()) + 4 + if hLen > MaxDataLength { + return &Error{InvalidControlFrame, 0} + } + frame.CFHeader.length = uint32(hLen) // Serialize frame to Writer. if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { @@ -295,7 +308,11 @@ func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { // Set ControlFrameHeader. frame.CFHeader.version = Version frame.CFHeader.frameType = TypeHeaders - frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + hLen := len(f.headerBuf.Bytes()) + 4 + if hLen > MaxDataLength { + return &Error{InvalidControlFrame, 0} + } + frame.CFHeader.length = uint32(hLen) // Serialize frame to Writer. 
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { @@ -323,7 +340,11 @@ func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { return } - flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data)) + dLen := len(frame.Data) + if dLen > MaxDataLength { + return &Error{InvalidDataFrame, frame.StreamId} + } + flagsAndLength := uint32(frame.Flags)<<24 | uint32(dLen) if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { return } diff --git a/vendor/modules.txt b/vendor/modules.txt index e6575714d800b..d3eeedac76ac4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -18,7 +18,7 @@ github.com/Microsoft/go-winio/pkg/fs github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/pkg/security github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.14.0-rc.1 +# github.com/Microsoft/hcsshim v0.14.1 ## explicit; go 1.23.0 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options @@ -360,8 +360,8 @@ github.com/intel/goresctrl/pkg/utils # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.18.1 -## explicit; go 1.23 +# github.com/klauspost/compress v1.18.5 +## explicit; go 1.24 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -388,7 +388,7 @@ github.com/mistifyio/go-zfs/v3 # github.com/moby/locker v1.0.1 ## explicit; go 1.13 github.com/moby/locker -# github.com/moby/spdystream v0.5.0 +# github.com/moby/spdystream v0.5.1 ## explicit; go 1.13 github.com/moby/spdystream github.com/moby/spdystream/spdy diff --git a/version/version.go b/version/version.go index 14224f77b68d2..cf4cb8818a00c 100644 --- a/version/version.go +++ b/version/version.go @@ -24,7 +24,7 @@ var ( Package = "github.com/containerd/containerd/v2" // Version holds the complete version number. 
Filled in at linking time. - Version = "2.2.2+unknown" + Version = "2.2.3+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time.