bug: Value for x-amz-checksum-crc32 header is invalid when uploading to S3 via signed URL using AWS SDK v3 for JS #12169

Closed
@this-self

Description

Is there an existing issue for this?

  • I have searched the existing issues

Current Behavior

LocalStack throws an error when I try to upload a file using a signed URL generated by AWS SDK v3 for JavaScript.

A shortened excerpt of the relevant code (the full script is under "Steps To Reproduce"):

  const putObjectCommand = new PutObjectCommand({
    Bucket: AWS_S3_BUCKET_NAME,
    Key: testFileName,
  });

  const signedUrl = await getSignedUrl(s3Client, putObjectCommand, { expiresIn: 3600 });

  const response = await fetch(signedUrl, {
    method: 'PUT',
    body: testFileContent,
    headers: {
      'Content-Type': 'text/plain',
    },
  });

  if (!response.ok) {
    const responseBody = await response.text();
    console.error('Response body:', responseBody);
    throw new Error(`Upload failed: ${response.status} ${response.statusText}`);
  }

The error LocalStack returns:

<?xml version='1.0' encoding='utf-8'?>
<Error><Code>InvalidRequest</Code><Message>Value for x-amz-checksum-crc32 header is invalid.</Message><RequestId>5cade7dd-e5f4-4b8c-88c5-65f34ae39209</RequestId></Error>

Expected Behavior

The file should be uploaded without errors.

The issue happens only when I use AWS SDK v3 + LocalStack (v4 and v3).
It works perfectly when I use AWS SDK v2 + LocalStack (v4 and v3).
It works perfectly when I use the same code with AWS SDK v3 + real AWS.
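
For debugging, it can help to dump the query parameters the presigner baked into the URL. This is a minimal sketch reusing the `signedUrl` variable from the excerpt above; on affected SDK versions, checksum-related parameters (such as ones tied to x-amz-checksum-crc32) may show up here:

  // Sketch: list every SigV4 query parameter of the presigned URL,
  // e.g. X-Amz-SignedHeaders and any checksum-related entries.
  const url = new URL(signedUrl);
  for (const [name, value] of url.searchParams.entries()) {
    console.log(`${name} = ${value}`);
  }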

Also, I found a temporary workaround:

  const putObjectCommand = new PutObjectCommand({
    Bucket: AWS_S3_BUCKET_NAME,
    Key: testFileName,
    ChecksumCRC32: '', // <<-- Setting an empty checksum makes the upload work, but it should not be necessary.
  });
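
A less surprising alternative, assuming a recent `@aws-sdk/client-s3` (the `requestChecksumCalculation` client option shipped with the flexible-checksums defaults around v3.729.0), would be to tell the client to compute checksums only when an operation strictly requires them, so the presigner never involves x-amz-checksum-crc32:

  // Sketch (assumes SDK >= ~3.729.0): opt out of automatic checksum
  // calculation instead of passing an empty ChecksumCRC32.
  const s3Client = new S3Client({
    region: 'us-west-2',
    endpoint: 'http://localhost:4566',
    forcePathStyle: true,
    credentials: { accessKeyId: 'foobar', secretAccessKey: 'foobar' },
    requestChecksumCalculation: 'WHEN_REQUIRED',
  });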

How are you starting LocalStack?

With a docker-compose file

Steps To Reproduce

To simplify reproduction, I've created a repo demonstrating the issue: https://github.com/ifree92/localstack-s3-upload-issue-demo

Below is the full code I'm running to reproduce it.

docker-compose.yml

networks:
  experiments_aws:
    name: experiments_aws
    driver: bridge

services:
  localstack:
    image: localstack/localstack:4
    ports:
      - '4566:4566'
      - '4510-4559:4510-4559'
    environment:
      - DEBUG=0
      - DOCKER_HOST=unix:///var/run/docker.sock
      - LAMBDA_EXECUTOR=docker-reuse
      - DISABLE_CORS_CHECKS=1
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - experiments_aws
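
Before applying Terraform or running the script, LocalStack's readiness can be verified through its standard health endpoint; a minimal sketch:

// Sketch: confirm LocalStack is up by querying its health endpoint.
async function checkLocalStack(): Promise<void> {
  const res = await fetch('http://localhost:4566/_localstack/health');
  if (!res.ok) throw new Error(`LocalStack not ready: ${res.status}`);
  console.log(await res.json()); // map of service name -> status
}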

main.tf

The Terraform file I'm using to create the resources:

terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "5.81.0"
    }
  }
}

provider "aws" {
  access_key = "foobar"
  region     = "us-west-2"
  secret_key = "foobar"

  # only required for non virtual hosted-style endpoint use case.
  # https://registry.terraform.io/providers/hashicorp/aws/latest/docs#s3_use_path_style
  s3_use_path_style           = true
  skip_credentials_validation = true
  skip_metadata_api_check     = true
  skip_requesting_account_id  = true

  endpoints {
    apigateway     = "http://localhost:4566"
    cloudformation = "http://localhost:4566"
    cloudwatch     = "http://localhost:4566"
    dynamodb       = "http://localhost:4566"
    es             = "http://localhost:4566"
    firehose       = "http://localhost:4566"
    iam            = "http://localhost:4566"
    kinesis        = "http://localhost:4566"
    kms            = "http://localhost:4566"
    lambda         = "http://localhost:4566"
    route53        = "http://localhost:4566"
    redshift       = "http://localhost:4566"
    s3             = "http://localhost:4566"
    secretsmanager = "http://localhost:4566"
    ses            = "http://localhost:4566"
    sns            = "http://localhost:4566"
    sqs            = "http://localhost:4566"
    ssm            = "http://localhost:4566"
    stepfunctions  = "http://localhost:4566"
    sts            = "http://localhost:4566"
  }
}

# ====================== S3 ======================

resource "aws_s3_bucket" "system-assets" {
  bucket = "system-assets"
}

resource "aws_s3_bucket_cors_configuration" "system-assets-cors" {
  bucket = aws_s3_bucket.system-assets.id

  cors_rule {
    allowed_headers = ["*"]
    allowed_methods = ["GET", "PUT", "POST", "DELETE", "HEAD"]
    allowed_origins = [
      "http://localhost:3000",
      "http://localstack:3000",
      "http://127.0.0.1:3000",
    ]
    expose_headers  = ["ETag"]
    max_age_seconds = 3000
  }
}

output "s3-bucket_system-assets" {
  value = aws_s3_bucket.system-assets.bucket
}

app-s3.ts

import { PutObjectCommand, S3Client } from '@aws-sdk/client-s3';
import { getSignedUrl } from '@aws-sdk/s3-request-presigner';

const { AWS_ENDPOINT, AWS_S3_BUCKET_NAME } = process.env;

function provideS3Client() {
  return AWS_ENDPOINT
    ? new S3Client({
        region: 'us-west-2',
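        // NOTE: the endpoint is hardcoded to the host-reachable address; AWS_ENDPOINT only toggles LocalStack mode here.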
        endpoint: 'http://localhost:4566',
        forcePathStyle: true,
        useAccelerateEndpoint: false,
        credentials: {
          accessKeyId: 'foobar',
          secretAccessKey: 'foobar',
        },
      })
    : new S3Client({ useAccelerateEndpoint: false });
}

async function main() {
  const s3Client = provideS3Client();

  const testFileContent = `Content ${Math.random()}`;
  const testFileName = `test-file_${new Date().toISOString()}.txt`;

  const putObjectCommand = new PutObjectCommand({
    Bucket: AWS_S3_BUCKET_NAME,
    Key: testFileName,
  });

  const signedUrl = await getSignedUrl(s3Client, putObjectCommand, { expiresIn: 3600 });

  console.log('signedUrl ->', signedUrl);

  // Upload the file content using the signed URL
  const response = await fetch(signedUrl, {
    method: 'PUT',
    body: testFileContent,
    headers: {
      'Content-Type': 'text/plain',
    },
  });

  if (!response.ok) {
    const responseBody = await response.text();
    console.error('Response body:', responseBody);
    throw new Error(`Upload failed: ${response.status} ${response.statusText}`);
  }

  console.log(`Successfully uploaded ${testFileName} to S3`);
}

main().catch(console.error);

.local.env

AWS_ACCESS_KEY_ID=foobar
AWS_SECRET_ACCESS_KEY=foobar
AWS_REGION=us-west-2
AWS_ENDPOINT=http://localstack:4566
AWS_S3_BUCKET_NAME=system-assets

package.json

{
  "name": "aws-sdk-experiments",
  "version": "1.0.0",
  "description": "",
  "main": "index.js",
  "scripts": {
    "start": "ts-node src/app-s3.ts"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "dependencies": {
    "@aws-sdk/client-dynamodb": "^3.576.0",
    "@aws-sdk/client-s3": "^3.204.0",
    "@aws-sdk/credential-providers": "^3.204.0",
    "@aws-sdk/lib-dynamodb": "^3.576.0",
    "@aws-sdk/s3-request-presigner": "^3.732.0",
    "aws-sdk": "^2.1692.0",
    "express": "^4.21.2",
    "sqs-consumer": "^3.8.0",
    "uuid": "^9.0.1"
  },
  "devDependencies": {
    "@types/express": "^5.0.0",
    "@types/node": "^18.11.9",
    "@types/sqs-consumer": "^5.0.0",
    "@types/uuid": "^9.0.8",
    "nodemon": "^3.1.9",
    "ts-node": "^10.9.2"
  }
}

How to run:

$ npm install
$ docker compose up -d
$ terraform init && terraform apply -auto-approve
$ set -a && source .local.env && set +a && npm start

Environment

- OS: macOS 15.2
- Docker version 27.4.0, build bde2b89
- LocalStack:
  LocalStack version: 4
  LocalStack Docker image sha: https://hub.docker.com/layers/localstack/localstack/4.0/images/sha256-8b1d40975ca01d830d7bb69131e68b9fdfbce9eee1c14405ee21457c869b5904
  LocalStack build date: ^^
  LocalStack build git hash: ^^

Anything else?

No response

Labels

aws:s3 (Amazon Simple Storage Service), status: resolved/fixed (Resolved with a fix or an implementation), type: bug (Bug report)
