From 454fdac5267fd28c98a9daf69d39c944b814e3ee Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Sun, 8 Feb 2026 16:05:55 +0000 Subject: [PATCH 1/3] Merge R2 upload & multipart docs Co-authored-by: elithrar --- public/__redirects | 1 + src/content/docs/r2/api/error-codes.mdx | 2 +- .../docs/r2/objects/multipart-objects.mdx | 40 - .../docs/r2/objects/upload-objects.mdx | 706 +++++++++++++++++- .../partials/r2/migrator-etag-caveat.mdx | 2 +- src/content/release-notes/r2.yaml | 2 +- 6 files changed, 674 insertions(+), 79 deletions(-) delete mode 100644 src/content/docs/r2/objects/multipart-objects.mdx diff --git a/public/__redirects b/public/__redirects index 2462118145ab5fa..bf22c14696ca4f1 100644 --- a/public/__redirects +++ b/public/__redirects @@ -1585,6 +1585,7 @@ /r2/examples/cloudflare-access/ /r2/tutorials/cloudflare-access/ 301 /r2/examples/upload-logs-event-notifications/ /r2/tutorials/upload-logs-event-notifications/ 301 /r2/examples/demo-worker/ /r2/api/workers/workers-api-usage/ 301 +/r2/objects/multipart-objects/ /r2/objects/upload-objects/ 301 /r2/sql/platform/troubleshooting/ /r2-sql/troubleshooting/ 301 # r2 sql diff --git a/src/content/docs/r2/api/error-codes.mdx b/src/content/docs/r2/api/error-codes.mdx index 76164f02f02e141..0a411f4d047a985 100644 --- a/src/content/docs/r2/api/error-codes.mdx +++ b/src/content/docs/r2/api/error-codes.mdx @@ -58,7 +58,7 @@ For the **S3-compatible API**, errors are returned as XML in the response body: |------------|------|-------------|---------|-----------------| | 10007 | NoSuchKey | 404 | The specified object key does not exist. For the [Workers API](/r2/api/workers/workers-api-reference/), `get()` and `head()` return `null` instead of throwing. | Verify the object key is correct and the object has not been deleted. | | 10020 | InvalidObjectName | 400 | Object key contains invalid characters or is too long. | Use valid UTF-8 characters. Maximum key length is 1024 bytes. | -| 100100 | EntityTooLarge | 400 | Object exceeds maximum size (5 GiB for single upload, 5 TiB for multipart). | Use [multipart upload](/r2/objects/multipart-objects/) for objects larger than 5 GiB. Maximum object size is 5 TiB. | +| 100100 | EntityTooLarge | 400 | Object exceeds maximum size (5 GiB for single upload, 5 TiB for multipart). | Use [multipart upload](/r2/objects/upload-objects/#multipart-upload) for objects larger than 5 GiB. Maximum object size is 5 TiB. | | 10012 | MetadataTooLarge | 400 | Custom metadata exceeds the 8,192 byte limit. | Reduce custom metadata size. Maximum is 8,192 bytes total for all custom metadata. | | 10069 | ObjectLockedByBucketPolicy | 403 | Object is protected by a bucket lock rule and cannot be modified or deleted. | Wait for the retention period to expire. Refer to [bucket locks](/r2/buckets/bucket-locks/). | diff --git a/src/content/docs/r2/objects/multipart-objects.mdx b/src/content/docs/r2/objects/multipart-objects.mdx deleted file mode 100644 index f0c0f4043f3164a..000000000000000 --- a/src/content/docs/r2/objects/multipart-objects.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Multipart upload -pcx_content_type: concept -sidebar: - order: 3 - ---- - -import { Render } from "~/components"; - -R2 supports [S3 API's Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html) with some limitations. - -## Limitations - -Object part sizes must be at least 5MiB but no larger than 5GiB. All parts except the last one must be the same size. 
The last part has no minimum size, but must be the same or smaller than the other parts. - -The maximum number of parts is 10,000. - -Most S3 clients conform to these expectations. - -## Lifecycles - -The default object lifecycle policy for multipart uploads is that incomplete uploads will be automatically aborted after 7 days. This can be changed by [configuring a custom lifecycle policy](/r2/buckets/object-lifecycles/). - -## ETags - -The ETags for objects uploaded via multipart are different than those uploaded with PutObject. - -For uploads created after June 21, 2023, R2's multipart ETags now mimic the behavior of S3. The ETag of each individual part is the MD5 hash of the contents of the part. The ETag of the completed multipart object is the hash of the MD5 sums of each of the constituent parts concatenated together followed by a hyphen and the number of parts uploaded. - -For example, consider a multipart upload with two parts. If they have the ETags `bce6bf66aeb76c7040fdd5f4eccb78e6` and `8165449fc15bbf43d3b674595cbcc406` respectively, the ETag of the completed multipart upload will be `f77dc0eecdebcd774a2a22cb393ad2ff-2`. - -Note that the binary MD5 sums themselves are concatenated and then summed, not the hexadecimal representation. For example, in order to validate the above example on the command line, you would need do the following: - -``` -echo -n $(echo -n bce6bf66aeb76c7040fdd5f4eccb78e6 | xxd -r -p -)\ -$(echo -n 8165449fc15bbf43d3b674595cbcc406 | xxd -r -p -) | md5sum -``` - - \ No newline at end of file diff --git a/src/content/docs/r2/objects/upload-objects.mdx b/src/content/docs/r2/objects/upload-objects.mdx index f48013512a6a7ad..b33766c05b92816 100644 --- a/src/content/docs/r2/objects/upload-objects.mdx +++ b/src/content/docs/r2/objects/upload-objects.mdx @@ -5,13 +5,24 @@ sidebar: order: 2 --- -import { Steps, Tabs, TabItem, Render, DashButton } from "~/components" +import { Steps, Tabs, TabItem, Render, DashButton, LinkCard } from "~/components" -There are several ways to upload objects to R2: -1. Using the [S3 API](/r2/api/s3/api/), which is supported by a wide range of tools and libraries (recommended) -2. Directly from within a Worker using R2's [Workers API](/r2/api/workers/) -3. Using the [Cloudflare dashboard](https://dash.cloudflare.com/?to=/:account/r2/overview) -4. Using the [Wrangler](/r2/reference/wrangler-commands/) command-line (`wrangler r2`) +There are several ways to upload objects to R2. Which approach you choose depends on the size of your objects and your performance requirements. + +## Choose an upload method + +| | Single upload (`PUT`) | Multipart upload | +|---|---|---| +| **Best for** | Small to medium files (under ~100 MB) | Large files, or when you need parallelism and resumability | +| **Maximum object size** | 5 GiB | 5 TiB (up to 10,000 parts) | +| **Part size** | N/A | 5 MiB – 5 GiB per part | +| **Resumable** | No — must restart the entire upload | Yes — only failed parts need to be retried | +| **Parallel upload** | No | Yes — parts can be uploaded concurrently | +| **When to use** | Quick, simple uploads of small objects | Video, backups, datasets, or any file where reliability matters | + +:::note +Most S3-compatible SDKs and tools (such as `rclone`) automatically choose multipart upload for large files based on a configurable threshold. You do not typically need to implement multipart logic yourself when using the S3 API. 
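+For example, `rclone` switches to multipart uploads above its `--s3-upload-cutoff` threshold (200 MiB by default), and `boto3`'s `upload_file` does the same once a file exceeds its configured `multipart_threshold`.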
+::: ## Upload via dashboard @@ -30,82 +41,660 @@ You will receive a confirmation message after a successful upload. ## Upload via Workers API -Use R2 [bindings](/workers/runtime-apis/bindings/) in Workers to upload objects server-side: +Use R2 [bindings](/workers/runtime-apis/bindings/) in Workers to upload objects server-side. Refer to [Use R2 from Workers](/r2/api/workers/workers-api-usage/) for instructions on setting up an R2 binding. + +### Single upload + +Use `put()` to upload an object in a single request. This is the simplest approach for small to medium objects. -```ts ins={3} +```ts export default { - async fetch(request: Request, env: Env, ctx: ExecutionContext) { - await env.MY_BUCKET.put("image.png", request.body); - return new Response("Uploaded"); + async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise { + // Upload the request body as an object + const object = await env.MY_BUCKET.put("image.png", request.body, { + httpMetadata: { + contentType: "image/png", + }, + }); + + return Response.json({ + key: object?.key, + size: object?.size, + etag: object?.etag, + }); }, } satisfies ExportedHandler; ``` -For complete documentation, refer to [Workers API](/r2/api/workers/workers-api-usage/). +### Multipart upload + +Use `createMultipartUpload()` and `resumeMultipartUpload()` for large files or when you need to upload parts in parallel. Each part must be at least 5 MiB (except the last part). + +```ts +export default { + async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise { + const key = "large-file.bin"; + + // Create a new multipart upload + const multipartUpload = await env.MY_BUCKET.createMultipartUpload(key); + + try { + // Upload parts — in a real application, you would split the + // file and potentially upload parts concurrently from the client + const part1 = await multipartUpload.uploadPart(1, firstChunk); + const part2 = await multipartUpload.uploadPart(2, secondChunk); + + // Complete the upload with all parts + const object = await multipartUpload.complete([part1, part2]); + + return Response.json({ + key: object.key, + etag: object.httpEtag, + }); + } catch (err) { + // Abort on failure so incomplete uploads do not count against storage + await multipartUpload.abort(); + return new Response(String(err), { status: 500 }); + } + }, +} satisfies ExportedHandler; +``` + +In most cases, the multipart state (the `uploadId` and uploaded part ETags) is tracked by the client sending requests to your Worker. The following example exposes an HTTP API that a client application can call to create, upload parts for, and complete a multipart upload: + +```ts +export default { + async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise { + const url = new URL(request.url); + const key = url.pathname.slice(1); + const action = url.searchParams.get("action"); + + if (!key || !action) { + return new Response("Missing key or action", { status: 400 }); + } + + switch (action) { + // Step 1: Client calls POST /?action=mpu-create + case "mpu-create": { + const upload = await env.MY_BUCKET.createMultipartUpload(key); + return Response.json({ key: upload.key, uploadId: upload.uploadId }); + } + + // Step 2: Client calls PUT /?action=mpu-uploadpart&uploadId=...&partNumber=... 
+ case "mpu-uploadpart": { + const uploadId = url.searchParams.get("uploadId"); + const partNumber = Number(url.searchParams.get("partNumber")); + if (!uploadId || !partNumber || !request.body) { + return new Response("Missing uploadId, partNumber, or body", { status: 400 }); + } + const upload = env.MY_BUCKET.resumeMultipartUpload(key, uploadId); + try { + const part = await upload.uploadPart(partNumber, request.body); + return Response.json(part); + } catch (err) { + return new Response(String(err), { status: 400 }); + } + } + + // Step 3: Client calls POST /?action=mpu-complete&uploadId=... + case "mpu-complete": { + const uploadId = url.searchParams.get("uploadId"); + if (!uploadId) { + return new Response("Missing uploadId", { status: 400 }); + } + const upload = env.MY_BUCKET.resumeMultipartUpload(key, uploadId); + const body = await request.json<{ parts: R2UploadedPart[] }>(); + try { + const object = await upload.complete(body.parts); + return new Response(null, { + headers: { etag: object.httpEtag }, + }); + } catch (err) { + return new Response(String(err), { status: 400 }); + } + } + + // Abort an in-progress upload + case "mpu-abort": { + const uploadId = url.searchParams.get("uploadId"); + if (!uploadId) { + return new Response("Missing uploadId", { status: 400 }); + } + const upload = env.MY_BUCKET.resumeMultipartUpload(key, uploadId); + try { + await upload.abort(); + } catch (err) { + return new Response(String(err), { status: 400 }); + } + return new Response(null, { status: 204 }); + } + + default: + return new Response(`Unknown action: ${action}`, { status: 400 }); + } + }, +} satisfies ExportedHandler; +``` + +For the complete Workers API reference, refer to [Workers API reference](/r2/api/workers/workers-api-reference/). + +### Presigned URLs (Workers) + +When you need clients (browsers, mobile apps) to upload directly to R2 without proxying through your Worker, generate a presigned URL server-side and hand it to the client: + +```ts +import { AwsClient } from "aws4fetch"; + +const r2 = new AwsClient({ + accessKeyId: "", + secretAccessKey: "", +}); + +// Generate a presigned PUT URL valid for 1 hour +const url = new URL("https://.r2.cloudflarestorage.com/my-bucket/image.png"); +url.searchParams.set("X-Amz-Expires", "3600"); + +const signed = await r2.sign( + new Request(url, { method: "PUT" }), + { aws: { signQuery: true } }, +); + +// Return signed.url to the client — they can PUT directly to R2 +``` + +For full presigned URL documentation including GET, PUT, and security best practices, refer to [Presigned URLs](/r2/api/s3/presigned-urls/). ## Upload via S3 API -Use S3-compatible SDKs to upload objects. You'll need your [account ID](/fundamentals/account/find-account-and-zone-ids/) and [R2 API token](/r2/api/tokens/). +Use S3-compatible SDKs to upload objects. You will need your [account ID](/fundamentals/account/find-account-and-zone-ids/) and [R2 API token](/r2/api/tokens/). 
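+The examples below use the AWS SDK for JavaScript v3 (`@aws-sdk/client-s3`) and `boto3` for Python, but any S3-compatible SDK works the same way against your bucket's `https://<ACCOUNT_ID>.r2.cloudflarestorage.com` endpoint.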
+ +### Single upload - + ```ts import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3"; +import { readFile } from "node:fs/promises"; + +const S3 = new S3Client({ + region: "auto", + endpoint: `https://.r2.cloudflarestorage.com`, + credentials: { + accessKeyId: "", + secretAccessKey: "", + }, +}); + +const fileContent: Buffer = await readFile("./image.png"); + +await S3.send( + new PutObjectCommand({ + Bucket: "my-bucket", + Key: "image.png", + Body: fileContent, + ContentType: "image/png", + }), +); +``` + + + + +```js +import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3"; +import { readFile } from "node:fs/promises"; const S3 = new S3Client({ - region: "auto", // Required by SDK but not used by R2 - // Provide your Cloudflare account ID + region: "auto", endpoint: `https://.r2.cloudflarestorage.com`, - // Retrieve your S3 API credentials for your R2 bucket via API tokens (see: https://developers.cloudflare.com/r2/api/tokens) credentials: { - accessKeyId: '', - secretAccessKey: '', + accessKeyId: "", + secretAccessKey: "", }, }); +const fileContent = await readFile("./image.png"); + await S3.send( new PutObjectCommand({ Bucket: "my-bucket", Key: "image.png", Body: fileContent, + ContentType: "image/png", }), ); ``` - + ```python import boto3 s3 = boto3.client( - service_name="s3", - # Provide your Cloudflare account ID - endpoint_url=f"https://{ACCOUNT_ID}.r2.cloudflarestorage.com", - # Retrieve your S3 API credentials for your R2 bucket via API tokens (see: https://developers.cloudflare.com/r2/api/tokens) - aws_access_key_id=ACCESS_KEY_ID, - aws_secret_access_key=SECRET_ACCESS_KEY, - region_name="auto", # Required by SDK but not used by R2 + service_name="s3", + endpoint_url="https://.r2.cloudflarestorage.com", + aws_access_key_id="", + aws_secret_access_key="", + region_name="auto", ) -s3.put_object(Bucket="my-bucket", Key="image.png", Body=file_content) +with open("./image.png", "rb") as f: + s3.put_object( + Bucket="my-bucket", + Key="image.png", + Body=f, + ContentType="image/png", + ) ``` -Refer to R2's [S3 API documentation](/r2/api/s3/api/) for all S3 API methods. +### Multipart upload + +Most S3 SDKs handle multipart uploads automatically when the file exceeds a configurable threshold. The examples below show both automatic (high-level) and manual (low-level) approaches. + +#### Automatic multipart upload + +The SDK splits the file and uploads parts in parallel. 
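+With `@aws-sdk/lib-storage`, concurrency and part size are controlled by the `Upload` constructor's `queueSize` and `partSize` options; `boto3` exposes the same controls through a `TransferConfig` passed to `upload_file`.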
+ + + + +```ts +import { S3Client } from "@aws-sdk/client-s3"; +import { Upload } from "@aws-sdk/lib-storage"; +import { createReadStream } from "node:fs"; + +const S3 = new S3Client({ + region: "auto", + endpoint: `https://.r2.cloudflarestorage.com`, + credentials: { + accessKeyId: "", + secretAccessKey: "", + }, +}); + +const upload = new Upload({ + client: S3, + params: { + Bucket: "my-bucket", + Key: "large-file.bin", + Body: createReadStream("./large-file.bin"), + }, + // Upload parts in parallel (default: 4) + leavePartsOnError: false, +}); + +upload.on("httpUploadProgress", (progress) => { + console.log(`Uploaded ${progress.loaded} / ${progress.total} bytes`); +}); + +await upload.done(); +``` + + + + +```js +import { S3Client } from "@aws-sdk/client-s3"; +import { Upload } from "@aws-sdk/lib-storage"; +import { createReadStream } from "node:fs"; -### Presigned URLs +const S3 = new S3Client({ + region: "auto", + endpoint: `https://.r2.cloudflarestorage.com`, + credentials: { + accessKeyId: "", + secretAccessKey: "", + }, +}); + +const upload = new Upload({ + client: S3, + params: { + Bucket: "my-bucket", + Key: "large-file.bin", + Body: createReadStream("./large-file.bin"), + }, + leavePartsOnError: false, +}); -For client-side uploads where users upload directly to R2, use presigned URLs. Your server generates a temporary upload URL that clients can use without exposing your API credentials. +upload.on("httpUploadProgress", (progress) => { + console.log(`Uploaded ${progress.loaded} / ${progress.total} bytes`); +}); -1. Your application generates a presigned PUT URL using an S3 SDK -2. Send the URL to your client -3. Client uploads directly to R2 using the presigned URL +await upload.done(); +``` -For details on generating and using presigned URLs, refer to [Presigned URLs](/r2/api/s3/presigned-urls/). + + + +```python +import boto3 + +s3 = boto3.client( + service_name="s3", + endpoint_url="https://.r2.cloudflarestorage.com", + aws_access_key_id="", + aws_secret_access_key="", + region_name="auto", +) + +# upload_file automatically uses multipart for large files +s3.upload_file( + Filename="./large-file.bin", + Bucket="my-bucket", + Key="large-file.bin", +) +``` + + + + +#### Manual multipart upload + +Use the low-level API when you need full control over part sizes or upload order. 
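+If an upload fails partway through, abort it (as the examples below do) so the already-uploaded parts do not linger until the 7-day automatic cleanup removes them.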
+ + + + +```ts +import { + S3Client, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, + AbortMultipartUploadCommand, + type CompletedPart, +} from "@aws-sdk/client-s3"; +import { createReadStream, statSync } from "node:fs"; + +const S3 = new S3Client({ + region: "auto", + endpoint: `https://.r2.cloudflarestorage.com`, + credentials: { + accessKeyId: "", + secretAccessKey: "", + }, +}); + +const bucket = "my-bucket"; +const key = "large-file.bin"; +const partSize = 10 * 1024 * 1024; // 10 MiB per part + +// Step 1: Create the multipart upload +const { UploadId } = await S3.send( + new CreateMultipartUploadCommand({ Bucket: bucket, Key: key }), +); + +try { + const fileSize = statSync("./large-file.bin").size; + const partCount = Math.ceil(fileSize / partSize); + const parts: CompletedPart[] = []; + + // Step 2: Upload each part + for (let i = 0; i < partCount; i++) { + const start = i * partSize; + const end = Math.min(start + partSize, fileSize); + const { ETag } = await S3.send( + new UploadPartCommand({ + Bucket: bucket, + Key: key, + UploadId, + PartNumber: i + 1, + Body: createReadStream("./large-file.bin", { start, end: end - 1 }), + ContentLength: end - start, + }), + ); + parts.push({ PartNumber: i + 1, ETag }); + } + + // Step 3: Complete the upload + await S3.send( + new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: key, + UploadId, + MultipartUpload: { Parts: parts }, + }), + ); +} catch (err) { + // Abort on failure to clean up incomplete parts + await S3.send( + new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId }), + ); + throw err; +} +``` + + + + +```js +import { + S3Client, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, + AbortMultipartUploadCommand, +} from "@aws-sdk/client-s3"; +import { createReadStream, statSync } from "node:fs"; + +const S3 = new S3Client({ + region: "auto", + endpoint: `https://.r2.cloudflarestorage.com`, + credentials: { + accessKeyId: "", + secretAccessKey: "", + }, +}); + +const bucket = "my-bucket"; +const key = "large-file.bin"; +const partSize = 10 * 1024 * 1024; // 10 MiB per part + +// Step 1: Create the multipart upload +const { UploadId } = await S3.send( + new CreateMultipartUploadCommand({ Bucket: bucket, Key: key }), +); + +try { + const fileSize = statSync("./large-file.bin").size; + const partCount = Math.ceil(fileSize / partSize); + const parts = []; + + // Step 2: Upload each part + for (let i = 0; i < partCount; i++) { + const start = i * partSize; + const end = Math.min(start + partSize, fileSize); + const { ETag } = await S3.send( + new UploadPartCommand({ + Bucket: bucket, + Key: key, + UploadId, + PartNumber: i + 1, + Body: createReadStream("./large-file.bin", { start, end: end - 1 }), + ContentLength: end - start, + }), + ); + parts.push({ PartNumber: i + 1, ETag }); + } + + // Step 3: Complete the upload + await S3.send( + new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: key, + UploadId, + MultipartUpload: { Parts: parts }, + }), + ); +} catch (err) { + // Abort on failure to clean up incomplete parts + await S3.send( + new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId }), + ); + throw err; +} +``` + + + + +```python +import boto3 +import math +import os + +s3 = boto3.client( + service_name="s3", + endpoint_url="https://.r2.cloudflarestorage.com", + aws_access_key_id="", + aws_secret_access_key="", + region_name="auto", +) + +bucket = "my-bucket" +key = "large-file.bin" +file_path = 
"./large-file.bin" +part_size = 10 * 1024 * 1024 # 10 MiB per part + +# Step 1: Create the multipart upload +mpu = s3.create_multipart_upload(Bucket=bucket, Key=key) +upload_id = mpu["UploadId"] + +try: + file_size = os.path.getsize(file_path) + part_count = math.ceil(file_size / part_size) + parts = [] + + # Step 2: Upload each part + with open(file_path, "rb") as f: + for i in range(part_count): + data = f.read(part_size) + response = s3.upload_part( + Bucket=bucket, + Key=key, + UploadId=upload_id, + PartNumber=i + 1, + Body=data, + ) + parts.append({"PartNumber": i + 1, "ETag": response["ETag"]}) + + # Step 3: Complete the upload + s3.complete_multipart_upload( + Bucket=bucket, + Key=key, + UploadId=upload_id, + MultipartUpload={"Parts": parts}, + ) +except Exception: + # Abort on failure to clean up incomplete parts + s3.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) + raise +``` + + + + +### Presigned URLs (S3 API) + +For client-side uploads where users upload directly to R2 without going through your server, generate a presigned PUT URL. Your server creates the URL and the client uploads to it — no API credentials are exposed to the client. + + + + +```ts +import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3"; +import { getSignedUrl } from "@aws-sdk/s3-request-presigner"; + +const S3 = new S3Client({ + region: "auto", + endpoint: `https://.r2.cloudflarestorage.com`, + credentials: { + accessKeyId: "", + secretAccessKey: "", + }, +}); + +const presignedUrl = await getSignedUrl( + S3, + new PutObjectCommand({ + Bucket: "my-bucket", + Key: "user-upload.png", + ContentType: "image/png", + }), + { expiresIn: 3600 }, // Valid for 1 hour +); + +// Return presignedUrl to the client +``` + + + + +```js +import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3"; +import { getSignedUrl } from "@aws-sdk/s3-request-presigner"; + +const S3 = new S3Client({ + region: "auto", + endpoint: `https://.r2.cloudflarestorage.com`, + credentials: { + accessKeyId: "", + secretAccessKey: "", + }, +}); + +const presignedUrl = await getSignedUrl( + S3, + new PutObjectCommand({ + Bucket: "my-bucket", + Key: "user-upload.png", + ContentType: "image/png", + }), + { expiresIn: 3600 }, // Valid for 1 hour +); + +// Return presignedUrl to the client +``` + + + + +```python +import boto3 + +s3 = boto3.client( + service_name="s3", + endpoint_url="https://.r2.cloudflarestorage.com", + aws_access_key_id="", + aws_secret_access_key="", + region_name="auto", +) + +presigned_url = s3.generate_presigned_url( + "put_object", + Params={ + "Bucket": "my-bucket", + "Key": "user-upload.png", + "ContentType": "image/png", + }, + ExpiresIn=3600, # Valid for 1 hour +) + +# Return presigned_url to the client +``` + + + + +For full presigned URL documentation, refer to [Presigned URLs](/r2/api/s3/presigned-urls/). + +Refer to R2's [S3 API documentation](/r2/api/s3/api/) for all supported S3 API methods. ## Upload via CLI @@ -137,7 +726,7 @@ For more information, refer to our [rclone example](/r2/examples/rclone/). :::note -Wrangler supports uploading files up to 315MB and only allows one object at a time. For large files or bulk uploads, use [rclone](/r2/examples/rclone/) or another [S3-compatible](/r2/api/s3/) tool. +Wrangler supports uploading files up to 315 MB and only allows one object at a time. For large files or bulk uploads, use [rclone](/r2/examples/rclone/) or another [S3-compatible](/r2/api/s3/) tool. 
::: @@ -148,3 +737,48 @@ wrangler r2 object put test-bucket/image.png --file=image.png ``` You can set the `Content-Type` (MIME type), `Content-Disposition`, `Cache-Control` and other HTTP header metadata through optional flags. + +## Multipart upload details + +### Part size limits + +- Minimum part size: 5 MiB (except for the last part) +- Maximum part size: 5 GiB +- Maximum number of parts: 10,000 +- All parts except the last must be the same size + +### Incomplete upload lifecycles + +Incomplete multipart uploads are automatically aborted after 7 days by default. You can change this by [configuring a custom lifecycle policy](/r2/buckets/object-lifecycles/). + +### ETags + +ETags for objects uploaded via multipart differ from those uploaded with a single `PUT`. The ETag of each part is the MD5 hash of that part's contents. The ETag of the completed multipart object is the hash of the concatenated binary MD5 sums of all parts, followed by a hyphen and the number of parts. + +For example, if a two-part upload has part ETags `bce6bf66aeb76c7040fdd5f4eccb78e6` and `8165449fc15bbf43d3b674595cbcc406`, the completed object's ETag will be `f77dc0eecdebcd774a2a22cb393ad2ff-2`. + +## Related resources + + + + + + + + diff --git a/src/content/partials/r2/migrator-etag-caveat.mdx b/src/content/partials/r2/migrator-etag-caveat.mdx index 8a3e7d0fae97ef0..5b40fec510b86ec 100644 --- a/src/content/partials/r2/migrator-etag-caveat.mdx +++ b/src/content/partials/r2/migrator-etag-caveat.mdx @@ -6,7 +6,7 @@ inputParameters: param1 import { Markdown } from "~/components" While R2's ETag generation is compatible with S3's during the regular course of operations, ETags are not guaranteed to be equal when an object is migrated using {props.one}. -{props.one} makes autonomous decisions about the operations it uses when migrating objects to optimize for performance and network usage. It may choose to migrate an object in multiple parts, which affects [ETag calculation](/r2/objects/multipart-objects#etags). +{props.one} makes autonomous decisions about the operations it uses when migrating objects to optimize for performance and network usage. It may choose to migrate an object in multiple parts, which affects [ETag calculation](/r2/objects/upload-objects/#etags). For example, a 320 MiB object originally uploaded to S3 using a single `PutObject` operation might be migrated to R2 via multipart operations. In this case, its ETag on R2 will not be the same as its ETag on S3. Similarly, an object originally uploaded to S3 using multipart operations might also have a different ETag on R2 if the part sizes {props.one} chooses for its migration differ from the part sizes this object was originally uploaded with. diff --git a/src/content/release-notes/r2.yaml b/src/content/release-notes/r2.yaml index bc7ec61c741722a..35d473c8f30113a 100644 --- a/src/content/release-notes/r2.yaml +++ b/src/content/release-notes/r2.yaml @@ -121,7 +121,7 @@ entries: ranged reads on smaller files. Performance should now be consistently good independent of filesize. - publish_date: "2023-06-21" description: |- - - [Multipart ETags](/r2/objects/multipart-objects/#etags) are now MD5 + - [Multipart ETags](/r2/objects/upload-objects/#etags) are now MD5 hashes. 
- publish_date: "2023-06-16" description: |- From 6a383a5b598469b91b2afea6ab755f84ed9ffc64 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Sun, 8 Feb 2026 16:15:29 +0000 Subject: [PATCH 2/3] fix: improve R2 upload code examples for correctness and safety - Add error handling (try/catch) to Workers single upload example - Handle null return from put() (precondition failure) - Define placeholder variables in Workers multipart example (was using undefined vars) - Wrap Workers presigned URL in a proper fetch handler with env secrets - Remove unused ctx parameter from all Workers examples - Wrap abort calls in try/catch in manual multipart examples (TS/JS/Python) to avoid masking the original error - Fix progress.total potentially undefined in auto multipart upload - Add console.log/print output to S3 examples for copy-paste clarity - Remove unnecessary Buffer type annotation in TS single upload --- .../docs/r2/objects/upload-objects.mdx | 132 ++++++++++++------ 1 file changed, 88 insertions(+), 44 deletions(-) diff --git a/src/content/docs/r2/objects/upload-objects.mdx b/src/content/docs/r2/objects/upload-objects.mdx index b33766c05b92816..15bdd5a6cb057b2 100644 --- a/src/content/docs/r2/objects/upload-objects.mdx +++ b/src/content/docs/r2/objects/upload-objects.mdx @@ -49,19 +49,26 @@ Use `put()` to upload an object in a single request. This is the simplest approa ```ts export default { - async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise { - // Upload the request body as an object - const object = await env.MY_BUCKET.put("image.png", request.body, { - httpMetadata: { - contentType: "image/png", - }, - }); + async fetch(request: Request, env: Env): Promise { + try { + const object = await env.MY_BUCKET.put("image.png", request.body, { + httpMetadata: { + contentType: "image/png", + }, + }); - return Response.json({ - key: object?.key, - size: object?.size, - etag: object?.etag, - }); + if (object === null) { + return new Response("Precondition failed or upload returned null", { status: 412 }); + } + + return Response.json({ + key: object.key, + size: object.size, + etag: object.etag, + }); + } catch (err) { + return new Response(`Upload failed: ${err}`, { status: 500 }); + } }, } satisfies ExportedHandler; ``` @@ -72,15 +79,18 @@ Use `createMultipartUpload()` and `resumeMultipartUpload()` for large files or w ```ts export default { - async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise { + async fetch(request: Request, env: Env): Promise { const key = "large-file.bin"; // Create a new multipart upload const multipartUpload = await env.MY_BUCKET.createMultipartUpload(key); try { - // Upload parts — in a real application, you would split the - // file and potentially upload parts concurrently from the client + // In a real application, these would be actual data chunks. + // Each part except the last must be at least 5 MiB. 
+ const firstChunk = new Uint8Array(5 * 1024 * 1024); // placeholder + const secondChunk = new Uint8Array(1024); // placeholder + const part1 = await multipartUpload.uploadPart(1, firstChunk); const part2 = await multipartUpload.uploadPart(2, secondChunk); @@ -94,7 +104,7 @@ export default { } catch (err) { // Abort on failure so incomplete uploads do not count against storage await multipartUpload.abort(); - return new Response(String(err), { status: 500 }); + return new Response(`Multipart upload failed: ${err}`, { status: 500 }); } }, } satisfies ExportedHandler; @@ -104,7 +114,7 @@ In most cases, the multipart state (the `uploadId` and uploaded part ETags) is t ```ts export default { - async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise { + async fetch(request: Request, env: Env): Promise { const url = new URL(request.url); const key = url.pathname.slice(1); const action = url.searchParams.get("action"); @@ -185,21 +195,33 @@ When you need clients (browsers, mobile apps) to upload directly to R2 without p ```ts import { AwsClient } from "aws4fetch"; -const r2 = new AwsClient({ - accessKeyId: "", - secretAccessKey: "", -}); +interface Env { + R2_ACCESS_KEY_ID: string; + R2_SECRET_ACCESS_KEY: string; +} -// Generate a presigned PUT URL valid for 1 hour -const url = new URL("https://.r2.cloudflarestorage.com/my-bucket/image.png"); -url.searchParams.set("X-Amz-Expires", "3600"); +export default { + async fetch(request: Request, env: Env): Promise { + const r2 = new AwsClient({ + accessKeyId: env.R2_ACCESS_KEY_ID, + secretAccessKey: env.R2_SECRET_ACCESS_KEY, + }); -const signed = await r2.sign( - new Request(url, { method: "PUT" }), - { aws: { signQuery: true } }, -); + // Generate a presigned PUT URL valid for 1 hour + const url = new URL( + "https://.r2.cloudflarestorage.com/my-bucket/image.png", + ); + url.searchParams.set("X-Amz-Expires", "3600"); + + const signed = await r2.sign( + new Request(url, { method: "PUT" }), + { aws: { signQuery: true } }, + ); -// Return signed.url to the client — they can PUT directly to R2 + // Return the signed URL to the client — they can PUT directly to R2 + return Response.json({ url: signed.url }); + }, +} satisfies ExportedHandler; ``` For full presigned URL documentation including GET, PUT, and security best practices, refer to [Presigned URLs](/r2/api/s3/presigned-urls/). @@ -226,9 +248,9 @@ const S3 = new S3Client({ }, }); -const fileContent: Buffer = await readFile("./image.png"); +const fileContent = await readFile("./image.png"); -await S3.send( +const response = await S3.send( new PutObjectCommand({ Bucket: "my-bucket", Key: "image.png", @@ -236,6 +258,7 @@ await S3.send( ContentType: "image/png", }), ); +console.log(`Uploaded successfully. ETag: ${response.ETag}`); ``` @@ -256,7 +279,7 @@ const S3 = new S3Client({ const fileContent = await readFile("./image.png"); -await S3.send( +const response = await S3.send( new PutObjectCommand({ Bucket: "my-bucket", Key: "image.png", @@ -264,6 +287,7 @@ await S3.send( ContentType: "image/png", }), ); +console.log(`Uploaded successfully. ETag: ${response.ETag}`); ``` @@ -281,12 +305,13 @@ s3 = boto3.client( ) with open("./image.png", "rb") as f: - s3.put_object( + response = s3.put_object( Bucket="my-bucket", Key="image.png", Body=f, ContentType="image/png", ) + print(f"Uploaded successfully. 
ETag: {response['ETag']}") ``` @@ -329,10 +354,11 @@ const upload = new Upload({ }); upload.on("httpUploadProgress", (progress) => { - console.log(`Uploaded ${progress.loaded} / ${progress.total} bytes`); + console.log(`Uploaded ${progress.loaded ?? 0} bytes`); }); -await upload.done(); +const result = await upload.done(); +console.log(`Upload complete. ETag: ${result.ETag}`); ``` @@ -363,10 +389,11 @@ const upload = new Upload({ }); upload.on("httpUploadProgress", (progress) => { - console.log(`Uploaded ${progress.loaded} / ${progress.total} bytes`); + console.log(`Uploaded ${progress.loaded ?? 0} bytes`); }); -await upload.done(); +const result = await upload.done(); +console.log(`Upload complete. ETag: ${result.ETag}`); ``` @@ -461,11 +488,16 @@ try { MultipartUpload: { Parts: parts }, }), ); + console.log("Multipart upload complete."); } catch (err) { // Abort on failure to clean up incomplete parts - await S3.send( - new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId }), - ); + try { + await S3.send( + new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId }), + ); + } catch (_abortErr) { + // Best-effort cleanup — the original error is more important + } throw err; } ``` @@ -532,11 +564,16 @@ try { MultipartUpload: { Parts: parts }, }), ); + console.log("Multipart upload complete."); } catch (err) { // Abort on failure to clean up incomplete parts - await S3.send( - new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId }), - ); + try { + await S3.send( + new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId }), + ); + } catch (_abortErr) { + // Best-effort cleanup — the original error is more important + } throw err; } ``` @@ -591,9 +628,13 @@ try: UploadId=upload_id, MultipartUpload={"Parts": parts}, ) + print("Multipart upload complete.") except Exception: # Abort on failure to clean up incomplete parts - s3.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) + try: + s3.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) + except Exception: + pass # Best-effort cleanup — the original error is more important raise ``` @@ -630,6 +671,7 @@ const presignedUrl = await getSignedUrl( { expiresIn: 3600 }, // Valid for 1 hour ); +console.log(presignedUrl); // Return presignedUrl to the client ``` @@ -659,6 +701,7 @@ const presignedUrl = await getSignedUrl( { expiresIn: 3600 }, // Valid for 1 hour ); +console.log(presignedUrl); // Return presignedUrl to the client ``` @@ -686,6 +729,7 @@ presigned_url = s3.generate_presigned_url( ExpiresIn=3600, # Valid for 1 hour ) +print(presigned_url) # Return presigned_url to the client ``` From b24c9ff3702582f7773ea61272a3b3a44cef886b Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Sun, 8 Feb 2026 16:33:11 +0000 Subject: [PATCH 3/3] Wrap Workers TS blocks in TypeScriptExample Co-authored-by: elithrar --- src/content/docs/r2/objects/upload-objects.mdx | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/content/docs/r2/objects/upload-objects.mdx b/src/content/docs/r2/objects/upload-objects.mdx index 15bdd5a6cb057b2..e12c1e74e2081e9 100644 --- a/src/content/docs/r2/objects/upload-objects.mdx +++ b/src/content/docs/r2/objects/upload-objects.mdx @@ -5,7 +5,7 @@ sidebar: order: 2 --- -import { Steps, Tabs, TabItem, Render, DashButton, LinkCard } from "~/components" +import { Steps, Tabs, TabItem, Render, DashButton, LinkCard, TypeScriptExample } from "~/components" There are several ways to upload objects to R2. 
Which approach you choose depends on the size of your objects and your performance requirements. @@ -47,6 +47,8 @@ Use R2 [bindings](/workers/runtime-apis/bindings/) in Workers to upload objects Use `put()` to upload an object in a single request. This is the simplest approach for small to medium objects. + + ```ts export default { async fetch(request: Request, env: Env): Promise { @@ -73,10 +75,14 @@ export default { } satisfies ExportedHandler; ``` + + ### Multipart upload Use `createMultipartUpload()` and `resumeMultipartUpload()` for large files or when you need to upload parts in parallel. Each part must be at least 5 MiB (except the last part). + + ```ts export default { async fetch(request: Request, env: Env): Promise { @@ -110,8 +116,12 @@ export default { } satisfies ExportedHandler; ``` + + In most cases, the multipart state (the `uploadId` and uploaded part ETags) is tracked by the client sending requests to your Worker. The following example exposes an HTTP API that a client application can call to create, upload parts for, and complete a multipart upload: + + ```ts export default { async fetch(request: Request, env: Env): Promise { @@ -186,12 +196,16 @@ export default { } satisfies ExportedHandler; ``` + + For the complete Workers API reference, refer to [Workers API reference](/r2/api/workers/workers-api-reference/). ### Presigned URLs (Workers) When you need clients (browsers, mobile apps) to upload directly to R2 without proxying through your Worker, generate a presigned URL server-side and hand it to the client: + + ```ts import { AwsClient } from "aws4fetch"; @@ -224,6 +238,8 @@ export default { } satisfies ExportedHandler; ``` + + For full presigned URL documentation including GET, PUT, and security best practices, refer to [Presigned URLs](/r2/api/s3/presigned-urls/). ## Upload via S3 API