Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
182 changes: 182 additions & 0 deletions shortcuts/base/base_execute_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -865,6 +865,157 @@ func TestBaseRecordExecuteReadCreateDelete(t *testing.T) {
}
})

t.Run("upload attachment uses multipart for large file", func(t *testing.T) {
factory, stdout, reg := newExecuteFactory(t)

tmpFile, err := os.CreateTemp(t.TempDir(), "base-attachment-large-*.bin")
if err != nil {
t.Fatalf("CreateTemp() err=%v", err)
}
if err := tmpFile.Truncate(common.MaxDriveMediaUploadSinglePartSize + 1); err != nil {
t.Fatalf("Truncate() err=%v", err)
}
if err := tmpFile.Close(); err != nil {
t.Fatalf("Close() err=%v", err)
}
withBaseWorkingDir(t, filepath.Dir(tmpFile.Name()))

reg.Register(&httpmock.Stub{
Method: "GET",
URL: "/open-apis/base/v3/bases/app_x/tables/tbl_x/fields/fld_att",
Body: map[string]interface{}{
"code": 0,
"data": map[string]interface{}{"id": "fld_att", "name": "附件", "type": "attachment"},
},
})
reg.Register(&httpmock.Stub{
Method: "GET",
URL: "/open-apis/base/v3/bases/app_x/tables/tbl_x/records/rec_x",
Body: map[string]interface{}{
"code": 0,
"data": map[string]interface{}{
"record_id": "rec_x",
"fields": map[string]interface{}{},
},
},
})

prepareStub := &httpmock.Stub{
Method: "POST",
URL: "/open-apis/drive/v1/medias/upload_prepare",
Body: map[string]interface{}{
"code": 0,
"data": map[string]interface{}{
"upload_id": "upload_big_1",
"block_size": float64(8 * 1024 * 1024),
"block_num": float64(3),
},
},
}
reg.Register(prepareStub)

partStubs := make([]*httpmock.Stub, 0, 3)
for i := 0; i < 3; i++ {
stub := &httpmock.Stub{
Method: "POST",
URL: "/open-apis/drive/v1/medias/upload_part",
Body: map[string]interface{}{
"code": 0,
"msg": "ok",
},
}
partStubs = append(partStubs, stub)
reg.Register(stub)
}

finishStub := &httpmock.Stub{
Method: "POST",
URL: "/open-apis/drive/v1/medias/upload_finish",
Body: map[string]interface{}{
"code": 0,
"data": map[string]interface{}{"file_token": "file_tok_big"},
},
}
reg.Register(finishStub)

updateStub := &httpmock.Stub{
Method: "PATCH",
URL: "/open-apis/base/v3/bases/app_x/tables/tbl_x/records/rec_x",
Body: map[string]interface{}{
"code": 0,
"data": map[string]interface{}{
"record_id": "rec_x",
"fields": map[string]interface{}{
"附件": []interface{}{
map[string]interface{}{
"file_token": "file_tok_big",
"name": "large-report.bin",
"deprecated_set_attachment": true,
},
},
},
},
},
}
reg.Register(updateStub)

if err := runShortcut(t, BaseRecordUploadAttachment, []string{
"+record-upload-attachment",
"--base-token", "app_x",
"--table-id", "tbl_x",
"--record-id", "rec_x",
"--field-id", "fld_att",
"--file", "./" + filepath.Base(tmpFile.Name()),
"--name", "large-report.bin",
}, factory, stdout); err != nil {
t.Fatalf("err=%v", err)
}

if got := stdout.String(); !strings.Contains(got, `"updated": true`) || !strings.Contains(got, `"file_tok_big"`) || !strings.Contains(got, `"large-report.bin"`) {
t.Fatalf("stdout=%s", got)
}

prepareBody := string(prepareStub.CapturedBody)
if !strings.Contains(prepareBody, `"file_name":"large-report.bin"`) ||
!strings.Contains(prepareBody, `"parent_type":"bitable_file"`) ||
!strings.Contains(prepareBody, `"parent_node":"app_x"`) ||
!strings.Contains(prepareBody, `"size":20971521`) {
t.Fatalf("prepare body=%s", prepareBody)
}

firstPartBody := string(partStubs[0].CapturedBody)
if !strings.Contains(firstPartBody, `name="upload_id"`) ||
!strings.Contains(firstPartBody, "upload_big_1") ||
!strings.Contains(firstPartBody, `name="seq"`) ||
!strings.Contains(firstPartBody, "\r\n0\r\n") ||
!strings.Contains(firstPartBody, `name="size"`) ||
!strings.Contains(firstPartBody, "8388608") {
t.Fatalf("first part body=%s", firstPartBody)
}

lastPartBody := string(partStubs[2].CapturedBody)
if !strings.Contains(lastPartBody, `name="seq"`) ||
!strings.Contains(lastPartBody, "\r\n2\r\n") ||
!strings.Contains(lastPartBody, `name="size"`) ||
!strings.Contains(lastPartBody, "4194305") {
t.Fatalf("last part body=%s", lastPartBody)
}

finishBody := string(finishStub.CapturedBody)
if !strings.Contains(finishBody, `"upload_id":"upload_big_1"`) ||
!strings.Contains(finishBody, `"block_num":3`) {
t.Fatalf("finish body=%s", finishBody)
}

updateBody := string(updateStub.CapturedBody)
if !strings.Contains(updateBody, `"附件"`) ||
!strings.Contains(updateBody, `"file_token":"file_tok_big"`) ||
!strings.Contains(updateBody, `"name":"large-report.bin"`) ||
!strings.Contains(updateBody, `"deprecated_set_attachment":true`) {
t.Fatalf("update body=%s", updateBody)
}
})

t.Run("upload attachment rejects non-attachment field", func(t *testing.T) {
factory, stdout, reg := newExecuteFactory(t)

Expand Down Expand Up @@ -904,6 +1055,37 @@ func TestBaseRecordExecuteReadCreateDelete(t *testing.T) {
t.Fatalf("err=%v", err)
}
})

t.Run("upload attachment rejects file larger than 2GB", func(t *testing.T) {
factory, stdout, _ := newExecuteFactory(t)

tmpFile, err := os.CreateTemp(t.TempDir(), "base-too-large-*.bin")
if err != nil {
t.Fatalf("CreateTemp() err=%v", err)
}
if err := tmpFile.Truncate(2*1024*1024*1024 + 1); err != nil {
t.Fatalf("Truncate() err=%v", err)
}
if err := tmpFile.Close(); err != nil {
t.Fatalf("Close() err=%v", err)
}
withBaseWorkingDir(t, filepath.Dir(tmpFile.Name()))

err = runShortcut(t, BaseRecordUploadAttachment, []string{
"+record-upload-attachment",
"--base-token", "app_x",
"--table-id", "tbl_x",
"--record-id", "rec_x",
"--field-id", "fld_att",
"--file", "./" + filepath.Base(tmpFile.Name()),
}, factory, stdout)
if err == nil {
t.Fatal("expected validation error, got nil")
}
if !strings.Contains(err.Error(), "exceeds 2GB limit") {
t.Fatalf("err=%v", err)
}
})
}

func TestBaseViewExecuteReadCreateDeleteAndFilter(t *testing.T) {
Expand Down
131 changes: 74 additions & 57 deletions shortcuts/base/record_upload_attachment.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,24 +5,20 @@ package base

import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"path/filepath"
"strings"

larkcore "github.com/larksuite/oapi-sdk-go/v3/core"

"github.com/larksuite/cli/extension/fileio"
"github.com/larksuite/cli/internal/output"
"github.com/larksuite/cli/internal/util"
"github.com/larksuite/cli/shortcuts/common"
)

const (
baseAttachmentUploadMaxFileSize = 20 * 1024 * 1024
baseAttachmentParentType = "bitable_file"
baseAttachmentUploadMaxFileSize int64 = 2 * 1024 * 1024 * 1024
baseAttachmentParentType = "bitable_file"
)

var BaseRecordUploadAttachment = common.Shortcut{
Expand All @@ -37,7 +33,7 @@ var BaseRecordUploadAttachment = common.Shortcut{
tableRefFlag(true),
recordRefFlag(true),
fieldRefFlag(true),
{Name: "file", Desc: "local file path (max 20MB)", Required: true},
{Name: "file", Desc: "local file path (max 2GB; files > 20MB use multipart upload automatically)", Required: true},
{Name: "name", Desc: "attachment file name (default: local file name)"},
},
DryRun: dryRunRecordUploadAttachment,
Expand All @@ -52,7 +48,7 @@ func dryRunRecordUploadAttachment(_ context.Context, runtime *common.RuntimeCont
if fileName == "" {
fileName = filepath.Base(filePath)
}
return common.NewDryRunAPI().
dry := common.NewDryRunAPI().
Desc("4-step orchestration: validate attachment field → read existing record attachments → upload file to Base → patch merged attachment array").
GET("/open-apis/base/v3/bases/:base_token/tables/:table_id/fields/:field_id").
Desc("[1] Read target field and ensure it is an attachment field").
Expand All @@ -61,15 +57,42 @@ func dryRunRecordUploadAttachment(_ context.Context, runtime *common.RuntimeCont
Set("field_id", runtime.Str("field-id")).
GET("/open-apis/base/v3/bases/:base_token/tables/:table_id/records/:record_id").
Desc("[2] Read current record to preserve existing attachments in the target cell").
Set("record_id", runtime.Str("record-id")).
POST("/open-apis/drive/v1/medias/upload_all").
Desc("[3] Upload local file to the current Base as attachment media (multipart/form-data)").
Body(map[string]interface{}{
"file_name": fileName,
"parent_type": baseAttachmentParentType,
"parent_node": runtime.Str("base-token"),
"file": "@" + filePath,
}).
Set("record_id", runtime.Str("record-id"))
if baseAttachmentShouldUseMultipart(runtime.FileIO(), filePath) {
dry.POST("/open-apis/drive/v1/medias/upload_prepare").
Desc("[3a] Initialize multipart attachment upload to the current Base").
Body(map[string]interface{}{
"file_name": fileName,
"parent_type": baseAttachmentParentType,
"parent_node": runtime.Str("base-token"),
"size": "<file_size>",
}).
POST("/open-apis/drive/v1/medias/upload_part").
Desc("[3b] Upload attachment parts (repeated)").
Body(map[string]interface{}{
"upload_id": "<upload_id>",
"seq": "<chunk_index>",
"size": "<chunk_size>",
"file": "<chunk_binary>",
}).
POST("/open-apis/drive/v1/medias/upload_finish").
Desc("[3c] Finalize multipart attachment upload and get file token").
Body(map[string]interface{}{
"upload_id": "<upload_id>",
"block_num": "<block_num>",
})
} else {
dry.POST("/open-apis/drive/v1/medias/upload_all").
Desc("[3] Upload local file to the current Base as attachment media (multipart/form-data)").
Body(map[string]interface{}{
"file_name": fileName,
"parent_type": baseAttachmentParentType,
"parent_node": runtime.Str("base-token"),
"file": "@" + filePath,
"size": "<file_size>",
})
}
return dry.
PATCH("/open-apis/base/v3/bases/:base_token/tables/:table_id/records/:record_id").
Desc("[4] Update the target attachment cell with existing attachments plus the uploaded file token").
Body(map[string]interface{}{
Expand Down Expand Up @@ -102,7 +125,7 @@ func executeRecordUploadAttachment(runtime *common.RuntimeContext) error {
return output.ErrValidation("file not accessible: %s: %v", filePath, err)
}
if fileInfo.Size() > baseAttachmentUploadMaxFileSize {
return output.ErrValidation("file %.1fMB exceeds 20MB limit", float64(fileInfo.Size())/1024/1024)
return output.ErrValidation("file %s exceeds 2GB limit", common.FormatSize(fileInfo.Size()))
}

fileName := strings.TrimSpace(runtime.Str("name"))
Expand All @@ -124,6 +147,9 @@ func executeRecordUploadAttachment(runtime *common.RuntimeContext) error {
}

fmt.Fprintf(runtime.IO().ErrOut, "Uploading attachment: %s -> record %s field %s\n", fileName, runtime.Str("record-id"), fieldName(field))
if fileInfo.Size() > common.MaxDriveMediaUploadSinglePartSize {
fmt.Fprintf(runtime.IO().ErrOut, "File exceeds 20MB, using multipart upload\n")
}

attachment, err := uploadAttachmentToBase(runtime, filePath, fileName, runtime.Str("base-token"), fileInfo.Size())
if err != nil {
Expand Down Expand Up @@ -151,6 +177,14 @@ func executeRecordUploadAttachment(runtime *common.RuntimeContext) error {
return nil
}

// baseAttachmentShouldUseMultipart reports whether the attachment at filePath
// should be uploaded through the Drive multipart flow
// (upload_prepare / upload_part / upload_finish) instead of a single
// upload_all request.
//
// It returns true only when the file can be stat'ed, is a regular file, and
// its size exceeds common.MaxDriveMediaUploadSinglePartSize (20MB, per the
// "File exceeds 20MB" message emitted elsewhere in this file).
//
// A Stat failure deliberately yields false: the dry-run caller then renders
// the simpler single-part plan, and the real upload path surfaces the file
// access error itself.
func baseAttachmentShouldUseMultipart(fio fileio.FileIO, filePath string) bool {
	info, err := fio.Stat(filePath)
	if err != nil {
		// Size unknown; fall back to the single-part plan rather than failing here.
		return false
	}
	// Non-regular files (directories, pipes, devices) never use multipart.
	return info.Mode().IsRegular() && info.Size() > common.MaxDriveMediaUploadSinglePartSize
}

func fetchBaseField(runtime *common.RuntimeContext, baseToken, tableIDValue, fieldRef string) (map[string]interface{}, error) {
return baseV3Call(runtime, "GET", baseV3Path("bases", baseToken, "tables", tableIDValue, "fields", fieldRef), nil, nil)
}
Expand Down Expand Up @@ -209,47 +243,30 @@ func normalizeAttachmentForPatch(attachment map[string]interface{}) map[string]i
}

func uploadAttachmentToBase(runtime *common.RuntimeContext, filePath, fileName, baseToken string, fileSize int64) (map[string]interface{}, error) {
f, err := runtime.FileIO().Open(filePath)
if err != nil {
return nil, output.ErrValidation("cannot open file: %v", err)
parentNode := baseToken
var (
fileToken string
err error
)
if fileSize <= common.MaxDriveMediaUploadSinglePartSize {
fileToken, err = common.UploadDriveMediaAll(runtime, common.DriveMediaUploadAllConfig{
FilePath: filePath,
FileName: fileName,
FileSize: fileSize,
ParentType: baseAttachmentParentType,
ParentNode: &parentNode,
})
} else {
fileToken, err = common.UploadDriveMediaMultipart(runtime, common.DriveMediaMultipartUploadConfig{
FilePath: filePath,
FileName: fileName,
FileSize: fileSize,
ParentType: baseAttachmentParentType,
ParentNode: parentNode,
})
}
defer f.Close()

fd := larkcore.NewFormdata()
fd.AddField("file_name", fileName)
fd.AddField("parent_type", baseAttachmentParentType)
fd.AddField("parent_node", baseToken)
fd.AddField("size", fmt.Sprintf("%d", fileSize))
fd.AddFile("file", f)

apiResp, err := runtime.DoAPI(&larkcore.ApiReq{
HttpMethod: http.MethodPost,
ApiPath: "/open-apis/drive/v1/medias/upload_all",
Body: fd,
}, larkcore.WithFileUpload())
if err != nil {
var exitErr *output.ExitError
if errors.As(err, &exitErr) {
return nil, err
}
return nil, output.ErrNetwork("upload failed: %v", err)
}

var result map[string]interface{}
if err := json.Unmarshal(apiResp.RawBody, &result); err != nil {
return nil, output.Errorf(output.ExitAPI, "api_error", "upload failed: invalid response JSON: %v", err)
}

code, _ := util.ToFloat64(result["code"])
if code != 0 {
msg, _ := result["msg"].(string)
return nil, output.ErrAPI(int(code), fmt.Sprintf("upload failed: [%d] %s", int(code), msg), result["error"])
}

data, _ := result["data"].(map[string]interface{})
fileToken, _ := data["file_token"].(string)
if fileToken == "" {
return nil, output.Errorf(output.ExitAPI, "api_error", "upload failed: no file_token returned")
return nil, err
}

attachment := map[string]interface{}{
Expand Down
Loading
Loading