From 1ab01dfd414863907852b6672c977aa91bc91ef0 Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Thu, 3 Jul 2025 14:06:15 +0800
Subject: [PATCH 01/20] perf(115,123): optimize upload

---
 drivers/115_open/upload.go | 16 ++++++++---
 drivers/123/upload.go      | 17 +++++++----
 drivers/123_open/upload.go | 30 +++++++++++++-------
 internal/stream/util.go    | 58 ++++++++++++++++++++++++++++++++++++++
 pkg/errgroup/errgroup.go   | 15 ++++++++++
 5 files changed, 116 insertions(+), 20 deletions(-)

diff --git a/drivers/115_open/upload.go b/drivers/115_open/upload.go
index 389774304..f8cc86fe2 100644
--- a/drivers/115_open/upload.go
+++ b/drivers/115_open/upload.go
@@ -6,12 +6,13 @@ import (
 	"io"
 	"time"
 
+	sdk "github.com/OpenListTeam/115-sdk-go"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 	"github.com/avast/retry-go"
-	sdk "github.com/OpenListTeam/115-sdk-go"
 )
 
 func calPartSize(fileSize int64) int64 {
@@ -89,6 +90,10 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 	partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
 	parts := make([]oss.UploadPart, partNum)
 	offset := int64(0)
+	ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), 1)
+	if err != nil {
+		return err
+	}
 	for i := int64(1); i <= partNum; i++ {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -98,10 +103,13 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 		if i == partNum {
 			partSize = fileSize - (i-1)*chunkSize
 		}
-		rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
+		rd, err := ss.GetSectionReader(offset, partSize, int(i-1))
+		if err != nil {
+			return err
+		}
+		rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
 		err = retry.Do(func() error {
-			_ = rd.Reset()
-			rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
+			rd.Seek(0, io.SeekStart)
 			part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
 			if err != nil {
 				return err
diff --git a/drivers/123/upload.go b/drivers/123/upload.go
index 1dc79e2f2..3a509e39a 100644
--- a/drivers/123/upload.go
+++ b/drivers/123/upload.go
@@ -10,6 +10,7 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/go-resty/resty/v2"
 )
@@ -69,10 +70,6 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
 }
 
 func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
-	tmpF, err := file.CacheFullInTempFile()
-	if err != nil {
-		return err
-	}
 	// fetch s3 pre signed urls
 	size := file.GetSize()
 	chunkSize := min(size, 16*utils.MB)
@@ -90,6 +87,10 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		batchSize = 10
 		getS3UploadUrl = d.getS3PreSignedUrls
 	}
+	ss, err := stream.NewStreamSectionReader(file, int(chunkSize), 1)
+	if err != nil {
+		return err
+	}
 	for i := 1; i <= chunkCount; i += batchSize {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -109,7 +110,11 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			if j == chunkCount {
 				curSize = lastChunkSize
 			}
-			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
+			reader, err := ss.GetSectionReader(chunkSize*int64(j-1), curSize, j)
+			if err != nil {
+				return err
+			}
+			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, reader, curSize, false, getS3UploadUrl)
 			if err != nil {
 				return err
 			}
@@ -120,7 +125,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 	return d.completeS3(ctx, upReq, file, chunkCount > 1)
 }
 
-func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
+func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.ReadSeeker, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
 	uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
 	if uploadUrl == "" {
 		return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
diff --git a/drivers/123_open/upload.go b/drivers/123_open/upload.go
index cc769509a..6069659ae 100644
--- a/drivers/123_open/upload.go
+++ b/drivers/123_open/upload.go
@@ -2,6 +2,7 @@ package _123_open
 
 import (
 	"context"
+	"io"
 	"net/http"
 	"strings"
 	"time"
@@ -9,11 +10,12 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
-	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
+	"github.com/sirupsen/logrus"
 )
 
 func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
@@ -79,11 +81,16 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 	size := file.GetSize()
 	chunkSize := createResp.Data.SliceSize
 	uploadNums := (size + chunkSize - 1) / chunkSize
-	threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.UploadThread,
+	thread := min(int(uploadNums), d.UploadThread)
+	threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	ss, err := stream.NewStreamSectionReader(file, int(chunkSize), thread)
+	if err != nil {
+		return err
+	}
 	for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
 		if utils.IsCanceled(uploadCtx) {
 			return ctx.Err()
@@ -92,21 +99,24 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 		partNumber := partIndex + 1 // 分片号从1开始
 		offset := partIndex * chunkSize
 		size := min(chunkSize, size-offset)
-		limitedReader, err := file.RangeRead(http_range.Range{
-			Start: offset,
-			Length: size})
-		if err != nil {
-			return err
-		}
-		limitedReader = driver.NewLimitedUploadStream(ctx, limitedReader)
+		var reader io.ReadSeeker
+
 		threadG.Go(func(ctx context.Context) error {
+			if reader == nil {
+				var err error
+				reader, err = ss.GetSectionReader(offset, size, int(partIndex))
+				logrus.Warnf("off:%d,size:%d ,idx:%d", offset, size, partIndex)
+				if err != nil {
+					return err
+				}
+			}
+			reader.Seek(0, io.SeekStart)
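+			// note: the section reader is fetched lazily inside the goroutine and
+			// rewound to the start on entry, so each retry attempt re-sends the
+			// same chunk bytes from offset 0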
 			uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
 			if err != nil {
 				return err
 			}
 
-			req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, limitedReader)
+			req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, driver.NewLimitedUploadStream(ctx, reader))
 			if err != nil {
 				return err
 			}
 
diff --git a/internal/stream/util.go b/internal/stream/util.go
index 94bf750a5..bc255e7bd 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -1,11 +1,13 @@
 package stream
 
 import (
+	"bytes"
 	"context"
 	"encoding/hex"
 	"fmt"
 	"io"
 	"net/http"
+	"sync"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/net"
@@ -139,3 +141,59 @@ func CacheFullInTempFileAndHash(stream model.FileStreamer, hashType *utils.HashT
 	}
 	return tmpF, hex.EncodeToString(h.Sum(nil)), err
 }
+
+type StreamSectionReader struct {
+	file      model.FileStreamer
+	off       int64
+	bufs      [][]byte
+	bufMaxLen int
+	m         sync.Mutex
+}
+
+func NewStreamSectionReader(file model.FileStreamer, bufMaxLen, thread int) (*StreamSectionReader, error) {
+	ss := &StreamSectionReader{file: file, bufMaxLen: bufMaxLen}
+	if file.GetFile() == nil {
+		if bufMaxLen > 64*utils.KB {
+			_, err := file.CacheFullInTempFile()
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			ss.bufMaxLen = bufMaxLen
+			ss.bufs = make([][]byte, max(1, thread))
+		}
+	}
+	return ss, nil
+}
+
+func (ss *StreamSectionReader) getBuf(index int) []byte {
+	index = index % len(ss.bufs)
+	buf := ss.bufs[index]
+	if buf == nil {
+		buf = make([]byte, ss.bufMaxLen)
+		ss.bufs[index] = buf
+	}
+	return buf
+}
+func (ss *StreamSectionReader) GetSectionReader(off, length int64, index int) (io.ReadSeeker, error) {
+	ss.m.Lock()
+	defer ss.m.Unlock()
+	var cache io.ReaderAt = ss.file.GetFile()
+	if cache == nil {
+		if off != ss.off {
+			return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
+		}
+		buf := ss.getBuf(index)[:length]
+		n, err := io.ReadFull(ss.file, buf)
+		if err != nil {
+			return nil, err
+		}
+		if int64(n) != length {
+			return nil, fmt.Errorf("stream read did not get all data, expect =%d ,actual =%d", length, n)
+		}
+		ss.off += int64(n)
+		off = 0
+		cache = bytes.NewReader(buf)
+	}
+	return io.NewSectionReader(cache, off, length), nil
+}
diff --git a/pkg/errgroup/errgroup.go b/pkg/errgroup/errgroup.go
index 858df044c..bb0a9e9e3 100644
--- a/pkg/errgroup/errgroup.go
+++ b/pkg/errgroup/errgroup.go
@@ -19,6 +19,8 @@ type Group struct {
 
 	wg  sync.WaitGroup
 	sem chan token
+
+	startChan chan token
 }
 
 func NewGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Option) (*Group, context.Context) {
@@ -26,6 +28,13 @@ func NewGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Opti
 	return (&Group{cancel: cancel, ctx: ctx, opts: append(retryOpts, retry.Context(ctx))}).SetLimit(limit), ctx
 }
 
+func NewOrderedGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Option) (*Group, context.Context) {
+	group, ctx := NewGroupWithContext(ctx, limit, retryOpts...)
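+	// a one-slot token channel: Go blocks on it before launching the next task,
+	// and each task hands the token back as soon as it has started, so goroutines
+	// are started strictly in submission order (ordered reads from a sequential stream)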
+	group.startChan = make(chan token, 1)
+	group.startChan <- token{}
+	return group, ctx
+}
+
 func (g *Group) done() {
 	if g.sem != nil {
 		<-g.sem
@@ -40,12 +49,18 @@ func (g *Group) Wait() error {
 }
 
 func (g *Group) Go(f func(ctx context.Context) error) {
+	if g.startChan != nil {
+		<-g.startChan
+	}
 	if g.sem != nil {
 		g.sem <- token{}
 	}
 	g.wg.Add(1)
 	go func() {
+		if g.startChan != nil {
+			g.startChan <- token{}
+		}
 		defer g.done()
 		if err := retry.Do(func() error { return f(g.ctx) }, g.opts...); err != nil {
 			g.cancel(err)

From d7855094059502795bb40a5dd0e0489b9d3f06c3 Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Thu, 3 Jul 2025 14:11:07 +0800
Subject: [PATCH 02/20] chore

---
 drivers/123_open/upload.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/123_open/upload.go b/drivers/123_open/upload.go
index 6069659ae..9e0d28ab5 100644
--- a/drivers/123_open/upload.go
+++ b/drivers/123_open/upload.go
@@ -100,7 +100,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 		offset := partIndex * chunkSize
 		size := min(chunkSize, size-offset)
 		var reader io.ReadSeeker
-
+		var rateLimitedRd io.Reader
 		threadG.Go(func(ctx context.Context) error {
 			if reader == nil {
 				var err error
@@ -109,6 +109,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 				if err != nil {
 					return err
 				}
+				rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
 			}
 			reader.Seek(0, io.SeekStart)
 			uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
@@ -116,7 +117,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 				return err
 			}
 
-			req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, driver.NewLimitedUploadStream(ctx, reader))
+			req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, rateLimitedRd)
 			if err != nil {
 				return err
 			}

From 3a5f3dfd570932f5b40717d42b3ebcda14f6ec7f Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Thu, 3 Jul 2025 14:38:29 +0800
Subject: [PATCH 03/20] aliyun_open, google_drive

---
 drivers/aliyundrive_open/upload.go | 18 +++++++++---------
 drivers/google_drive/util.go       | 28 ++++++++++++++++------------
 internal/stream/util.go            |  3 +++
 3 files changed, 28 insertions(+), 21 deletions(-)

diff --git a/drivers/aliyundrive_open/upload.go b/drivers/aliyundrive_open/upload.go
index 10ae4bfce..e06773dcc 100644
--- a/drivers/aliyundrive_open/upload.go
+++ b/drivers/aliyundrive_open/upload.go
@@ -223,6 +223,10 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 	preTime := time.Now()
 	var offset, length int64 = 0, partSize
 	//var length
+	ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), 1)
+	if err != nil {
+		return nil, err
+	}
 	for i := 0; i < len(createResp.PartInfoList); i++ {
 		if utils.IsCanceled(ctx) {
 			return nil, ctx.Err()
@@ -238,17 +242,13 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 		if remain := stream.GetSize() - offset; length > remain {
 			length = remain
 		}
-		rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
-		if rapidUpload {
-			srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
-			if err != nil {
-				return nil, err
-			}
-			rd = utils.NewMultiReadable(srd)
+		rd, err := ss.GetSectionReader(offset, length, i)
+		if err != nil {
+			return nil, err
 		}
+		rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
 		err = retry.Do(func() error {
-			_ = rd.Reset()
-			rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
+			rd.Seek(0, io.SeekStart)
 			return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
 		},
 			retry.Attempts(3),
diff --git a/drivers/google_drive/util.go b/drivers/google_drive/util.go
index 97e04f4de..f1e1058ec 100644
--- a/drivers/google_drive/util.go
+++ b/drivers/google_drive/util.go
@@ -5,17 +5,19 @@ import (
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
-	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"io"
 	"net/http"
 	"os"
 	"regexp"
 	"strconv"
 	"time"
 
+	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"github.com/OpenListTeam/OpenList/v4/internal/stream"
+
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
-	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	"github.com/golang-jwt/jwt/v4"
@@ -251,27 +253,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
 	return res, nil
 }
 
-func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
+func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string) error {
 	var defaultChunkSize = d.ChunkSize * 1024 * 1024
 	var offset int64 = 0
-	for offset < stream.GetSize() {
+	ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), 1)
+	if err != nil {
+		return err
+	}
+	for offset < file.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		chunkSize := stream.GetSize() - offset
-		if chunkSize > defaultChunkSize {
-			chunkSize = defaultChunkSize
-		}
-		reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
+		chunkSize := min(file.GetSize()-offset, defaultChunkSize)
+		reader, err := ss.GetSectionReader(offset, chunkSize, 0)
 		if err != nil {
 			return err
 		}
-		reader = driver.NewLimitedUploadStream(ctx, reader)
+		limitedReader := driver.NewLimitedUploadStream(ctx, reader)
 		_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
+			reader.Seek(0, io.SeekStart)
 			req.SetHeaders(map[string]string{
 				"Content-Length": strconv.FormatInt(chunkSize, 10),
-				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
-			}).SetBody(reader).SetContext(ctx)
+				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, file.GetSize()),
+			}).SetBody(limitedReader).SetContext(ctx)
 		}, nil)
 		if err != nil {
 			return err
diff --git a/internal/stream/util.go b/internal/stream/util.go
index bc255e7bd..59d6bd995 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -150,6 +150,7 @@ type StreamSectionReader struct {
 	m         sync.Mutex
 }
 
+// 单线程 thread 可以为0
 func NewStreamSectionReader(file model.FileStreamer, bufMaxLen, thread int) (*StreamSectionReader, error) {
 	ss := &StreamSectionReader{file: file, bufMaxLen: bufMaxLen}
 	if file.GetFile() == nil {
@@ -175,6 +176,8 @@ func (ss *StreamSectionReader) getBuf(index int) []byte {
 	}
 	return buf
 }
+
+// 单线程 index 可以为0
 func (ss *StreamSectionReader) GetSectionReader(off, length int64, index int) (io.ReadSeeker, error) {
 	ss.m.Lock()
 	defer ss.m.Unlock()

From 2e67f2827dc88d4616167e8f233b588a45df1df0 Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Thu, 3 Jul 2025 16:08:17 +0800
Subject: [PATCH 04/20] fix bug

---
 drivers/115_open/upload.go         |  5 +--
 drivers/123/upload.go              |  5 +--
 drivers/123_open/upload.go         | 12 ++++---
 drivers/aliyundrive_open/upload.go |  4 +--
 drivers/google_drive/util.go       |  4 +--
 internal/stream/util.go            | 57 ++++++++++++++++++------------
 pkg/errgroup/errgroup.go           | 11 +++++-
 7 files changed, 61 insertions(+), 37 deletions(-)

diff --git a/drivers/115_open/upload.go b/drivers/115_open/upload.go
index f8cc86fe2..30b476a58 100644
--- a/drivers/115_open/upload.go
+++ b/drivers/115_open/upload.go
@@ -90,7 +90,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 	partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
 	parts := make([]oss.UploadPart, partNum)
 	offset := int64(0)
-	ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize), 1)
+	ss, err := streamPkg.NewStreamSectionReader(stream, int(chunkSize))
 	if err != nil {
 		return err
 	}
@@ -103,7 +103,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 		if i == partNum {
 			partSize = fileSize - (i-1)*chunkSize
 		}
-		rd, err := ss.GetSectionReader(offset, partSize, int(i-1))
+		rd, err := ss.GetSectionReader(offset, partSize)
 		if err != nil {
 			return err
 		}
@@ -123,6 +123,7 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 		if err != nil {
 			return err
 		}
+		ss.RecycleSectionReader(rd)
 
 		if i == partNum {
 			offset = fileSize
diff --git a/drivers/123/upload.go b/drivers/123/upload.go
index 3a509e39a..02674eb85 100644
--- a/drivers/123/upload.go
+++ b/drivers/123/upload.go
@@ -87,7 +87,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		batchSize = 10
 		getS3UploadUrl = d.getS3PreSignedUrls
 	}
-	ss, err := stream.NewStreamSectionReader(file, int(chunkSize), 1)
+	ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
 	if err != nil {
 		return err
 	}
@@ -110,7 +110,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			if j == chunkCount {
 				curSize = lastChunkSize
 			}
-			reader, err := ss.GetSectionReader(chunkSize*int64(j-1), curSize, j)
+			reader, err := ss.GetSectionReader(chunkSize*int64(j-1), curSize)
 			if err != nil {
 				return err
 			}
@@ -118,6 +118,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			if err != nil {
 				return err
 			}
+			ss.RecycleSectionReader(reader)
 			up(float64(j) * 100 / float64(chunkCount))
 		}
 	}
diff --git a/drivers/123_open/upload.go b/drivers/123_open/upload.go
index 9e0d28ab5..6af3b1df7 100644
--- a/drivers/123_open/upload.go
+++ b/drivers/123_open/upload.go
@@ -15,7 +15,6 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
-	"github.com/sirupsen/logrus"
 )
 
 func (d *Open123) create(parentFileID int64, filename string, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
@@ -87,7 +86,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
 
-	ss, err := stream.NewStreamSectionReader(file, int(chunkSize), thread)
+	ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
 	if err != nil {
 		return err
 	}
@@ -101,11 +100,10 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 		size := min(chunkSize, size-offset)
 		var reader io.ReadSeeker
 		var rateLimitedRd io.Reader
-		threadG.Go(func(ctx context.Context) error {
+		threadG.GoWithResult(func(ctx context.Context) error {
 			if reader == nil {
 				var err error
-				reader, err = ss.GetSectionReader(offset, size, int(partIndex))
-				logrus.Warnf("off:%d,size:%d ,idx:%d", offset, size, partIndex)
+				reader, err = ss.GetSectionReader(offset, size)
 				if err != nil {
 					return err
 				}
@@ -133,6 +131,10 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 			progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
 			up(progress)
 			return nil
+		}, func(err error) {
+			if reader != nil {
+				ss.RecycleSectionReader(reader)
+			}
 		})
 	}
 
diff --git a/drivers/aliyundrive_open/upload.go b/drivers/aliyundrive_open/upload.go
index e06773dcc..0d08bc726 100644
--- a/drivers/aliyundrive_open/upload.go
+++ b/drivers/aliyundrive_open/upload.go
@@ -223,7 +223,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 	preTime := time.Now()
 	var offset, length int64 = 0, partSize
 	//var length
-	ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize), 1)
+	ss, err := streamPkg.NewStreamSectionReader(stream, int(partSize))
 	if err != nil {
 		return nil, err
 	}
@@ -242,7 +242,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 		if remain := stream.GetSize() - offset; length > remain {
 			length = remain
 		}
-		rd, err := ss.GetSectionReader(offset, length, i)
+		rd, err := ss.GetSectionReader(offset, length)
 		if err != nil {
 			return nil, err
 		}
diff --git a/drivers/google_drive/util.go b/drivers/google_drive/util.go
index f1e1058ec..975c10d26 100644
--- a/drivers/google_drive/util.go
+++ b/drivers/google_drive/util.go
@@ -256,7 +256,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
 func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string) error {
 	var defaultChunkSize = d.ChunkSize * 1024 * 1024
 	var offset int64 = 0
-	ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), 1)
+	ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize))
 	if err != nil {
 		return err
 	}
@@ -265,7 +265,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
 			return ctx.Err()
 		}
 		chunkSize := min(file.GetSize()-offset, defaultChunkSize)
-		reader, err := ss.GetSectionReader(offset, chunkSize, 0)
+		reader, err := ss.GetSectionReader(offset, chunkSize)
 		if err != nil {
 			return err
 		}
diff --git a/internal/stream/util.go b/internal/stream/util.go
index 59d6bd995..8e33439a2 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -143,50 +143,44 @@ func CacheFullInTempFileAndHash(stream model.FileStreamer, hashType *utils.HashT
 }
 
 type StreamSectionReader struct {
-	file      model.FileStreamer
-	off       int64
-	bufs      [][]byte
-	bufMaxLen int
-	m         sync.Mutex
+	file    model.FileStreamer
+	off     int64
+	m       sync.Mutex
+	bufPool *sync.Pool
 }
 
 // 单线程 thread 可以为0
-func NewStreamSectionReader(file model.FileStreamer, bufMaxLen, thread int) (*StreamSectionReader, error) {
-	ss := &StreamSectionReader{file: file, bufMaxLen: bufMaxLen}
+func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSectionReader, error) {
+	ss := &StreamSectionReader{file: file}
 	if file.GetFile() == nil {
-		if bufMaxLen > 64*utils.KB {
+		if bufMaxLen > 64*utils.MB {
 			_, err := file.CacheFullInTempFile()
 			if err != nil {
 				return nil, err
 			}
 		} else {
-			ss.bufMaxLen = bufMaxLen
-			ss.bufs = make([][]byte, max(1, thread))
+			ss.bufPool = &sync.Pool{
+				New: func() any {
+					return make([]byte, bufMaxLen) // Two times of size in io package
+				},
+			}
 		}
 	}
 	return ss, nil
 }
 
-func (ss *StreamSectionReader) getBuf(index int) []byte {
-	index = index % len(ss.bufs)
-	buf := ss.bufs[index]
-	if buf == nil {
-		buf = make([]byte, ss.bufMaxLen)
-		ss.bufs[index] = buf
-	}
-	return buf
-}
-
-// 单线程 index 可以为0
-func (ss *StreamSectionReader) GetSectionReader(off, length int64, index int) (io.ReadSeeker, error) {
+func (ss *StreamSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
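+	// for an uncached stream, off must match the stream's current position:
+	// sections can only be handed out sequentially, and an out-of-order request
+	// fails below rather than seeking a non-seekable source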
 	ss.m.Lock()
 	defer ss.m.Unlock()
 	var cache io.ReaderAt = ss.file.GetFile()
+	var buf []byte
 	if cache == nil {
 		if off != ss.off {
 			return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
 		}
-		buf := ss.getBuf(index)[:length]
+		tempBuf := ss.bufPool.Get().([]byte)
+		buf = tempBuf[:length]
 		n, err := io.ReadFull(ss.file, buf)
 		if err != nil {
 			return nil, err
 		}
@@ -198,5 +192,22 @@ func (ss *StreamSectionReader) GetSectionReader(off, length int64, index int) (i
 	ss.off += int64(n)
 	off = 0
 	cache = bytes.NewReader(buf)
 	}
-	return io.NewSectionReader(cache, off, length), nil
+	return &SectionReader{io.NewSectionReader(cache, off, length), buf}, nil
+}
+
+func (ss *StreamSectionReader) RecycleSectionReader(rs io.ReadSeeker) {
+	ss.m.Lock()
+	defer ss.m.Unlock()
+	if sr, ok := rs.(*SectionReader); ok {
+		if sr.buf != nil {
+			ss.bufPool.Put(sr.buf)
+			sr.buf = nil
+		}
+		sr.ReadSeeker = nil
+	}
+}
+
+type SectionReader struct {
+	io.ReadSeeker
+	buf []byte
 }
diff --git a/pkg/errgroup/errgroup.go b/pkg/errgroup/errgroup.go
index bb0a9e9e3..9544edd9e 100644
--- a/pkg/errgroup/errgroup.go
+++ b/pkg/errgroup/errgroup.go
@@ -49,6 +49,10 @@ func (g *Group) Wait() error {
 }
 
 func (g *Group) Go(f func(ctx context.Context) error) {
+	g.GoWithResult(f, nil)
+}
+
+func (g *Group) GoWithResult(f func(ctx context.Context) error, result func(err error)) {
 	if g.startChan != nil {
 		<-g.startChan
 	}
@@ -62,10 +66,15 @@ func (g *Group) Go(f func(ctx context.Context) error) {
 			g.startChan <- token{}
 		}
 		defer g.done()
-		if err := retry.Do(func() error { return f(g.ctx) }, g.opts...); err != nil {
+		err := retry.Do(func() error { return f(g.ctx) }, g.opts...)
+		if result != nil {
+			result(err)
+		}
+		if err != nil {
 			g.cancel(err)
 		}
 	}()
+
 }
 
 func (g *Group) TryGo(f func(ctx context.Context) error) bool {

From 3f0e1db589d22b04344b46944d6d65d5cee70af7 Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Thu, 3 Jul 2025 17:02:40 +0800
Subject: [PATCH 05/20] chore

---
 drivers/115_open/upload.go | 2 +-
 drivers/123/upload.go      | 4 ++--
 internal/stream/util.go    | 2 --
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/drivers/115_open/upload.go b/drivers/115_open/upload.go
index 30b476a58..089141e32 100644
--- a/drivers/115_open/upload.go
+++ b/drivers/115_open/upload.go
@@ -120,10 +120,10 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 			retry.Attempts(3),
 			retry.DelayType(retry.BackOffDelay),
 			retry.Delay(time.Second))
+		ss.RecycleSectionReader(rd)
 		if err != nil {
 			return err
 		}
-		ss.RecycleSectionReader(rd)
 
 		if i == partNum {
 			offset = fileSize
diff --git a/drivers/123/upload.go b/drivers/123/upload.go
index 02674eb85..0101c0369 100644
--- a/drivers/123/upload.go
+++ b/drivers/123/upload.go
@@ -115,10 +115,10 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 				return err
 			}
 			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, reader, curSize, false, getS3UploadUrl)
+			ss.RecycleSectionReader(reader)
 			if err != nil {
 				return err
 			}
-			ss.RecycleSectionReader(reader)
 			up(float64(j) * 100 / float64(chunkCount))
 		}
 	}
@@ -131,6 +131,7 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
 	if uploadUrl == "" {
 		return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
 	}
+	reader.Seek(0, io.SeekStart)
 	req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
 	if err != nil {
 		return err
 	}
@@ -154,7 +155,6 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
 		}
 		s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
 		// retry
-		reader.Seek(0, io.SeekStart)
 		return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
 	}
 	if res.StatusCode != http.StatusOK {
diff --git a/internal/stream/util.go b/internal/stream/util.go
index 8e33439a2..bcf2df4d4 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -149,7 +149,6 @@ type StreamSectionReader struct {
 	bufPool *sync.Pool
 }
 
-// 单线程 thread 可以为0
 func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSectionReader, error) {
 	ss := &StreamSectionReader{file: file}
 	if file.GetFile() == nil {
@@ -169,7 +168,6 @@ func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSect
 	return ss, nil
 }
 
-// 单线程 index 可以为0
 func (ss *StreamSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
 	ss.m.Lock()
 	defer ss.m.Unlock()

From 6b13bf7c82e78bac802ea38efc0a3857eaeb4c97 Mon Sep 17 00:00:00 2001
From: MadDogOwner
Date: Thu, 3 Jul 2025 20:54:09 +0800
Subject: [PATCH 06/20] cloudreve, cloudreve_v4, onedrive, onedrive_app

---
 drivers/cloudreve/util.go    | 245 ++++++++++++++++++-----------------
 drivers/cloudreve_v4/util.go | 244 +++++++++++++++++-----------------
 drivers/onedrive/util.go     |  78 +++++------
 drivers/onedrive_app/util.go |  77 ++++++-----
 4 files changed, 334 insertions(+), 310 deletions(-)

diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go
index ef37811ad..f3778028c 100644
--- a/drivers/cloudreve/util.go
+++ b/drivers/cloudreve/util.go
@@ -18,8 +18,10 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/setting"
+	streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/cookie"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
+	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
 	jsoniter "github.com/json-iterator/go"
 )
@@ -240,8 +242,10 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 	var finish int64 = 0
 	var chunk int = 0
 	DEFAULT := int64(u.ChunkSize)
-	retryCount := 0
-	maxRetries := 3
+	ss, err := streamPkg.NewStreamSectionReader(stream, int(DEFAULT))
+	if err != nil {
+		return err
+	}
 	for finish < stream.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
 		left := stream.GetSize() - finish
 		byteSize := min(left, DEFAULT)
 		utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
-		byteData := make([]byte, byteSize)
-		n, err := io.ReadFull(stream, byteData)
-		utils.Log.Debug(err, n)
+		rd, err := ss.GetSectionReader(finish, byteSize)
 		if err != nil {
 			return err
 		}
-		req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
-			driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+		err = retry.Do(
+			func() error {
+				rd.Seek(0, io.SeekStart)
+				req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
+					driver.NewLimitedUploadStream(ctx, rd))
+				if err != nil {
+					return err
+				}
+				req = req.WithContext(ctx)
+				req.ContentLength = byteSize
+				req.Header.Set("Authorization", fmt.Sprint(credential))
+				req.Header.Set("User-Agent", d.getUA())
+				res, err := base.HttpClient.Do(req)
+				if err != nil {
+					return err
+				}
+				defer res.Body.Close()
+				if res.StatusCode != 200 {
+					return
fmt.Errorf("server error: %d", res.StatusCode) + } + body, err := io.ReadAll(res.Body) + if err != nil { + return err + } + var up Resp + err = json.Unmarshal(body, &up) + if err != nil { + return err + } + if up.Code != 0 { + return errors.New(up.Msg) + } + return nil + }, + retry.Attempts(3), + retry.DelayType(retry.BackOffDelay), + retry.Delay(time.Second), + ) + ss.RecycleSectionReader(rd) if err != nil { return err } - req = req.WithContext(ctx) - req.ContentLength = byteSize - // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) - req.Header.Set("Authorization", fmt.Sprint(credential)) - req.Header.Set("User-Agent", d.getUA()) - err = func() error { - res, err := base.HttpClient.Do(req) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode != 200 { - return errors.New(res.Status) - } - body, err := io.ReadAll(res.Body) - if err != nil { - return err - } - var up Resp - err = json.Unmarshal(body, &up) - if err != nil { - return err - } - if up.Code != 0 { - return errors.New(up.Msg) - } - return nil - }() - if err == nil { - retryCount = 0 - finish += byteSize - up(float64(finish) * 100 / float64(stream.GetSize())) - chunk++ - } else { - retryCount++ - if retryCount > maxRetries { - return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err) - } - backoff := time.Duration(1<= 500 && res.StatusCode <= 504: + return fmt.Errorf("server error: %d", res.StatusCode) + case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200: + data, _ := io.ReadAll(res.Body) + return errors.New(string(data)) + default: + return nil + } + }, retry.Attempts(3), + retry.DelayType(retry.BackOffDelay), + retry.Delay(time.Second), + ) + ss.RecycleSectionReader(rd) if err != nil { return err } - // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession - switch { - case res.StatusCode >= 500 && res.StatusCode <= 504: - retryCount++ - if retryCount > maxRetries { - res.Body.Close() - return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) - } - backoff := time.Duration(1< maxRetries { - return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) - } - backoff := time.Duration(1< maxRetries { - return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err) - } - backoff := time.Duration(1<= 500 && res.StatusCode <= 504: + return fmt.Errorf("server error: %d", res.StatusCode) + case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200: + data, _ := io.ReadAll(res.Body) + return errors.New(string(data)) + default: + return nil + } + }, retry.Attempts(3), + retry.DelayType(retry.BackOffDelay), + retry.Delay(time.Second), + ) + ss.RecycleSectionReader(rd) if err != nil { return err } - // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession - switch { - case res.StatusCode >= 500 && res.StatusCode <= 504: - retryCount++ - if retryCount > maxRetries { - res.Body.Close() - return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) - } - backoff := time.Duration(1< maxRetries { - return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries) - } - backoff := time.Duration(1<= 500 && res.StatusCode <= 504: + return fmt.Errorf("server error: %d", res.StatusCode) + case res.StatusCode != 201 && res.StatusCode != 
202 && res.StatusCode != 200: + data, _ := io.ReadAll(res.Body) + return errors.New(string(data)) + default: + return nil + } + }, + retry.Attempts(3), + retry.DelayType(retry.BackOffDelay), + retry.Delay(time.Second), + ) + ss.RecycleSectionReader(rd) if err != nil { return err } - // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession - switch { - case res.StatusCode >= 500 && res.StatusCode <= 504: - retryCount++ - if retryCount > maxRetries { - res.Body.Close() - return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) - } - backoff := time.Duration(1<= 500 && res.StatusCode <= 504: + return fmt.Errorf("server error: %d", res.StatusCode) + case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200: + data, _ := io.ReadAll(res.Body) + return errors.New(string(data)) + default: + return nil + } + }, + retry.Attempts(3), + retry.DelayType(retry.BackOffDelay), + retry.Delay(time.Second), + ) + ss.RecycleSectionReader(rd) if err != nil { return err } - req = req.WithContext(ctx) - req.ContentLength = byteSize - // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) - req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) - res, err := base.HttpClient.Do(req) - if err != nil { - return err - } - // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession - switch { - case res.StatusCode >= 500 && res.StatusCode <= 504: - retryCount++ - if retryCount > maxRetries { - res.Body.Close() - return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) - } - backoff := time.Duration(1< Date: Fri, 4 Jul 2025 16:31:37 +0800 Subject: [PATCH 07/20] chore(conf): add `max_buffer_limit` option --- internal/bootstrap/config.go | 3 +++ internal/conf/config.go | 2 ++ internal/conf/var.go | 1 + internal/net/request.go | 6 +++++- internal/stream/stream.go | 8 +++----- internal/stream/util.go | 3 ++- 6 files changed, 16 insertions(+), 7 deletions(-) diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go index 781dc82ae..d3857299e 100644 --- a/internal/bootstrap/config.go +++ b/internal/bootstrap/config.go @@ -67,6 +67,9 @@ func InitConfig() { if conf.Conf.MaxConcurrency > 0 { net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency} } + if conf.Conf.MaxBufferLimit < 0 { + conf.MaxBufferLimit = 64 * utils.MB + } if !conf.Conf.Force { confFromEnv() } diff --git a/internal/conf/config.go b/internal/conf/config.go index 4fc8b2838..a637b2efe 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -108,6 +108,7 @@ type Config struct { DistDir string `json:"dist_dir"` Log LogConfig `json:"log"` DelayedStart int `json:"delayed_start" env:"DELAYED_START"` + MaxBufferLimit int `json:"max_buffer_limit" env:"MAX_BUFFER_LIMIT"` MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"` MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"` TlsInsecureSkipVerify bool `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"` @@ -154,6 +155,7 @@ func DefaultConfig() *Config { MaxBackups: 30, MaxAge: 28, }, + MaxBufferLimit: -1, MaxConnections: 0, MaxConcurrency: 64, TlsInsecureSkipVerify: true, diff --git a/internal/conf/var.go b/internal/conf/var.go index 7ae1a5abf..dd11cfdd7 100644 --- a/internal/conf/var.go +++ b/internal/conf/var.go @@ -25,6 +25,7 @@ var PrivacyReg 
[]*regexp.Regexp
 
 var (
 	// StoragesLoaded loaded success if empty
 	StoragesLoaded = false
+	MaxBufferLimit int
 )
 var (
 	RawIndexHtml string
diff --git a/internal/net/request.go b/internal/net/request.go
index 416409720..461c2555e 100644
--- a/internal/net/request.go
+++ b/internal/net/request.go
@@ -11,6 +11,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 
 	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
@@ -20,7 +21,7 @@ import (
 
 // DefaultDownloadPartSize is the default range of bytes to get at a time when
 // using Download().
-const DefaultDownloadPartSize = utils.MB * 10
+const DefaultDownloadPartSize = utils.MB * 8
 
 // DefaultDownloadConcurrency is the default number of goroutines to spin up
 // when using Download().
@@ -82,6 +83,9 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo
 	if impl.cfg.PartSize == 0 {
 		impl.cfg.PartSize = DefaultDownloadPartSize
 	}
+	if conf.MaxBufferLimit > 0 && impl.cfg.PartSize > conf.MaxBufferLimit {
+		impl.cfg.PartSize = conf.MaxBufferLimit
+	}
 	if impl.cfg.HttpClient == nil {
 		impl.cfg.HttpClient = DefaultHttpRequestFunc
 	}
diff --git a/internal/stream/stream.go b/internal/stream/stream.go
index 1164f80d3..50613788d 100644
--- a/internal/stream/stream.go
+++ b/internal/stream/stream.go
@@ -9,6 +9,7 @@ import (
 	"math"
 	"os"
 
+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
@@ -104,11 +105,8 @@ func (f *FileStream) GetFile() model.File {
 	return nil
 }
 
-const InMemoryBufMaxSize = 10 // Megabytes
-const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024
-
 // RangeRead have to cache all data first since only Reader is provided.
-// also support a peeking RangeRead at very start, but won't buffer more than 10MB data in memory
+// also support a peeking RangeRead at very start, but won't buffer more than conf.MaxBufferLimit data in memory
 func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
 	if httpRange.Length == -1 {
 		// 参考 internal/net/request.go
@@ -123,7 +121,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
 	if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
 		return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
 	}
-	if size <= InMemoryBufMaxSizeBytes {
+	if size <= int64(conf.MaxBufferLimit) {
 		bufSize := min(size, f.GetSize())
 		// 使用bytes.Buffer作为io.CopyBuffer的写入对象,CopyBuffer会调用Buffer.ReadFrom
 		// 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大
diff --git a/internal/stream/util.go b/internal/stream/util.go
index bcf2df4d4..93347299a 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -9,6 +9,7 @@ import (
 	"net/http"
 	"sync"
 
+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/net"
 	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
@@ -152,7 +153,7 @@ type StreamSectionReader struct {
 func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSectionReader, error) {
 	ss := &StreamSectionReader{file: file}
 	if file.GetFile() == nil {
-		if bufMaxLen > 64*utils.MB {
+		if bufMaxLen > conf.MaxBufferLimit {
 			_, err := file.CacheFullInTempFile()
 			if err != nil {
 				return nil, err
 			}

From 139900d47e757e121c92e8f1016a437a6205454f Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Sat, 5 Jul 2025 14:18:29 +0800
Subject: [PATCH 08/20] 123pan multithread upload

---
 drivers/123/meta.go   |  10 ++-
 drivers/123/upload.go | 138 +++++++++++++++++++++++++-----------------
 2 files changed, 89 insertions(+), 59 deletions(-)

diff --git a/drivers/123/meta.go b/drivers/123/meta.go
index 505c55c08..613ba3ad6 100644
--- a/drivers/123/meta.go
+++ b/drivers/123/meta.go
@@ -11,7 +11,8 @@ type Addition struct {
 	driver.RootID
 	//OrderBy        string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
 	//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
-	AccessToken string
+	AccessToken  string
+	UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
 }
 
 var config = driver.Config{
@@ -22,6 +23,11 @@ var config = driver.Config{
 
 func init() {
 	op.RegisterDriver(func() driver.Driver {
-		return &Pan123{}
+		// 新增默认选项 要在RegisterDriver初始化设置 才会对正在使用的用户生效
+		return &Pan123{
+			Addition: Addition{
+				UploadThread: 3,
+			},
+		}
 	})
 }
diff --git a/drivers/123/upload.go b/drivers/123/upload.go
index 0101c0369..2a6e0a6e8 100644
--- a/drivers/123/upload.go
+++ b/drivers/123/upload.go
@@ -6,12 +6,16 @@ import (
 	"io"
 	"net/http"
 	"strconv"
+	"time"
 
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
+	"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
+	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
+	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
 )
 
@@ -91,78 +95,98 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 	if err != nil {
 		return err
 	}
+
+	thread := min(int(chunkCount), d.UploadThread)
+
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread, + retry.Attempts(3), + retry.Delay(time.Second), + retry.DelayType(retry.BackOffDelay)) for i := 1; i <= chunkCount; i += batchSize { - if utils.IsCanceled(ctx) { - return ctx.Err() + if utils.IsCanceled(uploadCtx) { + return uploadCtx.Err() } start := i end := min(i+batchSize, chunkCount+1) - s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end) + s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end) + key := fmt.Sprintf("%p", s3PreSignedUrls) if err != nil { return err } // upload each chunk - for j := start; j < end; j++ { - if utils.IsCanceled(ctx) { - return ctx.Err() + for cur := start; cur < end; cur++ { + if utils.IsCanceled(uploadCtx) { + return uploadCtx.Err() } + offset := int64(cur-1) * chunkSize curSize := chunkSize - if j == chunkCount { + if cur == chunkCount { curSize = lastChunkSize } - reader, err := ss.GetSectionReader(chunkSize*int64(j-1), curSize) - if err != nil { - return err - } - err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, reader, curSize, false, getS3UploadUrl) - ss.RecycleSectionReader(reader) - if err != nil { - return err - } - up(float64(j) * 100 / float64(chunkCount)) + var reader io.ReadSeeker + var rateLimitedRd io.Reader + threadG.GoWithResult(func(ctx context.Context) error { + if reader == nil { + var err error + reader, err = ss.GetSectionReader(offset, curSize) + if err != nil { + return err + } + rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader) + } + reader.Seek(0, io.SeekStart) + uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)] + if uploadUrl == "" { + return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls) + } + reader.Seek(0, io.SeekStart) + req, err := http.NewRequest("PUT", uploadUrl, rateLimitedRd) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.ContentLength = curSize + //req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10)) + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode == http.StatusForbidden { + _, err, _ := uploadG.Do(key, func() (*S3PreSignedURLs, error) { + newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end) + if err != nil { + return nil, err + } + s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls + return newS3PreSignedUrls, nil + }) + if err != nil { + return err + } + return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode) + } + if res.StatusCode != http.StatusOK { + body, err := io.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body) + } + progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount) + up(progress) + return nil + }, func(err error) { + ss.RecycleSectionReader(reader) + }) + // err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, reader, curSize, false, getS3UploadUrl) } } + if err := threadG.Wait(); err != nil { + return err + } + defer up(100) // complete s3 upload return d.completeS3(ctx, upReq, file, chunkCount > 1) } -func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.ReadSeeker, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error { - uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)] - if 
uploadUrl == "" { - return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls) - } - reader.Seek(0, io.SeekStart) - req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader)) - if err != nil { - return err - } - req = req.WithContext(ctx) - req.ContentLength = curSize - //req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10)) - res, err := base.HttpClient.Do(req) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode == http.StatusForbidden { - if retry { - return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode) - } - // refresh s3 pre signed urls - newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end) - if err != nil { - return err - } - s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls - // retry - return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl) - } - if res.StatusCode != http.StatusOK { - body, err := io.ReadAll(res.Body) - if err != nil { - return err - } - return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body) - } - return nil -} +var uploadG singleflight.Group[*S3PreSignedURLs] From 6ca64b96a3586964165e0a3e4b62a9cd4ebd48c1 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 5 Jul 2025 22:25:49 +0800 Subject: [PATCH 09/20] doubao --- drivers/123/upload.go | 2 +- drivers/123_open/upload.go | 8 +- drivers/doubao/driver.go | 2 +- drivers/doubao/util.go | 201 +++++++++++++++++++------------------ internal/stream/util.go | 39 +++++-- 5 files changed, 138 insertions(+), 114 deletions(-) diff --git a/drivers/123/upload.go b/drivers/123/upload.go index 2a6e0a6e8..ebff04698 100644 --- a/drivers/123/upload.go +++ b/drivers/123/upload.go @@ -122,7 +122,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi if cur == chunkCount { curSize = lastChunkSize } - var reader io.ReadSeeker + var reader *stream.SectionReader var rateLimitedRd io.Reader threadG.GoWithResult(func(ctx context.Context) error { if reader == nil { diff --git a/drivers/123_open/upload.go b/drivers/123_open/upload.go index 6af3b1df7..40a72a6e2 100644 --- a/drivers/123_open/upload.go +++ b/drivers/123_open/upload.go @@ -92,13 +92,13 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes } for partIndex := int64(0); partIndex < uploadNums; partIndex++ { if utils.IsCanceled(uploadCtx) { - return ctx.Err() + return uploadCtx.Err() } partIndex := partIndex partNumber := partIndex + 1 // 分片号从1开始 offset := partIndex * chunkSize size := min(chunkSize, size-offset) - var reader io.ReadSeeker + var reader *stream.SectionReader var rateLimitedRd io.Reader threadG.GoWithResult(func(ctx context.Context) error { if reader == nil { @@ -132,9 +132,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes up(progress) return nil }, func(err error) { - if reader != nil { - ss.RecycleSectionReader(reader) - } + ss.RecycleSectionReader(reader) }) } diff --git a/drivers/doubao/driver.go b/drivers/doubao/driver.go index 1819c6861..d2ba04ea3 100644 --- a/drivers/doubao/driver.go +++ b/drivers/doubao/driver.go @@ -236,7 +236,7 @@ func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea // 根据文件大小选择上传方式 if file.GetSize() <= 1*utils.MB { // 小于1MB,使用普通模式上传 - return d.Upload(&uploadConfig, dstDir, file, up, dataType) + return d.Upload(ctx, &uploadConfig, dstDir, file, up, dataType) } // 大文件使用分片上传 return 
d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType) diff --git a/drivers/doubao/util.go b/drivers/doubao/util.go index 70b4231ce..669336e04 100644 --- a/drivers/doubao/util.go +++ b/drivers/doubao/util.go @@ -24,6 +24,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/pkg/errgroup" "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/avast/retry-go" @@ -447,36 +448,58 @@ func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file mode } // Upload 普通上传实现 -func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) { - data, err := io.ReadAll(file) +func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) { + ss, err := stream.NewStreamSectionReader(file, int(file.GetSize())) if err != nil { return nil, err } + reader, err := ss.GetSectionReader(0, file.GetSize()) // 计算CRC32 crc32Hash := crc32.NewIEEE() - crc32Hash.Write(data) + utils.CopyWithBuffer(crc32Hash, reader) crc32Value := hex.EncodeToString(crc32Hash.Sum(nil)) // 构建请求路径 uploadNode := config.InnerUploadAddress.UploadNodes[0] storeInfo := uploadNode.StoreInfos[0] uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI) + var uploadResp *UploadResp + rateLimitedRd := driver.NewLimitedUploadStream(ctx, reader) + err = d._retryOperation("Upload", func() error { + reader.Seek(0, io.SeekStart) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd) + if err != nil { + return err + } + req.Header = map[string][]string{ + "Referer": {BaseURL + "/"}, + "Origin": {BaseURL}, + "User-Agent": {UserAgent}, + "X-Storage-U": {d.UserId}, + "Authorization": {storeInfo.Auth}, + "Content-Type": {"application/octet-stream"}, + "Content-Crc32": {crc32Value}, + "Content-Length": {fmt.Sprintf("%d", file.GetSize())}, + "Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))}, + } + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + bytes, _ := io.ReadAll(res.Body) + resp := UploadResp{} + utils.Json.Unmarshal(bytes, &resp) + if resp.Code != 2000 { + return fmt.Errorf("upload part failed: %s", resp.Message) + } else if resp.Data.Crc32 != crc32Value { + return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, resp.Data.Crc32) + } - uploadResp := UploadResp{} - - if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { - req.SetHeaders(map[string]string{ - "Content-Type": "application/octet-stream", - "Content-Crc32": crc32Value, - "Content-Length": fmt.Sprintf("%d", len(data)), - "Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)), - }) + return nil - req.SetBody(data) - }, &uploadResp); err != nil { - return nil, err - } + }) if uploadResp.Code != 2000 { return nil, fmt.Errorf("upload failed: %s", uploadResp.Message) @@ -519,65 +542,90 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi totalParts := (fileSize + chunkSize - 1) / chunkSize // 创建分片信息组 parts := make([]UploadPart, totalParts) 
- // 缓存文件 - tempFile, err := file.CacheFullInTempFile() + + // 用 stream.NewStreamSectionReader 替代缓存临时文件 + ss, err := stream.NewStreamSectionReader(file, int(chunkSize)) if err != nil { - return nil, fmt.Errorf("failed to cache file: %w", err) + return nil, err } up(10.0) // 更新进度 // 设置并行上传 - threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread, + thread := min(int(totalParts), d.uploadThread) + threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, thread, retry.Attempts(1), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) var partsMutex sync.Mutex // 并行上传所有分片 - for partIndex := int64(0); partIndex < totalParts; partIndex++ { + for partIndex := range totalParts { if utils.IsCanceled(uploadCtx) { break } - partIndex := partIndex partNumber := partIndex + 1 // 分片编号从1开始 - threadG.Go(func(ctx context.Context) error { - // 计算此分片的大小和偏移 - offset := partIndex * chunkSize - size := chunkSize - if partIndex == totalParts-1 { - size = fileSize - offset - } + // 计算此分片的大小和偏移 + offset := partIndex * chunkSize + size := chunkSize + if partIndex == totalParts-1 { + size = fileSize - offset + } - limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size)) - // 读取数据到内存 - data, err := io.ReadAll(limitedReader) + threadG.Go(func(ctx context.Context) error { + reader, err := ss.GetSectionReader(offset, size) + defer ss.RecycleSectionReader(reader) if err != nil { - return fmt.Errorf("failed to read part %d: %w", partNumber, err) - } - // 计算CRC32 - crc32Value := calculateCRC32(data) - // 使用_retryOperation上传分片 - var uploadPart UploadPart - if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error { - var err error - uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value) return err - }); err != nil { - return fmt.Errorf("part %d upload failed: %w", partNumber, err) } - // 记录成功上传的分片 - partsMutex.Lock() - parts[partIndex] = UploadPart{ - PartNumber: strconv.FormatInt(partNumber, 10), - Etag: uploadPart.Etag, - Crc32: crc32Value, - } - partsMutex.Unlock() - // 更新进度 - progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts) - up(math.Min(progress, 95.0)) + hash := crc32.NewIEEE() + utils.CopyWithBuffer(hash, reader) + crc32Value := hex.EncodeToString(hash.Sum(nil)) + rateLimitedRd := driver.NewLimitedUploadStream(uploadCtx, reader) + return d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error { + // 使用_retryOperation上传分片 + reader.Seek(0, io.SeekStart) + req, err := http.NewRequestWithContext(uploadCtx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd) + if err != nil { + return err + } + req.Header = map[string][]string{ + "Referer": {BaseURL + "/"}, + "Origin": {BaseURL}, + "User-Agent": {UserAgent}, + "X-Storage-U": {d.UserId}, + "Authorization": {storeInfo.Auth}, + "Content-Type": {"application/octet-stream"}, + "Content-Crc32": {crc32Value}, + "Content-Length": {fmt.Sprintf("%d", size)}, + "Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))}, + } + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + bytes, _ := io.ReadAll(res.Body) + uploadResp := UploadResp{} + utils.Json.Unmarshal(bytes, &uploadResp) + if uploadResp.Code != 2000 { + return fmt.Errorf("upload part failed: %s", uploadResp.Message) + } else if uploadResp.Data.Crc32 != crc32Value { + return fmt.Errorf("upload part 
failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32) + } + // 记录成功上传的分片 + partsMutex.Lock() + parts[partIndex] = UploadPart{ + PartNumber: strconv.FormatInt(partNumber, 10), + Etag: uploadResp.Data.Etag, + Crc32: crc32Value, + } + partsMutex.Unlock() + // 更新进度 + progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts) + up(math.Min(progress, 95.0)) + return nil - return nil + }) }) } @@ -680,42 +728,6 @@ func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, sto return uploadResp.Data.UploadId, nil } -// 分片上传实现 -func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) { - uploadResp := UploadResp{} - storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0] - - _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { - req.SetHeaders(map[string]string{ - "Content-Type": "application/octet-stream", - "Content-Crc32": crc32Value, - "Content-Length": fmt.Sprintf("%d", len(data)), - "Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)), - }) - - req.SetQueryParams(map[string]string{ - "uploadid": uploadID, - "part_number": strconv.FormatInt(partNumber, 10), - "phase": "transfer", - }) - - req.SetBody(data) - req.SetContentLength(true) - }, &uploadResp) - - if err != nil { - return resp, err - } - - if uploadResp.Code != 2000 { - return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message) - } else if uploadResp.Data.Crc32 != crc32Value { - return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32) - } - - return uploadResp.Data, nil -} - // 完成分片上传 func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error { uploadResp := UploadResp{} @@ -784,13 +796,6 @@ func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error { return nil } -// 计算CRC32 -func calculateCRC32(data []byte) string { - hash := crc32.NewIEEE() - hash.Write(data) - return hex.EncodeToString(hash.Sum(nil)) -} - // _retryOperation 操作重试 func (d *Doubao) _retryOperation(operation string, fn func() error) error { return retry.Do( diff --git a/internal/stream/util.go b/internal/stream/util.go index 93347299a..f3f80595d 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -146,7 +146,7 @@ func CacheFullInTempFileAndHash(stream model.FileStreamer, hashType *utils.HashT type StreamSectionReader struct { file model.FileStreamer off int64 - m sync.Mutex + mu sync.Mutex bufPool *sync.Pool } @@ -169,9 +169,9 @@ func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSect return ss, nil } -func (ss *StreamSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) { - ss.m.Lock() - defer ss.m.Unlock() +func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionReader, error) { + ss.mu.Lock() + defer ss.mu.Unlock() var cache io.ReaderAt = ss.file.GetFile() var buf []byte if cache == nil { @@ -194,18 +194,39 @@ func (ss *StreamSectionReader) GetSectionReader(off, length int64) (io.ReadSeeke return &SectionReader{io.NewSectionReader(cache, off, length), buf}, nil } -func (ss *StreamSectionReader) RecycleSectionReader(rs io.ReadSeeker) { - ss.m.Lock() - defer ss.m.Unlock() - if sr, ok := rs.(*SectionReader); ok { +func (ss *StreamSectionReader) RecycleSectionReader(sr *SectionReader) { + if sr 
!= nil { + ss.mu.Lock() + defer ss.mu.Unlock() if sr.buf != nil { - ss.bufPool.Put(sr.buf) + ss.bufPool.Put(sr.buf[0:cap(sr.buf)]) sr.buf = nil } sr.ReadSeeker = nil } } +// func (ss *StreamSectionReader) GetBytes(sr *SectionReader) ([]byte, error) { +// if sr != nil && ss.bufPool != nil { +// ss.mu.Lock() +// defer ss.mu.Unlock() +// buf := sr.buf +// if buf == nil { +// buf := ss.bufPool.Get().([]byte) +// n, err := io.ReadFull(sr, buf) +// if err == io.EOF && n > 0 { +// err = nil +// } +// if err != nil { +// return nil, err +// } +// sr.buf = buf[:n] +// } +// return sr.buf, nil +// } +// return nil, errors.New("SectionReader is nil") +// } + type SectionReader struct { io.ReadSeeker buf []byte From 6cb506ef727be15f1c3b6de95177d9f1a1f0d0b7 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 5 Jul 2025 23:10:27 +0800 Subject: [PATCH 10/20] google_drive --- drivers/google_drive/util.go | 41 ++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/drivers/google_drive/util.go b/drivers/google_drive/util.go index 975c10d26..8bdd68784 100644 --- a/drivers/google_drive/util.go +++ b/drivers/google_drive/util.go @@ -14,6 +14,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/stream" + "github.com/avast/retry-go" "github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/internal/driver" @@ -260,6 +261,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, if err != nil { return err } + url += "?includeItemsFromAllDrives=true&supportsAllDrives=true" for offset < file.GetSize() { if utils.IsCanceled(ctx) { return ctx.Err() @@ -270,13 +272,40 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, return err } limitedReader := driver.NewLimitedUploadStream(ctx, reader) - _, err = d.request(url, http.MethodPut, func(req *resty.Request) { + err = retry.Do(func() error { reader.Seek(0, io.SeekStart) - req.SetHeaders(map[string]string{ - "Content-Length": strconv.FormatInt(chunkSize, 10), - "Content-Range": fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, file.GetSize()), - }).SetBody(limitedReader).SetContext(ctx) - }, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, + limitedReader) + if err != nil { + return err + } + req.Header = map[string][]string{ + "Authorization": {"Bearer " + d.AccessToken}, + "Content-Length": {strconv.FormatInt(chunkSize, 10)}, + "Content-Range": {fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, file.GetSize())}, + } + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + bytes, _ := io.ReadAll(res.Body) + var e Error + utils.Json.Unmarshal(bytes, &e) + if e.Error.Code != 0 { + if e.Error.Code == 401 { + err = d.refreshToken() + if err != nil { + return err + } + } + return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors) + } + return nil + }, + retry.Attempts(3), + retry.DelayType(retry.BackOffDelay), + retry.Delay(time.Second)) if err != nil { return err } From d2feb37110b7b7a42015edb9da0447a6fe7e4543 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 5 Jul 2025 23:17:41 +0800 Subject: [PATCH 11/20] chore --- drivers/123/upload.go | 3 +-- drivers/123_open/upload.go | 3 +-- drivers/139/driver.go | 7 ++----- drivers/189/util.go | 3 +-- drivers/aliyundrive/driver.go | 3 +-- drivers/aliyundrive_open/upload.go | 2 +- drivers/chaoxing/driver.go | 2 +- drivers/chaoxing/util.go | 2 +- 
drivers/cloudreve/util.go | 13 +++++-------- drivers/cloudreve_v4/util.go | 15 +++++++-------- drivers/dropbox/driver.go | 3 +-- drivers/dropbox/util.go | 6 ++---- drivers/febbox/oauth2.go | 4 ++-- drivers/misskey/util.go | 23 ++++++++++++----------- drivers/onedrive/util.go | 3 +-- drivers/onedrive_app/util.go | 3 +-- drivers/onedrive_sharelink/util.go | 6 +++--- drivers/quark_uc_tv/util.go | 2 +- drivers/s3/doge.go | 2 +- drivers/webdav/odrvcookie/fetch.go | 2 +- pkg/qbittorrent/client.go | 4 ++-- 21 files changed, 48 insertions(+), 63 deletions(-) diff --git a/drivers/123/upload.go b/drivers/123/upload.go index ebff04698..74cd1f43d 100644 --- a/drivers/123/upload.go +++ b/drivers/123/upload.go @@ -139,11 +139,10 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls) } reader.Seek(0, io.SeekStart) - req, err := http.NewRequest("PUT", uploadUrl, rateLimitedRd) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd) if err != nil { return err } - req = req.WithContext(ctx) req.ContentLength = curSize //req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10)) res, err := base.HttpClient.Do(req) diff --git a/drivers/123_open/upload.go b/drivers/123_open/upload.go index 40a72a6e2..9bc9fd091 100644 --- a/drivers/123_open/upload.go +++ b/drivers/123_open/upload.go @@ -115,11 +115,10 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes return err } - req, err := http.NewRequestWithContext(ctx, "PUT", uploadPartUrl, rateLimitedRd) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartUrl, rateLimitedRd) if err != nil { return err } - req = req.WithContext(ctx) req.ContentLength = size res, err := base.HttpClient.Do(req) diff --git a/drivers/139/driver.go b/drivers/139/driver.go index f5ad71e95..d7fe5994a 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -636,11 +636,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr // Update Progress r := io.TeeReader(limitReader, p) - req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r) if err != nil { return err } - req = req.WithContext(ctx) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Length", fmt.Sprint(partSize)) req.Header.Set("Origin", "https://yun.139.com") @@ -805,12 +804,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr limitReader := io.LimitReader(rateLimited, byteSize) // Update Progress r := io.TeeReader(limitReader, p) - req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, resp.Data.UploadResult.RedirectionURL, r) if err != nil { return err } - - req = req.WithContext(ctx) req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName())) req.Header.Set("contentSize", strconv.FormatInt(size, 10)) req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1)) diff --git a/drivers/189/util.go b/drivers/189/util.go index 8b48fcad9..d10c7e8b2 100644 --- a/drivers/189/util.go +++ b/drivers/189/util.go @@ -365,11 +365,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F log.Debugf("uploadData: %+v", uploadData) requestURL := uploadData.RequestURL uploadHeaders := 
strings.Split(decodeURIComponent(uploadData.RequestHeader), "&") - req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) if err != nil { return err } - req = req.WithContext(ctx) for _, v := range uploadHeaders { i := strings.Index(v, "=") req.Header.Set(v[0:i], v[i+1:]) diff --git a/drivers/aliyundrive/driver.go b/drivers/aliyundrive/driver.go index fcceb1be2..df3f301e6 100644 --- a/drivers/aliyundrive/driver.go +++ b/drivers/aliyundrive/driver.go @@ -297,11 +297,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil if d.InternalUpload { url = partInfo.InternalUploadUrl } - req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT)) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, io.LimitReader(rateLimited, DEFAULT)) if err != nil { return err } - req = req.WithContext(ctx) res, err := base.HttpClient.Do(req) if err != nil { return err diff --git a/drivers/aliyundrive_open/upload.go b/drivers/aliyundrive_open/upload.go index 0d08bc726..3ad0f93ae 100644 --- a/drivers/aliyundrive_open/upload.go +++ b/drivers/aliyundrive_open/upload.go @@ -69,7 +69,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo if d.InternalUpload { uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/") } - req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, r) if err != nil { return err } diff --git a/drivers/chaoxing/driver.go b/drivers/chaoxing/driver.go index 798359955..cb12b29f1 100644 --- a/drivers/chaoxing/driver.go +++ b/drivers/chaoxing/driver.go @@ -255,7 +255,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr }, UpdateProgress: up, }) - req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://pan-yz.chaoxing.com/upload", r) if err != nil { return err } diff --git a/drivers/chaoxing/util.go b/drivers/chaoxing/util.go index 03caa1ee8..715c248a8 100644 --- a/drivers/chaoxing/util.go +++ b/drivers/chaoxing/util.go @@ -167,7 +167,7 @@ func (d *ChaoXing) Login() (string, error) { return "", err } // Create the request - req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body) + req, err := http.NewRequest(http.MethodPost, "https://passport2.chaoxing.com/fanyalogin", body) if err != nil { return "", err } diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go index f3778028c..88ff67cc0 100644 --- a/drivers/cloudreve/util.go +++ b/drivers/cloudreve/util.go @@ -260,12 +260,11 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U err = retry.Do( func() error { rd.Seek(0, io.SeekStart) - req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), + req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk), driver.NewLimitedUploadStream(ctx, rd)) if err != nil { return err } - req = req.WithContext(ctx) req.ContentLength = byteSize req.Header.Set("Authorization", fmt.Sprint(credential)) req.Header.Set("User-Agent", d.getUA()) @@ -328,11 +327,10 @@ func (d *Cloudreve) 
upOneDrive(ctx context.Context, stream model.FileStreamer, u err = retry.Do( func() error { rd.Seek(0, io.SeekStart) - req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, rd)) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd)) if err != nil { return err } - req = req.WithContext(ctx) req.ContentLength = byteSize req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) req.Header.Set("User-Agent", d.getUA()) @@ -391,12 +389,11 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa err = retry.Do( func() error { rd.Seek(0, io.SeekStart) - req, err := http.NewRequest("PUT", u.UploadURLs[chunk], + req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadURLs[chunk], driver.NewLimitedUploadStream(ctx, rd)) if err != nil { return err } - req = req.WithContext(ctx) req.ContentLength = byteSize req.Header.Set("User-Agent", d.getUA()) res, err := base.HttpClient.Do(req) @@ -438,8 +435,8 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa )) } bodyBuilder.WriteString("") - req, err := http.NewRequest( - "POST", + req, err := http.NewRequestWithContext(ctx, + http.MethodPost, u.CompleteURL, strings.NewReader(bodyBuilder.String()), ) diff --git a/drivers/cloudreve_v4/util.go b/drivers/cloudreve_v4/util.go index 4380b8ac8..ff340335d 100644 --- a/drivers/cloudreve_v4/util.go +++ b/drivers/cloudreve_v4/util.go @@ -278,12 +278,12 @@ func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u F err = retry.Do( func() error { rd.Seek(0, io.SeekStart) - req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), + req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"?chunk="+strconv.Itoa(chunk), driver.NewLimitedUploadStream(ctx, rd)) if err != nil { return err } - req = req.WithContext(ctx) + req.ContentLength = byteSize req.Header.Set("Authorization", fmt.Sprint(credential)) req.Header.Set("User-Agent", d.getUA()) @@ -345,11 +345,11 @@ func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u err = retry.Do( func() error { rd.Seek(0, io.SeekStart) - req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd)) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd)) if err != nil { return err } - req = req.WithContext(ctx) + req.ContentLength = byteSize req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize())) req.Header.Set("User-Agent", d.getUA()) @@ -408,12 +408,11 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU err = retry.Do( func() error { rd.Seek(0, io.SeekStart) - req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk], + req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.UploadUrls[chunk], driver.NewLimitedUploadStream(ctx, rd)) if err != nil { return err } - req = req.WithContext(ctx) req.ContentLength = byteSize req.Header.Set("User-Agent", d.getUA()) res, err := base.HttpClient.Do(req) @@ -456,8 +455,8 @@ func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileU )) } bodyBuilder.WriteString("") - req, err := http.NewRequest( - "POST", + req, err := http.NewRequestWithContext(ctx, + http.MethodPost, u.CompleteURL, strings.NewReader(bodyBuilder.String()), ) diff --git a/drivers/dropbox/driver.go 
b/drivers/dropbox/driver.go index 4ae3ddf83..000da0861 100644 --- a/drivers/dropbox/driver.go +++ b/drivers/dropbox/driver.go @@ -192,12 +192,11 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt url := d.contentBase + "/2/files/upload_session/append_v2" reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize)) - req, err := http.NewRequest(http.MethodPost, url, reader) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, reader) if err != nil { log.Errorf("failed to update file when append to upload session, err: %+v", err) return err } - req = req.WithContext(ctx) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Authorization", "Bearer "+d.AccessToken) diff --git a/drivers/dropbox/util.go b/drivers/dropbox/util.go index bb71118df..73cb4c8c3 100644 --- a/drivers/dropbox/util.go +++ b/drivers/dropbox/util.go @@ -169,11 +169,10 @@ func (d *Dropbox) getFiles(ctx context.Context, path string) ([]File, error) { func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset int64, sessionId string) error { url := d.contentBase + "/2/files/upload_session/finish" - req, err := http.NewRequest(http.MethodPost, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) if err != nil { return err } - req = req.WithContext(ctx) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Authorization", "Bearer "+d.AccessToken) @@ -214,11 +213,10 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) { url := d.contentBase + "/2/files/upload_session/start" - req, err := http.NewRequest(http.MethodPost, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) if err != nil { return "", err } - req = req.WithContext(ctx) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Authorization", "Bearer "+d.AccessToken) req.Header.Set("Dropbox-API-Arg", "{\"close\":false}") diff --git a/drivers/febbox/oauth2.go b/drivers/febbox/oauth2.go index 6345d1a71..e90291681 100644 --- a/drivers/febbox/oauth2.go +++ b/drivers/febbox/oauth2.go @@ -31,13 +31,13 @@ func (c *customTokenSource) Token() (*oauth2.Token, error) { v.Set("client_id", c.config.ClientID) v.Set("client_secret", c.config.ClientSecret) - req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode())) + req, err := http.NewRequestWithContext(c.ctx, http.MethodPost, c.config.TokenURL, strings.NewReader(v.Encode())) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - resp, err := http.DefaultClient.Do(req.WithContext(c.ctx)) + resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } diff --git a/drivers/misskey/util.go b/drivers/misskey/util.go index 65764f6f7..5e7a0d8db 100644 --- a/drivers/misskey/util.go +++ b/drivers/misskey/util.go @@ -4,6 +4,7 @@ import ( "context" "errors" "io" + "net/http" "time" "github.com/go-resty/resty/v2" @@ -72,7 +73,7 @@ func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) { } else { body = map[string]string{} } - err := d.request("/files", "POST", setBody(body), &files) + err := d.request("/files", http.MethodPost, setBody(body), &files) if err != nil { return []model.Obj{}, err } @@ -89,7 +90,7 @@ func (d *Misskey) getFolders(dir model.Obj) ([]model.Obj, error) { } else { body = map[string]string{} } - err := 
d.request("/folders", "POST", setBody(body), &folders) + err := d.request("/folders", http.MethodPost, setBody(body), &folders) if err != nil { return []model.Obj{}, err } @@ -106,7 +107,7 @@ func (d *Misskey) list(dir model.Obj) ([]model.Obj, error) { func (d *Misskey) link(file model.Obj) (*model.Link, error) { var mFile MFile - err := d.request("/files/show", "POST", setBody(map[string]string{"fileId": file.GetID()}), &mFile) + err := d.request("/files/show", http.MethodPost, setBody(map[string]string{"fileId": file.GetID()}), &mFile) if err != nil { return nil, err } @@ -117,7 +118,7 @@ func (d *Misskey) link(file model.Obj) (*model.Link, error) { func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error) { var folder MFolder - err := d.request("/folders/create", "POST", setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder) + err := d.request("/folders/create", http.MethodPost, setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder) if err != nil { return nil, err } @@ -127,11 +128,11 @@ func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) { if srcObj.IsDir() { var folder MFolder - err := d.request("/folders/update", "POST", setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder) + err := d.request("/folders/update", http.MethodPost, setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder) return mFolder2Object(folder), err } else { var file MFile - err := d.request("/files/update", "POST", setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file) + err := d.request("/files/update", http.MethodPost, setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file) return mFile2Object(file), err } } @@ -139,11 +140,11 @@ func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) { func (d *Misskey) rename(srcObj model.Obj, newName string) (model.Obj, error) { if srcObj.IsDir() { var folder MFolder - err := d.request("/folders/update", "POST", setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder) + err := d.request("/folders/update", http.MethodPost, setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder) return mFolder2Object(folder), err } else { var file MFile - err := d.request("/files/update", "POST", setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file) + err := d.request("/files/update", http.MethodPost, setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file) return mFile2Object(file), err } } @@ -171,7 +172,7 @@ func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) { if err != nil { return nil, err } - err = d.request("/files/upload-from-url", "POST", setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file) + err = d.request("/files/upload-from-url", http.MethodPost, setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file) if err != nil { return nil, err } @@ -181,10 +182,10 @@ func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) { func (d *Misskey) remove(obj model.Obj) error { if obj.IsDir() { - err := d.request("/folders/delete", "POST", setBody(map[string]string{"folderId": obj.GetID()}), nil) + 
err := d.request("/folders/delete", http.MethodPost, setBody(map[string]string{"folderId": obj.GetID()}), nil) return err } else { - err := d.request("/files/delete", "POST", setBody(map[string]string{"fileId": obj.GetID()}), nil) + err := d.request("/files/delete", http.MethodPost, setBody(map[string]string{"fileId": obj.GetID()}), nil) return err } } diff --git a/drivers/onedrive/util.go b/drivers/onedrive/util.go index f04cf7523..672d3c518 100644 --- a/drivers/onedrive/util.go +++ b/drivers/onedrive/util.go @@ -259,11 +259,10 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil err = retry.Do( func() error { rd.Seek(0, io.SeekStart) - req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, rd)) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd)) if err != nil { return err } - req = req.WithContext(ctx) req.ContentLength = byteSize req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) res, err := base.HttpClient.Do(req) diff --git a/drivers/onedrive_app/util.go b/drivers/onedrive_app/util.go index 17c7874b2..71c509de5 100644 --- a/drivers/onedrive_app/util.go +++ b/drivers/onedrive_app/util.go @@ -174,11 +174,10 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model. err = retry.Do( func() error { rd.Seek(0, io.SeekStart) - req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, rd)) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, rd)) if err != nil { return err } - req = req.WithContext(ctx) req.ContentLength = byteSize req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) res, err := base.HttpClient.Do(req) diff --git a/drivers/onedrive_sharelink/util.go b/drivers/onedrive_sharelink/util.go index 9f3480fbe..94d8bd165 100644 --- a/drivers/onedrive_sharelink/util.go +++ b/drivers/onedrive_sharelink/util.go @@ -290,7 +290,7 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) { client := &http.Client{} postUrl := strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/v2.1/graphql" - req, err = http.NewRequest("POST", postUrl, strings.NewReader(graphqlVar)) + req, err = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(graphqlVar)) if err != nil { return nil, err } @@ -323,7 +323,7 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) { graphqlReqNEW := GraphQLNEWRequest{} postUrl = strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream" + nextHref - req, _ = http.NewRequest("POST", postUrl, strings.NewReader(renderListDataAsStreamVar)) + req, _ = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(renderListDataAsStreamVar)) req.Header = tempHeader resp, err := client.Do(req) @@ -339,7 +339,7 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) { for graphqlReqNEW.ListData.NextHref != "" { graphqlReqNEW = GraphQLNEWRequest{} postUrl = strings.Join(redirectSplitURL[:len(redirectSplitURL)-3], "/") + "/_api/web/GetListUsingPath(DecodedUrl=@a1)/RenderListDataAsStream" + nextHref - req, _ = http.NewRequest("POST", postUrl, strings.NewReader(renderListDataAsStreamVar)) + req, _ = http.NewRequest(http.MethodPost, postUrl, strings.NewReader(renderListDataAsStreamVar)) req.Header = tempHeader resp, err := 
client.Do(req) if err != nil { diff --git a/drivers/quark_uc_tv/util.go b/drivers/quark_uc_tv/util.go index f5a18f299..8bdf605b5 100644 --- a/drivers/quark_uc_tv/util.go +++ b/drivers/quark_uc_tv/util.go @@ -136,7 +136,7 @@ func (d *QuarkUCTV) getCode(ctx context.Context) (string, error) { func (d *QuarkUCTV) getRefreshTokenByTV(ctx context.Context, code string, isRefresh bool) error { pathname := "/token" - _, _, reqID := d.generateReqSign("POST", pathname, d.conf.signKey) + _, _, reqID := d.generateReqSign(http.MethodPost, pathname, d.conf.signKey) u := d.conf.codeApi + pathname var resp RefreshTokenAuthResp body := map[string]string{ diff --git a/drivers/s3/doge.go b/drivers/s3/doge.go index 12a584ca4..625c2f27e 100644 --- a/drivers/s3/doge.go +++ b/drivers/s3/doge.go @@ -38,7 +38,7 @@ func getCredentials(AccessKey, SecretKey string) (rst Credentials, err error) { sign := hex.EncodeToString(hmacObj.Sum(nil)) Authorization := "TOKEN " + AccessKey + ":" + sign - req, err := http.NewRequest("POST", "https://api.dogecloud.com"+apiPath, strings.NewReader(string(reqBody))) + req, err := http.NewRequest(http.MethodPost, "https://api.dogecloud.com"+apiPath, strings.NewReader(string(reqBody))) if err != nil { return rst, err } diff --git a/drivers/webdav/odrvcookie/fetch.go b/drivers/webdav/odrvcookie/fetch.go index a6e71a56a..b4eca0772 100644 --- a/drivers/webdav/odrvcookie/fetch.go +++ b/drivers/webdav/odrvcookie/fetch.go @@ -181,7 +181,7 @@ func (ca *CookieAuth) getSPToken() (*SuccessResponse, error) { // Execute the first request which gives us an auth token for the sharepoint service // With this token we can authenticate on the login page and save the returned cookies - req, err := http.NewRequest("POST", loginUrl, buf) + req, err := http.NewRequest(http.MethodPost, loginUrl, buf) if err != nil { return nil, err } diff --git a/pkg/qbittorrent/client.go b/pkg/qbittorrent/client.go index ff86918d5..77d6605e0 100644 --- a/pkg/qbittorrent/client.go +++ b/pkg/qbittorrent/client.go @@ -99,7 +99,7 @@ func (c *client) post(path string, data url.Values) (*http.Response, error) { u := c.url.JoinPath(path) u.User = nil // remove userinfo for requests - req, err := http.NewRequest("POST", u.String(), bytes.NewReader([]byte(data.Encode()))) + req, err := http.NewRequest(http.MethodPost, u.String(), bytes.NewReader([]byte(data.Encode()))) if err != nil { return nil, err } @@ -147,7 +147,7 @@ func (c *client) AddFromLink(link string, savePath string, id string) error { u := c.url.JoinPath("/api/v2/torrents/add") u.User = nil // remove userinfo for requests - req, err := http.NewRequest("POST", u.String(), buf) + req, err := http.NewRequest(http.MethodPost, u.String(), buf) if err != nil { return err } From 6ae2ab5eb5f5458ef7bbf4c4108314cf0192dcea Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sun, 6 Jul 2025 00:52:25 +0800 Subject: [PATCH 12/20] chore --- drivers/123/upload.go | 2 +- drivers/onedrive_sharelink/driver.go | 8 ++++---- drivers/onedrive_sharelink/util.go | 25 +++++++++++++------------ drivers/quark_uc_tv/driver.go | 5 +++-- drivers/quark_uc_tv/util.go | 4 ++-- internal/net/request.go | 2 +- internal/stream/util.go | 2 +- pkg/gowebdav/client.go | 4 ++-- 8 files changed, 27 insertions(+), 25 deletions(-) diff --git a/drivers/123/upload.go b/drivers/123/upload.go index 74cd1f43d..8e955e24f 100644 --- a/drivers/123/upload.go +++ b/drivers/123/upload.go @@ -139,7 +139,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi return fmt.Errorf("upload url is empty, 
s3PreSignedUrls: %+v", s3PreSignedUrls) } reader.Seek(0, io.SeekStart) - req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl, rateLimitedRd) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd) if err != nil { return err } diff --git a/drivers/onedrive_sharelink/driver.go b/drivers/onedrive_sharelink/driver.go index 08ed07e0e..42d0f1904 100644 --- a/drivers/onedrive_sharelink/driver.go +++ b/drivers/onedrive_sharelink/driver.go @@ -38,14 +38,14 @@ func (d *OnedriveSharelink) Init(ctx context.Context) error { d.cron = cron.NewCron(time.Hour * 1) d.cron.Do(func() { var err error - d.Headers, err = d.getHeaders() + d.Headers, err = d.getHeaders(ctx) if err != nil { log.Errorf("%+v", err) } }) // Get initial headers - d.Headers, err = d.getHeaders() + d.Headers, err = d.getHeaders(ctx) if err != nil { return err } @@ -59,7 +59,7 @@ func (d *OnedriveSharelink) Drop(ctx context.Context) error { func (d *OnedriveSharelink) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { path := dir.GetPath() - files, err := d.getFiles(path) + files, err := d.getFiles(ctx, path) if err != nil { return nil, err } @@ -82,7 +82,7 @@ func (d *OnedriveSharelink) Link(ctx context.Context, file model.Obj, args model if d.HeaderTime < time.Now().Unix()-1800 { var err error log.Debug("headers are older than 30 minutes, get new headers") - header, err = d.getHeaders() + header, err = d.getHeaders(ctx) if err != nil { return nil, err } diff --git a/drivers/onedrive_sharelink/util.go b/drivers/onedrive_sharelink/util.go index 94d8bd165..d4cd4229a 100644 --- a/drivers/onedrive_sharelink/util.go +++ b/drivers/onedrive_sharelink/util.go @@ -1,6 +1,7 @@ package onedrive_sharelink import ( + "context" "crypto/tls" "encoding/json" "fmt" @@ -131,7 +132,7 @@ func getAttrValue(n *html.Node, key string) string { } // getHeaders constructs and returns the necessary HTTP headers for accessing the OneDrive share link -func (d *OnedriveSharelink) getHeaders() (http.Header, error) { +func (d *OnedriveSharelink) getHeaders(ctx context.Context) (http.Header, error) { header := http.Header{} header.Set("User-Agent", base.UserAgent) header.Set("accept-language", "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6") @@ -142,7 +143,7 @@ func (d *OnedriveSharelink) getHeaders() (http.Header, error) { if d.ShareLinkPassword == "" { // Create a no-redirect client clientNoDirect := NewNoRedirectCLient() - req, err := http.NewRequest("GET", d.ShareLinkURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, d.ShareLinkURL, nil) if err != nil { return nil, err } @@ -180,9 +181,9 @@ func (d *OnedriveSharelink) getHeaders() (http.Header, error) { } // getFiles retrieves the files from the OneDrive share link at the specified path -func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) { +func (d *OnedriveSharelink) getFiles(ctx context.Context, path string) ([]Item, error) { clientNoDirect := NewNoRedirectCLient() - req, err := http.NewRequest("GET", d.ShareLinkURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, d.ShareLinkURL, nil) if err != nil { return nil, err } @@ -221,11 +222,11 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) { // Get redirectUrl answer, err := clientNoDirect.Do(req) if err != nil { - d.Headers, err = d.getHeaders() + d.Headers, err = d.getHeaders(ctx) if err != nil { return nil, err } - return d.getFiles(path) + return d.getFiles(ctx, path) } defer answer.Body.Close() re := 
regexp.MustCompile(`templateUrl":"(.*?)"`) @@ -298,11 +299,11 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) { resp, err := client.Do(req) if err != nil { - d.Headers, err = d.getHeaders() + d.Headers, err = d.getHeaders(ctx) if err != nil { return nil, err } - return d.getFiles(path) + return d.getFiles(ctx, path) } defer resp.Body.Close() var graphqlReq GraphQLRequest @@ -328,11 +329,11 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) { resp, err := client.Do(req) if err != nil { - d.Headers, err = d.getHeaders() + d.Headers, err = d.getHeaders(ctx) if err != nil { return nil, err } - return d.getFiles(path) + return d.getFiles(ctx, path) } defer resp.Body.Close() json.NewDecoder(resp.Body).Decode(&graphqlReqNEW) @@ -343,11 +344,11 @@ func (d *OnedriveSharelink) getFiles(path string) ([]Item, error) { req.Header = tempHeader resp, err := client.Do(req) if err != nil { - d.Headers, err = d.getHeaders() + d.Headers, err = d.getHeaders(ctx) if err != nil { return nil, err } - return d.getFiles(path) + return d.getFiles(ctx, path) } defer resp.Body.Close() json.NewDecoder(resp.Body).Decode(&graphqlReqNEW) diff --git a/drivers/quark_uc_tv/driver.go b/drivers/quark_uc_tv/driver.go index 83f2b50d9..9cd22ae05 100644 --- a/drivers/quark_uc_tv/driver.go +++ b/drivers/quark_uc_tv/driver.go @@ -3,6 +3,7 @@ package quark_uc_tv import ( "context" "fmt" + "net/http" "strconv" "time" @@ -96,7 +97,7 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs pageSize := int64(100) for { var filesData FilesData - _, err := d.request(ctx, "/file", "GET", func(req *resty.Request) { + _, err := d.request(ctx, "/file", http.MethodGet, func(req *resty.Request) { req.SetQueryParams(map[string]string{ "method": "list", "parent_fid": dir.GetID(), @@ -127,7 +128,7 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { var fileLink FileLink - _, err := d.request(ctx, "/file", "GET", func(req *resty.Request) { + _, err := d.request(ctx, "/file", http.MethodGet, func(req *resty.Request) { req.SetQueryParams(map[string]string{ "method": "download", "group_by": "source", diff --git a/drivers/quark_uc_tv/util.go b/drivers/quark_uc_tv/util.go index 8bdf605b5..b9be1bf46 100644 --- a/drivers/quark_uc_tv/util.go +++ b/drivers/quark_uc_tv/util.go @@ -93,7 +93,7 @@ func (d *QuarkUCTV) getLoginCode(ctx context.Context) (string, error) { QrData string `json:"qr_data"` QueryToken string `json:"query_token"` } - _, err := d.request(ctx, pathname, "GET", func(req *resty.Request) { + _, err := d.request(ctx, pathname, http.MethodGet, func(req *resty.Request) { req.SetQueryParams(map[string]string{ "auth_type": "code", "client_id": d.conf.clientID, @@ -121,7 +121,7 @@ func (d *QuarkUCTV) getCode(ctx context.Context) (string, error) { CommonRsp Code string `json:"code"` } - _, err := d.request(ctx, pathname, "GET", func(req *resty.Request) { + _, err := d.request(ctx, pathname, http.MethodGet, func(req *resty.Request) { req.SetQueryParams(map[string]string{ "client_id": d.conf.clientID, "scope": "netdisk", diff --git a/internal/net/request.go b/internal/net/request.go index 878ce1ee2..1da03b29d 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -557,7 +557,7 @@ type chunk struct { func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) { header := 
http_range.ApplyRangeToHttpHeader(params.Range, params.HeaderRef) - res, err := RequestHttp(ctx, "GET", header, params.URL) + res, err := RequestHttp(ctx, http.MethodGet, header, params.URL) if err != nil { return res, err } diff --git a/internal/stream/util.go b/internal/stream/util.go index f3f80595d..42ab9d33a 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -67,7 +67,7 @@ func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int header := net.ProcessHeader(nil, link.Header) header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header) - return net.RequestHttp(ctx, "GET", header, link.URL) + return net.RequestHttp(ctx, http.MethodGet, header, link.URL) } // 139 cloud does not properly return 206 http status code, add a hack here diff --git a/pkg/gowebdav/client.go b/pkg/gowebdav/client.go index e23a5a25c..7251e0842 100644 --- a/pkg/gowebdav/client.go +++ b/pkg/gowebdav/client.go @@ -383,7 +383,7 @@ func (c *Client) Link(path string) (string, http.Header, error) { // ReadStream reads the stream for a given path func (c *Client) ReadStream(path string, callback func(rq *http.Request)) (io.ReadCloser, http.Header, error) { - rs, err := c.req("GET", path, nil, callback) + rs, err := c.req(http.MethodGet, path, nil, callback) if err != nil { return nil, nil, newPathErrorErr("ReadStream", path, err) } @@ -405,7 +405,7 @@ func (c *Client) ReadStream(path string, callback func(rq *http.Request)) (io.Re // this function will emulate the behavior by skipping `offset` bytes and limiting the result // to `length`. func (c *Client) ReadStreamRange(path string, offset, length int64) (io.ReadCloser, error) { - rs, err := c.req("GET", path, nil, func(r *http.Request) { + rs, err := c.req(http.MethodGet, path, nil, func(r *http.Request) { r.Header.Add("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+length-1)) }) if err != nil { From 70d1bdb02fd529568662e0f4971c10cc3365b991 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Thu, 26 Jun 2025 23:05:56 +0800 Subject: [PATCH 13/20] =?UTF-8?q?chore:=20=E8=AE=A1=E7=AE=97=E5=88=86?= =?UTF-8?q?=E7=89=87=E6=95=B0=E9=87=8F=E7=9A=84=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- drivers/123/upload.go | 9 +++++---- drivers/139/driver.go | 20 ++++++++------------ drivers/189pc/utils.go | 18 ++++++++++-------- drivers/baidu_netdisk/driver.go | 9 +++++---- drivers/baidu_photo/driver.go | 9 +++++---- internal/net/request.go | 10 +++------- 6 files changed, 36 insertions(+), 39 deletions(-) diff --git a/drivers/123/upload.go b/drivers/123/upload.go index 8e955e24f..a8c718674 100644 --- a/drivers/123/upload.go +++ b/drivers/123/upload.go @@ -77,11 +77,12 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi // fetch s3 pre signed urls size := file.GetSize() chunkSize := min(size, 16*utils.MB) - chunkCount := int(size / chunkSize) + chunkCount := 1 + if size > chunkSize { + chunkCount = int((size + chunkSize - 1) / chunkSize) + } lastChunkSize := size % chunkSize - if lastChunkSize > 0 { - chunkCount++ - } else { + if lastChunkSize == 0 { lastChunkSize = chunkSize } // only 1 batch is allowed diff --git a/drivers/139/driver.go b/drivers/139/driver.go index 01d6d31c5..8033eefce 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -531,12 +531,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } size := stream.GetSize() - var partSize = 
d.getPartSize(size) - part := size / partSize - if size%partSize > 0 { - part++ - } else if part == 0 { - part = 1 + partSize := d.getPartSize(size) + part := int64(1) + if size > partSize { + part = (size + partSize - 1) / partSize } partInfos := make([]PartInfo, 0, part) for i := int64(0); i < part; i++ { @@ -787,12 +785,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr size := stream.GetSize() // Progress p := driver.NewProgress(size, up) - var partSize = d.getPartSize(size) - part := size / partSize - if size%partSize > 0 { - part++ - } else if part == 0 { - part = 1 + partSize := d.getPartSize(size) + part := int64(1) + if size > partSize { + part = (size + partSize - 1) / partSize } rateLimited := driver.NewLimitedUploadStream(ctx, stream) for i := int64(0); i < part; i++ { diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index fc7cb98a5..68142ad94 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -505,11 +505,12 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) - count := int(size / sliceSize) + count := 1 + if size > sliceSize { + count = int((size + sliceSize - 1) / sliceSize) + } lastPartSize := size % sliceSize - if lastPartSize > 0 { - count++ - } else { + if lastPartSize == 0 { lastPartSize = sliceSize } fileMd5 := utils.MD5.NewFunc() @@ -620,11 +621,12 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode cache = tmpF } sliceSize := partSize(size) - count := int(size / sliceSize) + count := 1 + if size > sliceSize { + count = int((size + sliceSize - 1) / sliceSize) + } lastSliceSize := size % sliceSize - if lastSliceSize > 0 { - count++ - } else { + if lastSliceSize == 0 { lastSliceSize = sliceSize } diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 9bbaa3aec..0fa94e885 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -203,11 +203,12 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F streamSize := stream.GetSize() sliceSize := d.getSliceSize(streamSize) - count := int(streamSize / sliceSize) + count := 1 + if streamSize > sliceSize { + count = int((streamSize + sliceSize - 1) / sliceSize) + } lastBlockSize := streamSize % sliceSize - if lastBlockSize > 0 { - count++ - } else { + if lastBlockSize == 0 { lastBlockSize = sliceSize } diff --git a/drivers/baidu_photo/driver.go b/drivers/baidu_photo/driver.go index bbd6e74ee..00e36ee65 100644 --- a/drivers/baidu_photo/driver.go +++ b/drivers/baidu_photo/driver.go @@ -262,11 +262,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil // 计算需要的数据 streamSize := stream.GetSize() - count := int(streamSize / DEFAULT) + count := 1 + if streamSize > DEFAULT { + count = int((streamSize + DEFAULT - 1) / DEFAULT) + } lastBlockSize := streamSize % DEFAULT - if lastBlockSize > 0 { - count++ - } else { + if lastBlockSize == 0 { lastBlockSize = DEFAULT } diff --git a/internal/net/request.go b/internal/net/request.go index cd57fc27e..5e98f490d 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -163,17 +163,13 @@ func (d *downloader) download() (io.ReadCloser, error) { return nil, err } - maxPart := int(d.params.Range.Length / int64(d.cfg.PartSize)) - if d.params.Range.Length%int64(d.cfg.PartSize) > 0 { - maxPart++ + maxPart := 1 + if d.params.Range.Length > int64(d.cfg.PartSize) { + maxPart = 
int((d.params.Range.Length + int64(d.cfg.PartSize) - 1) / int64(d.cfg.PartSize)) } if maxPart < d.cfg.Concurrency { d.cfg.Concurrency = maxPart } - if d.params.Range.Length == 0 { - d.cfg.Concurrency = 1 - } - log.Debugf("cfgConcurrency:%d", d.cfg.Concurrency) if maxPart == 1 { From 76c9977e8fcf1e70f3c03aadef2e36e5e37b22bc Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 2 Aug 2025 23:36:48 +0800 Subject: [PATCH 14/20] =?UTF-8?q?MaxBufferLimit=E8=87=AA=E5=8A=A8=E6=8C=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/bootstrap/config.go | 4 +--- internal/conf/var.go | 1 - internal/net/request.go | 6 +++--- internal/stream/stream.go | 3 +-- internal/stream/util.go | 23 +---------------------- pkg/utils/buf.go | 29 +++++++++++++++++++++++++++++ 6 files changed, 35 insertions(+), 31 deletions(-) create mode 100644 pkg/utils/buf.go diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go index 98eb2368e..3d801f60a 100644 --- a/internal/bootstrap/config.go +++ b/internal/bootstrap/config.go @@ -79,9 +79,7 @@ func InitConfig() { if conf.Conf.MaxConcurrency > 0 { net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency} } - if conf.Conf.MaxBufferLimit < 0 { - conf.MaxBufferLimit = 64 * utils.MB - } + if !conf.Conf.Force { confFromEnv() } diff --git a/internal/conf/var.go b/internal/conf/var.go index 50a7f33d5..8af45ca3e 100644 --- a/internal/conf/var.go +++ b/internal/conf/var.go @@ -25,7 +25,6 @@ var PrivacyReg []*regexp.Regexp var ( // StoragesLoaded loaded success if empty StoragesLoaded = false - MaxBufferLimit int ) var ( RawIndexHtml string diff --git a/internal/net/request.go b/internal/net/request.go index 5e98f490d..d7f619423 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -12,7 +12,6 @@ import ( "sync" "time" - "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/utils" @@ -85,8 +84,9 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo if impl.cfg.PartSize == 0 { impl.cfg.PartSize = DefaultDownloadPartSize } - if conf.MaxBufferLimit > 0 && impl.cfg.PartSize > conf.MaxBufferLimit { - impl.cfg.PartSize = conf.MaxBufferLimit + maxBufferLimit := utils.MaxBufferLimit() + if maxBufferLimit > 0 && impl.cfg.PartSize > maxBufferLimit { + impl.cfg.PartSize = maxBufferLimit } if impl.cfg.HttpClient == nil { impl.cfg.HttpClient = DefaultHttpRequestFunc diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 387bf0360..3a454168c 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -9,7 +9,6 @@ import ( "math" "os" - "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/http_range" @@ -120,7 +119,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) { return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil } - if size <= int64(conf.MaxBufferLimit) { + if size <= int64(utils.MaxBufferLimit()) { bufSize := min(size, f.GetSize()) // 使用bytes.Buffer作为io.CopyBuffer的写入对象,CopyBuffer会调用Buffer.ReadFrom // 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大 diff --git a/internal/stream/util.go b/internal/stream/util.go index 7b9c4fb0b..1b85a54ed 100644 --- a/internal/stream/util.go +++ 
b/internal/stream/util.go @@ -200,7 +200,7 @@ type StreamSectionReader struct { func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSectionReader, error) { ss := &StreamSectionReader{file: file} if file.GetFile() == nil { - if bufMaxLen > conf.MaxBufferLimit { + if bufMaxLen > utils.MaxBufferLimit() { _, err := file.CacheFullInTempFile() if err != nil { return nil, err @@ -253,27 +253,6 @@ func (ss *StreamSectionReader) RecycleSectionReader(sr *SectionReader) { } } -// func (ss *StreamSectionReader) GetBytes(sr *SectionReader) ([]byte, error) { -// if sr != nil && ss.bufPool != nil { -// ss.mu.Lock() -// defer ss.mu.Unlock() -// buf := sr.buf -// if buf == nil { -// buf := ss.bufPool.Get().([]byte) -// n, err := io.ReadFull(sr, buf) -// if err == io.EOF && n > 0 { -// err = nil -// } -// if err != nil { -// return nil, err -// } -// sr.buf = buf[:n] -// } -// return sr.buf, nil -// } -// return nil, errors.New("SectionReader is nil") -// } - type SectionReader struct { io.ReadSeeker buf []byte diff --git a/pkg/utils/buf.go b/pkg/utils/buf.go new file mode 100644 index 000000000..d7ac4ba2e --- /dev/null +++ b/pkg/utils/buf.go @@ -0,0 +1,29 @@ +package utils + +import ( + "github.com/OpenListTeam/OpenList/v4/internal/conf" + "github.com/OpenListTeam/OpenList/v4/pkg/singleflight" + "github.com/shirou/gopsutil/v4/mem" +) + +var ( + maxBufferLimit = 16 * MB +) + +func updateMaxBufferLimit() (error, error) { + m, err := mem.VirtualMemory() + if err != nil { + return nil, nil + } + maxBufferLimit = int(min(float64(m.Total)*0.05, float64(m.Available)*0.1)) + maxBufferLimit = max(maxBufferLimit, 4*MB) + return nil, nil +} + +func MaxBufferLimit() int { + if conf.Conf.MaxBufferLimit >= 0 { + return conf.Conf.MaxBufferLimit + } + _, _, _ = singleflight.ErrorGroup.Do("updateMaxBufferLimit", updateMaxBufferLimit) + return maxBufferLimit +} From 532135118f82fdeb1a0e7ac18bd6d6f26909407e Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sun, 3 Aug 2025 00:27:29 +0800 Subject: [PATCH 15/20] =?UTF-8?q?MaxBufferLimit=E8=87=AA=E5=8A=A8=E6=8C=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/bootstrap/config.go | 13 ++++++++++++- internal/conf/var.go | 1 + internal/net/request.go | 6 +++--- internal/stream/stream.go | 3 ++- internal/stream/util.go | 2 +- pkg/utils/buf.go | 29 ----------------------------- 6 files changed, 19 insertions(+), 35 deletions(-) delete mode 100644 pkg/utils/buf.go diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go index 3d801f60a..0a2a536f0 100644 --- a/internal/bootstrap/config.go +++ b/internal/bootstrap/config.go @@ -12,6 +12,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/net" "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/caarlos0/env/v9" + "github.com/shirou/gopsutil/v4/mem" log "github.com/sirupsen/logrus" ) @@ -79,7 +80,17 @@ func InitConfig() { if conf.Conf.MaxConcurrency > 0 { net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency} } - + if conf.Conf.MaxBufferLimit < 0 { + m, _ := mem.VirtualMemory() + if m != nil { + maxBufferLimit := int(min(float64(m.Total)*0.05, float64(m.Available)*0.1)) + maxBufferLimit = max(maxBufferLimit, 4*utils.MB) + conf.MaxBufferLimit = maxBufferLimit + } else { + conf.MaxBufferLimit = 16 * utils.MB + } + } + log.Infof("max buffer limit: %d", conf.MaxBufferLimit) if !conf.Conf.Force { confFromEnv() } diff --git a/internal/conf/var.go b/internal/conf/var.go index 8af45ca3e..50a7f33d5 100644 
--- a/internal/conf/var.go +++ b/internal/conf/var.go @@ -25,6 +25,7 @@ var PrivacyReg []*regexp.Regexp var ( // StoragesLoaded loaded success if empty StoragesLoaded = false + MaxBufferLimit int ) var ( RawIndexHtml string diff --git a/internal/net/request.go b/internal/net/request.go index d7f619423..5e98f490d 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -12,6 +12,7 @@ import ( "sync" "time" + "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/utils" @@ -84,9 +85,8 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo if impl.cfg.PartSize == 0 { impl.cfg.PartSize = DefaultDownloadPartSize } - maxBufferLimit := utils.MaxBufferLimit() - if maxBufferLimit > 0 && impl.cfg.PartSize > maxBufferLimit { - impl.cfg.PartSize = maxBufferLimit + if conf.MaxBufferLimit > 0 && impl.cfg.PartSize > conf.MaxBufferLimit { + impl.cfg.PartSize = conf.MaxBufferLimit } if impl.cfg.HttpClient == nil { impl.cfg.HttpClient = DefaultHttpRequestFunc diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 3a454168c..387bf0360 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -9,6 +9,7 @@ import ( "math" "os" + "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/http_range" @@ -119,7 +120,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) { return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil } - if size <= int64(utils.MaxBufferLimit()) { + if size <= int64(conf.MaxBufferLimit) { bufSize := min(size, f.GetSize()) // 使用bytes.Buffer作为io.CopyBuffer的写入对象,CopyBuffer会调用Buffer.ReadFrom // 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大 diff --git a/internal/stream/util.go b/internal/stream/util.go index 1b85a54ed..8f794d72c 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -200,7 +200,7 @@ type StreamSectionReader struct { func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSectionReader, error) { ss := &StreamSectionReader{file: file} if file.GetFile() == nil { - if bufMaxLen > utils.MaxBufferLimit() { + if bufMaxLen > conf.MaxBufferLimit { _, err := file.CacheFullInTempFile() if err != nil { return nil, err diff --git a/pkg/utils/buf.go b/pkg/utils/buf.go deleted file mode 100644 index d7ac4ba2e..000000000 --- a/pkg/utils/buf.go +++ /dev/null @@ -1,29 +0,0 @@ -package utils - -import ( - "github.com/OpenListTeam/OpenList/v4/internal/conf" - "github.com/OpenListTeam/OpenList/v4/pkg/singleflight" - "github.com/shirou/gopsutil/v4/mem" -) - -var ( - maxBufferLimit = 16 * MB -) - -func updateMaxBufferLimit() (error, error) { - m, err := mem.VirtualMemory() - if err != nil { - return nil, nil - } - maxBufferLimit = int(min(float64(m.Total)*0.05, float64(m.Available)*0.1)) - maxBufferLimit = max(maxBufferLimit, 4*MB) - return nil, nil -} - -func MaxBufferLimit() int { - if conf.Conf.MaxBufferLimit >= 0 { - return conf.Conf.MaxBufferLimit - } - _, _, _ = singleflight.ErrorGroup.Do("updateMaxBufferLimit", updateMaxBufferLimit) - return maxBufferLimit -} From e1a5c4fcd203f006210894ffcecd651c4e81373a Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Mon, 4 Aug 2025 20:18:11 +0800 Subject: [PATCH 16/20] 189pc --- drivers/189pc/utils.go | 80 
++++++++++++++++++++++++++---------- internal/bootstrap/config.go | 4 +- 2 files changed, 60 insertions(+), 24 deletions(-) diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index 68142ad94..e2b09770c 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "encoding/xml" "fmt" + "hash" "io" "net/http" "net/http/cookiejar" @@ -16,6 +17,7 @@ import ( "sort" "strconv" "strings" + "sync" "time" "github.com/OpenListTeam/OpenList/v4/drivers/base" @@ -472,7 +474,7 @@ func (y *Cloud189PC) refreshSession() (err error) { // 无法上传大小为0的文件 func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { size := file.GetSize() - sliceSize := partSize(size) + sliceSize := min(size, partSize(size)) params := Params{ "parentFolderId": dstDir.GetID(), @@ -499,8 +501,12 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo if err != nil { return nil, err } + ss, err := stream.NewStreamSectionReader(file, int(sliceSize)) + if err != nil { + return nil, err + } - threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread, + threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, y.uploadThread, retry.Attempts(3), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) @@ -513,31 +519,59 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo if lastPartSize == 0 { lastPartSize = sliceSize } - fileMd5 := utils.MD5.NewFunc() - silceMd5 := utils.MD5.NewFunc() + silceMd5Hexs := make([]string, 0, count) - teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)) - byteSize := sliceSize + silceMd5 := utils.MD5.NewFunc() + var writers io.Writer = silceMd5 + + fileMd5Hex := file.GetHash().GetHash(utils.MD5) + var fileMd5 hash.Hash + if len(fileMd5Hex) != utils.MD5.Width { + fileMd5 = utils.MD5.NewFunc() + writers = io.MultiWriter(silceMd5, fileMd5) + } + mu := &sync.Mutex{} for i := 1; i <= count; i++ { if utils.IsCanceled(upCtx) { break } + offset := int64((i)-1) * sliceSize + size := sliceSize if i == count { - byteSize = lastPartSize - } - byteData := make([]byte, byteSize) - // 读取块 - silceMd5.Reset() - if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil { - return nil, err + size = lastPartSize } + partInfo := "" + var reader *stream.SectionReader + var rateLimitedRd io.Reader + generateSectionReader := func() error { + mu.Lock() + defer mu.Unlock() + if reader == nil { + var err error + reader, err = ss.GetSectionReader(offset, size) + if err != nil { + return err + } + silceMd5.Reset() + w, _ := utils.CopyWithBuffer(writers, reader) + if w != size { + return fmt.Errorf("stream read did not get all data, expect =%d ,actual =%d", size, w) + } + // 计算块md5并进行hex和base64编码 + md5Bytes := silceMd5.Sum(nil) + silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes))) + partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes)) - // 计算块md5并进行hex和base64编码 - md5Bytes := silceMd5.Sum(nil) - silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes))) - partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes)) - - threadG.Go(func(ctx context.Context) error { + rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader) + } + return nil + } + threadG.GoWithResult(func(ctx context.Context) error { + err := generateSectionReader() + if err != nil { + return err + } + 
 			uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
 			if err != nil {
 				return err
 			}
@@ -546,19 +580,23 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 			// step.4 upload the slice
 			uploadUrl := uploadUrls[0]
 			_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
-				driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
+				driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
 			if err != nil {
 				return err
 			}
 			up(float64(threadG.Success()) * 100 / float64(count))
 			return nil
+		}, func(err error) {
+			ss.RecycleSectionReader(reader)
 		})
 	}
 	if err = threadG.Wait(); err != nil {
 		return nil, err
 	}
-	fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
+	if fileMd5 != nil {
+		fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
+	}
 	sliceMd5Hex := fileMd5Hex
 	if file.GetSize() > sliceSize {
 		sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go
index 0a2a536f0..bcf75a320 100644
--- a/internal/bootstrap/config.go
+++ b/internal/bootstrap/config.go
@@ -83,9 +83,7 @@ func InitConfig() {
 	if conf.Conf.MaxBufferLimit < 0 {
 		m, _ := mem.VirtualMemory()
 		if m != nil {
-			maxBufferLimit := int(min(float64(m.Total)*0.05, float64(m.Available)*0.1))
-			maxBufferLimit = max(maxBufferLimit, 4*utils.MB)
-			conf.MaxBufferLimit = maxBufferLimit
+			conf.MaxBufferLimit = max(int(float64(m.Total)*0.05), 4*utils.MB)
 		} else {
 			conf.MaxBufferLimit = 16 * utils.MB
 		}

From ae434c1ab55ea6697d812bc20136c8d01369deac Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Mon, 4 Aug 2025 22:35:47 +0800
Subject: [PATCH 17/20] errorgroup: add Lifecycle
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 drivers/123/upload.go      | 97 ++++++++++++++++++++------------------
 drivers/123_open/upload.go | 60 ++++++++++++-----------
 drivers/189pc/utils.go     | 87 +++++++++++++++++-----------------
 drivers/doubao/util.go     | 52 ++++++++++++--------
 internal/stream/util.go    |  8 +---
 pkg/errgroup/errgroup.go   | 55 ++++++++++++++++-----
 6 files changed, 203 insertions(+), 156 deletions(-)

diff --git a/drivers/123/upload.go b/drivers/123/upload.go
index a8c718674..b15e55cf2 100644
--- a/drivers/123/upload.go
+++ b/drivers/123/upload.go
@@ -104,7 +104,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		retry.DelayType(retry.BackOffDelay))
 	for i := 1; i <= chunkCount; i += batchSize {
 		if utils.IsCanceled(uploadCtx) {
-			return uploadCtx.Err()
+			break
 		}
 		start := i
 		end := min(i+batchSize, chunkCount+1)
@@ -125,60 +125,65 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		}
 		var reader *stream.SectionReader
 		var rateLimitedRd io.Reader
-		threadG.GoWithResult(func(ctx context.Context) error {
-			if reader == nil {
-				var err error
-				reader, err = ss.GetSectionReader(offset, curSize)
-				if err != nil {
-					return err
-				}
-				rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
-			}
-			reader.Seek(0, io.SeekStart)
-			uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
-			if uploadUrl == "" {
-				return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
-			}
-			reader.Seek(0, io.SeekStart)
-			req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
-			if err != nil {
-				return err
-			}
-			req.ContentLength = curSize
-			//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
-			res, err := base.HttpClient.Do(req)
-			if err != nil {
-				return err
-			}
-			defer res.Body.Close()
-			if res.StatusCode == http.StatusForbidden {
-				_, err, _ := uploadG.Do(key, func() (*S3PreSignedURLs, error) {
-					newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
+		threadG.GoWithLifecycle(errgroup.Lifecycle{
+			Before: func(ctx context.Context) error {
+				if reader == nil {
+					var err error
+					reader, err = ss.GetSectionReader(offset, curSize)
 					if err != nil {
-						return nil, err
+						return err
 					}
-					s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
-					return newS3PreSignedUrls, nil
-				})
+					rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+				}
+				return nil
+			},
+			Do: func(ctx context.Context) error {
+				reader.Seek(0, io.SeekStart)
+				uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
+				if uploadUrl == "" {
+					return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
+				}
+				reader.Seek(0, io.SeekStart)
+				req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, rateLimitedRd)
 				if err != nil {
 					return err
 				}
-				return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
-			}
-			if res.StatusCode != http.StatusOK {
-				body, err := io.ReadAll(res.Body)
+				req.ContentLength = curSize
+				//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
+				res, err := base.HttpClient.Do(req)
 				if err != nil {
 					return err
 				}
-				return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
-			}
-			progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
-			up(progress)
-			return nil
-		}, func(err error) {
-			ss.RecycleSectionReader(reader)
+				defer res.Body.Close()
+				if res.StatusCode == http.StatusForbidden {
+					_, err, _ := uploadG.Do(key, func() (*S3PreSignedURLs, error) {
+						newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
+						if err != nil {
+							return nil, err
+						}
+						s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
+						return newS3PreSignedUrls, nil
+					})
+					if err != nil {
+						return err
+					}
+					return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
+				}
+				if res.StatusCode != http.StatusOK {
+					body, err := io.ReadAll(res.Body)
+					if err != nil {
+						return err
+					}
+					return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
+				}
+				progress := 10.0 + 85.0*float64(threadG.Success())/float64(chunkCount)
+				up(progress)
+				return nil
+			},
+			After: func(err error) {
+				ss.RecycleSectionReader(reader)
+			},
 		})
-		// err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, reader, curSize, false, getS3UploadUrl)
 		}
 	}
 	if err := threadG.Wait(); err != nil {
diff --git a/drivers/123_open/upload.go b/drivers/123_open/upload.go
index 9bc9fd091..3f2ec70c6 100644
--- a/drivers/123_open/upload.go
+++ b/drivers/123_open/upload.go
@@ -92,7 +92,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 	}
 	for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
 		if utils.IsCanceled(uploadCtx) {
-			return uploadCtx.Err()
+			break
 		}
 		partIndex := partIndex
 		partNumber := partIndex + 1 // part numbers start at 1
@@ -100,38 +100,44 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
 		size := min(chunkSize, size-offset)
 		var reader *stream.SectionReader
 		var rateLimitedRd io.Reader
-		threadG.GoWithResult(func(ctx context.Context) error {
-			if reader == nil {
-				var err error
-				reader, err = ss.GetSectionReader(offset, size)
+		threadG.GoWithLifecycle(errgroup.Lifecycle{
+			Before: func(ctx context.Context) error {
+				if reader == nil {
+					var err error
+					reader, err = ss.GetSectionReader(offset, size)
+					if err != nil {
+						return err
+					}
+					rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+				}
+				return nil
+			},
+			Do: func(ctx context.Context) error {
+				reader.Seek(0, io.SeekStart)
+				uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
 				if err != nil {
 					return err
 				}
-				rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
-			}
-			reader.Seek(0, io.SeekStart)
-			uploadPartUrl, err := d.url(createResp.Data.PreuploadID, partNumber)
-			if err != nil {
-				return err
-			}
-			req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartUrl, rateLimitedRd)
-			if err != nil {
-				return err
-			}
-			req.ContentLength = size
+				req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartUrl, rateLimitedRd)
+				if err != nil {
+					return err
+				}
+				req.ContentLength = size
 
-			res, err := base.HttpClient.Do(req)
-			if err != nil {
-				return err
-			}
-			_ = res.Body.Close()
+				res, err := base.HttpClient.Do(req)
+				if err != nil {
+					return err
+				}
+				_ = res.Body.Close()
 
-			progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
-			up(progress)
-			return nil
-		}, func(err error) {
-			ss.RecycleSectionReader(reader)
+				progress := 10.0 + 85.0*float64(threadG.Success())/float64(uploadNums)
+				up(progress)
+				return nil
+			},
+			After: func(err error) {
+				ss.RecycleSectionReader(reader)
+			},
 		})
 	}
 
diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go
index e2b09770c..00fbe2975 100644
--- a/drivers/189pc/utils.go
+++ b/drivers/189pc/utils.go
@@ -17,7 +17,6 @@ import (
 	"sort"
 	"strconv"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -530,7 +529,6 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		fileMd5 = utils.MD5.NewFunc()
 		writers = io.MultiWriter(silceMd5, fileMd5)
 	}
-	mu := &sync.Mutex{}
 	for i := 1; i <= count; i++ {
 		if utils.IsCanceled(upCtx) {
 			break
@@ -543,52 +541,50 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		partInfo := ""
 		var reader *stream.SectionReader
 		var rateLimitedRd io.Reader
-		generateSectionReader := func() error {
-			mu.Lock()
-			defer mu.Unlock()
-			if reader == nil {
-				var err error
-				reader, err = ss.GetSectionReader(offset, size)
+		threadG.GoWithLifecycle(errgroup.Lifecycle{
+			Before: func(ctx context.Context) error {
+				if reader == nil {
+					var err error
+					reader, err = ss.GetSectionReader(offset, size)
+					if err != nil {
+						return err
+					}
+					silceMd5.Reset()
+					w, _ := utils.CopyWithBuffer(writers, reader)
+					if w != size {
+						return fmt.Errorf("can't read data, expected=%d, got=%d", size, w)
+					}
+					// compute the chunk md5, then hex- and base64-encode it
+					md5Bytes := silceMd5.Sum(nil)
+					silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
+					partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
+
+					rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+				}
+				return nil
+			},
+			Do: func(ctx context.Context) error {
+				reader.Seek(0, io.SeekStart)
+				uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
 				if err != nil {
 					return err
 				}
-			silceMd5.Reset()
-			w, _ := utils.CopyWithBuffer(writers, reader)
-			if w != size {
-				return fmt.Errorf("stream read did not get all data, expect =%d ,actual =%d", size, w)
-			}
-			// compute the chunk md5, then hex- and base64-encode it
-			md5Bytes := silceMd5.Sum(nil)
-			silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
-			partInfo = fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
-
-			rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
-			}
-			return nil
-		}
-		threadG.GoWithResult(func(ctx context.Context) error {
-			err := generateSectionReader()
-			if err != nil {
-				return err
-			}
-			reader.Seek(0, io.SeekStart)
-			uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
-			if err != nil {
-				return err
-			}
-			// step.4 upload the slice
-			uploadUrl := uploadUrls[0]
-			_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
-				driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
-			if err != nil {
-				return err
-			}
-			up(float64(threadG.Success()) * 100 / float64(count))
-			return nil
-		}, func(err error) {
-			ss.RecycleSectionReader(reader)
-		})
+				// step.4 upload the slice
+				uploadUrl := uploadUrls[0]
+				_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
+					driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
+				if err != nil {
+					return err
+				}
+				up(float64(threadG.Success()) * 100 / float64(count))
+				return nil
+			},
+			After: func(err error) {
+				ss.RecycleSectionReader(reader)
+			},
+		},
+		)
 	}
 	if err = threadG.Wait(); err != nil {
 		return nil, err
@@ -778,7 +774,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
 		}
 
 		// step.4 upload the slice
-		_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
+		rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))
+		_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
 		if err != nil {
 			return err
 		}
diff --git a/drivers/doubao/util.go b/drivers/doubao/util.go
index 974b77f01..3db2836e0 100644
--- a/drivers/doubao/util.go
+++ b/drivers/doubao/util.go
@@ -551,13 +551,16 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
 	up(10.0) // update progress
 	// set up parallel upload
 	thread := min(int(totalParts), d.uploadThread)
-	threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, thread,
-		retry.Attempts(1),
+	threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
+		retry.Attempts(MaxRetryAttempts),
 		retry.Delay(time.Second),
-		retry.DelayType(retry.BackOffDelay))
+		retry.DelayType(retry.BackOffDelay),
+		retry.MaxJitter(200*time.Millisecond),
+	)
 	var partsMutex sync.Mutex
 	// upload all parts in parallel
+	hash := crc32.NewIEEE()
 	for partIndex := range totalParts {
 		if utils.IsCanceled(uploadCtx) {
 			break
@@ -570,21 +573,30 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
 		if partIndex == totalParts-1 {
 			size = fileSize - offset
 		}
-
-		threadG.Go(func(ctx context.Context) error {
-			reader, err := ss.GetSectionReader(offset, size)
-			defer ss.RecycleSectionReader(reader)
-			if err != nil {
-				return err
-			}
-			hash := crc32.NewIEEE()
-			utils.CopyWithBuffer(hash, reader)
-			crc32Value := hex.EncodeToString(hash.Sum(nil))
-			rateLimitedRd := driver.NewLimitedUploadStream(uploadCtx, reader)
-			return d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
-				// upload the part with _retryOperation
+		var reader *stream.SectionReader
+		var rateLimitedRd io.Reader
+		crc32Value := ""
+		threadG.GoWithLifecycle(errgroup.Lifecycle{
+			Before: func(ctx context.Context) error {
+				if reader == nil {
+					var err error
+					reader, err = ss.GetSectionReader(offset, size)
+					if err != nil {
+						return err
+					}
+					hash.Reset()
+					w, _ := utils.CopyWithBuffer(hash, reader)
+					if w != size {
+						return fmt.Errorf("can't read data, expected=%d, got=%d", size, w)
+					}
+					crc32Value = hex.EncodeToString(hash.Sum(nil))
+					rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
+				}
+				return nil
+			},
+			Do: func(ctx context.Context) error {
 				reader.Seek(0, io.SeekStart)
-				req, err := http.NewRequestWithContext(uploadCtx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd)
+				req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s?uploadid=%s&part_number=%d&phase=transfer", uploadUrl, uploadID, partNumber), rateLimitedRd)
 				if err != nil {
 					return err
 				}
@@ -624,8 +636,10 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
 				progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
 				up(math.Min(progress, 95.0))
 				return nil
-
-			})
+			},
+			After: func(err error) {
+				ss.RecycleSectionReader(reader)
+			},
 		})
 	}
 
diff --git a/internal/stream/util.go b/internal/stream/util.go
index 8f794d72c..c53e783ed 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -193,7 +193,6 @@ func CacheFullInTempFileAndHash(stream model.FileStreamer, up model.UpdateProgre
 type StreamSectionReader struct {
 	file    model.FileStreamer
 	off     int64
-	mu      sync.Mutex
 	bufPool *sync.Pool
 }
 
@@ -216,9 +215,8 @@ func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSect
 	return ss, nil
 }
 
+// not thread-safe
 func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionReader, error) {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
 	var cache io.ReaderAt = ss.file.GetFile()
 	var buf []byte
 	if cache == nil {
@@ -232,7 +230,7 @@ func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionRead
 		return nil, err
 	}
 	if int64(n) != length {
-		return nil, fmt.Errorf("stream read did not get all data, expect =%d ,actual =%d", length, n)
+		return nil, fmt.Errorf("can't read data, expected=%d, got=%d", length, n)
 	}
 	ss.off += int64(n)
 	off = 0
@@ -243,8 +241,6 @@ func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionRead
 
 func (ss *StreamSectionReader) RecycleSectionReader(sr *SectionReader) {
 	if sr != nil {
-		ss.mu.Lock()
-		defer ss.mu.Unlock()
 		if sr.buf != nil {
 			ss.bufPool.Put(sr.buf[0:cap(sr.buf)])
 			sr.buf = nil
diff --git a/pkg/errgroup/errgroup.go b/pkg/errgroup/errgroup.go
index 9544edd9e..daf1b3156 100644
--- a/pkg/errgroup/errgroup.go
+++ b/pkg/errgroup/errgroup.go
@@ -28,10 +28,10 @@ func NewGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Opti
 	return (&Group{cancel: cancel, ctx: ctx, opts: append(retryOpts, retry.Context(ctx))}).SetLimit(limit), ctx
 }
 
+// OrderedGroup
 func NewOrderedGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Option) (*Group, context.Context) {
 	group, ctx := NewGroupWithContext(ctx, limit, retryOpts...)
 	group.startChan = make(chan token, 1)
-	group.startChan <- token{}
 	return group, ctx
 }
 
@@ -48,30 +48,59 @@ func (g *Group) Wait() error {
 	return context.Cause(g.ctx)
 }
 
-func (g *Group) Go(f func(ctx context.Context) error) {
-	g.GoWithResult(f, nil)
+func (g *Group) Go(do func(ctx context.Context) error) {
+	g.GoWithLifecycle(Lifecycle{Do: do})
 }
 
-func (g *Group) GoWithResult(f func(ctx context.Context) error, result func(err error)) {
+type Lifecycle struct {
+	// Before is thread-safe in an OrderedGroup
+	Before func(ctx context.Context) error
+	// Do is not called if Before returns an err
+	Do func(ctx context.Context) error
+	// After is called last
+	After func(err error)
+}
+
+func (g *Group) GoWithLifecycle(lifecycle Lifecycle) {
 	if g.startChan != nil {
-		<-g.startChan
+		select {
+		case <-g.ctx.Done():
+			return
+		case g.startChan <- token{}:
+		}
 	}
+
 	if g.sem != nil {
-		g.sem <- token{}
+		select {
+		case <-g.ctx.Done():
+			return
+		case g.sem <- token{}:
+		}
 	}
 	g.wg.Add(1)
 	go func() {
-		if g.startChan != nil {
-			g.startChan <- token{}
-		}
 		defer g.done()
-		err := retry.Do(func() error { return f(g.ctx) }, g.opts...)
-		if result != nil {
-			result(err)
+		var err error
+		if lifecycle.Before != nil {
+			err = lifecycle.Before(g.ctx)
+		}
+		if err == nil {
+			if g.startChan != nil {
+				<-g.startChan
+			}
+			err = retry.Do(func() error { return lifecycle.Do(g.ctx) }, g.opts...)
+		}
+		if lifecycle.After != nil {
+			lifecycle.After(err)
 		}
 		if err != nil {
-			g.cancel(err)
+			select {
+			case <-g.ctx.Done():
+				return
+			default:
+				g.cancel(err)
+			}
 		}
 	}()

From db5ba60ad57467c94d8a1048fc8e49e0d87f83dc Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Mon, 4 Aug 2025 23:46:31 +0800
Subject: [PATCH 18/20] fill in the gaps
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 drivers/115_open/upload.go         |  5 ++---
 drivers/123/upload.go              | 11 ++++-------
 drivers/aliyundrive_open/upload.go |  1 +
 drivers/doubao/util.go             | 17 ++++++++++-------
 drivers/ftp/util.go                |  4 ++--
 drivers/google_drive/util.go       |  4 ++--
 drivers/sftp/util.go               |  4 ++--
 drivers/smb/util.go                |  4 ++--
 internal/stream/util.go            |  3 ++-
 pkg/singleflight/var.go            |  2 +-
 10 files changed, 28 insertions(+), 27 deletions(-)

diff --git a/drivers/115_open/upload.go b/drivers/115_open/upload.go
index e1af7c80d..9bd1f9207 100644
--- a/drivers/115_open/upload.go
+++ b/drivers/115_open/upload.go
@@ -70,9 +70,6 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
 // }
 
 func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
-	fileSize := stream.GetSize()
-	chunkSize := calPartSize(fileSize)
-
 	ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
 	if err != nil {
 		return err
@@ -87,6 +84,8 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
 		return err
 	}
 
+	fileSize := stream.GetSize()
+	chunkSize := calPartSize(fileSize)
 	partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
 	parts := make([]oss.UploadPart, partNum)
 	offset := int64(0)
diff --git a/drivers/123/upload.go b/drivers/123/upload.go
index b15e55cf2..e44ce2eec 100644
--- a/drivers/123/upload.go
+++ b/drivers/123/upload.go
@@ -76,7 +76,7 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
 func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
 	// fetch s3 pre signed urls
 	size := file.GetSize()
-	chunkSize := min(size, 16*utils.MB)
+	chunkSize := int64(16 * utils.MB)
 	chunkCount := 1
 	if size > chunkSize {
 		chunkCount = int((size + chunkSize - 1) / chunkSize)
@@ -109,14 +109,13 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		start := i
 		end := min(i+batchSize, chunkCount+1)
 		s3PreSignedUrls, err := getS3UploadUrl(uploadCtx, upReq, start, end)
-		key := fmt.Sprintf("%p", s3PreSignedUrls)
 		if err != nil {
 			return err
 		}
 		// upload each chunk
 		for cur := start; cur < end; cur++ {
 			if utils.IsCanceled(uploadCtx) {
-				return uploadCtx.Err()
+				break
 			}
 			offset := int64(cur-1) * chunkSize
 			curSize := chunkSize
@@ -156,13 +155,13 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 				}
 				defer res.Body.Close()
 				if res.StatusCode == http.StatusForbidden {
-					_, err, _ := uploadG.Do(key, func() (*S3PreSignedURLs, error) {
+					singleflight.AnyGroup.Do(fmt.Sprintf("Pan123.newUpload_%p", threadG), func() (any, error) {
 						newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
 						if err != nil {
 							return nil, err
 						}
 						s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
-						return newS3PreSignedUrls, nil
+						return nil, nil
 					})
 					if err != nil {
 						return err
@@ -193,5 +192,3 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 	// complete s3 upload
 	return d.completeS3(ctx, upReq, file, chunkCount > 1)
 }
-
-var uploadG singleflight.Group[*S3PreSignedURLs]
diff --git a/drivers/aliyundrive_open/upload.go b/drivers/aliyundrive_open/upload.go
index 36ebd3bc5..988527069 100644
--- a/drivers/aliyundrive_open/upload.go
+++ b/drivers/aliyundrive_open/upload.go
@@ -256,6 +256,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 		retry.Attempts(3),
 		retry.DelayType(retry.BackOffDelay),
 		retry.Delay(time.Second))
+	ss.RecycleSectionReader(rd)
 	if err != nil {
 		return nil, err
 	}
diff --git a/drivers/doubao/util.go b/drivers/doubao/util.go
index 3db2836e0..39d551346 100644
--- a/drivers/doubao/util.go
+++ b/drivers/doubao/util.go
@@ -454,17 +454,22 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
 		return nil, err
 	}
 	reader, err := ss.GetSectionReader(0, file.GetSize())
+	if err != nil {
+		return nil, err
+	}
 	// compute CRC32
 	crc32Hash := crc32.NewIEEE()
-	utils.CopyWithBuffer(crc32Hash, reader)
+	w, _ := utils.CopyWithBuffer(crc32Hash, reader)
+	if w != file.GetSize() {
+		return nil, fmt.Errorf("can't read data, expected=%d, got=%d", file.GetSize(), w)
+	}
 	crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))
 	// build the request path
 	uploadNode := config.InnerUploadAddress.UploadNodes[0]
 	storeInfo := uploadNode.StoreInfos[0]
 	uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
-	var uploadResp *UploadResp
 	rateLimitedRd := driver.NewLimitedUploadStream(ctx, reader)
 	err = d._retryOperation("Upload", func() error {
 		reader.Seek(0, io.SeekStart)
@@ -496,13 +501,11 @@
 	} else if resp.Data.Crc32 != crc32Value {
 		return fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, resp.Data.Crc32)
 	}
-	return nil
-})
-
-if uploadResp.Code != 2000 {
-	return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
+	ss.RecycleSectionReader(reader)
+	if err != nil {
+		return nil, err
 	}
 
 	uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
diff --git a/drivers/ftp/util.go b/drivers/ftp/util.go
index 9e050b4b8..c81803d6c 100644
--- a/drivers/ftp/util.go
+++ b/drivers/ftp/util.go
@@ -15,8 +15,8 @@ import (
 
 // do others that not defined in Driver interface
 func (d *FTP) login() error {
-	err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (error, error) {
-		return d._login(), nil
+	_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("FTP.login:%p", d), func() (any, error) {
+		return nil, d._login()
 	})
 	return err
 }
diff --git a/drivers/google_drive/util.go b/drivers/google_drive/util.go
index 8bdd68784..ff2191361 100644
--- a/drivers/google_drive/util.go
+++ b/drivers/google_drive/util.go
@@ -274,8 +274,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
 	limitedReader := driver.NewLimitedUploadStream(ctx, reader)
 	err = retry.Do(func() error {
 		reader.Seek(0, io.SeekStart)
-		req, err := http.NewRequestWithContext(ctx, http.MethodPut, url,
-			limitedReader)
+		req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, limitedReader)
 		if err != nil {
 			return err
 		}
@@ -306,6 +305,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
 		retry.Attempts(3),
 		retry.DelayType(retry.BackOffDelay),
 		retry.Delay(time.Second))
+	ss.RecycleSectionReader(reader)
 	if err != nil {
 		return err
 	}
diff --git a/drivers/sftp/util.go b/drivers/sftp/util.go
index 5c47c532c..293df8fa1 100644
--- a/drivers/sftp/util.go
+++ b/drivers/sftp/util.go
@@ -13,8 +13,8 @@ import (
 
 // do others that not defined in Driver interface
 func (d *SFTP) initClient() error {
-	err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("SFTP.initClient:%p", d), func() (error, error) {
-		return d._initClient(), nil
+	_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("SFTP.initClient:%p", d), func() (any, error) {
+		return nil, d._initClient()
 	})
 	return err
 }
diff --git a/drivers/smb/util.go b/drivers/smb/util.go
index 166e2ae37..3e40f8138 100644
--- a/drivers/smb/util.go
+++ b/drivers/smb/util.go
@@ -28,8 +28,8 @@ func (d *SMB) getLastConnTime() time.Time {
 }
 
 func (d *SMB) initFS() error {
-	err, _, _ := singleflight.ErrorGroup.Do(fmt.Sprintf("SMB.initFS:%p", d), func() (error, error) {
-		return d._initFS(), nil
+	_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("SMB.initFS:%p", d), func() (any, error) {
+		return nil, d._initFS()
 	})
 	return err
 }
diff --git a/internal/stream/util.go b/internal/stream/util.go
index c53e783ed..b02222652 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -199,6 +199,7 @@ type StreamSectionReader struct {
 func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSectionReader, error) {
 	ss := &StreamSectionReader{file: file}
 	if file.GetFile() == nil {
+		bufMaxLen = min(bufMaxLen, int(file.GetSize()))
 		if bufMaxLen > conf.MaxBufferLimit {
 			_, err := file.CacheFullInTempFile()
 			if err != nil {
@@ -207,7 +208,7 @@ func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSect
 		} else {
 			ss.bufPool = &sync.Pool{
 				New: func() any {
-					return make([]byte, bufMaxLen) // Two times of size in io package
+					return make([]byte, bufMaxLen)
 				},
 			}
 		}
diff --git a/pkg/singleflight/var.go b/pkg/singleflight/var.go
index 41c97a2e2..a92288d16 100644
--- a/pkg/singleflight/var.go
+++ b/pkg/singleflight/var.go
@@ -1,3 +1,3 @@
 package singleflight
 
-var ErrorGroup Group[error]
+var AnyGroup Group[any]

From 605c7f575d1716287affe682592531d9747eea8e Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Tue, 5 Aug 2025 00:11:02 +0800
Subject: [PATCH 19/20] Conf.MaxBufferLimit is in MB
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 internal/bootstrap/config.go | 3 +++
 internal/conf/config.go      | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go
index bcf75a320..3980deb83 100644
--- a/internal/bootstrap/config.go
+++ b/internal/bootstrap/config.go
@@ -84,9 +84,12 @@ func InitConfig() {
 		m, _ := mem.VirtualMemory()
 		if m != nil {
 			conf.MaxBufferLimit = max(int(float64(m.Total)*0.05), 4*utils.MB)
+			conf.MaxBufferLimit -= conf.MaxBufferLimit % utils.MB
 		} else {
 			conf.MaxBufferLimit = 16 * utils.MB
 		}
+	} else {
+		conf.MaxBufferLimit = conf.Conf.MaxBufferLimit * utils.MB
 	}
 	log.Infof("max buffer limit: %d", conf.MaxBufferLimit)
 	if !conf.Conf.Force {
diff --git a/internal/conf/config.go b/internal/conf/config.go
index 5d29e54fb..72a8ee722 100644
--- a/internal/conf/config.go
+++ b/internal/conf/config.go
@@ -119,7 +119,7 @@ type Config struct {
 	DistDir               string     `json:"dist_dir"`
 	Log                   LogConfig  `json:"log" envPrefix:"LOG_"`
 	DelayedStart          int        `json:"delayed_start" env:"DELAYED_START"`
-	MaxBufferLimit        int        `json:"max_buffer_limit" env:"MAX_BUFFER_LIMIT"`
+	MaxBufferLimit        int        `json:"max_buffer_limitMB" env:"MAX_BUFFER_LIMIT_MB"`
 	MaxConnections        int        `json:"max_connections" env:"MAX_CONNECTIONS"`
 	MaxConcurrency        int        `json:"max_concurrency" env:"MAX_CONCURRENCY"`
 	TlsInsecureSkipVerify bool       `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"`

From eb0f072cc4e445f9d768c67570c14b79933372ec Mon Sep 17 00:00:00 2001
From: j2rong4cn
Date: Tue, 5 Aug 2025 21:40:25 +0800
Subject: =?UTF-8?q?=E3=80=82?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 internal/stream/util.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/internal/stream/util.go b/internal/stream/util.go
index b02222652..77b238025 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -196,11 +196,11 @@ type StreamSectionReader struct {
 	bufPool *sync.Pool
 }
 
-func NewStreamSectionReader(file model.FileStreamer, bufMaxLen int) (*StreamSectionReader, error) {
+func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int) (*StreamSectionReader, error) {
 	ss := &StreamSectionReader{file: file}
 	if file.GetFile() == nil {
-		bufMaxLen = min(bufMaxLen, int(file.GetSize()))
-		if bufMaxLen > conf.MaxBufferLimit {
+		maxBufferSize = min(maxBufferSize, int(file.GetSize()))
+		if maxBufferSize > conf.MaxBufferLimit {
 			_, err := file.CacheFullInTempFile()
 			if err != nil {
 				return nil, err
@@ -208,7 +208,7 @@ func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int) (*Stream
 		} else {
 			ss.bufPool = &sync.Pool{
 				New: func() any {
-					return make([]byte, bufMaxLen)
+					return make([]byte, maxBufferSize)
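
Taken together, patches 15 through 20 converge on one upload shape: a StreamSectionReader hands out pooled, seekable sections of the sequential stream, an ordered errgroup runs each part's Lifecycle, and buffers are returned in After. The following is a minimal sketch of that shape, not code from the series; it assumes the final two-argument NewStreamSectionReader from PATCH 18/20, and uploadParts/uploadPart are hypothetical helper names standing in for a driver's per-part HTTP call. Retry options are omitted for brevity.

package sketch

import (
	"context"
	"io"

	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
)

// uploadParts walks the stream in chunkSize sections. Sections larger than
// conf.MaxBufferLimit make NewStreamSectionReader fall back to a temp file;
// smaller ones are served from a sync.Pool of reusable buffers.
func uploadParts(ctx context.Context, file model.FileStreamer, chunkSize int64,
	uploadPart func(ctx context.Context, r io.Reader, size int64) error) error {
	ss, err := stream.NewStreamSectionReader(file, int(chunkSize))
	if err != nil {
		return err
	}
	size := file.GetSize()
	count := (size + chunkSize - 1) / chunkSize
	threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, 3)
	for partIndex := int64(0); partIndex < count; partIndex++ {
		if upCtx.Err() != nil {
			break
		}
		offset := partIndex * chunkSize
		partSize := min(chunkSize, size-offset)
		var rd *stream.SectionReader
		threadG.GoWithLifecycle(errgroup.Lifecycle{
			// Before runs serially in an ordered group, which is why the
			// sequential stream can be sectioned without extra locking.
			Before: func(ctx context.Context) error {
				var err error
				rd, err = ss.GetSectionReader(offset, partSize)
				return err
			},
			// Do may be retried, so rewind the section before each attempt.
			Do: func(ctx context.Context) error {
				rd.Seek(0, io.SeekStart)
				return uploadPart(ctx, driver.NewLimitedUploadStream(ctx, rd), partSize)
			},
			// After always runs (even when Before fails), so the pooled
			// buffer is never leaked.
			After: func(err error) {
				ss.RecycleSectionReader(rd)
			},
		})
	}
	return threadG.Wait()
}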
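
PATCH 18 also collapses the typed singleflight groups into a single Group[any]; error-only callers wrap their result as (nil, err), exactly as the FTP/SFTP/SMB drivers now do. A hedged sketch of that call pattern, with conn and dial as hypothetical stand-ins for a driver and its reconnect routine:

package sketch

import (
	"fmt"

	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
)

type conn struct{ addr string }

func (c *conn) dial() error { return nil } // stand-in for d._login() etc.

// login coalesces concurrent reconnect attempts on one client: every caller
// that arrives while a dial is in flight gets the same error back, and the
// discarded first return value carries the (unused) any-typed result.
func (c *conn) login() error {
	_, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("conn.login:%p", c), func() (any, error) {
		return nil, c.dial()
	})
	return err
}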
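
The 189pc upload in PATCH 16/17 keeps two digests: fileMd5Hex over the whole stream (reused from file.GetHash() when already known) and sliceMd5Hex, which for multi-slice files is the MD5 of the uppercase per-slice MD5 hex strings joined by newlines. A worked sketch of that derivation, assuming utils.GetMD5EncodeStr is MD5-then-hex of its string argument:

package sketch

import (
	"crypto/md5"
	"encoding/hex"
	"strings"
)

// sliceMd5Hex reproduces the digest shape the 189 API expects: for a file
// that fits in one slice it is simply the file MD5; otherwise it hashes the
// joined, uppercased per-slice MD5s (one entry per sliceSize chunk).
func sliceMd5Hex(fileMd5Hex string, sliceMd5Hexs []string, fileSize, sliceSize int64) string {
	if fileSize <= sliceSize {
		return fileMd5Hex
	}
	sum := md5.Sum([]byte(strings.Join(sliceMd5Hexs, "\n")))
	return strings.ToUpper(hex.EncodeToString(sum[:]))
}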
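
Finally, after PATCH 19 the configured buffer ceiling is interpreted in MB (json field max_buffer_limitMB, env MAX_BUFFER_LIMIT_MB) and converted to bytes once at bootstrap; a negative value keeps the auto default of 5% of total RAM, floored at 4 MB and rounded down to a whole MB. A small sketch of the resolution rule, restated as a standalone function for clarity (resolveMaxBufferLimit is a hypothetical name):

package sketch

const mb = 1024 * 1024

// resolveMaxBufferLimit mirrors the PATCH 19 bootstrap logic: configuredMB is
// the raw max_buffer_limitMB value, totalRAM the detected memory in bytes
// (0 when detection failed).
func resolveMaxBufferLimit(configuredMB int, totalRAM uint64) int {
	if configuredMB >= 0 {
		return configuredMB * mb // config/env value is MB; bytes are used internally
	}
	if totalRAM == 0 {
		return 16 * mb // fallback when memory info is unavailable
	}
	limit := max(int(float64(totalRAM)*0.05), 4*mb)
	return limit - limit%mb // round down to a whole MB
}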