From 88805722c04d7d7302ce916b842500a1dab7c364 Mon Sep 17 00:00:00 2001 From: "Mako (XSpy)" <127767602+x-spy@users.noreply.github.com> Date: Sat, 13 Dec 2025 19:45:02 +0800 Subject: [PATCH 1/7] feat(driver): add wps drive support --- drivers/all.go | 1 + drivers/wps/driver.go | 78 +++++ drivers/wps/meta.go | 31 ++ drivers/wps/types.go | 94 ++++++ drivers/wps/util.go | 647 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 851 insertions(+) create mode 100644 drivers/wps/driver.go create mode 100644 drivers/wps/meta.go create mode 100644 drivers/wps/types.go create mode 100644 drivers/wps/util.go diff --git a/drivers/all.go b/drivers/all.go index 9181c38a9..15fbf2b96 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -78,6 +78,7 @@ import ( _ "github.com/OpenListTeam/OpenList/v4/drivers/webdav" _ "github.com/OpenListTeam/OpenList/v4/drivers/weiyun" _ "github.com/OpenListTeam/OpenList/v4/drivers/wopan" + _ "github.com/OpenListTeam/OpenList/v4/drivers/wps" _ "github.com/OpenListTeam/OpenList/v4/drivers/yandex_disk" ) diff --git a/drivers/wps/driver.go b/drivers/wps/driver.go new file mode 100644 index 000000000..0450bd34c --- /dev/null +++ b/drivers/wps/driver.go @@ -0,0 +1,78 @@ +package wps + +import ( + "context" + "fmt" + + "github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/errs" + "github.com/OpenListTeam/OpenList/v4/internal/model" +) + +type Wps struct { + model.Storage + Addition + companyID string +} + +func (d *Wps) Config() driver.Config { + return config +} + +func (d *Wps) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *Wps) Init(ctx context.Context) error { + if d.Cookie == "" { + return fmt.Errorf("cookie is empty") + } + return d.ensureCompanyID(ctx) +} + +func (d *Wps) Drop(ctx context.Context) error { + return nil +} + +func (d *Wps) List(ctx context.Context, dir model.Obj, _ model.ListArgs) ([]model.Obj, error) { + basePath := "/" + if dir != nil { + if p := 
dir.GetPath(); p != "" { + basePath = p + } + } + return d.list(ctx, basePath) +} + +func (d *Wps) Link(ctx context.Context, file model.Obj, _ model.LinkArgs) (*model.Link, error) { + if file == nil { + return nil, errs.NotSupport + } + return d.link(ctx, file.GetPath()) +} + +func (d *Wps) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + return d.makeDir(ctx, parentDir, dirName) +} + +func (d *Wps) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + return d.move(ctx, srcObj, dstDir) +} + +func (d *Wps) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + return d.rename(ctx, srcObj, newName) +} + +func (d *Wps) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + return d.copy(ctx, srcObj, dstDir) +} + +func (d *Wps) Remove(ctx context.Context, obj model.Obj) error { + return d.remove(ctx, obj) +} + +func (d *Wps) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + return d.put(ctx, dstDir, file, up) +} + +var _ driver.Driver = (*Wps)(nil) diff --git a/drivers/wps/meta.go b/drivers/wps/meta.go new file mode 100644 index 000000000..1501e5582 --- /dev/null +++ b/drivers/wps/meta.go @@ -0,0 +1,31 @@ +package wps + +import ( + "github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/op" +) + +type Addition struct { + driver.RootPath + Cookie string `json:"cookie" required:"true" type:"text"` + Mode string `json:"mode" type:"select" options:"商业,个人" default:"商业"` +} + +var config = driver.Config{ + Name: "WPS", + LocalSort: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "/", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Wps{} + }) +} diff --git a/drivers/wps/types.go b/drivers/wps/types.go new file mode 100644 index 000000000..f057d5257 --- /dev/null +++ 
b/drivers/wps/types.go @@ -0,0 +1,94 @@ +package wps + +import ( + "time" + + "github.com/OpenListTeam/OpenList/v4/pkg/utils" +) + +type workspaceResp struct { + Companies []struct { + ID int64 `json:"id"` + } `json:"companies"` +} + +type Group struct { + CompanyID int64 `json:"company_id"` + GroupID int64 `json:"group_id"` + Name string `json:"name"` + Type string `json:"type"` +} + +type groupsResp struct { + Groups []Group `json:"groups"` +} + +type filePerms struct { + Download int `json:"download"` +} + +type FileInfo struct { + GroupID int64 `json:"groupid"` + ParentID int64 `json:"parentid"` + Name string `json:"fname"` + Size int64 `json:"fsize"` + Type string `json:"ftype"` + Ctime int64 `json:"ctime"` + Mtime int64 `json:"mtime"` + ID int64 `json:"id"` + Deleted bool `json:"deleted"` + FilePerms filePerms `json:"file_perms_acl"` +} + +type filesResp struct { + Files []FileInfo `json:"files"` +} + +type downloadResp struct { + URL string `json:"url"` + Result string `json:"result"` +} + +type Obj struct { + id string + name string + size int64 + ctime time.Time + mtime time.Time + isDir bool + hash utils.HashInfo + path string + canDownload bool +} + +func (o *Obj) GetSize() int64 { + return o.size +} + +func (o *Obj) GetName() string { + return o.name +} + +func (o *Obj) ModTime() time.Time { + return o.mtime +} + +func (o *Obj) CreateTime() time.Time { + return o.ctime +} + +func (o *Obj) IsDir() bool { + return o.isDir +} + +func (o *Obj) GetHash() utils.HashInfo { + return o.hash +} + +func (o *Obj) GetID() string { + return o.id +} + +func (o *Obj) GetPath() string { + return o.path +} diff --git a/drivers/wps/util.go b/drivers/wps/util.go new file mode 100644 index 000000000..dff27b495 --- /dev/null +++ b/drivers/wps/util.go @@ -0,0 +1,647 @@ +package wps + +import ( + "context" + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + 
"github.com/OpenListTeam/OpenList/v4/drivers/base" + "github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/errs" + "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/go-resty/resty/v2" +) + +const endpoint = "https://365.kdocs.cn" +const personalEndpoint = "https://drive.wps.cn" + +type resolvedNode struct { + kind string + group Group + file *FileInfo +} + +type apiResult struct { + Result string `json:"result"` + Msg string `json:"msg"` +} + +type uploadCreateUpdateResp struct { + apiResult + Method string `json:"method"` + URL string `json:"url"` + Request struct { + Headers map[string]string `json:"headers"` + } `json:"request"` +} + +type uploadPutResp struct { + NewFilename string `json:"newfilename"` +} + +type personalGroupsResp struct { + apiResult + Groups []struct { + ID int64 `json:"id"` + Name string `json:"name"` + } `json:"groups"` +} + +type countingWriter struct { + n *int64 +} + +func (w countingWriter) Write(p []byte) (int, error) { + *w.n += int64(len(p)) + return len(p), nil +} + +func (d *Wps) isPersonal() bool { + return strings.TrimSpace(d.Mode) == "个人" +} + +func (d *Wps) driveHost() string { + if d.isPersonal() { + return personalEndpoint + } + return endpoint +} + +func (d *Wps) drivePrefix() string { + if d.isPersonal() { + return "" + } + return "/3rd/drive" +} + +func (d *Wps) driveURL(path string) string { + return d.driveHost() + d.drivePrefix() + path +} + +func (d *Wps) origin() string { + return d.driveHost() + "/" +} + +func (d *Wps) canDownload(f *FileInfo) bool { + if f == nil || f.Type == "folder" { + return false + } + if f.FilePerms.Download != 0 { + return true + } + return d.isPersonal() +} + +func (d *Wps) request(ctx context.Context) *resty.Request { + return base.RestyClient.R(). + SetHeader("Cookie", d.Cookie). + SetHeader("Accept", "application/json"). 
+ SetContext(ctx) +} + +func (d *Wps) jsonRequest(ctx context.Context) *resty.Request { + return d.request(ctx). + SetHeader("Content-Type", "application/json"). + SetHeader("Origin", d.origin()) +} + +func checkAPI(resp *resty.Response, result apiResult) error { + if result.Result != "" && result.Result != "ok" { + if result.Msg == "" { + result.Msg = "unknown error" + } + return fmt.Errorf("%s: %s", result.Result, result.Msg) + } + if resp != nil && resp.IsError() { + if result.Msg != "" { + return fmt.Errorf("%s", result.Msg) + } + return fmt.Errorf("http error: %d", resp.StatusCode()) + } + return nil +} + +func (d *Wps) ensureCompanyID(ctx context.Context) error { + if d.isPersonal() { + return nil + } + if d.companyID != "" { + return nil + } + var resp workspaceResp + r, err := d.request(ctx).SetResult(&resp).SetError(&resp).Get(endpoint + "/3rd/plussvr/compose/v1/users/self/workspaces?fields=name&comp_status=active") + if err != nil { + return err + } + if r != nil && r.IsError() { + return fmt.Errorf("http error: %d", r.StatusCode()) + } + if len(resp.Companies) == 0 { + return fmt.Errorf("no company id") + } + d.companyID = strconv.FormatInt(resp.Companies[0].ID, 10) + return nil +} + +func (d *Wps) getGroups(ctx context.Context) ([]Group, error) { + if d.isPersonal() { + var resp personalGroupsResp + r, err := d.request(ctx).SetResult(&resp).SetError(&resp).Get(d.driveURL("/api/v3/groups")) + if err != nil { + return nil, err + } + if err := checkAPI(r, resp.apiResult); err != nil { + return nil, err + } + res := make([]Group, 0, len(resp.Groups)) + for _, g := range resp.Groups { + res = append(res, Group{GroupID: g.ID, Name: g.Name}) + } + return res, nil + } + if err := d.ensureCompanyID(ctx); err != nil { + return nil, err + } + var resp groupsResp + url := fmt.Sprintf("%s/3rd/plus/groups/v1/companies/%s/users/self/groups/private", endpoint, d.companyID) + r, err := d.request(ctx).SetResult(&resp).SetError(&resp).Get(url) + if err != nil { + return 
nil, err + } + if r != nil && r.IsError() { + return nil, fmt.Errorf("http error: %d", r.StatusCode()) + } + return resp.Groups, nil +} + +func (d *Wps) getFiles(ctx context.Context, groupID, parentID int64) ([]FileInfo, error) { + var resp filesResp + url := fmt.Sprintf("%s/api/v5/groups/%d/files", d.driveHost()+d.drivePrefix(), groupID) + r, err := d.request(ctx). + SetQueryParam("parentid", strconv.FormatInt(parentID, 10)). + SetResult(&resp). + SetError(&resp). + Get(url) + if err != nil { + return nil, err + } + if r != nil && r.IsError() { + return nil, fmt.Errorf("http error: %d", r.StatusCode()) + } + return resp.Files, nil +} + +func parseTime(v int64) time.Time { + if v <= 0 { + return time.Time{} + } + return time.Unix(v, 0) +} + +func joinPath(basePath, name string) string { + if basePath == "" || basePath == "/" { + return "/" + name + } + return strings.TrimRight(basePath, "/") + "/" + name +} + +func (d *Wps) resolvePath(ctx context.Context, path string) (*resolvedNode, error) { + clean := strings.TrimSpace(path) + if clean == "" { + clean = "/" + } + clean = strings.Trim(clean, "/") + if clean == "" { + return &resolvedNode{kind: "root"}, nil + } + segs := strings.Split(clean, "/") + groups, err := d.getGroups(ctx) + if err != nil { + return nil, err + } + var grp *Group + for i := range groups { + if groups[i].Name == segs[0] { + grp = &groups[i] + break + } + } + if grp == nil { + return nil, fmt.Errorf("group not found") + } + if len(segs) == 1 { + return &resolvedNode{kind: "group", group: *grp}, nil + } + parentID := int64(0) + var last FileInfo + for i := 1; i < len(segs); i++ { + files, err := d.getFiles(ctx, grp.GroupID, parentID) + if err != nil { + return nil, err + } + var found *FileInfo + for j := range files { + if files[j].Name == segs[i] { + found = &files[j] + break + } + } + if found == nil { + return nil, fmt.Errorf("path not found") + } + if i < len(segs)-1 && found.Type != "folder" { + return nil, fmt.Errorf("path not found") + 
} + last = *found + parentID = found.ID + } + kind := "file" + if last.Type == "folder" { + kind = "folder" + } + return &resolvedNode{kind: kind, group: *grp, file: &last}, nil +} + +func (d *Wps) fileToObj(basePath string, f FileInfo) *Obj { + name := f.Name + path := joinPath(basePath, name) + obj := &Obj{ + id: path, + name: name, + size: f.Size, + ctime: parseTime(f.Ctime), + mtime: parseTime(f.Mtime), + isDir: f.Type == "folder", + path: path, + } + if !obj.isDir { + obj.canDownload = d.canDownload(&f) + } + return obj +} + +func (d *Wps) doJSON(ctx context.Context, method, url string, body interface{}) error { + var result apiResult + req := d.jsonRequest(ctx).SetBody(body).SetResult(&result).SetError(&result) + var ( + resp *resty.Response + err error + ) + switch method { + case http.MethodPost: + resp, err = req.Post(url) + case http.MethodPut: + resp, err = req.Put(url) + default: + return errs.NotSupport + } + if err != nil { + return err + } + return checkAPI(resp, result) +} + +func (d *Wps) list(ctx context.Context, basePath string) ([]model.Obj, error) { + if strings.TrimSpace(basePath) == "" { + basePath = "/" + } + node, err := d.resolvePath(ctx, basePath) + if err != nil { + return nil, err + } + if node.kind == "root" { + groups, err := d.getGroups(ctx) + if err != nil { + return nil, err + } + res := make([]model.Obj, 0, len(groups)) + for _, g := range groups { + path := joinPath(basePath, g.Name) + obj := &Obj{ + id: path, + name: g.Name, + ctime: parseTime(0), + mtime: parseTime(0), + isDir: true, + path: path, + } + res = append(res, obj) + } + return res, nil + } + if node.kind != "group" && node.kind != "folder" { + return nil, nil + } + parentID := int64(0) + if node.file != nil && node.kind == "folder" { + parentID = node.file.ID + } + files, err := d.getFiles(ctx, node.group.GroupID, parentID) + if err != nil { + return nil, err + } + res := make([]model.Obj, 0, len(files)) + for _, f := range files { + res = append(res, 
d.fileToObj(basePath, f)) + } + return res, nil +} + +func (d *Wps) link(ctx context.Context, path string) (*model.Link, error) { + node, err := d.resolvePath(ctx, path) + if err != nil { + return nil, err + } + if node.kind != "file" || node.file == nil { + return nil, errs.NotSupport + } + if !d.canDownload(node.file) { + return nil, fmt.Errorf("no download permission") + } + url := fmt.Sprintf("%s/api/v5/groups/%d/files/%d/download?support_checksums=sha1", d.driveHost()+d.drivePrefix(), node.group.GroupID, node.file.ID) + var resp downloadResp + r, err := d.request(ctx).SetResult(&resp).SetError(&resp).Get(url) + if err != nil { + return nil, err + } + if r != nil && r.IsError() { + return nil, fmt.Errorf("http error: %d", r.StatusCode()) + } + if resp.URL == "" { + return nil, fmt.Errorf("empty download url") + } + return &model.Link{URL: resp.URL, Header: http.Header{}}, nil +} + +func (d *Wps) makeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + if parentDir == nil { + return errs.NotSupport + } + node, err := d.resolvePath(ctx, parentDir.GetPath()) + if err != nil { + return err + } + if node.kind != "group" && node.kind != "folder" { + return errs.NotSupport + } + parentID := int64(0) + if node.file != nil && node.kind == "folder" { + parentID = node.file.ID + } + body := map[string]interface{}{ + "groupid": node.group.GroupID, + "name": dirName, + "parentid": parentID, + } + return d.doJSON(ctx, http.MethodPost, d.driveURL("/api/v5/files/folder"), body) +} + +func (d *Wps) move(ctx context.Context, srcObj, dstDir model.Obj) error { + if srcObj == nil || dstDir == nil { + return errs.NotSupport + } + nodeSrc, err := d.resolvePath(ctx, srcObj.GetPath()) + if err != nil { + return err + } + nodeDst, err := d.resolvePath(ctx, dstDir.GetPath()) + if err != nil { + return err + } + if nodeSrc.kind != "file" && nodeSrc.kind != "folder" { + return errs.NotSupport + } + if nodeDst.kind != "group" && nodeDst.kind != "folder" { + return 
errs.NotSupport + } + targetParentID := int64(0) + if nodeDst.file != nil && nodeDst.kind == "folder" { + targetParentID = nodeDst.file.ID + } + body := map[string]interface{}{ + "fileids": []int64{nodeSrc.file.ID}, + "target_groupid": nodeDst.group.GroupID, + "target_parentid": targetParentID, + } + url := fmt.Sprintf("/api/v3/groups/%d/files/batch/move", nodeSrc.group.GroupID) + return d.doJSON(ctx, http.MethodPost, d.driveURL(url), body) +} + +func (d *Wps) rename(ctx context.Context, srcObj model.Obj, newName string) error { + if srcObj == nil { + return errs.NotSupport + } + node, err := d.resolvePath(ctx, srcObj.GetPath()) + if err != nil { + return err + } + if node.kind != "file" && node.kind != "folder" { + return errs.NotSupport + } + url := fmt.Sprintf("/api/v3/groups/%d/files/%d", node.group.GroupID, node.file.ID) + body := map[string]string{"fname": newName} + return d.doJSON(ctx, http.MethodPut, d.driveURL(url), body) +} + +func (d *Wps) copy(ctx context.Context, srcObj, dstDir model.Obj) error { + if srcObj == nil || dstDir == nil { + return errs.NotSupport + } + nodeSrc, err := d.resolvePath(ctx, srcObj.GetPath()) + if err != nil { + return err + } + nodeDst, err := d.resolvePath(ctx, dstDir.GetPath()) + if err != nil { + return err + } + if nodeSrc.kind != "file" && nodeSrc.kind != "folder" { + return errs.NotSupport + } + if nodeDst.kind != "group" && nodeDst.kind != "folder" { + return errs.NotSupport + } + targetParentID := int64(0) + if nodeDst.file != nil && nodeDst.kind == "folder" { + targetParentID = nodeDst.file.ID + } + body := map[string]interface{}{ + "fileids": []int64{nodeSrc.file.ID}, + "groupid": nodeSrc.group.GroupID, + "target_groupid": nodeDst.group.GroupID, + "target_parentid": targetParentID, + "duplicated_name_model": 1, + } + url := fmt.Sprintf("/api/v3/groups/%d/files/batch/copy", nodeSrc.group.GroupID) + return d.doJSON(ctx, http.MethodPost, d.driveURL(url), body) +} + +func (d *Wps) remove(ctx context.Context, obj 
model.Obj) error { + if obj == nil { + return errs.NotSupport + } + node, err := d.resolvePath(ctx, obj.GetPath()) + if err != nil { + return err + } + if node.kind != "file" && node.kind != "folder" { + return errs.NotSupport + } + body := map[string]interface{}{ + "fileids": []int64{node.file.ID}, + } + url := fmt.Sprintf("/api/v3/groups/%d/files/batch/delete", node.group.GroupID) + return d.doJSON(ctx, http.MethodPost, d.driveURL(url), body) +} + +func cacheAndHash(file model.FileStreamer, up driver.UpdateProgress) (model.File, int64, string, string, error) { + h1 := sha1.New() + h256 := sha256.New() + size := file.GetSize() + var counted int64 + ws := []io.Writer{h1, h256} + if size <= 0 { + ws = append(ws, countingWriter{n: &counted}) + } + p := up + f, err := file.CacheFullAndWriter(&p, io.MultiWriter(ws...)) + if err != nil { + return nil, 0, "", "", err + } + if size <= 0 { + size = counted + } + return f, size, hex.EncodeToString(h1.Sum(nil)), hex.EncodeToString(h256.Sum(nil)), nil +} + +func (d *Wps) createUpload(ctx context.Context, groupID, parentID int64, name string, size int64, sha1Hex, sha256Hex string) (*uploadCreateUpdateResp, error) { + body := map[string]string{ + "group_id": strconv.FormatInt(groupID, 10), + "name": name, + "parent_id": strconv.FormatInt(parentID, 10), + "sha1": sha1Hex, + "sha256": sha256Hex, + "size": strconv.FormatInt(size, 10), + } + var resp uploadCreateUpdateResp + r, err := d.jsonRequest(ctx). + SetBody(body). + SetResult(&resp). + SetError(&resp). 
+ Put(d.driveURL("/api/v5/files/upload/create_update")) + if err != nil { + return nil, err + } + if err := checkAPI(r, resp.apiResult); err != nil { + return nil, err + } + if resp.URL == "" { + return nil, fmt.Errorf("empty upload url") + } + return &resp, nil +} + +func normalizeETag(v string) string { + v = strings.TrimSpace(v) + if strings.HasPrefix(v, "W/") { + v = strings.TrimSpace(strings.TrimPrefix(v, "W/")) + } + return strings.Trim(v, `"`) +} + +func (d *Wps) commitUpload(ctx context.Context, etag string, groupID, parentID int64, name, sha1Hex string, size int64) error { + body := map[string]interface{}{ + "etag": etag, + "groupid": groupID, + "key": "", + "name": name, + "parentid": parentID, + "sha1": sha1Hex, + "size": size, + "store": "ks3", + "storekey": "", + } + return d.doJSON(ctx, http.MethodPost, d.driveURL("/api/v5/files/file"), body) +} + +func (d *Wps) put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + if dstDir == nil || file == nil { + return errs.NotSupport + } + if up == nil { + up = func(float64) {} + } + node, err := d.resolvePath(ctx, dstDir.GetPath()) + if err != nil { + return err + } + if node.kind != "group" && node.kind != "folder" { + return errs.NotSupport + } + parentID := int64(0) + if node.file != nil && node.kind == "folder" { + parentID = node.file.ID + } + f, size, sha1Hex, sha256Hex, err := cacheAndHash(file, model.UpdateProgressWithRange(up, 0, 0.5)) + if err != nil { + return err + } + if c, ok := f.(io.Closer); ok { + defer c.Close() + } + info, err := d.createUpload(ctx, node.group.GroupID, parentID, file.GetName(), size, sha1Hex, sha256Hex) + if err != nil { + return err + } + if _, err := f.Seek(0, io.SeekStart); err != nil { + return err + } + rf := driver.NewLimitedUploadFile(ctx, f) + prog := driver.NewProgress(size, model.UpdateProgressWithRange(up, 0.5, 1)) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, info.URL, io.TeeReader(rf, prog)) + if 
err != nil { + return err + } + req.ContentLength = size + for k, v := range info.Request.Headers { + req.Header.Set(k, v) + } + method := strings.ToUpper(strings.TrimSpace(info.Method)) + if method != "" && method != http.MethodPut { + req.Method = method + } + resp, err := base.RestyClient.GetClient().Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + io.Copy(io.Discard, resp.Body) + return fmt.Errorf("http error: %d", resp.StatusCode) + } + etag := normalizeETag(resp.Header.Get("ETag")) + var pr uploadPutResp + if err := json.NewDecoder(resp.Body).Decode(&pr); err != nil { + return err + } + sha1FromServer := strings.TrimSpace(pr.NewFilename) + if etag == "" { + return fmt.Errorf("empty etag") + } + if sha1FromServer == "" { + return fmt.Errorf("empty newfilename") + } + if err := d.commitUpload(ctx, etag, node.group.GroupID, parentID, file.GetName(), sha1FromServer, size); err != nil { + return err + } + up(1) + return nil +} From 650df1dcbd7a285205462c1add3fb342537ee4a3 Mon Sep 17 00:00:00 2001 From: "Mako (XSpy)" <127767602+x-spy@users.noreply.github.com> Date: Sat, 13 Dec 2025 20:15:11 +0800 Subject: [PATCH 2/7] feat(driver): add wps drive support --- drivers/wps/meta.go | 2 +- drivers/wps/util.go | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/wps/meta.go b/drivers/wps/meta.go index 1501e5582..6cba4ac48 100644 --- a/drivers/wps/meta.go +++ b/drivers/wps/meta.go @@ -8,7 +8,7 @@ import ( type Addition struct { driver.RootPath Cookie string `json:"cookie" required:"true" type:"text"` - Mode string `json:"mode" type:"select" options:"商业,个人" default:"商业"` + Mode string `json:"mode" type:"select" options:"Business,Personal" default:"Business"` } var config = driver.Config{ diff --git a/drivers/wps/util.go b/drivers/wps/util.go index dff27b495..d0374e35a 100644 --- a/drivers/wps/util.go +++ b/drivers/wps/util.go @@ -590,7 +590,7 @@ func (d *Wps) put(ctx context.Context, 
dstDir model.Obj, file model.FileStreamer if node.file != nil && node.kind == "folder" { parentID = node.file.ID } - f, size, sha1Hex, sha256Hex, err := cacheAndHash(file, model.UpdateProgressWithRange(up, 0, 0.5)) + f, size, sha1Hex, sha256Hex, err := cacheAndHash(file, func(float64) {}) if err != nil { return err } @@ -605,7 +605,7 @@ func (d *Wps) put(ctx context.Context, dstDir model.Obj, file model.FileStreamer return err } rf := driver.NewLimitedUploadFile(ctx, f) - prog := driver.NewProgress(size, model.UpdateProgressWithRange(up, 0.5, 1)) + prog := driver.NewProgress(size, model.UpdateProgressWithRange(up, 0, 1)) req, err := http.NewRequestWithContext(ctx, http.MethodPut, info.URL, io.TeeReader(rf, prog)) if err != nil { return err @@ -618,7 +618,9 @@ func (d *Wps) put(ctx context.Context, dstDir model.Obj, file model.FileStreamer if method != "" && method != http.MethodPut { req.Method = method } - resp, err := base.RestyClient.GetClient().Do(req) + c := *base.RestyClient.GetClient() + c.Timeout = 0 + resp, err := (&c).Do(req) if err != nil { return err } From 9ed88c02ebb175d29f893a89513813db24e3bf1d Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sun, 14 Dec 2025 17:12:15 +0800 Subject: [PATCH 3/7] fix(wps): update personal mode string to English Signed-off-by: MadDogOwner --- drivers/wps/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/wps/util.go b/drivers/wps/util.go index d0374e35a..19d6b1d01 100644 --- a/drivers/wps/util.go +++ b/drivers/wps/util.go @@ -65,7 +65,7 @@ func (w countingWriter) Write(p []byte) (int, error) { } func (d *Wps) isPersonal() bool { - return strings.TrimSpace(d.Mode) == "个人" + return strings.TrimSpace(d.Mode) == "Personal" } func (d *Wps) driveHost() string { From 67da7f8e86235cdd74f2aeea7fb311d30a17d0b9 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sun, 14 Dec 2025 18:22:10 +0800 Subject: [PATCH 4/7] fix(wps): remove trailing slash from drive origin URL Signed-off-by: MadDogOwner --- 
drivers/wps/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/wps/util.go b/drivers/wps/util.go index 19d6b1d01..9500d056d 100644 --- a/drivers/wps/util.go +++ b/drivers/wps/util.go @@ -87,7 +87,7 @@ func (d *Wps) driveURL(path string) string { } func (d *Wps) origin() string { - return d.driveHost() + "/" + return d.driveHost() } func (d *Wps) canDownload(f *FileInfo) bool { From a222d3c94d564de665a3460d0fc75ac50c4a7d10 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sun, 14 Dec 2025 20:23:12 +0800 Subject: [PATCH 5/7] fix(wps): correct order of options in mode selection Signed-off-by: MadDogOwner --- drivers/wps/meta.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/wps/meta.go b/drivers/wps/meta.go index 6cba4ac48..6c1bb1b31 100644 --- a/drivers/wps/meta.go +++ b/drivers/wps/meta.go @@ -8,7 +8,7 @@ import ( type Addition struct { driver.RootPath Cookie string `json:"cookie" required:"true" type:"text"` - Mode string `json:"mode" type:"select" options:"Business,Personal" default:"Business"` + Mode string `json:"mode" type:"select" options:"Personal,Business" default:"Business"` } var config = driver.Config{ From 0c6043487862998713d223da677363805108ce12 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sun, 14 Dec 2025 20:43:36 +0800 Subject: [PATCH 6/7] fix(wps): enable local sort and upload overwrite Signed-off-by: MadDogOwner --- drivers/wps/meta.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/wps/meta.go b/drivers/wps/meta.go index 6c1bb1b31..7a3362f3a 100644 --- a/drivers/wps/meta.go +++ b/drivers/wps/meta.go @@ -13,15 +13,10 @@ type Addition struct { var config = driver.Config{ Name: "WPS", - LocalSort: false, - OnlyProxy: false, - NoCache: false, - NoUpload: false, - NeedMs: false, + LocalSort: true, DefaultRoot: "/", - CheckStatus: false, Alert: "", - NoOverwriteUpload: false, + NoOverwriteUpload: true, } func init() { From 
b5b0a7b4cde1eab3a85292ce9d238473729f2a05 Mon Sep 17 00:00:00 2001 From: x-spy <127767602+x-spy@users.noreply.github.com> Date: Mon, 15 Dec 2025 16:56:42 +0800 Subject: [PATCH 7/7] fix(wps): resolve put bugs, fix file op problems and optimize list logic - Fix uploading bugs. Support all uploading methods based on 8825.85d3c864.js - Fix issues in delete/copy/move while operating big folders. - Use cache to optimize performance of list, especially in a deep path. --- drivers/wps/util.go | 486 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 443 insertions(+), 43 deletions(-) diff --git a/drivers/wps/util.go b/drivers/wps/util.go index 9500d056d..a08b27005 100644 --- a/drivers/wps/util.go +++ b/drivers/wps/util.go @@ -1,6 +1,7 @@ package wps import ( + "bytes" "context" "crypto/sha1" "crypto/sha256" @@ -8,9 +9,11 @@ import ( "encoding/json" "fmt" "io" + "mime/multipart" "net/http" "strconv" "strings" + "sync" "time" "github.com/OpenListTeam/OpenList/v4/drivers/base" @@ -29,6 +32,18 @@ type resolvedNode struct { file *FileInfo } +type resolveCacheEntry struct { + node *resolvedNode + expire time.Time +} + +type resolveCacheStore struct { + mu sync.RWMutex + m map[string]resolveCacheEntry +} + +var resolveCaches sync.Map + type apiResult struct { Result string `json:"result"` Msg string `json:"msg"` @@ -38,13 +53,22 @@ type uploadCreateUpdateResp struct { apiResult Method string `json:"method"` URL string `json:"url"` + Store string `json:"store"` Request struct { - Headers map[string]string `json:"headers"` + Headers map[string]string `json:"headers"` + FormData map[string]string `json:"formData"` } `json:"request"` + Response struct { + ExpectCode []int `json:"expect_code"` + ArgsETag string `json:"args_etag"` + ArgsKey string `json:"args_key"` + } `json:"response"` } type uploadPutResp struct { NewFilename string `json:"newfilename"` + Sha1 string `json:"sha1"` + MD5 string `json:"md5"` } type personalGroupsResp struct { @@ -113,6 +137,72 @@ func (d *Wps) 
jsonRequest(ctx context.Context) *resty.Request { SetHeader("Origin", d.origin()) } +func statusOK(code int, expect []int) bool { + if len(expect) == 0 { + return code >= 200 && code < 300 + } + for _, v := range expect { + if v == code { + return true + } + } + return false +} + +func respArg(arg string, resp *http.Response, body []byte) string { + arg = strings.TrimSpace(arg) + if arg == "" { + return "" + } + l := strings.ToLower(arg) + if strings.HasPrefix(l, "header.") { + h := strings.TrimSpace(arg[len("header."):]) + if h == "" { + return "" + } + return strings.TrimSpace(resp.Header.Get(h)) + } + if strings.HasPrefix(l, "body.") { + k := strings.TrimSpace(arg[len("body."):]) + if k == "" { + return "" + } + var m map[string]interface{} + if err := json.Unmarshal(body, &m); err != nil { + return "" + } + if v, ok := m[k]; ok { + if s, ok := v.(string); ok { + return strings.TrimSpace(s) + } + } + } + return "" +} + +func extractXMLTag(v, tag string) string { + s := strings.TrimSpace(v) + if s == "" { + return "" + } + lt := strings.ToLower(tag) + open := "<" + lt + ">" + clos := "" + ls := strings.ToLower(s) + i := strings.Index(ls, open) + if i < 0 { + return "" + } + i += len(open) + j := strings.Index(ls[i:], clos) + if j < 0 { + return "" + } + r := strings.TrimSpace(s[i : i+j]) + r = strings.ReplaceAll(r, """, "") + return strings.Trim(r, `"'`) +} + func checkAPI(resp *resty.Response, result apiResult) error { if result.Result != "" && result.Result != "ok" { if result.Msg == "" { @@ -213,23 +303,97 @@ func joinPath(basePath, name string) string { return strings.TrimRight(basePath, "/") + "/" + name } +func normalizePath(path string) string { + clean := strings.TrimSpace(path) + if clean == "" || clean == "/" { + return "/" + } + return "/" + strings.Trim(clean, "/") +} + +func (d *Wps) resolveCacheStore() *resolveCacheStore { + if d == nil { + return nil + } + if v, ok := resolveCaches.Load(d); ok { + if s, ok := v.(*resolveCacheStore); ok { + return s 
+ } + } + s := &resolveCacheStore{m: make(map[string]resolveCacheEntry)} + if v, loaded := resolveCaches.LoadOrStore(d, s); loaded { + if s2, ok := v.(*resolveCacheStore); ok { + return s2 + } + } + return s +} + +func (d *Wps) getResolveCache(path string) (*resolvedNode, bool) { + s := d.resolveCacheStore() + if s == nil { + return nil, false + } + s.mu.RLock() + e, ok := s.m[path] + s.mu.RUnlock() + if !ok || e.node == nil { + return nil, false + } + if !e.expire.IsZero() && time.Now().After(e.expire) { + s.mu.Lock() + delete(s.m, path) + s.mu.Unlock() + return nil, false + } + return e.node, true +} + +func (d *Wps) setResolveCache(path string, node *resolvedNode) { + s := d.resolveCacheStore() + if s == nil || node == nil { + return + } + s.mu.Lock() + s.m[path] = resolveCacheEntry{node: node, expire: time.Now().Add(10 * time.Minute)} + s.mu.Unlock() +} + +func (d *Wps) clearResolveCache() { + s := d.resolveCacheStore() + if s == nil { + return + } + s.mu.Lock() + if len(s.m) != 0 { + s.m = make(map[string]resolveCacheEntry) + } + s.mu.Unlock() +} + func (d *Wps) resolvePath(ctx context.Context, path string) (*resolvedNode, error) { + cacheKey := normalizePath(path) + if n, ok := d.getResolveCache(cacheKey); ok { + return n, nil + } clean := strings.TrimSpace(path) if clean == "" { clean = "/" } clean = strings.Trim(clean, "/") if clean == "" { - return &resolvedNode{kind: "root"}, nil + n := &resolvedNode{kind: "root"} + d.setResolveCache("/", n) + return n, nil } - segs := strings.Split(clean, "/") + seg := strings.Split(clean, "/") groups, err := d.getGroups(ctx) if err != nil { return nil, err } var grp *Group for i := range groups { - if groups[i].Name == segs[0] { + if groups[i].Name == seg[0] { grp = &groups[i] break } @@ -237,19 +401,22 @@ func (d *Wps) resolvePath(ctx context.Context, path string) (*resolvedNode, erro if grp == nil { return nil, fmt.Errorf("group not found") } - if len(segs) == 1 { - return &resolvedNode{kind: "group", group: *grp}, 
nil + cur := "/" + seg[0] + gn := &resolvedNode{kind: "group", group: *grp} + d.setResolveCache(cur, gn) + if len(seg) == 1 { + return gn, nil } parentID := int64(0) - var last FileInfo - for i := 1; i < len(segs); i++ { + var lastNode *resolvedNode + for i := 1; i < len(seg); i++ { files, err := d.getFiles(ctx, grp.GroupID, parentID) if err != nil { return nil, err } var found *FileInfo for j := range files { - if files[j].Name == segs[i] { + if files[j].Name == seg[i] { found = &files[j] break } @@ -257,17 +424,24 @@ func (d *Wps) resolvePath(ctx context.Context, path string) (*resolvedNode, erro if found == nil { return nil, fmt.Errorf("path not found") } - if i < len(segs)-1 && found.Type != "folder" { + if i < len(seg)-1 && found.Type != "folder" { return nil, fmt.Errorf("path not found") } - last = *found - parentID = found.ID + fi := *found + parentID = fi.ID + cur = cur + "/" + seg[i] + kind := "file" + if fi.Type == "folder" { + kind = "folder" + } + n := &resolvedNode{kind: kind, group: *grp, file: &fi} + d.setResolveCache(cur, n) + lastNode = n } - kind := "file" - if last.Type == "folder" { - kind = "folder" + if lastNode == nil { + return nil, fmt.Errorf("path not found") } - return &resolvedNode{kind: kind, group: *grp, file: &last}, nil + return lastNode, nil } func (d *Wps) fileToObj(basePath string, f FileInfo) *Obj { @@ -334,7 +508,9 @@ func (d *Wps) list(ctx context.Context, basePath string) ([]model.Obj, error) { path: path, } res = append(res, obj) + d.setResolveCache(normalizePath(path), &resolvedNode{kind: "group", group: g}) } + d.setResolveCache("/", &resolvedNode{kind: "root"}) return res, nil } if node.kind != "group" && node.kind != "folder" { @@ -351,6 +527,13 @@ func (d *Wps) list(ctx context.Context, basePath string) ([]model.Obj, error) { res := make([]model.Obj, 0, len(files)) for _, f := range files { res = append(res, d.fileToObj(basePath, f)) + path := normalizePath(joinPath(basePath, f.Name)) + fi := f + kind := "file" + if 
fi.Type == "folder" { + kind = "folder" + } + d.setResolveCache(path, &resolvedNode{kind: kind, group: node.group, file: &fi}) } return res, nil } @@ -401,7 +584,11 @@ func (d *Wps) makeDir(ctx context.Context, parentDir model.Obj, dirName string) "name": dirName, "parentid": parentID, } - return d.doJSON(ctx, http.MethodPost, d.driveURL("/api/v5/files/folder"), body) + if err := d.doJSON(ctx, http.MethodPost, d.driveURL("/api/v5/files/folder"), body); err != nil { + return err + } + d.clearResolveCache() + return nil } func (d *Wps) move(ctx context.Context, srcObj, dstDir model.Obj) error { @@ -432,7 +619,29 @@ func (d *Wps) move(ctx context.Context, srcObj, dstDir model.Obj) error { "target_parentid": targetParentID, } url := fmt.Sprintf("/api/v3/groups/%d/files/batch/move", nodeSrc.group.GroupID) - return d.doJSON(ctx, http.MethodPost, d.driveURL(url), body) + for { + var res apiResult + resp, err := d.jsonRequest(ctx). + SetBody(body). + SetResult(&res). + SetError(&res). + Post(d.driveURL(url)) + if err != nil { + return err + } + + if resp.StatusCode() == 403 && res.Result == "fileTaskDuplicated" { + time.Sleep(500 * time.Millisecond) + continue + } + + if err := checkAPI(resp, res); err != nil { + return err + } + break + } + d.clearResolveCache() + return nil } func (d *Wps) rename(ctx context.Context, srcObj model.Obj, newName string) error { @@ -448,7 +657,11 @@ func (d *Wps) rename(ctx context.Context, srcObj model.Obj, newName string) erro } url := fmt.Sprintf("/api/v3/groups/%d/files/%d", node.group.GroupID, node.file.ID) body := map[string]string{"fname": newName} - return d.doJSON(ctx, http.MethodPut, d.driveURL(url), body) + if err := d.doJSON(ctx, http.MethodPut, d.driveURL(url), body); err != nil { + return err + } + d.clearResolveCache() + return nil } func (d *Wps) copy(ctx context.Context, srcObj, dstDir model.Obj) error { @@ -481,7 +694,29 @@ func (d *Wps) copy(ctx context.Context, srcObj, dstDir model.Obj) error { "duplicated_name_model": 1, 
} url := fmt.Sprintf("/api/v3/groups/%d/files/batch/copy", nodeSrc.group.GroupID) - return d.doJSON(ctx, http.MethodPost, d.driveURL(url), body) + for { + var res apiResult + resp, err := d.jsonRequest(ctx). + SetBody(body). + SetResult(&res). + SetError(&res). + Post(d.driveURL(url)) + if err != nil { + return err + } + + if resp.StatusCode() == 403 && res.Result == "fileTaskDuplicated" { + time.Sleep(500 * time.Millisecond) + continue + } + + if err := checkAPI(resp, res); err != nil { + return err + } + break + } + d.clearResolveCache() + return nil } func (d *Wps) remove(ctx context.Context, obj model.Obj) error { @@ -495,11 +730,36 @@ func (d *Wps) remove(ctx context.Context, obj model.Obj) error { if node.kind != "file" && node.kind != "folder" { return errs.NotSupport } + body := map[string]interface{}{ "fileids": []int64{node.file.ID}, } url := fmt.Sprintf("/api/v3/groups/%d/files/batch/delete", node.group.GroupID) - return d.doJSON(ctx, http.MethodPost, d.driveURL(url), body) + + for { + var res apiResult + resp, err := d.jsonRequest(ctx). + SetBody(body). + SetResult(&res). + SetError(&res). 
+ Post(d.driveURL(url)) + if err != nil { + return err + } + + // 无法连续创建文件夹删除。如果一定要删除,每0.5s 尝试一次创建下一个删除请求,应当避免递归删除文件夹 + if resp.StatusCode() == 403 && res.Result == "fileTaskDuplicated" { + time.Sleep(500 * time.Millisecond) + continue + } + + if err := checkAPI(resp, res); err != nil { + return err + } + break + } + d.clearResolveCache() + return nil } func cacheAndHash(file model.FileStreamer, up driver.UpdateProgress) (model.File, int64, string, string, error) { @@ -557,17 +817,25 @@ func normalizeETag(v string) string { return strings.Trim(v, `"`) } -func (d *Wps) commitUpload(ctx context.Context, etag string, groupID, parentID int64, name, sha1Hex string, size int64) error { +func (d *Wps) commitUpload(ctx context.Context, etag, key string, groupID, parentID int64, name, sha1Hex string, size int64, store string) error { + store = strings.TrimSpace(store) + if store == "" { + store = "ks3" + } + storeKey := "" + if key != "" { + storeKey = key + } body := map[string]interface{}{ "etag": etag, "groupid": groupID, - "key": "", + "key": key, "name": name, "parentid": parentID, "sha1": sha1Hex, "size": size, - "store": "ks3", - "storekey": "", + "store": store, + "storekey": storeKey, } return d.doJSON(ctx, http.MethodPost, d.driveURL("/api/v5/files/file"), body) } @@ -597,7 +865,16 @@ func (d *Wps) put(ctx context.Context, dstDir model.Obj, file model.FileStreamer if c, ok := f.(io.Closer); ok { defer c.Close() } - info, err := d.createUpload(ctx, node.group.GroupID, parentID, file.GetName(), size, sha1Hex, sha256Hex) + + // 在隐藏文件名前加_上传,这是WPS的限制,无法上传隐藏文件,也无法将任何文件重命名为隐藏文件,所有隐藏文件会被自动加上_ 上传 + // 甚至可以上传前缀是..的文件,但是单个点就是不行 + realName := file.GetName() + uploadName := realName + if strings.HasPrefix(realName, ".") { + uploadName = "_" + realName + } + + info, err := d.createUpload(ctx, node.group.GroupID, parentID, uploadName, size, sha1Hex, sha256Hex) if err != nil { return err } @@ -606,18 +883,92 @@ func (d *Wps) put(ctx context.Context, dstDir model.Obj, file 
model.FileStreamer } rf := driver.NewLimitedUploadFile(ctx, f) prog := driver.NewProgress(size, model.UpdateProgressWithRange(up, 0, 1)) - req, err := http.NewRequestWithContext(ctx, http.MethodPut, info.URL, io.TeeReader(rf, prog)) - if err != nil { - return err - } - req.ContentLength = size - for k, v := range info.Request.Headers { - req.Header.Set(k, v) - } + method := strings.ToUpper(strings.TrimSpace(info.Method)) - if method != "" && method != http.MethodPut { - req.Method = method + if method == "" { + method = http.MethodPut } + + var req *http.Request + if method == http.MethodPost && len(info.Request.FormData) > 0 { + if size == 0 { + var buf bytes.Buffer + mw := multipart.NewWriter(&buf) + for k, v := range info.Request.FormData { + if err := mw.WriteField(k, v); err != nil { + return err + } + } + part, err := mw.CreateFormFile("file", uploadName) + if err != nil { + return err + } + if _, err := io.Copy(part, io.TeeReader(rf, prog)); err != nil { + return err + } + if err := mw.Close(); err != nil { + return err + } + req, err = http.NewRequestWithContext(ctx, method, info.URL, bytes.NewReader(buf.Bytes())) + if err != nil { + return err + } + for k, v := range info.Request.Headers { + req.Header.Set(k, v) + } + req.Header.Set("Content-Type", mw.FormDataContentType()) + req.ContentLength = int64(buf.Len()) + req.Header.Set("Content-Length", strconv.FormatInt(req.ContentLength, 10)) + } else { + pr, pw := io.Pipe() + mw := multipart.NewWriter(pw) + req, err = http.NewRequestWithContext(ctx, method, info.URL, pr) + if err != nil { + return err + } + for k, v := range info.Request.Headers { + req.Header.Set(k, v) + } + req.Header.Set("Content-Type", mw.FormDataContentType()) + go func() { + for k, v := range info.Request.FormData { + if err := mw.WriteField(k, v); err != nil { + pw.CloseWithError(err) + return + } + } + part, err := mw.CreateFormFile("file", uploadName) + if err != nil { + pw.CloseWithError(err) + return + } + if _, err := io.Copy(part, 
io.TeeReader(rf, prog)); err != nil { + pw.CloseWithError(err) + return + } + if err := mw.Close(); err != nil { + pw.CloseWithError(err) + return + } + pw.Close() + }() + } + } else { + var body = io.TeeReader(rf, prog) + if size == 0 { + body = bytes.NewReader(nil) + } + req, err = http.NewRequestWithContext(ctx, method, info.URL, body) + if err != nil { + return err + } + for k, v := range info.Request.Headers { + req.Header.Set(k, v) + } + req.ContentLength = size + req.Header.Set("Content-Length", strconv.FormatInt(size, 10)) + } + c := *base.RestyClient.GetClient() c.Timeout = 0 resp, err := (&c).Do(req) @@ -625,25 +976,74 @@ func (d *Wps) put(ctx context.Context, dstDir model.Obj, file model.FileStreamer return err } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { + + if !statusOK(resp.StatusCode, info.Response.ExpectCode) { io.Copy(io.Discard, resp.Body) return fmt.Errorf("http error: %d", resp.StatusCode) } - etag := normalizeETag(resp.Header.Get("ETag")) - var pr uploadPutResp - if err := json.NewDecoder(resp.Body).Decode(&pr); err != nil { + + body, err := io.ReadAll(resp.Body) + if err != nil { return err } - sha1FromServer := strings.TrimSpace(pr.NewFilename) + + etag := normalizeETag(respArg(info.Response.ArgsETag, resp, body)) + if etag == "" { + etag = normalizeETag(resp.Header.Get("ETag")) + } + + key := strings.TrimSpace(respArg(info.Response.ArgsKey, resp, body)) + if key == "" { + key = strings.TrimSpace(resp.Header.Get("x-obs-save-key")) + } + + var pr uploadPutResp + sha1FromServer := "" + if err := json.Unmarshal(body, &pr); err == nil { + sha1FromServer = strings.TrimSpace(pr.NewFilename) + if sha1FromServer == "" { + sha1FromServer = strings.TrimSpace(pr.Sha1) + } + if etag == "" && pr.MD5 != "" { + etag = strings.TrimSpace(pr.MD5) + } + } + + if sha1FromServer == "" { + if v := extractXMLTag(string(body), "ETag"); v != "" { + sha1FromServer = v + if etag == "" { + etag = v + } + } + } + if sha1FromServer == "" && key != "" 
&& len(key) == 40 { + sha1FromServer = key + } + if sha1FromServer == "" { + sha1FromServer = sha1Hex + } + if etag == "" { return fmt.Errorf("empty etag") } if sha1FromServer == "" { - return fmt.Errorf("empty newfilename") + return fmt.Errorf("empty sha1") + } + + store := strings.TrimSpace(info.Store) + commitKey := "" + if strings.TrimSpace(info.Response.ArgsKey) != "" { + commitKey = key + if commitKey == "" { + commitKey = sha1FromServer + } } - if err := d.commitUpload(ctx, etag, node.group.GroupID, parentID, file.GetName(), sha1FromServer, size); err != nil { + + if err := d.commitUpload(ctx, etag, commitKey, node.group.GroupID, parentID, uploadName, sha1FromServer, size, store); err != nil { return err } + up(1) return nil }