diff --git a/shortcuts/drive/drive_search.go b/shortcuts/drive/drive_search.go new file mode 100644 index 000000000..4f34f49f3 --- /dev/null +++ b/shortcuts/drive/drive_search.go @@ -0,0 +1,806 @@ +// Copyright (c) 2026 Lark Technologies Pte. Ltd. +// SPDX-License-Identifier: MIT + +package drive + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" + "time" + + "github.com/larksuite/cli/internal/output" + "github.com/larksuite/cli/shortcuts/common" +) + +// driveSearchErrUserNotVisible is the Lark service code returned by +// doc_wiki/search when an open_id referenced in --creator-ids / --sharer-ids +// falls outside the app's user-visibility scope (different from the +// search:docs:read API scope). +const driveSearchErrUserNotVisible = 99992351 + +// open_time has a server-side cap of 3 months per request. Rather than +// reject or silently clamp, we narrow this request to the most recent +// 3-month slice and list the remaining slices in a stderr notice so the +// agent can re-invoke for older ranges. +const ( + driveSearchSliceDays = 90 // one slice = server-side 3-month cap + driveSearchMaxOpenedSpanDays = 365 // hard cap: reject --opened-* spans beyond ~1 year +) + +var driveSearchSortValues = []string{ + "default", + "edit_time", + "edit_time_asc", + "open_time", + "create_time", +} + +var driveSearchDocTypeSet = map[string]struct{}{ + "DOC": {}, "SHEET": {}, "BITABLE": {}, "MINDNOTE": {}, "FILE": {}, + "WIKI": {}, "DOCX": {}, "FOLDER": {}, "CATALOG": {}, "SLIDES": {}, "SHORTCUT": {}, +} + +// driveSearchHourAggregatedFields lists filter keys the server aggregates at +// hour granularity. We pre-snap start/end and emit a stderr notice so callers +// see what was sent and why. +var driveSearchHourAggregatedFields = map[string]struct{}{ + "my_edit_time": {}, + "my_comment_time": {}, +} + +// Server caps list filters at 20 entries each. 
We reject above-cap input +// locally so users and agents get a named-flag error instead of an opaque +// server-side failure or truncated result. +const ( + driveSearchMaxChatIDs = 20 + driveSearchMaxSharerIDs = 20 +) + +// DriveSearch searches docs/wikis via the v2 doc_wiki/search API using flat +// flags instead of a nested JSON filter, which is friendlier for AI agents and +// `--help` readers. +var DriveSearch = common.Shortcut{ + Service: "drive", + Command: "+search", + Description: "Search Lark docs, Wiki, and spreadsheet files with flat filters (Search v2: doc_wiki/search)", + Risk: "read", + Scopes: []string{"search:docs:read"}, + AuthTypes: []string{"user"}, + HasFormat: true, + Flags: []common.Flag{ + {Name: "query", Desc: "search keyword (may be empty to browse by filter only)"}, + + {Name: "mine", Type: "bool", Desc: "restrict to docs I created (uses current user's open_id)"}, + {Name: "creator-ids", Desc: "comma-separated creator open_ids; mutually exclusive with --mine"}, + + {Name: "edited-since", Desc: "start of [my edited] time window (e.g. 
7d, 1m, 1y, 2026-04-01, RFC3339, unix seconds)"}, + {Name: "edited-until", Desc: "end of [my edited] time window"}, + {Name: "commented-since", Desc: "start of [my commented] time window"}, + {Name: "commented-until", Desc: "end of [my commented] time window"}, + {Name: "opened-since", Desc: "start of [my opened] time window"}, + {Name: "opened-until", Desc: "end of [my opened] time window"}, + {Name: "created-since", Desc: "start of [document created] time window"}, + {Name: "created-until", Desc: "end of [document created] time window"}, + + {Name: "doc-types", Desc: "comma-separated types: doc,sheet,bitable,mindnote,file,wiki,docx,folder,catalog,slides,shortcut"}, + {Name: "folder-tokens", Desc: "comma-separated folder tokens (doc-only; mutually exclusive with --space-ids)"}, + {Name: "space-ids", Desc: "comma-separated wiki space IDs (wiki-only; mutually exclusive with --folder-tokens)"}, + {Name: "chat-ids", Desc: "comma-separated chat IDs"}, + {Name: "sharer-ids", Desc: "comma-separated sharer open_ids"}, + + {Name: "only-title", Type: "bool", Desc: "match titles only"}, + {Name: "only-comment", Type: "bool", Desc: "search comments only"}, + {Name: "sort", Desc: "sort type", Enum: driveSearchSortValues}, + + {Name: "page-token", Desc: "pagination token from a previous response"}, + {Name: "page-size", Default: "15", Desc: "page size (1-20, default 15)"}, + }, + Validate: func(ctx context.Context, runtime *common.RuntimeContext) error { + return validateDriveSearchIDs(readDriveSearchSpec(runtime)) + }, + Tips: []string{ + "Time flags accept relative (e.g. 7d, 1m, 1y), absolute (2026-04-01, RFC3339), or unix seconds.", + "my_edit_time and my_comment_time are hour-aggregated server-side; sub-hour inputs are snapped and a notice is printed to stderr.", + "Use --mine for a quick \"docs I created\" filter. For other people, use --creator-ids ou_xxx,ou_yyy.", + "--folder-tokens limits to doc-only search; --space-ids limits to wiki-only. 
They cannot be combined.", + }, + DryRun: func(ctx context.Context, runtime *common.RuntimeContext) *common.DryRunAPI { + spec := readDriveSearchSpec(runtime) + reqBody, notices, err := buildDriveSearchRequest(spec, runtime.UserOpenId(), time.Now()) + if err != nil { + return common.NewDryRunAPI().Set("error", err.Error()) + } + for _, n := range notices { + fmt.Fprintln(runtime.IO().ErrOut, n) + } + return common.NewDryRunAPI(). + POST("/open-apis/search/v2/doc_wiki/search"). + Body(reqBody) + }, + Execute: func(ctx context.Context, runtime *common.RuntimeContext) error { + spec := readDriveSearchSpec(runtime) + reqBody, notices, err := buildDriveSearchRequest(spec, runtime.UserOpenId(), time.Now()) + if err != nil { + return err + } + for _, n := range notices { + fmt.Fprintln(runtime.IO().ErrOut, n) + } + + data, err := callDriveSearchAPI(runtime, reqBody) + if err != nil { + return err + } + items, _ := data["res_units"].([]interface{}) + normalizedItems := addDriveSearchIsoTimeFields(items) + + resultData := map[string]interface{}{ + "total": data["total"], + "has_more": data["has_more"], + "page_token": data["page_token"], + "results": normalizedItems, + } + + runtime.OutFormat(resultData, &output.Meta{Count: len(normalizedItems)}, func(w io.Writer) { + renderDriveSearchTable(w, data, normalizedItems) + }) + return nil + }, +} + +// driveSearchSpec is the parsed flag set for a single +search invocation. 
+type driveSearchSpec struct { + Query string + PageToken string + PageSize string + + Mine bool + CreatorIDs []string + + EditedSince string + EditedUntil string + CommentedSince string + CommentedUntil string + OpenedSince string + OpenedUntil string + CreatedSince string + CreatedUntil string + + DocTypes []string + FolderTokens []string + SpaceIDs []string + ChatIDs []string + SharerIDs []string + + OnlyTitle bool + OnlyComment bool + Sort string +} + +func readDriveSearchSpec(runtime *common.RuntimeContext) driveSearchSpec { + return driveSearchSpec{ + Query: runtime.Str("query"), + PageToken: runtime.Str("page-token"), + PageSize: runtime.Str("page-size"), + + Mine: runtime.Bool("mine"), + CreatorIDs: common.SplitCSV(runtime.Str("creator-ids")), + + EditedSince: runtime.Str("edited-since"), + EditedUntil: runtime.Str("edited-until"), + CommentedSince: runtime.Str("commented-since"), + CommentedUntil: runtime.Str("commented-until"), + OpenedSince: runtime.Str("opened-since"), + OpenedUntil: runtime.Str("opened-until"), + CreatedSince: runtime.Str("created-since"), + CreatedUntil: runtime.Str("created-until"), + + DocTypes: upperAll(common.SplitCSV(runtime.Str("doc-types"))), + FolderTokens: common.SplitCSV(runtime.Str("folder-tokens")), + SpaceIDs: common.SplitCSV(runtime.Str("space-ids")), + ChatIDs: common.SplitCSV(runtime.Str("chat-ids")), + SharerIDs: common.SplitCSV(runtime.Str("sharer-ids")), + + OnlyTitle: runtime.Bool("only-title"), + OnlyComment: runtime.Bool("only-comment"), + Sort: strings.TrimSpace(runtime.Str("sort")), + } +} + +// buildDriveSearchRequest turns the parsed spec into the API request body and a +// list of stderr notices (e.g. hour-snap adjustments). It does all validation +// that depends on the combination of flag values. 
+func buildDriveSearchRequest(spec driveSearchSpec, userOpenID string, now time.Time) (map[string]interface{}, []string, error) { + if spec.Mine && len(spec.CreatorIDs) > 0 { + return nil, nil, output.ErrValidation("cannot combine --mine and --creator-ids") + } + if len(spec.FolderTokens) > 0 && len(spec.SpaceIDs) > 0 { + return nil, nil, output.ErrValidation("cannot combine --folder-tokens and --space-ids; doc and wiki scoped search cannot be combined") + } + if spec.Mine && userOpenID == "" { + return nil, nil, output.ErrValidation("--mine requires a logged-in user open_id, but none is configured; run `lark-cli auth login` or set user open_id in config") + } + + if err := validateDocTypes(spec.DocTypes); err != nil { + return nil, nil, err + } + + pageSize, err := parseDriveSearchPageSize(spec.PageSize) + if err != nil { + return nil, nil, err + } + + request := map[string]interface{}{ + "query": spec.Query, + "page_size": pageSize, + } + if spec.PageToken != "" { + request["page_token"] = spec.PageToken + } + + filter := map[string]interface{}{} + var notices []string + + // open_time is capped at 3 months server-side; if the user's window is + // longer, narrow this request and emit a notice with the remaining slices. + if n, err := clampOpenedTimeWindow(&spec, now); err != nil { + return nil, nil, err + } else if n != "" { + notices = append(notices, n) + } + + // Creator identity. + switch { + case spec.Mine: + filter["creator_ids"] = []string{userOpenID} + case len(spec.CreatorIDs) > 0: + filter["creator_ids"] = spec.CreatorIDs + } + + // Time dimensions — each fills at most one filter key; hour-aggregated ones + // also contribute notices. 
+ timeDims := []struct { + key string + since, til string + }{ + {"my_edit_time", spec.EditedSince, spec.EditedUntil}, + {"my_comment_time", spec.CommentedSince, spec.CommentedUntil}, + {"open_time", spec.OpenedSince, spec.OpenedUntil}, + {"create_time", spec.CreatedSince, spec.CreatedUntil}, + } + for _, d := range timeDims { + rng, dimNotices, err := buildTimeRangeFilter(d.key, d.since, d.til, now) + if err != nil { + return nil, nil, err + } + if rng != nil { + filter[d.key] = rng + } + notices = append(notices, dimNotices...) + } + + // Scalar scope filters. + if len(spec.DocTypes) > 0 { + filter["doc_types"] = spec.DocTypes + } + if len(spec.ChatIDs) > 0 { + filter["chat_ids"] = spec.ChatIDs + } + if len(spec.SharerIDs) > 0 { + filter["sharer_ids"] = spec.SharerIDs + } + if spec.OnlyTitle { + filter["only_title"] = true + } + if spec.OnlyComment { + filter["only_comment"] = true + } + if spec.Sort != "" { + // Server enum uses "DEFAULT_TYPE" for the default sort; every other + // value upper-cases 1:1. + sortType := strings.ToUpper(spec.Sort) + if sortType == "DEFAULT" { + sortType = "DEFAULT_TYPE" + } + filter["sort_type"] = sortType + } + + // Wiki-/folder-scoped variants: keep the shared filter, then add the + // scope-specific key only into the correct side. 
+ switch { + case len(spec.FolderTokens) > 0: + docFilter := cloneDriveSearchFilter(filter) + docFilter["folder_tokens"] = spec.FolderTokens + request["doc_filter"] = docFilter + case len(spec.SpaceIDs) > 0: + wikiFilter := cloneDriveSearchFilter(filter) + wikiFilter["space_ids"] = spec.SpaceIDs + request["wiki_filter"] = wikiFilter + default: + request["doc_filter"] = cloneDriveSearchFilter(filter) + request["wiki_filter"] = cloneDriveSearchFilter(filter) + } + + return request, notices, nil +} + +func parseDriveSearchPageSize(raw string) (int, error) { + if raw == "" { + return 15, nil + } + n, err := strconv.Atoi(raw) + if err != nil { + return 0, output.ErrValidation("--page-size must be a number, got %q", raw) + } + if n <= 0 { + return 15, nil + } + if n > 20 { + n = 20 + } + return n, nil +} + +// validateDriveSearchIDs checks open_id / chat_id format and enforces the +// 20-entry cap on chat_ids / sharer_ids before we build the API request, +// so misuse surfaces as a named-flag validation error rather than an opaque +// server-side failure or empty result. 
+func validateDriveSearchIDs(spec driveSearchSpec) error { + for _, id := range spec.CreatorIDs { + if _, err := common.ValidateUserID(id); err != nil { + return output.ErrValidation("--creator-ids %q: %s", id, err) + } + } + if n := len(spec.ChatIDs); n > driveSearchMaxChatIDs { + return output.ErrValidation("--chat-ids: max %d values per request, got %d", driveSearchMaxChatIDs, n) + } + for _, id := range spec.ChatIDs { + if _, err := common.ValidateChatID(id); err != nil { + return output.ErrValidation("--chat-ids %q: %s", id, err) + } + } + if n := len(spec.SharerIDs); n > driveSearchMaxSharerIDs { + return output.ErrValidation("--sharer-ids: max %d values per request, got %d", driveSearchMaxSharerIDs, n) + } + for _, id := range spec.SharerIDs { + if _, err := common.ValidateUserID(id); err != nil { + return output.ErrValidation("--sharer-ids %q: %s", id, err) + } + } + return nil +} + +func validateDocTypes(values []string) error { + for _, v := range values { + // values are already upper-cased by readDriveSearchSpec; compare as-is + // so the filter we emit to the server matches what we validated. + if _, ok := driveSearchDocTypeSet[v]; !ok { + return output.ErrValidation("--doc-types contains unknown value %q (allowed: doc,sheet,bitable,mindnote,file,wiki,docx,folder,catalog,slides,shortcut)", v) + } + } + return nil +} + +// upperAll returns a copy of s with every element upper-cased. +func upperAll(s []string) []string { + if len(s) == 0 { + return s + } + out := make([]string, len(s)) + for i, v := range s { + out[i] = strings.ToUpper(v) + } + return out +} + +// clampOpenedTimeWindow enforces the server-side 3-month cap on open_time by +// narrowing --opened-since / --opened-until to the most recent slice and +// returning a notice that lists every remaining slice, so the agent can +// re-invoke for older ranges. When no clamping is needed, returns ("", nil). 
+// +// Rules: +// - no --opened-since: skip (no range filter at all) +// - only --opened-since or both set, span ≤ 90 days: skip +// - span in (90, 365] days: clamp current request; spec is mutated in place +// with RFC3339 values so buildTimeRangeFilter parses round-trip +// - span > 365 days: validation error (prevents runaway slice counts) +func clampOpenedTimeWindow(spec *driveSearchSpec, now time.Time) (string, error) { + if spec.OpenedSince == "" { + return "", nil + } + sinceUnix, err := parseTimeValue(spec.OpenedSince, now) + if err != nil { + return "", output.ErrValidation("invalid --opened-since %q: %s", spec.OpenedSince, err) + } + var untilUnix int64 + if spec.OpenedUntil != "" { + untilUnix, err = parseTimeValue(spec.OpenedUntil, now) + if err != nil { + return "", output.ErrValidation("invalid --opened-until %q: %s", spec.OpenedUntil, err) + } + } else { + untilUnix = now.Unix() + } + if untilUnix <= sinceUnix { + // Malformed range; let buildTimeRangeFilter / server surface the error. + return "", nil + } + + spanSecs := untilUnix - sinceUnix + sliceSecs := int64(driveSearchSliceDays) * 24 * 3600 + if spanSecs <= sliceSecs { + return "", nil + } + maxSecs := int64(driveSearchMaxOpenedSpanDays) * 24 * 3600 + if spanSecs > maxSecs { + return "", output.ErrValidation( + "--opened-* window spans %d days, exceeds the %d-day (1-year) maximum; narrow the range or run multiple queries", + spanSecs/86400, driveSearchMaxOpenedSpanDays, + ) + } + + // Build slices newest-to-oldest; last (oldest) slice may be shorter than 90d. 
+ numSlices := int((spanSecs + sliceSecs - 1) / sliceSecs) // ceil + type sliceSpec struct{ start, end int64 } + slices := make([]sliceSpec, numSlices) + cursor := untilUnix + for i := 0; i < numSlices; i++ { + start := cursor - sliceSecs + if start < sinceUnix { + start = sinceUnix + } + slices[i] = sliceSpec{start: start, end: cursor} + cursor = start + } + + fmtTime := func(unix int64) string { return time.Unix(unix, 0).Format(time.RFC3339) } + approxMonths := spanSecs / (30 * 24 * 3600) + + var b strings.Builder + fmt.Fprintf(&b, "notice: --opened-* window spans %d days (~%d months), exceeds the server-side 3-month (%d-day) limit.\n", + spanSecs/86400, approxMonths, driveSearchSliceDays) + fmt.Fprintf(&b, " this query was narrowed to the most recent slice; %d slices total:\n", numSlices) + // Every slice — including the current one — prints concrete --opened-since + // / --opened-until values so an agent paginating slice 1 can copy them + // verbatim. Reusing the user's original relative time (e.g. "1y") would + // re-resolve against time.Now() on the next call and silently drift the + // window away from any --page-token issued for this call. + for i, s := range slices { + label := fmt.Sprintf("[slice %d/%d]", i+1, numSlices) + if i == 0 { + label = fmt.Sprintf("[slice %d/%d current]", i+1, numSlices) + } + // %-19s pads to "[slice N/M current]" (19 chars at the 5-slice cap). + fmt.Fprintf(&b, " %-19s --opened-since %s --opened-until %s\n", + label, fmtTime(s.start), fmtTime(s.end)) + } + fmt.Fprint(&b, " pagination: paginate within a slice via --page-token using that slice's --opened-since / --opened-until values verbatim (NOT the original relative time like '1y' / '8m' — relative times re-resolve against time.Now() and would mismatch the page_token); switch to the next slice's --opened-* flags only after has_more=false, and do not carry --page-token across slices.") + + // Rewrite spec so buildTimeRangeFilter emits the clamped window. 
+ spec.OpenedSince = fmtTime(slices[0].start) + spec.OpenedUntil = fmtTime(slices[0].end) + + return b.String(), nil +} + +// buildTimeRangeFilter parses since/until for one dimension and applies hour +// snapping for server-aggregated fields. Returns nil range when both inputs +// are empty. +func buildTimeRangeFilter(key, since, until string, now time.Time) (map[string]interface{}, []string, error) { + if since == "" && until == "" { + return nil, nil, nil + } + _, hourAggregated := driveSearchHourAggregatedFields[key] + + rng := map[string]interface{}{} + var notices []string + + if since != "" { + unix, err := parseTimeValue(since, now) + if err != nil { + return nil, nil, output.ErrValidation("invalid --%s-since %q: %s", timeDimCLIName(key), since, err) + } + if hourAggregated && unix%3600 != 0 { + snapped := floorHour(unix) + notices = append(notices, formatHourSnapNotice(key, "start", unix, snapped)) + unix = snapped + } + rng["start"] = unix + } + if until != "" { + unix, err := parseTimeValue(until, now) + if err != nil { + return nil, nil, output.ErrValidation("invalid --%s-until %q: %s", timeDimCLIName(key), until, err) + } + if hourAggregated && unix%3600 != 0 { + snapped := ceilHour(unix) + notices = append(notices, formatHourSnapNotice(key, "end", unix, snapped)) + unix = snapped + } + rng["end"] = unix + } + return rng, notices, nil +} + +// timeDimCLIName maps a filter key back to the CLI flag prefix, for error +// messages that say "--edited-since" rather than "my_edit_time.start". 
+func timeDimCLIName(key string) string { + switch key { + case "my_edit_time": + return "edited" + case "my_comment_time": + return "commented" + case "open_time": + return "opened" + case "create_time": + return "created" + } + return key +} + +func formatHourSnapNotice(key, side string, before, after int64) string { + return fmt.Sprintf("notice: %s has hour-level granularity server-side; %s %s → %s", + key, side, + time.Unix(before, 0).Format("2006-01-02 15:04:05"), + time.Unix(after, 0).Format("2006-01-02 15:04:05"), + ) +} + +func floorHour(unix int64) int64 { + return unix - (unix % 3600) +} + +func ceilHour(unix int64) int64 { + if unix%3600 == 0 { + return unix + } + return floorHour(unix) + 3600 +} + +var driveSearchRelativeRe = regexp.MustCompile(`^(\d+)([dmy])$`) + +// parseTimeValue accepts relative (7d, 1m=30d, 1y=365d), absolute dates in a +// few common layouts, RFC3339, and raw unix seconds. +func parseTimeValue(input string, now time.Time) (int64, error) { + s := strings.TrimSpace(input) + if s == "" { + return 0, fmt.Errorf("empty value") + } + + if m := driveSearchRelativeRe.FindStringSubmatch(s); m != nil { + n, _ := strconv.Atoi(m[1]) + var days int + switch m[2] { + case "d": + days = n + case "m": + days = n * 30 + case "y": + days = n * 365 + } + return now.Add(-time.Duration(days) * 24 * time.Hour).Unix(), nil + } + + layouts := []string{ + time.RFC3339, + "2006-01-02T15:04:05", + "2006-01-02 15:04:05", + "2006-01-02", + } + for _, layout := range layouts { + if t, err := time.ParseInLocation(layout, s, time.Local); err == nil { + return t.Unix(), nil + } + } + + // Digit-only string at the end so "20260423" doesn't get misread as unix. + // Real unix seconds for recent times are 10 digits; be conservative and + // require length >= 10 to avoid matching YYYYMMDD. 
Mirror unixToISO8601's + // ms-vs-s heuristic: 13-digit / >= 1e12 inputs are epoch-millis and get + // normalized to seconds, otherwise a copy-pasted ms timestamp would + // silently parse as a year-57000 unix and then trip the 1-year cap with + // a misleading message. + if len(s) >= 10 { + if n, err := strconv.ParseInt(s, 10, 64); err == nil { + if n >= 1e12 { + n /= 1000 + } + return n, nil + } + } + + return 0, fmt.Errorf("expected relative (7d/1m/1y), date (YYYY-MM-DD[ HH:MM:SS]), RFC3339, or unix seconds") +} + +func callDriveSearchAPI(runtime *common.RuntimeContext, reqBody map[string]interface{}) (map[string]interface{}, error) { + data, err := runtime.CallAPI("POST", "/open-apis/search/v2/doc_wiki/search", nil, reqBody) + if err != nil { + return nil, enrichDriveSearchError(err) + } + return data, nil +} + +// enrichDriveSearchError adds a +search-specific hint for known opaque Lark +// codes; other errors pass through unchanged. +func enrichDriveSearchError(err error) error { + var exitErr *output.ExitError + if !errors.As(err, &exitErr) || exitErr.Detail == nil { + return err + } + if exitErr.Detail.Code != driveSearchErrUserNotVisible { + return err + } + detail := *exitErr.Detail + detail.Hint = "one or more open_ids in --creator-ids / --sharer-ids are outside this app's user-visibility scope (this is the app's contact visibility, not the search:docs:read API scope); ask an admin to grant the app visibility to those users in the developer console, or drop the unreachable open_ids" + return &output.ExitError{ + Code: exitErr.Code, + Detail: &detail, + Err: exitErr.Err, + Raw: exitErr.Raw, + } +} + +func cloneDriveSearchFilter(src map[string]interface{}) map[string]interface{} { + dst := make(map[string]interface{}, len(src)) + for k, v := range src { + dst[k] = v + } + return dst +} + +// renderDriveSearchTable mirrors the column layout of doc +search so the pretty +// output is consistent for users switching between the two. 
+func renderDriveSearchTable(w io.Writer, data map[string]interface{}, items []interface{}) { + if len(items) == 0 { + fmt.Fprintln(w, "No matching results found.") + return + } + + htmlTagRe := regexp.MustCompile(``) + var rows []map[string]interface{} + for _, item := range items { + u, _ := item.(map[string]interface{}) + if u == nil { + continue + } + var rawTitle string + if s, ok := u["title_highlighted"].(string); ok && s != "" { + rawTitle = s + } else if s, ok := u["title"].(string); ok { + rawTitle = s + } + title := common.TruncateStr(htmlTagRe.ReplaceAllString(rawTitle, ""), 50) + + resultMeta, _ := u["result_meta"].(map[string]interface{}) + docTypes := "" + if resultMeta != nil { + docTypes = fmt.Sprintf("%v", resultMeta["doc_types"]) + } + entityType := fmt.Sprintf("%v", u["entity_type"]) + typeStr := docTypes + if typeStr == "" || typeStr == "" { + typeStr = entityType + } + + var url, editTime string + if resultMeta != nil { + if s, ok := resultMeta["url"].(string); ok { + url = s + } + if s, ok := resultMeta["update_time_iso"].(string); ok { + editTime = s + } + } + if len(url) > 80 { + url = url[:80] + } + + rows = append(rows, map[string]interface{}{ + "type": typeStr, + "title": title, + "edit_time": editTime, + "url": url, + }) + } + + output.PrintTable(w, rows) + moreHint := "" + hasMore, _ := data["has_more"].(bool) + if hasMore { + moreHint = " (more available, use --format json to get page_token, then --page-token to paginate)" + } + fmt.Fprintf(w, "\n%d result(s)%s\n", len(rows), moreHint) +} + +// addDriveSearchIsoTimeFields recursively annotates every `*_time` numeric +// field with a matching `*_time_iso` RFC3339 string, so clients that parse +// JSON output don't have to convert epoch timestamps themselves. 
+func addDriveSearchIsoTimeFields(value interface{}) []interface{} { + arr, ok := value.([]interface{}) + if !ok { + return nil + } + out := make([]interface{}, len(arr)) + for i, item := range arr { + out[i] = addDriveSearchIsoTimeFieldsOne(item) + } + return out +} + +func addDriveSearchIsoTimeFieldsOne(value interface{}) interface{} { + switch v := value.(type) { + case []interface{}: + result := make([]interface{}, len(v)) + for i, item := range v { + result[i] = addDriveSearchIsoTimeFieldsOne(item) + } + return result + case map[string]interface{}: + out := make(map[string]interface{}) + for key, item := range v { + if strings.HasSuffix(key, "_time_iso") { + out[key] = item + continue + } + out[key] = addDriveSearchIsoTimeFieldsOne(item) + if strings.HasSuffix(key, "_time") { + // If the input already carries the matching `_iso` sibling, + // the iso-suffix passthrough branch will copy it; don't race + // against it (map iteration order is non-deterministic). + if _, exists := v[key+"_iso"]; exists { + continue + } + if iso := unixToISO8601(item); iso != "" { + out[key+"_iso"] = iso + } + } + } + return out + default: + return value + } +} + +func unixToISO8601(v interface{}) string { + if v == nil { + return "" + } + var num float64 + switch val := v.(type) { + case float64: + num = val + case json.Number: + parsed, err := val.Float64() + if err != nil { + return "" + } + num = parsed + case string: + parsed, err := strconv.ParseFloat(val, 64) + if err != nil { + return "" + } + num = parsed + case int64: + num = float64(val) + case int: + num = float64(val) + default: + return "" + } + if math.IsInf(num, 0) || math.IsNaN(num) { + return "" + } + secs := int64(num) + if num >= 1e12 { + secs = secs / 1000 + } + return time.Unix(secs, 0).Format(time.RFC3339) +} diff --git a/shortcuts/drive/drive_search_test.go b/shortcuts/drive/drive_search_test.go new file mode 100644 index 000000000..a26faf3e6 --- /dev/null +++ b/shortcuts/drive/drive_search_test.go @@ -0,0 
+1,962 @@ +// Copyright (c) 2026 Lark Technologies Pte. Ltd. +// SPDX-License-Identifier: MIT + +package drive + +import ( + "bytes" + "encoding/json" + "errors" + "math" + "reflect" + "strings" + "testing" + "time" + + "github.com/larksuite/cli/internal/output" +) + +// TestClampOpenedTimeWindow covers the 3-month / 1-year boundary logic that +// narrows --opened-since / --opened-until and generates the multi-slice notice. +func TestClampOpenedTimeWindow(t *testing.T) { + t.Parallel() + + // Fixed "now" keeps RFC3339 output stable across runs. + now := time.Date(2026, 4, 24, 16, 0, 0, 0, time.UTC) + day := int64(86400) + + t.Run("no opened-since: no clamp, no notice", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{OpenedUntil: "2026-04-01"} + notice, err := clampOpenedTimeWindow(&spec, now) + if err != nil || notice != "" { + t.Fatalf("got notice=%q err=%v, want both empty", notice, err) + } + if spec.OpenedSince != "" || spec.OpenedUntil != "2026-04-01" { + t.Fatalf("spec mutated unexpectedly: %+v", spec) + } + }) + + t.Run("span within 90d: no clamp", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{OpenedSince: "30d"} + notice, err := clampOpenedTimeWindow(&spec, now) + if err != nil || notice != "" { + t.Fatalf("got notice=%q err=%v, want both empty", notice, err) + } + if spec.OpenedSince != "30d" { + t.Fatalf("spec.OpenedSince mutated: %q", spec.OpenedSince) + } + }) + + t.Run("exactly 90 days: no clamp", func(t *testing.T) { + t.Parallel() + since := now.Unix() - 90*day + spec := driveSearchSpec{ + OpenedSince: time.Unix(since, 0).UTC().Format(time.RFC3339), + } + notice, err := clampOpenedTimeWindow(&spec, now) + if err != nil || notice != "" { + t.Fatalf("got notice=%q err=%v, want no clamp at boundary", notice, err) + } + }) + + t.Run("91 days: 2-slice clamp", func(t *testing.T) { + t.Parallel() + since := now.Unix() - 91*day + spec := driveSearchSpec{ + OpenedSince: time.Unix(since, 0).UTC().Format(time.RFC3339), + } + 
notice, err := clampOpenedTimeWindow(&spec, now) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if !strings.Contains(notice, "2 slices total") { + t.Fatalf("expected '2 slices total' in notice, got:\n%s", notice) + } + // Each slice line — including slice 1 — must spell out concrete + // --opened-since / --opened-until values so a paginating agent can + // copy them verbatim instead of re-using the user's original + // relative time (which would drift against time.Now()). + for _, label := range []string{"[slice 1/2 current]", "[slice 2/2]"} { + var line string + for _, l := range strings.Split(notice, "\n") { + if strings.Contains(l, label) { + line = l + break + } + } + if line == "" { + t.Fatalf("missing %s line, got:\n%s", label, notice) + } + if !strings.Contains(line, "--opened-since ") || !strings.Contains(line, "--opened-until ") { + t.Fatalf("%s line must spell out both flag values, got: %q\nfull notice:\n%s", label, line, notice) + } + } + // After clamp the request window is exactly the most recent 90 days. 
+ clampedSince, err := parseTimeValue(spec.OpenedSince, now) + if err != nil { + t.Fatalf("rewritten opened-since not parseable: %v", err) + } + clampedUntil, err := parseTimeValue(spec.OpenedUntil, now) + if err != nil { + t.Fatalf("rewritten opened-until not parseable: %v", err) + } + if clampedUntil-clampedSince != 90*day { + t.Fatalf("clamped span = %d days, want 90", (clampedUntil-clampedSince)/day) + } + if clampedUntil != now.Unix() { + t.Fatalf("clamped until should default to now; got %d, want %d", clampedUntil, now.Unix()) + } + }) + + t.Run("8 months: 3-slice clamp with shorter tail", func(t *testing.T) { + t.Parallel() + since := now.Unix() - 240*day // 8m ≈ 240 days + spec := driveSearchSpec{ + OpenedSince: time.Unix(since, 0).UTC().Format(time.RFC3339), + } + notice, err := clampOpenedTimeWindow(&spec, now) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + for _, want := range []string{"3 slices total", "[slice 1/3 current]", "[slice 2/3]", "[slice 3/3]"} { + if !strings.Contains(notice, want) { + t.Fatalf("missing %q in notice:\n%s", want, notice) + } + } + }) + + t.Run("365 days: 5-slice clamp at upper bound", func(t *testing.T) { + t.Parallel() + since := now.Unix() - 365*day + spec := driveSearchSpec{ + OpenedSince: time.Unix(since, 0).UTC().Format(time.RFC3339), + } + notice, err := clampOpenedTimeWindow(&spec, now) + if err != nil { + t.Fatalf("365 days should clamp, got err: %v", err) + } + if !strings.Contains(notice, "5 slices total") { + t.Fatalf("expected '5 slices total' for 365-day span, got:\n%s", notice) + } + }) + + t.Run("over 365 days: hard-cap error", func(t *testing.T) { + t.Parallel() + since := now.Unix() - 366*day + spec := driveSearchSpec{ + OpenedSince: time.Unix(since, 0).UTC().Format(time.RFC3339), + } + _, err := clampOpenedTimeWindow(&spec, now) + if err == nil { + t.Fatal("expected error for 366-day span, got nil") + } + if !strings.Contains(err.Error(), "365-day") { + t.Fatalf("error should mention 365-day 
cap, got: %v", err) + } + }) + + t.Run("since > until: no clamp, defer to downstream", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{ + OpenedSince: "2026-04-01", + OpenedUntil: "2026-03-01", + } + notice, err := clampOpenedTimeWindow(&spec, now) + if err != nil || notice != "" { + t.Fatalf("got notice=%q err=%v, want both empty for inverted range", notice, err) + } + }) + + t.Run("invalid opened-since: validation error", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{OpenedSince: "not-a-date"} + _, err := clampOpenedTimeWindow(&spec, now) + if err == nil { + t.Fatal("expected validation error for unparseable since") + } + if !strings.Contains(err.Error(), "--opened-since") { + t.Fatalf("error should name the flag, got: %v", err) + } + }) +} + +func TestParseDriveSearchPageSize(t *testing.T) { + t.Parallel() + tests := []struct { + name string + raw string + want int + wantErr bool + }{ + {"empty defaults to 15", "", 15, false}, + {"valid in-range", "10", 10, false}, + {"zero falls back to 15", "0", 15, false}, + {"negative falls back to 15", "-5", 15, false}, + {"clamps to 20 when exceeded", "100", 20, false}, + {"non-numeric is a hard error", "abc", 0, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got, err := parseDriveSearchPageSize(tt.raw) + if (err != nil) != tt.wantErr { + t.Fatalf("err=%v, wantErr=%v", err, tt.wantErr) + } + if !tt.wantErr && got != tt.want { + t.Fatalf("got %d, want %d", got, tt.want) + } + }) + } +} + +func TestValidateDocTypes(t *testing.T) { + t.Parallel() + if err := validateDocTypes(nil); err != nil { + t.Fatalf("nil slice should be valid, got: %v", err) + } + if err := validateDocTypes([]string{"DOC", "SHEET", "BITABLE"}); err != nil { + t.Fatalf("known values should pass, got: %v", err) + } + err := validateDocTypes([]string{"DOC", "PIE"}) + if err == nil || !strings.Contains(err.Error(), "PIE") { + t.Fatalf("expected error naming the unknown value, got: 
%v", err) + } +} + +func TestUpperAll(t *testing.T) { + t.Parallel() + if got := upperAll(nil); got != nil { + t.Fatalf("nil input should return nil, got %v", got) + } + got := upperAll([]string{"docx", "Sheet", "BITABLE"}) + want := []string{"DOCX", "SHEET", "BITABLE"} + if !reflect.DeepEqual(got, want) { + t.Fatalf("got %v, want %v", got, want) + } +} + +func TestValidateDriveSearchIDs(t *testing.T) { + t.Parallel() + + t.Run("all valid", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{ + CreatorIDs: []string{"ou_aaa"}, + ChatIDs: []string{"oc_xxx"}, + SharerIDs: []string{"ou_bbb"}, + } + if err := validateDriveSearchIDs(spec); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("bad creator id format", func(t *testing.T) { + t.Parallel() + err := validateDriveSearchIDs(driveSearchSpec{CreatorIDs: []string{"u_bad"}}) + if err == nil || !strings.Contains(err.Error(), "--creator-ids") { + t.Fatalf("expected --creator-ids error, got: %v", err) + } + }) + + t.Run("bad chat id format", func(t *testing.T) { + t.Parallel() + err := validateDriveSearchIDs(driveSearchSpec{ChatIDs: []string{"chat_bad"}}) + if err == nil || !strings.Contains(err.Error(), "--chat-ids") { + t.Fatalf("expected --chat-ids error, got: %v", err) + } + }) + + t.Run("bad sharer id format", func(t *testing.T) { + t.Parallel() + err := validateDriveSearchIDs(driveSearchSpec{SharerIDs: []string{"u_bad"}}) + if err == nil || !strings.Contains(err.Error(), "--sharer-ids") { + t.Fatalf("expected --sharer-ids error, got: %v", err) + } + }) + + t.Run("chat ids exactly at cap is allowed", func(t *testing.T) { + t.Parallel() + ids := make([]string, driveSearchMaxChatIDs) + for i := range ids { + ids[i] = "oc_x" + } + if err := validateDriveSearchIDs(driveSearchSpec{ChatIDs: ids}); err != nil { + t.Fatalf("exactly cap should pass, got: %v", err) + } + }) + + t.Run("chat ids over cap", func(t *testing.T) { + t.Parallel() + ids := make([]string, driveSearchMaxChatIDs+1) + for i 
:= range ids { + ids[i] = "oc_x" + } + err := validateDriveSearchIDs(driveSearchSpec{ChatIDs: ids}) + if err == nil || !strings.Contains(err.Error(), "max") { + t.Fatalf("expected cap error, got: %v", err) + } + }) + + t.Run("sharer ids exactly at cap is allowed", func(t *testing.T) { + t.Parallel() + ids := make([]string, driveSearchMaxSharerIDs) + for i := range ids { + ids[i] = "ou_x" + } + if err := validateDriveSearchIDs(driveSearchSpec{SharerIDs: ids}); err != nil { + t.Fatalf("exactly cap should pass, got: %v", err) + } + }) + + t.Run("sharer ids over cap", func(t *testing.T) { + t.Parallel() + ids := make([]string, driveSearchMaxSharerIDs+1) + for i := range ids { + ids[i] = "ou_x" + } + err := validateDriveSearchIDs(driveSearchSpec{SharerIDs: ids}) + if err == nil || !strings.Contains(err.Error(), "max") { + t.Fatalf("expected cap error, got: %v", err) + } + }) +} + +func TestBuildTimeRangeFilter(t *testing.T) { + t.Parallel() + now := time.Date(2026, 4, 24, 16, 0, 0, 0, time.UTC) + + t.Run("both empty: nil range, no notice", func(t *testing.T) { + t.Parallel() + rng, notices, err := buildTimeRangeFilter("open_time", "", "", now) + if err != nil || rng != nil || len(notices) != 0 { + t.Fatalf("got rng=%v notices=%v err=%v", rng, notices, err) + } + }) + + t.Run("open_time passes through without snap", func(t *testing.T) { + t.Parallel() + rng, notices, err := buildTimeRangeFilter("open_time", + "2026-04-20T10:30:45+08:00", "2026-04-21T11:45:30+08:00", now) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(notices) != 0 { + t.Fatalf("open_time should not snap, got notices: %v", notices) + } + if rng["start"] == nil || rng["end"] == nil { + t.Fatalf("range missing endpoints: %v", rng) + } + }) + + t.Run("my_edit_time snaps sub-hour values", func(t *testing.T) { + t.Parallel() + rng, notices, err := buildTimeRangeFilter("my_edit_time", + "2026-04-20T10:30:45+08:00", "2026-04-21T11:45:30+08:00", now) + if err != nil { + t.Fatalf("err: %v", err) + } + 
if len(notices) != 2 { + t.Fatalf("expected 2 snap notices (start + end), got %d: %v", len(notices), notices) + } + startUnix := rng["start"].(int64) + endUnix := rng["end"].(int64) + if startUnix%3600 != 0 || endUnix%3600 != 0 { + t.Fatalf("snapped values should align to hour: start=%d end=%d", startUnix, endUnix) + } + }) + + t.Run("invalid since surfaces with flag name", func(t *testing.T) { + t.Parallel() + _, _, err := buildTimeRangeFilter("my_edit_time", "garbage", "", now) + if err == nil || !strings.Contains(err.Error(), "--edited-since") { + t.Fatalf("expected --edited-since in error, got: %v", err) + } + }) + + t.Run("invalid until surfaces with flag name", func(t *testing.T) { + t.Parallel() + _, _, err := buildTimeRangeFilter("open_time", "", "garbage", now) + if err == nil || !strings.Contains(err.Error(), "--opened-until") { + t.Fatalf("expected --opened-until in error, got: %v", err) + } + }) +} + +func TestFloorAndCeilHour(t *testing.T) { + t.Parallel() + // 16:23:45 = unix 1745195025 (arbitrary) + t.Run("floor truncates", func(t *testing.T) { + t.Parallel() + if got := floorHour(1745195025); got%3600 != 0 || got >= 1745195025 { + t.Fatalf("floor(1745195025)=%d invalid", got) + } + }) + t.Run("ceil rounds up", func(t *testing.T) { + t.Parallel() + got := ceilHour(1745195025) + if got%3600 != 0 || got <= 1745195025 { + t.Fatalf("ceil(1745195025)=%d invalid", got) + } + }) + t.Run("ceil at exact hour is no-op", func(t *testing.T) { + t.Parallel() + exact := int64(1745193600) + if got := ceilHour(exact); got != exact { + t.Fatalf("ceil at hour boundary should be identity, got %d", got) + } + }) +} + +func TestParseTimeValue(t *testing.T) { + t.Parallel() + now := time.Date(2026, 4, 24, 16, 0, 0, 0, time.Local) + + tests := []struct { + name string + input string + wantErr bool + }{ + {"empty errors", "", true}, + {"7d relative", "7d", false}, + {"1m relative", "1m", false}, + {"1y relative", "1y", false}, + {"date-only YYYY-MM-DD", "2026-04-01", 
false}, + {"datetime with space", "2026-04-01 10:00:00", false}, + {"datetime with T", "2026-04-01T10:00:00", false}, + {"RFC3339 with offset", "2026-04-01T10:00:00+08:00", false}, + {"unix seconds", "1745193600", false}, + {"too short to be unix, garbage", "12345", true}, + {"YYYYMMDD digits not unix", "20260423", true}, + {"unparseable text", "not-a-date", true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + _, err := parseTimeValue(tt.input, now) + if (err != nil) != tt.wantErr { + t.Fatalf("err=%v, wantErr=%v", err, tt.wantErr) + } + }) + } + + // Sanity: relative units must scale correctly. A regression where "1m" + // silently meant "1 minute" instead of "30 days" would slip past the + // wantErr-only table above; this guards the unit semantics. + t.Run("relative units scale: 7d < 1m < 1y", func(t *testing.T) { + t.Parallel() + got7d, err := parseTimeValue("7d", now) + if err != nil { + t.Fatalf("7d: %v", err) + } + got1m, err := parseTimeValue("1m", now) + if err != nil { + t.Fatalf("1m: %v", err) + } + got1y, err := parseTimeValue("1y", now) + if err != nil { + t.Fatalf("1y: %v", err) + } + // All three are "now minus N days"; larger N means smaller (older) unix. + if !(got1y < got1m && got1m < got7d && got7d < now.Unix()) { + t.Fatalf("expected got1y < got1m < got7d < now; got %d %d %d (now=%d)", + got1y, got1m, got7d, now.Unix()) + } + // Spot-check the conversions: "1m" = 30d, "1y" = 365d. + const day = int64(86400) + if now.Unix()-got1m != 30*day { + t.Fatalf("'1m' should resolve to now-30d, got delta %d days", (now.Unix()-got1m)/day) + } + if now.Unix()-got1y != 365*day { + t.Fatalf("'1y' should resolve to now-365d, got delta %d days", (now.Unix()-got1y)/day) + } + }) + + // Sanity: unix-seconds round-trips exactly (no parsing as date). 
+ t.Run("unix-seconds input round-trips", func(t *testing.T) { + t.Parallel() + got, err := parseTimeValue("1745193600", now) + if err != nil { + t.Fatalf("err: %v", err) + } + if got != 1745193600 { + t.Fatalf("unix round-trip got %d, want 1745193600", got) + } + }) + + // Regression: a 13-digit epoch-millis timestamp must be normalized to + // seconds. Previously it silently parsed as year-57000 and tripped the + // 1-year cap downstream with a misleading "exceeds 365 days" message. + t.Run("epoch-millis input normalizes to seconds", func(t *testing.T) { + t.Parallel() + got, err := parseTimeValue("1745193600000", now) + if err != nil { + t.Fatalf("err: %v", err) + } + if got != 1745193600 { + t.Fatalf("ms timestamp should normalize to %d seconds, got %d", int64(1745193600), got) + } + }) +} + +func TestUnixToISO8601(t *testing.T) { + t.Parallel() + const sec int64 = 1745193600 // 2025-04-21 00:00 UTC; only the YYYY-MM-DD prefix is checked below to stay timezone-agnostic + wantPrefix := time.Unix(sec, 0).Format(time.RFC3339)[:10] // YYYY-MM-DD prefix is timezone-stable + + tests := []struct { + name string + in interface{} + want string // empty means expect empty result + }{ + {"int64", sec, wantPrefix}, + {"int", int(sec), wantPrefix}, + {"float64", float64(sec), wantPrefix}, + {"json.Number", json.Number("1745193600"), wantPrefix}, + {"string numeric", "1745193600", wantPrefix}, + {"milliseconds get divided", sec * 1000, wantPrefix}, + {"nil returns empty", nil, ""}, + {"bool ignored", true, ""}, + {"unparseable string", "abc", ""}, + {"NaN returns empty", math.NaN(), ""}, + {"Inf returns empty", math.Inf(1), ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got := unixToISO8601(tt.in) + if tt.want == "" { + if got != "" { + t.Fatalf("want empty, got %q", got) + } + return + } + if !strings.HasPrefix(got, tt.want) { + t.Fatalf("got %q, want prefix %q", got, tt.want) + } + }) + } +} + +func 
TestAddDriveSearchIsoTimeFields(t *testing.T) { + t.Parallel() + + t.Run("non-array input returns nil", func(t *testing.T) { + t.Parallel() + if got := addDriveSearchIsoTimeFields("not-an-array"); got != nil { + t.Fatalf("expected nil, got %v", got) + } + }) + + t.Run("annotates *_time at top level", func(t *testing.T) { + t.Parallel() + items := []interface{}{ + map[string]interface{}{"open_time": int64(1745193600)}, + } + row := addDriveSearchIsoTimeFields(items)[0].(map[string]interface{}) + if _, ok := row["open_time_iso"].(string); !ok { + t.Fatalf("open_time_iso should have been added, got: %v", row) + } + }) + + t.Run("recurses into nested map and annotates", func(t *testing.T) { + t.Parallel() + items := []interface{}{ + map[string]interface{}{ + "result_meta": map[string]interface{}{ + "update_time": json.Number("1745193600"), + }, + }, + } + row := addDriveSearchIsoTimeFields(items)[0].(map[string]interface{}) + meta := row["result_meta"].(map[string]interface{}) + if _, ok := meta["update_time_iso"].(string); !ok { + t.Fatalf("nested update_time_iso missing, got: %v", meta) + } + }) + + t.Run("standalone *_time_iso key passes through", func(t *testing.T) { + t.Parallel() + // No sibling *_time key, so the iso-suffix passthrough branch is the + // only one that touches this key — deterministic by construction. + items := []interface{}{ + map[string]interface{}{"some_time_iso": "preserved"}, + } + row := addDriveSearchIsoTimeFields(items)[0].(map[string]interface{}) + if row["some_time_iso"] != "preserved" { + t.Fatalf("existing _time_iso value should pass through, got: %v", row["some_time_iso"]) + } + }) + + // Regression: when both *_time and *_time_iso are present in the same map, + // the pre-existing _iso value must always win, regardless of map iteration + // order. This used to be flaky (a generated iso could overwrite the input + // one depending on which key got visited last). 
+ t.Run("pre-existing *_iso wins over generated when both keys coexist", func(t *testing.T) { + t.Parallel() + const preserved = "PRESERVED-ISO-VALUE" + // Run several times to make a map-iteration-order race surface + // quickly if the guard regresses. + for i := 0; i < 50; i++ { + items := []interface{}{ + map[string]interface{}{ + "open_time": int64(1745193600), + "open_time_iso": preserved, + }, + } + row := addDriveSearchIsoTimeFields(items)[0].(map[string]interface{}) + if row["open_time_iso"] != preserved { + t.Fatalf("attempt %d: open_time_iso = %v, want %q (pre-existing must win)", + i, row["open_time_iso"], preserved) + } + } + }) +} + +func TestEnrichDriveSearchError(t *testing.T) { + t.Parallel() + + t.Run("non-ExitError passes through", func(t *testing.T) { + t.Parallel() + orig := errors.New("plain error") + if got := enrichDriveSearchError(orig); got != orig { + t.Fatalf("plain error should pass through unchanged") + } + }) + + t.Run("ExitError without Detail passes through", func(t *testing.T) { + t.Parallel() + orig := &output.ExitError{Code: 1} + if got := enrichDriveSearchError(orig); got != orig { + t.Fatalf("ExitError without Detail should pass through unchanged") + } + }) + + t.Run("ExitError with non-matching code passes through", func(t *testing.T) { + t.Parallel() + orig := &output.ExitError{ + Code: 1, + Detail: &output.ErrDetail{Code: 12345, Message: "other"}, + } + if got := enrichDriveSearchError(orig); got != orig { + t.Fatalf("non-matching code should pass through unchanged") + } + }) + + t.Run("matching code rewrites Hint without mutating original", func(t *testing.T) { + t.Parallel() + orig := &output.ExitError{ + Code: 1, + Detail: &output.ErrDetail{ + Code: driveSearchErrUserNotVisible, + Message: "[99992351] user not visible", + Hint: "", + }, + } + enriched := enrichDriveSearchError(orig) + eErr, ok := enriched.(*output.ExitError) + if !ok { + t.Fatalf("expected *output.ExitError, got %T", enriched) + } + if eErr == orig { + 
t.Fatal("should return a new ExitError, not mutate the original") + } + if orig.Detail.Hint != "" { + t.Fatal("original Detail.Hint must remain unchanged") + } + if !strings.Contains(eErr.Detail.Hint, "--creator-ids") { + t.Fatalf("hint should mention --creator-ids, got %q", eErr.Detail.Hint) + } + if eErr.Detail.Message != orig.Detail.Message { + t.Fatalf("Message should be preserved, got %q", eErr.Detail.Message) + } + }) +} + +func TestCloneDriveSearchFilter(t *testing.T) { + t.Parallel() + src := map[string]interface{}{"a": 1, "b": "x"} + dst := cloneDriveSearchFilter(src) + if !reflect.DeepEqual(src, dst) { + t.Fatalf("clone should equal source") + } + dst["a"] = 99 + if src["a"] != 1 { + t.Fatalf("mutating clone should not affect source") + } +} + +func TestBuildDriveSearchRequest(t *testing.T) { + t.Parallel() + now := time.Date(2026, 4, 24, 16, 0, 0, 0, time.UTC) + const userOpenID = "ou_self" + + t.Run("empty spec emits both filters as empty maps", func(t *testing.T) { + t.Parallel() + req, notices, err := buildDriveSearchRequest(driveSearchSpec{}, userOpenID, now) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(notices) != 0 { + t.Fatalf("expected no notices, got %v", notices) + } + if _, ok := req["doc_filter"].(map[string]interface{}); !ok { + t.Fatalf("doc_filter missing") + } + if _, ok := req["wiki_filter"].(map[string]interface{}); !ok { + t.Fatalf("wiki_filter missing") + } + if req["page_size"] != 15 { + t.Fatalf("default page_size should be 15, got %v", req["page_size"]) + } + }) + + t.Run("--mine fills creator_ids from userOpenID", func(t *testing.T) { + t.Parallel() + req, _, err := buildDriveSearchRequest(driveSearchSpec{Mine: true}, userOpenID, now) + if err != nil { + t.Fatalf("err: %v", err) + } + got := req["doc_filter"].(map[string]interface{})["creator_ids"].([]string) + if len(got) != 1 || got[0] != userOpenID { + t.Fatalf("expected [userOpenID], got %v", got) + } + }) + + t.Run("--mine without userOpenID errors", func(t 
*testing.T) { + t.Parallel() + _, _, err := buildDriveSearchRequest(driveSearchSpec{Mine: true}, "", now) + if err == nil || !strings.Contains(err.Error(), "--mine") { + t.Fatalf("expected --mine error, got: %v", err) + } + }) + + t.Run("--mine + --creator-ids mutually exclusive", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{Mine: true, CreatorIDs: []string{"ou_x"}} + _, _, err := buildDriveSearchRequest(spec, userOpenID, now) + if err == nil || !strings.Contains(err.Error(), "--mine") { + t.Fatalf("expected exclusion error, got: %v", err) + } + }) + + t.Run("--folder-tokens + --space-ids mutually exclusive", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{ + FolderTokens: []string{"fld_a"}, + SpaceIDs: []string{"sp_b"}, + } + _, _, err := buildDriveSearchRequest(spec, userOpenID, now) + if err == nil || !strings.Contains(err.Error(), "--folder-tokens") { + t.Fatalf("expected exclusion error, got: %v", err) + } + }) + + t.Run("--folder-tokens scopes only doc_filter", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{FolderTokens: []string{"fld_a"}} + req, _, err := buildDriveSearchRequest(spec, userOpenID, now) + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := req["wiki_filter"]; ok { + t.Fatalf("wiki_filter should not be set when --folder-tokens is given") + } + df := req["doc_filter"].(map[string]interface{}) + if _, ok := df["folder_tokens"]; !ok { + t.Fatalf("doc_filter must carry folder_tokens") + } + }) + + t.Run("--space-ids scopes only wiki_filter", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{SpaceIDs: []string{"sp_x"}} + req, _, err := buildDriveSearchRequest(spec, userOpenID, now) + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := req["doc_filter"]; ok { + t.Fatalf("doc_filter should not be set when --space-ids is given") + } + wf := req["wiki_filter"].(map[string]interface{}) + if _, ok := wf["space_ids"]; !ok { + t.Fatalf("wiki_filter must carry space_ids") + } + }) + 
+ t.Run("sort=default maps to DEFAULT_TYPE", func(t *testing.T) { + t.Parallel() + req, _, err := buildDriveSearchRequest(driveSearchSpec{Sort: "default"}, userOpenID, now) + if err != nil { + t.Fatalf("err: %v", err) + } + if got := req["doc_filter"].(map[string]interface{})["sort_type"]; got != "DEFAULT_TYPE" { + t.Fatalf("sort_type=%v, want DEFAULT_TYPE", got) + } + }) + + t.Run("sort=edit_time upper-cases 1:1", func(t *testing.T) { + t.Parallel() + req, _, err := buildDriveSearchRequest(driveSearchSpec{Sort: "edit_time"}, userOpenID, now) + if err != nil { + t.Fatalf("err: %v", err) + } + if got := req["doc_filter"].(map[string]interface{})["sort_type"]; got != "EDIT_TIME" { + t.Fatalf("sort_type=%v, want EDIT_TIME", got) + } + }) + + t.Run("invalid doc-types surfaces", func(t *testing.T) { + t.Parallel() + _, _, err := buildDriveSearchRequest(driveSearchSpec{DocTypes: []string{"PIE"}}, userOpenID, now) + if err == nil || !strings.Contains(err.Error(), "--doc-types") { + t.Fatalf("expected --doc-types error, got: %v", err) + } + }) + + t.Run("opened-since 8m triggers clamp notice", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{ + OpenedSince: time.Unix(now.Unix()-240*86400, 0).UTC().Format(time.RFC3339), + } + _, notices, err := buildDriveSearchRequest(spec, userOpenID, now) + if err != nil { + t.Fatalf("err: %v", err) + } + joined := strings.Join(notices, "\n") + if !strings.Contains(joined, "3 slices total") { + t.Fatalf("expected 3-slice clamp notice, got: %s", joined) + } + }) + + t.Run("scalar filters land in both doc and wiki filters", func(t *testing.T) { + t.Parallel() + spec := driveSearchSpec{ + DocTypes: []string{"DOCX"}, + ChatIDs: []string{"oc_a"}, + OnlyTitle: true, + OnlyComment: true, + } + req, _, err := buildDriveSearchRequest(spec, userOpenID, now) + if err != nil { + t.Fatalf("err: %v", err) + } + df := req["doc_filter"].(map[string]interface{}) + wf := req["wiki_filter"].(map[string]interface{}) + for _, side := range 
[]map[string]interface{}{df, wf} { + if _, ok := side["doc_types"]; !ok { + t.Fatal("doc_types missing") + } + if _, ok := side["chat_ids"]; !ok { + t.Fatal("chat_ids missing") + } + if side["only_title"] != true { + t.Fatal("only_title missing") + } + if side["only_comment"] != true { + t.Fatal("only_comment missing") + } + } + }) +} + +func TestRenderDriveSearchTable(t *testing.T) { + t.Parallel() + + t.Run("empty items prints fallback message", func(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + renderDriveSearchTable(&buf, map[string]interface{}{}, nil) + if !strings.Contains(buf.String(), "No matching results found") { + t.Fatalf("expected fallback message, got: %s", buf.String()) + } + }) + + t.Run("strips both <em> and <b> highlight tags", func(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + items := []interface{}{ + map[string]interface{}{ + "title_highlighted": "hi <em>there</em> <b>bold</b>!", + "entity_type": "DOC", + "result_meta": map[string]interface{}{"url": "https://example.com/x"}, + }, + } + renderDriveSearchTable(&buf, map[string]interface{}{}, items) + out := buf.String() + if strings.Contains(out, "<em>") || strings.Contains(out, "</em>") || strings.Contains(out, "<b>") || strings.Contains(out, "</b>") { + t.Fatalf("highlight tags leaked: %s", out) + } + if !strings.Contains(out, "hi there bold!") { + t.Fatalf("plain text should remain after stripping, got: %s", out) + } + }) + + t.Run("falls back to title when title_highlighted is missing", func(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + items := []interface{}{ + map[string]interface{}{ + "title": "plain title", + "entity_type": "DOC", + "result_meta": map[string]interface{}{ + "url": "https://example.com/x", + "update_time_iso": "2026-04-01T00:00:00Z", + "doc_types": "DOC", + }, + }, + } + renderDriveSearchTable(&buf, map[string]interface{}{}, items) + out := buf.String() + if !strings.Contains(out, "plain title") { + t.Fatalf("expected fallback title, got: %s", out) + } + if strings.Contains(out, "<nil>") { 
+ t.Fatalf("title fallback should not produce <nil>, got: %s", out) + } + }) + + // Regression: when result_meta is missing url / update_time_iso (or + // result_meta itself is absent), the table must render empty cells, not + // the literal string "<nil>". This used to leak via fmt.Sprintf("%v", + // nil) before the type-assertion guard was added. + t.Run("missing url and update_time_iso render as empty, not <nil>", func(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + items := []interface{}{ + // minimal item: title only, no result_meta keys at all + map[string]interface{}{ + "title_highlighted": "row1", + "entity_type": "DOC", + "result_meta": map[string]interface{}{}, + }, + // item with no result_meta at all + map[string]interface{}{ + "title_highlighted": "row2", + "entity_type": "DOC", + }, + } + renderDriveSearchTable(&buf, map[string]interface{}{}, items) + out := buf.String() + if strings.Contains(out, "<nil>") { + t.Fatalf("table must not render <nil> for missing url/edit_time, got:\n%s", out) + } + }) + + t.Run("appends has_more hint when there are more pages", func(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + items := []interface{}{ + map[string]interface{}{ + "title": "x", + "entity_type": "DOC", + "result_meta": map[string]interface{}{"url": "https://example.com/x"}, + }, + } + renderDriveSearchTable(&buf, map[string]interface{}{"has_more": true}, items) + if !strings.Contains(buf.String(), "more available") { + t.Fatalf("expected has_more hint, got: %s", buf.String()) + } + }) +} diff --git a/shortcuts/drive/shortcuts.go b/shortcuts/drive/shortcuts.go index 67ba703b2..bf4680ce9 100644 --- a/shortcuts/drive/shortcuts.go +++ b/shortcuts/drive/shortcuts.go @@ -20,5 +20,6 @@ func Shortcuts() []common.Shortcut { DriveDelete, DriveTaskResult, DriveApplyPermission, + DriveSearch, } } diff --git a/shortcuts/drive/shortcuts_test.go b/shortcuts/drive/shortcuts_test.go index be0857aa3..61c357699 100644 --- a/shortcuts/drive/shortcuts_test.go +++ 
b/shortcuts/drive/shortcuts_test.go @@ -23,6 +23,7 @@ func TestShortcutsIncludesExpectedCommands(t *testing.T) { "+delete", "+task_result", "+apply-permission", + "+search", } if len(got) != len(want) { diff --git a/skills/lark-doc/SKILL.md b/skills/lark-doc/SKILL.md index 56ba17c2d..3bdad9d8f 100644 --- a/skills/lark-doc/SKILL.md +++ b/skills/lark-doc/SKILL.md @@ -37,8 +37,8 @@ lark-cli docs +update --api-version v2 --doc "文档URL或token" --command appen - 用户说"看一下文档里的图片/附件/素材""预览素材" → 用 `lark-cli docs +media-preview` - 用户明确说"下载素材" → 用 `lark-cli docs +media-download` - 如果目标是画板/whiteboard/画板缩略图 → 只能用 `lark-cli docs +media-download --type whiteboard`(不要用 `+media-preview`) -- 用户说"找一个表格""按名称搜电子表格""找报表""最近打开的表格" → 先用 `lark-cli docs +search` 做资源发现 -- `docs +search` 不只搜文档/Wiki,结果里会直接返回 `SHEET` 等云空间对象 +- 用户说"找一个表格""按名称搜电子表格""找报表""最近打开的表格""最近我编辑过的 xxx" → 直接用 `lark-cli drive +search`(参考 [`lark-drive`](../lark-drive/references/lark-drive-search.md))。**老的 `docs +search` 已进入维护期、后续会下线,不要再新增依赖。** +- `drive +search` 结果里会直接返回 `SHEET` / `Base` / `FOLDER` 等云空间对象,是资源发现的统一入口 - 拿到 spreadsheet URL/token 后 → 切到 `lark-sheets` 做对象内部操作 - 用户说"给文档加评论""查看评论""回复评论""给评论加/删除表情 reaction" → 切到 `lark-drive` 处理 - 文档内容中出现嵌入的 ``、`` 或 `` 标签时 → **必须主动提取 token 并切到对应技能下钻读取内部数据**,不能只呈现标签本身 @@ -51,7 +51,7 @@ lark-cli docs +update --api-version v2 --doc "文档URL或token" --command appen | `` | 同 `` | [`lark-base`](../lark-base/SKILL.md) | | `` | `src-token` -> doc_token, `src-block-id` -> block_id | 用 `docs +fetch` 读取 src-token 文档,定位 block | -**补充:** `docs +search` 也承担"先定位云空间对象,再切回对应业务 skill 操作"的资源发现入口角色;当用户口头说"表格/报表"时,也优先从这里开始。 +**补充:** 云空间资源发现统一走 [`drive +search`](../lark-drive/references/lark-drive-search.md);当用户口头说"表格/报表/最近我编辑过的 xxx"时,也优先从 `drive +search` 开始。老的 `docs +search` 只在沿用 `--filter` JSON 的存量脚本里保留,后续会下线。 ## Shortcuts(推荐优先使用) @@ -59,10 +59,10 @@ Shortcut 是对常用操作的高级封装(`lark-cli docs + [flags]`) | Shortcut | 说明 | |----------|------| -| [`+search`](references/lark-doc-search.md) | Search Lark docs, Wiki, and 
spreadsheet files (Search v2: doc_wiki/search) | +| [`+search`](references/lark-doc-search.md) | ⚠️ **Deprecated — use [`drive +search`](../lark-drive/references/lark-drive-search.md)**. Search Lark docs, Wiki, and spreadsheet files (Search v2: doc_wiki/search). Kept for back-compat; new flows should use the drive-scoped command with flat flags. | | [`+create`](references/lark-doc-create.md) | Create a Lark document (XML / Markdown) | | [`+fetch`](references/lark-doc-fetch.md) | Fetch Lark document content (XML / Markdown) | | [`+update`](references/lark-doc-update.md) | Update a Lark document (str_replace / block_insert_after / block_replace / ...) | -| [`+media-insert`](references/lark-doc-media-insert.md) | Insert a local image or file at the end of a Lark document (4-step orchestration + auto-rollback) | +| [`+media-insert`](references/lark-doc-media-insert.md) | Insert a local image or file at the end of a Lark document (4-step orchestration + auto-rollback). Prefer `--from-clipboard` when the image is already on the system clipboard (screenshots, copy from Feishu/browser); use `--file` only for on-disk sources. | | [`+media-download`](references/lark-doc-media-download.md) | Download document media or whiteboard thumbnail (auto-detects extension) | | [`+whiteboard-update`](../lark-whiteboard/references/lark-whiteboard-update.md) | Alias of `whiteboard +update`. Update an existing whiteboard with DSL, Mermaid or PlantUML. Prefer `whiteboard +update`; refer to lark-whiteboard skill for details. 
| diff --git a/skills/lark-doc/references/lark-doc-search.md b/skills/lark-doc/references/lark-doc-search.md index 92affe613..6ca0df4d4 100644 --- a/skills/lark-doc/references/lark-doc-search.md +++ b/skills/lark-doc/references/lark-doc-search.md @@ -1,6 +1,10 @@ # docs +search(云空间搜索:文档 / Wiki / 电子表格) +> ⚠️ **此命令进入维护期,后续会下线。新用法请使用 [`drive +search`](../../lark-drive/references/lark-drive-search.md)。** +> +> `drive +search` 把所有过滤条件扁平化为独立 flag(`--edited-since` / `--mine` / `--doc-types` 等),面向自然语言场景设计,同时新增了 `my_edit_time`(我编辑过)、`my_comment_time`(我评论过)等维度。除非要沿用老脚本里的 `--filter` JSON,否则**都应该切到 `drive +search`**。 +> > **前置条件:** 先阅读 [`../lark-shared/SKILL.md`](../../lark-shared/SKILL.md) 了解认证、全局参数和安全规则。 基于 Search v2 接口 `POST /open-apis/search/v2/doc_wiki/search`,以**用户身份**统一搜索云空间对象。 diff --git a/skills/lark-drive/SKILL.md b/skills/lark-drive/SKILL.md index fb309fe88..f896ee5e6 100644 --- a/skills/lark-drive/SKILL.md +++ b/skills/lark-drive/SKILL.md @@ -16,6 +16,7 @@ metadata: ## 快速决策 +- 用户要**搜文档 / Wiki / 电子表格 / 多维表格 / 云空间对象**,优先使用 `lark-cli drive +search`。自然语言里"最近我编辑过的"、"我创建的"、"最近一周我打开过的 xxx"、"某人创建的 docx" 等直接映射到扁平 flag,避免手写嵌套 JSON。老的 `docs +search` 进入维护期、后续会下线,不要新增对它的依赖。 - 用户要把本地 `.xlsx` / `.csv` / `.base` 导入成 Base / 多维表格 / bitable,第一步必须使用 `lark-cli drive +import --type bitable`。 - 用户要把本地 `.md` / `.docx` / `.doc` / `.txt` / `.html` 导入成在线文档,使用 `lark-cli drive +import --type docx`。 - 用户要把本地 `.xlsx` / `.xls` / `.csv` 导入成电子表格,使用 `lark-cli drive +import --type sheet`。 @@ -221,6 +222,7 @@ Shortcut 是对常用操作的高级封装(`lark-cli drive + [flags]`) | Shortcut | 说明 | |----------|------| +| [`+search`](references/lark-drive-search.md) | Search Lark docs, Wiki, and spreadsheet files with flat filter flags (preferred over `docs +search`). Natural-language-friendly: `--edited-since`, `--mine`, `--doc-types`, etc. 
| | [`+upload`](references/lark-drive-upload.md) | Upload a local file to a Drive folder or wiki node | | [`+create-folder`](references/lark-drive-create-folder.md) | Create a Drive folder, optionally under a parent folder, with bot auto-grant support | | [`+download`](references/lark-drive-download.md) | Download a file from Drive to local | diff --git a/skills/lark-drive/references/lark-drive-search.md b/skills/lark-drive/references/lark-drive-search.md new file mode 100644 index 000000000..3b02459af --- /dev/null +++ b/skills/lark-drive/references/lark-drive-search.md @@ -0,0 +1,239 @@ + +# drive +search(云空间搜索:扁平 flag,面向自然语言场景) + +> **前置条件:** 先阅读 [`../lark-shared/SKILL.md`](../../lark-shared/SKILL.md) 了解认证、全局参数和安全规则。 + +基于 Search v2 接口 `POST /open-apis/search/v2/doc_wiki/search`,以**用户身份**统一搜索云空间对象。 + +和老的 `docs +search` 相比: + +- 把常用过滤条件全部**扁平化为独立 flag**(`--edited-since`、`--mine`、`--doc-types`、`--folder-tokens` 等),不再要求用户或 AI 手写嵌套 `--filter` JSON +- 额外暴露了 4 个"我"维度:`my_edit_time`(我编辑过)、`my_comment_time`(我评论过)、`open_time`(我打开过)、`create_time`(文档创建时间)——直接对应用户自然语言里的"最近我编辑过的"、"我评论过的"等表达 +- 自动处理 `my_edit_time` / `my_comment_time` 的小时级聚合(服务端存储粒度):亚小时输入会向整点 snap,并在 stderr 打出提示 +- `--mine` 一键从当前登录用户的 open_id 填 `creator_ids`,不必再先去查 contact + +> **资源发现入口统一**:`drive +search` 同样返回 `SHEET` / `Base` / `FOLDER` 等全部云空间对象,不只是文档 / Wiki。用户说"找一个表格"、"找报表"、"最近打开的表格"时,也从这里开始;定位后再切到对应业务 skill(如 `lark-sheets`)做对象内部操作。 + +## 命令 + +> **关键约束:搜索关键词必须通过 `--query` 传递。** +> 正确:`lark-cli drive +search --query "方案"` +> 错误:`lark-cli drive +search 方案` +> `+search` 不接受位置参数;空 `--query` 或省略 `--query` 表示纯靠 filter 浏览(合法)。 + +### 自然语言 → 命令映射速查 + +| 用户说 | 命令 | +|---|---| +| 最近一个月我编辑过的文档 | `lark-cli drive +search --query "" --edited-since 1m` | +| 最近一个月我编辑过 且 我评论过的 | `lark-cli drive +search --query "" --edited-since 1m --commented-since 1m` | +| 最近一周我打开过的表格 | `lark-cli drive +search --query "" --opened-since 7d --doc-types sheet` | +| 我创建的所有文档 | `lark-cli drive +search --query "" --mine` | +| 我 30-60 
天前创建的文档(粗略"上个月",按 30 天滑窗算) | `lark-cli drive +search --query "" --mine --created-since 2m --created-until 1m` | +| 我 2026 年 3 月创建的文档(精确日历月) | `lark-cli drive +search --query "" --mine --created-since 2026-03-01 --created-until 2026-04-01` | +| 关键词"预算",最近一周我打开过,按编辑时间降序 | `lark-cli drive +search --query 预算 --opened-since 7d --sort edit_time` | +| 某个 wiki space 下、我 30-60 天前创建的 | `lark-cli drive +search --query "" --mine --space-ids space_xxx --created-since 2m --created-until 1m` | +| 张三创建的文档 | `lark-cli drive +search --query "" --creator-ids ou_zhangsan` | +| 我最近 3 个月评论过的 docx | `lark-cli drive +search --query "" --commented-since 3m --doc-types docx` | + +### 更多示例 + +```bash +# 纯关键词搜索 +lark-cli drive +search --query "季度总结" + +# 使用服务端 query 高级语法(和 docs +search 一致) +lark-cli drive +search --query 'intitle:方案' +lark-cli drive +search --query '"季度 总结"' +lark-cli drive +search --query '方案 OR 草稿' +lark-cli drive +search --query '方案 -草稿' + +# 只搜某个文件夹下的文档 +lark-cli drive +search --query 方案 --folder-tokens fld_123456 + +# 只搜某个知识空间下的 Wiki +lark-cli drive +search --query 研发规范 --space-ids space_1234567890fedcba + +# 指定群内分享过的文档 +lark-cli drive +search --query 方案 --chat-ids oc_1234567890abcdef + +# 只搜标题 / 只搜评论 +lark-cli drive +search --query 周报 --only-title +lark-cli drive +search --query 延期原因 --only-comment + +# 人类可读格式 +lark-cli drive +search --query OKR --format pretty + +# 翻页(--format json 先拿 page_token) +lark-cli drive +search --query 方案 --format json +lark-cli drive +search --query 方案 --page-token '' +``` + +## 参数 + +### 核心 + +| 参数 | 必填 | 说明 | +|---|---|---| +| `--query ` | 否 | 搜索关键词;支持服务端高级语法(`intitle:`、`""`、`OR`、`-`)。空字符串或省略表示纯 filter 浏览 | +| `--page-size ` | 否 | 每页数量,默认 15,最大 20。超过 20 自动 clamp;非正数(≤0)回落 15;**非数字值直接返回 validation 错误** | +| `--page-token ` | 否 | 上一次响应里的 `page_token`,用于翻页 | +| `--format` | 否 | `json`(默认)/ `pretty` | + +### 身份(creator 维度) + +| 参数 | 映射 | 说明 | +|---|---|---| +| `--mine` | `creator_ids = [当前用户 open_id]` | 
bool。一键"我创建的";从当前登录用户身份(`runtime.UserOpenId()`)解析 open_id,取不到直接报错(提示运行 `lark-cli auth login`) | +| `--creator-ids ou_x,ou_y` | `creator_ids = [...]` | 显式 open_id 列表,逗号分隔;**与 `--mine` 互斥** | + +### 时间维度(每个维度一对 since/until) + +| 参数 | 映射 API 字段 | 是否小时 snap | +|---|---|---| +| `--edited-since` / `--edited-until` | `my_edit_time.start` / `.end` | ✅ start 向下取整,end 向上取整 | +| `--commented-since` / `--commented-until` | `my_comment_time.start` / `.end` | ✅ 同上 | +| `--opened-since` / `--opened-until` | `open_time.start` / `.end` | ❌ 原样透传 | +| `--created-since` / `--created-until` | `create_time.start` / `.end` | ❌ 原样透传(文档创建时间,非"我"语义)| + +### 作用域 + +| 参数 | 映射 | 说明 | +|---|---|---| +| `--doc-types docx,sheet` | `doc_types` | 逗号分隔。允许值:`doc,sheet,bitable,mindnote,file,wiki,docx,folder,catalog,slides,shortcut` | +| `--folder-tokens fld_a,fld_b` | `folder_tokens`(仅 doc_filter) | 存在时只发 `doc_filter`;**与 `--space-ids` 互斥** | +| `--space-ids sp_x` | `space_ids`(仅 wiki_filter) | 存在时只发 `wiki_filter`;**与 `--folder-tokens` 互斥** | +| `--chat-ids oc_x` | `chat_ids` | 逗号分隔 | +| `--sharer-ids ou_x` | `sharer_ids` | 逗号分隔,open_id | + +### 其他 + +| 参数 | 映射 | 说明 | +|---|---|---| +| `--only-title` | `only_title: true` | bool | +| `--only-comment` | `only_comment: true` | bool | +| `--sort ` | `sort_type`(转大写枚举) | 允许值:`default, edit_time, edit_time_asc, open_time, create_time` | + +> `--sort`:CLI 只暴露服务端**正式支持**的 5 个值。服务端 enum 里 `CREATE_TIME_ASC` 协议标注"暂不支持",`ENTITY_CREATE_TIME_ASC` / `ENTITY_CREATE_TIME_DESC` 已废弃,CLI 直接不放出来,传了会被 cobra enum 校验拒掉。 + +## 时间值格式 + +所有 `--*-since` / `--*-until` 共用: + +| 输入 | 含义 | +|---|---| +| `7d` / `30d` | N 天前的当前时刻 | +| `1m` | 30 天前(固定 30 天,**不是**日历月)| +| `3m` / `6m` | 90 / 180 天前 | +| `1y` | 365 天前 | +| `2026-04-01` | 本地时区 00:00:00 | +| `2026-04-01 10:00:00` / `2026-04-01T10:00:00` | 本地时区具体时刻 | +| `2026-04-01T10:00:00+08:00` | RFC3339 带时区 | +| `1743523200`(≥ 10 位纯数字)| Unix 秒直接透传 | + +> `m` 绑定 month(30 天),不支持 minute——因为 `my_edit_time` / `my_comment_time` 
在服务端是小时聚合,分钟粒度没意义。
+
+## 小时聚合(my_edit_time / my_comment_time)
+
+服务端对这两个字段按整点聚合,亚小时输入会被 CLI 向整点对齐:
+
+```text
+start: floor 到整点 16:23:45 → 16:00:00
+end: ceil 到整点 16:23:45 → 17:00:00
+```
+
+发生对齐时,stderr 会打印一条 notice,例如:
+
+```text
+notice: my_edit_time has hour-level granularity server-side;
+  start 2026-04-22 16:23:00 → 2026-04-22 16:00:00
+  end 2026-04-22 16:28:00 → 2026-04-22 17:00:00
+```
+
+stdout 的 JSON 输出不受影响。`open_time` / `create_time` 不做 snap。
+
+## 输出
+
+- `--format json`(默认):`{ total, has_more, page_token, results: [...] }`;所有 `*_time` 字段递归补 `*_time_iso`
+- `--format pretty`:4 列 table —— `type | title | edit_time | url`
+- `title_highlighted` / `summary_highlighted` 可能包含 `<em>` / `</em>` 高亮标签,客户端对比前需先剥离
+
+> **注意**:返回体里的 `total` 字段不够准确(官方确认,仅供参考)。需要精确统计的场景,按实际 `results` 做去重和累加,不要把 `total` 当结果数承诺。
+
+## 决策规则
+
+- **和 `docs +search` 的选择**:优先使用 `drive +search`(本指令),不要再用 `docs +search`。`docs +search` 进入维护期、后续会下线。
+- **身份快捷方式**:只要用户说"我创建的",直接 `--mine` 即可,不需要先查 contact 拿 open_id。
+- **时间维度选择**:
+  - "我编辑的"、"我修改的" → `--edited-since` / `--edited-until`
+  - "我评论的"、"我回复过的" → `--commented-since` / `--commented-until`
+  - "我看过的"、"我打开过的"、"最近看过的" → `--opened-since` / `--opened-until`
+  - "创建于"、"新建的"(文档整体维度,与"我"无关)→ `--created-since` / `--created-until`
+- **作用域选择**:
+  - "某个文件夹下" → `--folder-tokens`(doc-only)
+  - "某个 wiki 空间下" → `--space-ids`(wiki-only)
+  - 两者不能同时使用,混用会报错
+- **身份 flag 互斥**:`--mine` 和 `--creator-ids` 不要同时传,会直接报错。"我和张三创建的" 用 `--creator-ids ou_me,ou_zhangsan`(需要先拿到自己 open_id,但这种场景少见)。
+- **实体补全**:
+  - 用户说"某个群里",先用 `lark-im` 查 `chat_id`
+  - 用户说"某人创建/分享的"(非自己),先用 `lark-contact` 查 open_id,再填 `--creator-ids` / `--sharer-ids`
+- **查询语义下推**:`--query` 支持的服务端高级语法(`intitle:`、`""`、`OR`、`-`)优先使用,不要先模糊搜再在客户端二次过滤。
+- **时间表达**:
+  - 模糊相对时间("最近半年"、"过去 30 天"、"最近一周")→ `--*-since 6m` / `--*-since 30d` / `--*-since 7d`,不展开成 ISO 时间
+  - **日历表达**("上个月"、"上周"、"本月"、"前年"、"今年 3 月"等明确日历单位)→ **必须算出绝对 `YYYY-MM-DD` 边界**(如"上个月" = 上一个日历月的 1 号 → 当月 1 号),**不要近似成 `1m`/`2m`**:CLI 里 `m` 是固定 30 天、`y` 固定
365 天,跟日历差 0-3 天,月末月初尤其容易偏出去 + - 绝对日期 → 直接 `YYYY-MM-DD` 或 RFC3339 +- **分页策略**:默认只返回第一页,并说明 `has_more` 和下一页命令。只有用户明确要"全部 / 全量 / 继续翻"才继续。单轮翻页上限 5 页。 +- **原始返回**:用户要求"原始数据"、"接口返回"时用 `--format json`,不做客户端精确过滤或摘要重写。 + +## 权限 + +| 操作 | 所需 scope | +|---|---| +| 搜索云空间对象(文档 / Wiki / 表格等资源发现) | `search:docs:read` | + +## 常见错误 + +| code | 含义 | 处理 | +|---|---|---| +| `99992351` | `--creator-ids` / `--sharer-ids` 里有 open_id 超出**应用的通讯录可见范围**,服务端拒绝识别 | 让管理员在开发者后台把这些用户加进应用的"通讯录可见性"授权里;或把超出范围的 open_id 从参数里去掉。这和 `search:docs:read` scope 不是一回事 —— 是"应用能看见哪些人"而不是"应用能调用哪个接口" | + +## 时间范围自动裁剪(`--opened-*` 专有) + +服务端对 `open_time` 过滤**每次请求最多支持 3 个月**(90 天)窗口。其他三个时间维度(`--edited-*` / `--commented-*` / `--created-*`)**不受影响**。 + +CLI 在发请求前会检查 `--opened-since` 到有效 `--opened-until`(没传则取 `now`)的跨度: + +| 跨度 | 行为 | +|---|---| +| ≤ 90 天 | 原样透传 | +| 91 ~ 365 天 | **自动裁剪**到"最近一个 90 天 slice",stderr 打一条 notice 列出所有剩余 slice 的 `--opened-since` / `--opened-until` 参数值 | +| > 365 天 | 直接报 validation 错,要求缩小范围或自行拆分多次查询 | + +Notice 示例(用户原本要求"过去 8 个月",会被拆成 3 个 slice): + +```text +notice: --opened-* window spans 240 days (~8 months), exceeds the server-side 3-month (90-day) limit. + this query was narrowed to the most recent slice; 3 slices total: + [slice 1/3 current] --opened-since 2026-01-24T21:54:02+08:00 --opened-until 2026-04-24T21:54:02+08:00 + [slice 2/3] --opened-since 2025-10-26T21:54:02+08:00 --opened-until 2026-01-24T21:54:02+08:00 + [slice 3/3] --opened-since 2025-08-27T21:54:02+08:00 --opened-until 2025-10-26T21:54:02+08:00 + pagination: paginate within a slice via --page-token using that slice's --opened-since / --opened-until values verbatim (NOT the original relative time like '1y' / '8m' — relative times re-resolve against time.Now() and would mismatch the page_token); switch to the next slice's --opened-* flags only after has_more=false, and do not carry --page-token across slices. +``` + +### Agent 看到 notice 时的处理 + +**标准流程(分页 × slice 的先后顺序):** + +1. **跑 slice 1**(本次请求已自动裁剪到这个窗口),把结果呈现给用户 +2. 
**先在当前 slice 内翻页**:返回的 `has_more = true` 且用户想看更多时,把 `--opened-since` / `--opened-until` 改成 notice 里 `[slice 1/N current]` 行给出的**具体时间值**(**不要继续用原始的 `--opened-since 1y` 这种相对值**——CLI 每次调用都按 `time.Now()` 重算窗口,相对值 + `--page-token` 一起跑会让 page_token 绑到一个漂移的窗口上、结果静默失真),加 `--page-token` 继续翻,直到 `has_more = false` +3. **再切换到下一个 slice**:当前 slice 翻完后,如果用户还要"更老的",用 notice 里列的 slice 2 的 `--opened-since` / `--opened-until` 值,**其他 flag(`--query`、`--doc-types`、`--page-size`、`--sort`……)保持原样,`--page-token` 不带**,重新发请求 +4. **依次递推**:slice 2 翻完后切 slice 3,以此类推 +5. 用户只对最近一段感兴趣时,跳过第 3 步及以后 —— 避免无意义的 API 调用 + +> `--page-token` 只在单 slice 上下文内有效;切 slice 时不要把上一个 slice 的 `page_token` 带过去。 + +### 注意事项 + +- `--sort` 在**单 slice 内部**是正确的。跨 slice 的全局 sort(例如"过去一年我打开过的,按 edit_time desc 排")不被 CLI 保证,需要 agent 自行拉完多个 slice 后在客户端 re-sort 再呈现 +- 裁剪只改 request 发出去的 `open_time` 范围,`--query` / 其他 filter 不动 +- 最后一个(最老的)slice 常常不足 90 天,这是正常的截断 diff --git a/tests/cli_e2e/drive/drive_search_dryrun_test.go b/tests/cli_e2e/drive/drive_search_dryrun_test.go new file mode 100644 index 000000000..48cd4236a --- /dev/null +++ b/tests/cli_e2e/drive/drive_search_dryrun_test.go @@ -0,0 +1,338 @@ +// Copyright (c) 2026 Lark Technologies Pte. Ltd. +// SPDX-License-Identifier: MIT + +package drive + +import ( + "context" + "strings" + "testing" + "time" + + clie2e "github.com/larksuite/cli/tests/cli_e2e" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" +) + +// TestDriveSearchDryRun_RequestShape locks in the dry-run request body so +// agents that key off of stdout (URL, doc_filter / wiki_filter, scalar +// filters) don't silently regress. Run end-to-end so cobra flag parsing, +// readDriveSearchSpec, and the dry-run renderer all execute against the +// real binary. +// +// Fake credentials are sufficient because --dry-run short-circuits before +// any network call. 
func TestDriveSearchDryRun_RequestShape(t *testing.T) {
	setDriveSearchE2EEnv(t)

	tests := []struct {
		name string
		args []string
		// JSONPath assertions over the dry-run body.
		wantURL              string
		wantQuery            string
		wantDocFilter        bool
		wantWikiFilter       bool
		wantDocFilterFields  map[string]string // gjson path under api.0.body.doc_filter -> string value (or "" to require existence only)
		wantWikiFilterFields map[string]string
	}{
		{
			name: "basic --query emits both filters",
			args: []string{
				"drive", "+search",
				"--query", "season report",
				"--page-size", "5",
				"--dry-run",
			},
			wantURL:        "/open-apis/search/v2/doc_wiki/search",
			wantQuery:      "season report",
			wantDocFilter:  true,
			wantWikiFilter: true,
		},
		{
			name: "--folder-tokens scopes to doc_filter only",
			args: []string{
				"drive", "+search",
				"--query", "x",
				"--folder-tokens", "fld_aaa,fld_bbb",
				"--dry-run",
			},
			wantURL:       "/open-apis/search/v2/doc_wiki/search",
			wantQuery:     "x",
			wantDocFilter: true,
			wantDocFilterFields: map[string]string{
				"folder_tokens.0": "fld_aaa",
				"folder_tokens.1": "fld_bbb",
			},
		},
		{
			name: "--space-ids scopes to wiki_filter only",
			args: []string{
				"drive", "+search",
				"--query", "x",
				"--space-ids", "sp_xxx",
				"--dry-run",
			},
			wantURL:        "/open-apis/search/v2/doc_wiki/search",
			wantQuery:      "x",
			wantWikiFilter: true,
			wantWikiFilterFields: map[string]string{
				"space_ids.0": "sp_xxx",
			},
		},
		{
			name: "--sort default maps to DEFAULT_TYPE in body",
			args: []string{
				"drive", "+search",
				"--query", "x",
				"--sort", "default",
				"--dry-run",
			},
			wantURL:        "/open-apis/search/v2/doc_wiki/search",
			wantQuery:      "x",
			wantDocFilter:  true,
			wantWikiFilter: true,
			wantDocFilterFields: map[string]string{
				"sort_type": "DEFAULT_TYPE",
			},
		},
		{
			name: "mixed-case --doc-types is normalized to upper case in body",
			args: []string{
				"drive", "+search",
				"--query", "x",
				"--doc-types", "docx,Sheet,BITABLE",
				"--dry-run",
			},
			wantURL:        "/open-apis/search/v2/doc_wiki/search",
			wantQuery:      "x",
			wantDocFilter:  true,
			wantWikiFilter: true,
			wantDocFilterFields: map[string]string{
				"doc_types.0": "DOCX",
				"doc_types.1": "SHEET",
				"doc_types.2": "BITABLE",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
			t.Cleanup(cancel)

			result, err := clie2e.RunCmd(ctx, clie2e.Request{
				Args:      tt.args,
				DefaultAs: "user",
			})
			require.NoError(t, err)
			result.AssertExitCode(t, 0)

			// The dry-run envelope is JSON on stdout; all assertions below
			// address it via gjson paths rooted at api.0.
			out := result.Stdout
			if got := gjson.Get(out, "api.0.method").String(); got != "POST" {
				t.Fatalf("method=%q, want POST\nstdout:\n%s", got, out)
			}
			if got := gjson.Get(out, "api.0.url").String(); got != tt.wantURL {
				t.Fatalf("url=%q, want %q\nstdout:\n%s", got, tt.wantURL, out)
			}
			if got := gjson.Get(out, "api.0.body.query").String(); got != tt.wantQuery {
				t.Fatalf("body.query=%q, want %q\nstdout:\n%s", got, tt.wantQuery, out)
			}
			// Presence / absence of the two filter objects is asserted in both
			// directions so a regression can't hide behind a missing check.
			if tt.wantDocFilter && !gjson.Get(out, "api.0.body.doc_filter").Exists() {
				t.Fatalf("doc_filter missing\nstdout:\n%s", out)
			}
			if !tt.wantDocFilter && gjson.Get(out, "api.0.body.doc_filter").Exists() {
				t.Fatalf("doc_filter should be omitted\nstdout:\n%s", out)
			}
			if tt.wantWikiFilter && !gjson.Get(out, "api.0.body.wiki_filter").Exists() {
				t.Fatalf("wiki_filter missing\nstdout:\n%s", out)
			}
			if !tt.wantWikiFilter && gjson.Get(out, "api.0.body.wiki_filter").Exists() {
				t.Fatalf("wiki_filter should be omitted\nstdout:\n%s", out)
			}
			for path, want := range tt.wantDocFilterFields {
				if got := gjson.Get(out, "api.0.body.doc_filter."+path).String(); got != want {
					t.Fatalf("doc_filter.%s=%q, want %q\nstdout:\n%s", path, got, want, out)
				}
			}
			for path, want := range tt.wantWikiFilterFields {
				if got := gjson.Get(out, "api.0.body.wiki_filter."+path).String(); got != want {
					t.Fatalf("wiki_filter.%s=%q, want %q\nstdout:\n%s", path, got, want, out)
				}
			}
		})
	}
}

// TestDriveSearchDryRun_OpenedClamping locks in the agent-facing slice
// notice for --opened-* spans over 90 days: the request body must carry
// the most recent 90-day window, and stderr must list slice N's flag
// values verbatim so the agent can re-invoke for older ranges.
func TestDriveSearchDryRun_OpenedClamping(t *testing.T) {
	setDriveSearchE2EEnv(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	t.Cleanup(cancel)

	// 8 months > 90-day server cap -> expect clamping to 3 slices.
	result, err := clie2e.RunCmd(ctx, clie2e.Request{
		Args: []string{
			"drive", "+search",
			"--query", "x",
			"--opened-since", "8m",
			"--dry-run",
		},
		DefaultAs: "user",
	})
	require.NoError(t, err)
	result.AssertExitCode(t, 0)

	// Notice goes to stderr alongside other dimension notices.
	for _, want := range []string{
		"--opened-* window spans",
		"3 slices total",
		"[slice 1/3 current]",
		"[slice 2/3]",
		"[slice 3/3]",
		"--page-token",
	} {
		if !strings.Contains(result.Stderr, want) {
			t.Fatalf("notice missing %q\nstderr:\n%s", want, result.Stderr)
		}
	}
	// Slice 1 specifically must spell out concrete --opened-* flag values
	// (not just the timestamps in arrow form): an agent paginating slice 1
	// has to copy these verbatim, otherwise reusing the original relative
	// time '8m' would drift the window against time.Now() and mismatch the
	// page_token.
	for _, label := range []string{"[slice 1/3 current]", "[slice 2/3]", "[slice 3/3]"} {
		// Find the stderr line carrying this slice label; `line` stays ""
		// (and the check below fails loudly) when the label is absent.
		var line string
		for _, l := range strings.Split(result.Stderr, "\n") {
			if strings.Contains(l, label) {
				line = l
				break
			}
		}
		if !strings.Contains(line, "--opened-since ") || !strings.Contains(line, "--opened-until ") {
			t.Fatalf("%s line must spell out both flags, got %q\nfull stderr:\n%s", label, line, result.Stderr)
		}
	}

	// And the request body's open_time must reflect the clamped window
	// (start and end both present, span = 90 days exactly).
	body := result.Stdout
	start := gjson.Get(body, "api.0.body.doc_filter.open_time.start").Int()
	end := gjson.Get(body, "api.0.body.doc_filter.open_time.end").Int()
	if start == 0 || end == 0 {
		t.Fatalf("doc_filter.open_time.start/end missing\nstdout:\n%s", body)
	}
	if delta := end - start; delta != 90*86400 {
		t.Fatalf("clamped span = %d seconds, want %d (90 days)\nstdout:\n%s", delta, 90*86400, body)
	}
}

// TestDriveSearchDryRun_RejectsOpenedOver1Year locks in the hard cap: a
// --opened-* span beyond 365 days fails validation up front and never
// reaches the API. Important because the alternative (silent slicing into
// many windows) would produce a rate-limit / runaway request loop.
//
// Dry-run captures spec-level validation errors into the JSON envelope's
// `error` field (api list comes back empty); the process still exits 0
// because the dry-run itself succeeded — it just told you what would have
// failed at execution time.
func TestDriveSearchDryRun_RejectsOpenedOver1Year(t *testing.T) {
	setDriveSearchE2EEnv(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	t.Cleanup(cancel)

	result, err := clie2e.RunCmd(ctx, clie2e.Request{
		Args: []string{
			"drive", "+search",
			"--query", "x",
			"--opened-since", "2y",
			"--dry-run",
		},
		DefaultAs: "user",
	})
	require.NoError(t, err)
	result.AssertExitCode(t, 0)

	if api := gjson.Get(result.Stdout, "api"); api.IsArray() && len(api.Array()) > 0 {
		t.Fatalf("dry-run api list must be empty when validation fails\nstdout:\n%s", result.Stdout)
	}
	errMsg := gjson.Get(result.Stdout, "error").String()
	if !strings.Contains(errMsg, "365-day") {
		t.Fatalf("expected 365-day cap message in dry-run error, got %q\nstdout:\n%s", errMsg, result.Stdout)
	}
}

// TestDriveSearchDryRun_RejectsInvalidSort locks in the cobra Enum guard.
// CLI intentionally exposes only 5 sort values (default, edit_time,
// edit_time_asc, open_time, create_time); the deprecated /
// not-supported server enum values must be rejected before reaching the
// request layer.
func TestDriveSearchDryRun_RejectsInvalidSort(t *testing.T) {
	setDriveSearchE2EEnv(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	t.Cleanup(cancel)

	result, err := clie2e.RunCmd(ctx, clie2e.Request{
		Args: []string{
			"drive", "+search",
			"--query", "x",
			"--sort", "create_time_asc",
			"--dry-run",
		},
		DefaultAs: "user",
	})
	require.NoError(t, err)
	// Unlike the spec-level validators above, a flag-enum rejection comes
	// from cobra itself and surfaces as a non-zero exit.
	if result.ExitCode == 0 {
		t.Fatalf("invalid sort must be rejected, got exit=0\nstdout:\n%s", result.Stdout)
	}
	combined := result.Stdout + "\n" + result.Stderr
	// Pin to the flag name (with dashes) rather than the bare word "sort",
	// which would also match "transport" / "sortable" / etc.
	if !strings.Contains(combined, "--sort") {
		t.Fatalf("expected --sort error message, got:\nstdout:\n%s\nstderr:\n%s", result.Stdout, result.Stderr)
	}
}

// TestDriveSearchDryRun_RejectsBadDocType verifies the doc-types validator
// is wired at the dry-run path: an unknown enum value surfaces as a
// validation error inside the dry-run JSON envelope rather than reaching
// the server. The process still exits 0 (see RejectsOpenedOver1Year).
func TestDriveSearchDryRun_RejectsBadDocType(t *testing.T) {
	setDriveSearchE2EEnv(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	t.Cleanup(cancel)

	result, err := clie2e.RunCmd(ctx, clie2e.Request{
		Args: []string{
			"drive", "+search",
			"--query", "x",
			"--doc-types", "docx,pie",
			"--dry-run",
		},
		DefaultAs: "user",
	})
	require.NoError(t, err)
	result.AssertExitCode(t, 0)

	if api := gjson.Get(result.Stdout, "api"); api.IsArray() && len(api.Array()) > 0 {
		t.Fatalf("dry-run api list must be empty when validation fails\nstdout:\n%s", result.Stdout)
	}
	errMsg := gjson.Get(result.Stdout, "error").String()
	if !strings.Contains(errMsg, "--doc-types") {
		t.Fatalf("expected --doc-types error in dry-run, got %q\nstdout:\n%s", errMsg, result.Stdout)
	}
}

// setDriveSearchE2EEnv isolates each test in a throwaway config dir and
// injects fake app credentials. Fake values are sufficient because every
// test here runs with --dry-run, which short-circuits before any network
// call is made.
func setDriveSearchE2EEnv(t *testing.T) {
	t.Helper()
	t.Setenv("LARKSUITE_CLI_CONFIG_DIR", t.TempDir())
	t.Setenv("LARKSUITE_CLI_APP_ID", "drive_search_e2e_app")
	t.Setenv("LARKSUITE_CLI_APP_SECRET", "drive_search_e2e_secret")
	t.Setenv("LARKSUITE_CLI_BRAND", "feishu")
}