diff --git a/.gitignore b/.gitignore
index f18daa77ab..c4b8f452b8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,6 @@
 ./src
+./cmd/src/src
+*.zip
 release
 ./vendor
 .idea
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cb146dcca0..0ab10f1e4d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,7 +12,7 @@ All notable changes to `src-cli` are documented in this file.
 
 ## Unreleased
 
 ### Added
-
+- New command `src debug`. [#731](https://github.com/sourcegraph/src-cli/pull/731)
 - `src lsif upload` now supports the `-gitlab-token` flag. [#721](https://github.com/sourcegraph/src-cli/pull/721)
 - Batch Changes can be applied to Bitbucket Cloud when `src` is used with Sourcegraph 3.40 or later. [#725](https://github.com/sourcegraph/src-cli/pull/725)
diff --git a/cmd/src/debug.go b/cmd/src/debug.go
new file mode 100644
index 0000000000..2a513cb1c4
--- /dev/null
+++ b/cmd/src/debug.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+)
+
+var debugCommands commander
+
+func init() {
+	usage := `'src debug' gathers and bundles debug data from a Sourcegraph deployment for troubleshooting.
+
+Usage:
+
+	src debug command [command options]
+
+The commands are:
+
+	kube		dumps context from k8s deployments
+	compose		dumps context from docker-compose deployments
+	server		dumps context from single-container deployments
+
+
+Use "src debug command -h" for more information about a subcommand.
+src debug has access to flags on src -- e.g.: src -v debug kube -o foo.zip
+
+`
+
+	flagSet := flag.NewFlagSet("debug", flag.ExitOnError)
+	handler := func(args []string) error {
+		debugCommands.run(flagSet, "src debug", usage, args)
+		return nil
+	}
+
+	// Register the command.
+	commands = append(commands, &command{
+		flagSet:   flagSet,
+		aliases:   []string{},
+		handler:   handler,
+		usageFunc: func() { fmt.Println(usage) },
+	})
+}
diff --git a/cmd/src/debug_common.go b/cmd/src/debug_common.go
new file mode 100644
index 0000000000..130cdd8eaf
--- /dev/null
+++ b/cmd/src/debug_common.go
@@ -0,0 +1,123 @@
+package main
+
+import (
+	"archive/zip"
+	"context"
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/sourcegraph/src-cli/internal/exec"
+
+	"github.com/sourcegraph/sourcegraph/lib/errors"
+)
+
+type archiveFile struct {
+	name string
+	data []byte
+	err  error
+}
+
+func archiveFileFromCommand(ctx context.Context, path, cmd string, args ...string) *archiveFile {
+	f := &archiveFile{name: path}
+	f.data, f.err = exec.CommandContext(ctx, cmd, args...).CombinedOutput()
+	if f.err != nil {
+		f.err = errors.Wrapf(f.err, "executing command: %s %s: received error: %s", cmd, strings.Join(args, " "), f.data)
+	}
+	return f
+}
+
+// verify prompts the user to confirm they want to run the command
+func verify(confirmationText string) (bool, error) {
+	input := ""
+	for strings.ToLower(input) != "y" && strings.ToLower(input) != "n" {
+		fmt.Printf("%s [y/N]: ", confirmationText)
+		if _, err := fmt.Scanln(&input); err != nil {
+			return false, err
+		}
+	}
+
+	return strings.ToLower(input) == "y", nil
+}
+
+func processBaseDir(base string) (string, string) {
+	if !strings.HasSuffix(base, ".zip") {
+		return base + ".zip", base
+	}
+
+	return base, strings.TrimSuffix(base, ".zip")
+}
+
+// writeChannelContentsToZip writes every output from an archive command passed on the channel to the zip writer
+func writeChannelContentsToZip(zw *zip.Writer, ch <-chan *archiveFile, verbose bool) error {
+	for f := range ch {
+		if verbose {
+			log.Printf("archiving file %q with %d bytes", f.name, len(f.data))
+		}
+
+		if f.err != nil {
+			return f.err
+		}
+
+		zf, err := zw.Create(f.name)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create %q", f.name)
+		}
+
+		if _, err := zf.Write(f.data); err != nil {
+			return errors.Wrapf(err, "failed to write to %q", f.name)
+		}
+	}
+	return nil
+}
+
+// TODO: Currently external services and site configs are pulled using the SRC_ENDPOINT env var;
+// if there's a way to validate that the env var is pointing at the same instance as the docker and kubectl commands,
+// it should be implemented.
+
+// TODO: file issue on the existence of OAuth signKey which needs to be redacted
+
+// getExternalServicesConfig calls src extsvc list with the format flag -f,
+// and then returns an archiveFile to be consumed
+func getExternalServicesConfig(ctx context.Context, baseDir string) *archiveFile {
+	const fmtStr = `{{range .Nodes}}{{.id}} | {{.kind}} | {{.displayName}}{{"\n"}}{{.config}}{{"\n---\n"}}{{end}}`
+	return archiveFileFromCommand(
+		ctx,
+		filepath.Join(baseDir, "config", "external_services.txt"),
+		os.Args[0], "extsvc", "list", "-f", fmtStr,
+	)
+}
+
+// getSiteConfig calls src api -query=... to query the api for the site config json
+func getSiteConfig(ctx context.Context, baseDir string) *archiveFile {
+	const siteConfigStr = `query { site { configuration { effectiveContents } } }`
+	f := archiveFileFromCommand(ctx,
+		filepath.Join(baseDir, "config", "siteConfig.json"),
+		os.Args[0], "api", "-query", siteConfigStr,
+	)
+
+	if f.err != nil {
+		return f
+	}
+
+	var siteConfig struct {
+		Data struct {
+			Site struct {
+				Configuration struct {
+					EffectiveContents string
+				}
+			}
+		}
+	}
+
+	if err := json.Unmarshal(f.data, &siteConfig); err != nil {
+		f.err = err
+		return f
+	}
+
+	f.data = []byte(siteConfig.Data.Site.Configuration.EffectiveContents)
+	return f
+}
diff --git a/cmd/src/debug_compose.go b/cmd/src/debug_compose.go
new file mode 100644
index 0000000000..9fb764f78a
--- /dev/null
+++ b/cmd/src/debug_compose.go
@@ -0,0 +1,228 @@
+package main
+
+import (
+	"archive/zip"
+	"context"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/sourcegraph/sourcegraph/lib/errors"
+
+	"golang.org/x/sync/errgroup"
+
+	"golang.org/x/sync/semaphore"
+)
+
+func init() {
+	usage := `
+'src debug compose' invokes docker cli diagnostic commands targeting a set of containers that are members of a docker-compose network,
+and writes their output to an archive file.
+
+Usage:
+
+	src debug compose [command options]
+
+Flags:
+
+	-o			Specify the name of the output zip archive.
+	-no-configs		Don't include Sourcegraph configuration json.
+
+Examples:
+
+	$ src debug compose -o debug.zip
+
+	$ src -v debug compose -no-configs -o foo.zip
+
+`
+
+	flagSet := flag.NewFlagSet("compose", flag.ExitOnError)
+	var base string
+	var noConfigs bool
+	flagSet.StringVar(&base, "o", "debug.zip", "The name of the output zip archive")
+	flagSet.BoolVar(&noConfigs, "no-configs", false, "If true, exclude Sourcegraph configuration files from the archive")
+
+	handler := func(args []string) error {
+		if err := flagSet.Parse(args); err != nil {
+			return err
+		}
+
+		// process -o flag to get zipfile and base directory names
+		if base == "" {
+			return fmt.Errorf("empty -o flag")
+		}
+		// declare basedir for archive file structure
+		base, baseDir := processBaseDir(base)
+
+		// init context
+		ctx := context.Background()
+		// open pipe to output file
+		out, err := os.OpenFile(base, os.O_CREATE|os.O_RDWR|os.O_EXCL, 0666)
+		if err != nil {
+			return errors.Wrap(err, "failed to open file")
+		}
+		defer out.Close()
+		// init zip writer
+		zw := zip.NewWriter(out)
+		defer zw.Close()
+
+		// Gather data for the safety check
+		containers, err := getContainers(ctx)
+		if err != nil {
+			return errors.Wrap(err, "failed to get containers")
+		}
+		// Safety check: make sure the user knows what they are targeting with this debug command
+		log.Printf("This command will archive docker-cli data for %d containers\n SRC_ENDPOINT: %v\n Output filename: %v", len(containers), cfg.Endpoint, base)
+		if verified, _ := verify("Do you want to start writing to an archive?"); !verified {
+			return nil
+		}
+
+		err = archiveCompose(ctx, zw, *verbose, noConfigs, baseDir)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	debugCommands = append(debugCommands, &command{
+		flagSet: flagSet,
+		handler: handler,
+		usageFunc: func() {
+			fmt.Println(usage)
+		},
+	})
+}
+
+// archiveCompose writes an archive of common docker cli command outputs
+func archiveCompose(ctx context.Context, zw *zip.Writer, verbose, noConfigs bool, baseDir string) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	containers, err := getContainers(ctx)
+	if err != nil {
+		return errors.Wrap(err, "failed to get docker containers")
+	}
+
+	if verbose {
+		log.Printf("getting docker data for %d containers...\n", len(containers))
+	}
+
+	// setup channel for slice of archive function outputs
+	ch := make(chan *archiveFile)
+	g, ctx := errgroup.WithContext(ctx)
+	semaphore := semaphore.NewWeighted(8)
+
+	run := func(f func() *archiveFile) {
+		g.Go(func() error {
+			if err := semaphore.Acquire(ctx, 1); err != nil {
+				return err
+			}
+			defer semaphore.Release(1)
+
+			if file := f(); file != nil {
+				ch <- file
+			}
+
+			return nil
+		})
+	}
+
+	// start goroutine to run docker ps
+	run(func() *archiveFile { return getPs(ctx, baseDir) })
+
+	// start goroutine to run docker container stats --no-stream
+	run(func() *archiveFile { return getStats(ctx, baseDir) })
+
+	// start goroutine to run docker container logs for each container
+	for _, container := range containers {
+		container := container
+		run(func() *archiveFile { return getContainerLog(ctx, container, baseDir) })
+	}
+
+	// start goroutine to run docker container inspect for each container
+	for _, container := range containers {
+		container := container
+		run(func() *archiveFile { return getInspect(ctx, container, baseDir) })
+	}
+
+	// start goroutines to get configs
+	if !noConfigs {
+		run(func() *archiveFile { return getExternalServicesConfig(ctx, baseDir) })
+
+		run(func() *archiveFile { return getSiteConfig(ctx, baseDir) })
+	}
+
+	// close the channel when all goroutines in the wait group have completed
+	go func() {
+		if err := g.Wait(); err != nil {
+			fmt.Printf("archiveCompose failed: %s\n", err)
+			os.Exit(1)
+		}
+		close(ch)
+	}()
+
+	// Read the files from the channel and write them to the archive on the host machine
+	if err := writeChannelContentsToZip(zw, ch, verbose); err != nil {
+		return errors.Wrap(err, "failed to write archives from channel")
+	}
+
+	return nil
+}
+
+// getContainers returns the list of containers that are members of the docker-compose_sourcegraph network
+func getContainers(ctx context.Context) ([]string, error) {
+	c, err := exec.CommandContext(ctx, "docker", "container", "ls", "--format", "{{.Names}} {{.Networks}}").Output()
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get container names")
+	}
+	s := string(c)
+	preprocessed := strings.Split(strings.TrimSpace(s), "\n")
+	containers := make([]string, 0, len(preprocessed))
+	for _, container := range preprocessed {
+		tmpStr := strings.Split(container, " ")
+		if len(tmpStr) >= 2 && tmpStr[1] == "docker-compose_sourcegraph" {
+			containers = append(containers, tmpStr[0])
+		}
+	}
+	return containers, err
+}
+
+// runs archiveFileFromCommand with args docker ps
+func getPs(ctx context.Context, baseDir string) *archiveFile {
+	return archiveFileFromCommand(
+		ctx,
+		filepath.Join(baseDir, "docker", "docker-ps.txt"),
+		"docker", "ps", "--filter", "network=docker-compose_sourcegraph",
+	)
+}
+
+// runs archiveFileFromCommand with args docker container stats --no-stream
+func getStats(ctx context.Context, baseDir string) *archiveFile {
+	return archiveFileFromCommand(
+		ctx,
+		filepath.Join(baseDir, "docker", "stats.txt"),
+		"docker", "container", "stats", "--no-stream",
+	)
+}
+
+// runs archiveFileFromCommand with args docker container logs $CONTAINER
+func getContainerLog(ctx context.Context, container, baseDir string) *archiveFile {
+	return archiveFileFromCommand(
+		ctx,
+		filepath.Join(baseDir, "docker", "containers", container, fmt.Sprintf("%s.log", container)),
+		"docker", "container", "logs", container,
+	)
+}
+
+// runs archiveFileFromCommand with args docker container inspect $CONTAINER
+func getInspect(ctx context.Context, container, baseDir string) *archiveFile {
+	return archiveFileFromCommand(
+		ctx,
+		filepath.Join(baseDir, "docker", "containers", container, fmt.Sprintf("inspect-%s.txt", container)),
+		"docker", "container", "inspect", container,
+	)
+}
diff --git a/cmd/src/debug_kube.go b/cmd/src/debug_kube.go
new file mode 100644
index 0000000000..9e3dfdaf7e
--- /dev/null
+++ b/cmd/src/debug_kube.go
@@ -0,0 +1,320 @@
+package main
+
+import (
+	"archive/zip"
+	"bytes"
+	"context"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/sync/errgroup"
+
+	"golang.org/x/sync/semaphore"
+
+	"github.com/sourcegraph/sourcegraph/lib/errors"
+	"github.com/sourcegraph/src-cli/internal/exec"
+)
+
+func init() {
+	usage := `
+'src debug kube' invokes kubectl diagnostic commands targeting kubectl's current-context, and writes their output to an archive.
+
+Usage:
+
+	src debug kube [command options]
+
+Flags:
+
+	-o			Specify the name of the output zip archive.
+	-n			Specify the namespace passed to kubectl commands. If not specified, the 'default' namespace is used.
+	-no-configs		Don't include Sourcegraph configuration json.
+
+Examples:
+
+	$ src debug kube -o debug.zip
+
+	$ src -v debug kube -n ns-sourcegraph -o foo
+
+	$ src debug kube -no-configs -o bar.zip
+
+`
+
+	flagSet := flag.NewFlagSet("kube", flag.ExitOnError)
+	var base string
+	var namespace string
+	var noConfigs bool
+	flagSet.StringVar(&base, "o", "debug.zip", "The name of the output zip archive")
+	flagSet.StringVar(&namespace, "n", "default", "The namespace passed to kubectl commands; if not specified, the 'default' namespace is used")
+	flagSet.BoolVar(&noConfigs, "no-configs", false, "If true, exclude Sourcegraph configuration files from the archive")
+
+	handler := func(args []string) error {
+		if err := flagSet.Parse(args); err != nil {
+			return err
+		}
+
+		// process -o flag to get zipfile and base directory names
+		if base == "" {
+			return errors.New("empty -o flag")
+		}
+		base, baseDir := processBaseDir(base)
+
+		// init context
+		ctx := context.Background()
+		// open pipe to output file
+		out, err := os.OpenFile(base, os.O_CREATE|os.O_RDWR|os.O_EXCL, 0666)
+		if err != nil {
+			return errors.Wrapf(err, "failed to open file %q", base)
+		}
+		defer out.Close()
+		// init zip writer
+		zw := zip.NewWriter(out)
+		defer zw.Close()
+
+		// Gather data for the safety check
+		pods, err := selectPods(ctx, namespace)
+		if err != nil {
+			return errors.Wrap(err, "failed to get pods")
+		}
+		kubectx, err := exec.CommandContext(ctx, "kubectl", "config", "current-context").CombinedOutput()
+		if err != nil {
+			return errors.Wrapf(err, "failed to get current-context")
+		}
+		// Safety check: make sure the user knows what they've targeted with this command
+		log.Printf("Archiving kubectl data for %d pods\n SRC_ENDPOINT: %v\n Context: %s Namespace: %v\n Output filename: %v", len(pods.Items), cfg.Endpoint, kubectx, namespace, base)
+		if verified, _ := verify("Do you want to start writing to an archive?"); !verified {
+			return nil
+		}
+
+		err = archiveKube(ctx, zw, *verbose, noConfigs, namespace, baseDir, pods)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	debugCommands = append(debugCommands, &command{
+		flagSet: flagSet,
+		handler: handler,
+		usageFunc: func() {
+			fmt.Println(usage)
+		},
+	})
+}
+
+type podList struct {
+	Items []struct {
+		Metadata struct {
+			Name string
+		}
+		Spec struct {
+			Containers []struct {
+				Name string
+			}
+		}
+	}
+}
+
+// archiveKube runs common kubectl commands and archives their results to the zip file
+func archiveKube(ctx context.Context, zw *zip.Writer, verbose, noConfigs bool, namespace, baseDir string, pods podList) error {
+	// Create a context with a cancel function that we call when returning
+	// from archiveKube. This ensures we close all pending goroutines when returning
+	// early because of an error.
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// setup channel for slice of archive function outputs, as well as throttling semaphore
+	ch := make(chan *archiveFile)
+	g, ctx := errgroup.WithContext(ctx)
+	semaphore := semaphore.NewWeighted(8)
+
+	run := func(f func() *archiveFile) {
+		g.Go(func() error {
+			if err := semaphore.Acquire(ctx, 1); err != nil {
+				return err
+			}
+			defer semaphore.Release(1)
+
+			if file := f(); file != nil {
+				ch <- file
+			}
+
+			return nil
+		})
+	}
+
+	// create goroutine to get pods
+	run(func() *archiveFile { return getPods(ctx, namespace, baseDir) })
+
+	// create goroutine to get kubectl events
+	run(func() *archiveFile { return getEvents(ctx, namespace, baseDir) })
+
+	// create goroutine to get persistent volumes
+	run(func() *archiveFile { return getPV(ctx, namespace, baseDir) })
+
+	// create goroutine to get persistent volume claims
+	run(func() *archiveFile { return getPVC(ctx, namespace, baseDir) })
+
+	// start goroutine to run kubectl logs for each of the pod's containers
+	for _, pod := range pods.Items {
+		for _, container := range pod.Spec.Containers {
+			p := pod.Metadata.Name
+			c := container.Name
+			run(func() *archiveFile { return getPodLog(ctx, p, c, namespace, baseDir) })
+		}
+	}
+
+	// start goroutine to run kubectl logs --previous for each of the pod's containers;
+	// won't write to the zip on err, only passes the file to the channel if err is nil
+	for _, pod := range pods.Items {
+		for _, container := range pod.Spec.Containers {
+			p := pod.Metadata.Name
+			c := container.Name
+			run(func() *archiveFile {
+				f := getPastPodLog(ctx, p, c, namespace, baseDir)
+				if f.err != nil {
+					if verbose {
+						fmt.Printf("Could not gather --previous pod logs for %s\n", p)
+					}
+					return nil
+				}
+				return f
+			})
+		}
+	}
+
+	// start goroutine for each pod to run kubectl describe pod
+	for _, pod := range pods.Items {
+		p := pod.Metadata.Name
+		run(func() *archiveFile { return getDescribe(ctx, p, namespace, baseDir) })
+	}
+
+	// start goroutine for each pod to run kubectl get pod -o yaml
+	for _, pod := range pods.Items {
+		p := pod.Metadata.Name
+		run(func() *archiveFile { return getManifest(ctx, p, namespace, baseDir) })
+	}
+
+	// start goroutines to get configs
+	if !noConfigs {
+		run(func() *archiveFile { return getExternalServicesConfig(ctx, baseDir) })
+
+		run(func() *archiveFile { return getSiteConfig(ctx, baseDir) })
+	}
+
+	// close the channel when all goroutines in the wait group have completed
+	go func() {
+		if err := g.Wait(); err != nil {
+			fmt.Printf("archiveKube failed: %s\n", err)
+			os.Exit(1)
+		}
+		close(ch)
+	}()
+
+	// Read the files from the channel and write them to the archive on the host machine
+	if err := writeChannelContentsToZip(zw, ch, verbose); err != nil {
+		return errors.Wrap(err, "failed to write archives from channel")
+	}
+
+	return nil
+}
+
+// selectPods calls kubectl get pods and returns a list of pods to be processed
+func selectPods(ctx context.Context, namespace string) (podList, error) {
+	// Declare buffer type var for kubectl pipe
+	var podsBuff bytes.Buffer
+
+	// Get all pod names as json
+	podsCmd := exec.CommandContext(
+		ctx,
+		"kubectl", "-n", namespace, "get", "pods", "-l", "deploy=sourcegraph", "-o=json",
+	)
+	podsCmd.Stdout = &podsBuff
+	podsCmd.Stderr = os.Stderr
+	err := podsCmd.Run()
+	if err != nil {
+		return podList{}, errors.Wrap(err, "failed to acquire pods for subcommands")
+	}
+
+	// Decode json into a podList
+	var pods podList
+	if err := json.Unmarshal(podsBuff.Bytes(), &pods); err != nil {
"failed to unmarshall get pods json") + } + + return pods, err +} + +// runs archiveFileFromCommand with arg get pods +func getPods(ctx context.Context, namespace, baseDir string) *archiveFile { + return archiveFileFromCommand( + ctx, + filepath.Join(baseDir, "kubectl", "getPods.txt"), + "kubectl", "-n", namespace, "get", "pods", "-o", "wide", + ) +} + +// runs archiveFileFromCommand with arg get events +func getEvents(ctx context.Context, namespace, baseDir string) *archiveFile { + return archiveFileFromCommand( + ctx, + filepath.Join(baseDir, "kubectl", "events.txt"), + "kubectl", "-n", namespace, "get", "events", + ) +} + +// runs archiveFileFromCommand with arg get pv +func getPV(ctx context.Context, namespace, baseDir string) *archiveFile { + return archiveFileFromCommand( + ctx, + filepath.Join(baseDir, "kubectl", "persistent-volumes.txt"), + "kubectl", "-n", namespace, "get", "pv", + ) +} + +// runs archiveFileFromCommand with arg get pvc +func getPVC(ctx context.Context, namespace, baseDir string) *archiveFile { + return archiveFileFromCommand( + ctx, + filepath.Join(baseDir, "kubectl", "persistent-volume-claims.txt"), + "kubectl", "-n", namespace, "get", "pvc", + ) +} + +// runs archiveFileFromCommand with arg logs $POD -c $CONTAINER +func getPodLog(ctx context.Context, podName, containerName, namespace, baseDir string) *archiveFile { + return archiveFileFromCommand( + ctx, + filepath.Join(baseDir, "kubectl", "pods", podName, fmt.Sprintf("%s.log", containerName)), + "kubectl", "-n", namespace, "logs", podName, "-c", containerName, + ) +} + +// runs archiveFileFromCommand with arg logs --previous $POD -c $CONTAINER +func getPastPodLog(ctx context.Context, podName, containerName, namespace, baseDir string) *archiveFile { + return archiveFileFromCommand( + ctx, + filepath.Join(baseDir, "kubectl", "pods", podName, fmt.Sprintf("prev-%s.log", containerName)), + "kubectl", "-n", namespace, "logs", "--previous", podName, "-c", containerName, + ) +} + +// runs archiveFileFromCommand with arg describe pod $POD +func getDescribe(ctx context.Context, podName, namespace, baseDir string) *archiveFile { + return archiveFileFromCommand( + ctx, + filepath.Join(baseDir, "kubectl", "pods", podName, fmt.Sprintf("describe-%s.txt", podName)), + "kubectl", "-n", namespace, "describe", "pod", podName, + ) +} + +// runs archiveFileFromCommand with arg get pod $POD -o yaml +func getManifest(ctx context.Context, podName, namespace, baseDir string) *archiveFile { + return archiveFileFromCommand( + ctx, + filepath.Join(baseDir, "kubectl", "pods", podName, fmt.Sprintf("manifest-%s.yaml", podName)), + "kubectl", "-n", namespace, "get", "pod", podName, "-o", "yaml", + ) +} diff --git a/cmd/src/debug_server.go b/cmd/src/debug_server.go new file mode 100644 index 0000000000..4705ce2c1e --- /dev/null +++ b/cmd/src/debug_server.go @@ -0,0 +1,179 @@ +package main + +import ( + "archive/zip" + "context" + "flag" + "fmt" + "log" + "os" + "path/filepath" + + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" + + "github.com/sourcegraph/sourcegraph/lib/errors" +) + +func init() { + usage := ` +'src debug server' invokes docker cli diagnostic commands targeting a Sourcegraph server container, +and writes an archive file from their returns. + +Usage: + + src debug server [command options] + +Flags: + + -o Specify the name of the output zip archive. + -no-config Don't include Sourcegraph configuration json. 
+
+Examples:
+
+	$ src debug server -c foo -o debug.zip
+
+	$ src -v debug server -no-configs -c ViktorVaughn -o foo.zip
+
+`
+
+	flagSet := flag.NewFlagSet("server", flag.ExitOnError)
+	var base string
+	var container string
+	var noConfigs bool
+	flagSet.StringVar(&base, "o", "debug.zip", "The name of the output zip archive")
+	flagSet.StringVar(&container, "c", "", "The container to target")
+	flagSet.BoolVar(&noConfigs, "no-configs", false, "If true, exclude Sourcegraph configuration files from the archive")
+
+	handler := func(args []string) error {
+		if err := flagSet.Parse(args); err != nil {
+			return err
+		}
+
+		// process -o flag to get zipfile and base directory names, and make sure a container is targeted
+		if base == "" {
+			return fmt.Errorf("empty -o flag")
+		}
+		if container == "" {
+			return fmt.Errorf("empty -c flag, specify a container: src debug server -c foo")
+		}
+		base, baseDir := processBaseDir(base)
+
+		// init context
+		ctx := context.Background()
+		// open pipe to output file
+		out, err := os.OpenFile(base, os.O_CREATE|os.O_RDWR|os.O_EXCL, 0666)
+		if err != nil {
+			return errors.Wrapf(err, "failed to open file %q", base)
+		}
+		defer out.Close()
+		// init zip writer
+		zw := zip.NewWriter(out)
+		defer zw.Close()
+
+		// Safety check: make sure the user knows what they are targeting with this debug command
+		log.Printf("This command will archive docker-cli data for container: %s\n SRC_ENDPOINT: %s\n Output filename: %s", container, cfg.Endpoint, base)
+		if verified, _ := verify("Do you want to start writing to an archive?"); !verified {
+			return nil
+		}
+
+		err = archiveServ(ctx, zw, *verbose, noConfigs, container, baseDir)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	debugCommands = append(debugCommands, &command{
+		flagSet: flagSet,
+		handler: handler,
+		usageFunc: func() {
+			fmt.Println(usage)
+		},
+	})
+}
+
+// archiveServ runs common docker cli commands against a single container and archives their output
+func archiveServ(ctx context.Context, zw *zip.Writer, verbose, noConfigs bool, container, baseDir string) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// setup channel for slice of archive function outputs
+	ch := make(chan *archiveFile)
+	g, ctx := errgroup.WithContext(ctx)
+	semaphore := semaphore.NewWeighted(8)
+
+	run := func(f func() *archiveFile) {
+		g.Go(func() error {
+			if err := semaphore.Acquire(ctx, 1); err != nil {
+				return err
+			}
+			defer semaphore.Release(1)
+
+			if file := f(); file != nil {
+				ch <- file
+			}
+
+			return nil
+		})
+	}
+
+	// start goroutine to run docker container logs
+	run(func() *archiveFile { return getServLog(ctx, container, baseDir) })
+
+	// start goroutine to run docker container inspect
+	run(func() *archiveFile { return getServInspect(ctx, container, baseDir) })
+
+	// start goroutine to run docker top
+	run(func() *archiveFile { return getServTop(ctx, container, baseDir) })
+
+	// start goroutines to get configs
+	if !noConfigs {
+		run(func() *archiveFile { return getExternalServicesConfig(ctx, baseDir) })
+
+		run(func() *archiveFile { return getSiteConfig(ctx, baseDir) })
+	}
+
+	// close the channel when all goroutines in the wait group have completed
+	go func() {
+		if err := g.Wait(); err != nil {
+			fmt.Printf("archiveServ failed: %s\n", err)
+			os.Exit(1)
+		}
+		close(ch)
+	}()
+
+	// Read the files from the channel and write them to the archive on the host machine
+	if err := writeChannelContentsToZip(zw, ch, verbose); err != nil {
+		return errors.Wrap(err, "failed to write archives from channel")
+	}
+
+	return nil
+}
+
+// runs archiveFileFromCommand with args container logs $CONTAINER
+func getServLog(ctx context.Context, container, baseDir string) *archiveFile {
+	return archiveFileFromCommand(
+		ctx,
+		filepath.Join(baseDir, fmt.Sprintf("%s.log", container)),
+		"docker", "container", "logs", container,
+	)
+}
+
+// runs archiveFileFromCommand with args container inspect $CONTAINER
+func getServInspect(ctx context.Context, container, baseDir string) *archiveFile {
+	return archiveFileFromCommand(
+		ctx,
+		filepath.Join(baseDir, fmt.Sprintf("inspect-%s.txt", container)),
+		"docker", "container", "inspect", container,
+	)
+}
+
+// runs archiveFileFromCommand with args top $CONTAINER
+func getServTop(ctx context.Context, container, baseDir string) *archiveFile {
+	return archiveFileFromCommand(
+		ctx,
+		filepath.Join(baseDir, fmt.Sprintf("top-%s.txt", container)),
+		"docker", "top", container,
+	)
+}
diff --git a/go.mod b/go.mod
index 0fc2782086..873cd0ac0d 100644
--- a/go.mod
+++ b/go.mod
@@ -22,6 +22,7 @@ require (
 	github.com/sourcegraph/sourcegraph/lib v0.0.0-20220506010657-06e02488a3d7
 	github.com/stretchr/testify v1.7.1
 	golang.org/x/net v0.0.0-20220325170049-de3da57026de
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	google.golang.org/protobuf v1.27.1
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 	jaytaylor.com/html2text v0.0.0-20200412013138-3577fbdbcff7
@@ -56,7 +57,6 @@ require (
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
 	golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect
 	golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect