diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index b867470..8369a8e 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -54,4 +54,10 @@ jobs: run: | export CGO_ENABLED=0 go version - go run github.com/onsi/ginkgo/v2/ginkgo --skip-package=integration ./s3/... \ No newline at end of file + go run github.com/onsi/ginkgo/v2/ginkgo --skip-package=integration ./s3/... + + - name: common package unit tests + run: | + export CGO_ENABLED=0 + go version + go run github.com/onsi/ginkgo/v2/ginkgo ./common/... diff --git a/README.md b/README.md index 19f51dd..7e29589 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,8 @@ storage-cli -s -c [arguments] - `-s`: Storage provider type (azurebs|s3|gcs|alioss|dav) - `-c`: Path to provider-specific configuration file - `-v`: Show version +- `-log-file`: Path to log file (optional, logs to stderr by default) +- `-log-level`: Logging level: debug, info, warn, error (default: warn) **Common commands:** - `put ` - Upload a local file to remote storage @@ -57,7 +59,7 @@ storage-cli -s -c [arguments] - `copy ` - Copy object within the same storage - `sign ` - Generate signed URL (action: get|put, duration: e.g., 60s) - `properties ` - Display properties/metadata of a remote object -`ensure-storage-exists` - Ensure the storage container/bucket exists +`ensure-storage-exists` - Ensure the storage container/bucket exists, creating it (bucket, container, etc.) if it does not **Examples:** ```shell @@ -75,6 +77,12 @@ storage-cli -s azurebs -c azure-config.json properties my-blob.txt # Sign object for 'get' in alioss for 60 seconds storage-cli -s alioss -c alioss-config.json sign object.txt get 60s + +# Upload file with debug logging to file +storage-cli -s s3 -c s3-config.json -log-level debug -log-file storage.log put local-file.txt remote-object.txt + +# List objects with error-level logging only +storage-cli -s gcs -c gcs-config.json -log-level error list my-prefix ``` ## Contributing diff --git a/alioss/client/client.go b/alioss/client/client.go index 644db96..414491f 100644 --- a/alioss/client/client.go +++ b/alioss/client/client.go @@ -5,7 +5,6 @@ import ( "encoding/base64" "fmt" "io" - "log" "os" "strings" "time" @@ -29,8 +28,6 @@ func (client *AliBlobstore) Put(sourceFilePath string, destinationObject string) if err != nil { return fmt.Errorf("upload failure: %w", err) } - - log.Println("Successfully uploaded file") return nil } diff --git a/alioss/client/storage_client.go b/alioss/client/storage_client.go index 0ada623..b6149da 100644 --- a/alioss/client/storage_client.go +++ b/alioss/client/storage_client.go @@ -4,13 +4,14 @@ import ( "encoding/json" "errors" "fmt" - "log" + "log/slog" "strconv" "strings" "time" "github.com/aliyun/aliyun-oss-go-sdk/oss" "github.com/cloudfoundry/storage-cli/alioss/config" + "github.com/cloudfoundry/storage-cli/common" ) //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . 
StorageClient @@ -74,14 +75,20 @@ func NewStorageClient(storageConfig config.AliStorageConfig) (StorageClient, err }, nil } -func (dsc DefaultStorageClient) Upload( - sourceFilePath string, - sourceFileMD5 string, - destinationObject string, -) error { - log.Printf("Uploading %s/%s\n", dsc.storageConfig.BucketName, destinationObject) +func newOSSClient(endpoint, accessKeyID, accessKeySecret string) (*oss.Client, error) { + if common.IsDebug() { + slogLogger := slog.Default() + ossLogger := slog.NewLogLogger(slogLogger.Handler(), slog.LevelDebug) + return oss.New(endpoint, accessKeyID, accessKeySecret, oss.SetLogLevel(oss.Debug), oss.SetLogger(ossLogger)) + } else { + return oss.New(endpoint, accessKeyID, accessKeySecret) + } +} + +func (dsc DefaultStorageClient) Upload(sourceFilePath string, sourceFileMD5 string, destinationObject string) error { + slog.Info("Uploading object to OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", destinationObject, "file_path", sourceFilePath) - client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return err } @@ -94,13 +101,10 @@ func (dsc DefaultStorageClient) Upload( return bucket.PutObjectFromFile(destinationObject, sourceFilePath, oss.ContentMD5(sourceFileMD5)) } -func (dsc DefaultStorageClient) Download( - sourceObject string, - destinationFilePath string, -) error { - log.Printf("Downloading %s/%s\n", dsc.storageConfig.BucketName, sourceObject) +func (dsc DefaultStorageClient) Download(sourceObject string, destinationFilePath string) error { + slog.Info("Downloading object from OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", sourceObject, "file_path", destinationFilePath) - client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return err } @@ -113,15 +117,12 @@ func (dsc DefaultStorageClient) Download( return bucket.GetObjectToFile(sourceObject, destinationFilePath) } -func (dsc DefaultStorageClient) Copy( - sourceObject string, - destinationObject string, -) error { - log.Printf("Copying object from %s to %s", sourceObject, destinationObject) +func (dsc DefaultStorageClient) Copy(sourceObject string, destinationObject string) error { + slog.Info("Copying object within OSS bucket", "bucket", dsc.storageConfig.BucketName, "source_object", sourceObject, "destination_object", destinationObject) srcOut := fmt.Sprintf("%s/%s", dsc.storageConfig.BucketName, sourceObject) destOut := fmt.Sprintf("%s/%s", dsc.storageConfig.BucketName, destinationObject) - client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return err } @@ -138,12 +139,10 @@ func (dsc DefaultStorageClient) Copy( return nil } -func (dsc DefaultStorageClient) Delete( - object string, -) error { - log.Printf("Deleting %s/%s\n", dsc.storageConfig.BucketName, object) +func (dsc DefaultStorageClient) Delete(object string) error { + slog.Info("Deleting object from OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object) - client, err := 
oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return err } @@ -156,21 +155,14 @@ func (dsc DefaultStorageClient) Delete( return bucket.DeleteObject(object) } -func (dsc DefaultStorageClient) DeleteRecursive( - prefix string, -) error { +func (dsc DefaultStorageClient) DeleteRecursive(prefix string) error { if prefix != "" { - log.Printf("Deleting all objects in bucket %s with prefix '%s'\n", - dsc.storageConfig.BucketName, prefix) + slog.Info("Deleting all objects with prefix from OSS bucket", "bucket", dsc.storageConfig.BucketName, "prefix", prefix) } else { - log.Printf("Deleting all objects in bucket %s\n", dsc.storageConfig.BucketName) + slog.Info("Deleting all objects from OSS bucket", "bucket", dsc.storageConfig.BucketName) } - client, err := oss.New( - dsc.storageConfig.Endpoint, - dsc.storageConfig.AccessKeyID, - dsc.storageConfig.AccessKeySecret, - ) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return err } @@ -225,9 +217,9 @@ func (dsc DefaultStorageClient) DeleteRecursive( } func (dsc DefaultStorageClient) Exists(object string) (bool, error) { - log.Printf("Checking if blob: %s/%s\n", dsc.storageConfig.BucketName, object) + slog.Info("Checking if object exists in OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object) - client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return false, err } @@ -243,22 +235,18 @@ func (dsc DefaultStorageClient) Exists(object string) (bool, error) { } if objectExists { - log.Printf("File '%s' exists in bucket '%s'\n", object, dsc.storageConfig.BucketName) + slog.Info("Object exists in OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object) return true, nil } else { - log.Printf("File '%s' does not exist in bucket '%s'\n", object, dsc.storageConfig.BucketName) + slog.Info("Object does not exist in OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object) return false, nil } } -func (dsc DefaultStorageClient) SignedUrlPut( - object string, - expiredInSec int64, -) (string, error) { - - log.Printf("Getting signed PUT url for blob %s/%s\n", dsc.storageConfig.BucketName, object) +func (dsc DefaultStorageClient) SignedUrlPut(object string, expiredInSec int64) (string, error) { + slog.Info("Generating signed PUT URL for OSS object", "bucket", dsc.storageConfig.BucketName, "object_key", object, "expiration_seconds", expiredInSec) - client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return "", err } @@ -271,14 +259,10 @@ func (dsc DefaultStorageClient) SignedUrlPut( return bucket.SignURL(object, oss.HTTPPut, expiredInSec) } -func (dsc DefaultStorageClient) SignedUrlGet( - object string, - expiredInSec int64, -) (string, error) { - - log.Printf("Getting signed GET url for blob %s/%s\n", dsc.storageConfig.BucketName, object) +func (dsc DefaultStorageClient) SignedUrlGet(object string, expiredInSec int64) (string, 
error) { + slog.Info("Generating signed GET URL for OSS object", "bucket", dsc.storageConfig.BucketName, "object_key", object, "expiration_seconds", expiredInSec) - client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return "", err } @@ -291,14 +275,11 @@ func (dsc DefaultStorageClient) SignedUrlGet( return bucket.SignURL(object, oss.HTTPGet, expiredInSec) } -func (dsc DefaultStorageClient) List( - prefix string, -) ([]string, error) { +func (dsc DefaultStorageClient) List(prefix string) ([]string, error) { if prefix != "" { - log.Printf("Listing objects in bucket %s with prefix '%s'\n", - dsc.storageConfig.BucketName, prefix) + slog.Info("Listing all objects in OSS bucket with prefix", "bucket", dsc.storageConfig.BucketName, "prefix", prefix) } else { - log.Printf("Listing objects in bucket %s\n", dsc.storageConfig.BucketName) + slog.Info("Listing all objects in OSS bucket", "bucket", dsc.storageConfig.BucketName) } var ( @@ -315,7 +296,7 @@ func (dsc DefaultStorageClient) List( opts = append(opts, oss.Marker(marker)) } - client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return nil, err } @@ -349,13 +330,10 @@ type BlobProperties struct { ContentLength int64 `json:"content_length,omitempty"` } -func (dsc DefaultStorageClient) Properties( - object string, -) error { - log.Printf("Getting properties for object %s/%s\n", - dsc.storageConfig.BucketName, object) +func (dsc DefaultStorageClient) Properties(object string) error { + slog.Info("Getting object properties from OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object) - client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return err } @@ -415,9 +393,9 @@ func (dsc DefaultStorageClient) Properties( } func (dsc DefaultStorageClient) EnsureBucketExists() error { - log.Printf("Ensuring bucket '%s' exists\n", dsc.storageConfig.BucketName) + slog.Info("Ensuring OSS bucket exists", "bucket", dsc.storageConfig.BucketName) - client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) + client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret) if err != nil { return err } @@ -428,7 +406,7 @@ func (dsc DefaultStorageClient) EnsureBucketExists() error { } if exists { - log.Printf("Bucket '%s' already exists\n", dsc.storageConfig.BucketName) + slog.Info("OSS bucket already exists", "bucket", dsc.storageConfig.BucketName) return nil } @@ -436,6 +414,6 @@ func (dsc DefaultStorageClient) EnsureBucketExists() error { return fmt.Errorf("failed to create bucket '%s': %w", dsc.storageConfig.BucketName, err) } - log.Printf("Bucket '%s' created successfully\n", dsc.storageConfig.BucketName) + slog.Info("OSS bucket created successfully", "bucket", dsc.storageConfig.BucketName) return nil } diff --git a/alioss/integration/general_ali_test.go b/alioss/integration/general_ali_test.go index 8a15d9f..5f59957 100644 --- 
a/alioss/integration/general_ali_test.go +++ b/alioss/integration/general_ali_test.go @@ -15,6 +15,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" ) var _ = Describe("General testing for all Ali regions", func() { @@ -50,8 +51,7 @@ var _ = Describe("General testing for all Ali regions", func() { cliSession, err = integration.RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - - Expect(string(cliSession.Err.Contents())).To(MatchRegexp("File '" + blobName + "' exists in bucket '" + bucketName + "'")) + Expect(cliSession.Err).Should(gbytes.Say(`"msg":"Object exists in OSS bucket"`)) }) It("overwrites an existing file", func() { diff --git a/alioss/integration/utils.go b/alioss/integration/utils.go index c32cb6f..b34f7e8 100644 --- a/alioss/integration/utils.go +++ b/alioss/integration/utils.go @@ -57,6 +57,8 @@ func RunCli(cliPath string, configPath string, storageType string, subcommand st configPath, "-s", storageType, + "-log-level", + "info", subcommand, } cmdArgs = append(cmdArgs, args...) diff --git a/azurebs/client/client.go b/azurebs/client/client.go index a40cb4b..caaa6f6 100644 --- a/azurebs/client/client.go +++ b/azurebs/client/client.go @@ -5,7 +5,7 @@ import ( "crypto/md5" "fmt" "io" - "log" + "log/slog" "os" "strings" "time" @@ -37,24 +37,24 @@ func (client *AzBlobstore) Put(sourceFilePath string, dest string) error { } if !bytes.Equal(sourceMD5, md5) { - log.Println("The upload failed because of an MD5 inconsistency. Triggering blob deletion ...") + slog.Error("Upload failed due to MD5 mismatch, deleting blob", "blob", dest, "expected_md5", fmt.Sprintf("%x", sourceMD5), "received_md5", fmt.Sprintf("%x", md5)) err := client.storageClient.Delete(dest) if err != nil { - log.Println(fmt.Errorf("blob deletion failed: %w", err)) + slog.Error("Failed to delete blob after MD5 mismatch", "blob", dest, "error", err) } - return fmt.Errorf("the upload responded an MD5 %v does not match the source file MD5 %v", md5, sourceMD5) + return fmt.Errorf("MD5 mismatch: expected %x, got %x", sourceMD5, md5) } - log.Println("Successfully uploaded file") + slog.Debug("MD5 verification passed", "blob", dest, "md5", fmt.Sprintf("%x", md5)) return nil } func (client *AzBlobstore) Get(source string, dest string) error { dstFile, err := os.Create(dest) if err != nil { - log.Fatalln(err) + return fmt.Errorf("failed to create destination file: %w", err) } defer dstFile.Close() //nolint:errcheck diff --git a/azurebs/client/client_test.go b/azurebs/client/client_test.go index 851c6a9..0c86998 100644 --- a/azurebs/client/client_test.go +++ b/azurebs/client/client_test.go @@ -60,7 +60,7 @@ var _ = Describe("Client", func() { file, _ := os.CreateTemp("", "tmpfile") //nolint:errcheck putError := azBlobstore.Put(file.Name(), "target/blob") - Expect(putError.Error()).To(Equal("the upload responded an MD5 [1 2 3] does not match the source file MD5 [212 29 140 217 143 0 178 4 233 128 9 152 236 248 66 126]")) + Expect(putError.Error()).To(Equal("MD5 mismatch: expected d41d8cd98f00b204e9800998ecf8427e, got 010203")) Expect(storageClient.UploadCallCount()).To(Equal(1)) source, dest := storageClient.UploadArgsForCall(0) diff --git a/azurebs/client/storage_client.go b/azurebs/client/storage_client.go index 02f9f28..7ad72d6 100644 --- a/azurebs/client/storage_client.go +++ b/azurebs/client/storage_client.go @@ -6,7 +6,7 @@ import ( "context" "errors" "fmt" "io" - "log" + "log/slog" 
"os" "strconv" "strings" @@ -98,17 +98,18 @@ func (dsc DefaultStorageClient) Upload( timeoutInt, err := strconv.Atoi(dsc.storageConfig.Timeout) timeout := time.Duration(timeoutInt) * time.Second if timeout < 1 && err == nil { - log.Printf("Invalid time \"%s\", need at least 1 second", dsc.storageConfig.Timeout) + slog.Info("Invalid time, need at least 1 second", "timeout", dsc.storageConfig.Timeout) return nil, fmt.Errorf("invalid time: %w", err) } if err != nil { - log.Printf("Invalid timeout format \"%s\", need \"\" e.g. 30", dsc.storageConfig.Timeout) + slog.Info("Invalid timeout format, need seconds as number e.g. 30s", "timeout", dsc.storageConfig.Timeout) return nil, fmt.Errorf("invalid timeout format: %w", err) } - log.Println(fmt.Sprintf("Uploading %s with a timeout of %s", blobURL, timeout)) //nolint:staticcheck + slog.Info("Uploading blob to container", "container", dsc.storageConfig.ContainerName, "blob", dest, "url", blobURL, "timeout", timeout.String()) + ctx, cancel = context.WithTimeout(context.Background(), timeout) } else { - log.Println(fmt.Sprintf("Uploading %s with no timeout", blobURL)) //nolint:staticcheck + slog.Info("Uploading blob to container", "container", dsc.storageConfig.ContainerName, "blob", dest, "url", blobURL) ctx, cancel = context.WithCancel(context.Background()) } defer cancel() @@ -124,6 +125,8 @@ func (dsc DefaultStorageClient) Upload( } return nil, fmt.Errorf("upload failure: %w", err) } + + slog.Info("Successfully uploaded blob", "container", dsc.storageConfig.ContainerName, "blob", dest) return uploadResponse.ContentMD5, err } @@ -131,10 +134,8 @@ func (dsc DefaultStorageClient) Download( source string, dest *os.File, ) error { - blobURL := fmt.Sprintf("%s/%s", dsc.serviceURL, source) - - log.Println(fmt.Sprintf("Downloading %s", blobURL)) //nolint:staticcheck + slog.Info("Downloading blob from container", "container", dsc.storageConfig.ContainerName, "blob", source, "local_file", dest.Name()) client, err := blockblob.NewClientWithSharedKeyCredential(blobURL, dsc.credential, nil) if err != nil { return err @@ -149,7 +150,7 @@ func (dsc DefaultStorageClient) Download( return err } if blobSize != info.Size() { - log.Printf("Truncating file according to the blob size %v", blobSize) + slog.Debug("Truncating file to blob size", "blob_size", blobSize) dest.Truncate(blobSize) //nolint:errcheck } @@ -160,7 +161,7 @@ func (dsc DefaultStorageClient) Copy( srcBlob string, destBlob string, ) error { - log.Printf("Copying blob from %s to %s", srcBlob, destBlob) + slog.Info("Copying blob within container", "container", dsc.storageConfig.ContainerName, "source_blob", srcBlob, "dest_blob", destBlob) srcURL := fmt.Sprintf("%s/%s", dsc.serviceURL, srcBlob) destURL := fmt.Sprintf("%s/%s", dsc.serviceURL, destBlob) @@ -176,7 +177,7 @@ func (dsc DefaultStorageClient) Copy( } copyID := *resp.CopyID - log.Printf("Copy started with CopyID: %s", copyID) + slog.Debug("Copy started", "copy_id", copyID) // Wait for completion for { @@ -186,11 +187,11 @@ func (dsc DefaultStorageClient) Copy( } copyStatus := *props.CopyStatus - log.Printf("Copy status: %s", copyStatus) + slog.Debug("Copy status", "status", copyStatus) switch copyStatus { case "success": - log.Println("Copy completed successfully") + slog.Info("Copy completed successfully", "container", dsc.storageConfig.ContainerName, "source_blob", srcBlob, "dest_blob", destBlob) return nil case "pending": time.Sleep(200 * time.Millisecond) @@ -206,7 +207,7 @@ func (dsc DefaultStorageClient) Delete( blobURL := 
fmt.Sprintf("%s/%s", dsc.serviceURL, dest) - log.Println(fmt.Sprintf("Deleting %s", blobURL)) //nolint:staticcheck + slog.Info("Deleting blob from container", "container", dsc.storageConfig.ContainerName, "blob", dest, "url", blobURL) client, err := blockblob.NewClientWithSharedKeyCredential(blobURL, dsc.credential, nil) if err != nil { return err @@ -229,9 +230,9 @@ func (dsc DefaultStorageClient) DeleteRecursive( prefix string, ) error { if prefix != "" { - log.Printf("Deleting all blobs in container %s with prefix '%s'\n", dsc.storageConfig.ContainerName, prefix) + slog.Info("Deleting all blobs in container", "container", dsc.storageConfig.ContainerName, "prefix", prefix) } else { - log.Printf("Deleting all blobs in container %s\n", dsc.storageConfig.ContainerName) + slog.Info("Deleting all blobs in container", "container", dsc.storageConfig.ContainerName) } containerClient, err := azContainer.NewClientWithSharedKeyCredential(dsc.serviceURL, dsc.credential, nil) @@ -256,13 +257,13 @@ func (dsc DefaultStorageClient) DeleteRecursive( blobURL := fmt.Sprintf("%s/%s", dsc.serviceURL, *blob.Name) blobClient, err := blockblob.NewClientWithSharedKeyCredential(blobURL, dsc.credential, nil) if err != nil { - log.Printf("Failed to create blob client for %s: %v\n", *blob.Name, err) + slog.Error("Failed to create blob client", "blob", *blob.Name, "error", err) continue } _, err = blobClient.BlobClient().Delete(context.Background(), nil) if err != nil && !strings.Contains(err.Error(), "RESPONSE 404") { - log.Printf("Failed to delete blob %s: %v\n", *blob.Name, err) + slog.Error("Failed to delete blob", "blob", *blob.Name, "error", err) } } } @@ -276,7 +277,7 @@ func (dsc DefaultStorageClient) Exists( blobURL := fmt.Sprintf("%s/%s", dsc.serviceURL, dest) - log.Println(fmt.Sprintf("Checking if blob: %s exists", blobURL)) //nolint:staticcheck + slog.Info("Checking if blob exists", "container", dsc.storageConfig.ContainerName, "blob", dest, "url", blobURL) client, err := blockblob.NewClientWithSharedKeyCredential(blobURL, dsc.credential, nil) if err != nil { return false, err @@ -284,11 +285,11 @@ func (dsc DefaultStorageClient) Exists( _, err = client.BlobClient().GetProperties(context.Background(), nil) if err == nil { - log.Printf("File '%s' exists in bucket '%s'\n", dest, dsc.storageConfig.ContainerName) + slog.Info("Blob exists in container", "container", dsc.storageConfig.ContainerName, "blob", dest) return true, nil } if strings.Contains(err.Error(), "RESPONSE 404") { - log.Printf("File '%s' does not exist in bucket '%s'\n", dest, dsc.storageConfig.ContainerName) + slog.Info("Blob does not exist in container", "container", dsc.storageConfig.ContainerName, "blob", dest) return false, nil } @@ -303,7 +304,7 @@ func (dsc DefaultStorageClient) SignedUrl( blobURL := fmt.Sprintf("%s/%s", dsc.serviceURL, dest) - log.Println(fmt.Sprintf("Getting signed url for blob %s", blobURL)) //nolint:staticcheck + slog.Info("Generating SAS URL for blob", "container", dsc.storageConfig.ContainerName, "blob", dest, "request_type", requestType, "expiration", expiration) client, err := azBlob.NewClientWithSharedKeyCredential(blobURL, dsc.credential, nil) if err != nil { return "", err @@ -332,9 +333,9 @@ func (dsc DefaultStorageClient) List( ) ([]string, error) { if prefix != "" { - log.Println(fmt.Sprintf("Listing blobs in container %s with prefix '%s'", dsc.storageConfig.ContainerName, prefix)) //nolint:staticcheck + slog.Info("Listing blobs in container", "container", dsc.storageConfig.ContainerName, "prefix", prefix) 
} else { - log.Println(fmt.Sprintf("Listing blobs in container %s", dsc.storageConfig.ContainerName)) //nolint:staticcheck + slog.Info("Listing blobs in container", "container", dsc.storageConfig.ContainerName) } client, err := azContainer.NewClientWithSharedKeyCredential(dsc.serviceURL, dsc.credential, nil) @@ -375,7 +376,7 @@ func (dsc DefaultStorageClient) Properties( ) error { blobURL := fmt.Sprintf("%s/%s", dsc.serviceURL, dest) - log.Println(fmt.Sprintf("Getting properties for blob %s", blobURL)) //nolint:staticcheck + slog.Info("Getting properties for blob", "container", dsc.storageConfig.ContainerName, "blob", dest, "url", blobURL) client, err := blockblob.NewClientWithSharedKeyCredential(blobURL, dsc.credential, nil) if err != nil { return err @@ -406,7 +407,7 @@ func (dsc DefaultStorageClient) Properties( } func (dsc DefaultStorageClient) EnsureContainerExists() error { - log.Printf("Ensuring container '%s' exists\n", dsc.storageConfig.ContainerName) + slog.Info("Ensuring container exists", "container", dsc.storageConfig.ContainerName) containerClient, err := azContainer.NewClientWithSharedKeyCredential(dsc.serviceURL, dsc.credential, nil) if err != nil { @@ -417,12 +418,12 @@ func (dsc DefaultStorageClient) EnsureContainerExists() error { if err != nil { var respErr *azcore.ResponseError if errors.As(err, &respErr) && respErr.ErrorCode == string(bloberror.ContainerAlreadyExists) { - log.Printf("Container '%s' already exists", dsc.storageConfig.ContainerName) + slog.Info("Container already exists", "container", dsc.storageConfig.ContainerName) return nil } return fmt.Errorf("failed to create container: %w", err) } - log.Printf("Container '%s' created successfully", dsc.storageConfig.ContainerName) + slog.Info("Container created successfully", "container", dsc.storageConfig.ContainerName) return nil } diff --git a/azurebs/integration/assertions.go b/azurebs/integration/assertions.go index 003c44a..4b98d77 100644 --- a/azurebs/integration/assertions.go +++ b/azurebs/integration/assertions.go @@ -6,6 +6,7 @@ import ( "github.com/cloudfoundry/storage-cli/azurebs/config" . 
"github.com/onsi/gomega" //nolint:staticcheck + "github.com/onsi/gomega/gbytes" ) var storageType = "azurebs" @@ -23,8 +24,8 @@ func AssertPutUsesNoTimeout(cliPath string, cfg *config.AZStorageConfig) { sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).To(BeZero()) - Expect(string(sess.Err.Contents())).To(ContainSubstring("Uploading ")) // stderr has log.Println - Expect(string(sess.Err.Contents())).To(ContainSubstring("with no timeout")) + Expect(sess.Err).Should(gbytes.Say(`"msg":"Uploading blob to container`)) + Expect(sess.Err).ShouldNot(gbytes.Say(`"timeout":"*"`)) sess, err = RunCli(cliPath, configPath, storageType, "delete", blob) Expect(err).ToNot(HaveOccurred()) @@ -44,7 +45,8 @@ func AssertPutHonorsCustomTimeout(cliPath string, cfg *config.AZStorageConfig) { sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).To(BeZero()) - Expect(string(sess.Err.Contents())).To(ContainSubstring("with a timeout of 3s")) + Expect(sess.Err).Should(gbytes.Say(`"msg":"Uploading blob to container`)) + Expect(sess.Err).Should(gbytes.Say(`"timeout":"3s"`)) sess, err = RunCli(cliPath, configPath, storageType, "delete", blob) Expect(err).ToNot(HaveOccurred()) @@ -82,7 +84,7 @@ func AssertInvalidTimeoutIsError(cliPath string, cfg *config.AZStorageConfig) { sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).ToNot(BeZero()) - Expect(string(sess.Err.Contents())).To(ContainSubstring(`Invalid timeout format "bananas"`)) + Expect(sess.Err).Should(gbytes.Say(`"error":"upload failure: invalid timeout format: strconv.Atoi: parsing \\"bananas\\": invalid syntax"`)) } func AssertZeroTimeoutIsError(cliPath string, cfg *config.AZStorageConfig) { @@ -98,8 +100,7 @@ func AssertZeroTimeoutIsError(cliPath string, cfg *config.AZStorageConfig) { sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).ToNot(BeZero()) - - Expect(string(sess.Err.Contents())).To(ContainSubstring(`Invalid time "0", need at least 1 second`)) + Expect(sess.Err).Should(gbytes.Say(`"msg":"Invalid time, need at least 1 second"`)) } func AssertNegativeTimeoutIsError(cliPath string, cfg *config.AZStorageConfig) { @@ -116,7 +117,7 @@ func AssertNegativeTimeoutIsError(cliPath string, cfg *config.AZStorageConfig) { Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).ToNot(BeZero()) - Expect(string(sess.Err.Contents())).To(ContainSubstring(`Invalid time "-1", need at least 1 second`)) + Expect(sess.Err).Should(gbytes.Say(`"msg":"Invalid time, need at least 1 second"`)) } func AssertSignedURLTimeouts(cliPath string, cfg *config.AZStorageConfig) { @@ -198,7 +199,7 @@ func AssertLifecycleWorks(cliPath string, cfg *config.AZStorageConfig) { cliSession, err = RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - Expect(cliSession.Err.Contents()).To(MatchRegexp("File '.*' exists in bucket '.*'")) + Expect(cliSession.Err).Should(gbytes.Say(`"msg":"Blob exists in container"`)) // Check blob properties cliSession, err = RunCli(cliPath, configPath, storageType, "properties", blobName) @@ -230,7 +231,7 @@ func AssertLifecycleWorks(cliPath string, cfg *config.AZStorageConfig) { cliSession, err = RunCli(cliPath, configPath, storageType, "exists", blobName) 
Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(3)) - Expect(cliSession.Err.Contents()).To(MatchRegexp("File '.*' does not exist in bucket '.*'")) + Expect(cliSession.Err).Should(gbytes.Say(`"msg":"Blob does not exist in container"`)) cliSession, err = RunCli(cliPath, configPath, storageType, "properties", blobName) Expect(err).ToNot(HaveOccurred()) diff --git a/azurebs/integration/general_azure_test.go b/azurebs/integration/general_azure_test.go index c0ffa1d..b8f0d84 100644 --- a/azurebs/integration/general_azure_test.go +++ b/azurebs/integration/general_azure_test.go @@ -9,6 +9,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" ) var _ = Describe("General testing for all Azure regions", func() { @@ -126,7 +127,7 @@ var _ = Describe("General testing for all Azure regions", func() { cliSession, err = integration.RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - Expect(string(cliSession.Err.Contents())).To(MatchRegexp("File '" + blobName + "' exists in bucket '" + defaultConfig.ContainerName + "'")) + Expect(cliSession.Err).Should(gbytes.Say(`"msg":"Blob exists in container"`)) }) It("overwrites an existing file", func() { diff --git a/azurebs/integration/utils.go b/azurebs/integration/utils.go index c845ff1..04a3b8e 100644 --- a/azurebs/integration/utils.go +++ b/azurebs/integration/utils.go @@ -57,6 +57,8 @@ func RunCli(cliPath string, configPath string, storageType string, subcommand st configPath, "-s", storageType, + "-log-level", + "info", subcommand, } cmdArgs = append(cmdArgs, args...) diff --git a/common/common_suite_test.go b/common/common_suite_test.go new file mode 100644 index 0000000..4069a9b --- /dev/null +++ b/common/common_suite_test.go @@ -0,0 +1,13 @@ +package common + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestCommon(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Common") +} diff --git a/common/config.go b/common/config.go new file mode 100644 index 0000000..5f29e55 --- /dev/null +++ b/common/config.go @@ -0,0 +1,35 @@ +package common + +import ( + "log/slog" + "sync" +) + +type Config struct { + LogLevel slog.Level +} + +var ( + instance *Config + once sync.Once +) + +func InitConfig(logLevel slog.Level) { + once.Do(func() { + instance = &Config{LogLevel: logLevel} + }) +} + +func GetConfig() *Config { + return instance +} + +func IsDebug() bool { + if instance == nil { + return false + } + if instance.LogLevel == slog.LevelDebug { + return true + } + return false +} diff --git a/common/config_test.go b/common/config_test.go new file mode 100644 index 0000000..ea1c00f --- /dev/null +++ b/common/config_test.go @@ -0,0 +1,59 @@ +package common + +import ( + "log/slog" + "sync" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Storage-CLI Config", func() { + Context("when not initialized", func() { + It("IsDebug returns false", func() { + Expect(IsDebug()).To(BeFalse()) + }) + + It("Config is nil", func() { + Expect(GetConfig()).To(BeNil()) + }) + }) + + Context("when initialized with 'debug' level", func() { + BeforeEach(func() { + InitConfig(slog.LevelDebug) + }) + + It("IsDebug returns true", func() { + Expect(IsDebug()).To(BeTrue()) + }) + + It("Config is not nil", func() { + Expect(GetConfig()).ToNot(BeNil()) + }) + + AfterEach(func() { + instance = nil + once = sync.Once{} + }) + }) + + Context("when initialized with 'info' level", func() { + BeforeEach(func() { + InitConfig(slog.LevelInfo) + }) + + It("IsDebug returns false", func() { + Expect(IsDebug()).To(BeFalse()) + }) + + It("Config is not nil", func() { + Expect(GetConfig()).ToNot(BeNil()) + }) + + AfterEach(func() { + instance = nil + once = sync.Once{} + }) + }) +}) diff --git a/gcs/client/client.go b/gcs/client/client.go index 0d550b4..0970b75 100644 --- a/gcs/client/client.go +++ b/gcs/client/client.go @@ -22,7 +22,7 @@ import ( "errors" "fmt" "io" - "log" + "log/slog" "os" "strings" "sync" @@ -105,7 +105,7 @@ func New(ctx context.Context, cfg *config.GCSCli) (*GCSBlobstore, error) { // Get fetches a blob from the GCS blobstore. // Destination will be overwritten if it already exists. func (client *GCSBlobstore) Get(src string, dest string) error { - log.Printf("Getting object from '%s/%s' into file '%s'\n", client.config.BucketName, src, dest) + slog.Info("Getting object into file", "bucket", client.config.BucketName, "object_name", src, "local_path", dest) dstFile, err := os.Create(dest) if err != nil { @@ -139,7 +139,7 @@ func (client *GCSBlobstore) getReader(gcs *storage.Client, src string) (*storage const retryAttempts = 3 func (client *GCSBlobstore) Put(sourceFilePath string, dest string) error { - log.Printf("Putting file '%s' into '%s/%s'\n", sourceFilePath, client.config.BucketName, dest) + slog.Info("Putting file into object", "bucket", client.config.BucketName, "local_path", sourceFilePath, "object_name", dest) src, err := os.Open(sourceFilePath) if err != nil { @@ -168,7 +168,7 @@ func (client *GCSBlobstore) Put(sourceFilePath string, dest string) error { } errs = append(errs, err) - log.Printf("upload failed for %s, attempt %d/%d: %v\n", dest, i+1, retryAttempts, err) + slog.Error("Upload failed", "object_name", dest, "attempt", fmt.Sprintf("%d/%d", i+1, retryAttempts), "error", err) if _, err := src.Seek(pos, io.SeekStart); err != nil { return fmt.Errorf("restting buffer position after failed upload: %v", err) @@ -197,7 +197,7 @@ func (client *GCSBlobstore) putOnce(src io.ReadSeeker, dest string) error { // // If the object does not exist, Delete returns a nil error. func (client *GCSBlobstore) Delete(dest string) error { - log.Printf("Deleting object '%s' in bucket '%s' \n", dest, client.config.BucketName) + slog.Info("Deleting object in bucket", "bucket", client.config.BucketName, "object_name", dest) if client.readOnly() { return ErrInvalidROWriteOperation @@ -212,7 +212,7 @@ func (client *GCSBlobstore) Delete(dest string) error { // Exists checks if a blob exists in the GCS blobstore. 
func (client *GCSBlobstore) Exists(dest string) (exists bool, err error) { - log.Printf("Checking object '%s' exist in bucket '%s'\n", dest, client.config.BucketName) + slog.Info("Checking object exists in bucket", "bucket", client.config.BucketName, "object_name", dest) if exists, err = client.exists(client.publicGCS, dest); err == nil { return exists, nil @@ -229,10 +229,10 @@ func (client *GCSBlobstore) Exists(dest string) (exists bool, err error) { func (client *GCSBlobstore) exists(gcs *storage.Client, dest string) (bool, error) { _, err := client.getObjectHandle(gcs, dest).Attrs(context.Background()) if err == nil { - log.Printf("File '%s' exists in bucket '%s'\n", dest, client.config.BucketName) + slog.Info("Object exists in bucket", "bucket", client.config.BucketName, "object_name", dest) return true, nil } else if errors.Is(err, storage.ErrObjectNotExist) { - log.Printf("File '%s' does not exist in bucket '%s'\n", dest, client.config.BucketName) + slog.Info("Object does not exist in bucket", "bucket", client.config.BucketName, "object_name", dest) return false, nil } return false, err @@ -243,7 +243,7 @@ func (client *GCSBlobstore) readOnly() bool { } func (client *GCSBlobstore) Sign(id string, action string, expiry time.Duration) (string, error) { - log.Printf("Signing object '%s' with method '%s' for '%s'\n", id, action, expiry.String()) + slog.Info("Signing object", "bucket", client.config.BucketName, "object_name", id, "method", action, "expiration", expiry.String()) action = strings.ToUpper(action) token, err := google.JWTConfigFromJSON([]byte(client.config.ServiceAccountFile), storage.ScopeFullControl) @@ -273,9 +273,9 @@ func (client *GCSBlobstore) Sign(id string, action string, expiry time.Duration) func (client *GCSBlobstore) List(prefix string) ([]string, error) { if prefix != "" { - log.Printf("Listing objects in bucket '%s' with prefix '%s'\n", client.config.BucketName, prefix) + slog.Info("Listing all objects in bucket", "bucket", client.config.BucketName, "prefix", prefix) } else { - log.Printf("Listing objects in bucket '%s'\n", client.config.BucketName) + slog.Info("Listing all objects in bucket", "bucket", client.config.BucketName) } if client.readOnly() { return nil, ErrInvalidROWriteOperation @@ -304,7 +304,8 @@ func (client *GCSBlobstore) List(prefix string) ([]string, error) { } func (client *GCSBlobstore) Copy(srcBlob string, dstBlob string) error { - log.Printf("Copying an object from '%s/%s' to '%s/%s'\n", srcBlob, client.config.BucketName, dstBlob, client.config.BucketName) + slog.Info("Copying object", "bucket", client.config.BucketName, "source_object", srcBlob, "destination_object", dstBlob) + if client.readOnly() { return ErrInvalidROWriteOperation } @@ -320,7 +321,8 @@ func (client *GCSBlobstore) Copy(srcBlob string, dstBlob string) error { } func (client *GCSBlobstore) Properties(dest string) error { - log.Printf("Getting properties for object '%s' in bucket '%s'\n", dest, client.config.BucketName) + slog.Info("Getting properties for object", "bucket", client.config.BucketName, "object_name", dest) + if client.readOnly() { return ErrInvalidROWriteOperation } @@ -351,7 +353,8 @@ func (client *GCSBlobstore) Properties(dest string) error { } func (client *GCSBlobstore) EnsureStorageExists() error { - log.Printf("Ensuring bucket '%s' exists\n", client.config.BucketName) + slog.Info("Ensuring bucket exists", "bucket", client.config.BucketName) + if client.readOnly() { return ErrInvalidROWriteOperation } @@ -385,10 +388,9 @@ func (client *GCSBlobstore) 
EnsureStorageExists() error { func (client *GCSBlobstore) DeleteRecursive(prefix string) error { if prefix != "" { - log.Printf("Deleting all objects in bucket '%s' with prefix '%s'\n", - client.config.BucketName, prefix) + slog.Info("Deleting all objects in bucket", "bucket", client.config.BucketName, "prefix", prefix) } else { - log.Printf("Deleting all objects in bucket '%s'\n", client.config.BucketName) + slog.Info("Deleting all objects in bucket", "bucket", client.config.BucketName) } if client.readOnly() { diff --git a/gcs/client/middleware/http_logger.go b/gcs/client/middleware/http_logger.go new file mode 100644 index 0000000..bf95269 --- /dev/null +++ b/gcs/client/middleware/http_logger.go @@ -0,0 +1,46 @@ +package middleware + +import ( + "log/slog" + "net/http" + "time" +) + +type roundTripperFunc func(req *http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return f(req) +} + +func NewLoggingTransport(base http.RoundTripper) http.RoundTripper { + return roundTripperFunc(func(req *http.Request) (*http.Response, error) { + slog.Debug("http request", + "method", req.Method, + "url", req.URL.String(), + "headers", req.Header, + "host", req.Host, + "content_length", req.ContentLength) + + start := time.Now() + resp, err := base.RoundTrip(req) + duration := time.Since(start) + + if err != nil { + slog.Error("http request failed", + "method", req.Method, + "url", req.URL.String(), + "duration_ms", duration.Milliseconds(), + "error", err) + return resp, err + } + if resp != nil { + slog.Debug("http response", + "method", req.Method, + "url", req.URL.String(), + "status_code", resp.StatusCode, + "content_length", resp.ContentLength, + "duration_ms", duration.Milliseconds()) + } + return resp, err + }) +} diff --git a/gcs/client/middleware/http_logger_test.go b/gcs/client/middleware/http_logger_test.go new file mode 100644 index 0000000..aadb40a --- /dev/null +++ b/gcs/client/middleware/http_logger_test.go @@ -0,0 +1,82 @@ +package middleware + +import ( + "bytes" + "errors" + "io" + "log/slog" + "net/http" + "net/http/httptest" + "strings" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("HttpLogger", func() { + var buf bytes.Buffer + BeforeEach(func() { + buf.Reset() + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) + slog.SetDefault(logger) + }) + + Context("when transport returns response,", func() { + It("log with 'http response' message", func() { + mockTransport := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Body: io.NopCloser(strings.NewReader("OK")), + Header: http.Header{"Content-Type": []string{"text/plain"}}, + }, nil + }) + loggingTransport := NewLoggingTransport(mockTransport) + req := httptest.NewRequest("GET", "http://example.com/test", nil) + _, _ = loggingTransport.RoundTrip(req) //nolint:errcheck + logs := buf.String() + Expect(logs).To(ContainSubstring(`"msg":"http response"`)) + Expect(logs).To(ContainSubstring(`"method":"GET"`)) + Expect(logs).To(ContainSubstring(`"status_code":200`)) + }) + }) + + Context("when transport returns error,", func() { + It("log with 'http request failed' message", func() { + + hostNotFound := errors.New("dial tcp: lookup example.com: no such host") + + mockTransport := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + return nil, hostNotFound + }) + loggingTransport := NewLoggingTransport(mockTransport) + req := httptest.NewRequest("GET", "http://example.com/test", nil) + _, _ = loggingTransport.RoundTrip(req) //nolint:errcheck + logs := buf.String() + Expect(logs).To(ContainSubstring(`"msg":"http request failed"`)) + Expect(logs).To(ContainSubstring(`"method":"GET"`)) + Expect(logs).To(ContainSubstring(`"error"`)) + Expect(logs).To(ContainSubstring("no such host")) + }) + }) + + Context("when transport make request,", func() { + It("log with 'http request' message always", func() { + mockTransport := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Body: io.NopCloser(strings.NewReader("OK")), + Header: http.Header{"Content-Type": []string{"text/plain"}}, + }, nil + }) + loggingTransport := NewLoggingTransport(mockTransport) + req := httptest.NewRequest("GET", "http://example.com/test", nil) + _, _ = loggingTransport.RoundTrip(req) //nolint:errcheck + logs := buf.String() + Expect(logs).To(ContainSubstring(`"msg":"http request"`)) + Expect(logs).To(ContainSubstring(`"method":"GET"`)) + Expect(logs).To(ContainSubstring(`"url":"http://example.com/test"`)) + Expect(logs).To(ContainSubstring(`"host":"example.com"`)) + }) + }) + +}) diff --git a/gcs/client/middleware/middleware_suite_test.go b/gcs/client/middleware/middleware_suite_test.go new file mode 100644 index 0000000..7007df9 --- /dev/null +++ b/gcs/client/middleware/middleware_suite_test.go @@ -0,0 +1,13 @@ +package middleware + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestMiddleware(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "GCS Middleware") +} diff --git a/gcs/client/sdk.go b/gcs/client/sdk.go index 4eec4fa..e1e155b 100644 --- a/gcs/client/sdk.go +++ b/gcs/client/sdk.go @@ -22,13 +22,17 @@ import ( "errors" "fmt" + "golang.org/x/oauth2" "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" "google.golang.org/api/option" "net/http" "cloud.google.com/go/storage" + "github.com/cloudfoundry/storage-cli/common" + "github.com/cloudfoundry/storage-cli/gcs/client/middleware" "github.com/cloudfoundry/storage-cli/gcs/config" ) @@ -37,17 +41,38 @@ const uaString = "storage-cli-gcs" func newStorageClients(ctx context.Context, cfg *config.GCSCli) (*storage.Client, *storage.Client, error) { publicClient, err := storage.NewClient(ctx, option.WithUserAgent(uaString), option.WithHTTPClient(http.DefaultClient)) var authenticatedClient *storage.Client + var tokenSource oauth2.TokenSource + var token *jwt.Config switch cfg.CredentialsSource { case config.NoneCredentialsSource: - // no-op + if common.IsDebug() { + httpClient := &http.Client{ + Transport: middleware.NewLoggingTransport(http.DefaultTransport), + } + publicClient, err = storage.NewClient(ctx, option.WithUserAgent(uaString), option.WithHTTPClient(httpClient)) + } case config.DefaultCredentialsSource: - if tokenSource, err := google.DefaultTokenSource(ctx, storage.ScopeFullControl); err == nil { - authenticatedClient, err = storage.NewClient(ctx, option.WithUserAgent(uaString), option.WithTokenSource(tokenSource)) //nolint:ineffassign,staticcheck + if tokenSource, err = google.DefaultTokenSource(ctx, storage.ScopeFullControl); err == nil { + if common.IsDebug() { + baseClient := oauth2.NewClient(ctx, tokenSource) + baseClient.Transport = middleware.NewLoggingTransport(baseClient.Transport) + authenticatedClient, err = storage.NewClient(ctx, option.WithHTTPClient(baseClient), option.WithUserAgent(uaString)) + + } else { + authenticatedClient, err = storage.NewClient(ctx, option.WithUserAgent(uaString), option.WithTokenSource(tokenSource)) //nolint:ineffassign,staticcheck + } } case config.ServiceAccountFileCredentialsSource: - if token, err := google.JWTConfigFromJSON([]byte(cfg.ServiceAccountFile), storage.ScopeFullControl); err == nil { - authenticatedClient, err = storage.NewClient(ctx, option.WithUserAgent(uaString), option.WithTokenSource(token.TokenSource(ctx))) //nolint:ineffassign,staticcheck + if token, err = google.JWTConfigFromJSON([]byte(cfg.ServiceAccountFile), storage.ScopeFullControl); err == nil { + if common.IsDebug() { + tokenSource := token.TokenSource(ctx) + baseClient := oauth2.NewClient(ctx, tokenSource) + baseClient.Transport = middleware.NewLoggingTransport(baseClient.Transport) + authenticatedClient, err = storage.NewClient(ctx, option.WithHTTPClient(baseClient), option.WithUserAgent(uaString)) + } else { + authenticatedClient, err = storage.NewClient(ctx, option.WithUserAgent(uaString), option.WithTokenSource(token.TokenSource(ctx))) //nolint:ineffassign,staticcheck + } } default: return nil, nil, errors.New("unknown credentials_source in configuration") diff --git a/gcs/integration/assertions.go b/gcs/integration/assertions.go index 8f476c4..fc64be6 100644 --- a/gcs/integration/assertions.go +++ b/gcs/integration/assertions.go @@ -45,7 +45,7 @@ func AssertLifecycleWorks(gcsCLIPath string, ctx AssertContext) { session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "exists", ctx.GCSFileName) 
Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) - Expect(session.Err.Contents()).To(MatchRegexp("File '.*' exists in bucket '.*'")) + Expect(session.Err.Contents()).To(MatchRegexp("Object exists in bucket")) tmpLocalFileName := "gcscli-download" defer os.Remove(tmpLocalFileName) //nolint:errcheck @@ -65,7 +65,7 @@ session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "exists", ctx.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(Equal(3)) - Expect(session.Err.Contents()).To(MatchRegexp("File '.*' does not exist in bucket '.*'")) + Expect(session.Err.Contents()).To(MatchRegexp("Object does not exist in bucket")) } func AssertDeleteRecursiveWithPrefixLifecycle(gcsCLIPath string, ctx AssertContext) { @@ -101,16 +101,17 @@ session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "exists", dstObject3) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) + Expect(session.Err.Contents()).To(MatchRegexp("Object exists in bucket")) session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "exists", dstObject1) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(Equal(3)) - Expect(session.Err.Contents()).To(MatchRegexp("File '.*' does not exist in bucket '.*'")) + Expect(session.Err.Contents()).To(MatchRegexp("Object does not exist in bucket")) session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "exists", dstObject1) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(Equal(3)) - Expect(session.Err.Contents()).To(MatchRegexp("File '.*' does not exist in bucket '.*'")) + Expect(session.Err.Contents()).To(MatchRegexp("Object does not exist in bucket")) //cleanup artifact session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "delete", dstObject3) diff --git a/gcs/integration/gcs_public_test.go b/gcs/integration/gcs_public_test.go index 5efbc5f..0dfddc7 100644 --- a/gcs/integration/gcs_public_test.go +++ b/gcs/integration/gcs_public_test.go @@ -93,7 +93,7 @@ var _ = Describe("GCS Public Bucket", func() { session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, storageType, "get", setupEnv.GCSFileName, "/dev/null") Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).ToNot(BeZero()) - Expect(string(session.Err.Contents())).To(ContainSubstring("object doesn't exist")) + Expect(session.Err.Contents()).To(MatchRegexp("object doesn't exist")) }) It("fails to put", func() { diff --git a/gcs/integration/utils.go b/gcs/integration/utils.go index 5c88153..d255d40 100644 --- a/gcs/integration/utils.go +++ b/gcs/integration/utils.go @@ -90,6 +90,8 @@ func RunGCSCLI(gcsCLIPath, configPath, storageType, subcommand string, args ...s configPath, "-s", storageType, + "-log-level", + "info", subcommand, } cmdArgs = append(cmdArgs, args...)
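Reviewer note: the GCS and OSS changes above share one pattern — consult `common.IsDebug()` once and only pay for verbose wire logging at debug level. A minimal sketch of reusing that pattern in another provider client (the `newDebugHTTPClient` helper below is illustrative, not part of this diff):

```go
package example

import (
	"net/http"

	"github.com/cloudfoundry/storage-cli/common"
	"github.com/cloudfoundry/storage-cli/gcs/client/middleware"
)

// newDebugHTTPClient returns the default client unless -log-level debug was
// given, in which case every request/response is traced through the
// slog-backed logging transport introduced in this diff.
func newDebugHTTPClient() *http.Client {
	if !common.IsDebug() {
		return http.DefaultClient
	}
	return &http.Client{
		Transport: middleware.NewLoggingTransport(http.DefaultTransport),
	}
}
```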
diff --git a/main.go b/main.go index 0b76400..fb5c89a 100644 --- a/main.go +++ b/main.go @@ -1,11 +1,16 @@ package main import ( + "errors" "flag" "fmt" + "io" "log" + "log/slog" "os" + "path/filepath" + "github.com/cloudfoundry/storage-cli/common" storage "github.com/cloudfoundry/storage-cli/storage" ) @@ -18,18 +23,63 @@ func fatalLog(cmd string, err error) { // If the object exists the exit status is 0, otherwise it is 3 // We are using `3` since `1` and `2` have special meanings if _, ok := err.(*storage.NotExistsError); ok { - log.Printf("performing operation %s: %s\n", cmd, err) + slog.Error("performing operation", "command", cmd, "error", err) os.Exit(3) } - log.Fatalf("performing operation %s: %s\n", cmd, err) + slog.Error("performing operation", "command", cmd, "error", err) + os.Exit(1) } +// createOrUseProvided creates the log file's parent directory if needed, +// then opens (or creates) the file for appending and returns it. +func createOrUseProvided(logFile string) *os.File { + if _, err := os.Stat(logFile); os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(logFile), 0755); err != nil { + log.Fatalf("failed to create directory: %v", err) + } + } + + f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatalf("failed to open file: %v", err) + } + return f +} + +func parseLogLevel(logLevel string) slog.Level { + switch logLevel { + case "debug": + return slog.LevelDebug + case "info": + return slog.LevelInfo + case "warn", "warning": + return slog.LevelWarn + case "error": + return slog.LevelError + default: + return slog.LevelWarn + } +} + +// configureSlog installs a JSON-formatted slog handler at the given level; +// output goes to the provided writer (os.Stderr by default, plus a file when one is configured). +func configureSlog(m io.Writer, logLevel string) { + level := parseLogLevel(logLevel) + + hOpt := &slog.HandlerOptions{Level: level} + + logger := slog.New(slog.NewJSONHandler(m, hOpt)) + slog.SetDefault(logger) +} + func main() { configPath := flag.String("c", "", "configuration path") showVer := flag.Bool("v", false, "version") - storageType := flag.String("s", "s3", "storage type: azurebs|alioss|s3|gcs|dav") + storageType := flag.String("s", "", "storage type: azurebs|alioss|s3|gcs|dav") + logFile := flag.String("log-file", "", "optional path to a log file (defaults to os.Stderr when not specified)") + logLevel := flag.String("log-level", "warn", "log level: debug|info|warn|error") flag.Parse() if *showVer { @@ -37,24 +87,41 @@ func main() { os.Exit(0) } + // configure slog + writers := []io.Writer{os.Stderr} + if *logFile != "" { + f := createOrUseProvided(*logFile) + defer f.Close() //nolint:errcheck + writers = append(writers, f) + } + configureSlog(io.MultiWriter(writers...), *logLevel) + + // configure storage-cli config + common.InitConfig(parseLogLevel(*logLevel)) + + // check client config file exists configFile, err := os.Open(*configPath) if err != nil { - log.Fatalln(err) + fatalLog("", err) } defer configFile.Close() //nolint:errcheck + // create client client, err := storage.NewStorageClient(*storageType, configFile) if err != nil { - log.Fatalln(err) + fatalLog("", err) } + // inject client into executor cex := storage.NewCommandExecuter(client) + // ensure a command was given nonFlagArgs := flag.Args() if len(nonFlagArgs) < 1 { - log.Fatalf("Expected at least 1 argument (command) got 0") + fatalLog("", errors.New("expected at least 1 argument (command) got 0")) } + // execute command cmd := nonFlagArgs[0] err = cex.Execute(cmd, nonFlagArgs[1:]) fatalLog(cmd, err)
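Reviewer note: for context on the JSON log lines the updated assertions match with `gbytes.Say`, here is a minimal standalone sketch of the handler that `configureSlog` installs (field values are illustrative):

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// Same shape as configureSlog in main.go: a JSON handler on stderr,
	// filtered at the level parsed from -log-level (info here).
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo}))
	slog.SetDefault(logger)

	// Emits one JSON object per line, e.g.:
	// {"time":"...","level":"INFO","msg":"Blob exists in container","container":"c","blob":"b"}
	slog.Info("Blob exists in container", "container", "c", "blob", "b")

	// Below the configured level, so suppressed entirely.
	slog.Debug("http request", "method", "GET")
}
```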