diff --git a/.dockerignore b/.dockerignore
index dad57d48e7e3d..b23ffaa2f0c52 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -13,3 +13,4 @@
 !vendor
 !8.6-compose.repo
 !thirdparty
+!patch
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index 58c20c8d8e3df..cd8805859ac88 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -524,8 +524,6 @@ dependencies = [
 [[package]]
 name = "aws-config"
 version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2da63196d2d0dd38667b404459a35d32562a8d83c1f46c5b789ab89ab176fd53"
 dependencies = [
  "aws-http",
  "aws-sdk-sso",
@@ -541,7 +539,7 @@ dependencies = [
  "hex",
  "http",
  "hyper",
- "ring",
+ "openssl",
  "tokio",
  "tower",
  "tracing 0.1.32",
@@ -796,8 +794,6 @@ dependencies = [
 [[package]]
 name = "aws-sigv4"
 version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea07a5a108ee538793d681d608057218df95c5575f6c0699a1973c27a09334b2"
 dependencies = [
  "aws-smithy-eventstream",
  "aws-smithy-http",
@@ -806,9 +802,9 @@ dependencies = [
  "hex",
  "http",
  "once_cell",
+ "openssl",
  "percent-encoding",
  "regex",
- "ring",
  "time 0.3.7",
  "tracing 0.1.32",
 ]
@@ -840,8 +836,7 @@ dependencies = [
  "http",
  "http-body",
  "hyper",
- "hyper-rustls",
- "lazy_static",
+ "hyper-tls",
  "pin-project 1.0.10",
  "pin-project-lite",
  "tokio",
@@ -877,8 +872,6 @@ dependencies = [
  "once_cell",
  "percent-encoding",
  "pin-project 1.0.10",
- "tokio",
- "tokio-util 0.6.8",
  "tracing 0.1.32",
 ]
 
@@ -5107,15 +5100,6 @@ version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
 
-[[package]]
-name = "openssl-src"
-version = "111.18.0+1.1.1n"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7897a926e1e8d00219127dc020130eca4292e5ca666dd592480d72c3eca2ff6c"
-dependencies = [
- "cc",
-]
-
 [[package]]
 name = "openssl-sys"
 version = "0.9.72"
@@ -5125,7 +5109,6 @@ dependencies = [
  "autocfg",
  "cc",
  "libc",
- "openssl-src",
  "pkg-config",
  "vcpkg",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index 3bcac1c08ef1d..8981837bde707 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -140,17 +140,17 @@ metrics = { version = "0.17.1", default-features = false, features = ["std"] }
 metrics-tracing-context = { version = "0.9.0", default-features = false }
 
 # AWS - Official SDK
-aws-config = { version = "0.9.0", optional = true }
+aws-config = { version = "0.9.0", optional = true, default-features = false, features = ["native-tls"] }
 aws-types = { version = "0.9.0", optional = true, features = ["hardcoded-credentials"]}
-aws-sdk-s3 = { version = "0.9.0", optional = true }
-aws-sdk-sqs = { version = "0.9.0", optional = true }
-aws-sdk-cloudwatch = { version = "0.9.0", optional = true }
-aws-sdk-cloudwatchlogs = { version = "0.9.0", optional = true }
-aws-sdk-elasticsearch = {version = "0.9.0", optional = true }
-aws-sdk-firehose = { version = "0.9.0", optional = true }
-aws-sdk-kinesis = { version = "0.9.0", optional = true }
-aws-sigv4 = { version = "0.9.0", optional = true }
-aws-smithy-client = { version = "0.39.0", optional = true }
+aws-sdk-s3 = { version = "0.9.0", optional = true, default-features = false, features = ["native-tls"] }
+aws-sdk-sqs = { version = "0.9.0", optional = true, default-features = false, features = ["native-tls"] }
+aws-sdk-cloudwatch = { version = "0.9.0", optional = true, default-features = false, features = ["native-tls"] }
+aws-sdk-cloudwatchlogs = { version = "0.9.0", optional = true, default-features = false, features = ["native-tls"] }
+aws-sdk-elasticsearch = {version = "0.9.0", optional = true, default-features = false, features = ["native-tls"] }
+aws-sdk-firehose = { version = "0.9.0", optional = true, default-features = false, features = ["native-tls"] }
+aws-sdk-kinesis = { version = "0.9.0", optional = true, default-features = false, features = ["native-tls"] }
+aws-sigv4 = { version = "0.9.0", optional = true, features = ["openssl"] }
+aws-smithy-client = { version = "0.39.0", optional = true, default-features = false, features = ["native-tls"] }
 aws-smithy-http = { version = "0.39.0", optional = true }
 
 # Azure
@@ -208,7 +208,7 @@ async-compression = { version = "0.3.12", default-features = false, features = [
 avro-rs = { version = "0.13.0", default-features = false, optional = true }
 base64 = { version = "0.13.0", default-features = false, optional = true }
 bloom = { version = "0.3.2", default-features = false, optional = true }
-bollard = { version = "0.11.1", default-features = false, features = ["ssl"] }
+bollard = { version = "0.11.1", default-features = false, features = ["ssl"], optional = true }
 bytes = { version = "1.1.0", default-features = false, features = ["serde"] }
 bytesize = { version = "1.1.0", default-features = false }
 chrono = { version = "0.4.19", default-features = false, features = ["serde"] }
@@ -251,7 +251,7 @@ nom = { version = "7.1.1", default-features = false, optional = true }
 notify = { version = "4.0.17", default-features = false }
 num_cpus = { version = "1.13.1", default-features = false }
 once_cell = { version = "1.10", default-features = false }
-openssl = { version = "0.10.38", default-features = false, features = ["vendored"] }
+openssl = { version = "0.10.38", default-features = false}
 openssl-probe = { version = "0.1.5", default-features = false }
 ordered-float = { version = "2.10.0", default-features = false }
 percent-encoding = { version = "2.1.0", default-features = false }
@@ -334,6 +334,8 @@ leveldb-sys = { git = "https://github.com/vectordotdev/leveldb-sys.git", branch
 # Removes dependency on `time` v0.1
 # https://github.com/chronotope/chrono/pull/578
 chrono = { git = "https://github.com/vectordotdev/chrono.git", branch = "no-default-time" }
+aws-config = { path = "patch/aws-config" }
+aws-sigv4 = { path = "patch/aws-sigv4" }
 
 [features]
 ocp-logging = [
diff --git a/Dockerfile.unit b/Dockerfile.unit
index b359c76fc062b..4ad936d93bf4d 100644
--- a/Dockerfile.unit
+++ b/Dockerfile.unit
@@ -21,3 +21,4 @@
 RUN mkdir -p /src
 WORKDIR /src
 COPY . 
/src +RUN mkdir -p ~/.cargo/bin && ln -s /src/thirdparty/cargo-nextest/cargo-nextest-linux-$(arch) ~/.cargo/bin/cargo-nextest diff --git a/patch/aws-config/.cargo-checksum.json b/patch/aws-config/.cargo-checksum.json new file mode 100644 index 0000000000000..6f4aa70474e36 --- /dev/null +++ b/patch/aws-config/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.lock":"304f241b7aae1acc1e24fc91f6af6de6e3d9bef205633287e3e6bee1d54cff09","Cargo.toml":"5dbcd7201b9cec70cc9e90e90171dba69bde98938142f83b3f6bc50996b55cbe","LICENSE":"09e8a9bcec8067104652c168685ab0931e7868f9c8284b66f5ae6edae5f1130b","README.md":"c37b31a9c037887944302084a51873c901ca1525570bb23cdf3ce9029ba601f4","additional-ci":"635a2ab25920a342ec0d52cef4b78a0aa82dd637603667f01805d3e9a64ad5b5","examples/imds.rs":"e0df4959f2205d9a0ac544a99c461f96a09cf8013b56869943cfbeaca3f0fa57","src/cache.rs":"0ef94a4b8db871028c528b5f75dae107e4ceb34cc8ddf84a7bcde9bf8c34349f","src/connector.rs":"9181e21cbbeb503afb273b1934bc3699c97094608a658251701b6f99ae5fa568","src/default_provider.rs":"94b95238864fbc0c543f10370001b112a01b1535276a8cecac90189f48f6ef07","src/default_provider/app_name.rs":"ee2e87dc47397650611b9aaa930507d59173ef50411c71bebdf4c9536eb5c839","src/default_provider/credentials.rs":"a386ac05b34c077c8c178b8824ce91be96a8162732b71bf1c6c3230baa366103","src/default_provider/region.rs":"1f60f32d0e6046e79fb6fd3ed7ca695cefef27014afe64af134004b546199d18","src/default_provider/retry_config.rs":"f363195a93c25eba35df4cb158925951cf847e8f867b1b0d0cd60e0881538a41","src/default_provider/timeout_config.rs":"9e86355470dd54d9745cff0d7a408f52d7d05603a69c03516c52787e0a35db31","src/ecs.rs":"6f1410c19f1e78fab956f6b2232231e797996c45f723f8ea30cb61dc32eabb6a","src/environment/app_name.rs":"1cebf73cd10fd8a22cc8204cd35a54a228d4fba4aa830f9cea9c508e14d789ff","src/environment/credentials.rs":"7727f2f7a88cc806068b1c9a352c832ce517a625ccb7ba99ef540bb255c816e8","src/environment/mod.rs":"55d2e7d23e40844568bb8ee8b702cd6893a6aa056adb0ba8318537e0265376bb","src/environment/region.rs":"5874f35f37a92c29957dbdc0e57291e3296184634ca28b6fa7df4b5b3e572fd8","src/environment/retry_config.rs":"7052e0609d68a28ba026f13d8d91a8f2d0d55f2c0491acf360bbcfbf338976c7","src/environment/timeout_config.rs":"097767ce6e5fca6c89a9d823402119ddf2f06eecbce9961f24ed32315a07fa25","src/fs_util.rs":"ddd0c6ed3cebc68152ff99f9e33c24c8f0879e7ebd9d4f5f154f20dfe4847c8c","src/http_credential_provider.rs":"cd5fa4c201a857a84100b2b3efe65ae4267255e25ab351d58cccc6ddeab98b21","src/imds/client.rs":"5103d71a7ba746943f0461fb3729eec969a1d3c9b598efc6bfc24d46bf7edbc9","src/imds/client/token.rs":"9d0754186a5a18dae9078ce5359d44f683e3ef09bfb0651f40854783885d399f","src/imds/credentials.rs":"ef48ec83002497c4327094f74d2d1aa944ae94580969fcfa830045ca0e7c9ce7","src/imds/mod.rs":"52bcfd7cc9e807ccc2d8afb8568fbcc6a1cd571eac2e06dbf703765ee53c152d","src/imds/region.rs":"f2c41de6fc4c26f68e02d62af82321e56965114e345227e6de656d55e28b915f","src/json_credentials.rs":"de7cbc1120d1089388ae2aac5967b560f711a164221ee83ef50297e459dfff49","src/lib.rs":"43a7fa5db94faa84b1dd7f0ca7648025892359fa55168357abdb7f4aba56c475","src/meta/credentials/chain.rs":"dddcee3651fd2c6d0926c17b9bd37296eb43857639ee238d21b4b2da4caabd80","src/meta/credentials/credential_fn.rs":"1b319d5800ac53415f4a74968602b17f044f84a8339bb32e5b13709ef753e4aa","src/meta/credentials/lazy_caching.rs":"8d9b1c159b964aed9b196aaacf4a634ab2ffdb1295a7d2a121b9d89c41ce4d0e","src/meta/credentials/mod.rs":"a67a3110e4ba8fb641efb817975606b02cdb17c88ed4988aa4709e3b3f7f1824","src/meta/mod.rs":"031e68ce64dafb417b027f7665cc75
ee5cc5664887c7806c85c4c7bc19aafc2c","src/meta/region.rs":"d669057f3f0a3d44cce403b66e61fa131dfe01ae8704762aa659b4503c1507e4","src/parsing.rs":"1ae7a5ca45b379b06d9ccaec845c38bd79081dbca13b59db4734bfe883de8ece","src/profile/app_name.rs":"e55119f7f587dfb899c5f073cd91169a8e4c41d73cc39aad3ffac8dc29369ba3","src/profile/credentials.rs":"08920469a873a3845564626560898a059e0114b11c78ea95e1243db848667913","src/profile/credentials/exec.rs":"5c834b05091a0f03888d3971dfbb0942b3b98433f116276f54b187b9e50abd00","src/profile/credentials/repr.rs":"93b7217011da3b0e9332d10e8d32b09c628a4b6a03b22684029ee1cc79dd2103","src/profile/mod.rs":"6938c8279487de762031897229122f039369ab9c6c45b37095c0087d77932dbd","src/profile/parser.rs":"c4f4d438dfa077483cf2111ee93999e5852370ff9af29c7dee0792c8ff5cf0e5","src/profile/parser/normalize.rs":"8d3d513f4cc77b09e5457fbad4f98bb580fe6ad40eb1ea2316e267cf2a9c986c","src/profile/parser/parse.rs":"55073a88b22db6eea8c88f345ad1a6190f6709f49b78bfbc06af7c90b08ed64b","src/profile/parser/source.rs":"010708b4d1ac11ea3df9c3fd6014dc180145024ed232264af47a20e32e6d252e","src/profile/region.rs":"7ff1690464824a3a400efce7f484acfe8d6260d01d2142c37a97edd3342ed2be","src/profile/retry_config.rs":"62a1dc2a0c5119da6a2203c3d5b75f38244fc4171ff7422e60384ac19deccb55","src/profile/timeout_config.rs":"dae51600a668d32f306be8189f7553b40e7055624f90cfeecd028473f1640f5d","src/provider_config.rs":"90ad94d87779793d3570906cf968381a78e08088d0170d00d93093ce5fcfc890","src/sso.rs":"0cafe289657fbb2afc8c7c74e8d6c675cb6e59ea811539c66fdffb0c70f9fda6","src/sts.rs":"55fd6b002f16ef8de378546ff1b069f3a0f07f18b6f41949e40d071d665a3648","src/sts/assume_role.rs":"1b68867bd2c027811856e63376d42929a7c5e81de628dda33d6c01ebb5e9a6b8","src/sts/util.rs":"58c7aaa6bcd4808f74d37ad3e00f9499800f5d7a5ea6ffffb21fc15de002cf97","src/test_case.rs":"5e1a539074b3b2c4b7385572070f1b60ee51425ad3d00f111bd95daa435074aa","src/web_identity_token.rs":"ed52452e6908ede1a3827a6a8ad611ed2bc41b4701998bc25f343bc3f18d2cbb"},"package":"2da63196d2d0dd38667b404459a35d32562a8d83c1f46c5b789ab89ab176fd53"} \ No newline at end of file diff --git a/patch/aws-config/Cargo.lock b/patch/aws-config/Cargo.lock new file mode 100644 index 0000000000000..3a40945c50881 --- /dev/null +++ b/patch/aws-config/Cargo.lock @@ -0,0 +1,1843 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +dependencies = [ + "memchr", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "arbitrary" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c38b6b6b79f671c25e1a3e785b7b82d7562ffc9cd3efdc98627e5668a2472490" + +[[package]] +name = "assert-json-diff" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4259cbe96513d2f1073027a259fc2ca917feb3026a5a8d984e3628e490255cc0" +dependencies = [ + "extend", + "serde", + "serde_json", +] + +[[package]] +name = "async-trait" +version = "0.1.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "aws-config" +version = "0.9.0" +dependencies = [ + "arbitrary", + "async-trait", + "aws-http", + "aws-sdk-sso", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", + "aws-smithy-types", + "aws-types", + "bytes", + "env_logger", + "futures-util", + "hex", + "http", + "hyper", + "hyper-rustls 0.23.0", + "openssl", + "serde", + "serde_json", + "tokio", + "tower", + "tracing", + "tracing-test", + "zeroize", +] + +[[package]] +name = "aws-endpoint" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5279590d48e92b287f864e099c7e851af03a5e184a57cec0959872cee297c7a0" +dependencies = [ + "aws-smithy-http", + "aws-types", + "http", + "regex", + "tracing", +] + +[[package]] +name = "aws-http" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7046bdd807c70caf28d6dbc69b9d6d8dda1728577866d3ff3862de585b8b0eb" +dependencies = [ + "aws-smithy-http", + "aws-smithy-types", + "aws-types", + "http", + "lazy_static", + "percent-encoding", + "tracing", +] + +[[package]] +name = "aws-sdk-sso" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96f9038b498944025a39e426ae38f64e3e8481a9d675469580e1de7397b46ed5" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", + "aws-smithy-types", + "aws-types", + "bytes", + "http", + "tokio-stream", + "tower", +] + +[[package]] +name = "aws-sdk-sts" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e717e67debcd7f9d87563d08e7d40e3c5c28634a8badc491650d5ad2305befd3" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + 
"aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-query", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "http", + "tower", +] + +[[package]] +name = "aws-sig-auth" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0e6e4ba09f502057ad6a4ebf3627f9dae8402e366cf7b36ca1c09cbff8b5834" +dependencies = [ + "aws-sigv4", + "aws-smithy-http", + "aws-types", + "http", + "thiserror", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea07a5a108ee538793d681d608057218df95c5575f6c0699a1973c27a09334b2" +dependencies = [ + "aws-smithy-http", + "form_urlencoded", + "hex", + "http", + "once_cell", + "percent-encoding", + "regex", + "ring", + "time", + "tracing", +] + +[[package]] +name = "aws-smithy-async" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66ab5373d24e1651860240f122a8d956f7a2094d4553c78979617a7fac640030" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", + "tokio-stream", +] + +[[package]] +name = "aws-smithy-client" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88e8a92747322eace67f666402a5949da27675f60a2b9098b84b63edef8e6980" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-protocol-test", + "aws-smithy-types", + "bytes", + "fastrand", + "http", + "http-body", + "hyper", + "hyper-rustls 0.22.1", + "hyper-tls", + "lazy_static", + "pin-project", + "pin-project-lite", + "serde", + "tokio", + "tower", + "tracing", +] + +[[package]] +name = "aws-smithy-http" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579d0c2ae96c700499c5330f082c4170b0535835f01eb845056324aa0abd04b4" +dependencies = [ + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "http", + "http-body", + "hyper", + "once_cell", + "percent-encoding", + "pin-project", + "tracing", +] + +[[package]] +name = "aws-smithy-http-tower" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "101a2e213acebe624cfb9bfc944de5e33c849e0df0f09c3d3aa3b54368dbe7af" +dependencies = [ + "aws-smithy-http", + "bytes", + "http", + "http-body", + "pin-project", + "tower", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd21f28535a2538b77274aa590abfb6d37aece3281dfc4c9411c1625d3b9239e" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-protocol-test" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793e1265ac2b4a6b135bda6f0c8e5c2361d3eda02e9d984054235247f2f3929e" +dependencies = [ + "assert-json-diff", + "http", + "pretty_assertions", + "regex", + "roxmltree", + "serde_json", + "thiserror", +] + +[[package]] +name = "aws-smithy-query" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb5a2c90311b0d20cf23212a15961cad2b76480863b1f7ce0608d9ece8dacdfb" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-types" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "962f2da621cd29f272636eebce39ca321c91e02bbb7eb848c4587ac14933d339" +dependencies = [ + 
"itoa", + "num-integer", + "ryu", + "time", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829c7efd92b7a6d0536ceb48fd93a289ddf8763c67bffe875d82eae3f9886546" +dependencies = [ + "thiserror", + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68159725aa77553dbc6028f36d8378563cd45b18ef9cf03d1515ac469efacf13" +dependencies = [ + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-types", + "rustc_version", + "tracing", + "zeroize", +] + +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bumpalo" +version = "3.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" + +[[package]] +name = "bytes" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" + +[[package]] +name = "bytes-utils" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e314712951c43123e5920a446464929adc667a5eade7f8fb3997776c9df6e54" +dependencies = [ + "bytes", + "either", +] + +[[package]] +name = "cc" +version = "1.0.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + +[[package]] +name = "ct-logs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" +dependencies = [ + "sct 0.6.1", +] + +[[package]] +name = "ctor" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccc0a48a9b826acdf4028595adc9db92caea352f7af011a3034acd172a52a0aa" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "diff" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "env_logger" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "extend" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f47da3a72ec598d9c8937a7ebca8962a5c7a1f28444e38c2b33c771ba3f55f05" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "fastrand" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +dependencies = [ + "instant", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +dependencies = [ + "matches", + "percent-encoding", +] + +[[package]] +name = "futures-channel" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +dependencies = [ + "futures-core", +] + +[[package]] +name = "futures-core" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" + +[[package]] +name = "futures-macro" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" + +[[package]] +name = "futures-task" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" + +[[package]] +name = "futures-util" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +dependencies = [ + "futures-core", + "futures-macro", + "futures-task", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "h2" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62eeb471aa3e3c9197aa4bfeabfe02982f6dc96f750486c0bb0009ac58b26d2b" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "http" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" + +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "hyper" +version = "0.14.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043f0e083e9901b6cc658a77d1eb86f4fc650bbb977a4337dd63192826aa85dd" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +dependencies = [ + "ct-logs", + "futures-util", + "hyper", + "log", + "rustls 0.19.1", + "rustls-native-certs 0.5.0", + "tokio", + "tokio-rustls 0.22.0", + "webpki 0.21.4", +] + +[[package]] +name = "hyper-rustls" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +dependencies = [ + "http", + "hyper", + "log", + "rustls 0.20.4", + "rustls-native-certs 0.6.1", + "tokio", + "tokio-rustls 0.23.2", + "webpki-roots", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "indexmap" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + 
+[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + +[[package]] +name = "js-sys" +version = "0.3.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad5c14e80759d0939d013e6ca49930e59fc53dd8e5009132f76240c179380c09" + +[[package]] +name = "lock_api" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matches" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" + +[[package]] +name = "memchr" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" + +[[package]] +name = "mio" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" +dependencies = [ + "libc", + "log", + "miow", + "ntapi", + "wasi", + "winapi", +] + +[[package]] +name = "miow" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" +dependencies = [ + "winapi", +] + +[[package]] +name = "native-tls" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "ntapi" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" +dependencies = [ + "winapi", +] + +[[package]] +name = "num-integer" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", +] + +[[package]] +name = 
"num_cpus" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "num_threads" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aba1801fb138d8e85e11d0fc70baf4fe1cdfffda7c6cd34a854905df588e5ed0" +dependencies = [ + "libc", +] + +[[package]] +name = "once_cell" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" + +[[package]] +name = "openssl" +version = "0.10.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-sys", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "output_vt100" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" +dependencies = [ + "winapi", +] + +[[package]] +name = "parking_lot" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", +] + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" + +[[package]] +name = "pin-project" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" + +[[package]] +name = "pretty_assertions" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c038cb5319b9c704bf9c227c261d275bfec0ad438118a2787ce47944fb228b" +dependencies = [ + "ansi_term", + "ctor", + "diff", + "output_vt100", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4af2ec4714533fcdf07e886f17025ace8b997b9ce51204ee69b6da831c3da57" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "redox_syscall" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8380fe0152551244f0747b1bf41737e0f8a74f97a14ccefd1148187271634f3c" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi", +] + +[[package]] +name = "roxmltree" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "921904a62e410e37e215c40381b7117f830d9d89ba60ab5236170541dd25646b" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustls" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +dependencies = [ + "base64", + "log", + "ring", + "sct 0.6.1", + "webpki 0.21.4", +] + +[[package]] +name = "rustls" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" +dependencies = [ + "log", + "ring", + "sct 0.7.0", + "webpki 0.22.0", +] + +[[package]] +name = "rustls-native-certs" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +dependencies = [ + "openssl-probe", + "rustls 0.19.1", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +dependencies = [ + "base64", +] + +[[package]] +name = "ryu" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" + +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "sct" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "security-framework" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d" + +[[package]] +name = "serde" +version = "1.0.136" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.136" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" + +[[package]] +name = "smallvec" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" + +[[package]] +name = "socket2" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "syn" +version = "1.0.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea297be220d52398dcc07ce15a209fce436d361735ac1db700cab3b6cdfb9f54" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +dependencies = [ + "once_cell", +] + +[[package]] +name = "time" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "004cbc98f30fa233c61a38bc77e96a9106e65c88f2d3bef182ae952027e5753d" +dependencies = [ + "libc", + "num_threads", +] + +[[package]] +name = "tokio" +version = "1.17.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" +dependencies = [ + "bytes", + "libc", + "memchr", + "mio", + "num_cpus", + "once_cell", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "winapi", +] + +[[package]] +name = "tokio-macros" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls 0.19.1", + "tokio", + "webpki 0.21.4", +] + +[[package]] +name = "tokio-rustls" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +dependencies = [ + "rustls 0.20.4", + "tokio", + "webpki 0.22.0", +] + +[[package]] +name = "tokio-stream" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + +[[package]] +name = "tower-service" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" + +[[package]] +name = "tracing" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1bdf54a7c28a2bbf701e1d2233f6c77f473486b94bee4f9678da5a148dca7f" +dependencies = [ + "cfg-if", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa31669fa42c09c34d94d8165dd2012e8ff3c66aca50f3bb226b68f216f2706c" +dependencies = [ + "lazy_static", + 
"valuable", +] + +[[package]] +name = "tracing-log" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e0ab7bdc962035a87fba73f3acca9b8a8d0034c2e6f60b84aeaaddddc155dce" +dependencies = [ + "ansi_term", + "lazy_static", + "matchers", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "tracing-test" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3eb7bda2e93bbc9c5b247034acc6a4b3d04f033a3d4b8fc1cb87d4d1c7c7ebd7" +dependencies = [ + "lazy_static", + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4801dca35e4e2cee957c469bd4a1c370fadb7894c0d50721a40eba3523e6e91c" +dependencies = [ + "lazy_static", + "quote", + "syn", +] + +[[package]] +name = "try-lock" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "urlencoding" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a1f0175e03a0973cf4afd476bef05c26e228520400eb1fd473ad417b1c00ffb" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" + +[[package]] +name = "web-sys" +version = "0.3.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" +dependencies = [ + "webpki 0.22.0", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" + +[[package]] +name = "windows_i686_gnu" +version = "0.32.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" + +[[package]] +name = "windows_i686_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" + +[[package]] +name = "xmlparser" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "114ba2b24d2167ef6d67d7d04c8cc86522b87f490025f39f0303b7db5bf5e3d8" + +[[package]] +name = "zeroize" +version = "1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7eb5728b8afd3f280a869ce1d4c554ffaed35f45c231fc41bfbd0381bef50317" diff --git a/patch/aws-config/Cargo.toml b/patch/aws-config/Cargo.toml new file mode 100644 index 0000000000000..8b2de243b9beb --- /dev/null +++ b/patch/aws-config/Cargo.toml @@ -0,0 +1,130 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "aws-config" +version = "0.9.0" +authors = ["AWS Rust SDK Team ", "Russell Cohen "] +exclude = ["test-data/*", "integration-tests/*"] +description = "AWS SDK config and credential provider implementations." 
+license = "Apache-2.0" +repository = "https://github.com/awslabs/smithy-rs" +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] +targets = ["x86_64-unknown-linux-gnu"] +[dependencies.aws-http] +version = "0.9.0" + +[dependencies.aws-sdk-sso] +version = "0.9.0" +default-features = false + +[dependencies.aws-sdk-sts] +version = "0.9.0" +default-features = false + +[dependencies.aws-smithy-async] +version = "0.39.0" + +[dependencies.aws-smithy-client] +version = "0.39.0" + +[dependencies.aws-smithy-http] +version = "0.39.0" + +[dependencies.aws-smithy-http-tower] +version = "0.39.0" + +[dependencies.aws-smithy-json] +version = "0.39.0" + +[dependencies.aws-smithy-types] +version = "0.39.0" + +[dependencies.aws-types] +version = "0.9.0" + +[dependencies.bytes] +version = "1.1.0" + +[dependencies.hex] +version = "0.4.3" + +[dependencies.http] +version = "0.2.4" + +[dependencies.hyper] +version = "0.14" +default-features = false + +[dependencies.tokio] +version = "1" +features = ["sync"] + +[dependencies.tower] +version = "0.4.8" + +[dependencies.tracing] +version = "0.1" + +[dependencies.zeroize] +version = "1" + +[dependencies.ring] +version = "0.16" +optional = true + +[dependencies.openssl] +version = "0.10.38" +optional = true + +[dev-dependencies.arbitrary] +version = "1.0.2" + +[dev-dependencies.async-trait] +version = "0.1.51" + +[dev-dependencies.aws-smithy-client] +version = "0.39.0" +features = ["test-util"] + +[dev-dependencies.env_logger] +version = "0.9.0" + +[dev-dependencies.futures-util] +version = "0.3.16" + +[dev-dependencies.hyper-rustls] +version = "0.23.0" +features = ["webpki-tokio", "http2", "http1"] + +[dev-dependencies.serde] +version = "1" +features = ["derive"] + +[dev-dependencies.serde_json] +version = "1" + +[dev-dependencies.tokio] +version = "1" +features = ["full", "test-util"] + +[dev-dependencies.tracing-test] +version = "0.2.1" + +[features] +default = ["rustls", "rt-tokio"] +native-tls = ["openssl", "aws-smithy-client/native-tls"] +rt-tokio = ["aws-smithy-async/rt-tokio"] +rustls = ["ring", "aws-smithy-client/rustls"] diff --git a/patch/aws-config/LICENSE b/patch/aws-config/LICENSE new file mode 100644 index 0000000000000..67db8588217f2 --- /dev/null +++ b/patch/aws-config/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/patch/aws-config/README.md b/patch/aws-config/README.md new file mode 100644 index 0000000000000..d14eff55843f7 --- /dev/null +++ b/patch/aws-config/README.md @@ -0,0 +1,72 @@ +# aws-config + +AWS SDK config and credential provider implementations. + +**Please Note: The SDK is currently released as an alpha and is intended strictly for feedback purposes only. Do not use this SDK for production workloads.** + + The implementations can be used either via the default chain implementation `from_env`/`ConfigLoader` or ad-hoc individual credential and region providers. + +A `ConfigLoader` can combine different configuration sources into an AWS shared-config `Config`. The `Config` can then be used to configure one or more AWS service clients. + +## Examples + +Load default SDK configuration: + +```rust +async fn example() { + let config = aws_config::load_from_env().await; + let client = aws_sdk_dynamodb::Client::new(&config); +} +``` + +Load SDK configuration with a region override: + +```rust +use aws_config::meta::region::RegionProviderChain; + +async fn example() { + let region_provider = RegionProviderChain::default_provider().or_else("us-east-1"); + let config = aws_config::from_env().region(region_provider).load().await; + let client = aws_sdk_dynamodb::Client::new(&config); +} +``` + +## Getting Started + +_Examples are available for many services and operations, check out the [examples folder in GitHub][Usage examples]._ + +The SDK provides one crate per AWS service. You must add [Tokio] as a dependency within your Rust project to execute asynchronous code. To add aws-sdk-config to your project, add the following to your Cargo.toml file where VERSION is the version of the SDK you want to use: + +```toml +[dependencies] +aws-config = "VERSION" +aws-sdk-config = "VERSION" +tokio = { version = "1", features = ["full"] } +``` + +## Using the SDK + +Until the SDK is released, we will be adding information about using the SDK to the [Guide]. Feel free to suggest additional sections for the guide by opening an issue and describing what you are trying to do. 
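+A minimal end-to-end sketch of the above (the printed field is only for illustration; in practice you would pass the loaded config to a generated service client):
+
+```rust
+#[tokio::main]
+async fn main() {
+    // Resolve region, credentials, retry and timeout settings from the environment
+    // and the shared AWS config/credentials files.
+    let config = aws_config::load_from_env().await;
+    println!("resolved region: {:?}", config.region());
+}
+```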
+ +## Getting Help + +- [GitHub discussions] - For ideas, RFCs & general questions +- [GitHub issues] – For bug reports & feature requests +- [Generated Docs] (latest version) +- [Usage examples] + +## License + +This project is licensed under the Apache-2.0 License. + +[examples folder in GitHub]: https://github.com/awslabs/aws-sdk-rust/tree/main/examples +[Tokio]: https://crates.io/crates/tokio +[Guide]: https://github.com/awslabs/aws-sdk-rust/blob/main/Guide.md +[GitHub discussions]: https://github.com/awslabs/aws-sdk-rust/discussions +[GitHub issues]: https://github.com/awslabs/aws-sdk-rust/issues/new/choose +[Generated Docs]: https://awslabs.github.io/aws-sdk-rust/ +[Usage examples]: https://github.com/awslabs/aws-sdk-rust/tree/main/examples + + +This crate is part of the [AWS SDK for Rust](https://awslabs.github.io/aws-sdk-rust/) and the [smithy-rs](https://github.com/awslabs/smithy-rs) code generator. In most cases, it should not be used directly. + diff --git a/patch/aws-config/additional-ci b/patch/aws-config/additional-ci new file mode 100755 index 0000000000000..00522a9e0c77a --- /dev/null +++ b/patch/aws-config/additional-ci @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0. +# + +# This script contains additional CI checks to run for this specific package + +set -e + +echo "### Checking for duplicate dependency versions in the normal dependency graph with all features enabled" +cargo tree -d --edges normal --all-features + +echo "### Testing with all features enabled" +cargo test --all-features + +echo "### Testing each feature in isolation" +cargo hack test --feature-powerset diff --git a/patch/aws-config/examples/imds.rs b/patch/aws-config/examples/imds.rs new file mode 100644 index 0000000000000..3c6653df5c1dd --- /dev/null +++ b/patch/aws-config/examples/imds.rs @@ -0,0 +1,19 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +/// IMDSv2 client usage example +/// +/// The IMDS client is used with `aws-config` to load credentials and regions, however, you can also +/// use the client directly. This example demonstrates loading the instance-id from IMDS. More +/// fetures of IMDS can be found [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) +#[tokio::main] +async fn main() -> Result<(), Box> { + use aws_config::imds::Client; + + let imds = Client::builder().build().await?; + let instance_id = imds.get("/latest/meta-data/instance-id").await?; + println!("current instance id: {}", instance_id); + Ok(()) +} diff --git a/patch/aws-config/src/cache.rs b/patch/aws-config/src/cache.rs new file mode 100644 index 0000000000000..d3b0b8ef6b357 --- /dev/null +++ b/patch/aws-config/src/cache.rs @@ -0,0 +1,163 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Expiry-aware cache +//! +//! [`ExpiringCache`] implements two important features: +//! 1. Respect expiry of contents +//! 2. Deduplicate load requests to prevent thundering herds when no value is present. + +use std::future::Future; +use std::marker::PhantomData; +use std::sync::Arc; +use std::time::{Duration, SystemTime}; +use tokio::sync::{OnceCell, RwLock}; + +#[derive(Debug)] +pub(crate) struct ExpiringCache { + /// Amount of time before the actual expiration time + /// when the value is considered expired. 
+ buffer_time: Duration, + value: Arc>>, + _phantom: PhantomData, +} + +impl Clone for ExpiringCache { + fn clone(&self) -> Self { + Self { + buffer_time: self.buffer_time, + value: self.value.clone(), + _phantom: Default::default(), + } + } +} + +impl ExpiringCache +where + T: Clone, +{ + pub fn new(buffer_time: Duration) -> Self { + ExpiringCache { + buffer_time, + value: Arc::new(RwLock::new(OnceCell::new())), + _phantom: Default::default(), + } + } + + #[cfg(test)] + async fn get(&self) -> Option + where + T: Clone, + { + self.value + .read() + .await + .get() + .cloned() + .map(|(creds, _expiry)| creds) + } + + /// Attempts to refresh the cached value with the given future. + /// If multiple threads attempt to refresh at the same time, one of them will win, + /// and the others will await that thread's result rather than multiple refreshes occurring. + /// The function given to acquire a value future, `f`, will not be called + /// if another thread is chosen to load the value. + pub async fn get_or_load(&self, f: F) -> Result + where + F: FnOnce() -> Fut, + Fut: Future>, + { + let lock = self.value.read().await; + let future = lock.get_or_try_init(f); + future.await.map(|(value, _expiry)| value.clone()) + } + + /// If the value is expired, clears the cache. Otherwise, yields the current value. + pub async fn yield_or_clear_if_expired(&self, now: SystemTime) -> Option { + // Short-circuit if the value is not expired + if let Some((value, expiry)) = self.value.read().await.get() { + if !expired(*expiry, self.buffer_time, now) { + return Some(value.clone()); + } + } + + // Acquire a write lock to clear the cache, but then once the lock is acquired, + // check again that the value is not already cleared. If it has been cleared, + // then another thread is refreshing the cache by the time the write lock was acquired. + let mut lock = self.value.write().await; + if let Some((_value, expiration)) = lock.get() { + // Also check that we're clearing the expired value and not a value + // that has been refreshed by another thread. 
+ if expired(*expiration, self.buffer_time, now) { + *lock = OnceCell::new(); + } + } + None + } +} + +fn expired(expiration: SystemTime, buffer_time: Duration, now: SystemTime) -> bool { + now >= (expiration - buffer_time) +} + +#[cfg(test)] +mod tests { + use super::{expired, ExpiringCache}; + use aws_types::credentials::CredentialsError; + use aws_types::Credentials; + use std::time::{Duration, SystemTime}; + use tracing_test::traced_test; + + fn credentials(expired_secs: u64) -> Result<(Credentials, SystemTime), CredentialsError> { + let expiry = epoch_secs(expired_secs); + let creds = Credentials::new("test", "test", None, Some(expiry), "test"); + Ok((creds, expiry)) + } + + fn epoch_secs(secs: u64) -> SystemTime { + SystemTime::UNIX_EPOCH + Duration::from_secs(secs) + } + + #[test] + fn expired_check() { + let ts = epoch_secs(100); + assert!(expired(ts, Duration::from_secs(10), epoch_secs(1000))); + assert!(expired(ts, Duration::from_secs(10), epoch_secs(90))); + assert!(!expired(ts, Duration::from_secs(10), epoch_secs(10))); + } + + #[traced_test] + #[tokio::test] + async fn cache_clears_if_expired_only() { + let cache = ExpiringCache::new(Duration::from_secs(10)); + assert!(cache + .yield_or_clear_if_expired(epoch_secs(100)) + .await + .is_none()); + + cache + .get_or_load(|| async { credentials(100) }) + .await + .unwrap(); + assert_eq!(Some(epoch_secs(100)), cache.get().await.unwrap().expiry()); + + // It should not clear the credentials if they're not expired + assert_eq!( + Some(epoch_secs(100)), + cache + .yield_or_clear_if_expired(epoch_secs(10)) + .await + .unwrap() + .expiry() + ); + + // It should clear the credentials if they're expired + assert!(cache + .yield_or_clear_if_expired(epoch_secs(500)) + .await + .is_none()); + assert!(cache.get().await.is_none()); + } +} diff --git a/patch/aws-config/src/connector.rs b/patch/aws-config/src/connector.rs new file mode 100644 index 0000000000000..f85a4c30d3b06 --- /dev/null +++ b/patch/aws-config/src/connector.rs @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Functionality related to creating new HTTP Connectors + +use std::sync::Arc; + +use aws_smithy_async::rt::sleep::AsyncSleep; +use aws_smithy_client::erase::DynConnector; +use aws_smithy_client::http_connector::HttpSettings; + +// unused when all crate features are disabled +/// Unwrap an [`Option`](aws_smithy_client::erase::DynConnector), and panic with a helpful error message if it's `None` +pub(crate) fn expect_connector(connector: Option) -> DynConnector { + connector.expect("A connector was not available. Either set a custom connector or enable the `rustls` and `native-tls` crate features.") +} + +#[cfg(any(feature = "rustls", feature = "native-tls"))] +fn base( + settings: &HttpSettings, + sleep: Option>, +) -> aws_smithy_client::hyper_ext::Builder { + let mut hyper = + aws_smithy_client::hyper_ext::Adapter::builder().timeout(&settings.http_timeout_config); + if let Some(sleep) = sleep { + hyper = hyper.sleep_impl(sleep); + } + hyper +} + +/// Given `HttpSettings` and an `AsyncSleep`, create a `DynConnector` from defaults depending on what cargo features are activated. 
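+/// When both the `rustls` and `native-tls` features are enabled, the `rustls` connector wins
+/// (note the `cfg` guards on the variants below); when neither feature is enabled, `None` is
+/// returned and callers must supply their own connector (see `expect_connector` above).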
+#[cfg(feature = "rustls")] +pub fn default_connector( + settings: &HttpSettings, + sleep: Option>, +) -> Option { + let hyper = base(settings, sleep).build(aws_smithy_client::conns::https()); + Some(DynConnector::new(hyper)) +} + +/// Given `HttpSettings` and an `AsyncSleep`, create a `DynConnector` from defaults depending on what cargo features are activated. +#[cfg(all(not(feature = "rustls"), feature = "native-tls"))] +pub fn default_connector( + settings: &HttpSettings, + sleep: Option>, +) -> Option { + let hyper = base(settings, sleep).build(aws_smithy_client::conns::native_tls()); + Some(DynConnector::new(hyper)) +} + +/// Given `HttpSettings` and an `AsyncSleep`, create a `DynConnector` from defaults depending on what cargo features are activated. +#[cfg(not(any(feature = "rustls", feature = "native-tls")))] +pub fn default_connector( + _settings: &HttpSettings, + _sleep: Option>, +) -> Option { + None +} diff --git a/patch/aws-config/src/default_provider.rs b/patch/aws-config/src/default_provider.rs new file mode 100644 index 0000000000000..4ffe879ab1200 --- /dev/null +++ b/patch/aws-config/src/default_provider.rs @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Default Provider chains for [`region`](default_provider::region), [`credentials`](default_provider::credentials), +//! [retries](default_provider::retry_config), [timeouts](default_provider::timeout_config) and [app name](default_provider::app_name). +//! +//! Typically, this module is used via [`load_from_env`](crate::load_from_env) or [`from_env`](crate::from_env). It should only be used directly +//! if you need to set custom configuration options to override the default resolution chain. + +/// Default [region](aws_types::region::Region) provider chain +/// +/// Typically, this module is used via [`load_from_env`](crate::load_from_env) or [`from_env`](crate::from_env). It should only be used directly +/// if you need to set custom configuration options to override the default resolution chain. +pub mod region; + +/// Default retry behavior configuration provider chain +/// +/// Typically, this module is used via [`load_from_env`](crate::load_from_env) or [`from_env`](crate::from_env). It should only be used directly +/// if you need to set custom configuration options to override the default resolution chain. +pub mod retry_config; + +/// Default app name provider chain +/// +/// Typically, this module is used via [`load_from_env`](crate::load_from_env) or [`from_env`](crate::from_env). It should only be used directly +/// if you need to set custom configuration options to override the default resolution chain. +pub mod app_name; + +/// Default timeout configuration provider chain +/// +/// Typically, this module is used via [`load_from_env`](crate::load_from_env) or [`from_env`](crate::from_env). It should only be used directly +/// if you need to set custom configuration options to override the default resolution chain. +pub mod timeout_config; + +/// Default credentials provider chain +/// +/// Typically, this module is used via [`load_from_env`](crate::load_from_env) or [`from_env`](crate::from_env). It should only be used directly +/// if you need to set custom configuration options like [`region`](credentials::Builder::region) or [`profile_name`](credentials::Builder::profile_name). 
+pub mod credentials; diff --git a/patch/aws-config/src/default_provider/app_name.rs b/patch/aws-config/src/default_provider/app_name.rs new file mode 100644 index 0000000000000..2909b170a543d --- /dev/null +++ b/patch/aws-config/src/default_provider/app_name.rs @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use crate::environment::app_name::EnvironmentVariableAppNameProvider; +use crate::profile::app_name; +use crate::provider_config::ProviderConfig; +use aws_types::app_name::AppName; + +/// Default App Name Provider chain +/// +/// This provider will check the following sources in order: +/// 1. [Environment variables](EnvironmentVariableAppNameProvider) +/// 2. [Profile file](crate::profile::app_name::ProfileFileAppNameProvider) +pub fn default_provider() -> Builder { + Builder::default() +} + +/// Default provider builder for [`AppName`] +#[derive(Default)] +pub struct Builder { + env_provider: EnvironmentVariableAppNameProvider, + profile_file: app_name::Builder, +} + +impl Builder { + #[doc(hidden)] + /// Configure the default chain + /// + /// Exposed for overriding the environment when unit-testing providers + pub fn configure(mut self, configuration: &ProviderConfig) -> Self { + self.env_provider = EnvironmentVariableAppNameProvider::new_with_env(configuration.env()); + self.profile_file = self.profile_file.configure(configuration); + self + } + + /// Override the profile name used by this provider + pub fn profile_name(mut self, name: &str) -> Self { + self.profile_file = self.profile_file.profile_name(name); + self + } + + /// Build an [`AppName`] from the default chain + pub async fn app_name(self) -> Option { + self.env_provider + .app_name() + .or(self.profile_file.build().app_name().await) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::provider_config::ProviderConfig; + use crate::test_case::no_traffic_connector; + use aws_types::os_shim_internal::{Env, Fs}; + + #[tokio::test] + async fn prefer_env_to_profile() { + let fs = Fs::from_slice(&[("test_config", "[default]\nsdk-ua-app-id = wrong")]); + let env = Env::from_slice(&[ + ("AWS_CONFIG_FILE", "test_config"), + ("AWS_SDK_UA_APP_ID", "correct"), + ]); + let app_name = Builder::default() + .configure( + &ProviderConfig::no_configuration() + .with_fs(fs) + .with_env(env) + .with_http_connector(no_traffic_connector()), + ) + .app_name() + .await; + + assert_eq!(Some(AppName::new("correct").unwrap()), app_name); + } + + #[tokio::test] + async fn load_from_profile() { + let fs = Fs::from_slice(&[("test_config", "[default]\nsdk-ua-app-id = correct")]); + let env = Env::from_slice(&[("AWS_CONFIG_FILE", "test_config")]); + let app_name = Builder::default() + .configure( + &ProviderConfig::empty() + .with_fs(fs) + .with_env(env) + .with_http_connector(no_traffic_connector()), + ) + .app_name() + .await; + + assert_eq!(Some(AppName::new("correct").unwrap()), app_name); + } +} diff --git a/patch/aws-config/src/default_provider/credentials.rs b/patch/aws-config/src/default_provider/credentials.rs new file mode 100644 index 0000000000000..16a79af63f79b --- /dev/null +++ b/patch/aws-config/src/default_provider/credentials.rs @@ -0,0 +1,506 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +use std::borrow::Cow; +use std::time::Duration; + +use aws_types::credentials::{self, future, ProvideCredentials}; +use tracing::Instrument; + +use crate::environment::credentials::EnvironmentVariableCredentialsProvider; +use crate::meta::credentials::{CredentialsProviderChain, LazyCachingCredentialsProvider}; +use crate::meta::region::ProvideRegion; +use crate::provider_config::ProviderConfig; + +#[cfg(any(feature = "rustls", feature = "native-tls"))] +/// Default Credentials Provider chain +/// +/// The region from the default region provider will be used +pub async fn default_provider() -> impl ProvideCredentials { + DefaultCredentialsChain::builder().build().await +} + +/// Default AWS Credential Provider Chain +/// +/// Resolution order: +/// 1. Environment variables: [`EnvironmentVariableCredentialsProvider`](crate::environment::EnvironmentVariableCredentialsProvider) +/// 2. Shared config (`~/.aws/config`, `~/.aws/credentials`): [`SharedConfigCredentialsProvider`](crate::profile::ProfileFileCredentialsProvider) +/// 3. [Web Identity Tokens](crate::web_identity_token) +/// 4. ECS (IAM Roles for Tasks) & General HTTP credentials: [`ecs`](crate::ecs) +/// 5. [EC2 IMDSv2](crate::imds) +/// +/// The outer provider is wrapped in a refreshing cache. +/// +/// More providers are a work in progress. +/// +/// # Examples +/// Create a default chain with a custom region: +/// ```no_run +/// use aws_types::region::Region; +/// use aws_config::default_provider::credentials::DefaultCredentialsChain; +/// let credentials_provider = DefaultCredentialsChain::builder() +/// .region(Region::new("us-west-1")) +/// .build(); +/// ``` +/// +/// Create a default chain with no overrides: +/// ```no_run +/// use aws_config::default_provider::credentials::DefaultCredentialsChain; +/// let credentials_provider = DefaultCredentialsChain::builder().build(); +/// ``` +/// +/// Create a default chain that uses a different profile: +/// ```no_run +/// use aws_config::default_provider::credentials::DefaultCredentialsChain; +/// let credentials_provider = DefaultCredentialsChain::builder() +/// .profile_name("otherprofile") +/// .build(); +/// ``` +#[derive(Debug)] +pub struct DefaultCredentialsChain(LazyCachingCredentialsProvider); + +impl DefaultCredentialsChain { + /// Builder for `DefaultCredentialsChain` + pub fn builder() -> Builder { + Builder::default() + } + + async fn credentials(&self) -> credentials::Result { + self.0 + .provide_credentials() + .instrument(tracing::info_span!("provide_credentials", provider = %"default_chain")) + .await + } +} + +impl ProvideCredentials for DefaultCredentialsChain { + fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials<'a> + where + Self: 'a, + { + future::ProvideCredentials::new(self.credentials()) + } +} + +/// Builder for [`DefaultCredentialsChain`](DefaultCredentialsChain) +#[derive(Default)] +pub struct Builder { + profile_file_builder: crate::profile::credentials::Builder, + web_identity_builder: crate::web_identity_token::Builder, + imds_builder: crate::imds::credentials::Builder, + ecs_builder: crate::ecs::Builder, + credential_cache: crate::meta::credentials::lazy_caching::Builder, + region_override: Option>, + region_chain: crate::default_provider::region::Builder, + conf: Option, +} + +impl Builder { + /// Sets the region used when making requests to AWS services + /// + /// When unset, the default region resolver chain will be used. 
+ pub fn region(mut self, region: impl ProvideRegion + 'static) -> Self { + self.set_region(Some(region)); + self + } + + /// Sets the region used when making requests to AWS services + /// + /// When unset, the default region resolver chain will be used. + pub fn set_region(&mut self, region: Option) -> &mut Self { + self.region_override = region.map(|provider| Box::new(provider) as _); + self + } + + /// Timeout for the entire credential loading chain. + /// + /// Defaults to 5 seconds. + pub fn load_timeout(mut self, timeout: Duration) -> Self { + self.set_load_timeout(Some(timeout)); + self + } + + /// Timeout for the entire credential loading chain. + /// + /// Defaults to 5 seconds. + pub fn set_load_timeout(&mut self, timeout: Option) -> &mut Self { + self.credential_cache.set_load_timeout(timeout); + self + } + + /// Amount of time before the actual credential expiration time + /// where credentials are considered expired. + /// + /// For example, if credentials are expiring in 15 minutes, and the buffer time is 10 seconds, + /// then any requests made after 14 minutes and 50 seconds will load new credentials. + /// + /// Defaults to 10 seconds. + pub fn buffer_time(mut self, buffer_time: Duration) -> Self { + self.set_buffer_time(Some(buffer_time)); + self + } + + /// Amount of time before the actual credential expiration time + /// where credentials are considered expired. + /// + /// For example, if credentials are expiring in 15 minutes, and the buffer time is 10 seconds, + /// then any requests made after 14 minutes and 50 seconds will load new credentials. + /// + /// Defaults to 10 seconds. + pub fn set_buffer_time(&mut self, buffer_time: Option) -> &mut Self { + self.credential_cache.set_buffer_time(buffer_time); + self + } + + /// Default expiration time to set on credentials if they don't have an expiration time. + /// + /// This is only used if the given [`ProvideCredentials`] returns + /// [`Credentials`](aws_types::Credentials) that don't have their `expiry` set. + /// This must be at least 15 minutes. + /// + /// Defaults to 15 minutes. + pub fn default_credential_expiration(mut self, duration: Duration) -> Self { + self.set_default_credential_expiration(Some(duration)); + self + } + + /// Default expiration time to set on credentials if they don't have an expiration time. + /// + /// This is only used if the given [`ProvideCredentials`] returns + /// [`Credentials`](aws_types::Credentials) that don't have their `expiry` set. + /// This must be at least 15 minutes. + /// + /// Defaults to 15 minutes. + pub fn set_default_credential_expiration(&mut self, duration: Option) -> &mut Self { + self.credential_cache + .set_default_credential_expiration(duration); + self + } + + /// Add an additional credential source for the ProfileProvider + /// + /// Assume role profiles may specify named credential sources: + /// ```ini + /// [default] + /// role_arn = arn:aws:iam::123456789:role/RoleA + /// credential_source = MyCustomProvider + /// ``` + /// + /// Typically, these are built-in providers like `Environment`, however, custom sources may + /// also be used. 
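+    ///
+    /// A sketch of registering such a source; the provider name and hard-coded credentials below
+    /// are purely illustrative (this assumes `aws_types::Credentials` can be used directly as a
+    /// hardcoded `ProvideCredentials` implementation):
+    /// ```no_run
+    /// # async fn docs() {
+    /// use aws_config::default_provider::credentials::DefaultCredentialsChain;
+    /// use aws_types::Credentials;
+    /// let chain = DefaultCredentialsChain::builder()
+    ///     .with_custom_credential_source(
+    ///         "MyCustomProvider",
+    ///         Credentials::new("akid", "secret", None, None, "my_custom_provider"),
+    ///     )
+    ///     .build()
+    ///     .await;
+    /// # }
+    /// ```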
+ /// + /// See [`with_custom_provider`](crate::profile::credentials::Builder::with_custom_provider) + pub fn with_custom_credential_source( + mut self, + name: impl Into>, + provider: impl ProvideCredentials + 'static, + ) -> Self { + self.profile_file_builder = self + .profile_file_builder + .with_custom_provider(name, provider); + self + } + + /// Override the profile name used by this provider + /// + /// When unset, the value of the `AWS_PROFILE` environment variable will be used. + pub fn profile_name(mut self, name: &str) -> Self { + self.profile_file_builder = self.profile_file_builder.profile_name(name); + self.region_chain = self.region_chain.profile_name(name); + self + } + + /// Override the configuration used for this provider + pub fn configure(mut self, config: ProviderConfig) -> Self { + self.region_chain = self.region_chain.configure(&config); + self.conf = Some(config); + self + } + + /// Creates a `DefaultCredentialsChain` + /// + /// ## Panics + /// This function will panic if no connector has been set and neither `rustls` and `native-tls` + /// features have both been disabled. + pub async fn build(self) -> DefaultCredentialsChain { + let region = match self.region_override { + Some(provider) => provider.region().await, + None => self.region_chain.build().region().await, + }; + + let conf = self.conf.unwrap_or_default().with_region(region); + + let env_provider = EnvironmentVariableCredentialsProvider::new_with_env(conf.env()); + let profile_provider = self.profile_file_builder.configure(&conf).build(); + let web_identity_token_provider = self.web_identity_builder.configure(&conf).build(); + let imds_provider = self.imds_builder.configure(&conf).build(); + let ecs_provider = self.ecs_builder.configure(&conf).build(); + + let provider_chain = CredentialsProviderChain::first_try("Environment", env_provider) + .or_else("Profile", profile_provider) + .or_else("WebIdentityToken", web_identity_token_provider) + .or_else("EcsContainer", ecs_provider) + .or_else("Ec2InstanceMetadata", imds_provider); + let cached_provider = self.credential_cache.configure(&conf).load(provider_chain); + + DefaultCredentialsChain(cached_provider.build()) + } +} + +#[cfg(test)] +mod test { + use tracing_test::traced_test; + + use aws_smithy_types::retry::{RetryConfig, RetryMode}; + use aws_types::credentials::ProvideCredentials; + use aws_types::os_shim_internal::{Env, Fs}; + + use crate::default_provider::credentials::DefaultCredentialsChain; + use crate::default_provider::retry_config; + use crate::provider_config::ProviderConfig; + use crate::test_case::TestEnvironment; + + /// Test generation macro + /// + /// # Examples + /// **Run the test case in `test-data/default-provider-chain/test_name` + /// ```no_run + /// make_test!(test_name); + /// ``` + /// + /// **Update (responses are replayed but new requests are recorded) the test case**: + /// ```no_run + /// make_test!(update: test_name) + /// ``` + /// + /// **Run the test case against a real HTTPS connection:** + /// > Note: Be careful to remove sensitive information before committing. Always use a temporary + /// > AWS account when recording live traffic. + /// ```no_run + /// make_test!(live: test_name) + /// ``` + macro_rules! 
make_test { + ($name: ident) => { + make_test!($name, execute); + }; + (update: $name:ident) => { + make_test!($name, execute_and_update); + }; + (live: $name:ident) => { + make_test!($name, execute_from_live_traffic); + }; + ($name: ident, $func: ident) => { + #[traced_test] + #[tokio::test] + async fn $name() { + crate::test_case::TestEnvironment::from_dir(concat!( + "./test-data/default-provider-chain/", + stringify!($name) + )) + .unwrap() + .$func(|conf| async { + crate::default_provider::credentials::Builder::default() + .configure(conf) + .build() + .await + }) + .await + } + }; + } + + make_test!(prefer_environment); + make_test!(profile_static_keys); + make_test!(web_identity_token_env); + make_test!(web_identity_source_profile_no_env); + make_test!(web_identity_token_invalid_jwt); + make_test!(web_identity_token_source_profile); + make_test!(web_identity_token_profile); + make_test!(profile_name); + make_test!(profile_overrides_web_identity); + make_test!(imds_token_fail); + + make_test!(imds_no_iam_role); + make_test!(imds_default_chain_error); + make_test!(imds_default_chain_success); + make_test!(imds_assume_role); + make_test!(imds_config_with_no_creds); + make_test!(imds_disabled); + make_test!(imds_default_chain_retries); + + make_test!(ecs_assume_role); + make_test!(ecs_credentials); + + make_test!(sso_assume_role); + make_test!(sso_no_token_file); + + #[tokio::test] + async fn profile_name_override() { + let (_, conf) = + TestEnvironment::from_dir("./test-data/default-provider-chain/profile_static_keys") + .unwrap() + .provider_config() + .await; + let provider = DefaultCredentialsChain::builder() + .profile_name("secondary") + .configure(conf) + .build() + .await; + let creds = provider + .provide_credentials() + .await + .expect("creds should load"); + assert_eq!(creds.access_key_id(), "correct_key_secondary"); + } + + #[tokio::test] + #[traced_test] + async fn no_providers_configured_err() { + use aws_smithy_async::rt::sleep::TokioSleep; + use aws_smithy_client::erase::boxclone::BoxCloneService; + use aws_smithy_client::never::NeverConnected; + use aws_types::credentials::CredentialsError; + use aws_types::os_shim_internal::TimeSource; + + tokio::time::pause(); + let conf = ProviderConfig::no_configuration() + .with_tcp_connector(BoxCloneService::new(NeverConnected::new())) + .with_time_source(TimeSource::real()) + .with_sleep(TokioSleep::new()); + let provider = DefaultCredentialsChain::builder() + .configure(conf) + .build() + .await; + let creds = provider + .provide_credentials() + .await + .expect_err("no providers enabled"); + assert!( + matches!(creds, CredentialsError::CredentialsNotLoaded { .. 
}), + "should be NotLoaded: {:?}", + creds + ) + } + + #[tokio::test] + async fn test_returns_default_retry_config_from_empty_profile() { + let env = Env::from_slice(&[("AWS_CONFIG_FILE", "config")]); + let fs = Fs::from_slice(&[("config", "[default]\n")]); + + let provider_config = ProviderConfig::no_configuration().with_env(env).with_fs(fs); + + let actual_retry_config = retry_config::default_provider() + .configure(&provider_config) + .retry_config() + .await; + + let expected_retry_config = RetryConfig::new(); + + assert_eq!(actual_retry_config, expected_retry_config); + // This is redundant but it's really important to make sure that + // we're setting these exact values by default so we check twice + assert_eq!(actual_retry_config.max_attempts(), 3); + assert_eq!(actual_retry_config.mode(), RetryMode::Standard); + } + + #[tokio::test] + async fn test_no_retry_config_in_empty_profile() { + let env = Env::from_slice(&[("AWS_CONFIG_FILE", "config")]); + let fs = Fs::from_slice(&[("config", "[default]\n")]); + + let provider_config = ProviderConfig::no_configuration().with_env(env).with_fs(fs); + + let actual_retry_config = retry_config::default_provider() + .configure(&provider_config) + .retry_config() + .await; + + let expected_retry_config = RetryConfig::new(); + + assert_eq!(actual_retry_config, expected_retry_config) + } + + #[tokio::test] + async fn test_creation_of_retry_config_from_profile() { + let env = Env::from_slice(&[("AWS_CONFIG_FILE", "config")]); + // TODO(https://github.com/awslabs/aws-sdk-rust/issues/247): standard is the default mode; + // this test would be better if it was setting it to adaptive mode + // adaptive mode is currently unsupported so that would panic + let fs = Fs::from_slice(&[( + "config", + // If the lines with the vars have preceding spaces, they don't get read + r#"[default] +max_attempts = 1 +retry_mode = standard + "#, + )]); + + let provider_config = ProviderConfig::no_configuration().with_env(env).with_fs(fs); + + let actual_retry_config = retry_config::default_provider() + .configure(&provider_config) + .retry_config() + .await; + + let expected_retry_config = RetryConfig::new() + .with_max_attempts(1) + .with_retry_mode(RetryMode::Standard); + + assert_eq!(actual_retry_config, expected_retry_config) + } + + #[tokio::test] + async fn test_env_retry_config_takes_precedence_over_profile_retry_config() { + let env = Env::from_slice(&[ + ("AWS_CONFIG_FILE", "config"), + ("AWS_MAX_ATTEMPTS", "42"), + ("AWS_RETRY_MODE", "standard"), + ]); + // TODO(https://github.com/awslabs/aws-sdk-rust/issues/247) standard is the default mode; + // this test would be better if it was setting it to adaptive mode + // adaptive mode is currently unsupported so that would panic + let fs = Fs::from_slice(&[( + "config", + // If the lines with the vars have preceding spaces, they don't get read + r#"[default] +max_attempts = 88 +retry_mode = standard + "#, + )]); + + let provider_config = ProviderConfig::no_configuration().with_env(env).with_fs(fs); + + let actual_retry_config = retry_config::default_provider() + .configure(&provider_config) + .retry_config() + .await; + + let expected_retry_config = RetryConfig::new() + .with_max_attempts(42) + .with_retry_mode(RetryMode::Standard); + + assert_eq!(actual_retry_config, expected_retry_config) + } + + #[tokio::test] + #[should_panic = "failed to parse max attempts set by aws profile: invalid digit found in string"] + async fn test_invalid_profile_retry_config_panics() { + let env = Env::from_slice(&[("AWS_CONFIG_FILE", 
"config")]); + let fs = Fs::from_slice(&[( + "config", + // If the lines with the vars have preceding spaces, they don't get read + r#"[default] +max_attempts = potato + "#, + )]); + + let provider_config = ProviderConfig::no_configuration().with_env(env).with_fs(fs); + + let _ = retry_config::default_provider() + .configure(&provider_config) + .retry_config() + .await; + } +} diff --git a/patch/aws-config/src/default_provider/region.rs b/patch/aws-config/src/default_provider/region.rs new file mode 100644 index 0000000000000..0da5d3936884b --- /dev/null +++ b/patch/aws-config/src/default_provider/region.rs @@ -0,0 +1,79 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use aws_types::region::Region; + +use crate::environment::region::EnvironmentVariableRegionProvider; +use crate::meta::region::{ProvideRegion, RegionProviderChain}; +use crate::provider_config::ProviderConfig; +use crate::{imds, profile}; + +/// Default Region Provider chain +/// +/// This provider will check the following sources in order: +/// 1. [Environment variables](EnvironmentVariableRegionProvider) +/// 2. [Profile file](crate::profile::region::ProfileFileRegionProvider) +/// 3. [EC2 IMDSv2](crate::imds::region) +pub fn default_provider() -> impl ProvideRegion { + Builder::default().build() +} + +/// Default region provider chain +#[derive(Debug)] +pub struct DefaultRegionChain(RegionProviderChain); + +impl DefaultRegionChain { + /// Load a region from this chain + pub async fn region(&self) -> Option { + self.0.region().await + } + + /// Builder for [`DefaultRegionChain`] + pub fn builder() -> Builder { + Builder::default() + } +} + +/// Builder for [DefaultRegionChain] +#[derive(Default)] +pub struct Builder { + env_provider: EnvironmentVariableRegionProvider, + profile_file: profile::region::Builder, + imds: imds::region::Builder, +} + +impl Builder { + #[doc(hidden)] + /// Configure the default chain + /// + /// Exposed for overriding the environment when unit-testing providers + pub fn configure(mut self, configuration: &ProviderConfig) -> Self { + self.env_provider = EnvironmentVariableRegionProvider::new_with_env(configuration.env()); + self.profile_file = self.profile_file.configure(configuration); + self.imds = self.imds.configure(configuration); + self + } + + /// Override the profile name used by this provider + pub fn profile_name(mut self, name: &str) -> Self { + self.profile_file = self.profile_file.profile_name(name); + self + } + + /// Build a [DefaultRegionChain] + pub fn build(self) -> DefaultRegionChain { + DefaultRegionChain( + RegionProviderChain::first_try(self.env_provider) + .or_else(self.profile_file.build()) + .or_else(self.imds.build()), + ) + } +} + +impl ProvideRegion for DefaultRegionChain { + fn region(&self) -> crate::meta::region::future::ProvideRegion { + ProvideRegion::region(&self.0) + } +} diff --git a/patch/aws-config/src/default_provider/retry_config.rs b/patch/aws-config/src/default_provider/retry_config.rs new file mode 100644 index 0000000000000..d71bb891c8b00 --- /dev/null +++ b/patch/aws-config/src/default_provider/retry_config.rs @@ -0,0 +1,104 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +use aws_smithy_types::retry::RetryConfig; + +use crate::environment::retry_config::EnvironmentVariableRetryConfigProvider; +use crate::profile; +use crate::provider_config::ProviderConfig; + +/// Default RetryConfig Provider chain +/// +/// Unlike other "providers" `RetryConfig` has no related `RetryConfigProvider` trait. Instead, +/// a builder struct is returned which has a similar API. +/// +/// This provider will check the following sources in order: +/// 1. [Environment variables](EnvironmentVariableRetryConfigProvider) +/// 2. [Profile file](crate::profile::retry_config::ProfileFileRetryConfigProvider) +/// +/// # Example +/// +/// When running [`aws_config::from_env()`](crate::from_env()), a [`ConfigLoader`](crate::ConfigLoader) +/// is created that will then create a [`RetryConfig`] from the default_provider. There is no +/// need to call `default_provider` and the example below is only for illustration purposes. +/// +/// ```no_run +/// # use std::error::Error; +/// # #[tokio::main] +/// # async fn main() -> Result<(), Box> { +/// use aws_config::default_provider::retry_config; +/// +/// // Load a retry config from a specific profile +/// let retry_config = retry_config::default_provider() +/// .profile_name("other_profile") +/// .retry_config() +/// .await; +/// let config = aws_config::from_env() +/// // Override the retry config set by the default profile +/// .retry_config(retry_config) +/// .load() +/// .await; +/// // instantiate a service client: +/// // ::Client::new(&config); +/// # Ok(()) +/// # } +/// ``` +pub fn default_provider() -> Builder { + Builder::default() +} + +/// Builder for RetryConfig that checks the environment and aws profile for configuration +#[derive(Default)] +pub struct Builder { + env_provider: EnvironmentVariableRetryConfigProvider, + profile_file: profile::retry_config::Builder, +} + +impl Builder { + /// Configure the default chain + /// + /// Exposed for overriding the environment when unit-testing providers + pub fn configure(mut self, configuration: &ProviderConfig) -> Self { + self.env_provider = + EnvironmentVariableRetryConfigProvider::new_with_env(configuration.env()); + self.profile_file = self.profile_file.configure(configuration); + self + } + + /// Override the profile name used by this provider + pub fn profile_name(mut self, name: &str) -> Self { + self.profile_file = self.profile_file.profile_name(name); + self + } + + /// Attempt to create a [RetryConfig](aws_smithy_types::retry::RetryConfig) from following sources in order: + /// 1. [Environment variables](crate::environment::retry_config::EnvironmentVariableRetryConfigProvider) + /// 2. [Profile file](crate::profile::retry_config::ProfileFileRetryConfigProvider) + /// 3. 
[RetryConfig::default()](aws_smithy_types::retry::RetryConfig::default) + /// + /// Precedence is considered on a per-field basis + /// + /// # Panics + /// + /// - Panics if the `AWS_MAX_ATTEMPTS` env var or `max_attempts` profile var is set to 0 + /// - Panics if the `AWS_RETRY_MODE` env var or `retry_mode` profile var is set to "adaptive" (it's not yet supported) + pub async fn retry_config(self) -> RetryConfig { + // Both of these can return errors due to invalid config settings and we want to surface those as early as possible + // hence, we'll panic if any config values are invalid (missing values are OK though) + // We match this instead of unwrapping so we can print the error with the `Display` impl instead of the `Debug` impl that unwrap uses + let builder_from_env = match self.env_provider.retry_config_builder() { + Ok(retry_config_builder) => retry_config_builder, + Err(err) => panic!("{}", err), + }; + let builder_from_profile = match self.profile_file.build().retry_config_builder().await { + Ok(retry_config_builder) => retry_config_builder, + Err(err) => panic!("{}", err), + }; + + builder_from_env + .take_unset_from(builder_from_profile) + .build() + } +} diff --git a/patch/aws-config/src/default_provider/timeout_config.rs b/patch/aws-config/src/default_provider/timeout_config.rs new file mode 100644 index 0000000000000..f733237832cc5 --- /dev/null +++ b/patch/aws-config/src/default_provider/timeout_config.rs @@ -0,0 +1,97 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use aws_smithy_types::timeout; + +use crate::environment::timeout_config::EnvironmentVariableTimeoutConfigProvider; +use crate::profile; +use crate::provider_config::ProviderConfig; + +/// Default [`timeout::Config`] Provider chain +/// +/// Unlike other credentials and region, [`timeout::Config`] has no related `TimeoutConfigProvider` trait. Instead, +/// a builder struct is returned which has a similar API. +/// +/// This provider will check the following sources in order: +/// 1. [Environment variables](EnvironmentVariableTimeoutConfigProvider) +/// 2. 
[Profile file](crate::profile::timeout_config::ProfileFileTimeoutConfigProvider) (`~/.aws/config`) +/// +/// # Example +/// +/// ```no_run +/// # use std::error::Error; +/// # #[tokio::main] +/// # async fn main() { +/// use aws_config::default_provider::timeout_config; +/// +/// // Load a timeout config from a specific profile +/// let timeout_config = timeout_config::default_provider() +/// .profile_name("other_profile") +/// .timeout_config() +/// .await; +/// let config = aws_config::from_env() +/// // Override the timeout config set by the default profile +/// .timeout_config(timeout_config) +/// .load() +/// .await; +/// // instantiate a service client: +/// // ::Client::new(&config); +/// # } +/// ``` +pub fn default_provider() -> Builder { + Builder::default() +} + +/// Builder for [`timeout::Config`](aws_smithy_types::timeout::Config) that checks the environment variables and AWS profile files for configuration +#[derive(Default)] +pub struct Builder { + env_provider: EnvironmentVariableTimeoutConfigProvider, + profile_file: profile::timeout_config::Builder, +} + +impl Builder { + /// Configure the default chain + /// + /// Exposed for overriding the environment when unit-testing providers + pub fn configure(mut self, configuration: &ProviderConfig) -> Self { + self.env_provider = + EnvironmentVariableTimeoutConfigProvider::new_with_env(configuration.env()); + self.profile_file = self.profile_file.configure(configuration); + self + } + + /// Override the profile name used by this provider + pub fn profile_name(mut self, name: &str) -> Self { + self.profile_file = self.profile_file.profile_name(name); + self + } + + /// Attempt to create a [`timeout::Config`](aws_smithy_types::timeout::Config) from following sources in order: + /// 1. [Environment variables](crate::environment::timeout_config::EnvironmentVariableTimeoutConfigProvider) + /// 2. [Profile file](crate::profile::timeout_config::ProfileFileTimeoutConfigProvider) + /// + /// Precedence is considered on a per-field basis. If no timeout is specified, requests will never time out. + /// + /// # Panics + /// + /// This will panic if: + /// - a timeout is set to `NaN`, a negative number, or infinity + /// - a timeout can't be parsed as a floating point number + pub async fn timeout_config(self) -> timeout::Config { + // Both of these can return errors due to invalid config settings and we want to surface those as early as possible + // hence, we'll panic if any config values are invalid (missing values are OK though) + // We match this instead of unwrapping so we can print the error with the `Display` impl instead of the `Debug` impl that unwrap uses + let builder_from_env = match self.env_provider.timeout_config() { + Ok(timeout_config_builder) => timeout_config_builder, + Err(err) => panic!("{}", err), + }; + let builder_from_profile = match self.profile_file.build().timeout_config().await { + Ok(timeout_config_builder) => timeout_config_builder, + Err(err) => panic!("{}", err), + }; + + builder_from_env.take_unset_from(builder_from_profile) + } +} diff --git a/patch/aws-config/src/ecs.rs b/patch/aws-config/src/ecs.rs new file mode 100644 index 0000000000000..4b7f1f487a815 --- /dev/null +++ b/patch/aws-config/src/ecs.rs @@ -0,0 +1,750 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Ecs Credentials Provider +//! +//! This credential provider is frequently used with an AWS-provided credentials service (e.g. +//! 
[IAM Roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)). +//! However, it's possible to use environment variables to configure this provider to use your own +//! credentials sources. +//! +//! This provider is part of the [default credentials chain](crate::default_provider::credentials). +//! +//! ## Configuration +//! **First**: It will check the value of `$AWS_CONTAINER_CREDENTIALS_RELATIVE_URI`. It will use this +//! to construct a URI rooted at `http://169.254.170.2`. For example, if the value of the environment +//! variable was `/credentials`, the SDK would look for credentials at `http://169.254.170.2/credentials`. +//! +//! **Next**: It wil check the value of `$AWS_CONTAINER_CREDENTIALS_FULL_URI`. This specifies the full +//! URL to load credentials. The URL MUST satisfy one of the following two properties: +//! 1. The URL begins with `https` +//! 2. The URL refers to a loopback device. If a URL contains a domain name instead of an IP address, +//! a DNS lookup will be performed. ALL resolved IP addresses MUST refer to a loopback interface, or +//! the credentials provider will return `CredentialsError::InvalidConfiguration` +//! +//! **Finally**: It will check the value of `$AWS_CONTAINER_AUTHORIZATION_TOKEN`. If this is set, the +//! value will be passed in the `Authorization` header. +//! +//! ## Credentials Format +//! Credentials MUST be returned in a JSON format: +//! ```json +//! { +//! "AccessKeyId" : "MUA...", +//! "SecretAccessKey" : "/7PC5om....", +//! "Token" : "AQoDY....=", +//! "Expiration" : "2016-02-25T06:03:31Z" +//! } +//! ``` +//! +//! Credentials errors MAY be returned with a `code` and `message` field: +//! ```json +//! { +//! "code": "ErrorCode", +//! "message": "Helpful error message." +//! } +//! ``` + +use std::error::Error; +use std::fmt::{Display, Formatter}; +use std::io; +use std::net::IpAddr; + +use aws_smithy_client::erase::boxclone::BoxCloneService; +use aws_smithy_http::endpoint::Endpoint; +use aws_types::credentials; +use aws_types::credentials::{future, CredentialsError, ProvideCredentials}; +use http::uri::{InvalidUri, Scheme}; +use http::{HeaderValue, Uri}; +use tower::{Service, ServiceExt}; + +use crate::http_credential_provider::HttpCredentialProvider; +use crate::provider_config::ProviderConfig; +use aws_types::os_shim_internal::Env; +use http::header::InvalidHeaderValue; +use std::time::Duration; +use tokio::sync::OnceCell; + +// URL from https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html +const BASE_HOST: &str = "http://169.254.170.2"; +const ENV_RELATIVE_URI: &str = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"; +const ENV_FULL_URI: &str = "AWS_CONTAINER_CREDENTIALS_FULL_URI"; +const ENV_AUTHORIZATION: &str = "AWS_CONTAINER_AUTHORIZATION_TOKEN"; + +/// Credential provider for ECS and generalized HTTP credentials +/// +/// See the [module](crate::ecs) documentation for more details. +/// +/// This credential provider is part of the default chain. 
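+///
+/// # Example
+///
+/// A minimal sketch of constructing the provider directly rather than reaching it through the
+/// default chain. It assumes the container runtime has already exported
+/// `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI` (or one of the other variables described above).
+///
+/// ```no_run
+/// use aws_config::ecs::EcsCredentialsProvider;
+/// use aws_types::credentials::ProvideCredentials;
+/// # async fn docs() {
+/// // The environment is read lazily, when credentials are first requested.
+/// let provider = EcsCredentialsProvider::builder().build();
+/// let _creds = provider.provide_credentials().await;
+/// # }
+/// ```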
+#[derive(Debug)] +pub struct EcsCredentialsProvider { + inner: OnceCell, + env: Env, + builder: Builder, +} + +impl EcsCredentialsProvider { + /// Builder for [`EcsCredentialsProvider`] + pub fn builder() -> Builder { + Builder::default() + } + + /// Load credentials from this credentials provider + pub async fn credentials(&self) -> credentials::Result { + let auth = match self.env.get(ENV_AUTHORIZATION).ok() { + Some(auth) => Some(HeaderValue::from_str(&auth).map_err(|err| { + tracing::warn!(token = %auth, "invalid auth token"); + CredentialsError::invalid_configuration(EcsConfigurationErr::InvalidAuthToken { + err, + value: auth, + }) + })?), + None => None, + }; + match self.provider().await { + Provider::NotConfigured => { + Err(CredentialsError::not_loaded("ECS provider not configured")) + } + Provider::InvalidConfiguration(err) => { + Err(CredentialsError::invalid_configuration(format!("{}", err))) + } + Provider::Configured(provider) => provider.credentials(auth).await, + } + } + + async fn provider(&self) -> &Provider { + self.inner + .get_or_init(|| Provider::make(self.builder.clone())) + .await + } +} + +impl ProvideCredentials for EcsCredentialsProvider { + fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials<'a> + where + Self: 'a, + { + future::ProvideCredentials::new(self.credentials()) + } +} + +/// Inner Provider that can record failed configuration state +#[derive(Debug)] +enum Provider { + Configured(HttpCredentialProvider), + NotConfigured, + InvalidConfiguration(EcsConfigurationErr), +} + +impl Provider { + async fn uri(env: Env, dns: Option) -> Result { + let relative_uri = env.get(ENV_RELATIVE_URI).ok(); + let full_uri = env.get(ENV_FULL_URI).ok(); + if let Some(relative_uri) = relative_uri { + Self::build_full_uri(relative_uri) + } else if let Some(full_uri) = full_uri { + let mut dns = dns.or_else(tokio_dns); + validate_full_uri(&full_uri, dns.as_mut()) + .await + .map_err(|err| EcsConfigurationErr::InvalidFullUri { err, uri: full_uri }) + } else { + Err(EcsConfigurationErr::NotConfigured) + } + } + + pub async fn make(builder: Builder) -> Self { + let provider_config = builder.provider_config.unwrap_or_default(); + let env = provider_config.env(); + let uri = match Self::uri(env, builder.dns).await { + Ok(uri) => uri, + Err(EcsConfigurationErr::NotConfigured) => return Provider::NotConfigured, + Err(err) => return Provider::InvalidConfiguration(err), + }; + let http_provider = HttpCredentialProvider::builder() + .configure(&provider_config) + .connect_timeout(builder.connect_timeout) + .read_timeout(builder.read_timeout) + .build("EcsContainer", uri); + Provider::Configured(http_provider) + } + + fn build_full_uri(relative_uri: String) -> Result { + let mut relative_uri = match relative_uri.parse::() { + Ok(uri) => uri, + Err(invalid_uri) => { + tracing::warn!(uri = ?invalid_uri, "invalid URI loaded from environment"); + return Err(EcsConfigurationErr::InvalidRelativeUri { + err: invalid_uri, + uri: relative_uri, + }); + } + }; + let endpoint = Endpoint::immutable(Uri::from_static(BASE_HOST)); + endpoint.set_endpoint(&mut relative_uri, None); + Ok(relative_uri) + } +} + +#[derive(Debug)] +enum EcsConfigurationErr { + InvalidRelativeUri { + err: InvalidUri, + uri: String, + }, + InvalidFullUri { + err: InvalidFullUriError, + uri: String, + }, + InvalidAuthToken { + err: InvalidHeaderValue, + value: String, + }, + NotConfigured, +} + +impl Display for EcsConfigurationErr { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + 
EcsConfigurationErr::InvalidRelativeUri { err, uri } => write!( + f, + "invalid relative URI for ECS provider ({}): {}", + err, uri + ), + EcsConfigurationErr::InvalidFullUri { err, uri } => { + write!(f, "invalid full URI for ECS provider ({}): {}", err, uri) + } + EcsConfigurationErr::NotConfigured => write!( + f, + "No environment variables were set to configure ECS provider" + ), + EcsConfigurationErr::InvalidAuthToken { err, value } => write!( + f, + "`{}` could not be used as a header value for the auth token. {}", + value, err + ), + } + } +} + +impl Error for EcsConfigurationErr { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match &self { + EcsConfigurationErr::InvalidRelativeUri { err, .. } => Some(err), + EcsConfigurationErr::InvalidFullUri { err, .. } => Some(err), + _ => None, + } + } +} + +/// Builder for [`EcsCredentialsProvider`] +#[derive(Default, Debug, Clone)] +pub struct Builder { + provider_config: Option, + dns: Option, + connect_timeout: Option, + read_timeout: Option, +} + +impl Builder { + /// Override the configuration used for this provider + pub fn configure(mut self, provider_config: &ProviderConfig) -> Self { + self.provider_config = Some(provider_config.clone()); + self + } + + /// Override the DNS resolver used to validate URIs + /// + /// URIs must refer to loopback addresses. The `DnsService` is used to retrieve IP addresses for + /// a given domain. + pub fn dns(mut self, dns: DnsService) -> Self { + self.dns = Some(dns); + self + } + + /// Override the connect timeout for the HTTP client + /// + /// This value defaults to 2 seconds + pub fn connect_timeout(mut self, timeout: Duration) -> Self { + self.connect_timeout = Some(timeout); + self + } + + /// Override the read timeout for the HTTP client + /// + /// This value defaults to 5 seconds + pub fn read_timeout(mut self, timeout: Duration) -> Self { + self.read_timeout = Some(timeout); + self + } + + /// Create an [`EcsCredentialsProvider`] from this builder + pub fn build(self) -> EcsCredentialsProvider { + let env = self + .provider_config + .as_ref() + .map(|config| config.env()) + .unwrap_or_default(); + EcsCredentialsProvider { + inner: OnceCell::new(), + env, + builder: self, + } + } +} + +/// Invalid Full URI +/// +/// When the full URI setting is used, the URI must either be HTTPS or point to a loopback interface. +#[derive(Debug)] +#[non_exhaustive] +pub enum InvalidFullUriError { + /// The provided URI could not be parsed as a URI + #[non_exhaustive] + InvalidUri(InvalidUri), + + /// No Dns service was provided + #[non_exhaustive] + NoDnsService, + + /// The URI did not specify a host + #[non_exhaustive] + MissingHost, + + /// The URI did not refer to the loopback interface + #[non_exhaustive] + NotLoopback, + + /// DNS lookup failed when attempting to resolve the host to an IP Address for validation. + DnsLookupFailed(io::Error), +} + +impl Display for InvalidFullUriError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + InvalidFullUriError::InvalidUri(err) => write!(f, "URI was invalid: {}", err), + InvalidFullUriError::MissingHost => write!(f, "URI did not specify a host"), + InvalidFullUriError::NotLoopback => { + write!(f, "URI did not refer to the loopback interface") + } + InvalidFullUriError::DnsLookupFailed(err) => { + write!( + f, + "failed to perform DNS lookup while validating URI: {}", + err + ) + } + InvalidFullUriError::NoDnsService => write!(f, "No DNS service was provided. 
Enable `rt-tokio` or provide a `dns` service to the builder.") + } + } +} + +impl Error for InvalidFullUriError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + InvalidFullUriError::InvalidUri(err) => Some(err), + InvalidFullUriError::DnsLookupFailed(err) => Some(err), + _ => None, + } + } +} + +/// Dns resolver interface +pub type DnsService = BoxCloneService, io::Error>; + +/// Validate that `uri` is valid to be used as a full provider URI +/// Either: +/// 1. The URL is uses `https` +/// 2. The URL refers to a loopback device. If a URL contains a domain name instead of an IP address, +/// a DNS lookup will be performed. ALL resolved IP addresses MUST refer to a loopback interface, or +/// the credentials provider will return `CredentialsError::InvalidConfiguration` +async fn validate_full_uri( + uri: &str, + dns: Option<&mut DnsService>, +) -> Result { + let uri = uri + .parse::() + .map_err(InvalidFullUriError::InvalidUri)?; + if uri.scheme() == Some(&Scheme::HTTPS) { + return Ok(uri); + } + // For HTTP URIs, we need to validate that it points to a loopback address + let host = uri.host().ok_or(InvalidFullUriError::MissingHost)?; + let is_loopback = match host.parse::() { + Ok(addr) => addr.is_loopback(), + Err(_domain_name) => { + let dns = dns.ok_or(InvalidFullUriError::NoDnsService)?; + dns.ready().await.map_err(InvalidFullUriError::DnsLookupFailed)? + .call(host.to_owned()) + .await + .map_err(InvalidFullUriError::DnsLookupFailed)? + .iter() + .all(|addr| { + if !addr.is_loopback() { + tracing::warn!( + addr = ?addr, + "HTTP credential provider cannot be used: Address does not resolve to the loopback interface." + ) + }; + addr.is_loopback() + }) + } + }; + match is_loopback { + true => Ok(uri), + false => Err(InvalidFullUriError::NotLoopback), + } +} + +#[cfg(not(feature = "rt-tokio"))] +fn tokio_dns() -> Option { + None +} + +/// DNS resolver that uses tokio::spawn_blocking +/// +/// DNS resolution is required to validate that provided URIs point to the loopback interface +#[cfg(feature = "rt-tokio")] +fn tokio_dns() -> Option { + use aws_smithy_client::erase::boxclone::BoxFuture; + use std::io::ErrorKind; + use std::net::ToSocketAddrs; + use std::task::{Context, Poll}; + + #[derive(Clone)] + struct TokioDns; + impl Service for TokioDns { + type Response = Vec; + type Error = io::Error; + type Future = BoxFuture; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: String) -> Self::Future { + Box::pin(async move { + let result = tokio::task::spawn_blocking(move || (req, 0).to_socket_addrs()).await; + match result { + Err(join_failure) => Err(io::Error::new(ErrorKind::Other, join_failure)), + Ok(Ok(dns_result)) => { + Ok(dns_result.into_iter().map(|addr| addr.ip()).collect()) + } + Ok(Err(dns_failure)) => Err(dns_failure), + } + }) + } + } + Some(BoxCloneService::new(TokioDns)) +} + +#[cfg(test)] +mod test { + use aws_smithy_client::erase::boxclone::BoxCloneService; + use aws_smithy_client::never::NeverService; + use futures_util::FutureExt; + use http::Uri; + use serde::Deserialize; + use tracing_test::traced_test; + + use crate::ecs::{ + tokio_dns, validate_full_uri, Builder, EcsCredentialsProvider, InvalidFullUriError, + Provider, + }; + use crate::provider_config::ProviderConfig; + use crate::test_case::GenericTestResult; + + use aws_types::credentials::ProvideCredentials; + use aws_types::os_shim_internal::Env; + use aws_types::Credentials; + + use aws_smithy_async::rt::sleep::TokioSleep; 
+ use aws_smithy_client::erase::DynConnector; + use aws_smithy_client::test_connection::TestConnection; + use aws_smithy_http::body::SdkBody; + use http::header::AUTHORIZATION; + use std::collections::HashMap; + use std::error::Error; + use std::future::Ready; + use std::io; + use std::net::IpAddr; + use std::task::{Context, Poll}; + use std::time::{Duration, UNIX_EPOCH}; + use tower::Service; + + fn provider(env: Env, connector: DynConnector) -> EcsCredentialsProvider { + let provider_config = ProviderConfig::empty() + .with_env(env) + .with_http_connector(connector) + .with_sleep(TokioSleep::new()); + Builder::default().configure(&provider_config).build() + } + + #[derive(Deserialize)] + struct EcsUriTest { + env: HashMap, + result: GenericTestResult, + } + + impl EcsUriTest { + async fn check(&self) { + let env = Env::from(self.env.clone()); + let uri = Provider::uri(env, Some(BoxCloneService::new(TestDns::default()))) + .await + .map(|uri| uri.to_string()); + self.result.assert_matches(uri); + } + } + + #[tokio::test] + async fn run_config_tests() -> Result<(), Box> { + let test_cases = std::fs::read_to_string("test-data/ecs-tests.json")?; + #[derive(Deserialize)] + struct TestCases { + tests: Vec, + } + + let test_cases: TestCases = serde_json::from_str(&test_cases)?; + let test_cases = test_cases.tests; + for test in test_cases { + test.check().await + } + Ok(()) + } + + #[test] + fn validate_uri_https() { + // over HTTPs, any URI is fine + let never = NeverService::new(); + let mut dns = Some(BoxCloneService::new(never)); + assert_eq!( + validate_full_uri("https://amazon.com", None) + .now_or_never() + .unwrap() + .expect("valid"), + Uri::from_static("https://amazon.com") + ); + // over HTTP, it will try to lookup + assert!( + validate_full_uri("http://amazon.com", dns.as_mut()) + .now_or_never() + .is_none(), + "DNS lookup should occur, but it will never return" + ); + + let no_dns_error = validate_full_uri("http://amazon.com", None) + .now_or_never() + .unwrap() + .expect_err("DNS service is required"); + assert!( + matches!(no_dns_error, InvalidFullUriError::NoDnsService), + "expected no dns service, got: {}", + no_dns_error + ); + } + + #[test] + fn valid_uri_loopback() { + assert_eq!( + validate_full_uri("http://127.0.0.1:8080/get-credentials", None) + .now_or_never() + .unwrap() + .expect("valid uri"), + Uri::from_static("http://127.0.0.1:8080/get-credentials") + ); + + let err = validate_full_uri("http://192.168.10.120/creds", None) + .now_or_never() + .unwrap() + .expect_err("not a loopback"); + assert!(matches!(err, InvalidFullUriError::NotLoopback)); + } + + #[test] + fn all_addrs_local() { + let svc = TestDns::with_fallback(vec![ + "127.0.0.1".parse().unwrap(), + "127.0.0.2".parse().unwrap(), + ]); + let mut svc = Some(BoxCloneService::new(svc)); + let resp = validate_full_uri("http://localhost:8888", svc.as_mut()) + .now_or_never() + .unwrap(); + assert!(resp.is_ok(), "Should be valid: {:?}", resp); + } + + #[test] + fn all_addrs_not_local() { + let svc = TestDns::with_fallback(vec![ + "127.0.0.1".parse().unwrap(), + "192.168.0.1".parse().unwrap(), + ]); + let mut svc = Some(BoxCloneService::new(svc)); + let resp = validate_full_uri("http://localhost:8888", svc.as_mut()) + .now_or_never() + .unwrap(); + assert!( + matches!(resp, Err(InvalidFullUriError::NotLoopback)), + "Should be invalid: {:?}", + resp + ); + } + + fn creds_request(uri: &str, auth: Option<&str>) -> http::Request { + let mut builder = http::Request::builder(); + if let Some(auth) = auth { + builder = 
builder.header(AUTHORIZATION, auth); + } + builder.uri(uri).body(SdkBody::empty()).unwrap() + } + + fn ok_creds_response() -> http::Response { + http::Response::builder() + .status(200) + .body(SdkBody::from( + r#" { + "AccessKeyId" : "AKID", + "SecretAccessKey" : "SECRET", + "Token" : "TOKEN....=", + "Expiration" : "2009-02-13T23:31:30Z" + }"#, + )) + .unwrap() + } + + #[track_caller] + fn assert_correct(creds: Credentials) { + assert_eq!(creds.access_key_id(), "AKID"); + assert_eq!(creds.secret_access_key(), "SECRET"); + assert_eq!(creds.session_token().unwrap(), "TOKEN....="); + assert_eq!( + creds.expiry().unwrap(), + UNIX_EPOCH + Duration::from_secs(1234567890) + ); + } + + #[tokio::test] + async fn load_valid_creds_auth() { + let env = Env::from_slice(&[ + ("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/credentials"), + ("AWS_CONTAINER_AUTHORIZATION_TOKEN", "Basic password"), + ]); + let connector = TestConnection::new(vec![( + creds_request("http://169.254.170.2/credentials", Some("Basic password")), + ok_creds_response(), + )]); + let provider = provider(env, DynConnector::new(connector.clone())); + let creds = provider + .provide_credentials() + .await + .expect("valid credentials"); + assert_correct(creds); + connector.assert_requests_match(&[]); + } + + #[tokio::test] + async fn retry_5xx() { + let env = Env::from_slice(&[("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/credentials")]); + let connector = TestConnection::new(vec![ + ( + creds_request("http://169.254.170.2/credentials", None), + http::Response::builder() + .status(500) + .body(SdkBody::empty()) + .unwrap(), + ), + ( + creds_request("http://169.254.170.2/credentials", None), + ok_creds_response(), + ), + ]); + tokio::time::pause(); + let provider = provider(env, DynConnector::new(connector.clone())); + let creds = provider + .provide_credentials() + .await + .expect("valid credentials"); + assert_correct(creds); + } + + #[tokio::test] + async fn load_valid_creds_no_auth() { + let env = Env::from_slice(&[("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/credentials")]); + let connector = TestConnection::new(vec![( + creds_request("http://169.254.170.2/credentials", None), + ok_creds_response(), + )]); + let provider = provider(env, DynConnector::new(connector.clone())); + let creds = provider + .provide_credentials() + .await + .expect("valid credentials"); + assert_correct(creds); + connector.assert_requests_match(&[]); + } + + // ignored by default because it relies on actual DNS resolution + #[allow(unused_attributes)] + #[tokio::test] + #[traced_test] + #[ignore] + async fn real_dns_lookup() { + let mut dns = Some(tokio_dns().expect("feature must be enabled")); + let err = validate_full_uri("http://www.amazon.com/creds", dns.as_mut()) + .await + .expect_err("not a loopback"); + assert!(matches!(err, InvalidFullUriError::NotLoopback), "{:?}", err); + assert!(logs_contain( + "Address does not resolve to the loopback interface" + )); + validate_full_uri("http://localhost:8888/creds", dns.as_mut()) + .await + .expect("localhost is the loopback interface"); + } + + /// TestService which always returns the same IP addresses + #[derive(Clone)] + struct TestDns { + addrs: HashMap>, + fallback: Vec, + } + + /// Default that returns a loopback for `localhost` and a non-loopback for all other hostnames + impl Default for TestDns { + fn default() -> Self { + let mut addrs = HashMap::new(); + addrs.insert( + "localhost".into(), + vec!["127.0.0.1".parse().unwrap(), "127.0.0.2".parse().unwrap()], + ); + TestDns { + addrs, + // 
non-loopback address + fallback: vec!["72.21.210.29".parse().unwrap()], + } + } + } + + impl TestDns { + fn with_fallback(fallback: Vec) -> Self { + TestDns { + addrs: Default::default(), + fallback, + } + } + } + + impl Service for TestDns { + type Response = Vec; + type Error = io::Error; + type Future = Ready>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: String) -> Self::Future { + std::future::ready(Ok(self.addrs.get(&_req).unwrap_or(&self.fallback).clone())) + } + } +} diff --git a/patch/aws-config/src/environment/app_name.rs b/patch/aws-config/src/environment/app_name.rs new file mode 100644 index 0000000000000..52033723db4bd --- /dev/null +++ b/patch/aws-config/src/environment/app_name.rs @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use aws_types::app_name::AppName; +use aws_types::os_shim_internal::Env; + +/// Load an app name from the `AWS_SDK_UA_APP_ID` environment variable. +#[derive(Debug, Default)] +pub struct EnvironmentVariableAppNameProvider { + env: Env, +} + +impl EnvironmentVariableAppNameProvider { + /// Create a new `EnvironmentVariableAppNameProvider` + pub fn new() -> Self { + Self { env: Env::real() } + } + + #[doc(hidden)] + /// Create an region provider from a given `Env` + /// + /// This method is used for tests that need to override environment variables. + pub fn new_with_env(env: Env) -> Self { + Self { env } + } + + /// Attempts to create an `AppName` from the `AWS_SDK_UA_APP_ID` environment variable. + pub fn app_name(&self) -> Option { + if let Ok(name) = self.env.get("AWS_SDK_UA_APP_ID") { + match AppName::new(name) { + Ok(name) => Some(name), + Err(err) => { + tracing::warn!(err = %err, "`AWS_SDK_UA_APP_ID` environment variable value was invalid"); + None + } + } + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use crate::environment::EnvironmentVariableAppNameProvider; + use aws_types::app_name::AppName; + use aws_types::os_shim_internal::Env; + use std::collections::HashMap; + + #[test] + fn env_var_not_set() { + let provider = EnvironmentVariableAppNameProvider::new_with_env(Env::from(HashMap::new())); + assert_eq!(None, provider.app_name()); + } + + #[test] + fn env_var_set() { + let provider = EnvironmentVariableAppNameProvider::new_with_env(Env::from( + vec![("AWS_SDK_UA_APP_ID".to_string(), "something".to_string())] + .into_iter() + .collect::>(), + )); + assert_eq!( + Some(AppName::new("something").unwrap()), + provider.app_name() + ); + } +} diff --git a/patch/aws-config/src/environment/credentials.rs b/patch/aws-config/src/environment/credentials.rs new file mode 100644 index 0000000000000..aff4c05ee4834 --- /dev/null +++ b/patch/aws-config/src/environment/credentials.rs @@ -0,0 +1,193 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +use std::env::VarError; + +use aws_types::credentials::future; +use aws_types::credentials::{CredentialsError, ProvideCredentials}; +use aws_types::os_shim_internal::Env; +use aws_types::{credentials, Credentials}; + +/// Load Credentials from Environment Variables +/// +/// `EnvironmentVariableCredentialsProvider` uses the following variables: +/// - `AWS_ACCESS_KEY_ID` +/// - `AWS_SECRET_ACCESS_KEY` with fallback to `SECRET_ACCESS_KEY` +/// - `AWS_SESSION_TOKEN` +#[derive(Debug, Clone)] +pub struct EnvironmentVariableCredentialsProvider { + env: Env, +} + +impl EnvironmentVariableCredentialsProvider { + fn credentials(&self) -> credentials::Result { + let access_key = self.env.get("AWS_ACCESS_KEY_ID").map_err(to_cred_error)?; + let secret_key = self + .env + .get("AWS_SECRET_ACCESS_KEY") + .or_else(|_| self.env.get("SECRET_ACCESS_KEY")) + .map_err(to_cred_error)?; + let session_token = self + .env + .get("AWS_SESSION_TOKEN") + .ok() + .map(|token| match token.trim() { + s if s.is_empty() => None, + s => Some(s.to_string()), + }) + .flatten(); + Ok(Credentials::new( + access_key, + secret_key, + session_token, + None, + ENV_PROVIDER, + )) + } +} + +impl EnvironmentVariableCredentialsProvider { + /// Create a `EnvironmentVariableCredentialsProvider` + pub fn new() -> Self { + Self::new_with_env(Env::real()) + } + + #[doc(hidden)] + /// Create a new `EnvironmentVariableCredentialsProvider` with `Env` overridden + /// + /// This function is intended for tests that mock out the process environment. + pub fn new_with_env(env: Env) -> Self { + Self { env } + } +} + +impl Default for EnvironmentVariableCredentialsProvider { + fn default() -> Self { + Self::new() + } +} + +const ENV_PROVIDER: &str = "EnvironmentVariable"; + +impl ProvideCredentials for EnvironmentVariableCredentialsProvider { + fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials<'a> + where + Self: 'a, + { + future::ProvideCredentials::ready(self.credentials()) + } +} + +fn to_cred_error(err: VarError) -> CredentialsError { + match err { + VarError::NotPresent => CredentialsError::not_loaded("environment variable not set"), + e @ VarError::NotUnicode(_) => CredentialsError::unhandled(e), + } +} + +#[cfg(test)] +mod test { + use aws_types::credentials::{CredentialsError, ProvideCredentials}; + use aws_types::os_shim_internal::Env; + use futures_util::FutureExt; + + use super::EnvironmentVariableCredentialsProvider; + + fn make_provider(vars: &[(&str, &str)]) -> EnvironmentVariableCredentialsProvider { + EnvironmentVariableCredentialsProvider { + env: Env::from_slice(vars), + } + } + + #[test] + fn valid_no_token() { + let provider = make_provider(&[ + ("AWS_ACCESS_KEY_ID", "access"), + ("AWS_SECRET_ACCESS_KEY", "secret"), + ]); + let creds = provider + .provide_credentials() + .now_or_never() + .unwrap() + .expect("valid credentials"); + assert_eq!(creds.session_token(), None); + assert_eq!(creds.access_key_id(), "access"); + assert_eq!(creds.secret_access_key(), "secret"); + } + + #[test] + fn valid_with_token() { + let provider = make_provider(&[ + ("AWS_ACCESS_KEY_ID", "access"), + ("AWS_SECRET_ACCESS_KEY", "secret"), + ("AWS_SESSION_TOKEN", "token"), + ]); + + let creds = provider + .provide_credentials() + .now_or_never() + .unwrap() + .expect("valid credentials"); + assert_eq!(creds.session_token().unwrap(), "token"); + assert_eq!(creds.access_key_id(), "access"); + assert_eq!(creds.secret_access_key(), "secret"); + } + + #[test] + fn empty_token_env_var() { + for token_value in &["", " "] { + let 
provider = make_provider(&[ + ("AWS_ACCESS_KEY_ID", "access"), + ("AWS_SECRET_ACCESS_KEY", "secret"), + ("AWS_SESSION_TOKEN", token_value), + ]); + + let creds = provider + .provide_credentials() + .now_or_never() + .unwrap() + .expect("valid credentials"); + assert_eq!(creds.access_key_id(), "access"); + assert_eq!(creds.secret_access_key(), "secret"); + assert_eq!(creds.session_token(), None); + } + } + + #[test] + fn secret_key_fallback() { + let provider = make_provider(&[ + ("AWS_ACCESS_KEY_ID", "access"), + ("SECRET_ACCESS_KEY", "secret"), + ("AWS_SESSION_TOKEN", "token"), + ]); + + let creds = provider + .provide_credentials() + .now_or_never() + .unwrap() + .expect("valid credentials"); + assert_eq!(creds.session_token().unwrap(), "token"); + assert_eq!(creds.access_key_id(), "access"); + assert_eq!(creds.secret_access_key(), "secret"); + } + + #[test] + fn missing() { + let provider = make_provider(&[]); + let err = provider + .provide_credentials() + .now_or_never() + .unwrap() + .expect_err("no credentials defined"); + assert!(matches!(err, CredentialsError::CredentialsNotLoaded { .. })); + } + + #[test] + fn real_environment() { + let provider = EnvironmentVariableCredentialsProvider::new(); + // we don't know what's in the env, just make sure it doesn't crash. + let _ = provider.provide_credentials(); + } +} diff --git a/patch/aws-config/src/environment/mod.rs b/patch/aws-config/src/environment/mod.rs new file mode 100644 index 0000000000000..520a7b7191581 --- /dev/null +++ b/patch/aws-config/src/environment/mod.rs @@ -0,0 +1,24 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +/// Load app name from the environment +pub mod app_name; +pub use app_name::EnvironmentVariableAppNameProvider; + +/// Load credentials from the environment +pub mod credentials; +pub use credentials::EnvironmentVariableCredentialsProvider; + +/// Load regions from the environment +pub mod region; +pub use region::EnvironmentVariableRegionProvider; + +/// Load retry behavior configuration from the environment +pub mod retry_config; +pub use retry_config::EnvironmentVariableRetryConfigProvider; + +/// Load timeout configuration from the environment +pub mod timeout_config; +pub use timeout_config::EnvironmentVariableTimeoutConfigProvider; diff --git a/patch/aws-config/src/environment/region.rs b/patch/aws-config/src/environment/region.rs new file mode 100644 index 0000000000000..30079249c1f5c --- /dev/null +++ b/patch/aws-config/src/environment/region.rs @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use crate::meta::region::{future, ProvideRegion}; +use aws_types::os_shim_internal::Env; +use aws_types::region::Region; + +/// Load a region from environment variables +/// +/// This provider will first check the value of `AWS_REGION`, falling back to `AWS_DEFAULT_REGION` +/// when `AWS_REGION` is unset. +#[derive(Debug, Default)] +pub struct EnvironmentVariableRegionProvider { + env: Env, +} + +impl EnvironmentVariableRegionProvider { + /// Create a new `EnvironmentVariableRegionProvider` + pub fn new() -> Self { + EnvironmentVariableRegionProvider { env: Env::real() } + } + + #[doc(hidden)] + /// Create an region provider from a given `Env` + /// + /// This method is used for tests that need to override environment variables. 
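+    ///
+    /// A minimal sketch of how this is used (mirroring the unit tests below). `Env::from_slice`
+    /// is the `doc(hidden)` test shim from `aws_types`, and the values are illustrative only:
+    ///
+    /// ```no_run
+    /// # use aws_config::environment::region::EnvironmentVariableRegionProvider;
+    /// # use aws_config::meta::region::ProvideRegion;
+    /// # use aws_types::os_shim_internal::Env;
+    /// # use aws_types::region::Region;
+    /// # async fn docs() {
+    /// let env = Env::from_slice(&[("AWS_DEFAULT_REGION", "us-east-2")]);
+    /// // `AWS_REGION` is unset, so the provider falls back to `AWS_DEFAULT_REGION`.
+    /// let region = EnvironmentVariableRegionProvider::new_with_env(env).region().await;
+    /// assert_eq!(region, Some(Region::new("us-east-2")));
+    /// # }
+    /// ```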
+ pub fn new_with_env(env: Env) -> Self { + EnvironmentVariableRegionProvider { env } + } +} + +impl ProvideRegion for EnvironmentVariableRegionProvider { + fn region(&self) -> future::ProvideRegion { + let region = self + .env + .get("AWS_REGION") + .or_else(|_| self.env.get("AWS_DEFAULT_REGION")) + .map(Region::new) + .ok(); + future::ProvideRegion::ready(region) + } +} +#[cfg(test)] +mod test { + use crate::environment::region::EnvironmentVariableRegionProvider; + use crate::meta::region::ProvideRegion; + use aws_types::os_shim_internal::Env; + use aws_types::region::Region; + use futures_util::FutureExt; + + fn test_provider(vars: &[(&str, &str)]) -> EnvironmentVariableRegionProvider { + EnvironmentVariableRegionProvider::new_with_env(Env::from_slice(vars)) + } + + #[test] + fn no_region() { + assert_eq!( + test_provider(&[]) + .region() + .now_or_never() + .expect("no polling"), + None + ); + } + + #[test] + fn prioritize_aws_region() { + let provider = test_provider(&[ + ("AWS_REGION", "us-east-1"), + ("AWS_DEFAULT_REGION", "us-east-2"), + ]); + assert_eq!( + provider.region().now_or_never().expect("no polling"), + Some(Region::new("us-east-1")) + ); + } + + #[test] + fn fallback_to_default_region() { + assert_eq!( + test_provider(&[("AWS_DEFAULT_REGION", "us-east-2")]) + .region() + .now_or_never() + .expect("no polling"), + Some(Region::new("us-east-2")) + ); + } +} diff --git a/patch/aws-config/src/environment/retry_config.rs b/patch/aws-config/src/environment/retry_config.rs new file mode 100644 index 0000000000000..bb459af5d3fbc --- /dev/null +++ b/patch/aws-config/src/environment/retry_config.rs @@ -0,0 +1,156 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use std::str::FromStr; + +use aws_smithy_types::retry::{RetryConfigBuilder, RetryConfigErr, RetryMode}; +use aws_types::os_shim_internal::Env; + +const ENV_VAR_MAX_ATTEMPTS: &str = "AWS_MAX_ATTEMPTS"; +const ENV_VAR_RETRY_MODE: &str = "AWS_RETRY_MODE"; + +/// Load a retry_config from environment variables +/// +/// This provider will check the values of `AWS_RETRY_MODE` and `AWS_MAX_ATTEMPTS` +/// in order to build a retry config. +#[derive(Debug, Default)] +pub struct EnvironmentVariableRetryConfigProvider { + env: Env, +} + +impl EnvironmentVariableRetryConfigProvider { + /// Create a new [`EnvironmentVariableRetryConfigProvider`] + pub fn new() -> Self { + EnvironmentVariableRetryConfigProvider { env: Env::real() } + } + + #[doc(hidden)] + /// Create an retry_config provider from a given `Env` + /// + /// This method is used for tests that need to override environment variables. 
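+    ///
+    /// A short sketch of overriding the environment, following the same pattern as the unit
+    /// tests below; the variable values are illustrative only:
+    ///
+    /// ```no_run
+    /// # use aws_config::environment::retry_config::EnvironmentVariableRetryConfigProvider;
+    /// # use aws_types::os_shim_internal::Env;
+    /// let env = Env::from_slice(&[("AWS_RETRY_MODE", "standard"), ("AWS_MAX_ATTEMPTS", "5")]);
+    /// let retry_config = EnvironmentVariableRetryConfigProvider::new_with_env(env)
+    ///     .retry_config_builder()
+    ///     .expect("valid retry configuration")
+    ///     .build();
+    /// assert_eq!(retry_config.max_attempts(), 5);
+    /// ```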
+ pub fn new_with_env(env: Env) -> Self { + EnvironmentVariableRetryConfigProvider { env } + } + + /// Attempt to create a new `RetryConfig` from environment variables + pub fn retry_config_builder(&self) -> Result { + let max_attempts = match self.env.get(ENV_VAR_MAX_ATTEMPTS).ok() { + Some(max_attempts) => match max_attempts.parse::() { + Ok(max_attempts) if max_attempts == 0 => { + return Err(RetryConfigErr::MaxAttemptsMustNotBeZero { + set_by: "environment variable".into(), + }); + } + Ok(max_attempts) => Some(max_attempts), + Err(source) => { + return Err(RetryConfigErr::FailedToParseMaxAttempts { + set_by: "environment variable".into(), + source, + }); + } + }, + None => None, + }; + + let retry_mode = match self.env.get(ENV_VAR_RETRY_MODE) { + Ok(retry_mode) => match RetryMode::from_str(&retry_mode) { + Ok(retry_mode) => Some(retry_mode), + Err(retry_mode_err) => { + return Err(RetryConfigErr::InvalidRetryMode { + set_by: "environment variable".into(), + source: retry_mode_err, + }); + } + }, + Err(_) => None, + }; + + let mut retry_config_builder = RetryConfigBuilder::new(); + retry_config_builder + .set_max_attempts(max_attempts) + .set_mode(retry_mode); + + Ok(retry_config_builder) + } +} + +#[cfg(test)] +mod test { + use aws_smithy_types::retry::{RetryConfig, RetryConfigErr, RetryMode}; + use aws_types::os_shim_internal::Env; + + use super::{EnvironmentVariableRetryConfigProvider, ENV_VAR_MAX_ATTEMPTS, ENV_VAR_RETRY_MODE}; + + fn test_provider(vars: &[(&str, &str)]) -> EnvironmentVariableRetryConfigProvider { + EnvironmentVariableRetryConfigProvider::new_with_env(Env::from_slice(vars)) + } + + #[test] + fn defaults() { + let built = test_provider(&[]).retry_config_builder().unwrap().build(); + + assert_eq!(built.mode(), RetryMode::Standard); + assert_eq!(built.max_attempts(), 3); + } + + #[test] + fn max_attempts_is_read_correctly() { + assert_eq!( + test_provider(&[(ENV_VAR_MAX_ATTEMPTS, "88")]) + .retry_config_builder() + .unwrap() + .build(), + RetryConfig::new().with_max_attempts(88) + ); + } + + #[test] + fn max_attempts_errors_when_it_cant_be_parsed_as_an_integer() { + assert!(matches!( + test_provider(&[(ENV_VAR_MAX_ATTEMPTS, "not an integer")]) + .retry_config_builder() + .unwrap_err(), + RetryConfigErr::FailedToParseMaxAttempts { .. } + )); + } + + #[test] + fn retry_mode_is_read_correctly() { + assert_eq!( + test_provider(&[(ENV_VAR_RETRY_MODE, "standard")]) + .retry_config_builder() + .unwrap() + .build(), + RetryConfig::new().with_retry_mode(RetryMode::Standard) + ); + } + + #[test] + fn both_fields_can_be_set_at_once() { + assert_eq!( + test_provider(&[ + (ENV_VAR_RETRY_MODE, "standard"), + (ENV_VAR_MAX_ATTEMPTS, "13") + ]) + .retry_config_builder() + .unwrap() + .build(), + RetryConfig::new() + .with_max_attempts(13) + .with_retry_mode(RetryMode::Standard) + ); + } + + #[test] + fn disallow_zero_max_attempts() { + let err = test_provider(&[(ENV_VAR_MAX_ATTEMPTS, "0")]) + .retry_config_builder() + .unwrap_err(); + assert!(matches!( + err, + RetryConfigErr::MaxAttemptsMustNotBeZero { .. } + )); + } +} diff --git a/patch/aws-config/src/environment/timeout_config.rs b/patch/aws-config/src/environment/timeout_config.rs new file mode 100644 index 0000000000000..53128bf9f1d10 --- /dev/null +++ b/patch/aws-config/src/environment/timeout_config.rs @@ -0,0 +1,141 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! 
Load timeout configuration properties from environment variables + +use crate::parsing::parse_str_as_timeout; + +use aws_smithy_types::timeout; +use aws_smithy_types::tristate::TriState; +use aws_types::os_shim_internal::Env; + +use std::time::Duration; + +// Currently unsupported timeouts +const ENV_VAR_CONNECT_TIMEOUT: &str = "AWS_CONNECT_TIMEOUT"; +const ENV_VAR_TLS_NEGOTIATION_TIMEOUT: &str = "AWS_TLS_NEGOTIATION_TIMEOUT"; +const ENV_VAR_READ_TIMEOUT: &str = "AWS_READ_TIMEOUT"; + +// Supported timeouts +const ENV_VAR_API_CALL_ATTEMPT_TIMEOUT: &str = "AWS_API_CALL_ATTEMPT_TIMEOUT"; +const ENV_VAR_API_CALL_TIMEOUT: &str = "AWS_API_CALL_TIMEOUT"; + +/// Load a timeout_config from environment variables +/// +/// This provider will check the values of the following variables in order to build a +/// [`timeout::Config`](aws_smithy_types::timeout::Config) +/// +/// - `AWS_API_CALL_ATTEMPT_TIMEOUT` +/// - `AWS_API_CALL_TIMEOUT` +/// +/// Timeout values represent the number of seconds before timing out and must be non-negative floats +/// or integers. NaN and infinity are also invalid. +#[derive(Debug, Default)] +pub struct EnvironmentVariableTimeoutConfigProvider { + env: Env, +} + +impl EnvironmentVariableTimeoutConfigProvider { + /// Create a new [`EnvironmentVariableTimeoutConfigProvider`] + pub fn new() -> Self { + EnvironmentVariableTimeoutConfigProvider { env: Env::real() } + } + + #[doc(hidden)] + /// Create a timeout config provider from a given [`Env`] + /// + /// This method is used for tests that need to override environment variables. + pub fn new_with_env(env: Env) -> Self { + EnvironmentVariableTimeoutConfigProvider { env } + } + + /// Attempt to create a new [`timeout::Config`](aws_smithy_types::timeout::Config) from environment variables + pub fn timeout_config(&self) -> Result { + // Warn users that set unsupported timeouts in their profile + for timeout in [ + ENV_VAR_CONNECT_TIMEOUT, + ENV_VAR_TLS_NEGOTIATION_TIMEOUT, + ENV_VAR_READ_TIMEOUT, + ] { + warn_if_unsupported_timeout_is_set(&self.env, timeout); + } + + let api_call_attempt_timeout = + construct_timeout_from_env_var(&self.env, ENV_VAR_API_CALL_ATTEMPT_TIMEOUT)?; + let api_call_timeout = construct_timeout_from_env_var(&self.env, ENV_VAR_API_CALL_TIMEOUT)?; + + let api_timeouts = timeout::Api::new() + .with_call_timeout(api_call_timeout) + .with_call_attempt_timeout(api_call_attempt_timeout); + + // Only API-related timeouts are currently supported + Ok(timeout::Config::new().with_api_timeouts(api_timeouts)) + } +} + +fn construct_timeout_from_env_var( + env: &Env, + var: &'static str, +) -> Result, timeout::ConfigError> { + match env.get(var).ok() { + Some(timeout) => parse_str_as_timeout(&timeout, var.into(), "environment variable".into()) + .map(TriState::Set), + None => Ok(TriState::Unset), + } +} + +fn warn_if_unsupported_timeout_is_set(env: &Env, var: &'static str) { + if env.get(var).is_ok() { + tracing::warn!( + "Environment variable for '{}' timeout was set but that feature is currently unimplemented so the setting will be ignored. 
\ + To help us prioritize support for this feature, please upvote aws-sdk-rust#151 (https://github.com/awslabs/aws-sdk-rust/issues/151)", + var + ) + } +} + +#[cfg(test)] +mod test { + use super::{ + EnvironmentVariableTimeoutConfigProvider, ENV_VAR_API_CALL_ATTEMPT_TIMEOUT, + ENV_VAR_API_CALL_TIMEOUT, + }; + use aws_smithy_types::timeout; + use aws_smithy_types::tristate::TriState; + use aws_types::os_shim_internal::Env; + use std::time::Duration; + + fn test_provider(vars: &[(&str, &str)]) -> EnvironmentVariableTimeoutConfigProvider { + EnvironmentVariableTimeoutConfigProvider::new_with_env(Env::from_slice(vars)) + } + + #[test] + fn no_defaults() { + let built = test_provider(&[]).timeout_config().unwrap(); + + assert_eq!(built.api.call_timeout(), TriState::Unset); + assert_eq!(built.api.call_attempt_timeout(), TriState::Unset); + } + + #[test] + fn all_fields_can_be_set_at_once() { + let expected_api_timeouts = timeout::Api::new() + .with_call_attempt_timeout(TriState::Set(Duration::from_secs_f32(4.0))) + // Some floats can't be represented as f32 so this duration will end up equalling the + // duration from the env. + .with_call_timeout(TriState::Set(Duration::from_secs_f32(900012350.0))); + let expected_timeouts = timeout::Config::new().with_api_timeouts(expected_api_timeouts); + + assert_eq!( + test_provider(&[ + (ENV_VAR_API_CALL_ATTEMPT_TIMEOUT, "04.000"), + (ENV_VAR_API_CALL_TIMEOUT, "900012345.0") + ]) + .timeout_config() + .unwrap(), + expected_timeouts + ); + } +} diff --git a/patch/aws-config/src/fs_util.rs b/patch/aws-config/src/fs_util.rs new file mode 100644 index 0000000000000..22b6cebad72df --- /dev/null +++ b/patch/aws-config/src/fs_util.rs @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use aws_types::os_shim_internal; + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(crate) enum Os { + Windows, + NotWindows, +} + +impl Os { + pub fn real() -> Self { + match std::env::consts::OS { + "windows" => Os::Windows, + _ => Os::NotWindows, + } + } +} + +/// Resolve a home directory given a set of environment variables +pub(crate) fn home_dir(env_var: &os_shim_internal::Env, os: Os) -> Option { + if let Ok(home) = env_var.get("HOME") { + tracing::debug!(src = "HOME", "loaded home directory"); + return Some(home); + } + + if os == Os::Windows { + if let Ok(home) = env_var.get("USERPROFILE") { + tracing::debug!(src = "USERPROFILE", "loaded home directory"); + return Some(home); + } + + let home_drive = env_var.get("HOMEDRIVE"); + let home_path = env_var.get("HOMEPATH"); + tracing::debug!(src = "HOMEDRIVE/HOMEPATH", "loaded home directory"); + if let (Ok(mut drive), Ok(path)) = (home_drive, home_path) { + drive.push_str(&path); + return Some(drive); + } + } + None +} + +#[cfg(test)] +mod test { + use super::*; + use aws_types::os_shim_internal::Env; + + #[test] + fn homedir_profile_only_windows() { + // windows specific variables should only be considered when the platform is windows + let env = Env::from_slice(&[("USERPROFILE", "C:\\Users\\name")]); + assert_eq!( + home_dir(&env, Os::Windows), + Some("C:\\Users\\name".to_string()) + ); + assert_eq!(home_dir(&env, Os::NotWindows), None); + } +} diff --git a/patch/aws-config/src/http_credential_provider.rs b/patch/aws-config/src/http_credential_provider.rs new file mode 100644 index 0000000000000..2616600a687e3 --- /dev/null +++ b/patch/aws-config/src/http_credential_provider.rs @@ -0,0 +1,301 @@ +/* + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Generalized HTTP credential provider. Currently, this cannot be used directly and can only +//! be used via the ECS credential provider. +//! +//! Future work will stabilize this interface and enable it to be used directly. + +use aws_smithy_client::erase::DynConnector; +use aws_smithy_client::http_connector::HttpSettings; +use aws_smithy_http::body::SdkBody; +use aws_smithy_http::operation::{Operation, Request}; +use aws_smithy_http::response::ParseStrictResponse; +use aws_smithy_http::result::{SdkError, SdkSuccess}; +use aws_smithy_http::retry::ClassifyResponse; +use aws_smithy_types::retry::{ErrorKind, RetryKind}; +use aws_smithy_types::timeout; +use aws_smithy_types::tristate::TriState; +use aws_types::credentials::CredentialsError; +use aws_types::{credentials, Credentials}; + +use crate::connector::expect_connector; +use crate::json_credentials::{parse_json_credentials, JsonCredentials}; +use crate::provider_config::ProviderConfig; + +use bytes::Bytes; +use http::header::{ACCEPT, AUTHORIZATION}; +use http::{HeaderValue, Response, Uri}; +use std::time::Duration; +use tower::layer::util::Identity; + +const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(5); +const DEFAULT_CONNECT_TIMEOUT: Duration = Duration::from_secs(2); + +#[derive(Debug)] +pub(crate) struct HttpCredentialProvider { + uri: Uri, + client: aws_smithy_client::Client, + provider_name: &'static str, +} + +impl HttpCredentialProvider { + pub fn builder() -> Builder { + Builder::default() + } + + pub async fn credentials(&self, auth: Option) -> credentials::Result { + let credentials = self.client.call(self.operation(auth)).await; + match credentials { + Ok(creds) => Ok(creds), + Err(SdkError::ServiceError { err, .. 
}) => Err(err), + Err(other) => Err(CredentialsError::unhandled(other)), + } + } + + fn operation( + &self, + auth: Option, + ) -> Operation { + let mut http_req = http::Request::builder() + .uri(&self.uri) + .header(ACCEPT, "application/json"); + + if let Some(auth) = auth { + http_req = http_req.header(AUTHORIZATION, auth); + } + let http_req = http_req.body(SdkBody::empty()).expect("valid request"); + Operation::new( + Request::new(http_req), + CredentialsResponseParser { + provider_name: self.provider_name, + }, + ) + .with_retry_policy(HttpCredentialRetryPolicy) + } +} + +#[derive(Default)] +pub(crate) struct Builder { + provider_config: Option, + http_timeout_config: timeout::Http, +} + +impl Builder { + pub(crate) fn configure(mut self, provider_config: &ProviderConfig) -> Self { + self.provider_config = Some(provider_config.clone()); + self + } + + // read_timeout and connect_timeout accept options to enable easy pass through from + // other builders + pub(crate) fn read_timeout(mut self, read_timeout: Option) -> Self { + self.http_timeout_config = self + .http_timeout_config + .with_read_timeout(read_timeout.into()); + self + } + + pub(crate) fn connect_timeout(mut self, connect_timeout: Option) -> Self { + self.http_timeout_config = self + .http_timeout_config + .with_connect_timeout(connect_timeout.into()); + self + } + + pub(crate) fn build(self, provider_name: &'static str, uri: Uri) -> HttpCredentialProvider { + let provider_config = self.provider_config.unwrap_or_default(); + let default_timeout_config = timeout::Http::new() + .with_connect_timeout(TriState::Set(DEFAULT_CONNECT_TIMEOUT)) + .with_read_timeout(TriState::Set(DEFAULT_READ_TIMEOUT)); + let http_timeout_config = self + .http_timeout_config + .take_unset_from(default_timeout_config); + let http_settings = HttpSettings::default().with_http_timeout_config(http_timeout_config); + let connector = expect_connector(provider_config.connector(&http_settings)); + let client = aws_smithy_client::Builder::new() + .connector(connector) + .sleep_impl(provider_config.sleep()) + .build(); + HttpCredentialProvider { + uri, + client, + provider_name, + } + } +} + +#[derive(Clone, Debug)] +struct CredentialsResponseParser { + provider_name: &'static str, +} +impl ParseStrictResponse for CredentialsResponseParser { + type Output = credentials::Result; + + fn parse(&self, response: &Response) -> Self::Output { + if !response.status().is_success() { + return Err(CredentialsError::provider_error(format!( + "Non-success status from HTTP credential provider: {:?}", + response.status() + ))); + } + let str_resp = + std::str::from_utf8(response.body().as_ref()).map_err(CredentialsError::unhandled)?; + let json_creds = parse_json_credentials(str_resp).map_err(CredentialsError::unhandled)?; + match json_creds { + JsonCredentials::RefreshableCredentials { + access_key_id, + secret_access_key, + session_token, + expiration, + } => Ok(Credentials::new( + access_key_id, + secret_access_key, + Some(session_token.to_string()), + Some(expiration), + self.provider_name, + )), + JsonCredentials::Error { code, message } => Err(CredentialsError::provider_error( + format!("failed to load credentials [{}]: {}", code, message), + )), + } + } +} + +#[derive(Clone, Debug)] +struct HttpCredentialRetryPolicy; + +impl ClassifyResponse, SdkError> + for HttpCredentialRetryPolicy +{ + fn classify( + &self, + response: Result<&SdkSuccess, &SdkError>, + ) -> RetryKind { + /* The following errors are retryable: + * - Socket errors + * - Networking timeouts + * - 5xx 
errors + * - Non-parseable 200 responses. + * */ + match response { + Ok(_) => RetryKind::Unnecessary, + // socket errors, networking timeouts + Err(SdkError::DispatchFailure(client_err)) + if client_err.is_timeout() || client_err.is_io() => + { + RetryKind::Error(ErrorKind::TransientError) + } + // non-parseable 200s + Err(SdkError::ServiceError { + err: CredentialsError::Unhandled { .. }, + raw, + }) if raw.http().status().is_success() => RetryKind::Error(ErrorKind::ServerError), + // 5xx errors + Err(SdkError::ServiceError { raw, .. } | SdkError::ResponseError { raw, .. }) + if raw.http().status().is_server_error() => + { + RetryKind::Error(ErrorKind::ServerError) + } + Err(_) => RetryKind::UnretryableFailure, + } + } +} + +#[cfg(test)] +mod test { + use crate::http_credential_provider::{CredentialsResponseParser, HttpCredentialRetryPolicy}; + use aws_smithy_http::body::SdkBody; + use aws_smithy_http::operation; + use aws_smithy_http::response::ParseStrictResponse; + use aws_smithy_http::result::{SdkError, SdkSuccess}; + use aws_smithy_http::retry::ClassifyResponse; + use aws_smithy_types::retry::{ErrorKind, RetryKind}; + use aws_types::credentials::CredentialsError; + use aws_types::Credentials; + use bytes::Bytes; + + fn sdk_resp( + resp: http::Response<&'static str>, + ) -> Result, SdkError> { + let resp = resp.map(|data| Bytes::from_static(data.as_bytes())); + match (CredentialsResponseParser { + provider_name: "test", + }) + .parse(&resp) + { + Ok(creds) => Ok(SdkSuccess { + raw: operation::Response::new(resp.map(SdkBody::from)), + parsed: creds, + }), + Err(err) => Err(SdkError::ServiceError { + err, + raw: operation::Response::new(resp.map(SdkBody::from)), + }), + } + } + + #[test] + fn non_parseable_is_retriable() { + let bad_response = http::Response::builder() + .status(200) + .body("notjson") + .unwrap(); + + assert_eq!( + HttpCredentialRetryPolicy.classify(sdk_resp(bad_response).as_ref()), + RetryKind::Error(ErrorKind::ServerError) + ); + } + + #[test] + fn ok_response_not_retriable() { + let ok_response = http::Response::builder() + .status(200) + .body( + r#" { + "AccessKeyId" : "MUA...", + "SecretAccessKey" : "/7PC5om....", + "Token" : "AQoDY....=", + "Expiration" : "2016-02-25T06:03:31Z" + }"#, + ) + .unwrap(); + let sdk_result = sdk_resp(ok_response); + + assert_eq!( + HttpCredentialRetryPolicy.classify(sdk_result.as_ref()), + RetryKind::Unnecessary + ); + + assert!(sdk_result.is_ok(), "should be ok: {:?}", sdk_result) + } + + #[test] + fn explicit_error_not_retriable() { + let error_response = http::Response::builder() + .status(400) + .body(r#"{ "Code": "Error", "Message": "There was a problem, it was your fault" }"#) + .unwrap(); + let sdk_result = sdk_resp(error_response); + assert_eq!( + HttpCredentialRetryPolicy.classify(sdk_result.as_ref()), + RetryKind::UnretryableFailure + ); + let sdk_error = sdk_result.expect_err("should be error"); + + assert!( + matches!( + sdk_error, + SdkError::ServiceError { + err: CredentialsError::ProviderError { .. }, + .. + } + ), + "should be provider error: {}", + sdk_error + ); + } +} diff --git a/patch/aws-config/src/imds/client.rs b/patch/aws-config/src/imds/client.rs new file mode 100644 index 0000000000000..cc0de7c2265f8 --- /dev/null +++ b/patch/aws-config/src/imds/client.rs @@ -0,0 +1,1152 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Raw IMDSv2 Client +//! +//! Client for direct access to IMDSv2. 
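+//!
+//! A minimal usage sketch (the same pattern as the `Client::get` example further down in this
+//! file); the metadata path shown is only an illustration:
+//!
+//! ```no_run
+//! use aws_config::imds::client::Client;
+//! # async fn docs() {
+//! let client = Client::builder().build().await.expect("valid client");
+//! let instance_id = client
+//!     .get("/latest/meta-data/instance-id")
+//!     .await
+//!     .expect("failure communicating with IMDS");
+//! # }
+//! ```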
+ +use std::borrow::Cow; +use std::convert::TryFrom; +use std::error::Error; +use std::fmt::{Display, Formatter}; +use std::str::FromStr; +use std::time::Duration; + +use aws_http::user_agent::{ApiMetadata, AwsUserAgent, UserAgentStage}; +use aws_smithy_client::{erase::DynConnector, SdkSuccess}; +use aws_smithy_client::{retry, SdkError}; +use aws_smithy_http::body::SdkBody; +use aws_smithy_http::endpoint::Endpoint; +use aws_smithy_http::operation; +use aws_smithy_http::operation::{Metadata, Operation}; +use aws_smithy_http::response::ParseStrictResponse; +use aws_smithy_http::retry::ClassifyResponse; +use aws_smithy_http_tower::map_request::{ + AsyncMapRequestLayer, AsyncMapRequestService, MapRequestLayer, MapRequestService, +}; +use aws_smithy_types::retry::{ErrorKind, RetryKind}; +use aws_smithy_types::timeout; +use aws_types::os_shim_internal::{Env, Fs}; + +use bytes::Bytes; +use http::uri::InvalidUri; +use http::{Response, Uri}; +use tokio::sync::OnceCell; + +use crate::connector::expect_connector; +use crate::imds::client::token::TokenMiddleware; +use crate::profile::ProfileParseError; +use crate::provider_config::ProviderConfig; +use crate::{profile, PKG_VERSION}; +use aws_smithy_client::http_connector::HttpSettings; + +mod token; + +// 6 hours +const DEFAULT_TOKEN_TTL: Duration = Duration::from_secs(21_600); +const DEFAULT_ATTEMPTS: u32 = 4; +const DEFAULT_CONNECT_TIMEOUT: Option = Some(Duration::from_secs(1)); +const DEFAULT_READ_TIMEOUT: Option = Some(Duration::from_secs(1)); + +fn user_agent() -> AwsUserAgent { + AwsUserAgent::new_from_environment(Env::real(), ApiMetadata::new("imds", PKG_VERSION)) +} + +/// IMDSv2 Client +/// +/// Client for IMDSv2. This client handles fetching tokens, retrying on failure, and token +/// caching according to the specified token TTL. +/// +/// _Note: This client ONLY supports IMDSv2. It will not fallback to IMDSv1. See +/// [transitioning to IMDSv2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html#instance-metadata-transition-to-version-2) +/// for more information._ +/// +/// # Client Configuration +/// The IMDS client can load configuration explicitly, via environment variables, or via +/// `~/.aws/config`. It will first attempt to resolve an endpoint override. If no endpoint +/// override exists, it will attempt to resolve an [`EndpointMode`]. If no +/// [`EndpointMode`] override exists, it will fallback to [`IpV4`](EndpointMode::IpV4). An exhaustive +/// list is below: +/// +/// ## Endpoint configuration list +/// 1. Explicit configuration of `Endpoint` via the [builder](Builder): +/// ```no_run +/// use aws_config::imds::client::Client; +/// use http::Uri; +/// # async fn docs() { +/// let client = Client::builder() +/// .endpoint(Uri::from_static("http://customidms:456/")) +/// .build() +/// .await; +/// # } +/// ``` +/// +/// 2. The `AWS_EC2_METADATA_SERVICE_ENDPOINT` environment variable. Note: If this environment variable +/// is set, it MUST contain to a valid URI or client construction will fail. +/// +/// 3. The `ec2_metadata_service_endpoint` field in `~/.aws/config`: +/// ```ini +/// [default] +/// # ... other configuration +/// ec2_metadata_service_endpoint = http://my-custom-endpoint:444 +/// ``` +/// +/// 4. An explicitly set endpoint mode: +/// ```no_run +/// use aws_config::imds::client::{Client, EndpointMode}; +/// # async fn docs() { +/// let client = Client::builder().endpoint_mode(EndpointMode::IpV6).build().await; +/// # } +/// ``` +/// +/// 5. 
An [endpoint mode](EndpointMode) loaded from the `AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE` environment +/// variable. Valid values: `IPv4`, `IPv6` +/// +/// 6. An [endpoint mode](EndpointMode) loaded from the `ec2_metadata_service_endpoint_mode` field in +/// `~/.aws/config`: +/// ```ini +/// [default] +/// # ... other configuration +/// ec2_metadata_service_endpoint_mode = IPv4 +/// ``` +/// +/// 7. The default value of `http://169.254.169.254` will be used. +/// +#[derive(Debug)] +pub struct Client { + endpoint: Endpoint, + inner: aws_smithy_client::Client, +} + +/// Client where build is sync, but usage is async +/// +/// Building an imds::Client is actually an async operation, however, for credentials and region +/// providers, we want build to always be a synchronous operation. This allows building to be deferred +/// and cached until request time. +#[derive(Debug)] +pub(super) struct LazyClient { + client: OnceCell>, + builder: Builder, +} + +impl LazyClient { + pub fn from_ready_client(client: Client) -> Self { + Self { + client: OnceCell::from(Ok(client)), + // the builder will never be used in this case + builder: Builder::default(), + } + } + pub(super) async fn client(&self) -> Result<&Client, &BuildError> { + let builder = &self.builder; + self.client + // the clone will only happen once when we actually construct it for the first time, + // after that, we will use the cache. + .get_or_init(|| async { + let client = builder.clone().build().await; + if let Err(err) = &client { + tracing::warn!(err = % err, "failed to create IMDS client") + } + client + }) + .await + .as_ref() + } +} + +impl Client { + /// IMDS client builder + pub fn builder() -> Builder { + Builder::default() + } + + /// Retrieve information from IMDS + /// + /// This method will handle loading and caching a session token, combining the `path` with the + /// configured IMDS endpoint, and retrying potential errors. + /// + /// For more information about IMDSv2 methods and functionality, see + /// [Instance metadata and user data](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + /// + /// # Examples + /// + /// ```no_run + /// use aws_config::imds::client::Client; + /// # async fn docs() { + /// let client = Client::builder().build().await.expect("valid client"); + /// let ami_id = client + /// .get("/latest/meta-data/ami-id") + /// .await + /// .expect("failure communicating with IMDS"); + /// # } + /// ``` + pub async fn get(&self, path: &str) -> Result { + let operation = self.make_operation(path)?; + self.inner.call(operation).await.map_err(|err| match err { + SdkError::ConstructionFailure(err) => match err.downcast::() { + Ok(token_failure) => *token_failure, + Err(other) => ImdsError::Unexpected(other), + }, + SdkError::TimeoutError(err) => ImdsError::IoError(err), + SdkError::DispatchFailure(err) => ImdsError::IoError(err.into()), + SdkError::ResponseError { err, .. } => ImdsError::IoError(err), + SdkError::ServiceError { + err: InnerImdsError::BadStatus, + raw, + } => ImdsError::ErrorResponse { + response: raw.into_parts().0, + }, + SdkError::ServiceError { + err: InnerImdsError::InvalidUtf8, + .. 
+            } => ImdsError::Unexpected("IMDS returned invalid UTF-8".into()),
+        })
+    }
+
+    /// Creates an aws_smithy_http Operation for `path`
+    /// - Convert the path to a URI
+    /// - Set the base endpoint on the URI
+    /// - Add a user agent
+    fn make_operation(
+        &self,
+        path: &str,
+    ) -> Result<Operation<ImdsGetResponseHandler, ImdsErrorPolicy>, ImdsError> {
+        let mut base_uri: Uri = path.parse().map_err(|_| ImdsError::InvalidPath)?;
+        self.endpoint.set_endpoint(&mut base_uri, None);
+        let request = http::Request::builder()
+            .uri(base_uri)
+            .body(SdkBody::empty())
+            .expect("valid request");
+        let mut request = operation::Request::new(request);
+        request.properties_mut().insert(user_agent());
+        Ok(Operation::new(request, ImdsGetResponseHandler)
+            .with_metadata(Metadata::new("get", "imds"))
+            .with_retry_policy(ImdsErrorPolicy))
+    }
+}
+
+/// An error retrieving metadata from IMDS
+#[derive(Debug)]
+#[non_exhaustive]
+pub enum ImdsError {
+    /// An IMDSv2 Token could not be loaded
+    ///
+    /// Requests to IMDS must be accompanied by a token obtained via a `PUT` request. This is handled
+    /// transparently by the [`Client`].
+    FailedToLoadToken(SdkError<TokenError>),
+
+    /// The `path` was invalid for an IMDS request
+    ///
+    /// The `path` parameter must be a valid URI path segment, and it must begin with `/`.
+    InvalidPath,
+
+    /// An error response was returned from IMDS
+    #[non_exhaustive]
+    ErrorResponse {
+        /// The returned raw response
+        response: http::Response<SdkBody>,
+    },
+
+    /// IO Error
+    ///
+    /// An error occurred while communicating with IMDS
+    IoError(Box<dyn Error + Send + Sync + 'static>),
+
+    /// An unexpected error occurred communicating with IMDS
+    Unexpected(Box<dyn Error + Send + Sync + 'static>),
+}
+
+impl Display for ImdsError {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ImdsError::FailedToLoadToken(inner) => {
+                write!(f, "Failed to load session token: {}", inner)
+            }
+            ImdsError::InvalidPath => write!(
+                f,
+                "IMDS path was not a valid URI. Hint: Does it begin with `/`?"
+            ),
+            ImdsError::ErrorResponse { response } => write!(
+                f,
+                "Error response from IMDS (code: {}). {:?}",
+                response.status().as_u16(),
+                response
+            ),
+            ImdsError::IoError(err) => {
+                write!(f, "An IO error occurred communicating with IMDS: {}", err)
+            }
+            ImdsError::Unexpected(err) => write!(
+                f,
+                "An unexpected error occurred communicating with IMDS: {}",
+                err
+            ),
+        }
+    }
+}
+
+impl Error for ImdsError {
+    fn source(&self) -> Option<&(dyn Error + 'static)> {
+        match &self {
+            ImdsError::FailedToLoadToken(inner) => Some(inner),
+            _ => None,
+        }
+    }
+}
+
+/// IMDS Middleware
+///
+/// The IMDS middleware includes a token loader & a UserAgent stage
+#[derive(Clone, Debug)]
+struct ImdsMiddleware {
+    token_loader: TokenMiddleware,
+}
+
+impl<S> tower::Layer<S> for ImdsMiddleware {
+    type Service = AsyncMapRequestService<MapRequestService<S, UserAgentStage>, TokenMiddleware>;
+
+    fn layer(&self, inner: S) -> Self::Service {
+        AsyncMapRequestLayer::for_mapper(self.token_loader.clone())
+            .layer(MapRequestLayer::for_mapper(UserAgentStage::new()).layer(inner))
+    }
+}
+
+#[derive(Copy, Clone)]
+struct ImdsGetResponseHandler;
+
+#[derive(Debug)]
+enum InnerImdsError {
+    BadStatus,
+    InvalidUtf8,
+}
+
+impl Display for InnerImdsError {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            InnerImdsError::BadStatus => write!(f, "failing status code returned from IMDS"),
+            InnerImdsError::InvalidUtf8 => write!(f, "IMDS did not return valid UTF-8"),
+        }
+    }
+}
+
+impl Error for InnerImdsError {}
+
+impl ParseStrictResponse for ImdsGetResponseHandler {
+    type Output = Result<String, InnerImdsError>;
+
+    fn parse(&self, response: &Response<Bytes>) -> Self::Output {
+        if response.status().is_success() {
+            std::str::from_utf8(response.body().as_ref())
+                .map(|data| data.to_string())
+                .map_err(|_| InnerImdsError::InvalidUtf8)
+        } else {
+            Err(InnerImdsError::BadStatus)
+        }
+    }
+}
+
+/// IMDSv2 Endpoint Mode
+///
+/// IMDS can be accessed in two ways:
+/// 1. Via the IpV4 endpoint: `http://169.254.169.254`
+/// 2. Via the IpV6 endpoint: `http://[fd00:ec2::254]`
+#[derive(Debug, Clone)]
+#[non_exhaustive]
+pub enum EndpointMode {
+    /// IpV4 mode: `http://169.254.169.254`
+    ///
+    /// This mode is the default unless otherwise specified.
+    IpV4,
+    /// IpV6 mode: `http://[fd00:ec2::254]`
+    IpV6,
+}
+
+/// Invalid Endpoint Mode
+#[derive(Debug, Clone)]
+pub struct InvalidEndpointMode(String);
+
+impl Display for InvalidEndpointMode {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "`{}` is not a valid endpoint mode. Valid values are [`IPv4`, `IPv6`]",
+            &self.0
+        )
+    }
+}
+
+impl Error for InvalidEndpointMode {}
+
+impl FromStr for EndpointMode {
+    type Err = InvalidEndpointMode;
+
+    fn from_str(value: &str) -> Result<Self, Self::Err> {
+        match value {
+            _ if value.eq_ignore_ascii_case("ipv4") => Ok(EndpointMode::IpV4),
+            _ if value.eq_ignore_ascii_case("ipv6") => Ok(EndpointMode::IpV6),
+            other => Err(InvalidEndpointMode(other.to_owned())),
+        }
+    }
+}
+
+impl EndpointMode {
+    /// IMDS URI for this endpoint mode
+    fn endpoint(&self) -> Uri {
+        match self {
+            EndpointMode::IpV4 => Uri::from_static("http://169.254.169.254"),
+            EndpointMode::IpV6 => Uri::from_static("http://[fd00:ec2::254]"),
+        }
+    }
+}
+
+/// IMDSv2 Client Builder
+#[derive(Default, Debug, Clone)]
+pub struct Builder {
+    max_attempts: Option<u32>,
+    endpoint: Option<EndpointSource>,
+    mode_override: Option<EndpointMode>,
+    token_ttl: Option<Duration>,
+    connect_timeout: Option<Duration>,
+    read_timeout: Option<Duration>,
+    config: Option<ProviderConfig>,
+}
+
+/// Error constructing IMDSv2 Client
+#[derive(Debug)]
+pub enum BuildError {
+    /// The endpoint mode was invalid
+    InvalidEndpointMode(InvalidEndpointMode),
+
+    /// The AWS Profile (e.g. 
`~/.aws/config`) was invalid + InvalidProfile(ProfileParseError), + + /// The specified endpoint was not a valid URI + InvalidEndpointUri(InvalidUri), +} + +impl Display for BuildError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "failed to build IMDS client: ")?; + match self { + BuildError::InvalidEndpointMode(e) => write!(f, "{}", e), + BuildError::InvalidProfile(e) => write!(f, "{}", e), + BuildError::InvalidEndpointUri(e) => write!(f, "{}", e), + } + } +} + +impl Error for BuildError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + BuildError::InvalidEndpointMode(e) => Some(e), + BuildError::InvalidProfile(e) => Some(e), + BuildError::InvalidEndpointUri(e) => Some(e), + } + } +} + +impl Builder { + /// Override the number of retries for fetching tokens & metadata + /// + /// By default, 4 attempts will be made. + pub fn max_attempts(mut self, max_attempts: u32) -> Self { + self.max_attempts = Some(max_attempts); + self + } + + /// Configure generic options of the [`Client`] + /// + /// # Examples + /// ```no_run + /// # async fn test() { + /// use aws_config::imds::Client; + /// use aws_config::provider_config::ProviderConfig; + /// + /// let provider = Client::builder() + /// .configure(&ProviderConfig::with_default_region().await) + /// .build(); + /// # } + /// ``` + pub fn configure(mut self, provider_config: &ProviderConfig) -> Self { + self.config = Some(provider_config.clone()); + self + } + + /// Override the endpoint for the [`Client`] + /// + /// By default, the client will resolve an endpoint from the environment, AWS config, and endpoint mode. + /// + /// See [`Client`] for more information. + pub fn endpoint(mut self, endpoint: impl Into) -> Self { + self.endpoint = Some(EndpointSource::Explicit(endpoint.into())); + self + } + + /// Override the endpoint mode for [`Client`] + /// + /// * When set to [`IpV4`](EndpointMode::IpV4), the endpoint will be `http://169.254.169.254`. + /// * When set to [`IpV6`](EndpointMode::IpV6), the endpoint will be `http://[fd00:ec2::254]`. + pub fn endpoint_mode(mut self, mode: EndpointMode) -> Self { + self.mode_override = Some(mode); + self + } + + /// Override the time-to-live for the session token + /// + /// Requests to IMDS utilize a session token for authentication. By default, session tokens last + /// for 6 hours. When the TTL for the token expires, a new token must be retrieved from the + /// metadata service. 
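+    ///
+    /// # Examples
+    /// A minimal, illustrative sketch (not part of the original patch) of overriding the TTL:
+    /// ```no_run
+    /// use std::time::Duration;
+    /// use aws_config::imds::client::Client;
+    /// # async fn docs() {
+    /// let client = Client::builder()
+    ///     .token_ttl(Duration::from_secs(900))
+    ///     .build()
+    ///     .await
+    ///     .expect("valid client");
+    /// # }
+    /// ```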
+ pub fn token_ttl(mut self, ttl: Duration) -> Self { + self.token_ttl = Some(ttl); + self + } + + /// Override the connect timeout for IMDS + /// + /// This value defaults to 1 second + pub fn connect_timeout(mut self, timeout: Duration) -> Self { + self.connect_timeout = Some(timeout); + self + } + + /// Override the read timeout for IMDS + /// + /// This value defaults to 1 second + pub fn read_timeout(mut self, timeout: Duration) -> Self { + self.read_timeout = Some(timeout); + self + } + + /* TODO(https://github.com/awslabs/aws-sdk-rust/issues/339): Support customizing the port explicitly */ + /* + pub fn port(mut self, port: u32) -> Self { + self.port_override = Some(port); + self + }*/ + + pub(super) fn build_lazy(self) -> LazyClient { + LazyClient { + client: OnceCell::new(), + builder: self, + } + } + + /// Build an IMDSv2 Client + pub async fn build(self) -> Result { + let config = self.config.unwrap_or_default(); + let http_timeout_config = timeout::Http::new() + .with_connect_timeout(self.connect_timeout.or(DEFAULT_CONNECT_TIMEOUT).into()) + .with_read_timeout(self.read_timeout.or(DEFAULT_READ_TIMEOUT).into()); + let http_settings = HttpSettings::default().with_http_timeout_config(http_timeout_config); + let connector = expect_connector(config.connector(&http_settings)); + let endpoint_source = self + .endpoint + .unwrap_or_else(|| EndpointSource::Env(config.env(), config.fs())); + let endpoint = endpoint_source.endpoint(self.mode_override).await?; + let endpoint = Endpoint::immutable(endpoint); + let retry_config = retry::Config::default() + .with_max_attempts(self.max_attempts.unwrap_or(DEFAULT_ATTEMPTS)); + let timeout_config = timeout::Config::default(); + let token_loader = token::TokenMiddleware::new( + connector.clone(), + config.time_source(), + endpoint.clone(), + self.token_ttl.unwrap_or(DEFAULT_TOKEN_TTL), + retry_config.clone(), + timeout_config.clone(), + config.sleep(), + ); + let middleware = ImdsMiddleware { token_loader }; + let inner_client = aws_smithy_client::Builder::new() + .connector(connector.clone()) + .middleware(middleware) + .sleep_impl(config.sleep()) + .build() + .with_retry_config(retry_config) + .with_timeout_config(timeout_config); + + let client = Client { + endpoint, + inner: inner_client, + }; + Ok(client) + } +} + +mod env { + pub const ENDPOINT: &str = "AWS_EC2_METADATA_SERVICE_ENDPOINT"; + pub const ENDPOINT_MODE: &str = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"; +} + +mod profile_keys { + pub const ENDPOINT: &str = "ec2_metadata_service_endpoint"; + pub const ENDPOINT_MODE: &str = "ec2_metadata_service_endpoint_mode"; +} + +/// Endpoint Configuration Abstraction +#[derive(Debug, Clone)] +enum EndpointSource { + Explicit(Uri), + Env(Env, Fs), +} + +impl EndpointSource { + async fn endpoint(&self, mode_override: Option) -> Result { + match self { + EndpointSource::Explicit(uri) => { + if mode_override.is_some() { + tracing::warn!(endpoint = ?uri, mode = ?mode_override, + "Endpoint mode override was set in combination with an explicit endpoint. 
\ + The mode override will be ignored.") + } + Ok(uri.clone()) + } + EndpointSource::Env(env, fs) => { + // load an endpoint override from the environment + let profile = profile::load(fs, env) + .await + .map_err(BuildError::InvalidProfile)?; + let uri_override = if let Ok(uri) = env.get(env::ENDPOINT) { + Some(Cow::Owned(uri)) + } else { + profile.get(profile_keys::ENDPOINT).map(Cow::Borrowed) + }; + if let Some(uri) = uri_override { + return Uri::try_from(uri.as_ref()).map_err(BuildError::InvalidEndpointUri); + } + + // if not, load a endpoint mode from the environment + let mode = if let Some(mode) = mode_override { + mode + } else if let Ok(mode) = env.get(env::ENDPOINT_MODE) { + mode.parse::() + .map_err(BuildError::InvalidEndpointMode)? + } else if let Some(mode) = profile.get(profile_keys::ENDPOINT_MODE) { + mode.parse::() + .map_err(BuildError::InvalidEndpointMode)? + } else { + EndpointMode::IpV4 + }; + + Ok(mode.endpoint()) + } + } + } +} + +/// Error retrieving token from IMDS +#[derive(Debug)] +pub enum TokenError { + /// The token was invalid + /// + /// Because tokens must be eventually sent as a header, the token must be a valid header value. + InvalidToken, + + /// No TTL was sent + /// + /// The token response must include a time-to-live indicating the lifespan of the token. + NoTtl, + + /// The TTL was invalid + /// + /// The TTL must be a valid positive integer. + InvalidTtl, + + /// Invalid Parameters + /// + /// The request to load a token was malformed. This indicates an SDK bug. + InvalidParameters, + + /// Forbidden + /// + /// IMDS is disabled or has been disallowed via permissions. + Forbidden, +} + +impl Display for TokenError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + TokenError::InvalidToken => write!(f, "Invalid Token"), + TokenError::NoTtl => write!(f, "Token response did not contain a TTL header"), + TokenError::InvalidTtl => write!(f, "The returned TTL was invalid"), + TokenError::InvalidParameters => { + write!(f, "Invalid request parameters. This indicates an SDK bug.") + } + TokenError::Forbidden => write!( + f, + "Request forbidden: IMDS is disabled or the caller has insufficient permissions." + ), + } + } +} + +impl Error for TokenError {} + +#[derive(Clone)] +struct ImdsErrorPolicy; + +impl ImdsErrorPolicy { + fn classify(response: &operation::Response) -> RetryKind { + let status = response.http().status(); + match status { + _ if status.is_server_error() => RetryKind::Error(ErrorKind::ServerError), + // 401 indicates that the token has expired, this is retryable + _ if status.as_u16() == 401 => RetryKind::Error(ErrorKind::ServerError), + // This catch-all includes successful responses that fail to parse. These should not be retried. + _ => RetryKind::UnretryableFailure, + } + } +} + +/// IMDS Retry Policy +/// +/// Possible status codes: +/// - 200 (OK) +/// - 400 (Missing or invalid parameters) **Not Retryable** +/// - 401 (Unauthorized, expired token) **Retryable** +/// - 403 (IMDS disabled): **Not Retryable** +/// - 404 (Not found): **Not Retryable** +/// - >=500 (server error): **Retryable** +impl ClassifyResponse, SdkError> for ImdsErrorPolicy { + fn classify(&self, response: Result<&SdkSuccess, &SdkError>) -> RetryKind { + match response { + Ok(_) => RetryKind::Unnecessary, + Err(SdkError::ResponseError { raw, .. }) | Err(SdkError::ServiceError { raw, .. 
}) => { + ImdsErrorPolicy::classify(raw) + } + _ => RetryKind::UnretryableFailure, + } + } +} + +#[cfg(test)] +pub(crate) mod test { + use std::collections::HashMap; + use std::error::Error; + use std::io; + use std::time::{Duration, UNIX_EPOCH}; + + use aws_smithy_async::rt::sleep::TokioSleep; + use aws_smithy_client::erase::DynConnector; + use aws_smithy_client::test_connection::{capture_request, TestConnection}; + use aws_smithy_client::{SdkError, SdkSuccess}; + use aws_smithy_http::body::SdkBody; + use aws_smithy_http::operation; + use aws_smithy_types::retry::RetryKind; + use aws_types::os_shim_internal::{Env, Fs, ManualTimeSource, TimeSource}; + use http::header::USER_AGENT; + use http::Uri; + use serde::Deserialize; + use tracing_test::traced_test; + + use crate::imds::client::{Client, EndpointMode, ImdsErrorPolicy}; + use crate::provider_config::ProviderConfig; + + const TOKEN_A: &str = "AQAEAFTNrA4eEGx0AQgJ1arIq_Cc-t4tWt3fB0Hd8RKhXlKc5ccvhg=="; + const TOKEN_B: &str = "alternatetoken=="; + + pub(crate) fn token_request(base: &str, ttl: u32) -> http::Request { + http::Request::builder() + .uri(format!("{}/latest/api/token", base)) + .header("x-aws-ec2-metadata-token-ttl-seconds", ttl) + .method("PUT") + .body(SdkBody::empty()) + .unwrap() + } + + pub(crate) fn token_response(ttl: u32, token: &'static str) -> http::Response<&'static str> { + http::Response::builder() + .status(200) + .header("X-aws-ec2-metadata-token-ttl-seconds", ttl) + .body(token) + .unwrap() + } + + pub(crate) fn imds_request(path: &'static str, token: &str) -> http::Request { + http::Request::builder() + .uri(Uri::from_static(path)) + .method("GET") + .header("x-aws-ec2-metadata-token", token) + .body(SdkBody::empty()) + .unwrap() + } + + pub(crate) fn imds_response(body: &'static str) -> http::Response<&'static str> { + http::Response::builder().status(200).body(body).unwrap() + } + + pub(crate) async fn make_client(conn: &TestConnection) -> super::Client + where + SdkBody: From, + T: Send + 'static, + { + tokio::time::pause(); + super::Client::builder() + .configure( + &ProviderConfig::no_configuration() + .with_sleep(TokioSleep::new()) + .with_http_connector(DynConnector::new(conn.clone())), + ) + .build() + .await + .expect("valid client") + } + + #[tokio::test] + async fn client_caches_token() { + let connection = TestConnection::new(vec![ + ( + token_request("http://169.254.169.254", 21600), + token_response(21600, TOKEN_A), + ), + ( + imds_request("http://169.254.169.254/latest/metadata", TOKEN_A), + imds_response(r#"test-imds-output"#), + ), + ( + imds_request("http://169.254.169.254/latest/metadata2", TOKEN_A), + imds_response("output2"), + ), + ]); + let client = make_client(&connection).await; + // load once + let metadata = client.get("/latest/metadata").await.expect("failed"); + assert_eq!(metadata, "test-imds-output"); + // load again: the cached token should be used + let metadata = client.get("/latest/metadata2").await.expect("failed"); + assert_eq!(metadata, "output2"); + connection.assert_requests_match(&[]); + } + + #[tokio::test] + async fn token_can_expire() { + let connection = TestConnection::new(vec![ + ( + token_request("http://[fd00:ec2::254]", 600), + token_response(600, TOKEN_A), + ), + ( + imds_request("http://[fd00:ec2::254]/latest/metadata", TOKEN_A), + imds_response(r#"test-imds-output1"#), + ), + ( + token_request("http://[fd00:ec2::254]", 600), + token_response(600, TOKEN_B), + ), + ( + imds_request("http://[fd00:ec2::254]/latest/metadata", TOKEN_B), + 
imds_response(r#"test-imds-output2"#), + ), + ]); + let mut time_source = ManualTimeSource::new(UNIX_EPOCH); + tokio::time::pause(); + let client = super::Client::builder() + .configure( + &ProviderConfig::no_configuration() + .with_http_connector(DynConnector::new(connection.clone())) + .with_time_source(TimeSource::manual(&time_source)) + .with_sleep(TokioSleep::new()), + ) + .endpoint_mode(EndpointMode::IpV6) + .token_ttl(Duration::from_secs(600)) + .build() + .await + .expect("valid client"); + + let resp1 = client.get("/latest/metadata").await.expect("success"); + // now the cached credential has expired + time_source.advance(Duration::from_secs(600)); + let resp2 = client.get("/latest/metadata").await.expect("success"); + connection.assert_requests_match(&[]); + assert_eq!(resp1, "test-imds-output1"); + assert_eq!(resp2, "test-imds-output2"); + } + + /// Tokens are refreshed up to 120 seconds early to avoid using an expired token. + #[tokio::test] + async fn token_refresh_buffer() { + let connection = TestConnection::new(vec![ + ( + token_request("http://[fd00:ec2::254]", 600), + token_response(600, TOKEN_A), + ), + // t = 0 + ( + imds_request("http://[fd00:ec2::254]/latest/metadata", TOKEN_A), + imds_response(r#"test-imds-output1"#), + ), + // t = 400 (no refresh) + ( + imds_request("http://[fd00:ec2::254]/latest/metadata", TOKEN_A), + imds_response(r#"test-imds-output2"#), + ), + // t = 550 (within buffer) + ( + token_request("http://[fd00:ec2::254]", 600), + token_response(600, TOKEN_B), + ), + ( + imds_request("http://[fd00:ec2::254]/latest/metadata", TOKEN_B), + imds_response(r#"test-imds-output3"#), + ), + ]); + tokio::time::pause(); + let mut time_source = ManualTimeSource::new(UNIX_EPOCH); + let client = super::Client::builder() + .configure( + &ProviderConfig::no_configuration() + .with_http_connector(DynConnector::new(connection.clone())) + .with_time_source(TimeSource::manual(&time_source)), + ) + .endpoint_mode(EndpointMode::IpV6) + .token_ttl(Duration::from_secs(600)) + .build() + .await + .expect("valid client"); + + let resp1 = client.get("/latest/metadata").await.expect("success"); + // now the cached credential has expired + time_source.advance(Duration::from_secs(400)); + let resp2 = client.get("/latest/metadata").await.expect("success"); + time_source.advance(Duration::from_secs(150)); + let resp3 = client.get("/latest/metadata").await.expect("success"); + connection.assert_requests_match(&[]); + assert_eq!(resp1, "test-imds-output1"); + assert_eq!(resp2, "test-imds-output2"); + assert_eq!(resp3, "test-imds-output3"); + } + + /// 500 error during the GET should be retried + #[tokio::test] + #[traced_test] + async fn retry_500() { + let connection = TestConnection::new(vec![ + ( + token_request("http://169.254.169.254", 21600), + token_response(21600, TOKEN_A), + ), + ( + imds_request("http://169.254.169.254/latest/metadata", TOKEN_A), + http::Response::builder().status(500).body("").unwrap(), + ), + ( + imds_request("http://169.254.169.254/latest/metadata", TOKEN_A), + imds_response("ok"), + ), + ]); + let client = make_client(&connection).await; + assert_eq!(client.get("/latest/metadata").await.expect("success"), "ok"); + connection.assert_requests_match(&[]); + + // all requests should have a user agent header + for request in connection.requests().iter() { + assert!(request.actual.headers().get(USER_AGENT).is_some()); + } + } + + /// 500 error during token acquisition should be retried + #[tokio::test] + #[traced_test] + async fn retry_token_failure() { + let 
connection = TestConnection::new(vec![ + ( + token_request("http://169.254.169.254", 21600), + http::Response::builder().status(500).body("").unwrap(), + ), + ( + token_request("http://169.254.169.254", 21600), + token_response(21600, TOKEN_A), + ), + ( + imds_request("http://169.254.169.254/latest/metadata", TOKEN_A), + imds_response("ok"), + ), + ]); + let client = make_client(&connection).await; + assert_eq!(client.get("/latest/metadata").await.expect("success"), "ok"); + connection.assert_requests_match(&[]); + } + + /// 403 responses from IMDS during token acquisition MUST NOT be retried + #[tokio::test] + #[traced_test] + async fn no_403_retry() { + let connection = TestConnection::new(vec![( + token_request("http://169.254.169.254", 21600), + http::Response::builder().status(403).body("").unwrap(), + )]); + let client = make_client(&connection).await; + let err = client.get("/latest/metadata").await.expect_err("no token"); + assert!(format!("{}", err).contains("forbidden"), "{}", err); + connection.assert_requests_match(&[]); + } + + /// Successful responses should classify as `RetryKind::Unnecessary` + #[test] + fn successful_response_properly_classified() { + use aws_smithy_http::retry::ClassifyResponse; + + let policy = ImdsErrorPolicy; + fn response_200() -> operation::Response { + operation::Response::new(imds_response("").map(|_| SdkBody::empty())) + } + let success = SdkSuccess { + raw: response_200(), + parsed: (), + }; + assert_eq!( + RetryKind::Unnecessary, + policy.classify(Ok::<_, &SdkError<()>>(&success)) + ); + + // Emulate a failure to parse the response body (using an io error since it's easy to construct in a test) + let failure = SdkError::<()>::ResponseError { + err: Box::new(io::Error::new(io::ErrorKind::BrokenPipe, "fail to parse")), + raw: response_200(), + }; + assert_eq!( + RetryKind::UnretryableFailure, + policy.classify(Err::<&SdkSuccess<()>, _>(&failure)) + ); + } + + // since tokens are sent as headers, the tokens need to be valid header values + #[tokio::test] + async fn invalid_token() { + let connection = TestConnection::new(vec![( + token_request("http://169.254.169.254", 21600), + token_response(21600, "replaced").map(|_| vec![1, 0]), + )]); + let client = make_client(&connection).await; + let err = client.get("/latest/metadata").await.expect_err("no token"); + assert!(format!("{}", err).contains("Invalid Token"), "{}", err); + connection.assert_requests_match(&[]); + } + + #[tokio::test] + async fn non_utf8_response() { + let connection = TestConnection::new(vec![ + ( + token_request("http://169.254.169.254", 21600), + token_response(21600, TOKEN_A).map(SdkBody::from), + ), + ( + imds_request("http://169.254.169.254/latest/metadata", TOKEN_A), + http::Response::builder() + .status(200) + .body(SdkBody::from(vec![0xA0 as u8, 0xA1 as u8])) + .unwrap(), + ), + ]); + let client = make_client(&connection).await; + let err = client.get("/latest/metadata").await.expect_err("no token"); + assert!(format!("{}", err).contains("invalid UTF-8"), "{}", err); + connection.assert_requests_match(&[]); + } + + /// Verify that the end-to-end real client has a 1-second connect timeout + #[tokio::test] + #[cfg(any(feature = "rustls", feature = "native-tls"))] + async fn one_second_connect_timeout() { + use crate::imds::client::ImdsError; + use std::time::SystemTime; + + let client = Client::builder() + // 240.* can never be resolved + .endpoint(Uri::from_static("http://240.0.0.0")) + .build() + .await + .expect("valid client"); + let now = SystemTime::now(); + let resp 
= client + .get("/latest/metadata") + .await + .expect_err("240.0.0.0 will never resolve"); + let time_elapsed = now.elapsed().unwrap(); + assert!( + time_elapsed > Duration::from_secs(1), + "time_elapsed should be greater than 1s but was {:?}", + time_elapsed + ); + assert!( + time_elapsed < Duration::from_secs(2), + "time_elapsed should be less than 2s but was {:?}", + time_elapsed + ); + match resp { + ImdsError::FailedToLoadToken(err) if format!("{}", err).contains("timeout") => {} // ok, + other => panic!( + "wrong error, expected construction failure with TimedOutError inside: {}", + other + ), + } + } + + #[derive(Debug, Deserialize)] + struct ImdsConfigTest { + env: HashMap, + fs: HashMap, + endpoint_override: Option, + mode_override: Option, + result: Result, + docs: String, + } + + #[tokio::test] + async fn config_tests() -> Result<(), Box> { + let test_cases = std::fs::read_to_string("test-data/imds-config/imds-tests.json")?; + #[derive(Deserialize)] + struct TestCases { + tests: Vec, + } + + let test_cases: TestCases = serde_json::from_str(&test_cases)?; + let test_cases = test_cases.tests; + for test in test_cases { + check(test).await; + } + Ok(()) + } + + async fn check(test_case: ImdsConfigTest) { + let (server, watcher) = capture_request(None); + let provider_config = ProviderConfig::no_configuration() + .with_env(Env::from(test_case.env)) + .with_fs(Fs::from_map(test_case.fs)) + .with_http_connector(DynConnector::new(server)); + let mut imds_client = Client::builder().configure(&provider_config); + if let Some(endpoint_override) = test_case.endpoint_override { + imds_client = imds_client.endpoint(endpoint_override.parse::().unwrap()); + } + + if let Some(mode_override) = test_case.mode_override { + imds_client = imds_client.endpoint_mode(mode_override.parse().unwrap()); + } + + let imds_client = imds_client.build().await; + let (uri, imds_client) = match (&test_case.result, imds_client) { + (Ok(uri), Ok(client)) => (uri, client), + (Err(test), Ok(_client)) => panic!( + "test should fail: {} but a valid client was made. {}", + test, test_case.docs + ), + (Err(substr), Err(err)) => { + assert!( + format!("{}", err).contains(substr), + "`{}` did not contain `{}`", + err, + substr + ); + return; + } + (Ok(_uri), Err(e)) => panic!( + "a valid client should be made but: {}. {}", + e, test_case.docs + ), + }; + // this request will fail, we just want to capture the endpoint configuration + let _ = imds_client.get("/hello").await; + assert_eq!(&watcher.expect_request().uri().to_string(), uri); + } +} diff --git a/patch/aws-config/src/imds/client/token.rs b/patch/aws-config/src/imds/client/token.rs new file mode 100644 index 0000000000000..dcb185b75ce05 --- /dev/null +++ b/patch/aws-config/src/imds/client/token.rs @@ -0,0 +1,197 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! IMDS Token Middleware +//! Requests to IMDS are two part: +//! 1. A PUT request to the token API is made +//! 2. A GET request is made to the requested API. The Token is added as a header. +//! +//! This module implements a middleware that will: +//! - Load a token via the token API +//! - Cache the token according to the TTL +//! - Retry token loading when it fails +//! 
- Attach the token to the request in the `x-aws-ec2-metadata-token` header + +use std::fmt::{Debug, Formatter}; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::time::{Duration, SystemTime}; + +use aws_http::user_agent::UserAgentStage; +use aws_smithy_async::rt::sleep::AsyncSleep; +use aws_smithy_client::erase::DynConnector; +use aws_smithy_client::retry; +use aws_smithy_http::body::SdkBody; +use aws_smithy_http::endpoint::Endpoint; +use aws_smithy_http::middleware::AsyncMapRequest; +use aws_smithy_http::operation; +use aws_smithy_http::operation::Operation; +use aws_smithy_http::operation::{Metadata, Request}; +use aws_smithy_http::response::ParseStrictResponse; +use aws_smithy_http_tower::map_request::MapRequestLayer; +use aws_smithy_types::timeout; +use aws_types::os_shim_internal::TimeSource; + +use http::{HeaderValue, Uri}; + +use crate::cache::ExpiringCache; +use crate::imds::client::{ImdsError, ImdsErrorPolicy, TokenError}; + +/// Token Refresh Buffer +/// +/// Tokens are cached to remove the need to reload the token between subsequent requests. To ensure +/// that a request never fails with a 401 (expired token), a buffer window exists during which the token +/// may not be expired, but will still be refreshed. +const TOKEN_REFRESH_BUFFER: Duration = Duration::from_secs(120); + +const X_AWS_EC2_METADATA_TOKEN_TTL_SECONDS: &str = "x-aws-ec2-metadata-token-ttl-seconds"; +const X_AWS_EC2_METADATA_TOKEN: &str = "x-aws-ec2-metadata-token"; + +/// IMDS Token +#[derive(Clone)] +struct Token { + value: HeaderValue, + expiry: SystemTime, +} + +/// Token Middleware +/// +/// Token middleware will load/cache a token when required and handle caching/expiry. +/// +/// It will attach the token to the incoming request on the `x-aws-ec2-metadata-token` header. 
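+///
+/// Illustrative sketch (not part of the original patch) of the token request this middleware
+/// issues; the real logic lives in `get_token` below:
+/// ```no_run
+/// # fn docs() {
+/// // A session token is acquired with a PUT request carrying the desired TTL header;
+/// // the returned token is then attached to subsequent GET requests.
+/// let token_request = http::Request::builder()
+///     .method("PUT")
+///     .uri("http://169.254.169.254/latest/api/token")
+///     .header("x-aws-ec2-metadata-token-ttl-seconds", 21600)
+///     .body(())
+///     .expect("valid request");
+/// # let _ = token_request;
+/// # }
+/// ```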
+#[derive(Clone)] +pub(super) struct TokenMiddleware { + client: Arc>>, + token_parser: GetTokenResponseHandler, + token: ExpiringCache, + time_source: TimeSource, + endpoint: Endpoint, + token_ttl: Duration, +} + +impl Debug for TokenMiddleware { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "ImdsTokenMiddleware") + } +} + +impl TokenMiddleware { + pub(super) fn new( + connector: DynConnector, + time_source: TimeSource, + endpoint: Endpoint, + token_ttl: Duration, + retry_config: retry::Config, + timeout_config: timeout::Config, + sleep_impl: Option>, + ) -> Self { + let inner_client = aws_smithy_client::Builder::new() + .connector(connector) + .sleep_impl(sleep_impl) + .build() + .with_retry_config(retry_config) + .with_timeout_config(timeout_config); + let client = Arc::new(inner_client); + Self { + client, + token_parser: GetTokenResponseHandler { + time: time_source.clone(), + }, + token: ExpiringCache::new(TOKEN_REFRESH_BUFFER), + time_source, + endpoint, + token_ttl, + } + } + async fn add_token(&self, request: Request) -> Result { + let preloaded_token = self + .token + .yield_or_clear_if_expired(self.time_source.now()) + .await; + let token = match preloaded_token { + Some(token) => Ok(token), + None => { + self.token + .get_or_load(|| async move { self.get_token().await }) + .await + } + }?; + request.augment(|mut request, _| { + request + .headers_mut() + .insert(X_AWS_EC2_METADATA_TOKEN, token.value); + Ok(request) + }) + } + + async fn get_token(&self) -> Result<(Token, SystemTime), ImdsError> { + let mut uri = Uri::from_static("/latest/api/token"); + self.endpoint.set_endpoint(&mut uri, None); + let request = http::Request::builder() + .header( + X_AWS_EC2_METADATA_TOKEN_TTL_SECONDS, + self.token_ttl.as_secs(), + ) + .uri(uri) + .method("PUT") + .body(SdkBody::empty()) + .expect("valid HTTP request"); + let mut request = operation::Request::new(request); + request.properties_mut().insert(super::user_agent()); + + let operation = Operation::new(request, self.token_parser.clone()) + .with_retry_policy(ImdsErrorPolicy) + .with_metadata(Metadata::new("get-token", "imds")); + let response = self + .client + .call(operation) + .await + .map_err(ImdsError::FailedToLoadToken)?; + let expiry = response.expiry; + Ok((response, expiry)) + } +} + +impl AsyncMapRequest for TokenMiddleware { + type Error = ImdsError; + type Future = Pin> + Send + 'static>>; + + fn apply(&self, request: Request) -> Self::Future { + let this = self.clone(); + Box::pin(async move { this.add_token(request).await }) + } +} + +#[derive(Clone)] +struct GetTokenResponseHandler { + time: TimeSource, +} + +impl ParseStrictResponse for GetTokenResponseHandler { + type Output = Result; + + fn parse(&self, response: &http::Response) -> Self::Output { + match response.status().as_u16() { + 400 => return Err(TokenError::InvalidParameters), + 403 => return Err(TokenError::Forbidden), + _ => {} + } + let value = HeaderValue::from_maybe_shared(response.body().clone()) + .map_err(|_| TokenError::InvalidToken)?; + let ttl: u64 = response + .headers() + .get(X_AWS_EC2_METADATA_TOKEN_TTL_SECONDS) + .ok_or(TokenError::NoTtl)? + .to_str() + .map_err(|_| TokenError::InvalidTtl)? 
+ .parse() + .map_err(|_parse_error| TokenError::InvalidTtl)?; + Ok(Token { + value, + expiry: self.time.now() + Duration::from_secs(ttl), + }) + } +} diff --git a/patch/aws-config/src/imds/credentials.rs b/patch/aws-config/src/imds/credentials.rs new file mode 100644 index 0000000000000..e06a48acf6a85 --- /dev/null +++ b/patch/aws-config/src/imds/credentials.rs @@ -0,0 +1,251 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! IMDSv2 Credentials Provider +//! +//! # Important +//! This credential provider will NOT fallback to IMDSv1. Ensure that IMDSv2 is enabled on your instances. + +use crate::imds; +use crate::imds::client::{ImdsError, LazyClient}; +use crate::json_credentials::{parse_json_credentials, JsonCredentials}; +use crate::provider_config::ProviderConfig; +use aws_smithy_client::SdkError; +use aws_types::credentials::{future, CredentialsError, ProvideCredentials}; +use aws_types::os_shim_internal::Env; +use aws_types::{credentials, Credentials}; +use std::borrow::Cow; + +/// IMDSv2 Credentials Provider +/// +/// _Note: This credentials provider will NOT fallback to the IMDSv1 flow._ +#[derive(Debug)] +pub struct ImdsCredentialsProvider { + client: LazyClient, + env: Env, + profile: Option, +} + +/// Builder for [`ImdsCredentialsProvider`] +#[derive(Default, Debug)] +pub struct Builder { + provider_config: Option, + profile_override: Option, + imds_override: Option, +} + +impl Builder { + /// Override the configuration used for this provider + pub fn configure(mut self, provider_config: &ProviderConfig) -> Self { + self.provider_config = Some(provider_config.clone()); + self + } + + /// Override the [instance profile](instance-profile) used for this provider. + /// + /// When retrieving IMDS credentials, a call must first be made to + /// `/latest/meta-data/iam/security-credentials`. This returns the instance + /// profile used. By setting this parameter, retrieving the profile is skipped + /// and the provided value is used instead. + /// + /// [instance-profile]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#ec2-instance-profile + pub fn profile(mut self, profile: impl Into) -> Self { + self.profile_override = Some(profile.into()); + self + } + + /// Override the IMDS client used for this provider + /// + /// The IMDS client will be loaded and configured via `~/.aws/config` and environment variables, + /// however, if necessary the entire client may be provided directly. + /// + /// For more information about IMDS client configuration loading see [`imds::Client`] + pub fn imds_client(mut self, client: imds::Client) -> Self { + if self.provider_config.is_some() { + tracing::warn!("provider config override by a full client override"); + } + self.imds_override = Some(client); + self + } + + /// Create an [`ImdsCredentialsProvider`] from this builder. 
+ pub fn build(self) -> ImdsCredentialsProvider { + let provider_config = self.provider_config.unwrap_or_default(); + let env = provider_config.env(); + let client = self + .imds_override + .map(LazyClient::from_ready_client) + .unwrap_or_else(|| { + imds::Client::builder() + .configure(&provider_config) + .build_lazy() + }); + ImdsCredentialsProvider { + client, + env, + profile: self.profile_override, + } + } +} + +mod codes { + pub(super) const ASSUME_ROLE_UNAUTHORIZED_ACCESS: &str = "AssumeRoleUnauthorizedAccess"; +} + +impl ProvideCredentials for ImdsCredentialsProvider { + fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials<'a> + where + Self: 'a, + { + future::ProvideCredentials::new(self.credentials()) + } +} + +impl ImdsCredentialsProvider { + /// Builder for [`ImdsCredentialsProvider`] + pub fn builder() -> Builder { + Builder::default() + } + + fn imds_disabled(&self) -> bool { + match self.env.get(super::env::EC2_METADATA_DISABLED) { + Ok(value) => value.eq_ignore_ascii_case("true"), + _ => false, + } + } + + /// Load an inner IMDS client from the OnceCell + async fn client(&self) -> Result<&imds::Client, CredentialsError> { + self.client.client().await.map_err(|build_error| { + // need to format the build error since we don't own it and it can't be cloned + CredentialsError::invalid_configuration(format!("{}", build_error)) + }) + } + + /// Retrieve the instance profile from IMDS + async fn get_profile_uncached(&self) -> Result { + match self + .client() + .await? + .get("/latest/meta-data/iam/security-credentials") + .await + { + Ok(profile) => Ok(profile), + Err(ImdsError::ErrorResponse { response, .. }) if response.status().as_u16() == 404 => { + tracing::info!( + "received 404 from IMDS when loading profile information. \ + Hint: This instance may not have an IAM role associated." + ); + Err(CredentialsError::not_loaded("received 404 from IMDS")) + } + Err(ImdsError::FailedToLoadToken(SdkError::DispatchFailure(err))) => Err( + CredentialsError::not_loaded(format!("could not communicate with imds: {}", err)), + ), + Err(other) => Err(CredentialsError::provider_error(other)), + } + } + + async fn credentials(&self) -> credentials::Result { + if self.imds_disabled() { + tracing::debug!("IMDS disabled because $AWS_EC2_METADATA_DISABLED was set to `true`"); + return Err(CredentialsError::not_loaded( + "IMDS disabled by $AWS_ECS_METADATA_DISABLED", + )); + } + tracing::debug!("loading credentials from IMDS"); + let profile: Cow = match &self.profile { + Some(profile) => profile.into(), + None => self.get_profile_uncached().await?.into(), + }; + tracing::debug!(profile = %profile, "loaded profile"); + let credentials = self + .client() + .await? + .get(&format!( + "/latest/meta-data/iam/security-credentials/{}", + profile + )) + .await + .map_err(CredentialsError::provider_error)?; + match parse_json_credentials(&credentials) { + Ok(JsonCredentials::RefreshableCredentials { + access_key_id, + secret_access_key, + session_token, + expiration, + .. + }) => Ok(Credentials::new( + access_key_id, + secret_access_key, + Some(session_token.to_string()), + expiration.into(), + "IMDSv2", + )), + Ok(JsonCredentials::Error { code, message }) + if code == codes::ASSUME_ROLE_UNAUTHORIZED_ACCESS => + { + Err(CredentialsError::invalid_configuration(format!( + "Incorrect IMDS/IAM configuration: [{}] {}. 
\ + Hint: Does this role have a trust relationship with EC2?", + code, message + ))) + } + Ok(JsonCredentials::Error { code, message }) => { + Err(CredentialsError::provider_error(format!( + "Error retrieving credentials from IMDS: {} {}", + code, message + ))) + } + // got bad data from IMDS, should not occur during normal operation: + Err(invalid) => Err(CredentialsError::unhandled(invalid)), + } + } +} + +#[cfg(test)] +mod test { + use crate::imds::client::test::{ + imds_request, imds_response, make_client, token_request, token_response, + }; + use crate::imds::credentials::ImdsCredentialsProvider; + use aws_smithy_client::test_connection::TestConnection; + use aws_types::credentials::ProvideCredentials; + + const TOKEN_A: &str = "token_a"; + + #[tokio::test] + async fn profile_is_not_cached() { + let connection = TestConnection::new(vec![ + ( + token_request("http://169.254.169.254", 21600), + token_response(21600, TOKEN_A), + ), + ( + imds_request("http://169.254.169.254/latest/meta-data/iam/security-credentials", TOKEN_A), + imds_response(r#"profile-name"#), + ), + ( + imds_request("http://169.254.169.254/latest/meta-data/iam/security-credentials/profile-name", TOKEN_A), + imds_response("{\n \"Code\" : \"Success\",\n \"LastUpdated\" : \"2021-09-20T21:42:26Z\",\n \"Type\" : \"AWS-HMAC\",\n \"AccessKeyId\" : \"ASIARTEST\",\n \"SecretAccessKey\" : \"testsecret\",\n \"Token\" : \"testtoken\",\n \"Expiration\" : \"2021-09-21T04:16:53Z\"\n}"), + ), + ( + imds_request("http://169.254.169.254/latest/meta-data/iam/security-credentials", TOKEN_A), + imds_response(r#"different-profile"#), + ), + ( + imds_request("http://169.254.169.254/latest/meta-data/iam/security-credentials/different-profile", TOKEN_A), + imds_response("{\n \"Code\" : \"Success\",\n \"LastUpdated\" : \"2021-09-20T21:42:26Z\",\n \"Type\" : \"AWS-HMAC\",\n \"AccessKeyId\" : \"ASIARTEST2\",\n \"SecretAccessKey\" : \"testsecret\",\n \"Token\" : \"testtoken\",\n \"Expiration\" : \"2021-09-21T04:16:53Z\"\n}"), + ), + ]); + let client = ImdsCredentialsProvider::builder() + .imds_client(make_client(&connection).await) + .build(); + let creds1 = client.provide_credentials().await.expect("valid creds"); + let creds2 = client.provide_credentials().await.expect("valid creds"); + assert_eq!(creds1.access_key_id(), "ASIARTEST"); + assert_eq!(creds2.access_key_id(), "ASIARTEST2"); + connection.assert_requests_match(&[]); + } +} diff --git a/patch/aws-config/src/imds/mod.rs b/patch/aws-config/src/imds/mod.rs new file mode 100644 index 0000000000000..c6190a876bedf --- /dev/null +++ b/patch/aws-config/src/imds/mod.rs @@ -0,0 +1,19 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! IMDSv2 Client, credential, and region provider +//! +//! See [`client`] for more information. +pub mod client; + +pub mod credentials; +pub mod region; + +mod env { + pub(crate) const EC2_METADATA_DISABLED: &str = "AWS_EC2_METADATA_DISABLED"; +} + +#[doc(inline)] +pub use client::Client; diff --git a/patch/aws-config/src/imds/region.rs b/patch/aws-config/src/imds/region.rs new file mode 100644 index 0000000000000..ec7b5124616bf --- /dev/null +++ b/patch/aws-config/src/imds/region.rs @@ -0,0 +1,177 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! IMDS Region Provider +//! +//! Load region from IMDS from `/latest/meta-data/placement/region` +//! This provider has a 5 second timeout. 
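+//!
+//! # Example
+//! A minimal, illustrative sketch (not part of the original patch):
+//! ```no_run
+//! # async fn docs() {
+//! use aws_config::imds::region::ImdsRegionProvider;
+//! let provider = ImdsRegionProvider::builder().build();
+//! if let Some(region) = provider.region().await {
+//!     println!("region loaded from IMDS: {:?}", region);
+//! }
+//! # }
+//! ```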
+ +use crate::imds; +use crate::imds::client::LazyClient; +use crate::meta::region::{future, ProvideRegion}; +use crate::provider_config::ProviderConfig; + +use aws_types::os_shim_internal::Env; +use aws_types::region::Region; + +use tracing::Instrument; + +/// IMDSv2 Region Provider +/// +/// This provider is included in the default region chain, so it does not need to be used manually. +#[derive(Debug)] +pub struct ImdsRegionProvider { + client: LazyClient, + env: Env, +} + +const REGION_PATH: &str = "/latest/meta-data/placement/region"; + +impl ImdsRegionProvider { + /// Builder for [`ImdsRegionProvider`] + pub fn builder() -> Builder { + Builder::default() + } + + fn imds_disabled(&self) -> bool { + match self.env.get(super::env::EC2_METADATA_DISABLED) { + Ok(value) => value.eq_ignore_ascii_case("true"), + _ => false, + } + } + + /// Load a region from IMDS + /// + /// This provider uses the API `/latest/meta-data/placement/region` + pub async fn region(&self) -> Option { + if self.imds_disabled() { + tracing::debug!("not using IMDS to load region, IMDS is disabled"); + return None; + } + let client = self.client.client().await.ok()?; + match client.get(REGION_PATH).await { + Ok(region) => { + tracing::info!(region = % region, "loaded region from IMDS"); + Some(Region::new(region)) + } + Err(err) => { + tracing::warn!(err = % err, "failed to load region from IMDS"); + None + } + } + } +} + +impl ProvideRegion for ImdsRegionProvider { + fn region(&self) -> future::ProvideRegion { + future::ProvideRegion::new( + self.region() + .instrument(tracing::debug_span!("imds_load_region")), + ) + } +} + +/// Builder for [`ImdsRegionProvider`] +#[derive(Default)] +pub struct Builder { + provider_config: Option, + imds_client_override: Option, +} + +impl Builder { + /// Set configuration options of the [`Builder`] + pub fn configure(self, provider_config: &ProviderConfig) -> Self { + Self { + provider_config: Some(provider_config.clone()), + ..self + } + } + + /// Override the IMDS client used to load the region + pub fn imds_client(mut self, imds_client: imds::Client) -> Self { + self.imds_client_override = Some(imds_client); + self + } + + /// Create an [`ImdsRegionProvider`] from this builder + pub fn build(self) -> ImdsRegionProvider { + let provider_config = self.provider_config.unwrap_or_default(); + let client = self + .imds_client_override + .map(LazyClient::from_ready_client) + .unwrap_or_else(|| { + imds::Client::builder() + .configure(&provider_config) + .build_lazy() + }); + ImdsRegionProvider { + client, + env: provider_config.env(), + } + } +} + +#[cfg(test)] +mod test { + use crate::imds::client::test::{imds_request, imds_response, token_request, token_response}; + use crate::imds::region::ImdsRegionProvider; + use crate::provider_config::ProviderConfig; + use aws_sdk_sts::Region; + use aws_smithy_async::rt::sleep::TokioSleep; + use aws_smithy_client::erase::DynConnector; + use aws_smithy_client::test_connection::TestConnection; + use aws_smithy_http::body::SdkBody; + use tracing_test::traced_test; + + #[tokio::test] + async fn load_region() { + let conn = TestConnection::new(vec![ + ( + token_request("http://169.254.169.254", 21600), + token_response(21600, "token"), + ), + ( + imds_request( + "http://169.254.169.254/latest/meta-data/placement/region", + "token", + ), + imds_response("eu-west-1"), + ), + ]); + let provider = ImdsRegionProvider::builder() + .configure( + &ProviderConfig::no_configuration() + .with_http_connector(DynConnector::new(conn)) + 
.with_sleep(TokioSleep::new()), + ) + .build(); + assert_eq!( + provider.region().await.expect("returns region"), + Region::new("eu-west-1") + ); + } + + #[traced_test] + #[tokio::test] + async fn no_region_imds_disabled() { + let conn = TestConnection::new(vec![( + token_request("http://169.254.169.254", 21600), + http::Response::builder() + .status(403) + .body(SdkBody::empty()) + .unwrap(), + )]); + let provider = ImdsRegionProvider::builder() + .configure( + &ProviderConfig::no_configuration() + .with_http_connector(DynConnector::new(conn)) + .with_sleep(TokioSleep::new()), + ) + .build(); + assert_eq!(provider.region().await, None); + assert!(logs_contain("failed to load region from IMDS")); + assert!(logs_contain("IMDS is disabled")); + } +} diff --git a/patch/aws-config/src/json_credentials.rs b/patch/aws-config/src/json_credentials.rs new file mode 100644 index 0000000000000..22a5f00c8bc16 --- /dev/null +++ b/patch/aws-config/src/json_credentials.rs @@ -0,0 +1,377 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use aws_smithy_json::deserialize::token::skip_value; +use aws_smithy_json::deserialize::{json_token_iter, EscapeError, Token}; +use aws_smithy_types::date_time::Format; +use aws_smithy_types::DateTime; +use std::borrow::Cow; +use std::convert::TryFrom; +use std::error::Error; +use std::fmt::{Display, Formatter}; +use std::time::SystemTime; + +#[derive(Debug)] +pub(crate) enum InvalidJsonCredentials { + /// The response did not contain valid JSON + JsonError(Box), + /// The response was missing a required field + MissingField(&'static str), + + /// A field was invalid + InvalidField { + field: &'static str, + err: Box, + }, + + /// Another unhandled error occurred + Other(Cow<'static, str>), +} + +impl From for InvalidJsonCredentials { + fn from(err: EscapeError) -> Self { + InvalidJsonCredentials::JsonError(err.into()) + } +} + +impl From for InvalidJsonCredentials { + fn from(err: aws_smithy_json::deserialize::Error) -> Self { + InvalidJsonCredentials::JsonError(err.into()) + } +} + +impl Display for InvalidJsonCredentials { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + InvalidJsonCredentials::JsonError(json) => { + write!(f, "invalid JSON in response: {}", json) + } + InvalidJsonCredentials::MissingField(field) => write!( + f, + "Expected field `{}` in response but it was missing", + field + ), + InvalidJsonCredentials::Other(msg) => write!(f, "{}", msg), + InvalidJsonCredentials::InvalidField { field, err } => { + write!(f, "Invalid field in response: `{}`. {}", field, err) + } + } + } +} + +impl Error for InvalidJsonCredentials {} + +#[non_exhaustive] +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum JsonCredentials<'a> { + RefreshableCredentials { + access_key_id: Cow<'a, str>, + secret_access_key: Cow<'a, str>, + session_token: Cow<'a, str>, + expiration: SystemTime, + }, + Error { + code: Cow<'a, str>, + message: Cow<'a, str>, + }, // TODO(https://github.com/awslabs/aws-sdk-rust/issues/340): Add support for static credentials: + // { + // "AccessKeyId" : "MUA...", + // "SecretAccessKey" : "/7PC5om...." 
+ // } + + // TODO(https://github.com/awslabs/aws-sdk-rust/issues/340): Add support for Assume role credentials: + // { + // // fields to construct STS client: + // "Region": "sts-region-name", + // "AccessKeyId" : "MUA...", + // "Expiration" : "2016-02-25T06:03:31Z", // optional + // "SecretAccessKey" : "/7PC5om....", + // "Token" : "AQoDY....=", // optional + // // fields controlling the STS role: + // "RoleArn": "...", // required + // "RoleSessionName": "...", // required + // // and also: DurationSeconds, ExternalId, SerialNumber, TokenCode, Policy + // ... + // } +} + +/// Deserialize an IMDS response from a string +/// +/// There are two levels of error here: the top level distinguishes between a successfully parsed +/// response from the credential provider vs. something invalid / unexpected. The inner error +/// distinguishes between a successful response that contains credentials vs. an error with a code and +/// error message. +/// +/// Keys are case insensitive. +pub(crate) fn parse_json_credentials( + credentials_response: &str, +) -> Result { + let mut code = None; + let mut access_key_id = None; + let mut secret_access_key = None; + let mut session_token = None; + let mut expiration = None; + let mut message = None; + json_parse_loop(credentials_response.as_bytes(), |key, value| { + match key { + /* + "Code": "Success", + "Type": "AWS-HMAC", + "AccessKeyId" : "accessKey", + "SecretAccessKey" : "secret", + "Token" : "token", + "Expiration" : "....", + "LastUpdated" : "2009-11-23T0:00:00Z" + */ + c if c.eq_ignore_ascii_case("Code") => code = Some(value), + c if c.eq_ignore_ascii_case("AccessKeyId") => access_key_id = Some(value), + c if c.eq_ignore_ascii_case("SecretAccessKey") => secret_access_key = Some(value), + c if c.eq_ignore_ascii_case("Token") => session_token = Some(value), + c if c.eq_ignore_ascii_case("Expiration") => expiration = Some(value), + + // Error case handling: message will be set + c if c.eq_ignore_ascii_case("Message") => message = Some(value), + _ => {} + } + })?; + match code { + // IMDS does not appear to reply with a `Code` missing, but documentation indicates it + // may be possible + None | Some(Cow::Borrowed("Success")) => { + let access_key_id = + access_key_id.ok_or(InvalidJsonCredentials::MissingField("AccessKeyId"))?; + let secret_access_key = + secret_access_key.ok_or(InvalidJsonCredentials::MissingField("SecretAccessKey"))?; + let session_token = + session_token.ok_or(InvalidJsonCredentials::MissingField("Token"))?; + let expiration = + expiration.ok_or(InvalidJsonCredentials::MissingField("Expiration"))?; + let expiration = SystemTime::try_from( + DateTime::from_str(expiration.as_ref(), Format::DateTime).map_err(|err| { + InvalidJsonCredentials::InvalidField { + field: "Expiration", + err: err.into(), + } + })?, + ) + .map_err(|_| { + InvalidJsonCredentials::Other( + "credential expiration time cannot be represented by a SystemTime".into(), + ) + })?; + Ok(JsonCredentials::RefreshableCredentials { + access_key_id, + secret_access_key, + session_token, + expiration, + }) + } + Some(other) => Ok(JsonCredentials::Error { + code: other, + message: message.unwrap_or_else(|| "no message".into()), + }), + } +} + +pub(crate) fn json_parse_loop<'a>( + input: &'a [u8], + mut f: impl FnMut(Cow<'a, str>, Cow<'a, str>), +) -> Result<(), InvalidJsonCredentials> { + let mut tokens = json_token_iter(input).peekable(); + if !matches!(tokens.next().transpose()?, Some(Token::StartObject { .. 
})) { + return Err(InvalidJsonCredentials::JsonError( + "expected a JSON document starting with `{`".into(), + )); + } + loop { + match tokens.next().transpose()? { + Some(Token::EndObject { .. }) => break, + Some(Token::ObjectKey { key, .. }) => { + if let Some(Ok(Token::ValueString { value, .. })) = tokens.peek() { + let key = key.to_unescaped()?; + let value = value.to_unescaped()?; + f(key, value) + } + skip_value(&mut tokens)?; + } + other => { + return Err(InvalidJsonCredentials::Other( + format!("expected object key, found: {:?}", other).into(), + )); + } + } + } + if tokens.next().is_some() { + return Err(InvalidJsonCredentials::Other( + "found more JSON tokens after completing parsing".into(), + )); + } + Ok(()) +} + +#[cfg(test)] +mod test { + use crate::json_credentials::{ + parse_json_credentials, InvalidJsonCredentials, JsonCredentials, + }; + use std::time::{Duration, UNIX_EPOCH}; + + #[test] + fn json_credentials_success_response() { + let response = r#" + { + "Code" : "Success", + "LastUpdated" : "2021-09-17T20:57:08Z", + "Type" : "AWS-HMAC", + "AccessKeyId" : "ASIARTEST", + "SecretAccessKey" : "xjtest", + "Token" : "IQote///test", + "Expiration" : "2021-09-18T03:31:56Z" + }"#; + let parsed = parse_json_credentials(response).expect("valid JSON"); + assert_eq!( + parsed, + JsonCredentials::RefreshableCredentials { + access_key_id: "ASIARTEST".into(), + secret_access_key: "xjtest".into(), + session_token: "IQote///test".into(), + expiration: UNIX_EPOCH + Duration::from_secs(1631935916), + } + ) + } + + #[test] + fn json_credentials_invalid_json() { + let error = parse_json_credentials("404: not found").expect_err("no json"); + match error { + InvalidJsonCredentials::JsonError(_) => {} // ok. + err => panic!("incorrect error: {:?}", err), + } + } + + #[test] + fn json_credentials_not_json_object() { + let error = parse_json_credentials("[1,2,3]").expect_err("no json"); + match error { + InvalidJsonCredentials::JsonError(_) => {} // ok. 
+ _ => panic!("incorrect error"), + } + } + + #[test] + fn json_credentials_missing_code() { + let resp = r#"{ + "LastUpdated" : "2021-09-17T20:57:08Z", + "Type" : "AWS-HMAC", + "AccessKeyId" : "ASIARTEST", + "SecretAccessKey" : "xjtest", + "Token" : "IQote///test", + "Expiration" : "2021-09-18T03:31:56Z" + }"#; + let parsed = parse_json_credentials(resp).expect("code not required"); + assert_eq!( + parsed, + JsonCredentials::RefreshableCredentials { + access_key_id: "ASIARTEST".into(), + secret_access_key: "xjtest".into(), + session_token: "IQote///test".into(), + expiration: UNIX_EPOCH + Duration::from_secs(1631935916), + } + ) + } + + #[test] + fn json_credentials_required_session_token() { + let resp = r#"{ + "LastUpdated" : "2021-09-17T20:57:08Z", + "Type" : "AWS-HMAC", + "AccessKeyId" : "ASIARTEST", + "SecretAccessKey" : "xjtest", + "Expiration" : "2021-09-18T03:31:56Z" + }"#; + let parsed = parse_json_credentials(resp).expect_err("token missing"); + assert_eq!( + format!("{}", parsed), + "Expected field `Token` in response but it was missing" + ); + } + + #[test] + fn json_credentials_missing_akid() { + let resp = r#"{ + "Code": "Success", + "LastUpdated" : "2021-09-17T20:57:08Z", + "Type" : "AWS-HMAC", + "SecretAccessKey" : "xjtest", + "Token" : "IQote///test", + "Expiration" : "2021-09-18T03:31:56Z" + }"#; + match parse_json_credentials(resp).expect_err("no code") { + InvalidJsonCredentials::MissingField("AccessKeyId") => {} // ok + resp => panic!("incorrect json_credentials response: {:?}", resp), + } + } + + #[test] + fn json_credentials_error_response() { + let response = r#"{ + "Code" : "AssumeRoleUnauthorizedAccess", + "Message" : "EC2 cannot assume the role integration-test.", + "LastUpdated" : "2021-09-17T20:46:56Z" + }"#; + let parsed = parse_json_credentials(response).expect("valid JSON"); + assert_eq!( + parsed, + JsonCredentials::Error { + code: "AssumeRoleUnauthorizedAccess".into(), + message: "EC2 cannot assume the role integration-test.".into(), + } + ); + } + + /// Validate the specific JSON response format sent by ECS + #[test] + fn json_credentials_ecs() { + // identical, but extra `RoleArn` field is present + let response = r#"{ + "RoleArn":"arn:aws:iam::123456789:role/ecs-task-role", + "AccessKeyId":"ASIARTEST", + "SecretAccessKey":"SECRETTEST", + "Token":"tokenEaCXVzLXdlc3QtMiJGMEQCIHt47W18eF4dYfSlmKGiwuJnqmIS3LMXNYfODBCEhcnaAiAnuhGOpcdIDxin4QFzhtgaCR2MpcVqR8NFJdMgOt0/xyrnAwhhEAEaDDEzNDA5NTA2NTg1NiIM9M9GT+c5UfV/8r7PKsQDUa9xE9Eprz5N+jgxbFSD2aJR2iyXCcP9Q1cOh4fdZhyw2WNmq9XnIa2tkzrreiQ5R2t+kzergJHO1KRZPfesarfJ879aWJCSocsEKh7xXwwzTsVXrNo5eWkpwTh64q+Ksz15eoaBhtrvnGvPx6SmXv7SToi/DTHFafJlT/T9jITACZvZXSE9zfLka26Rna3rI4g0ugowha//j1f/c1XuKloqshpZvMKc561om9Y5fqBv1fRiS2KhetGTcmz3wUqNQAk8Dq9oINS7cCtdIO0atqCK69UaKeJ9uKY8mzY9dFWw2IrkpOoXmA9r955iU0NOz/95jVJiPZ/8aE8vb0t67gQfzBUCfky+mGSGWAfPRXQlFa5AEulCTHPd7IcTVCtasG033oKEKgB8QnTxvM2LaPlwaaHo7MHGYXeUKbn9NRKd8m1ShwmAlr4oKp1vQp6cPHDTsdTfPTzh/ZAjUPs+ljQbAwqXbPQdUUPpOk0vltY8k6Im9EA0pf80iUNoqrixpmPsR2hzI/ybUwdh+QhvCSBx+J8KHqF6X92u4qAVYIxLy/LGZKT9YC6Kr9Gywn+Ro+EK/xl3axHPzNpbjRDJnbW3HrMw5LmmiwY6pgGWgmD6IOq4QYUtu1uhaLQZyoI5o5PWn+d3kqqxifu8D0ykldB3lQGdlJ2rjKJjCdx8fce1SoXao9cc4hiwn39hUPuTqzVwv2zbzCKmNggIpXP6gqyRtUCakf6tI7ZwqTb2S8KF3t4ElIP8i4cPdNoI0JHSC+sT4LDPpUcX1CjGxfvo55mBHJedW3LXve8TRj4UckFXT1gLuTnzqPMrC5AHz4TAt+uv", + "Expiration" : "2009-02-13T23:31:30Z" + }"#; + let parsed = parse_json_credentials(response).expect("valid JSON"); + use std::borrow::Cow; + assert!( + matches!( + &parsed, + JsonCredentials::RefreshableCredentials { + 
access_key_id: Cow::Borrowed("ASIARTEST"), + secret_access_key: Cow::Borrowed("SECRETTEST"), + session_token, + expiration + } if session_token.starts_with("token") && *expiration == UNIX_EPOCH + Duration::from_secs(1234567890) + ), + "{:?}", + parsed + ); + } + + #[test] + fn case_insensitive_code_parsing() { + let response = r#"{ + "code" : "AssumeRoleUnauthorizedAccess", + "message" : "EC2 cannot assume the role integration-test." + }"#; + let parsed = parse_json_credentials(response).expect("valid JSON"); + assert_eq!( + parsed, + JsonCredentials::Error { + code: "AssumeRoleUnauthorizedAccess".into(), + message: "EC2 cannot assume the role integration-test.".into(), + } + ); + } +} diff --git a/patch/aws-config/src/lib.rs b/patch/aws-config/src/lib.rs new file mode 100644 index 0000000000000..bdf407605c62a --- /dev/null +++ b/patch/aws-config/src/lib.rs @@ -0,0 +1,406 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#![warn(missing_docs)] + +//! `aws-config` provides implementations of region, credential resolution. +//! +//! These implementations can be used either via the default chain implementation +//! [`from_env`]/[`ConfigLoader`] or ad-hoc individual credential and region providers. +//! +//! [`ConfigLoader`](ConfigLoader) can combine different configuration sources into an AWS shared-config: +//! [`SdkConfig`](aws_types::SdkConfig). [`SdkConfig`](aws_types::SdkConfig) can be used configure +//! an AWS service client. +//! +//! # Examples +//! +//! Load default SDK configuration: +//! ```no_run +//! # mod aws_sdk_dynamodb { +//! # pub struct Client; +//! # impl Client { +//! # pub fn new(config: &aws_types::SdkConfig) -> Self { Client } +//! # } +//! # } +//! # async fn docs() { +//! let config = aws_config::load_from_env().await; +//! let client = aws_sdk_dynamodb::Client::new(&config); +//! # } +//! ``` +//! +//! Load SDK configuration with a region override: +//! ```no_run +//! # mod aws_sdk_dynamodb { +//! # pub struct Client; +//! # impl Client { +//! # pub fn new(config: &aws_types::SdkConfig) -> Self { Client } +//! # } +//! # } +//! # async fn docs() { +//! # use aws_config::meta::region::RegionProviderChain; +//! let region_provider = RegionProviderChain::default_provider().or_else("us-east-1"); +//! let config = aws_config::from_env().region(region_provider).load().await; +//! let client = aws_sdk_dynamodb::Client::new(&config); +//! # } +//! 
``` + +#[allow(dead_code)] +const PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); + +/// Providers that implement the default AWS provider chain +pub mod default_provider; + +/// Providers that load configuration from environment variables +pub mod environment; + +/// Meta-providers that augment existing providers with new behavior +pub mod meta; + +pub mod profile; + +pub mod sts; + +#[cfg(test)] +mod test_case; + +pub mod web_identity_token; + +pub mod ecs; + +pub mod provider_config; + +mod cache; + +pub mod imds; + +mod json_credentials; + +mod fs_util; + +mod http_credential_provider; + +pub mod sso; + +pub mod connector; + +pub(crate) mod parsing; + +// Re-export types from smithy-types +pub use aws_smithy_types::retry::RetryConfig; +pub use aws_smithy_types::timeout; + +// Re-export types from aws-types +pub use aws_types::app_name::{AppName, InvalidAppName}; + +/// Create an environment loader for AWS Configuration +/// +/// # Examples +/// ```no_run +/// # async fn create_config() { +/// use aws_types::region::Region; +/// let config = aws_config::from_env().region("us-east-1").load().await; +/// # } +/// ``` +pub fn from_env() -> ConfigLoader { + ConfigLoader::default() +} + +/// Load a default configuration from the environment +/// +/// Convenience wrapper equivalent to `aws_config::from_env().load().await` +pub async fn load_from_env() -> aws_types::SdkConfig { + from_env().load().await +} + +/// Load default sources for all configuration with override support +pub use loader::ConfigLoader; + +mod loader { + use std::sync::Arc; + + use crate::connector::default_connector; + use aws_smithy_async::rt::sleep::{default_async_sleep, AsyncSleep}; + use aws_smithy_client::http_connector::{HttpConnector, HttpSettings}; + use aws_smithy_types::retry::RetryConfig; + use aws_smithy_types::timeout; + use aws_types::app_name::AppName; + use aws_types::credentials::{ProvideCredentials, SharedCredentialsProvider}; + use aws_types::SdkConfig; + + use crate::default_provider::{app_name, credentials, region, retry_config, timeout_config}; + use crate::meta::region::ProvideRegion; + use crate::provider_config::ProviderConfig; + + /// Load a cross-service [`SdkConfig`](aws_types::SdkConfig) from the environment + /// + /// This builder supports overriding individual components of the generated config. Overriding a component + /// will skip the standard resolution chain from **for that component**. For example, + /// if you override the region provider, _even if that provider returns None_, the default region provider + /// chain will not be used. + #[derive(Default, Debug)] + pub struct ConfigLoader { + app_name: Option, + credentials_provider: Option, + region: Option>, + retry_config: Option, + sleep: Option>, + timeout_config: Option, + provider_config: Option, + http_connector: Option, + } + + impl ConfigLoader { + /// Override the region used to build [`SdkConfig`](aws_types::SdkConfig). + /// + /// # Examples + /// ```no_run + /// # async fn create_config() { + /// use aws_types::region::Region; + /// let config = aws_config::from_env() + /// .region(Region::new("us-east-1")) + /// .load().await; + /// # } + /// ``` + pub fn region(mut self, region: impl ProvideRegion + 'static) -> Self { + self.region = Some(Box::new(region)); + self + } + + /// Override the retry_config used to build [`SdkConfig`](aws_types::SdkConfig). 
+ /// + /// # Examples + /// ```no_run + /// # use aws_smithy_types::retry::RetryConfig; + /// # async fn create_config() { + /// let config = aws_config::from_env() + /// .retry_config(RetryConfig::new().with_max_attempts(2)) + /// .load().await; + /// # } + /// ``` + pub fn retry_config(mut self, retry_config: RetryConfig) -> Self { + self.retry_config = Some(retry_config); + self + } + + /// Override the timeout config used to build [`SdkConfig`](aws_types::SdkConfig). + /// **Note: This only sets timeouts for calls to AWS services.** Timeouts for the credentials + /// provider chain are configured separately. + /// + /// # Examples + /// ```no_run + /// # use std::time::Duration; + /// # async fn create_config() { + /// use aws_smithy_types::{timeout, tristate::TriState}; + /// + /// let api_timeout_config = timeout::Api::new() + /// .with_call_timeout(TriState::Set(Duration::from_secs(1))); + /// let timeout_config = timeout::Config::new().with_api_timeouts(api_timeout_config); + /// let config = aws_config::from_env() + /// .timeout_config(timeout_config) + /// .load() + /// .await; + /// # } + /// ``` + pub fn timeout_config(mut self, timeout_config: timeout::Config) -> Self { + self.timeout_config = Some(timeout_config); + self + } + + /// Override the sleep implementation for this [`ConfigLoader`]. The sleep implementation + /// is used to create timeout futures. + pub fn sleep_impl(mut self, sleep: impl AsyncSleep + 'static) -> Self { + // it's possible that we could wrapping an `Arc in an `Arc` and that's OK + self.sleep = Some(Arc::new(sleep)); + self + } + + /// Override the [`HttpConnector`] used to build [`SdkConfig`](aws_types::SdkConfig). + pub fn http_connector(mut self, http_connector: HttpConnector) -> Self { + self.http_connector = Some(http_connector); + self + } + + /// Override the credentials provider used to build [`SdkConfig`](aws_types::SdkConfig). + /// + /// # Examples + /// + /// Override the credentials provider but load the default value for region: + /// ```no_run + /// # use aws_types::Credentials; + /// # fn create_my_credential_provider() -> Credentials { + /// # Credentials::new("example", "example", None, None, "example") + /// # } + /// # async fn create_config() { + /// let config = aws_config::from_env() + /// .credentials_provider(create_my_credential_provider()) + /// .load() + /// .await; + /// # } + /// ``` + pub fn credentials_provider( + mut self, + credentials_provider: impl ProvideCredentials + 'static, + ) -> Self { + self.credentials_provider = Some(SharedCredentialsProvider::new(credentials_provider)); + self + } + + /// Set configuration for all sub-loaders (credentials, region etc.) + /// + /// Update the `ProviderConfig` used for all nested loaders. This can be used to override + /// the HTTPs connector used or to stub in an in memory `Env` or `Fs` for testing. + /// + /// # Examples + /// ```no_run + /// # async fn docs() { + /// use aws_config::provider_config::ProviderConfig; + /// let custom_https_connector = hyper_rustls::HttpsConnectorBuilder::new(). 
+ /// with_webpki_roots() + /// .https_only() + /// .enable_http1() + /// .build(); + /// let provider_config = ProviderConfig::default().with_tcp_connector(custom_https_connector); + /// let shared_config = aws_config::from_env().configure(provider_config).load().await; + /// # } + /// ``` + pub fn configure(mut self, provider_config: ProviderConfig) -> Self { + self.provider_config = Some(provider_config); + self + } + + /// Load the default configuration chain + /// + /// If fields have been overridden during builder construction, the override values will be used. + /// + /// Otherwise, the default values for each field will be provided. + /// + /// NOTE: When an override is provided, the default implementation is **not** used as a fallback. + /// This means that if you provide a region provider that does not return a region, no region will + /// be set in the resulting [`SdkConfig`](aws_types::SdkConfig) + pub async fn load(self) -> SdkConfig { + let conf = self.provider_config.unwrap_or_default(); + let region = if let Some(provider) = self.region { + provider.region().await + } else { + region::Builder::default() + .configure(&conf) + .build() + .region() + .await + }; + + let retry_config = if let Some(retry_config) = self.retry_config { + retry_config + } else { + retry_config::default_provider() + .configure(&conf) + .retry_config() + .await + }; + + let app_name = if self.app_name.is_some() { + self.app_name + } else { + app_name::default_provider() + .configure(&conf) + .app_name() + .await + }; + + let sleep_impl = if self.sleep.is_none() { + if default_async_sleep().is_none() { + tracing::warn!( + "An implementation of AsyncSleep was requested by calling default_async_sleep \ + but no default was set. + This happened when ConfigLoader::load was called during Config construction. 
\ + You can fix this by setting a sleep_impl on the ConfigLoader before calling \ + load or by enabling the rt-tokio feature" + ); + } + default_async_sleep() + } else { + self.sleep + }; + + let http_connector = if let Some(http_connector) = self.http_connector { + http_connector + } else { + let timeouts = self.timeout_config.clone().unwrap_or_default(); + let settings = HttpSettings::default() + .with_http_timeout_config(timeouts.http_timeouts()) + .with_tcp_timeout_config(timeouts.tcp_timeouts()); + let sleep_impl = sleep_impl.clone(); + HttpConnector::Prebuilt(default_connector(&settings, sleep_impl)) + }; + + let timeout_config = if let Some(timeout_config) = self.timeout_config { + timeout_config + } else { + timeout_config::default_provider() + .configure(&conf) + .timeout_config() + .await + }; + + let credentials_provider = if let Some(provider) = self.credentials_provider { + provider + } else { + let mut builder = credentials::DefaultCredentialsChain::builder().configure(conf); + builder.set_region(region.clone()); + SharedCredentialsProvider::new(builder.build().await) + }; + + let mut builder = SdkConfig::builder() + .region(region) + .retry_config(retry_config) + .timeout_config(timeout_config) + .credentials_provider(credentials_provider) + .http_connector(http_connector); + + builder.set_app_name(app_name); + builder.set_sleep_impl(sleep_impl); + builder.build() + } + } + + #[cfg(test)] + mod test { + use crate::from_env; + use crate::provider_config::ProviderConfig; + use aws_smithy_client::erase::DynConnector; + use aws_smithy_client::never::NeverConnector; + use aws_types::credentials::ProvideCredentials; + use aws_types::os_shim_internal::Env; + + #[tokio::test] + async fn provider_config_used() { + let env = Env::from_slice(&[ + ("AWS_MAX_ATTEMPTS", "10"), + ("AWS_REGION", "us-west-4"), + ("AWS_ACCESS_KEY_ID", "akid"), + ("AWS_SECRET_ACCESS_KEY", "secret"), + ]); + let loader = from_env() + .configure( + ProviderConfig::empty() + .with_env(env) + .with_http_connector(DynConnector::new(NeverConnector::new())), + ) + .load() + .await; + assert_eq!(loader.retry_config().unwrap().max_attempts(), 10); + assert_eq!(loader.region().unwrap().as_ref(), "us-west-4"); + assert_eq!( + loader + .credentials_provider() + .unwrap() + .provide_credentials() + .await + .unwrap() + .access_key_id(), + "akid" + ); + } + } +} diff --git a/patch/aws-config/src/meta/credentials/chain.rs b/patch/aws-config/src/meta/credentials/chain.rs new file mode 100644 index 0000000000000..57cba8bd35ce7 --- /dev/null +++ b/patch/aws-config/src/meta/credentials/chain.rs @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use std::borrow::Cow; + +use aws_types::credentials::{self, future, CredentialsError, ProvideCredentials}; +use tracing::Instrument; + +/// Credentials provider that checks a series of inner providers +/// +/// Each provider will be evaluated in order: +/// * If a provider returns valid [`Credentials`](aws_types::Credentials) they will be returned immediately. +/// No other credential providers will be used. +/// * Otherwise, if a provider returns +/// [`CredentialsError::CredentialsNotLoaded`](aws_types::credentials::CredentialsError::CredentialsNotLoaded), +/// the next provider will be checked. +/// * Finally, if a provider returns any other error condition, an error will be returned immediately. 
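// [Editor's illustrative sketch — not part of this patch.] A minimal example of the chain
// semantics listed above: a provider that returns `CredentialsNotLoaded` is skipped and the
// next provider in the chain is consulted, while any other error stops the chain. It uses only
// the `CredentialsProviderChain` and `provide_credentials_fn` APIs defined in this patch; the
// provider names and credential values are made up for illustration.
async fn chain_fall_through_example() -> aws_types::credentials::Result {
    use aws_config::meta::credentials::{provide_credentials_fn, CredentialsProviderChain};
    use aws_types::credentials::{CredentialsError, ProvideCredentials};
    use aws_types::Credentials;

    async fn not_loaded() -> aws_types::credentials::Result {
        // Returning `CredentialsNotLoaded` lets the chain fall through to the next provider.
        Err(CredentialsError::not_loaded("nothing configured here"))
    }

    let chain = CredentialsProviderChain::first_try("Empty", provide_credentials_fn(|| not_loaded()))
        .or_else(
            "Static",
            provide_credentials_fn(|| async {
                Ok(Credentials::new("akid", "secret", None, None, "Static"))
            }),
        );

    // "Empty" yields `CredentialsNotLoaded`, so the credentials come from "Static".
    chain.provide_credentials().await
}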
+/// +/// # Examples +/// +/// ```no_run +/// # fn example() { +/// use aws_config::meta::credentials::CredentialsProviderChain; +/// use aws_config::environment::credentials::EnvironmentVariableCredentialsProvider; +/// use aws_config::profile::ProfileFileCredentialsProvider; +/// +/// let provider = CredentialsProviderChain::first_try("Environment", EnvironmentVariableCredentialsProvider::new()) +/// .or_else("Profile", ProfileFileCredentialsProvider::builder().build()); +/// # } +/// ``` +#[derive(Debug)] +pub struct CredentialsProviderChain { + providers: Vec<(Cow<'static, str>, Box)>, +} + +impl CredentialsProviderChain { + /// Create a `CredentialsProviderChain` that begins by evaluating this provider + pub fn first_try( + name: impl Into>, + provider: impl ProvideCredentials + 'static, + ) -> Self { + CredentialsProviderChain { + providers: vec![(name.into(), Box::new(provider))], + } + } + + /// Add a fallback provider to the credentials provider chain + pub fn or_else( + mut self, + name: impl Into>, + provider: impl ProvideCredentials + 'static, + ) -> Self { + self.providers.push((name.into(), Box::new(provider))); + self + } + + /// Add a fallback to the default provider chain + #[cfg(any(feature = "rustls", feature = "native-tls"))] + pub async fn or_default_provider(self) -> Self { + self.or_else( + "DefaultProviderChain", + crate::default_provider::credentials::default_provider().await, + ) + } + + /// Creates a credential provider chain that starts with the default provider + #[cfg(any(feature = "rustls", feature = "native-tls"))] + pub async fn default_provider() -> Self { + Self::first_try( + "DefaultProviderChain", + crate::default_provider::credentials::default_provider().await, + ) + } + + async fn credentials(&self) -> credentials::Result { + for (name, provider) in &self.providers { + let span = tracing::debug_span!("load_credentials", provider = %name); + match provider.provide_credentials().instrument(span).await { + Ok(credentials) => { + tracing::info!(provider = %name, "loaded credentials"); + return Ok(credentials); + } + Err(CredentialsError::CredentialsNotLoaded { context, .. }) => { + tracing::info!(provider = %name, context = %context, "provider in chain did not provide credentials"); + } + Err(e) => { + tracing::warn!(provider = %name, error = %e, "provider failed to provide credentials"); + return Err(e); + } + } + } + Err(CredentialsError::not_loaded( + "no providers in chain provided credentials", + )) + } +} + +impl ProvideCredentials for CredentialsProviderChain { + fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials + where + Self: 'a, + { + future::ProvideCredentials::new(self.credentials()) + } +} diff --git a/patch/aws-config/src/meta/credentials/credential_fn.rs b/patch/aws-config/src/meta/credentials/credential_fn.rs new file mode 100644 index 0000000000000..bdc059c5b3465 --- /dev/null +++ b/patch/aws-config/src/meta/credentials/credential_fn.rs @@ -0,0 +1,141 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use aws_types::credentials; +use aws_types::credentials::ProvideCredentials; +use std::fmt::{self, Debug, Formatter}; +use std::future::Future; +use std::marker::PhantomData; + +/// A [`ProvideCredentials`] implemented by a closure. +/// +/// See [`provide_credentials_fn`] for more details. 
+#[derive(Copy, Clone)] +pub struct ProvideCredentialsFn<'c, T> { + f: T, + phantom: PhantomData<&'c T>, +} + +impl Debug for ProvideCredentialsFn<'_, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "ProvideCredentialsFn") + } +} + +impl<'c, T, F> ProvideCredentials for ProvideCredentialsFn<'c, T> +where + T: Fn() -> F + Send + Sync + 'c, + F: Future + Send + 'static, +{ + fn provide_credentials<'a>(&'a self) -> credentials::future::ProvideCredentials<'a> + where + Self: 'a, + { + credentials::future::ProvideCredentials::new((self.f)()) + } +} + +/// Returns a new credentials provider built with the given closure. This allows you +/// to create an [`ProvideCredentials`] implementation from an async block that returns +/// a [`credentials::Result`]. +/// +/// # Examples +/// +/// ```no_run +/// use aws_types::Credentials; +/// use aws_config::meta::credentials::provide_credentials_fn; +/// +/// async fn load_credentials() -> Credentials { +/// todo!() +/// } +/// +/// provide_credentials_fn(|| async { +/// // Async process to retrieve credentials goes here +/// let credentials = load_credentials().await; +/// Ok(credentials) +/// }); +/// ``` +pub fn provide_credentials_fn<'c, T, F>(f: T) -> ProvideCredentialsFn<'c, T> +where + T: Fn() -> F + Send + Sync + 'c, + F: Future + Send + 'static, +{ + ProvideCredentialsFn { + f, + phantom: Default::default(), + } +} + +#[cfg(test)] +mod test { + use crate::meta::credentials::credential_fn::provide_credentials_fn; + use async_trait::async_trait; + use aws_types::credentials::ProvideCredentials; + use aws_types::{credentials, Credentials}; + use std::fmt::{Debug, Formatter}; + + fn assert_send_sync() {} + + #[test] + fn creds_are_send_sync() { + assert_send_sync::() + } + + #[async_trait] + trait AnotherTrait: Send + Sync { + async fn creds(&self) -> Credentials; + } + + struct AnotherTraitWrapper { + inner: T, + } + + impl Debug for AnotherTraitWrapper { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "wrapper") + } + } + + impl ProvideCredentials for AnotherTraitWrapper { + fn provide_credentials<'a>(&'a self) -> credentials::future::ProvideCredentials<'a> + where + Self: 'a, + { + credentials::future::ProvideCredentials::new( + async move { Ok(self.inner.creds().await) }, + ) + } + } + + // Test that the closure passed to `provide_credentials_fn` is allowed to borrow things + #[tokio::test] + async fn provide_credentials_fn_closure_can_borrow() { + fn check_is_str_ref(_input: &str) {} + async fn test_async_provider(input: String) -> credentials::Result { + Ok(Credentials::new(&input, &input, None, None, "test")) + } + + let things_to_borrow = vec!["one".to_string(), "two".to_string()]; + + let mut providers = Vec::new(); + for thing in &things_to_borrow { + let provider = provide_credentials_fn(move || { + check_is_str_ref(thing); + test_async_provider(thing.into()) + }); + providers.push(provider); + } + + let (two, one) = (providers.pop().unwrap(), providers.pop().unwrap()); + assert_eq!( + "one", + one.provide_credentials().await.unwrap().access_key_id() + ); + assert_eq!( + "two", + two.provide_credentials().await.unwrap().access_key_id() + ); + } +} diff --git a/patch/aws-config/src/meta/credentials/lazy_caching.rs b/patch/aws-config/src/meta/credentials/lazy_caching.rs new file mode 100644 index 0000000000000..0154a2492b3ba --- /dev/null +++ b/patch/aws-config/src/meta/credentials/lazy_caching.rs @@ -0,0 +1,488 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0. + */ + +//! Lazy, caching, credentials provider implementation + +use std::sync::Arc; +use std::time::Duration; + +use aws_smithy_async::future::timeout::Timeout; +use aws_smithy_async::rt::sleep::AsyncSleep; +use tracing::{trace_span, Instrument}; + +use aws_types::credentials::{future, CredentialsError, ProvideCredentials}; +use aws_types::os_shim_internal::TimeSource; + +use crate::cache::ExpiringCache; + +const DEFAULT_LOAD_TIMEOUT: Duration = Duration::from_secs(5); +const DEFAULT_CREDENTIAL_EXPIRATION: Duration = Duration::from_secs(15 * 60); +const DEFAULT_BUFFER_TIME: Duration = Duration::from_secs(10); + +/// `LazyCachingCredentialsProvider` implements [`ProvideCredentials`] by caching +/// credentials that it loads by calling a user-provided [`ProvideCredentials`] implementation. +/// +/// For example, you can provide an [`ProvideCredentials`] implementation that calls +/// AWS STS's AssumeRole operation to get temporary credentials, and `LazyCachingCredentialsProvider` +/// will cache those credentials until they expire. +#[derive(Debug)] +pub struct LazyCachingCredentialsProvider { + time: TimeSource, + sleeper: Arc, + cache: ExpiringCache, + loader: Arc, + load_timeout: Duration, + default_credential_expiration: Duration, +} + +impl LazyCachingCredentialsProvider { + fn new( + time: TimeSource, + sleeper: Arc, + loader: Arc, + load_timeout: Duration, + default_credential_expiration: Duration, + buffer_time: Duration, + ) -> Self { + LazyCachingCredentialsProvider { + time, + sleeper, + cache: ExpiringCache::new(buffer_time), + loader, + load_timeout, + default_credential_expiration, + } + } + + /// Returns a new `Builder` that can be used to construct the `LazyCachingCredentialsProvider`. + pub fn builder() -> builder::Builder { + builder::Builder::new() + } +} + +impl ProvideCredentials for LazyCachingCredentialsProvider { + fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials + where + Self: 'a, + { + let now = self.time.now(); + let loader = self.loader.clone(); + let timeout_future = self.sleeper.sleep(self.load_timeout); + let load_timeout = self.load_timeout; + let cache = self.cache.clone(); + let default_credential_expiration = self.default_credential_expiration; + + future::ProvideCredentials::new(async move { + // Attempt to get cached credentials, or clear the cache if they're expired + if let Some(credentials) = cache.yield_or_clear_if_expired(now).await { + tracing::debug!("loaded credentials from cache"); + Ok(credentials) + } else { + // If we didn't get credentials from the cache, then we need to try and load. + // There may be other threads also loading simultaneously, but this is OK + // since the futures are not eagerly executed, and the cache will only run one + // of them. + let span = trace_span!("lazy_load_credentials"); + let future = Timeout::new(loader.provide_credentials(), timeout_future); + cache + .get_or_load(|| { + async move { + let credentials = future.await.map_err(|_err| { + CredentialsError::provider_timed_out(load_timeout) + })??; + // If the credentials don't have an expiration time, then create a default one + let expiry = credentials + .expiry() + .unwrap_or(now + default_credential_expiration); + Ok((credentials, expiry)) + } + // Only instrument the the actual load future so that no span + // is opened if the cache decides not to execute it. 
+ .instrument(span) + }) + .await + } + }) + } +} + +use aws_types::Credentials; +pub use builder::Builder; + +mod builder { + use std::sync::Arc; + use std::time::Duration; + + use aws_smithy_async::rt::sleep::{default_async_sleep, AsyncSleep}; + use aws_types::credentials::ProvideCredentials; + + use super::{ + LazyCachingCredentialsProvider, DEFAULT_BUFFER_TIME, DEFAULT_CREDENTIAL_EXPIRATION, + DEFAULT_LOAD_TIMEOUT, + }; + use crate::provider_config::ProviderConfig; + use aws_types::os_shim_internal::TimeSource; + + /// Builder for constructing a [`LazyCachingCredentialsProvider`]. + /// + /// # Examples + /// + /// ```no_run + /// use aws_types::Credentials; + /// use aws_config::meta::credentials::provide_credentials_fn; + /// use aws_config::meta::credentials::LazyCachingCredentialsProvider; + /// + /// let provider = LazyCachingCredentialsProvider::builder() + /// .load(provide_credentials_fn(|| async { + /// // An async process to retrieve credentials would go here: + /// Ok(Credentials::new("example", "example", None, None, "my_provider_name")) + /// })) + /// .build(); + /// ``` + #[derive(Default)] + pub struct Builder { + sleep: Option>, + time_source: Option, + load: Option>, + load_timeout: Option, + buffer_time: Option, + default_credential_expiration: Option, + } + + impl Builder { + /// Creates a new builder + pub fn new() -> Self { + Default::default() + } + + /// Override configuration for the [Builder] + pub fn configure(mut self, config: &ProviderConfig) -> Self { + self.sleep = config.sleep(); + self.time_source = Some(config.time_source()); + self + } + + /// An implementation of [`ProvideCredentials`] that will be used to load + /// the cached credentials once they're expired. + pub fn load(mut self, loader: impl ProvideCredentials + 'static) -> Self { + self.set_load(Some(loader)); + self + } + + /// An implementation of [`ProvideCredentials`] that will be used to load + /// the cached credentials once they're expired. + pub fn set_load(&mut self, loader: Option) -> &mut Self { + self.load = loader.map(|l| Arc::new(l) as Arc); + self + } + + /// Implementation of [`AsyncSleep`] to use for timeouts. + /// + /// This enables use of the `LazyCachingCredentialsProvider` with other async runtimes. + /// If using Tokio as the async runtime, this should be set to an instance of + /// [`TokioSleep`](aws_smithy_async::rt::sleep::TokioSleep). + pub fn sleep(mut self, sleep: impl AsyncSleep + 'static) -> Self { + self.set_sleep(Some(sleep)); + self + } + + /// Implementation of [`AsyncSleep`] to use for timeouts. + /// + /// This enables use of the `LazyCachingCredentialsProvider` with other async runtimes. + /// If using Tokio as the async runtime, this should be set to an instance of + /// [`TokioSleep`](aws_smithy_async::rt::sleep::TokioSleep). + pub fn set_sleep(&mut self, sleep: Option) -> &mut Self { + self.sleep = sleep.map(|s| Arc::new(s) as Arc); + self + } + + /// Timeout for the given [`ProvideCredentials`] implementation. + /// + /// Defaults to 5 seconds. + pub fn load_timeout(mut self, timeout: Duration) -> Self { + self.set_load_timeout(Some(timeout)); + self + } + + /// Timeout for the given [`ProvideCredentials`] implementation. + /// + /// Defaults to 5 seconds. + pub fn set_load_timeout(&mut self, timeout: Option) -> &mut Self { + self.load_timeout = timeout; + self + } + + /// Amount of time before the actual credential expiration time + /// where credentials are considered expired. 
+ /// + /// For example, if credentials are expiring in 15 minutes, and the buffer time is 10 seconds, + /// then any requests made after 14 minutes and 50 seconds will load new credentials. + /// + /// Defaults to 10 seconds. + pub fn buffer_time(mut self, buffer_time: Duration) -> Self { + self.set_buffer_time(Some(buffer_time)); + self + } + + /// Amount of time before the actual credential expiration time + /// where credentials are considered expired. + /// + /// For example, if credentials are expiring in 15 minutes, and the buffer time is 10 seconds, + /// then any requests made after 14 minutes and 50 seconds will load new credentials. + /// + /// Defaults to 10 seconds. + pub fn set_buffer_time(&mut self, buffer_time: Option) -> &mut Self { + self.buffer_time = buffer_time; + self + } + + /// Default expiration time to set on credentials if they don't have an expiration time. + /// + /// This is only used if the given [`ProvideCredentials`] returns + /// [`Credentials`](aws_types::Credentials) that don't have their `expiry` set. + /// This must be at least 15 minutes. + /// + /// Defaults to 15 minutes. + pub fn default_credential_expiration(mut self, duration: Duration) -> Self { + self.set_default_credential_expiration(Some(duration)); + self + } + + /// Default expiration time to set on credentials if they don't have an expiration time. + /// + /// This is only used if the given [`ProvideCredentials`] returns + /// [`Credentials`](aws_types::Credentials) that don't have their `expiry` set. + /// This must be at least 15 minutes. + /// + /// Defaults to 15 minutes. + pub fn set_default_credential_expiration( + &mut self, + duration: Option, + ) -> &mut Self { + self.default_credential_expiration = duration; + self + } + + /// Creates the [`LazyCachingCredentialsProvider`]. + /// + /// # Panics + /// This will panic if no `sleep` implementation is given and if no default crate features + /// are used. By default, the [`TokioSleep`](aws_smithy_async::rt::sleep::TokioSleep) + /// implementation will be set automatically. 
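// [Editor's illustrative sketch — not part of this patch.] A hypothetical builder invocation
// tying together the settings documented above (`load_timeout`, `buffer_time`,
// `default_credential_expiration`). The wrapped closure provider and all durations are made up;
// note that `default_credential_expiration` must be at least 15 minutes or `build()` panics,
// and a sleep implementation (e.g. the Tokio default) must be available.
fn cached_provider_example() -> aws_config::meta::credentials::LazyCachingCredentialsProvider {
    use std::time::Duration;
    use aws_config::meta::credentials::{provide_credentials_fn, LazyCachingCredentialsProvider};
    use aws_types::Credentials;

    LazyCachingCredentialsProvider::builder()
        // Called only when the cache is empty or the cached credentials have expired.
        .load(provide_credentials_fn(|| async {
            Ok(Credentials::new("example", "example", None, None, "example_provider"))
        }))
        // Give the wrapped provider up to 10 seconds before timing out.
        .load_timeout(Duration::from_secs(10))
        // Treat credentials as expired 30 seconds before their reported expiration.
        .buffer_time(Duration::from_secs(30))
        // Credentials returned without an expiry are cached for 20 minutes.
        .default_credential_expiration(Duration::from_secs(20 * 60))
        .build()
}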
+ pub fn build(self) -> LazyCachingCredentialsProvider { + let default_credential_expiration = self + .default_credential_expiration + .unwrap_or(DEFAULT_CREDENTIAL_EXPIRATION); + assert!( + default_credential_expiration >= DEFAULT_CREDENTIAL_EXPIRATION, + "default_credential_expiration must be at least 15 minutes" + ); + LazyCachingCredentialsProvider::new( + self.time_source.unwrap_or_default(), + self.sleep.unwrap_or_else(|| { + default_async_sleep().expect("no default sleep implementation available") + }), + self.load.expect("load implementation is required"), + self.load_timeout.unwrap_or(DEFAULT_LOAD_TIMEOUT), + default_credential_expiration, + self.buffer_time.unwrap_or(DEFAULT_BUFFER_TIME), + ) + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::{Arc, Mutex}; + use std::time::{Duration, SystemTime, UNIX_EPOCH}; + + use aws_smithy_async::rt::sleep::TokioSleep; + use aws_types::credentials::{self, CredentialsError, ProvideCredentials}; + use aws_types::Credentials; + use tracing::info; + use tracing_test::traced_test; + + use crate::meta::credentials::credential_fn::provide_credentials_fn; + + use super::{ + LazyCachingCredentialsProvider, TimeSource, DEFAULT_BUFFER_TIME, + DEFAULT_CREDENTIAL_EXPIRATION, DEFAULT_LOAD_TIMEOUT, + }; + use aws_types::os_shim_internal::ManualTimeSource; + + fn test_provider( + time: TimeSource, + load_list: Vec, + ) -> LazyCachingCredentialsProvider { + let load_list = Arc::new(Mutex::new(load_list)); + LazyCachingCredentialsProvider::new( + time, + Arc::new(TokioSleep::new()), + Arc::new(provide_credentials_fn(move || { + let list = load_list.clone(); + async move { + let next = list.lock().unwrap().remove(0); + info!("refreshing the credentials to {:?}", next); + next + } + })), + DEFAULT_LOAD_TIMEOUT, + DEFAULT_CREDENTIAL_EXPIRATION, + DEFAULT_BUFFER_TIME, + ) + } + + fn epoch_secs(secs: u64) -> SystemTime { + SystemTime::UNIX_EPOCH + Duration::from_secs(secs) + } + + fn credentials(expired_secs: u64) -> Credentials { + Credentials::new("test", "test", None, Some(epoch_secs(expired_secs)), "test") + } + + async fn expect_creds(expired_secs: u64, provider: &LazyCachingCredentialsProvider) { + let creds = provider + .provide_credentials() + .await + .expect("expected credentials"); + assert_eq!(Some(epoch_secs(expired_secs)), creds.expiry()); + } + + #[traced_test] + #[tokio::test] + async fn initial_populate_credentials() { + let time = ManualTimeSource::new(UNIX_EPOCH); + let loader = Arc::new(provide_credentials_fn(|| async { + info!("refreshing the credentials"); + Ok(credentials(1000)) + })); + let provider = LazyCachingCredentialsProvider::new( + TimeSource::manual(&time), + Arc::new(TokioSleep::new()), + loader, + DEFAULT_LOAD_TIMEOUT, + DEFAULT_CREDENTIAL_EXPIRATION, + DEFAULT_BUFFER_TIME, + ); + assert_eq!( + epoch_secs(1000), + provider + .provide_credentials() + .await + .unwrap() + .expiry() + .unwrap() + ); + } + + #[traced_test] + #[tokio::test] + async fn reload_expired_credentials() { + let mut time = ManualTimeSource::new(epoch_secs(100)); + let provider = test_provider( + TimeSource::manual(&time), + vec![ + Ok(credentials(1000)), + Ok(credentials(2000)), + Ok(credentials(3000)), + ], + ); + + expect_creds(1000, &provider).await; + expect_creds(1000, &provider).await; + time.set_time(epoch_secs(1500)); + expect_creds(2000, &provider).await; + expect_creds(2000, &provider).await; + time.set_time(epoch_secs(2500)); + expect_creds(3000, &provider).await; + expect_creds(3000, &provider).await; + } + + #[traced_test] + #[tokio::test] + 
async fn load_failed_error() { + let mut time = ManualTimeSource::new(epoch_secs(100)); + let provider = test_provider( + TimeSource::manual(&time), + vec![ + Ok(credentials(1000)), + Err(CredentialsError::not_loaded("failed")), + ], + ); + + expect_creds(1000, &provider).await; + time.set_time(epoch_secs(1500)); + assert!(provider.provide_credentials().await.is_err()); + } + + #[traced_test] + #[test] + fn load_contention() { + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_time() + .worker_threads(16) + .build() + .unwrap(); + + let time = ManualTimeSource::new(epoch_secs(0)); + let provider = Arc::new(test_provider( + TimeSource::manual(&time), + vec![ + Ok(credentials(500)), + Ok(credentials(1500)), + Ok(credentials(2500)), + Ok(credentials(3500)), + Ok(credentials(4500)), + ], + )); + + let locked_time = Arc::new(Mutex::new(time)); + + for i in 0..4 { + let mut tasks = Vec::new(); + for j in 0..50 { + let provider = provider.clone(); + let time = locked_time.clone(); + tasks.push(rt.spawn(async move { + let now = epoch_secs(i * 1000 + (4 * j)); + time.lock().unwrap().set_time(now); + + let creds = provider.provide_credentials().await.unwrap(); + assert!( + creds.expiry().unwrap() >= now, + "{:?} >= {:?}", + creds.expiry(), + now + ); + })); + } + for task in tasks { + rt.block_on(task).unwrap(); + } + } + } + + #[tokio::test] + #[traced_test] + async fn load_timeout() { + let time = ManualTimeSource::new(epoch_secs(100)); + let provider = LazyCachingCredentialsProvider::new( + TimeSource::manual(&time), + Arc::new(TokioSleep::new()), + Arc::new(provide_credentials_fn(|| async { + aws_smithy_async::future::never::Never::new().await; + Ok(credentials(1000)) + })), + Duration::from_millis(5), + DEFAULT_CREDENTIAL_EXPIRATION, + DEFAULT_BUFFER_TIME, + ); + + assert!(matches!( + provider.provide_credentials().await, + Err(CredentialsError::ProviderTimedOut { .. }) + )); + } +} diff --git a/patch/aws-config/src/meta/credentials/mod.rs b/patch/aws-config/src/meta/credentials/mod.rs new file mode 100644 index 0000000000000..8f5e504e48e81 --- /dev/null +++ b/patch/aws-config/src/meta/credentials/mod.rs @@ -0,0 +1,15 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Credential providers that augment an existing credentials providers to add functionality + +mod chain; +pub use chain::CredentialsProviderChain; + +mod credential_fn; +pub use credential_fn::provide_credentials_fn; + +pub mod lazy_caching; +pub use lazy_caching::LazyCachingCredentialsProvider; diff --git a/patch/aws-config/src/meta/mod.rs b/patch/aws-config/src/meta/mod.rs new file mode 100644 index 0000000000000..202443b32dec2 --- /dev/null +++ b/patch/aws-config/src/meta/mod.rs @@ -0,0 +1,8 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +pub mod region; + +pub mod credentials; diff --git a/patch/aws-config/src/meta/region.rs b/patch/aws-config/src/meta/region.rs new file mode 100644 index 0000000000000..5e80b55222e45 --- /dev/null +++ b/patch/aws-config/src/meta/region.rs @@ -0,0 +1,184 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! 
Region providers that augment existing providers with new functionality + +use aws_types::region::Region; +use std::borrow::Cow; +use std::fmt::Debug; +use tracing::Instrument; + +/// Load a region by selecting the first from a series of region providers. +/// +/// # Examples +/// +/// ```no_run +/// # fn example() { +/// use aws_types::region::Region; +/// use std::env; +/// use aws_config::meta::region::RegionProviderChain; +/// +/// // region provider that first checks the `CUSTOM_REGION` environment variable, +/// // then checks the default provider chain, then falls back to us-east-2 +/// let provider = RegionProviderChain::first_try(env::var("CUSTOM_REGION").ok().map(Region::new)) +/// .or_default_provider() +/// .or_else(Region::new("us-east-2")); +/// # } +/// ``` +#[derive(Debug)] +pub struct RegionProviderChain { + providers: Vec>, +} + +impl RegionProviderChain { + /// Load a region from the provider chain + /// + /// The first provider to return a non-optional region will be selected + pub async fn region(&self) -> Option { + for provider in &self.providers { + if let Some(region) = provider + .region() + .instrument(tracing::info_span!("load_region", provider = ?provider)) + .await + { + return Some(region); + } + } + None + } + + /// Create a default provider chain that starts by checking this provider. + pub fn first_try(provider: impl ProvideRegion + 'static) -> Self { + RegionProviderChain { + providers: vec![Box::new(provider)], + } + } + + /// Add a fallback provider to the region provider chain. + pub fn or_else(mut self, fallback: impl ProvideRegion + 'static) -> Self { + self.providers.push(Box::new(fallback)); + self + } + + /// Create a region provider chain that starts by checking the default provider. + pub fn default_provider() -> Self { + Self::first_try(crate::default_provider::region::default_provider()) + } + + /// Fallback to the default provider + pub fn or_default_provider(mut self) -> Self { + self.providers + .push(Box::new(crate::default_provider::region::default_provider())); + self + } +} + +impl ProvideRegion for Option { + fn region(&self) -> future::ProvideRegion { + future::ProvideRegion::ready(self.clone()) + } +} + +impl ProvideRegion for RegionProviderChain { + fn region(&self) -> future::ProvideRegion { + future::ProvideRegion::new(RegionProviderChain::region(self)) + } +} + +/// Future wrapper returned by [`ProvideRegion`] +/// +/// Note: this module should only be used when implementing your own region providers. +pub mod future { + use std::future::Future; + use std::pin::Pin; + use std::task::{Context, Poll}; + + use aws_smithy_async::future::now_or_later::NowOrLater; + + use aws_types::region::Region; + + type BoxFuture<'a> = Pin> + Send + 'a>>; + /// Future returned by [`ProvideRegion`](super::ProvideRegion) + /// + /// - When wrapping an already loaded region, use [`ready`](ProvideRegion::ready). + /// - When wrapping an asynchronously loaded region, use [`new`](ProvideRegion::new). 
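// [Editor's illustrative sketch — not part of this patch.] A minimal custom region provider
// built on the `future::ProvideRegion::ready` constructor described above. Because the region
// is already known, no async work is needed; use `new` instead for asynchronous lookups. The
// type name is hypothetical, and the provider can be composed into a `RegionProviderChain`,
// e.g. `RegionProviderChain::first_try(FixedRegion("us-west-2")).or_default_provider()`.
#[derive(Debug)]
struct FixedRegion(&'static str);

impl aws_config::meta::region::ProvideRegion for FixedRegion {
    fn region(&self) -> aws_config::meta::region::future::ProvideRegion {
        // Wrap the already-loaded region with `ready`.
        aws_config::meta::region::future::ProvideRegion::ready(Some(
            aws_types::region::Region::new(self.0),
        ))
    }
}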
+ pub struct ProvideRegion<'a>(NowOrLater, BoxFuture<'a>>); + impl<'a> ProvideRegion<'a> { + /// A future that wraps the given future + pub fn new(future: impl Future> + Send + 'a) -> Self { + Self(NowOrLater::new(Box::pin(future))) + } + + /// A future that resolves to a given region + pub fn ready(region: Option) -> Self { + Self(NowOrLater::ready(region)) + } + } + + impl Future for ProvideRegion<'_> { + type Output = Option; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + Pin::new(&mut self.0).poll(cx) + } + } +} + +/// Provide a [`Region`](Region) to use with AWS requests +/// +/// For most cases [`default_provider`](crate::default_provider::region::default_provider) will be the best option, implementing +/// a standard provider chain. +pub trait ProvideRegion: Send + Sync + Debug { + /// Load a region from this provider + fn region(&self) -> future::ProvideRegion; +} + +impl ProvideRegion for Region { + fn region(&self) -> future::ProvideRegion { + future::ProvideRegion::ready(Some(self.clone())) + } +} + +impl<'a> ProvideRegion for &'a Region { + fn region(&self) -> future::ProvideRegion { + future::ProvideRegion::ready(Some((*self).clone())) + } +} + +impl ProvideRegion for Box { + fn region(&self) -> future::ProvideRegion { + self.as_ref().region() + } +} + +impl ProvideRegion for &'static str { + fn region(&self) -> future::ProvideRegion { + future::ProvideRegion::ready(Some(Region::new(Cow::Borrowed(*self)))) + } +} + +#[cfg(test)] +mod test { + use crate::meta::region::RegionProviderChain; + use aws_types::region::Region; + use futures_util::FutureExt; + + #[test] + fn provider_chain() { + let a = None; + let b = Some(Region::new("us-east-1")); + let chain = RegionProviderChain::first_try(a).or_else(b); + assert_eq!( + chain.region().now_or_never().expect("ready"), + Some(Region::new("us-east-1")) + ); + } + + #[test] + fn empty_chain() { + let chain = RegionProviderChain::first_try(None).or_else(None); + assert_eq!(chain.region().now_or_never().expect("ready"), None); + } +} diff --git a/patch/aws-config/src/parsing.rs b/patch/aws-config/src/parsing.rs new file mode 100644 index 0000000000000..bda59fdc4f4cc --- /dev/null +++ b/patch/aws-config/src/parsing.rs @@ -0,0 +1,102 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use std::borrow::Cow; +use std::time::Duration; + +use aws_smithy_types::timeout; + +/// Parse a given string as a [`Duration`] that will be used to set a timeout. This will return an +/// error result if the given string is negative, infinite, equal to zero, NaN, or if the string +/// can't be parsed as an `f32`. 
The `name` and `set_by` fields are used to provide context when an +/// error occurs +/// +/// # Example +/// +/// ```dont_run +/// # use std::time::Duration; +/// use aws_config::parsing::parse_str_as_timeout; +/// let duration = parse_str_as_timeout("8", "timeout".into(), "test_success".into()).unwrap(); +/// assert_eq!(duration, Duration::from_secs_f32(8.0)); +/// +/// // This line will panic with `InvalidTimeout { name: "timeout", reason: "timeout must not be less than or equal to zero", set_by: "test_error" }` +/// let _ = parse_str_as_timeout("-1.0", "timeout".into(), "test_error".into()).unwrap(); +/// ``` +pub(crate) fn parse_str_as_timeout( + timeout: &str, + name: Cow<'static, str>, + set_by: Cow<'static, str>, +) -> Result { + match timeout.parse::() { + Ok(timeout) if timeout <= 0.0 => Err(timeout::ConfigError::InvalidTimeout { + set_by, + name, + reason: "timeout must not be less than or equal to zero".into(), + }), + Ok(timeout) if timeout.is_nan() => Err(timeout::ConfigError::InvalidTimeout { + set_by, + name, + reason: "timeout must not be NaN".into(), + }), + Ok(timeout) if timeout.is_infinite() => Err(timeout::ConfigError::InvalidTimeout { + set_by, + name, + reason: "timeout must not be infinite".into(), + }), + Ok(timeout) => Ok(Duration::from_secs_f32(timeout)), + Err(err) => Err(timeout::ConfigError::ParseError { + set_by, + name, + source: Box::new(err), + }), + } +} + +#[cfg(test)] +mod tests { + use super::parse_str_as_timeout; + use std::time::Duration; + + #[test] + fn test_integer_timeouts_are_parseable() { + let duration = parse_str_as_timeout("8", "timeout".into(), "test".into()).unwrap(); + assert_eq!(duration, Duration::from_secs_f32(8.0)); + } + + #[test] + #[should_panic = r#"ParseError { name: "timeout", set_by: "test", source: ParseFloatError { kind: Invalid } }"#] + fn test_unparseable_timeouts_produce_an_error() { + let _ = parse_str_as_timeout( + "this sentence cant be parsed as a number", + "timeout".into(), + "test".into(), + ) + .unwrap(); + } + + #[test] + #[should_panic = r#"InvalidTimeout { name: "timeout", reason: "timeout must not be less than or equal to zero", set_by: "test" }"#] + fn test_negative_timeouts_are_invalid() { + let _ = parse_str_as_timeout("-1.0", "timeout".into(), "test".into()).unwrap(); + } + + #[test] + #[should_panic = r#"InvalidTimeout { name: "timeout", reason: "timeout must not be less than or equal to zero", set_by: "test" }"#] + fn test_setting_timeout_to_zero_is_invalid() { + let _ = parse_str_as_timeout("0", "timeout".into(), "test".into()).unwrap(); + } + + #[test] + #[should_panic = r#"InvalidTimeout { name: "timeout", reason: "timeout must not be NaN", set_by: "test" }"#] + fn test_nan_timeouts_are_invalid() { + let _ = parse_str_as_timeout("NaN", "timeout".into(), "test".into()).unwrap(); + } + + #[test] + #[should_panic = r#"InvalidTimeout { name: "timeout", reason: "timeout must not be infinite", set_by: "test" }"#] + fn test_infinite_timeouts_are_invalid() { + let _ = parse_str_as_timeout("inf", "timeout".into(), "test".into()).unwrap(); + } +} diff --git a/patch/aws-config/src/profile/app_name.rs b/patch/aws-config/src/profile/app_name.rs new file mode 100644 index 0000000000000..563fd61db2419 --- /dev/null +++ b/patch/aws-config/src/profile/app_name.rs @@ -0,0 +1,184 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! 
Load an app name from an AWS profile + +use crate::provider_config::ProviderConfig; +use aws_types::app_name::AppName; +use aws_types::os_shim_internal::{Env, Fs}; + +/// Loads an app name from a profile file +/// +/// This provider will attempt to shared AWS shared configuration and then read the +/// `sdk-ua-app-id` property from the active profile. +/// +/// # Examples +/// +/// **Loads "my-app" as the app name** +/// ```ini +/// [default] +/// sdk-ua-app-id = my-app +/// ``` +/// +/// **Loads "my-app" as the app name _if and only if_ the `AWS_PROFILE` environment variable +/// is set to `other`.** +/// ```ini +/// [profile other] +/// sdk-ua-app-id = my-app +/// ``` +/// +/// This provider is part of the [default app name provider chain](crate::default_provider::app_name). +#[derive(Debug, Default)] +pub struct ProfileFileAppNameProvider { + fs: Fs, + env: Env, + profile_override: Option, +} + +impl ProfileFileAppNameProvider { + /// Create a new [ProfileFileAppNameProvider} + /// + /// To override the selected profile, set the `AWS_PROFILE` environment variable or use the [`Builder`]. + pub fn new() -> Self { + Self { + fs: Fs::real(), + env: Env::real(), + profile_override: None, + } + } + + /// [`Builder`] to construct a [`ProfileFileAppNameProvider`] + pub fn builder() -> Builder { + Builder::default() + } + + /// Parses the profile config and attempts to find an app name. + pub async fn app_name(&self) -> Option { + let profile = super::parser::load(&self.fs, &self.env) + .await + .map_err(|err| tracing::warn!(err = %err, "failed to parse profile")) + .ok()?; + let selected_profile_name = self + .profile_override + .as_deref() + .unwrap_or_else(|| profile.selected_profile()); + let selected_profile = profile.get_profile(selected_profile_name)?; + selected_profile + .get("sdk-ua-app-id") + .map(|name| match AppName::new(name.to_owned()) { + Ok(app_name) => Some(app_name), + Err(err) => { + tracing::warn!(err = %err, "`sdk-ua-app-id` property in profile `{}` was invalid", selected_profile_name); + None + } + }) + .flatten() + } +} + +/// Builder for [ProfileFileAppNameProvider] +#[derive(Default)] +pub struct Builder { + config: Option, + profile_override: Option, +} + +impl Builder { + /// Override the configuration for this provider + pub fn configure(mut self, config: &ProviderConfig) -> Self { + self.config = Some(config.clone()); + self + } + + /// Override the profile name used by the [ProfileFileAppNameProvider] + pub fn profile_name(mut self, profile_name: impl Into) -> Self { + self.profile_override = Some(profile_name.into()); + self + } + + /// Build a [ProfileFileAppNameProvider] from this builder + pub fn build(self) -> ProfileFileAppNameProvider { + let conf = self.config.unwrap_or_default(); + ProfileFileAppNameProvider { + env: conf.env(), + fs: conf.fs(), + profile_override: self.profile_override, + } + } +} + +#[cfg(test)] +mod tests { + use super::ProfileFileAppNameProvider; + use crate::provider_config::ProviderConfig; + use crate::test_case::no_traffic_connector; + use aws_sdk_sts::AppName; + use aws_types::os_shim_internal::{Env, Fs}; + use tracing_test::traced_test; + + fn provider_config(config_contents: &str) -> ProviderConfig { + let fs = Fs::from_slice(&[("test_config", config_contents)]); + let env = Env::from_slice(&[("AWS_CONFIG_FILE", "test_config")]); + ProviderConfig::empty() + .with_fs(fs) + .with_env(env) + .with_http_connector(no_traffic_connector()) + } + + fn default_provider(config_contents: &str) -> ProfileFileAppNameProvider { + 
ProfileFileAppNameProvider::builder() + .configure(&provider_config(config_contents)) + .build() + } + + #[tokio::test] + async fn no_app_name() { + assert_eq!(None, default_provider("[default]\n").app_name().await); + } + + #[tokio::test] + async fn app_name_default_profile() { + assert_eq!( + Some(AppName::new("test").unwrap()), + default_provider("[default]\nsdk-ua-app-id = test") + .app_name() + .await + ); + } + + #[tokio::test] + async fn app_name_other_profiles() { + let config = "\ + [default]\n\ + sdk-ua-app-id = test\n\ + \n\ + [profile other]\n\ + sdk-ua-app-id = bar\n + "; + assert_eq!( + Some(AppName::new("bar").unwrap()), + ProfileFileAppNameProvider::builder() + .profile_name("other") + .configure(&provider_config(config)) + .build() + .app_name() + .await + ); + } + + #[traced_test] + #[tokio::test] + async fn invalid_app_name() { + assert_eq!( + None, + default_provider("[default]\nsdk-ua-app-id = definitely invalid") + .app_name() + .await + ); + assert!(logs_contain( + "`sdk-ua-app-id` property in profile `default` was invalid" + )); + } +} diff --git a/patch/aws-config/src/profile/credentials.rs b/patch/aws-config/src/profile/credentials.rs new file mode 100644 index 0000000000000..9a6b4a494a593 --- /dev/null +++ b/patch/aws-config/src/profile/credentials.rs @@ -0,0 +1,496 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Profile File Based Credential Providers +//! +//! Profile file based providers combine two pieces: +//! +//! 1. Parsing and resolution of the assume role chain +//! 2. A user-modifiable hashmap of provider name to provider. +//! +//! Profile file based providers first determine the chain of providers that will be used to load +//! credentials. After determining and validating this chain, a `Vec` of providers will be created. +//! +//! Each subsequent provider will provide boostrap providers to the next provider in order to load +//! the final credentials. +//! +//! This module contains two sub modules: +//! - `repr` which contains an abstract representation of a provider chain and the logic to +//! build it from `~/.aws/credentials` and `~/.aws/config`. +//! - `exec` which contains a chain representation of providers to implement passing bootstrapped credentials +//! through a series of providers. + +use std::borrow::Cow; +use std::collections::HashMap; +use std::error::Error; +use std::fmt::{Display, Formatter}; +use std::sync::Arc; + +use aws_types::credentials::{self, future, CredentialsError, ProvideCredentials}; + +use tracing::Instrument; + +use crate::profile::credentials::exec::named::NamedProviderFactory; +use crate::profile::credentials::exec::{ClientConfiguration, ProviderChain}; +use crate::profile::parser::ProfileParseError; +use crate::profile::Profile; +use crate::provider_config::ProviderConfig; + +mod exec; +mod repr; + +impl ProvideCredentials for ProfileFileCredentialsProvider { + fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials<'a> + where + Self: 'a, + { + future::ProvideCredentials::new(self.load_credentials().instrument(tracing::debug_span!( + "load_credentials", + provider = %"Profile" + ))) + } +} + +/// AWS Profile based credentials provider +/// +/// This credentials provider will load credentials from `~/.aws/config` and `~/.aws/credentials`. +/// The locations of these files are configurable via environment variables, see [below](#location-of-profile-files). 
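// [Editor's illustrative sketch — not part of this patch.] Programmatic use of the
// profile-based app name provider defined above, reading `sdk-ua-app-id` from a specific
// profile. It assumes `ProfileFileAppNameProvider` is re-exported from the `profile` module
// like the other profile providers in this patch; the profile name is made up.
async fn app_name_example() -> Option<aws_types::app_name::AppName> {
    use aws_config::profile::ProfileFileAppNameProvider;

    ProfileFileAppNameProvider::builder()
        // Read from `[profile other]` instead of the `AWS_PROFILE`/default selection.
        .profile_name("other")
        .build()
        .app_name()
        .await
}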
+/// +/// Generally, this will be constructed via the default provider chain, however, it can be manually +/// constructed with the builder: +/// ```rust,no_run +/// use aws_config::profile::ProfileFileCredentialsProvider; +/// let provider = ProfileFileCredentialsProvider::builder().build(); +/// ``` +/// +/// _Note: Profile providers to not implement any caching. They will reload and reparse the profile +/// from the file system when called. See [lazy_caching](crate::meta::credentials::LazyCachingCredentialsProvider) for +/// more information about caching._ +/// +/// This provider supports several different credentials formats: +/// ### Credentials defined explicitly within the file +/// ```ini +/// [default] +/// aws_access_key_id = 123 +/// aws_secret_access_key = 456 +/// ``` +/// +/// ### Assume Role Credentials loaded from a credential source +/// ```ini +/// [default] +/// role_arn = arn:aws:iam::123456789:role/RoleA +/// credential_source = Environment +/// ``` +/// +/// NOTE: Currently only the `Environment` credential source is supported although it is possible to +/// provide custom sources: +/// ```no_run +/// use aws_types::credentials::{self, ProvideCredentials, future}; +/// use aws_config::profile::ProfileFileCredentialsProvider; +/// #[derive(Debug)] +/// struct MyCustomProvider; +/// impl MyCustomProvider { +/// async fn load_credentials(&self) -> credentials::Result { +/// todo!() +/// } +/// } +/// +/// impl ProvideCredentials for MyCustomProvider { +/// fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials where Self: 'a { +/// future::ProvideCredentials::new(self.load_credentials()) +/// } +/// } +/// # if cfg!(any(feature = "rustls", feature = "native-tls")) { +/// let provider = ProfileFileCredentialsProvider::builder() +/// .with_custom_provider("Custom", MyCustomProvider) +/// .build(); +/// } +/// ``` +/// +/// ### Assume role credentials from a source profile +/// ```ini +/// [default] +/// role_arn = arn:aws:iam::123456789:role/RoleA +/// source_profile = base +/// +/// [profile base] +/// aws_access_key_id = 123 +/// aws_secret_access_key = 456 +/// ``` +/// +/// Other more complex configurations are possible, consult `test-data/assume-role-tests.json`. +/// +/// ### Loading Credentials from SSO +/// ```ini +/// [default] +/// sso_start_url = https://example.com/start +/// sso_region = us-east-2 +/// sso_account_id = 123456789011 +/// sso_role_name = readOnly +/// region = us-west-2 +/// ``` +/// +/// SSO can also be used as a source profile for assume role chains. +/// +/// ## Location of Profile Files +/// * The location of the config file will be loaded from the `AWS_CONFIG_FILE` environment variable +/// with a fallback to `~/.aws/config` +/// * The location of the credentials file will be loaded from the `AWS_SHARED_CREDENTIALS_FILE` +/// environment variable with a fallback to `~/.aws/credentials` +/// +/// ## Home directory resolution +/// Home directory resolution is implemented to match the behavior of the CLI & Python. `~` is only +/// used for home directory resolution when it: +/// - Starts the path +/// - Is followed immediately by `/` or a platform specific separator. (On windows, `~/` and `~\` both +/// resolve to the home directory. 
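// [Editor's illustrative sketch — not part of this patch.] Pointing the profile credentials
// provider at in-memory configuration via `ProviderConfig`, mirroring the stubbing pattern used
// in this patch's own test modules for the file locations described above. The file name,
// profile contents, and key values are made up; the crate's tests additionally stub the HTTP
// connector, which is omitted here for brevity.
async fn profile_provider_from_stubbed_files() {
    use aws_config::profile::ProfileFileCredentialsProvider;
    use aws_config::provider_config::ProviderConfig;
    use aws_types::credentials::ProvideCredentials;
    use aws_types::os_shim_internal::{Env, Fs};

    let fs = Fs::from_slice(&[(
        "test_config",
        "[default]\naws_access_key_id = 123\naws_secret_access_key = 456\n",
    )]);
    let env = Env::from_slice(&[("AWS_CONFIG_FILE", "test_config")]);

    let provider = ProfileFileCredentialsProvider::builder()
        .configure(&ProviderConfig::empty().with_fs(fs).with_env(env))
        .build();

    // Static credentials are read directly from the in-memory config file.
    let creds = provider.provide_credentials().await.expect("static credentials");
    assert_eq!("123", creds.access_key_id());
}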
+/// +/// When determining the home directory, the following environment variables are checked: +/// - `HOME` on all platforms +/// - `USERPROFILE` on Windows +/// - The concatenation of `HOMEDRIVE` and `HOMEPATH` on Windows (`$HOMEDRIVE$HOMEPATH`) +#[derive(Debug)] +pub struct ProfileFileCredentialsProvider { + factory: NamedProviderFactory, + client_config: ClientConfiguration, + provider_config: ProviderConfig, + profile_override: Option, +} + +impl ProfileFileCredentialsProvider { + /// Builder for this credentials provider + pub fn builder() -> Builder { + Builder::default() + } + + async fn load_credentials(&self) -> credentials::Result { + let inner_provider = build_provider_chain( + &self.provider_config, + &self.factory, + self.profile_override.as_deref(), + ) + .await + .map_err(|err| match err { + ProfileFileError::NoProfilesDefined + | ProfileFileError::ProfileDidNotContainCredentials { .. } => { + CredentialsError::not_loaded(err) + } + _ => CredentialsError::invalid_configuration(format!( + "ProfileFile provider could not be built: {}", + &err + )), + })?; + let mut creds = match inner_provider + .base() + .provide_credentials() + .instrument(tracing::debug_span!("load_base_credentials")) + .await + { + Ok(creds) => { + tracing::info!(creds = ?creds, "loaded base credentials"); + creds + } + Err(e) => { + tracing::warn!(error = %e, "failed to load base credentials"); + return Err(CredentialsError::provider_error(e)); + } + }; + for provider in inner_provider.chain().iter() { + let next_creds = provider + .credentials(creds, &self.client_config) + .instrument(tracing::debug_span!("load_assume_role", provider = ?provider)) + .await; + match next_creds { + Ok(next_creds) => { + tracing::info!(creds = ?next_creds, "loaded assume role credentials"); + creds = next_creds + } + Err(e) => { + tracing::warn!(provider = ?provider, "failed to load assume role credentials"); + return Err(CredentialsError::provider_error(e)); + } + } + } + Ok(creds) + } +} + +/// An Error building a Credential source from an AWS Profile +#[derive(Debug)] +#[non_exhaustive] +pub enum ProfileFileError { + /// The profile was not a valid AWS profile + #[non_exhaustive] + CouldNotParseProfile(ProfileParseError), + + /// No profiles existed (the profile was empty) + #[non_exhaustive] + NoProfilesDefined, + + /// The profile did not contain any credential information + #[non_exhaustive] + ProfileDidNotContainCredentials { + /// The name of the profile + profile: String, + }, + + /// The profile contained an infinite loop of `source_profile` references + #[non_exhaustive] + CredentialLoop { + /// Vec of profiles leading to the loop + profiles: Vec, + /// The next profile that caused the loop + next: String, + }, + + /// The profile was missing a credential source + #[non_exhaustive] + MissingCredentialSource { + /// The name of the profile + profile: String, + /// Error message + message: Cow<'static, str>, + }, + /// The profile contained an invalid credential source + #[non_exhaustive] + InvalidCredentialSource { + /// The name of the profile + profile: String, + /// Error message + message: Cow<'static, str>, + }, + /// The profile referred to a another profile by name that was not defined + #[non_exhaustive] + MissingProfile { + /// The name of the profile + profile: String, + /// Error message + message: Cow<'static, str>, + }, + /// The profile referred to `credential_source` that was not defined + #[non_exhaustive] + UnknownProvider { + /// The name of the provider + name: String, + }, +} + +impl 
ProfileFileError { + fn missing_field(profile: &Profile, field: &'static str) -> Self { + ProfileFileError::MissingProfile { + profile: profile.name().to_string(), + message: format!("`{}` was missing", field).into(), + } + } +} + +impl Display for ProfileFileError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + ProfileFileError::CouldNotParseProfile(err) => { + write!(f, "could not parse profile file: {}", err) + } + ProfileFileError::CredentialLoop { profiles, next } => write!( + f, + "profile formed an infinite loop. first we loaded {:?}, \ + then attempted to reload {}", + profiles, next + ), + ProfileFileError::MissingCredentialSource { profile, message } => { + write!(f, "missing credential source in `{}`: {}", profile, message) + } + ProfileFileError::InvalidCredentialSource { profile, message } => { + write!(f, "invalid credential source in `{}`: {}", profile, message) + } + ProfileFileError::MissingProfile { profile, message } => { + write!(f, "profile `{}` was not defined: {}", profile, message) + } + ProfileFileError::UnknownProvider { name } => write!( + f, + "profile referenced `{}` provider but that provider is not supported", + name + ), + ProfileFileError::NoProfilesDefined => write!(f, "No profiles were defined"), + ProfileFileError::ProfileDidNotContainCredentials { profile } => write!( + f, + "profile `{}` did not contain credential information", + profile + ), + } + } +} + +impl Error for ProfileFileError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + ProfileFileError::CouldNotParseProfile(err) => Some(err), + _ => None, + } + } +} + +/// Builder for [`ProfileFileCredentialsProvider`] +#[derive(Default)] +pub struct Builder { + provider_config: Option, + profile_override: Option, + custom_providers: HashMap, Arc>, +} + +impl Builder { + /// Override the configuration for the [`ProfileFileCredentialsProvider`] + /// + /// # Examples + /// + /// ```no_run + /// # async fn test() { + /// use aws_config::profile::ProfileFileCredentialsProvider; + /// use aws_config::provider_config::ProviderConfig; + /// let provider = ProfileFileCredentialsProvider::builder() + /// .configure(&ProviderConfig::with_default_region().await) + /// .build(); + /// # } + /// ``` + pub fn configure(mut self, provider_config: &ProviderConfig) -> Self { + self.provider_config = Some(provider_config.clone()); + self + } + + /// Adds a custom credential source + /// + /// # Examples + /// + /// ```no_run + /// use aws_types::credentials::{self, ProvideCredentials, future}; + /// use aws_config::profile::ProfileFileCredentialsProvider; + /// #[derive(Debug)] + /// struct MyCustomProvider; + /// impl MyCustomProvider { + /// async fn load_credentials(&self) -> credentials::Result { + /// todo!() + /// } + /// } + /// + /// impl ProvideCredentials for MyCustomProvider { + /// fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials where Self: 'a { + /// future::ProvideCredentials::new(self.load_credentials()) + /// } + /// } + /// + /// # if cfg!(any(feature = "rustls", feature = "native-tls")) { + /// let provider = ProfileFileCredentialsProvider::builder() + /// .with_custom_provider("Custom", MyCustomProvider) + /// .build(); + /// # } + /// ``` + pub fn with_custom_provider( + mut self, + name: impl Into>, + provider: impl ProvideCredentials + 'static, + ) -> Self { + self.custom_providers + .insert(name.into(), Arc::new(provider)); + self + } + + /// Override the profile name used by the [`ProfileFileCredentialsProvider`] + pub 
fn profile_name(mut self, profile_name: impl Into) -> Self { + self.profile_override = Some(profile_name.into()); + self + } + + /// Builds a [`ProfileFileCredentialsProvider`] + pub fn build(self) -> ProfileFileCredentialsProvider { + let build_span = tracing::debug_span!("build_profile_provider"); + let _enter = build_span.enter(); + let conf = self.provider_config.unwrap_or_default(); + let mut named_providers = self.custom_providers.clone(); + named_providers + .entry("Environment".into()) + .or_insert_with(|| { + Arc::new(crate::environment::credentials::EnvironmentVariableCredentialsProvider::new_with_env( + conf.env(), + )) + }); + + named_providers + .entry("Ec2InstanceMetadata".into()) + .or_insert_with(|| { + Arc::new( + crate::imds::credentials::ImdsCredentialsProvider::builder() + .configure(&conf) + .build(), + ) + }); + + named_providers + .entry("EcsContainer".into()) + .or_insert_with(|| { + Arc::new( + crate::ecs::EcsCredentialsProvider::builder() + .configure(&conf) + .build(), + ) + }); + let factory = exec::named::NamedProviderFactory::new(named_providers); + let core_client = conf.sts_client(); + + ProfileFileCredentialsProvider { + factory, + client_config: ClientConfiguration { + sts_client: core_client, + region: conf.region(), + }, + provider_config: conf, + profile_override: self.profile_override, + } + } +} + +async fn build_provider_chain( + provider_config: &ProviderConfig, + factory: &NamedProviderFactory, + profile_override: Option<&str>, +) -> Result { + let profile_set = super::parser::load(&provider_config.fs(), &provider_config.env()) + .await + .map_err(|err| { + tracing::warn!(err = %err, "failed to parse profile"); + ProfileFileError::CouldNotParseProfile(err) + })?; + let repr = repr::resolve_chain(&profile_set, profile_override)?; + tracing::info!(chain = ?repr, "constructed abstract provider from config file"); + exec::ProviderChain::from_repr(provider_config, repr, factory) +} + +#[cfg(test)] +mod test { + use tracing_test::traced_test; + + use crate::profile::credentials::Builder; + use crate::test_case::TestEnvironment; + + macro_rules! make_test { + ($name: ident) => { + #[traced_test] + #[tokio::test] + async fn $name() { + TestEnvironment::from_dir(concat!( + "./test-data/profile-provider/", + stringify!($name) + )) + .unwrap() + .execute(|conf| async move { Builder::default().configure(&conf).build() }) + .await + } + }; + } + + make_test!(e2e_assume_role); + make_test!(empty_config); + make_test!(retry_on_error); + make_test!(invalid_config); + make_test!(region_override); +} diff --git a/patch/aws-config/src/profile/credentials/exec.rs b/patch/aws-config/src/profile/credentials/exec.rs new file mode 100644 index 0000000000000..d56e34ed6c23a --- /dev/null +++ b/patch/aws-config/src/profile/credentials/exec.rs @@ -0,0 +1,232 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +use std::sync::Arc; + +use aws_sdk_sts::operation::AssumeRole; +use aws_sdk_sts::{Config, Credentials}; +use aws_types::region::Region; + +use super::repr::{self, BaseProvider}; + +use crate::profile::credentials::ProfileFileError; +use crate::provider_config::ProviderConfig; +use crate::sso::{SsoConfig, SsoCredentialsProvider}; +use crate::sts; +use crate::web_identity_token::{StaticConfiguration, WebIdentityTokenCredentialsProvider}; +use aws_sdk_sts::middleware::DefaultMiddleware; +use aws_smithy_client::erase::DynConnector; +use aws_types::credentials::{self, CredentialsError, ProvideCredentials}; + +use std::fmt::Debug; + +#[derive(Debug)] +pub struct AssumeRoleProvider { + role_arn: String, + external_id: Option, + session_name: Option, +} + +#[derive(Debug)] +pub struct ClientConfiguration { + pub(crate) sts_client: aws_smithy_client::Client, + pub(crate) region: Option, +} + +impl AssumeRoleProvider { + pub async fn credentials( + &self, + input_credentials: Credentials, + client_config: &ClientConfiguration, + ) -> credentials::Result { + let config = Config::builder() + .credentials_provider(input_credentials) + .region(client_config.region.clone()) + .build(); + let session_name = &self + .session_name + .as_ref() + .cloned() + .unwrap_or_else(|| sts::util::default_session_name("assume-role-from-profile")); + let operation = AssumeRole::builder() + .role_arn(&self.role_arn) + .set_external_id(self.external_id.clone()) + .role_session_name(session_name) + .build() + .expect("operation is valid") + .make_operation(&config) + .await + .expect("valid operation"); + let assume_role_creds = client_config + .sts_client + .call(operation) + .await + .map_err(CredentialsError::provider_error)? + .credentials; + sts::util::into_credentials(assume_role_creds, "AssumeRoleProvider") + } +} + +#[derive(Debug)] +pub(super) struct ProviderChain { + base: Arc, + chain: Vec, +} + +impl ProviderChain { + pub fn base(&self) -> &dyn ProvideCredentials { + self.base.as_ref() + } + + pub fn chain(&self) -> &[AssumeRoleProvider] { + self.chain.as_slice() + } +} + +impl ProviderChain { + pub fn from_repr( + provider_config: &ProviderConfig, + repr: repr::ProfileChain, + factory: &named::NamedProviderFactory, + ) -> Result { + let base = match repr.base() { + BaseProvider::NamedSource(name) => { + factory + .provider(name) + .ok_or(ProfileFileError::UnknownProvider { + name: name.to_string(), + })? 
+ } + BaseProvider::AccessKey(key) => Arc::new(key.clone()), + BaseProvider::WebIdentityTokenRole { + role_arn, + web_identity_token_file, + session_name, + } => { + let provider = WebIdentityTokenCredentialsProvider::builder() + .static_configuration(StaticConfiguration { + web_identity_token_file: web_identity_token_file.into(), + role_arn: role_arn.to_string(), + session_name: session_name.map(|sess| sess.to_string()).unwrap_or_else( + || sts::util::default_session_name("web-identity-token-profile"), + ), + }) + .configure(provider_config) + .build(); + Arc::new(provider) + } + BaseProvider::Sso { + sso_account_id, + sso_region, + sso_role_name, + sso_start_url, + } => { + let sso_config = SsoConfig { + account_id: sso_account_id.to_string(), + role_name: sso_role_name.to_string(), + start_url: sso_start_url.to_string(), + region: Region::new(sso_region.to_string()), + }; + Arc::new(SsoCredentialsProvider::new(provider_config, sso_config)) + } + }; + tracing::info!(base = ?repr.base(), "first credentials will be loaded from {:?}", repr.base()); + let chain = repr + .chain() + .iter() + .map(|role_arn| { + tracing::info!(role_arn = ?role_arn, "which will be used to assume a role"); + AssumeRoleProvider { + role_arn: role_arn.role_arn.into(), + external_id: role_arn.external_id.map(|id| id.into()), + session_name: role_arn.session_name.map(|id| id.into()), + } + }) + .collect(); + Ok(ProviderChain { base, chain }) + } +} + +pub mod named { + use std::collections::HashMap; + use std::sync::Arc; + + use aws_types::credentials::ProvideCredentials; + use std::borrow::Cow; + + #[derive(Debug)] + pub struct NamedProviderFactory { + providers: HashMap, Arc>, + } + + fn lower_cow(mut inp: Cow) -> Cow { + if !inp.chars().all(|c| c.is_ascii_lowercase()) { + inp.to_mut().make_ascii_lowercase(); + } + inp + } + + impl NamedProviderFactory { + pub fn new(providers: HashMap, Arc>) -> Self { + let providers = providers + .into_iter() + .map(|(k, v)| (lower_cow(k), v)) + .collect(); + Self { providers } + } + + pub fn provider(&self, name: &str) -> Option> { + self.providers.get(&lower_cow(Cow::Borrowed(name))).cloned() + } + } +} + +#[cfg(test)] +mod test { + use crate::profile::credentials::exec::named::NamedProviderFactory; + use crate::profile::credentials::exec::ProviderChain; + use crate::profile::credentials::repr::{BaseProvider, ProfileChain}; + use crate::provider_config::ProviderConfig; + use crate::test_case::no_traffic_connector; + + use aws_types::Credentials; + use std::collections::HashMap; + use std::sync::Arc; + + #[test] + fn providers_case_insensitive() { + let mut base = HashMap::new(); + base.insert( + "Environment".into(), + Arc::new(Credentials::new("key", "secret", None, None, "test")) as _, + ); + let provider = NamedProviderFactory::new(base); + assert!(provider.provider("environment").is_some()); + assert!(provider.provider("envIROnment").is_some()); + assert!(provider.provider(" envIROnment").is_none()); + assert!(provider.provider("Environment").is_some()); + } + + #[test] + fn error_on_unknown_provider() { + let factory = NamedProviderFactory::new(HashMap::new()); + let chain = ProviderChain::from_repr( + &ProviderConfig::empty().with_http_connector(no_traffic_connector()), + ProfileChain { + base: BaseProvider::NamedSource("floozle"), + chain: vec![], + }, + &factory, + ); + let err = chain.expect_err("no source by that name"); + assert!( + format!("{}", err).contains( + "profile referenced `floozle` provider but that provider is not supported" + ), + "`{}` did not match 
expected error", + err + ); + } +} diff --git a/patch/aws-config/src/profile/credentials/repr.rs b/patch/aws-config/src/profile/credentials/repr.rs new file mode 100644 index 0000000000000..5e7ab250b1b0d --- /dev/null +++ b/patch/aws-config/src/profile/credentials/repr.rs @@ -0,0 +1,492 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Flattened Representation of an AssumeRole chain +//! +//! Assume Role credentials in profile files can chain together credentials from multiple +//! different providers with subsequent credentials being used to configure subsequent providers. +//! +//! This module can parse and resolve the profile chain into a flattened representation with +//! 1-credential-per row (as opposed to a direct profile file representation which can combine +//! multiple actions into the same profile). + +use crate::profile::credentials::ProfileFileError; +use crate::profile::{Profile, ProfileSet}; +use aws_types::Credentials; + +/// Chain of Profile Providers +/// +/// Within a profile file, a chain of providers is produced. Starting with a base provider, +/// subsequent providers use the credentials from previous providers to perform their task. +/// +/// ProfileChain is a direct representation of the Profile. It can contain named providers +/// that don't actually have implementations. +#[derive(Debug)] +pub struct ProfileChain<'a> { + pub(crate) base: BaseProvider<'a>, + pub(crate) chain: Vec>, +} + +impl<'a> ProfileChain<'a> { + pub fn base(&self) -> &BaseProvider<'a> { + &self.base + } + + pub fn chain(&self) -> &[RoleArn<'a>] { + self.chain.as_slice() + } +} + +/// A base member of the profile chain +/// +/// Base providers do not require input credentials to provide their own credentials, +/// e.g. IMDS, ECS, Environment variables +#[derive(Debug, Clone)] +#[non_exhaustive] +pub enum BaseProvider<'a> { + /// A profile that specifies a named credential source + /// Eg: `credential_source = Ec2InstanceMetadata` + /// + /// The following profile produces two separate `ProfileProvider` rows: + /// 1. `BaseProvider::NamedSource("Ec2InstanceMetadata")` + /// 2. `RoleArn { role_arn: "...", ... } + /// ```ini + /// [profile assume-role] + /// role_arn = arn:aws:iam::123456789:role/MyRole + /// credential_source = Ec2InstanceMetadata + /// ``` + NamedSource(&'a str), + + /// A profile with explicitly configured access keys + /// + /// Example + /// ```ini + /// [profile C] + /// aws_access_key_id = abc123 + /// aws_secret_access_key = def456 + /// ``` + AccessKey(Credentials), + + WebIdentityTokenRole { + role_arn: &'a str, + web_identity_token_file: &'a str, + session_name: Option<&'a str>, + }, + + /// An SSO Provider + Sso { + sso_account_id: &'a str, + sso_region: &'a str, + sso_role_name: &'a str, + sso_start_url: &'a str, + }, +} + +/// A profile that specifies a role to assume +/// +/// A RoleArn can only be created from either a profile with `source_profile` +/// or one with `credential_source`. 
+#[derive(Debug)] +pub struct RoleArn<'a> { + /// Role to assume + pub role_arn: &'a str, + /// external_id parameter to pass to the assume role provider + pub external_id: Option<&'a str>, + + /// session name parameter to pass to the assume role provider + pub session_name: Option<&'a str>, +} + +/// Resolve a ProfileChain from a ProfileSet or return an error +pub fn resolve_chain<'a>( + profile_set: &'a ProfileSet, + profile_override: Option<&str>, +) -> Result, ProfileFileError> { + if profile_set.is_empty() { + return Err(ProfileFileError::NoProfilesDefined); + } + let mut source_profile_name = + profile_override.unwrap_or_else(|| profile_set.selected_profile()); + let mut visited_profiles = vec![]; + let mut chain = vec![]; + let base = loop { + // Get the next profile in the chain + let profile = profile_set.get_profile(source_profile_name).ok_or( + ProfileFileError::MissingProfile { + profile: source_profile_name.into(), + message: format!( + "could not find source profile {} referenced from {}", + source_profile_name, + visited_profiles.last().unwrap_or(&"the root profile") + ) + .into(), + }, + )?; + // If the profile we just got is one we've already seen, we're in a loop and + // need to break out with a CredentialLoop error + if visited_profiles.contains(&source_profile_name) { + return Err(ProfileFileError::CredentialLoop { + profiles: visited_profiles + .into_iter() + .map(|s| s.to_string()) + .collect(), + next: source_profile_name.to_string(), + }); + } + // otherwise, store the name of the profile in case we see it again later + visited_profiles.push(source_profile_name); + // After the first item in the chain, we will prioritize static credentials if they exist + if visited_profiles.len() > 1 { + let try_static = static_creds_from_profile(profile); + if let Ok(static_credentials) = try_static { + break BaseProvider::AccessKey(static_credentials); + } + } + + let next_profile = { + // The existence of a `role_arn` is the only signal that multiple profiles will be chained. + // We check for one here and then process the profile accordingly as either a "chain provider" + // or a "base provider" + if let Some(role_provider) = role_arn_from_profile(profile) { + let next = chain_provider(profile)?; + chain.push(role_provider); + next + } else { + break base_provider(profile).map_err(|err| { + // It's possible for base_provider to return a `ProfileFileError::ProfileDidNotContainCredentials` + // if we're still looking at the first provider we want to surface it. However, + // if we're looking at any provider after the first we want to instead return a `ProfileFileError::InvalidCredentialSource` + if visited_profiles.len() == 1 { + err + } else { + ProfileFileError::InvalidCredentialSource { + profile: profile.name().into(), + message: format!("could not load source profile: {}", err).into(), + } + } + })?; + } + }; + + match next_profile { + NextProfile::SelfReference => { + // self referential profile, don't go through the loop because it will error + // on the infinite loop check. Instead, reload this profile as a base profile + // and exit. 
+ break base_provider(profile)?; + } + NextProfile::Named(name) => source_profile_name = name, + } + }; + chain.reverse(); + Ok(ProfileChain { base, chain }) +} + +mod role { + pub const ROLE_ARN: &str = "role_arn"; + pub const EXTERNAL_ID: &str = "external_id"; + pub const SESSION_NAME: &str = "role_session_name"; + + pub const CREDENTIAL_SOURCE: &str = "credential_source"; + pub const SOURCE_PROFILE: &str = "source_profile"; +} + +mod sso { + pub const ACCOUNT_ID: &str = "sso_account_id"; + pub const REGION: &str = "sso_region"; + pub const ROLE_NAME: &str = "sso_role_name"; + pub const START_URL: &str = "sso_start_url"; +} + +mod web_identity_token { + pub const TOKEN_FILE: &str = "web_identity_token_file"; +} + +mod static_credentials { + pub const AWS_ACCESS_KEY_ID: &str = "aws_access_key_id"; + pub const AWS_SECRET_ACCESS_KEY: &str = "aws_secret_access_key"; + pub const AWS_SESSION_TOKEN: &str = "aws_session_token"; +} +const PROVIDER_NAME: &str = "ProfileFile"; + +fn base_provider(profile: &Profile) -> Result { + // the profile must define either a `CredentialsSource` or a concrete set of access keys + match profile.get(role::CREDENTIAL_SOURCE) { + Some(source) => Ok(BaseProvider::NamedSource(source)), + None => web_identity_token_from_profile(profile) + .or_else(|| sso_from_profile(profile)) + .unwrap_or_else(|| Ok(BaseProvider::AccessKey(static_creds_from_profile(profile)?))), + } +} + +enum NextProfile<'a> { + SelfReference, + Named(&'a str), +} + +fn chain_provider(profile: &Profile) -> Result { + let (source_profile, credential_source) = ( + profile.get(role::SOURCE_PROFILE), + profile.get(role::CREDENTIAL_SOURCE), + ); + match (source_profile, credential_source) { + (Some(_), Some(_)) => Err(ProfileFileError::InvalidCredentialSource { + profile: profile.name().to_string(), + message: "profile contained both source_profile and credential_source. 
\ + Only one or the other can be defined" + .into(), + }), + (None, None) => Err(ProfileFileError::InvalidCredentialSource { + profile: profile.name().to_string(), + message: + "profile must contain `source_profile` or `credential_source` but neither were defined" + .into(), + }), + (Some(source_profile), None) if source_profile == profile.name() => { + Ok(NextProfile::SelfReference) + } + (Some(source_profile), None) => Ok(NextProfile::Named(source_profile)), + // we want to loop back into this profile and pick up the credential source + (None, Some(_credential_source)) => Ok(NextProfile::SelfReference), + } +} + +fn role_arn_from_profile(profile: &Profile) -> Option { + // Web Identity Tokens are root providers, not chained roles + if profile.get(web_identity_token::TOKEN_FILE).is_some() { + return None; + } + let role_arn = profile.get(role::ROLE_ARN)?; + let session_name = profile.get(role::SESSION_NAME); + let external_id = profile.get(role::EXTERNAL_ID); + Some(RoleArn { + role_arn, + external_id, + session_name, + }) +} + +fn sso_from_profile(profile: &Profile) -> Option> { + /* + Sample: + [profile sample-profile] + sso_account_id = 012345678901 + sso_region = us-east-1 + sso_role_name = SampleRole + sso_start_url = https://d-abc123.awsapps.com/start-beta + */ + let account_id = profile.get(sso::ACCOUNT_ID); + let region = profile.get(sso::REGION); + let role_name = profile.get(sso::ROLE_NAME); + let start_url = profile.get(sso::START_URL); + if [account_id, region, role_name, start_url] + .iter() + .all(|field| field.is_none()) + { + return None; + } + let missing_field = |s| move || ProfileFileError::missing_field(profile, s); + let parse_profile = || { + let sso_account_id = account_id.ok_or_else(missing_field(sso::ACCOUNT_ID))?; + let sso_region = region.ok_or_else(missing_field(sso::REGION))?; + let sso_role_name = role_name.ok_or_else(missing_field(sso::ROLE_NAME))?; + let sso_start_url = start_url.ok_or_else(missing_field(sso::START_URL))?; + Ok(BaseProvider::Sso { + sso_account_id, + sso_region, + sso_role_name, + sso_start_url, + }) + }; + Some(parse_profile()) +} + +fn web_identity_token_from_profile( + profile: &Profile, +) -> Option> { + let session_name = profile.get(role::SESSION_NAME); + match ( + profile.get(role::ROLE_ARN), + profile.get(web_identity_token::TOKEN_FILE), + ) { + (Some(role_arn), Some(token_file)) => Some(Ok(BaseProvider::WebIdentityTokenRole { + role_arn, + web_identity_token_file: token_file, + session_name, + })), + (None, None) => None, + (Some(_role_arn), None) => None, + (None, Some(_token_file)) => Some(Err(ProfileFileError::InvalidCredentialSource { + profile: profile.name().to_string(), + message: "`web_identity_token_file` was specified but `role_arn` was missing".into(), + })), + } +} + +/// Load static credentials from a profile +/// +/// Example: +/// ```ini +/// [profile B] +/// aws_access_key_id = abc123 +/// aws_secret_access_key = def456 +/// ``` +fn static_creds_from_profile(profile: &Profile) -> Result { + use static_credentials::*; + let access_key = profile.get(AWS_ACCESS_KEY_ID); + let secret_key = profile.get(AWS_SECRET_ACCESS_KEY); + let session_token = profile.get(AWS_SESSION_TOKEN); + // If all three fields are missing return a `ProfileFileError::ProfileDidNotContainCredentials` + if let (None, None, None) = (access_key, secret_key, session_token) { + return Err(ProfileFileError::ProfileDidNotContainCredentials { + profile: profile.name().to_string(), + }); + } + // Otherwise, check to make sure the access and secret keys 
are defined + let access_key = access_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource { + profile: profile.name().to_string(), + message: "profile missing aws_access_key_id".into(), + })?; + let secret_key = secret_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource { + profile: profile.name().to_string(), + message: "profile missing aws_secret_access_key".into(), + })?; + // There might not be an active session token so we don't error out if it's missing + Ok(Credentials::new( + access_key, + secret_key, + session_token.map(|s| s.to_string()), + None, + PROVIDER_NAME, + )) +} + +#[cfg(test)] +mod tests { + use crate::profile::credentials::repr::{resolve_chain, BaseProvider, ProfileChain}; + use crate::profile::ProfileSet; + use serde::Deserialize; + use std::collections::HashMap; + use std::error::Error; + use std::fs; + + #[test] + fn run_test_cases() -> Result<(), Box> { + let test_cases: Vec = + serde_json::from_str(&fs::read_to_string("./test-data/assume-role-tests.json")?)?; + for test_case in test_cases { + print!("checking: {}...", test_case.docs); + check(test_case); + println!("ok") + } + Ok(()) + } + + fn check(test_case: TestCase) { + let source = ProfileSet::new(test_case.input.profile, test_case.input.selected_profile); + let actual = resolve_chain(&source, None); + let expected = test_case.output; + match (expected, actual) { + (TestOutput::Error(s), Err(e)) => assert!( + format!("{}", e).contains(&s), + "expected\n{}\nto contain\n{}\n", + e, + s + ), + (TestOutput::ProfileChain(expected), Ok(actual)) => { + assert_eq!(to_test_output(actual), expected) + } + (expected, actual) => panic!( + "error/success mismatch. Expected:\n {:?}\nActual:\n {:?}", + &expected, actual + ), + } + } + + #[derive(Deserialize)] + struct TestCase { + docs: String, + input: TestInput, + output: TestOutput, + } + + #[derive(Deserialize)] + struct TestInput { + profile: HashMap>, + selected_profile: String, + } + + fn to_test_output(profile_chain: ProfileChain) -> Vec { + let mut output = vec![]; + match profile_chain.base { + BaseProvider::NamedSource(name) => output.push(Provider::NamedSource(name.into())), + BaseProvider::AccessKey(creds) => output.push(Provider::AccessKey { + access_key_id: creds.access_key_id().into(), + secret_access_key: creds.secret_access_key().into(), + session_token: creds.session_token().map(|tok| tok.to_string()), + }), + BaseProvider::WebIdentityTokenRole { + role_arn, + web_identity_token_file, + session_name, + } => output.push(Provider::WebIdentityToken { + role_arn: role_arn.into(), + web_identity_token_file: web_identity_token_file.into(), + role_session_name: session_name.map(|sess| sess.to_string()), + }), + BaseProvider::Sso { + sso_account_id, + sso_region, + sso_role_name, + sso_start_url, + } => output.push(Provider::Sso { + sso_account_id: sso_account_id.into(), + sso_region: sso_region.into(), + sso_role_name: sso_role_name.into(), + sso_start_url: sso_start_url.into(), + }), + }; + for role in profile_chain.chain { + output.push(Provider::AssumeRole { + role_arn: role.role_arn.into(), + external_id: role.external_id.map(ToString::to_string), + role_session_name: role.session_name.map(ToString::to_string), + }) + } + output + } + + #[derive(Deserialize, Debug, PartialEq, Eq)] + enum TestOutput { + ProfileChain(Vec), + Error(String), + } + + #[derive(Deserialize, Debug, Eq, PartialEq)] + enum Provider { + AssumeRole { + role_arn: String, + external_id: Option, + role_session_name: Option, + }, + AccessKey { + access_key_id: String, + 
secret_access_key: String, + session_token: Option, + }, + NamedSource(String), + WebIdentityToken { + role_arn: String, + web_identity_token_file: String, + role_session_name: Option, + }, + Sso { + sso_account_id: String, + sso_region: String, + sso_role_name: String, + sso_start_url: String, + }, + } +} diff --git a/patch/aws-config/src/profile/mod.rs b/patch/aws-config/src/profile/mod.rs new file mode 100644 index 0000000000000..fc7cb666ff3e6 --- /dev/null +++ b/patch/aws-config/src/profile/mod.rs @@ -0,0 +1,24 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Load configuration from AWS Profiles +//! +//! AWS profiles are typically stored in `~/.aws/config` and `~/.aws/credentials`. For more details +//! see the [`load`](parser::load) function. + +mod parser; +#[doc(inline)] +pub use parser::{load, Profile, ProfileParseError, ProfileSet, Property}; + +pub mod app_name; +pub mod credentials; +pub mod region; +pub mod retry_config; +pub mod timeout_config; + +#[doc(inline)] +pub use credentials::ProfileFileCredentialsProvider; +#[doc(inline)] +pub use region::ProfileFileRegionProvider; diff --git a/patch/aws-config/src/profile/parser.rs b/patch/aws-config/src/profile/parser.rs new file mode 100644 index 0000000000000..df282565f277a --- /dev/null +++ b/patch/aws-config/src/profile/parser.rs @@ -0,0 +1,387 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +mod normalize; +mod parse; +mod source; + +use crate::profile::parser::parse::parse_profile_file; +use crate::profile::parser::source::{FileKind, Source}; +use aws_types::os_shim_internal::{Env, Fs}; +use std::borrow::Cow; +use std::collections::HashMap; + +pub use self::parse::ProfileParseError; + +/// Read & parse AWS config files +/// +/// Loads AWS config file from the filesystem, parses them, and converts them into a [`ProfileSet`](ProfileSet). +/// +/// Although the basic behavior is straightforward, there are number of nuances to maintain backwards +/// compatibility with other SDKs enumerated below. +/// +/// ## Location of Profile Files +/// * The location of the config file will be loaded from the `AWS_CONFIG_FILE` environment variable +/// with a fallback to `~/.aws/config` +/// * The location of the credentials file will be loaded from the `AWS_SHARED_CREDENTIALS_FILE` +/// environment variable with a fallback to `~/.aws/credentials` +/// +/// ## Home directory resolution +/// Home directory resolution is implemented to match the behavior of the CLI & Python. `~` is only +/// used for home directory resolution when it: +/// - Starts the path +/// - Is followed immediately by `/` or a platform specific separator. (On windows, `~/` and `~\` both +/// resolve to the home directory. +/// +/// When determining the home directory, the following environment variables are checked: +/// - `HOME` on all platforms +/// - `USERPROFILE` on Windows +/// - The concatenation of `HOMEDRIVE` and `HOMEPATH` on Windows (`$HOMEDRIVE$HOMEPATH`) +/// +/// ## Profile file syntax +/// +/// Profile files have a form similar to `.ini` but with a several edge cases. These behaviors exist +/// to match existing parser implementations, ensuring consistent behavior across AWS SDKs. These +/// cases fully enumerated in `test-data/profile-parser-tests.json`. 
+/// +/// ### The config file `~/.aws/config` +/// ```ini +/// # ~/.aws/config +/// [profile default] +/// key = value +/// +/// # profiles must begin with `profile` +/// [profile other] +/// key = value2 +/// ``` +/// +/// ### The credentials file `~/.aws/credentials` +/// The main difference is that in ~/.aws/credentials, profiles MUST NOT be prefixed with profile: +/// ```ini +/// [default] +/// aws_access_key_id = 123 +/// +/// [other] +/// aws_access_key_id = 456 +/// ``` +pub async fn load(fs: &Fs, env: &Env) -> Result { + let source = source::load(env, fs).await; + ProfileSet::parse(source) +} + +/// A top-level configuration source containing multiple named profiles +#[derive(Debug, Eq, Clone, PartialEq)] +pub struct ProfileSet { + profiles: HashMap, + selected_profile: Cow<'static, str>, +} + +impl ProfileSet { + #[doc(hidden)] + /// Create a new Profile set directly from a HashMap + /// + /// This method creates a ProfileSet directly from a hashmap with no normalization. + /// + /// ## Warning + /// + /// This is probably not what you want! In general, [`load`](load) should be used instead + /// because it will perform input normalization. However, for tests which operate on the + /// normalized profile, this method exists to facilitate easy construction of a ProfileSet + pub fn new( + profiles: HashMap>, + selected_profile: impl Into>, + ) -> Self { + let mut base = ProfileSet::empty(); + base.selected_profile = selected_profile.into(); + for (name, profile) in profiles { + base.profiles.insert( + name.clone(), + Profile::new( + name, + profile + .into_iter() + .map(|(k, v)| (k.clone(), Property::new(k, v))) + .collect(), + ), + ); + } + base + } + + /// Retrieves a key-value pair from the currently selected profile + pub fn get(&self, key: &str) -> Option<&str> { + self.profiles + .get(self.selected_profile.as_ref()) + .and_then(|profile| profile.get(key)) + } + + /// Retrieves a named profile from the profile set + pub fn get_profile(&self, profile_name: &str) -> Option<&Profile> { + self.profiles.get(profile_name) + } + + /// Returns the name of the currently selected profile + pub fn selected_profile(&self) -> &str { + self.selected_profile.as_ref() + } + + /// Returns true if no profiles are contained in this profile set + pub fn is_empty(&self) -> bool { + self.profiles.is_empty() + } + + /// Returns the names of the profiles in this profile set + pub fn profiles(&self) -> impl Iterator { + self.profiles.keys().map(String::as_ref) + } + + fn parse(source: Source) -> Result { + let mut base = ProfileSet::empty(); + base.selected_profile = source.profile; + + normalize::merge_in( + &mut base, + parse_profile_file(&source.config_file)?, + FileKind::Config, + ); + normalize::merge_in( + &mut base, + parse_profile_file(&source.credentials_file)?, + FileKind::Credentials, + ); + Ok(base) + } + + fn empty() -> Self { + Self { + profiles: Default::default(), + selected_profile: "default".into(), + } + } +} + +/// An individual configuration profile +/// +/// An AWS config may be composed of a multiple named profiles within a [`ProfileSet`](ProfileSet) +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct Profile { + name: String, + properties: HashMap, +} + +impl Profile { + /// Create a new profile + pub fn new(name: String, properties: HashMap) -> Self { + Self { name, properties } + } + + /// The name of this profile + pub fn name(&self) -> &str { + &self.name + } + + /// Returns a reference to the property named `name` + pub fn get(&self, name: &str) -> Option<&str> { + 
self.properties.get(name).map(|prop| prop.value()) + } +} + +/// Key-Value property pair +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct Property { + key: String, + value: String, +} + +impl Property { + /// Value of this property + pub fn value(&self) -> &str { + &self.value + } + + /// Name of this property + pub fn key(&self) -> &str { + &self.key + } + + /// Creates a new property + pub fn new(key: String, value: String) -> Self { + Property { key, value } + } +} + +#[cfg(test)] +mod test { + use crate::profile::parser::source::{File, Source}; + use crate::profile::ProfileSet; + use arbitrary::{Arbitrary, Unstructured}; + use serde::Deserialize; + use std::collections::HashMap; + use std::error::Error; + use std::fs; + use tracing_test::traced_test; + + /// Run all tests from `test-data/profile-parser-tests.json` + /// + /// These represent the bulk of the test cases and reach 100% coverage of the parser. + #[test] + #[traced_test] + fn run_tests() -> Result<(), Box> { + let tests = fs::read_to_string("test-data/profile-parser-tests.json")?; + let tests: ParserTests = serde_json::from_str(&tests)?; + for (i, test) in tests.tests.into_iter().enumerate() { + eprintln!("test: {}", i); + check(test); + } + Ok(()) + } + + #[test] + fn empty_source_empty_profile() { + let source = make_source(ParserInput { + config_file: Some("".to_string()), + credentials_file: Some("".to_string()), + }); + + let profile_set = ProfileSet::parse(source).expect("empty profiles are valid"); + assert!(profile_set.is_empty()); + } + + #[test] + fn profile_names_are_exposed() { + let source = make_source(ParserInput { + config_file: Some("[profile foo]\n[profile bar]".to_string()), + credentials_file: Some("".to_string()), + }); + + let profile_set = ProfileSet::parse(source).expect("profiles loaded"); + + let mut profile_names: Vec<_> = profile_set.profiles().collect(); + profile_names.sort(); + assert_eq!(profile_names, vec!["bar", "foo"]); + } + + /// Run all tests from the fuzzing corpus to validate coverage + #[test] + #[ignore] + fn run_fuzz_tests() -> Result<(), Box> { + let fuzz_corpus = fs::read_dir("fuzz/corpus/profile-parser")? 
+ .map(|res| res.map(|entry| entry.path())) + .collect::, _>>()?; + for file in fuzz_corpus { + let raw = fs::read(file)?; + let mut unstructured = Unstructured::new(&raw); + let (conf, creds): (Option<&str>, Option<&str>) = + Arbitrary::arbitrary(&mut unstructured)?; + let profile_source = Source { + config_file: File { + path: "~/.aws/config".to_string(), + contents: conf.unwrap_or_default().to_string(), + }, + credentials_file: File { + path: "~/.aws/config".to_string(), + contents: creds.unwrap_or_default().to_string(), + }, + profile: "default".into(), + }; + // don't care if parse fails, just don't panic + let _ = ProfileSet::parse(profile_source); + } + + Ok(()) + } + + // for test comparison purposes, flatten a profile into a hashmap + fn flatten(profile: ProfileSet) -> HashMap> { + profile + .profiles + .into_iter() + .map(|(_name, profile)| { + ( + profile.name, + profile + .properties + .into_iter() + .map(|(_, prop)| (prop.key, prop.value)) + .collect(), + ) + }) + .collect() + } + + fn make_source(input: ParserInput) -> Source { + Source { + config_file: File { + path: "~/.aws/config".to_string(), + contents: input.config_file.unwrap_or_default(), + }, + credentials_file: File { + path: "~/.aws/credentials".to_string(), + contents: input.credentials_file.unwrap_or_default(), + }, + profile: "default".into(), + } + } + + // wrapper to generate nicer errors during test failure + fn check(test_case: ParserTest) { + let copy = test_case.clone(); + let parsed = ProfileSet::parse(make_source(test_case.input)); + let res = match (parsed.map(flatten), &test_case.output) { + (Ok(actual), ParserOutput::Profiles(expected)) if &actual != expected => Err(format!( + "mismatch:\nExpected: {:#?}\nActual: {:#?}", + expected, actual + )), + (Ok(_), ParserOutput::Profiles(_)) => Ok(()), + (Err(msg), ParserOutput::ErrorContaining(substr)) => { + if format!("{}", msg).contains(substr) { + Ok(()) + } else { + Err(format!("Expected {} to contain {}", msg, substr)) + } + } + (Ok(output), ParserOutput::ErrorContaining(err)) => Err(format!( + "expected an error: {} but parse succeeded:\n{:#?}", + err, output + )), + (Err(err), ParserOutput::Profiles(_expected)) => { + Err(format!("Expected to succeed but got: {}", err)) + } + }; + if let Err(e) = res { + eprintln!("Test case failed: {:#?}", copy); + eprintln!("failure: {}", e); + panic!("test failed") + } + } + + #[derive(Deserialize, Debug)] + #[serde(rename_all = "camelCase")] + struct ParserTests { + tests: Vec, + } + + #[derive(Deserialize, Debug, Clone)] + #[serde(rename_all = "camelCase")] + struct ParserTest { + _name: String, + input: ParserInput, + output: ParserOutput, + } + + #[derive(Deserialize, Debug, Clone)] + #[serde(rename_all = "camelCase")] + enum ParserOutput { + Profiles(HashMap>), + ErrorContaining(String), + } + + #[derive(Deserialize, Debug, Clone)] + #[serde(rename_all = "camelCase")] + struct ParserInput { + config_file: Option, + credentials_file: Option, + } +} diff --git a/patch/aws-config/src/profile/parser/normalize.rs b/patch/aws-config/src/profile/parser/normalize.rs new file mode 100644 index 0000000000000..4ccd9d5de12b8 --- /dev/null +++ b/patch/aws-config/src/profile/parser/normalize.rs @@ -0,0 +1,241 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +use std::borrow::Cow; +use std::collections::HashMap; + +use crate::profile::parser::parse::{RawProfileSet, WHITESPACE}; +use crate::profile::parser::source::FileKind; +use crate::profile::{Profile, ProfileSet, Property}; + +const DEFAULT: &str = "default"; +const PROFILE_PREFIX: &str = "profile"; + +#[derive(Eq, PartialEq, Hash, Debug)] +struct ProfileName<'a> { + name: &'a str, + has_profile_prefix: bool, +} + +impl ProfileName<'_> { + fn parse(input: &str) -> ProfileName { + let input = input.trim_matches(WHITESPACE); + let (name, has_profile_prefix) = match input.strip_prefix(PROFILE_PREFIX) { + // profilefoo isn't considered as having the profile prefix + Some(stripped) if stripped.starts_with(WHITESPACE) => (stripped.trim(), true), + _ => (input, false), + }; + ProfileName { + name, + has_profile_prefix, + } + } + + /// Validate a ProfileName for a given file key + /// + /// 1. `name` must ALWAYS be a valid identifier + /// 2. For Config files, the profile must either be `default` or it must have a profile prefix + /// 3. For credentials files, the profile name MUST NOT have a profile prefix + fn valid_for(self, kind: FileKind) -> Result { + if validate_identifier(self.name).is_err() { + return Err(format!( + "profile `{}` ignored because `{}` was not a valid identifier", + &self.name, &self.name + )); + } + match (self.name, kind, self.has_profile_prefix) { + (_, FileKind::Config, true) => Ok(self), + (DEFAULT, FileKind::Config, false) => Ok(self), + (_not_default, FileKind::Config, false) => Err(format!( + "profile `{}` ignored because config profiles must be of the form `[profile ]`", + self.name + )), + (_, FileKind::Credentials, true) => Err(format!( + "profile `{}` ignored because credential profiles must NOT begin with `profile`", + self.name + )), + (_, FileKind::Credentials, false) => Ok(self), + } + } +} + +/// Normalize a raw profile into a `MergedProfile` +/// +/// This function follows the following rules, codified in the tests & the reference Java implementation +/// - When the profile is a config file, strip `profile` and trim whitespace (`profile foo` => `foo`) +/// - Profile names are validated (see `validate_profile_name`) +/// - A profile named `profile default` takes priority over a profile named `default`. +/// - Profiles with identical names are merged +pub fn merge_in(base: &mut ProfileSet, raw_profile_set: RawProfileSet, kind: FileKind) { + // parse / validate profile names + let validated_profiles = raw_profile_set + .into_iter() + .map(|(name, profile)| (ProfileName::parse(name).valid_for(kind), profile)); + + // remove invalid profiles & emit warning + // valid_profiles contains only valid profiles but it may contain `[profile default]` and `[default]` + // which must be filtered later + let valid_profiles = validated_profiles + .filter_map(|(name, profile)| match name { + Ok(profile_name) => Some((profile_name, profile)), + Err(e) => { + tracing::warn!("{}", e); + None + } + }) + .collect::>(); + // if a `[profile default]` exists then we should ignore `[default]` + let ignore_unprefixed_default = valid_profiles + .iter() + .any(|(profile, _)| profile.name == DEFAULT && profile.has_profile_prefix); + + for (profile_name, raw_profile) in valid_profiles { + // When normalizing profiles, profiles should be merged. However, `[profile default]` and + // `[default]` are considered two separate profiles. Furthermore, `[profile default]` fully + // replaces any contents of `[default]`! 
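+        // e.g. if the config file contains both `[default]` and `[profile default]`, only the
+        // properties from `[profile default]` are kept.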
+ if ignore_unprefixed_default + && profile_name.name == DEFAULT + && !profile_name.has_profile_prefix + { + tracing::warn!("profile `default` ignored because `[profile default]` was found which takes priority"); + continue; + } + let profile = base + .profiles + .entry(profile_name.name.to_string()) + .or_insert_with(|| Profile::new(profile_name.name.to_string(), Default::default())); + merge_into_base(profile, raw_profile) + } +} + +fn merge_into_base<'a>(target: &mut Profile, profile: HashMap<&str, Cow<'a, str>>) { + for (k, v) in profile { + match validate_identifier(k) { + Ok(k) => { + target + .properties + .insert(k.to_owned(), Property::new(k.to_owned(), v.into())); + } + Err(_) => { + tracing::warn!(profile = %&target.name, key = ?k, "key ignored because `{}` was not a valid identifier", k); + } + } + } +} + +/// Validate that a string is a valid identifier +/// +/// Identifiers must match `[A-Za-z0-9_\-/.%@:\+]+` +fn validate_identifier(input: &str) -> Result<&str, ()> { + input + .chars() + .all(|ch| { + ch.is_ascii_alphanumeric() + || ['_', '-', '/', '.', '%', '@', ':', '+'] + .iter() + .any(|c| *c == ch) + }) + .then(|| input) + .ok_or(()) +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use tracing_test::traced_test; + + use crate::profile::parser::parse::RawProfileSet; + use crate::profile::parser::source::FileKind; + use crate::profile::ProfileSet; + + use super::{merge_in, ProfileName}; + use crate::profile::parser::normalize::validate_identifier; + + #[test] + fn profile_name_parsing() { + assert_eq!( + ProfileName::parse("profile name"), + ProfileName { + name: "name", + has_profile_prefix: true + } + ); + assert_eq!( + ProfileName::parse("name"), + ProfileName { + name: "name", + has_profile_prefix: false + } + ); + assert_eq!( + ProfileName::parse("profile\tname"), + ProfileName { + name: "name", + has_profile_prefix: true + } + ); + assert_eq!( + ProfileName::parse("profile name "), + ProfileName { + name: "name", + has_profile_prefix: true + } + ); + assert_eq!( + ProfileName::parse("profilename"), + ProfileName { + name: "profilename", + has_profile_prefix: false + } + ); + assert_eq!( + ProfileName::parse(" whitespace "), + ProfileName { + name: "whitespace", + has_profile_prefix: false + } + ); + } + + #[test] + fn test_validate_identifier() { + assert_eq!( + Ok("some-thing:long/the_one%only.foo@bar+"), + validate_identifier("some-thing:long/the_one%only.foo@bar+") + ); + assert_eq!(Err(()), validate_identifier("foo!bar")); + } + + #[test] + #[traced_test] + fn ignored_key_generates_warning() { + let mut profile: RawProfileSet = HashMap::new(); + profile.insert("default", { + let mut out = HashMap::new(); + out.insert("invalid key", "value".into()); + out + }); + let mut base = ProfileSet::empty(); + merge_in(&mut base, profile, FileKind::Config); + assert!(base + .get_profile("default") + .expect("contains default profile") + .properties + .is_empty()); + assert!(logs_contain( + "key ignored because `invalid key` was not a valid identifier" + )); + } + + #[test] + #[traced_test] + fn invalid_profile_generates_warning() { + let mut profile: RawProfileSet = HashMap::new(); + profile.insert("foo", HashMap::new()); + merge_in(&mut ProfileSet::empty(), profile, FileKind::Config); + assert!(logs_contain("profile `foo` ignored")); + } +} diff --git a/patch/aws-config/src/profile/parser/parse.rs b/patch/aws-config/src/profile/parser/parse.rs new file mode 100644 index 0000000000000..bb4fccd6eb99e --- /dev/null +++ 
b/patch/aws-config/src/profile/parser/parse.rs @@ -0,0 +1,346 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Profile file parsing +//! +//! This file implements profile file parsing at a very literal level. Prior to actually being used, +//! profiles must be normalized into a canonical form. Constructions that will eventually be +//! deemed invalid are accepted during parsing such as: +//! - keys that are invalid identifiers: `a b = c` +//! - profiles with invalid names +//! - profile name normalization (`profile foo` => `foo`) + +use crate::profile::parser::source::File; +use std::borrow::Cow; +use std::collections::HashMap; +use std::error::Error; +use std::fmt::{self, Display, Formatter}; + +/// A set of profiles that still carries a reference to the underlying data +pub type RawProfileSet<'a> = HashMap<&'a str, HashMap<&'a str, Cow<'a, str>>>; + +/// Characters considered to be whitespace by the spec +/// +/// Profile parsing is actually quite strict about what is and is not whitespace, so use this instead +/// of `.is_whitespace()` / `.trim()` +pub const WHITESPACE: &[char] = &[' ', '\t']; +const COMMENT: &[char] = &['#', ';']; + +/// Location for use during error reporting +#[derive(Clone, Debug, Eq, PartialEq)] +struct Location { + line_number: usize, + path: String, +} + +/// An error encountered while parsing a profile +#[derive(Debug, Clone)] +pub struct ProfileParseError { + /// Location where this error occurred + location: Location, + + /// Error message + message: String, +} + +impl Display for ProfileParseError { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "error parsing {} on line {}:\n {}", + self.location.path, self.location.line_number, self.message + ) + } +} + +impl Error for ProfileParseError {} + +/// Validate that a line represents a valid subproperty +/// +/// - Subproperties looks like regular properties (`k=v`) that are nested within an existing property. +/// - Subproperties must be validated for compatibility with other SDKs, but they are not actually +/// parsed into structured data. +fn validate_subproperty(value: &str, location: Location) -> Result<(), ProfileParseError> { + if value.trim_matches(WHITESPACE).is_empty() { + Ok(()) + } else { + parse_property_line(value) + .map_err(|err| err.into_error("sub-property", location)) + .map(|_| ()) + } +} + +fn is_empty_line(line: &str) -> bool { + line.trim_matches(WHITESPACE).is_empty() +} + +fn is_comment_line(line: &str) -> bool { + line.starts_with(COMMENT) +} + +/// Parser for profile files +struct Parser<'a> { + /// In-progress profile representation + data: RawProfileSet<'a>, + + /// Parser state + state: State<'a>, + + /// Parser source location + /// + /// Location is tracked to facilitate error reporting + location: Location, +} + +enum State<'a> { + Starting, + ReadingProfile { + profile: &'a str, + property: Option<&'a str>, + is_subproperty: bool, + }, +} + +/// Parse `file` into a `RawProfileSet` +pub fn parse_profile_file(file: &File) -> Result { + let mut parser = Parser { + data: HashMap::new(), + state: State::Starting, + location: Location { + line_number: 0, + path: file.path.to_string(), + }, + }; + parser.parse_profile(&file.contents)?; + Ok(parser.data) +} + +impl<'a> Parser<'a> { + /// Parse `file` containing profile data into `self.data`. 
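+    ///
+    /// Each line is handled in turn: blank lines and comment lines are skipped, a line starting
+    /// with `[` begins a new profile, a line starting with whitespace continues the previous
+    /// property, and any other line is parsed as a `key = value` property.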
+ fn parse_profile(&mut self, file: &'a str) -> Result<(), ProfileParseError> { + for (line_number, line) in file.lines().enumerate() { + self.location.line_number = line_number + 1; // store a 1-indexed line number + if is_empty_line(line) || is_comment_line(line) { + continue; + } + if line.starts_with('[') { + self.read_profile_line(line)?; + } else if line.starts_with(WHITESPACE) { + self.read_property_continuation(line)?; + } else { + self.read_property_line(line)?; + } + } + Ok(()) + } + + /// Parse a property line like `a = b` + /// + /// A property line is only valid when we're within a profile definition, `[profile foo]` + fn read_property_line(&mut self, line: &'a str) -> Result<(), ProfileParseError> { + let location = &self.location; + let (current_profile, name) = match &self.state { + State::Starting => return Err(self.make_error("Expected a profile definition")), + State::ReadingProfile { profile, .. } => ( + self.data.get_mut(*profile).expect("profile must exist"), + profile, + ), + }; + let (k, v) = parse_property_line(line) + .map_err(|err| err.into_error("property", location.clone()))?; + self.state = State::ReadingProfile { + profile: name, + property: Some(k), + is_subproperty: v.is_empty(), + }; + current_profile.insert(k, v.into()); + Ok(()) + } + + /// Create a location-tagged error message + fn make_error(&self, message: &str) -> ProfileParseError { + ProfileParseError { + location: self.location.clone(), + message: message.into(), + } + } + + /// Parse the lines of a property after the first line. + /// + /// This is triggered by lines that start with whitespace. + fn read_property_continuation(&mut self, line: &'a str) -> Result<(), ProfileParseError> { + let current_property = match &self.state { + State::Starting => return Err(self.make_error("Expected a profile definition")), + State::ReadingProfile { + profile, + property: Some(property), + is_subproperty, + } => { + if *is_subproperty { + validate_subproperty(line, self.location.clone())?; + } + self.data + .get_mut(*profile) + .expect("profile must exist") + .get_mut(*property) + .expect("property must exist") + } + State::ReadingProfile { + profile: _, + property: None, + .. + } => return Err(self.make_error("Expected a property definition, found continuation")), + }; + let line = line.trim_matches(WHITESPACE); + let current_property = current_property.to_mut(); + current_property.push('\n'); + current_property.push_str(line); + Ok(()) + } + + fn read_profile_line(&mut self, line: &'a str) -> Result<(), ProfileParseError> { + let line = prepare_line(line, false); + let profile_name = line + .strip_prefix('[') + .ok_or_else(|| self.make_error("Profile definition must start with ]"))? 
+ .strip_suffix(']') + .ok_or_else(|| self.make_error("Profile definition must end with ']'"))?; + if !self.data.contains_key(profile_name) { + self.data.insert(profile_name, Default::default()); + } + self.state = State::ReadingProfile { + profile: profile_name, + property: None, + is_subproperty: false, + }; + Ok(()) + } +} + +/// Error encountered while parsing a property +#[derive(Debug, Eq, PartialEq)] +enum PropertyError { + NoEquals, + NoName, +} + +impl PropertyError { + fn into_error(self, ctx: &str, location: Location) -> ProfileParseError { + let mut ctx = ctx.to_string(); + match self { + PropertyError::NoName => { + ctx.get_mut(0..1).unwrap().make_ascii_uppercase(); + ProfileParseError { + location, + message: format!("{} did not have a name", ctx), + } + } + PropertyError::NoEquals => ProfileParseError { + location, + message: format!("Expected an '=' sign defining a {}", ctx), + }, + } + } +} + +/// Parse a property line into a key-value pair +fn parse_property_line(line: &str) -> Result<(&str, &str), PropertyError> { + let line = prepare_line(line, true); + let (k, v) = line.split_once('=').ok_or(PropertyError::NoEquals)?; + let k = k.trim_matches(WHITESPACE); + let v = v.trim_matches(WHITESPACE); + if k.is_empty() { + return Err(PropertyError::NoName); + } + Ok((k, v)) +} + +/// Prepare a line for parsing +/// +/// Because leading whitespace is significant, this method should only be called after determining +/// whether a line represents a property (no whitespace) or a sub-property (whitespace). +/// This function preprocesses a line to simplify parsing: +/// 1. Strip leading and trailing whitespace +/// 2. Remove trailing comments +/// +/// Depending on context, comment characters may need to be preceded by whitespace to be considered +/// comments. 
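+///
+/// For example (mirroring the unit tests below): with `comments_need_whitespace = true`,
+/// `name = value # comment` becomes `name = value`, while `name = value#x` is left unchanged
+/// because the `#` is not preceded by whitespace; with `comments_need_whitespace = false`,
+/// both inputs are truncated at the first `#`.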
+fn prepare_line(line: &str, comments_need_whitespace: bool) -> &str { + let line = line.trim_matches(WHITESPACE); + let mut prev_char_whitespace = false; + let mut comment_idx = None; + for (idx, chr) in line.char_indices() { + if (COMMENT.contains(&chr)) && (prev_char_whitespace || !comments_need_whitespace) { + comment_idx = Some(idx); + break; + } + prev_char_whitespace = chr.is_whitespace(); + } + comment_idx + .map(|idx| &line[..idx]) + .unwrap_or(line) + // trimming the comment might result in more whitespace that needs to be handled + .trim_matches(WHITESPACE) +} + +#[cfg(test)] +mod test { + use super::{parse_profile_file, prepare_line, Location}; + use crate::profile::parser::parse::{parse_property_line, PropertyError}; + use crate::profile::parser::source::File; + + // most test cases covered by the JSON test suite + + #[test] + fn property_parsing() { + assert_eq!(parse_property_line("a = b"), Ok(("a", "b"))); + assert_eq!(parse_property_line("a=b"), Ok(("a", "b"))); + assert_eq!(parse_property_line("a = b "), Ok(("a", "b"))); + assert_eq!(parse_property_line(" a = b "), Ok(("a", "b"))); + assert_eq!(parse_property_line(" a = b 🐱 "), Ok(("a", "b 🐱"))); + assert_eq!(parse_property_line("a b"), Err(PropertyError::NoEquals)); + assert_eq!(parse_property_line("= b"), Err(PropertyError::NoName)); + assert_eq!(parse_property_line("a = "), Ok(("a", ""))); + assert_eq!( + parse_property_line("something_base64=aGVsbG8gZW50aHVzaWFzdGljIHJlYWRlcg=="), + Ok(("something_base64", "aGVsbG8gZW50aHVzaWFzdGljIHJlYWRlcg==")) + ); + } + + #[test] + fn prepare_line_strips_comments() { + assert_eq!( + prepare_line("name = value # Comment with # sign", true), + "name = value" + ); + + assert_eq!( + prepare_line("name = value#Comment # sign", true), + "name = value#Comment" + ); + + assert_eq!( + prepare_line("name = value#Comment # sign", false), + "name = value" + ); + } + + #[test] + fn error_line_numbers() { + let file = File { + path: "~/.aws/config".into(), + contents: "[default\nk=v".into(), + }; + let err = parse_profile_file(&file).expect_err("parsing should fail"); + assert_eq!(err.message, "Profile definition must end with ']'"); + assert_eq!( + err.location, + Location { + path: "~/.aws/config".into(), + line_number: 1 + } + ) + } +} diff --git a/patch/aws-config/src/profile/parser/source.rs b/patch/aws-config/src/profile/parser/source.rs new file mode 100644 index 0000000000000..8bfcfb4584090 --- /dev/null +++ b/patch/aws-config/src/profile/parser/source.rs @@ -0,0 +1,349 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +use crate::fs_util::{home_dir, Os}; +use aws_types::os_shim_internal; +use std::borrow::Cow; +use std::io::ErrorKind; +use std::path::{Component, Path, PathBuf}; +use tracing::Instrument; + +/// In-memory source of profile data +pub struct Source { + /// Contents and path of ~/.aws/config + pub config_file: File, + + /// Contents and path of ~/.aws/credentials + pub credentials_file: File, + + /// Profile to use + /// + /// Overridden via `$AWS_PROFILE`, defaults to `default` + pub profile: Cow<'static, str>, +} + +/// In-memory configuration file +pub struct File { + pub path: String, + pub contents: String, +} + +#[derive(Clone, Copy)] +pub enum FileKind { + Config, + Credentials, +} + +impl FileKind { + fn default_path(&self) -> &'static str { + match &self { + FileKind::Credentials => "~/.aws/credentials", + FileKind::Config => "~/.aws/config", + } + } + + fn override_environment_variable(&self) -> &'static str { + match &self { + FileKind::Config => "AWS_CONFIG_FILE", + FileKind::Credentials => "AWS_SHARED_CREDENTIALS_FILE", + } + } +} + +/// Load a [Source](Source) from a given environment and filesystem. +pub async fn load(proc_env: &os_shim_internal::Env, fs: &os_shim_internal::Fs) -> Source { + let home = home_dir(proc_env, Os::real()); + let config = load_config_file(FileKind::Config, &home, fs, proc_env) + .instrument(tracing::debug_span!("load_config_file")) + .await; + let credentials = load_config_file(FileKind::Credentials, &home, fs, proc_env) + .instrument(tracing::debug_span!("load_credentials_file")) + .await; + + Source { + config_file: config, + credentials_file: credentials, + profile: proc_env + .get("AWS_PROFILE") + .map(Cow::Owned) + .unwrap_or(Cow::Borrowed("default")), + } +} + +/// Loads an AWS Config file +/// +/// Both the default & the overriding patterns may contain `~/` which MUST be expanded to the users +/// home directory in a platform-aware way (see [`expand_home`](expand_home)) +/// +/// Arguments: +/// * `kind`: The type of config file to load +/// * `home_directory`: Home directory to use during home directory expansion +/// * `fs`: Filesystem abstraction +/// * `environment`: Process environment abstraction +async fn load_config_file( + kind: FileKind, + home_directory: &Option, + fs: &os_shim_internal::Fs, + environment: &os_shim_internal::Env, +) -> File { + let path = environment + .get(kind.override_environment_variable()) + .map(Cow::Owned) + .ok() + .unwrap_or_else(|| kind.default_path().into()); + let expanded = expand_home(path.as_ref(), home_directory, environment); + if path != expanded.to_string_lossy() { + tracing::debug!(before = ?path, after = ?expanded, "home directory expanded"); + } + // read the data at the specified path + // if the path does not exist, log a warning but pretend it was actually an empty file + let data = match fs.read_to_end(&expanded).await { + Ok(data) => data, + Err(e) => { + match e.kind() { + ErrorKind::NotFound if path == kind.default_path() => { + tracing::debug!(path = %path, "config file not found") + } + ErrorKind::NotFound if path != kind.default_path() => { + // in the case where the user overrode the path with an environment variable, + // log more loudly than the case where the default path was missing + tracing::warn!(path = %path, env = %kind.override_environment_variable(), "config file overridden via environment variable not found") + } + _other => tracing::warn!(path = %path, error = %e, "failed to read config file"), + }; + Default::default() + } + }; + // if the file is not valid 
utf-8, log a warning and use an empty file instead
+    let data = match String::from_utf8(data) {
+        Ok(data) => data,
+        Err(e) => {
+            tracing::warn!(path = %path, error = %e, "config file did not contain utf-8 encoded data");
+            Default::default()
+        }
+    };
+    tracing::debug!(path = %path, size = ?data.len(), "config file loaded");
+    File {
+        // lossy is OK here, the name of this file is just for debugging purposes
+        path: expanded.to_string_lossy().into(),
+        contents: data,
+    }
+}
+
+fn expand_home(
+    path: impl AsRef<Path>,
+    home_dir: &Option<String>,
+    environment: &os_shim_internal::Env,
+) -> PathBuf {
+    let path = path.as_ref();
+    let mut components = path.components();
+    let start = components.next();
+    match start {
+        None => path.into(), // empty path,
+        Some(Component::Normal(s)) if s == "~" => {
+            // do homedir replacement
+            let path = match home_dir {
+                Some(dir) => {
+                    tracing::debug!(home = ?dir, path = ?path, "performing home directory substitution");
+                    dir.clone()
+                }
+                None => {
+                    // Lambdas don't have home directories and emitting this warning is not helpful
+                    // to users running the SDK from within a Lambda. This warning will be silenced
+                    // if we determine that that is the case.
+                    let is_likely_running_on_a_lambda =
+                        check_is_likely_running_on_a_lambda(environment);
+                    if !is_likely_running_on_a_lambda {
+                        tracing::warn!(
+                            "could not determine home directory but home expansion was requested"
+                        );
+                    }
+                    // if we can't determine the home directory, just leave it as `~`
+                    "~".into()
+                }
+            };
+            let mut path: PathBuf = path.into();
+            // rewrite the path using system-specific path separators
+            for component in components {
+                path.push(component);
+            }
+            path
+        }
+        // Finally, handle the case where it doesn't begin with some version of `~/`:
+        // NOTE: in this case we aren't performing path rewriting. This is correct because
+        // this path comes from an environment variable on the target
+        // platform, so in that case, the separators should already be correct.
+        _other => path.into(),
+    }
+}
+
+/// Returns true or false based on whether or not this code is likely running inside an AWS Lambda.
+/// [Lambdas set many environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime)
+/// that we can check.
+fn check_is_likely_running_on_a_lambda(environment: &aws_types::os_shim_internal::Env) -> bool {
+    // LAMBDA_TASK_ROOT – The path to your Lambda function code.
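+    // The check is presence-only: any value, even an empty string, is treated as
+    // evidence that we are running inside a Lambda.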
+ environment.get("LAMBDA_TASK_ROOT").is_ok() +} + +#[cfg(test)] +mod tests { + use crate::profile::parser::source::{expand_home, load, load_config_file, FileKind}; + use aws_types::os_shim_internal::{Env, Fs}; + use serde::Deserialize; + use std::collections::HashMap; + use std::error::Error; + use std::fs; + + #[test] + fn only_expand_home_prefix() { + // ~ is only expanded as a single component (currently) + let path = "~aws/config"; + let environment = Env::from_slice(&[]); + assert_eq!( + expand_home(&path, &None, &environment).to_str().unwrap(), + "~aws/config" + ); + } + + #[derive(Deserialize, Debug)] + #[serde(rename_all = "camelCase")] + struct SourceTests { + tests: Vec, + } + + #[derive(Deserialize, Debug)] + #[serde(rename_all = "camelCase")] + struct TestCase { + name: String, + environment: HashMap, + platform: String, + profile: Option, + config_location: String, + credentials_location: String, + } + + /// Run all tests from file-location-tests.json + #[test] + fn run_tests() -> Result<(), Box> { + let tests = fs::read_to_string("test-data/file-location-tests.json")?; + let tests: SourceTests = serde_json::from_str(&tests)?; + for (i, test) in tests.tests.into_iter().enumerate() { + eprintln!("test: {}", i); + check(test) + .now_or_never() + .expect("these futures should never poll"); + } + Ok(()) + } + + use futures_util::FutureExt; + use tracing_test::traced_test; + + #[traced_test] + #[test] + fn logs_produced_default() { + let env = Env::from_slice(&[("HOME", "/user/name")]); + let mut fs = HashMap::new(); + fs.insert( + "/user/name/.aws/config".to_string(), + "[default]\nregion = us-east-1", + ); + + let fs = Fs::from_map(fs); + + let _src = load(&env, &fs).now_or_never(); + assert!(logs_contain("config file loaded")); + assert!(logs_contain("performing home directory substitution")); + } + + #[traced_test] + #[test] + fn load_config_file_should_not_emit_warning_on_lambda() { + let env = Env::from_slice(&[("LAMBDA_TASK_ROOT", "/")]); + let fs = Fs::from_slice(&[]); + + let _src = load_config_file(FileKind::Config, &None, &fs, &env).now_or_never(); + assert!(!logs_contain( + "could not determine home directory but home expansion was requested" + )); + } + + async fn check(test_case: TestCase) { + let fs = Fs::real(); + let env = Env::from(test_case.environment); + let platform_matches = (cfg!(windows) && test_case.platform == "windows") + || (!cfg!(windows) && test_case.platform != "windows"); + if platform_matches { + let source = load(&env, &fs).await; + if let Some(expected_profile) = test_case.profile { + assert_eq!(source.profile, expected_profile, "{}", &test_case.name); + } + assert_eq!( + source.config_file.path, test_case.config_location, + "{}", + &test_case.name + ); + assert_eq!( + source.credentials_file.path, test_case.credentials_location, + "{}", + &test_case.name + ) + } else { + println!( + "NOTE: ignoring test case for {} which does not apply to our platform: \n {}", + &test_case.platform, &test_case.name + ) + } + } + + #[test] + #[cfg_attr(windows, ignore)] + fn test_expand_home() { + let path = "~/.aws/config"; + let environment = Env::from_slice(&[]); + assert_eq!( + expand_home(&path, &Some("/user/foo".to_string()), &environment) + .to_str() + .unwrap(), + "/user/foo/.aws/config" + ); + } + + #[test] + fn expand_home_no_home() { + let environment = Env::from_slice(&[]); + // there is an edge case around expansion when no home directory exists + // if no home directory can be determined, leave the path as is + if !cfg!(windows) { + assert_eq!( + 
expand_home("~/config", &None, &environment) + .to_str() + .unwrap(), + "~/config" + ) + } else { + assert_eq!( + expand_home("~/config", &None, &environment) + .to_str() + .unwrap(), + "~\\config" + ) + } + } + + /// Test that a linux oriented path expands on windows + #[test] + #[cfg_attr(not(windows), ignore)] + fn test_expand_home_windows() { + let path = "~/.aws/config"; + let environment = Env::from_slice(&[]); + assert_eq!( + expand_home(&path, &Some("C:\\Users\\name".to_string()), &environment) + .to_str() + .unwrap(), + "C:\\Users\\name\\.aws\\config" + ); + } +} diff --git a/patch/aws-config/src/profile/region.rs b/patch/aws-config/src/profile/region.rs new file mode 100644 index 0000000000000..832835cec438b --- /dev/null +++ b/patch/aws-config/src/profile/region.rs @@ -0,0 +1,260 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Load a region from an AWS profile + +use crate::meta::region::{future, ProvideRegion}; +use crate::provider_config::ProviderConfig; +use aws_types::os_shim_internal::{Env, Fs}; +use aws_types::region::Region; + +use super::ProfileSet; + +/// Load a region from a profile file +/// +/// This provider will attempt to load AWS shared configuration, then read the `region` property +/// from the active profile. +/// +/// # Examples +/// +/// **Loads "us-west-2" as the region** +/// ```ini +/// [default] +/// region = us-west-2 +/// ``` +/// +/// **Loads `us-east-1` as the region _if and only if_ the `AWS_PROFILE` environment variable is set +/// to `other`.** +/// +/// ```ini +/// [profile other] +/// region = us-east-1 +/// ``` +/// +/// This provider is part of the [default region provider chain](crate::default_provider::region). +#[derive(Debug, Default)] +pub struct ProfileFileRegionProvider { + fs: Fs, + env: Env, + profile_override: Option, +} + +/// Builder for [ProfileFileRegionProvider] +#[derive(Default)] +pub struct Builder { + config: Option, + profile_override: Option, +} + +impl Builder { + /// Override the configuration for this provider + pub fn configure(mut self, config: &ProviderConfig) -> Self { + self.config = Some(config.clone()); + self + } + + /// Override the profile name used by the [ProfileFileRegionProvider] + pub fn profile_name(mut self, profile_name: impl Into) -> Self { + self.profile_override = Some(profile_name.into()); + self + } + + /// Build a [ProfileFileRegionProvider] from this builder + pub fn build(self) -> ProfileFileRegionProvider { + let conf = self.config.unwrap_or_default(); + ProfileFileRegionProvider { + env: conf.env(), + fs: conf.fs(), + profile_override: self.profile_override, + } + } +} + +impl ProfileFileRegionProvider { + /// Create a new [ProfileFileRegionProvider] + /// + /// To override the selected profile, set the `AWS_PROFILE` environment variable or use the [`Builder`]. 
+ pub fn new() -> Self { + Self { + fs: Fs::real(), + env: Env::real(), + profile_override: None, + } + } + + /// [`Builder`] to construct a [`ProfileFileRegionProvider`] + pub fn builder() -> Builder { + Builder::default() + } + + async fn region(&self) -> Option { + let profile_set = super::parser::load(&self.fs, &self.env) + .await + .map_err(|err| tracing::warn!(err = %err, "failed to parse profile")) + .ok()?; + + resolve_profile_chain_for_region(&profile_set, self.profile_override.as_deref()) + } +} + +fn resolve_profile_chain_for_region( + profile_set: &'_ ProfileSet, + profile_override: Option<&str>, +) -> Option { + if profile_set.is_empty() { + return None; + } + + let mut selected_profile = profile_override.unwrap_or_else(|| profile_set.selected_profile()); + let mut visited_profiles = vec![]; + + loop { + let profile = profile_set.get_profile(selected_profile)?; + // Check to see if we're in a loop and return if that's true. + // Else, add the profile we're currently checking to our list of visited profiles. + if visited_profiles.contains(&selected_profile) { + return None; + } else { + visited_profiles.push(selected_profile); + } + + // Attempt to get region and source_profile for current profile + let selected_profile_region = profile + .get("region") + .map(|region| Region::new(region.to_owned())); + let source_profile = profile.get("source_profile"); + + // Check to see what we got + match (selected_profile_region, source_profile) { + // Profile had a region specified, return it :D + (Some(region), _) => { + return Some(region); + } + // No region specified, source_profile is self-referential so we return to avoid infinite loop + (None, Some(source_profile)) if source_profile == selected_profile => { + return None; + } + // No region specified, no source_profile specified so we return empty-handed + (None, None) => { + return None; + } + // No region specified, check source profile for a region in next loop iteration + (None, Some(source_profile)) => { + selected_profile = source_profile; + } + } + } +} + +impl ProvideRegion for ProfileFileRegionProvider { + fn region(&self) -> future::ProvideRegion { + future::ProvideRegion::new(self.region()) + } +} + +#[cfg(test)] +mod test { + use crate::profile::ProfileFileRegionProvider; + use crate::provider_config::ProviderConfig; + use crate::test_case::no_traffic_connector; + use aws_sdk_sts::Region; + use aws_types::os_shim_internal::{Env, Fs}; + use futures_util::FutureExt; + use tracing_test::traced_test; + + fn provider_config(dir_name: &str) -> ProviderConfig { + let fs = Fs::from_test_dir(format!("test-data/profile-provider/{}/fs", dir_name), "/"); + let env = Env::from_slice(&[("HOME", "/home")]); + ProviderConfig::empty() + .with_fs(fs) + .with_env(env) + .with_http_connector(no_traffic_connector()) + } + + #[traced_test] + #[test] + fn load_region() { + let provider = ProfileFileRegionProvider::builder() + .configure(&provider_config("region_override")) + .build(); + assert_eq!( + provider.region().now_or_never().unwrap(), + Some(Region::from_static("us-east-1")) + ); + } + + #[test] + fn load_region_env_profile_override() { + let conf = provider_config("region_override").with_env(Env::from_slice(&[ + ("HOME", "/home"), + ("AWS_PROFILE", "base"), + ])); + let provider = ProfileFileRegionProvider::builder() + .configure(&conf) + .build(); + assert_eq!( + provider.region().now_or_never().unwrap(), + Some(Region::from_static("us-east-1")) + ); + } + + #[test] + fn load_region_nonexistent_profile() { + let conf = 
provider_config("region_override").with_env(Env::from_slice(&[ + ("HOME", "/home"), + ("AWS_PROFILE", "doesnotexist"), + ])); + let provider = ProfileFileRegionProvider::builder() + .configure(&conf) + .build(); + assert_eq!(provider.region().now_or_never().unwrap(), None); + } + + #[test] + fn load_region_explicit_override() { + let conf = provider_config("region_override"); + let provider = ProfileFileRegionProvider::builder() + .configure(&conf) + .profile_name("base") + .build(); + assert_eq!( + provider.region().now_or_never().unwrap(), + Some(Region::from_static("us-east-1")) + ); + } + + #[tokio::test] + async fn load_region_from_source_profile() { + let config = r#" +[profile credentials] +aws_access_key_id = test-access-key-id +aws_secret_access_key = test-secret-access-key +aws_session_token = test-session-token +region = us-east-1 + +[profile needs-source] +source_profile = credentials +role_arn = arn:aws:iam::123456789012:role/test +"# + .trim(); + + let fs = Fs::from_slice(&[("test_config", config)]); + let env = Env::from_slice(&[("AWS_CONFIG_FILE", "test_config")]); + let provider_config = ProviderConfig::empty() + .with_fs(fs) + .with_env(env) + .with_http_connector(no_traffic_connector()); + + assert_eq!( + Some(Region::new("us-east-1")), + ProfileFileRegionProvider::builder() + .profile_name("needs-source") + .configure(&provider_config) + .build() + .region() + .await + ); + } +} diff --git a/patch/aws-config/src/profile/retry_config.rs b/patch/aws-config/src/profile/retry_config.rs new file mode 100644 index 0000000000000..2280933106ed5 --- /dev/null +++ b/patch/aws-config/src/profile/retry_config.rs @@ -0,0 +1,156 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Load retry configuration properties from an AWS profile + +use std::str::FromStr; + +use aws_smithy_types::retry::{RetryConfigBuilder, RetryConfigErr, RetryMode}; +use aws_types::os_shim_internal::{Env, Fs}; + +use crate::provider_config::ProviderConfig; + +/// Load retry configuration properties from a profile file +/// +/// This provider will attempt to load AWS shared configuration, then read retry configuration properties +/// from the active profile. +/// +/// # Examples +/// +/// **Loads 2 as the `max_attempts` to make when sending a request** +/// ```ini +/// [default] +/// max_attempts = 2 +/// ``` +/// +/// **Loads `standard` as the `retry_mode` _if and only if_ the `other` profile is selected.** +/// +/// ```ini +/// [profile other] +/// retry_mode = standard +/// ``` +/// +/// This provider is part of the [default retry_config provider chain](crate::default_provider::retry_config). 
+#[derive(Debug, Default)] +pub struct ProfileFileRetryConfigProvider { + fs: Fs, + env: Env, + profile_override: Option, +} + +/// Builder for [ProfileFileRetryConfigProvider] +#[derive(Default)] +pub struct Builder { + config: Option, + profile_override: Option, +} + +impl Builder { + /// Override the configuration for this provider + pub fn configure(mut self, config: &ProviderConfig) -> Self { + self.config = Some(config.clone()); + self + } + + /// Override the profile name used by the [ProfileFileRetryConfigProvider] + pub fn profile_name(mut self, profile_name: impl Into) -> Self { + self.profile_override = Some(profile_name.into()); + self + } + + /// Build a [ProfileFileRetryConfigProvider] from this builder + pub fn build(self) -> ProfileFileRetryConfigProvider { + let conf = self.config.unwrap_or_default(); + ProfileFileRetryConfigProvider { + env: conf.env(), + fs: conf.fs(), + profile_override: self.profile_override, + } + } +} + +impl ProfileFileRetryConfigProvider { + /// Create a new [ProfileFileRetryConfigProvider] + /// + /// To override the selected profile, set the `AWS_PROFILE` environment variable or use the [Builder]. + pub fn new() -> Self { + Self { + fs: Fs::real(), + env: Env::real(), + profile_override: None, + } + } + + /// [Builder] to construct a [ProfileFileRetryConfigProvider] + pub fn builder() -> Builder { + Builder::default() + } + + /// Attempt to create a new RetryConfigBuilder from a profile file. + pub async fn retry_config_builder(&self) -> Result { + let profile = match super::parser::load(&self.fs, &self.env).await { + Ok(profile) => profile, + Err(err) => { + tracing::warn!(err = %err, "failed to parse profile"); + // return an empty builder + return Ok(RetryConfigBuilder::new()); + } + }; + + let selected_profile = self + .profile_override + .as_deref() + .unwrap_or_else(|| profile.selected_profile()); + let selected_profile = match profile.get_profile(selected_profile) { + Some(profile) => profile, + None => { + // Only warn if the user specified a profile name to use. + if self.profile_override.is_some() { + tracing::warn!("failed to get selected '{}' profile", selected_profile); + } + // return an empty builder + return Ok(RetryConfigBuilder::new()); + } + }; + + let max_attempts = match selected_profile.get("max_attempts") { + Some(max_attempts) => match max_attempts.parse::() { + Ok(max_attempts) if max_attempts == 0 => { + return Err(RetryConfigErr::MaxAttemptsMustNotBeZero { + set_by: "aws profile".into(), + }); + } + Ok(max_attempts) => Some(max_attempts), + Err(source) => { + return Err(RetryConfigErr::FailedToParseMaxAttempts { + set_by: "aws profile".into(), + source, + }); + } + }, + None => None, + }; + + let retry_mode = match selected_profile.get("retry_mode") { + Some(retry_mode) => match RetryMode::from_str(retry_mode) { + Ok(retry_mode) => Some(retry_mode), + Err(retry_mode_err) => { + return Err(RetryConfigErr::InvalidRetryMode { + set_by: "aws profile".into(), + source: retry_mode_err, + }); + } + }, + None => None, + }; + + let mut retry_config_builder = RetryConfigBuilder::new(); + retry_config_builder + .set_max_attempts(max_attempts) + .set_mode(retry_mode); + + Ok(retry_config_builder) + } +} diff --git a/patch/aws-config/src/profile/timeout_config.rs b/patch/aws-config/src/profile/timeout_config.rs new file mode 100644 index 0000000000000..3ce69eac9be1e --- /dev/null +++ b/patch/aws-config/src/profile/timeout_config.rs @@ -0,0 +1,183 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0. + */ + +//! Load timeout configuration properties from an AWS profile + +use crate::parsing::parse_str_as_timeout; +use crate::profile::Profile; +use crate::provider_config::ProviderConfig; + +use aws_smithy_types::timeout; +use aws_smithy_types::tristate::TriState; +use aws_types::os_shim_internal::{Env, Fs}; + +use std::time::Duration; + +// Currently unsupported timeouts +const PROFILE_VAR_CONNECT_TIMEOUT: &str = "connect_timeout"; +const PROFILE_VAR_TLS_NEGOTIATION_TIMEOUT: &str = "tls_negotiation_timeout"; +const PROFILE_VAR_READ_TIMEOUT: &str = "read_timeout"; + +// Supported timeouts +const PROFILE_VAR_API_CALL_ATTEMPT_TIMEOUT: &str = "api_call_attempt_timeout"; +const PROFILE_VAR_API_CALL_TIMEOUT: &str = "api_call_timeout"; + +/// Load timeout configuration properties from a profile file +/// +/// This provider will attempt to load AWS shared configuration, then read timeout configuration +/// properties from the active profile. Timeout values represent the number of seconds before timing +/// out and must be non-negative floats or integers. NaN and infinity are also invalid. If at least +/// one of these values is valid, construction will succeed. +/// +/// # Examples +/// +/// **Sets timeouts for the `default` profile** +/// ```ini +/// [default] +/// api_call_attempt_timeout = 2 +/// api_call_timeout = 3 +/// ``` +/// +/// **Sets the `api_call_attempt_timeout` to 0.5 seconds _if and only if_ the `other` profile is selected.** +/// +/// ```ini +/// [profile other] +/// api_call_attempt_timeout = 0.5 +/// ``` +/// +/// This provider is part of the [default timeout config provider chain](crate::default_provider::timeout_config). +#[derive(Debug, Default)] +pub struct ProfileFileTimeoutConfigProvider { + fs: Fs, + env: Env, + profile_override: Option, +} + +/// Builder for [`ProfileFileTimeoutConfigProvider`] +#[derive(Default)] +pub struct Builder { + config: Option, + profile_override: Option, +} + +impl Builder { + /// Override the configuration for this provider + pub fn configure(mut self, config: &ProviderConfig) -> Self { + self.config = Some(config.clone()); + self + } + + /// Override the profile name used by the [`ProfileFileTimeoutConfigProvider`] + pub fn profile_name(mut self, profile_name: impl Into) -> Self { + self.profile_override = Some(profile_name.into()); + self + } + + /// Build a [`ProfileFileTimeoutConfigProvider`] from this builder + pub fn build(self) -> ProfileFileTimeoutConfigProvider { + let conf = self.config.unwrap_or_default(); + ProfileFileTimeoutConfigProvider { + env: conf.env(), + fs: conf.fs(), + profile_override: self.profile_override, + } + } +} + +impl ProfileFileTimeoutConfigProvider { + /// Create a new [`ProfileFileTimeoutConfigProvider`] + /// + /// To override the selected profile, set the `AWS_PROFILE` environment variable or use the [`Builder`]. + pub fn new() -> Self { + Self { + fs: Fs::real(), + env: Env::real(), + profile_override: None, + } + } + + /// [`Builder`] to construct a [`ProfileFileTimeoutConfigProvider`] + pub fn builder() -> Builder { + Builder::default() + } + + /// Attempt to create a new [`timeout::Config`](aws_smithy_types::timeout::Config) from a profile file. 
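+    ///
+    /// A minimal sketch using the default constructor (real environment and file system;
+    /// the import path is assumed to mirror the other profile-file providers):
+    ///
+    /// ```no_run
+    /// # async fn example() {
+    /// use aws_config::profile::ProfileFileTimeoutConfigProvider;
+    ///
+    /// let timeout_config = ProfileFileTimeoutConfigProvider::new()
+    ///     .timeout_config()
+    ///     .await
+    ///     .expect("timeout values in the profile are valid");
+    /// # }
+    /// ```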
+ pub async fn timeout_config(&self) -> Result { + let profile = match super::parser::load(&self.fs, &self.env).await { + Ok(profile) => profile, + Err(err) => { + tracing::warn!(err = %err, "failed to parse profile, skipping it"); + // return an empty builder + return Ok(Default::default()); + } + }; + + let selected_profile = self + .profile_override + .as_deref() + .unwrap_or_else(|| profile.selected_profile()); + let selected_profile = match profile.get_profile(selected_profile) { + Some(profile) => profile, + None => { + // Only warn if the user specified a profile name to use. + if self.profile_override.is_some() { + tracing::warn!( + "failed to get selected '{}' profile, skipping it", + selected_profile + ); + } + // return an empty config + return Ok(timeout::Config::new()); + } + }; + + // Warn users that set unsupported timeouts in their profile + for timeout in [ + PROFILE_VAR_CONNECT_TIMEOUT, + PROFILE_VAR_TLS_NEGOTIATION_TIMEOUT, + PROFILE_VAR_READ_TIMEOUT, + ] { + warn_if_unsupported_timeout_is_set(selected_profile, timeout); + } + + let api_call_attempt_timeout = construct_timeout_from_profile_var( + selected_profile, + PROFILE_VAR_API_CALL_ATTEMPT_TIMEOUT, + )?; + let api_call_timeout = + construct_timeout_from_profile_var(selected_profile, PROFILE_VAR_API_CALL_TIMEOUT)?; + + let api_timeouts = timeout::Api::new() + .with_call_timeout(api_call_timeout) + .with_call_attempt_timeout(api_call_attempt_timeout); + + // Only API-related timeouts are currently supported + Ok(timeout::Config::new().with_api_timeouts(api_timeouts)) + } +} + +fn construct_timeout_from_profile_var( + profile: &Profile, + var: &'static str, +) -> Result, timeout::ConfigError> { + let profile_name = format!("aws profile [{}]", profile.name()); + match profile.get(var) { + Some(timeout) => { + parse_str_as_timeout(timeout, var.into(), profile_name.into()).map(TriState::Set) + } + None => Ok(TriState::Unset), + } +} + +fn warn_if_unsupported_timeout_is_set(profile: &Profile, var: &'static str) { + if profile.get(var).is_some() { + tracing::warn!( + "Profile '{}' set {} timeout but that feature is currently unimplemented so the setting will be ignored. \ + To help us prioritize support for this feature, please upvote aws-sdk-rust#151 (https://github.com/awslabs/aws-sdk-rust/issues/151)", + profile.name(), + var + ) + } +} diff --git a/patch/aws-config/src/provider_config.rs b/patch/aws-config/src/provider_config.rs new file mode 100644 index 0000000000000..6b3243e43357c --- /dev/null +++ b/patch/aws-config/src/provider_config.rs @@ -0,0 +1,271 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Configuration Options for Credential Providers + +use aws_smithy_async::rt::sleep::{default_async_sleep, AsyncSleep}; +use aws_smithy_client::erase::DynConnector; +use aws_types::os_shim_internal::{Env, Fs, TimeSource}; +use aws_types::{ + http_connector::{HttpConnector, HttpSettings}, + region::Region, +}; + +use std::error::Error; +use std::fmt::{Debug, Formatter}; +use std::sync::Arc; + +use crate::connector::default_connector; +use http::Uri; +use hyper::client::connect::Connection; +use tokio::io::{AsyncRead, AsyncWrite}; + +/// Configuration options for Credential Providers +/// +/// Most credential providers builders offer a `configure` method which applies general provider configuration +/// options. +/// +/// To use a region from the default region provider chain use [`ProviderConfig::with_default_region`]. 
+/// Otherwise, use [`ProviderConfig::without_region`]. Note that some credentials providers require a region +/// to be explicitly set. +#[derive(Clone)] +pub struct ProviderConfig { + env: Env, + fs: Fs, + time_source: TimeSource, + connector: HttpConnector, + sleep: Option>, + region: Option, +} + +impl Debug for ProviderConfig { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ProviderConfig") + .field("env", &self.env) + .field("fs", &self.fs) + .field("sleep", &self.sleep) + .field("region", &self.region) + .finish() + } +} + +impl Default for ProviderConfig { + fn default() -> Self { + let connector = HttpConnector::ConnectorFn(Arc::new( + |settings: &HttpSettings, sleep: Option>| { + default_connector(settings, sleep) + }, + )); + + Self { + env: Env::default(), + fs: Fs::default(), + time_source: TimeSource::default(), + connector, + sleep: default_async_sleep(), + region: None, + } + } +} + +#[cfg(test)] +impl ProviderConfig { + /// ProviderConfig with all configuration removed + /// + /// Unlike [`ProviderConfig::empty`] where `env` and `fs` will use their non-mocked implementations, + /// this method will use an empty mock environment and an empty mock file system. + pub fn no_configuration() -> Self { + use aws_types::os_shim_internal::ManualTimeSource; + use std::collections::HashMap; + use std::time::UNIX_EPOCH; + Self { + env: Env::from_slice(&[]), + fs: Fs::from_raw_map(HashMap::new()), + time_source: TimeSource::manual(&ManualTimeSource::new(UNIX_EPOCH)), + connector: HttpConnector::Prebuilt(None), + sleep: None, + region: None, + } + } +} + +impl ProviderConfig { + /// Create a default provider config with the region unset. + /// + /// Using this option means that you may need to set a region manually. + /// + /// This constructor will use a default value for the HTTPS connector and Sleep implementation + /// when they are enabled as crate features which is usually the correct option. To construct + /// a `ProviderConfig` without these fields set, use [`ProviderConfig::empty`]. + /// + /// + /// # Examples + /// ```no_run + /// # #[cfg(any(feature = "rustls", feature = "native-tls"))] + /// # fn example() { + /// use aws_config::provider_config::ProviderConfig; + /// use aws_sdk_sts::Region; + /// use aws_config::web_identity_token::WebIdentityTokenCredentialsProvider; + /// let conf = ProviderConfig::without_region().with_region(Some(Region::new("us-east-1"))); + /// + /// let credential_provider = WebIdentityTokenCredentialsProvider::builder().configure(&conf).build(); + /// # } + /// ``` + pub fn without_region() -> Self { + Self::default() + } + + /// Constructs a ProviderConfig with no fields set + pub fn empty() -> Self { + ProviderConfig { + env: Env::default(), + fs: Fs::default(), + time_source: TimeSource::default(), + connector: HttpConnector::Prebuilt(None), + sleep: None, + region: None, + } + } + + /// Create a default provider config with the region region automatically loaded from the default chain. 
+ /// + /// # Examples + /// ```no_run + /// # async fn test() { + /// use aws_config::provider_config::ProviderConfig; + /// use aws_sdk_sts::Region; + /// use aws_config::web_identity_token::WebIdentityTokenCredentialsProvider; + /// let conf = ProviderConfig::with_default_region().await; + /// let credential_provider = WebIdentityTokenCredentialsProvider::builder().configure(&conf).build(); + /// } + /// ``` + pub async fn with_default_region() -> Self { + Self::without_region().load_default_region().await + } + + // When all crate features are disabled, these accessors are unused + + #[allow(dead_code)] + pub(crate) fn env(&self) -> Env { + self.env.clone() + } + + #[allow(dead_code)] + pub(crate) fn fs(&self) -> Fs { + self.fs.clone() + } + + #[allow(dead_code)] + pub(crate) fn time_source(&self) -> TimeSource { + self.time_source.clone() + } + + #[allow(dead_code)] + pub(crate) fn default_connector(&self) -> Option { + self.connector + .connector(&HttpSettings::default(), self.sleep.clone()) + } + + #[allow(dead_code)] + pub(crate) fn connector(&self, settings: &HttpSettings) -> Option { + self.connector.connector(settings, self.sleep.clone()) + } + + #[allow(dead_code)] + pub(crate) fn sleep(&self) -> Option> { + self.sleep.clone() + } + + #[allow(dead_code)] + pub(crate) fn region(&self) -> Option { + self.region.clone() + } + + /// Override the region for the configuration + pub fn with_region(mut self, region: Option) -> Self { + self.region = region; + self + } + + /// Use the [default region chain](crate::default_provider::region) to set the + /// region for this configuration + /// + /// Note: the `env` and `fs` already set on this provider will be used when loading the default region. + pub async fn load_default_region(self) -> Self { + use crate::default_provider::region::DefaultRegionChain; + let provider_chain = DefaultRegionChain::builder().configure(&self).build(); + self.with_region(provider_chain.region().await) + } + + // these setters are doc(hidden) because they only exist for tests + + #[doc(hidden)] + pub fn with_fs(self, fs: Fs) -> Self { + ProviderConfig { fs, ..self } + } + + #[doc(hidden)] + pub fn with_env(self, env: Env) -> Self { + ProviderConfig { env, ..self } + } + + #[doc(hidden)] + pub fn with_time_source(self, time_source: TimeSource) -> Self { + ProviderConfig { + time_source, + ..self + } + } + + /// Override the HTTPS connector for this configuration + /// + /// **Warning**: Use of this method will prevent you from taking advantage of the HTTP connect timeouts. + /// Consider [`ProviderConfig::with_tcp_connector`]. + /// + /// # Stability + /// This method is expected to change to support HTTP configuration. + pub fn with_http_connector(self, connector: DynConnector) -> Self { + ProviderConfig { + connector: HttpConnector::Prebuilt(Some(connector)), + ..self + } + } + + /// Override the TCP connector for this configuration + /// + /// This connector MUST provide an HTTPS encrypted connection. + /// + /// # Stability + /// This method may change to support HTTP configuration. 
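+    ///
+    /// A minimal sketch, assuming `hyper-tls` is available to supply an HTTPS-capable
+    /// connector:
+    ///
+    /// ```no_run
+    /// use aws_config::provider_config::ProviderConfig;
+    ///
+    /// let connector = hyper_tls::HttpsConnector::new();
+    /// let conf = ProviderConfig::empty().with_tcp_connector(connector);
+    /// ```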
+ pub fn with_tcp_connector(self, connector: C) -> Self + where + C: Clone + Send + Sync + 'static, + C: tower::Service, + C::Response: Connection + AsyncRead + AsyncWrite + Send + Unpin + 'static, + C::Future: Unpin + Send + 'static, + C::Error: Into>, + { + let connector_fn = move |settings: &HttpSettings, sleep: Option>| { + let mut builder = aws_smithy_client::hyper_ext::Adapter::builder() + .timeout(&settings.http_timeout_config); + if let Some(sleep) = sleep { + builder = builder.sleep_impl(sleep); + }; + Some(DynConnector::new(builder.build(connector.clone()))) + }; + ProviderConfig { + connector: HttpConnector::ConnectorFn(Arc::new(connector_fn)), + ..self + } + } + + /// Override the sleep implementation for this configuration + pub fn with_sleep(self, sleep: impl AsyncSleep + 'static) -> Self { + ProviderConfig { + sleep: Some(Arc::new(sleep)), + ..self + } + } +} diff --git a/patch/aws-config/src/sso.rs b/patch/aws-config/src/sso.rs new file mode 100644 index 0000000000000..638de13c44fb4 --- /dev/null +++ b/patch/aws-config/src/sso.rs @@ -0,0 +1,454 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! SSO Credentials Provider +//! +//! This credentials provider enables loading credentials from `~/.aws/sso/cache`. For more information, +//! see [Using AWS SSO Credentials](https://docs.aws.amazon.com/toolkit-for-vscode/latest/userguide/sso-credentials.html) +//! +//! This provider is included automatically when profiles are loaded. + +use crate::fs_util::{home_dir, Os}; +use crate::json_credentials::{json_parse_loop, InvalidJsonCredentials}; +use crate::provider_config::ProviderConfig; + +use aws_sdk_sso::middleware::DefaultMiddleware as SsoMiddleware; +use aws_sdk_sso::model::RoleCredentials; +use aws_smithy_client::erase::DynConnector; +use aws_smithy_types::date_time::Format; +use aws_smithy_types::DateTime; +use aws_types::credentials::{CredentialsError, ProvideCredentials}; +use aws_types::os_shim_internal::{Env, Fs}; +use aws_types::region::Region; +use aws_types::{credentials, Credentials}; + +use std::convert::TryInto; +use std::error::Error; +use std::fmt::{Display, Formatter}; +use std::io; +use std::path::PathBuf; + +use zeroize::Zeroizing; + +impl crate::provider_config::ProviderConfig { + pub(crate) fn sso_client( + &self, + ) -> aws_smithy_client::Client { + use crate::connector::expect_connector; + use aws_smithy_client::http_connector::HttpSettings; + + aws_smithy_client::Builder::<(), SsoMiddleware>::new() + .connector(expect_connector(self.connector(&HttpSettings::default()))) + .sleep_impl(self.sleep()) + .build() + } +} + +/// SSO Credentials Provider +/// +/// _Note: This provider is part of the default credentials chain and is integrated with the profile-file provider._ +/// +/// This credentials provider will use cached SSO tokens stored in `~/.aws/sso/cache/.json`. +/// `` is computed based on the configured [`start_url`](Builder::start_url). 
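+///
+/// A minimal construction sketch (placeholder values; assumes the `sso` module is publicly
+/// exported from `aws_config`):
+///
+/// ```no_run
+/// use aws_config::sso::SsoCredentialsProvider;
+/// use aws_types::region::Region;
+///
+/// let provider = SsoCredentialsProvider::builder()
+///     .account_id("123456789012")
+///     .role_name("example-role")
+///     .start_url("https://d-abc123.awsapps.com/start")
+///     .region(Region::new("us-east-1"))
+///     .build();
+/// ```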
+#[derive(Debug)] +pub struct SsoCredentialsProvider { + fs: Fs, + env: Env, + sso_config: SsoConfig, + client: aws_smithy_client::Client, +} + +impl SsoCredentialsProvider { + /// Creates a builder for [`SsoCredentialsProvider`] + pub fn builder() -> Builder { + Builder::new() + } + + pub(crate) fn new(provider_config: &ProviderConfig, sso_config: SsoConfig) -> Self { + let fs = provider_config.fs(); + let env = provider_config.env(); + + SsoCredentialsProvider { + fs, + env, + client: provider_config.sso_client(), + sso_config, + } + } + + async fn credentials(&self) -> credentials::Result { + load_sso_credentials(&self.sso_config, &self.client, &self.env, &self.fs).await + } +} + +impl ProvideCredentials for SsoCredentialsProvider { + fn provide_credentials<'a>(&'a self) -> credentials::future::ProvideCredentials<'a> + where + Self: 'a, + { + credentials::future::ProvideCredentials::new(self.credentials()) + } +} + +/// Builder for [`SsoCredentialsProvider`] +#[derive(Default, Debug, Clone)] +pub struct Builder { + provider_config: Option, + account_id: Option, + role_name: Option, + start_url: Option, + region: Option, +} + +impl Builder { + /// Create a new builder for [`SsoCredentialsProvider`] + pub fn new() -> Self { + Self::default() + } + + /// Override the configuration used for this provider + pub fn configure(mut self, provider_config: &ProviderConfig) -> Self { + self.provider_config = Some(provider_config.clone()); + self + } + + /// Set the account id used for SSO + pub fn account_id(mut self, account_id: impl Into) -> Self { + self.account_id = Some(account_id.into()); + self + } + + /// Set the role name used for SSO + pub fn role_name(mut self, role_name: impl Into) -> Self { + self.role_name = Some(role_name.into()); + self + } + + /// Set the start URL used for SSO + pub fn start_url(mut self, start_url: impl Into) -> Self { + self.start_url = Some(start_url.into()); + self + } + + /// Set the region used for SSO + pub fn region(mut self, region: Region) -> Self { + self.region = Some(region); + self + } + + /// Construct an SsoCredentialsProvider from the builder + /// + /// # Panics + /// This method will panic if the any of the following required fields are unset: + /// - [`start_url`](Self::start_url) + /// - [`role_name`](Self::role_name) + /// - [`account_id`](Self::account_id) + /// - [`region`](Self::region) + pub fn build(self) -> SsoCredentialsProvider { + let provider_config = self.provider_config.unwrap_or_default(); + let sso_config = SsoConfig { + account_id: self.account_id.expect("account_id must be set"), + role_name: self.role_name.expect("role_name must be set"), + start_url: self.start_url.expect("start_url must be set"), + region: self.region.expect("region must be set"), + }; + SsoCredentialsProvider::new(&provider_config, sso_config) + } +} + +#[derive(Debug)] +pub(crate) enum LoadTokenError { + InvalidCredentials(InvalidJsonCredentials), + NoHomeDirectory, + IoError { err: io::Error, path: PathBuf }, +} + +impl Display for LoadTokenError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + LoadTokenError::InvalidCredentials(err) => { + write!(f, "SSO Token was invalid (expected JSON): {}", err) + } + LoadTokenError::NoHomeDirectory => write!(f, "Could not resolve a home directory"), + LoadTokenError::IoError { err, path } => { + write!(f, "failed to read `{}`: {}", path.display(), err) + } + } + } +} + +impl Error for LoadTokenError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + 
LoadTokenError::InvalidCredentials(err) => Some(err as _), + LoadTokenError::NoHomeDirectory => None, + LoadTokenError::IoError { err, .. } => Some(err as _), + } + } +} + +#[derive(Debug)] +pub(crate) struct SsoConfig { + pub(crate) account_id: String, + pub(crate) role_name: String, + pub(crate) start_url: String, + pub(crate) region: Region, +} + +async fn load_sso_credentials( + sso_config: &SsoConfig, + sso: &aws_smithy_client::Client, + env: &Env, + fs: &Fs, +) -> credentials::Result { + let token = load_token(&sso_config.start_url, env, fs) + .await + .map_err(CredentialsError::provider_error)?; + let config = aws_sdk_sso::Config::builder() + .region(sso_config.region.clone()) + .build(); + let operation = aws_sdk_sso::operation::GetRoleCredentials::builder() + .role_name(&sso_config.role_name) + .access_token(&*token.access_token) + .account_id(&sso_config.account_id) + .build() + .map_err(|err| { + CredentialsError::unhandled(format!("could not construct SSO token input: {}", err)) + })? + .make_operation(&config) + .await + .map_err(CredentialsError::unhandled)?; + let resp = sso + .call(operation) + .await + .map_err(CredentialsError::provider_error)?; + let credentials: RoleCredentials = resp + .role_credentials + .ok_or_else(|| CredentialsError::unhandled("SSO did not return credentials"))?; + let akid = credentials + .access_key_id + .ok_or_else(|| CredentialsError::unhandled("no access key id in response"))?; + let secret_key = credentials + .secret_access_key + .ok_or_else(|| CredentialsError::unhandled("no secret key in response"))?; + let expiration = DateTime::from_millis(credentials.expiration) + .try_into() + .map_err(|err| { + CredentialsError::unhandled(format!( + "expiration could not be converted into a system time: {}", + err + )) + })?; + Ok(Credentials::new( + akid, + secret_key, + credentials.session_token, + Some(expiration), + "SSO", + )) +} + +/// Load the token for `start_url` from `~/.aws/sso/cache/.json` +async fn load_token(start_url: &str, env: &Env, fs: &Fs) -> Result { + let home = home_dir(env, Os::real()).ok_or(LoadTokenError::NoHomeDirectory)?; + let path = sso_token_path(start_url, &home); + let data = + Zeroizing::new( + fs.read_to_end(&path) + .await + .map_err(|err| LoadTokenError::IoError { + err, + path: path.to_path_buf(), + })?, + ); + let token = parse_token_json(&data).map_err(LoadTokenError::InvalidCredentials)?; + Ok(token) +} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct SsoToken { + access_token: Zeroizing, + expires_at: DateTime, + region: Option, +} + +/// Parse SSO token JSON from input +fn parse_token_json(input: &[u8]) -> Result { + /* + Example: + { + "accessToken": "base64string", + "expiresAt": "2019-11-14T04:05:45Z", + "region": "us-west-2", + "startUrl": "https://d-abc123.awsapps.com/start" + }*/ + let mut acccess_token = None; + let mut expires_at = None; + let mut region = None; + let mut start_url = None; + json_parse_loop(input, |key, value| match key { + key if key.eq_ignore_ascii_case("accessToken") => acccess_token = Some(value.to_string()), + key if key.eq_ignore_ascii_case("expiresAt") => expires_at = Some(value), + key if key.eq_ignore_ascii_case("region") => region = Some(value.to_string()), + key if key.eq_ignore_ascii_case("startUrl") => start_url = Some(value.to_string()), + _other => {} // ignored + })?; + let access_token = + Zeroizing::new(acccess_token.ok_or(InvalidJsonCredentials::MissingField("accessToken"))?); + let expires_at = 
expires_at.ok_or(InvalidJsonCredentials::MissingField("expiresAt"))?; + let expires_at = DateTime::from_str(expires_at.as_ref(), Format::DateTime).map_err(|e| { + InvalidJsonCredentials::InvalidField { + field: "expiresAt", + err: e.into(), + } + })?; + let region = region.map(Region::new); + Ok(SsoToken { + access_token, + expires_at, + region, + }) +} + +#[cfg(feature = "openssl")] +fn sha1(buf: &[u8]) -> [u8; 20] { + use openssl::sha::sha1; + sha1(buf) +} + +#[cfg(feature = "ring")] +fn sha1(buf: &[u8]) -> [u8; 20] { + use ring::digest; + let mut ret: [u8; 20] = [0; 20]; + ret.clone_from_slice(digest::digest(&digest::SHA1_FOR_LEGACY_USE_ONLY, buf).as_ref()); + ret +} + +/// Determine the SSO token path for a given start_url +fn sso_token_path(start_url: &str, home: &str) -> PathBuf { + // hex::encode returns a lowercase string + let mut out = PathBuf::with_capacity(home.len() + "/.aws/sso/cache".len() + ".json".len() + 40); + out.push(home); + out.push(".aws/sso/cache"); + out.push(&hex::encode(sha1(start_url.as_bytes()))); + out.set_extension("json"); + out +} + +#[cfg(test)] +mod test { + use crate::json_credentials::InvalidJsonCredentials; + use crate::sso::{load_token, parse_token_json, sso_token_path, LoadTokenError, SsoToken}; + use aws_smithy_types::DateTime; + use aws_types::os_shim_internal::{Env, Fs}; + use aws_types::region::Region; + use zeroize::Zeroizing; + + #[test] + fn deserialize_valid_tokens() { + let token = br#" + { + "accessToken": "base64string", + "expiresAt": "2009-02-13T23:31:30Z", + "region": "us-west-2", + "startUrl": "https://d-abc123.awsapps.com/start" + }"#; + assert_eq!( + parse_token_json(token).expect("valid"), + SsoToken { + access_token: Zeroizing::new("base64string".into()), + expires_at: DateTime::from_secs(1234567890), + region: Some(Region::from_static("us-west-2")) + } + ); + + let no_region = br#"{ + "accessToken": "base64string", + "expiresAt": "2009-02-13T23:31:30Z" + }"#; + assert_eq!( + parse_token_json(no_region).expect("valid"), + SsoToken { + access_token: Zeroizing::new("base64string".into()), + expires_at: DateTime::from_secs(1234567890), + region: None + } + ); + } + + #[test] + fn invalid_timestamp() { + let token = br#" + { + "accessToken": "base64string", + "expiresAt": "notatimestamp", + "region": "us-west-2", + "startUrl": "https://d-abc123.awsapps.com/start" + }"#; + let err = parse_token_json(token).expect_err("invalid timestamp"); + assert!( + format!("{}", err).contains("Invalid field in response: `expiresAt`."), + "{}", + err + ); + } + + #[test] + fn missing_fields() { + let token = br#" + { + "expiresAt": "notatimestamp", + "region": "us-west-2", + "startUrl": "https://d-abc123.awsapps.com/start" + }"#; + let err = parse_token_json(token).expect_err("missing akid"); + assert!( + matches!(err, InvalidJsonCredentials::MissingField("accessToken")), + "incorrect error: {:?}", + err + ); + + let token = br#" + { + "accessToken": "akid", + "region": "us-west-2", + "startUrl": "https://d-abc123.awsapps.com/start" + }"#; + let err = parse_token_json(token).expect_err("missing expiry"); + assert!( + matches!(err, InvalidJsonCredentials::MissingField("expiresAt")), + "incorrect error: {:?}", + err + ); + } + + #[test] + fn determine_correct_cache_filenames() { + assert_eq!( + sso_token_path("https://d-92671207e4.awsapps.com/start", "/home/me").as_os_str(), + "/home/me/.aws/sso/cache/13f9d35043871d073ab260e020f0ffde092cb14b.json" + ); + assert_eq!( + sso_token_path("https://d-92671207e4.awsapps.com/start", "/home/me/").as_os_str(), + 
"/home/me/.aws/sso/cache/13f9d35043871d073ab260e020f0ffde092cb14b.json" + ); + } + + #[tokio::test] + async fn gracefully_handle_missing_files() { + let err = load_token( + "asdf", + &Env::from_slice(&[("HOME", "/home")]), + &Fs::from_slice(&[]), + ) + .await + .expect_err("should fail, file is missing"); + assert!( + matches!(err, LoadTokenError::IoError { .. }), + "should be io error, got {}", + err + ); + } +} diff --git a/patch/aws-config/src/sts.rs b/patch/aws-config/src/sts.rs new file mode 100644 index 0000000000000..8d53cbfcf41fa --- /dev/null +++ b/patch/aws-config/src/sts.rs @@ -0,0 +1,27 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Credential provider augmentation through the AWS Security Token Service (STS). + +mod assume_role; + +pub(crate) mod util; + +use crate::connector::expect_connector; +pub use assume_role::{AssumeRoleProvider, AssumeRoleProviderBuilder}; + +use aws_sdk_sts::middleware::DefaultMiddleware; +use aws_smithy_client::erase::DynConnector; +use aws_smithy_client::http_connector::HttpSettings; +use aws_smithy_client::{Builder, Client}; + +impl crate::provider_config::ProviderConfig { + pub(crate) fn sts_client(&self) -> Client { + Builder::<(), DefaultMiddleware>::new() + .connector(expect_connector(self.connector(&HttpSettings::default()))) + .sleep_impl(self.sleep()) + .build() + } +} diff --git a/patch/aws-config/src/sts/assume_role.rs b/patch/aws-config/src/sts/assume_role.rs new file mode 100644 index 0000000000000..c64fb5d1908da --- /dev/null +++ b/patch/aws-config/src/sts/assume_role.rs @@ -0,0 +1,223 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Assume credentials for a role through the AWS Security Token Service (STS). + +use aws_sdk_sts::error::AssumeRoleErrorKind; +use aws_sdk_sts::middleware::DefaultMiddleware; +use aws_sdk_sts::operation::AssumeRole; +use aws_smithy_async::rt::sleep::default_async_sleep; +use aws_smithy_client::erase::DynConnector; +use aws_smithy_client::http_connector::HttpSettings; +use aws_smithy_http::result::SdkError; +use aws_types::credentials::{ + self, future, CredentialsError, ProvideCredentials, SharedCredentialsProvider, +}; +use aws_types::region::Region; + +use crate::connector::{default_connector, expect_connector}; +use tracing::Instrument; + +/// Credentials provider that uses credentials provided by another provider to assume a role +/// through the AWS Security Token Service (STS). +/// +/// When asked to provide credentials, this provider will first invoke the inner credentials +/// provider to get AWS credentials for STS. Then, it will call STS to get assumed credentials for +/// the desired role. 
+/// +/// # Examples +/// ```no_run +/// use aws_config::sts::{AssumeRoleProvider}; +/// use aws_types::{Credentials, region::Region}; +/// use aws_config::environment; +/// use aws_config::environment::credentials::EnvironmentVariableCredentialsProvider; +/// use std::sync::Arc; +/// +/// let provider = AssumeRoleProvider::builder("arn:aws:iam::123456789012:role/demo") +/// .region(Region::from_static("us-east-2")) +/// .session_name("testAR") +/// .build(Arc::new(EnvironmentVariableCredentialsProvider::new()) as Arc<_>); +/// ``` +#[derive(Debug)] +pub struct AssumeRoleProvider { + sts: aws_smithy_client::Client, + conf: aws_sdk_sts::Config, + op: aws_sdk_sts::input::AssumeRoleInput, +} + +impl AssumeRoleProvider { + /// Build a new role-assuming provider for the given role. + /// + /// The `role` argument should take the form an Amazon Resource Name (ARN) like + /// + /// ```text + /// arn:aws:iam::123456789012:role/example + /// ``` + pub fn builder(role: impl Into) -> AssumeRoleProviderBuilder { + AssumeRoleProviderBuilder::new(role.into()) + } +} + +/// A builder for [`AssumeRoleProvider`]. +/// +/// Construct one through [`AssumeRoleProvider::builder`]. +pub struct AssumeRoleProviderBuilder { + role_arn: String, + external_id: Option, + session_name: Option, + region: Option, + connection: Option, +} + +impl AssumeRoleProviderBuilder { + /// Start a new assume role builder for the given role. + /// + /// The `role` argument should take the form an Amazon Resource Name (ARN) like + /// + /// ```text + /// arn:aws:iam::123456789012:role/example + /// ``` + pub fn new(role: impl Into) -> Self { + Self { + role_arn: role.into(), + external_id: None, + session_name: None, + region: None, + connection: None, + } + } + + /// Set a unique identifier that might be required when you assume a role in another account. + /// + /// If the administrator of the account to which the role belongs provided you with an external + /// ID, then provide that value in this parameter. The value can be any string, such as a + /// passphrase or account number. + pub fn external_id(mut self, id: impl Into) -> Self { + self.external_id = Some(id.into()); + self + } + + /// Set an identifier for the assumed role session. + /// + /// Use the role session name to uniquely identify a session when the same role is assumed by + /// different principals or for different reasons. In cross-account scenarios, the role session + /// name is visible to, and can be logged by the account that owns the role. The role session + /// name is also used in the ARN of the assumed role principal. + pub fn session_name(mut self, name: impl Into) -> Self { + self.session_name = Some(name.into()); + self + } + + /// Set the region to assume the role in. + /// + /// This dictates which STS endpoint the AssumeRole action is invoked on. + pub fn region(mut self, region: Region) -> Self { + self.region = Some(region); + self + } + + /// Set the backing connection to use when talking to STS. + /// + /// If the `rustls` or `nativetls` features are enabled, this field is optional and a default + /// backing connection will be provided. + pub fn connection(mut self, conn: impl aws_smithy_client::bounds::SmithyConnector) -> Self { + self.connection = Some(aws_smithy_client::erase::DynConnector::new(conn)); + self + } + + /// Build a credentials provider for this role authorized by the given `provider`. 
+ pub fn build(self, provider: impl Into) -> AssumeRoleProvider { + let config = aws_sdk_sts::Config::builder() + .credentials_provider(provider.into()) + .region(self.region.clone()) + .build(); + + let conn = self.connection.unwrap_or_else(|| { + expect_connector(default_connector( + &HttpSettings::default(), + default_async_sleep(), + )) + }); + let client = aws_smithy_client::Builder::new() + .connector(conn) + .middleware(DefaultMiddleware::new()) + .sleep_impl(default_async_sleep()) + .build(); + + let session_name = self + .session_name + .unwrap_or_else(|| super::util::default_session_name("assume-role-provider")); + + let operation = AssumeRole::builder() + .set_role_arn(Some(self.role_arn)) + .set_external_id(self.external_id) + .set_role_session_name(Some(session_name)) + .build() + .expect("operation is valid"); + + AssumeRoleProvider { + sts: client, + conf: config, + op: operation, + } + } +} + +impl AssumeRoleProvider { + #[tracing::instrument( + name = "assume_role", + level = "info", + skip(self), + fields(op = ?self.op) + )] + async fn credentials(&self) -> credentials::Result { + tracing::info!("assuming role"); + + tracing::debug!("retrieving assumed credentials"); + let op = self + .op + .clone() + .make_operation(&self.conf) + .await + .expect("valid operation"); + + let assumed = self.sts.call(op).in_current_span().await; + match assumed { + Ok(assumed) => { + tracing::debug!( + access_key_id = ?assumed.credentials.as_ref().map(|c| &c.access_key_id), + "obtained assumed credentials" + ); + super::util::into_credentials(assumed.credentials, "AssumeRoleProvider") + } + Err(SdkError::ServiceError { err, raw }) => { + match err.kind { + AssumeRoleErrorKind::RegionDisabledException(_) + | AssumeRoleErrorKind::MalformedPolicyDocumentException(_) => { + return Err(CredentialsError::invalid_configuration( + SdkError::ServiceError { err, raw }, + )) + } + _ => {} + } + tracing::warn!(error = ?err.message(), "sts refused to grant assume role"); + Err(CredentialsError::provider_error(SdkError::ServiceError { + err, + raw, + })) + } + Err(err) => Err(CredentialsError::provider_error(err)), + } + } +} + +impl ProvideCredentials for AssumeRoleProvider { + fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials + where + Self: 'a, + { + future::ProvideCredentials::new(self.credentials()) + } +} diff --git a/patch/aws-config/src/sts/util.rs b/patch/aws-config/src/sts/util.rs new file mode 100644 index 0000000000000..68d6ec844be52 --- /dev/null +++ b/patch/aws-config/src/sts/util.rs @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +use aws_sdk_sts::model::Credentials as StsCredentials; +use aws_types::credentials::{self, CredentialsError}; +use aws_types::Credentials as AwsCredentials; + +use std::convert::TryFrom; +use std::time::{SystemTime, UNIX_EPOCH}; + +/// Convert STS credentials to aws_auth::Credentials +pub(crate) fn into_credentials( + sts_credentials: Option, + provider_name: &'static str, +) -> credentials::Result { + let sts_credentials = sts_credentials + .ok_or_else(|| CredentialsError::unhandled("STS credentials must be defined"))?; + let expiration = SystemTime::try_from( + sts_credentials + .expiration + .ok_or_else(|| CredentialsError::unhandled("missing expiration"))?, + ) + .map_err(|_| { + CredentialsError::unhandled( + "credential expiration time cannot be represented by a SystemTime", + ) + })?; + Ok(AwsCredentials::new( + sts_credentials + .access_key_id + .ok_or_else(|| CredentialsError::unhandled("access key id missing from result"))?, + sts_credentials + .secret_access_key + .ok_or_else(|| CredentialsError::unhandled("secret access token missing"))?, + sts_credentials.session_token, + Some(expiration), + provider_name, + )) +} + +/// Create a default STS session name +/// +/// STS Assume Role providers MUST assign a name to their generated session. When a user does not +/// provide a name for the session, the provider will choose a name composed of a base + a timestamp, +/// e.g. `profile-file-provider-123456789` +pub(crate) fn default_session_name(base: &str) -> String { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("post epoch"); + format!("{}-{}", base, now.as_millis()) +} diff --git a/patch/aws-config/src/test_case.rs b/patch/aws-config/src/test_case.rs new file mode 100644 index 0000000000000..4d4988b7debf4 --- /dev/null +++ b/patch/aws-config/src/test_case.rs @@ -0,0 +1,253 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use crate::provider_config::ProviderConfig; + +use aws_smithy_async::rt::sleep::{AsyncSleep, Sleep, TokioSleep}; +use aws_smithy_client::dvr::{NetworkTraffic, RecordingConnection, ReplayingConnection}; +use aws_smithy_client::erase::DynConnector; +use aws_smithy_client::http_connector::HttpSettings; +use aws_types::credentials::{self, ProvideCredentials}; +use aws_types::os_shim_internal::{Env, Fs}; + +use serde::Deserialize; + +use crate::connector::default_connector; +use std::collections::HashMap; +use std::error::Error; +use std::fmt::Debug; +use std::future::Future; +use std::path::{Path, PathBuf}; +use std::time::{Duration, UNIX_EPOCH}; + +/// Test case credentials +/// +/// Credentials for use in test cases. These implement Serialize/Deserialize and have a +/// non-hidden debug implementation. 
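For orientation, the `default_session_name` helper in `sts/util.rs` above just appends the current epoch-millisecond timestamp to a base label. A self-contained sketch of the same behavior (the helper itself is `pub(crate)`, so the function name here is illustrative, not part of the patch):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Mirrors `default_session_name`: "<base>-<milliseconds since the Unix epoch>".
fn example_session_name(base: &str) -> String {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is after the Unix epoch");
    format!("{}-{}", base, now.as_millis())
}

fn main() {
    // Prints something like "web-identity-token-1650000000000".
    println!("{}", example_session_name("web-identity-token"));
}
```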
+#[derive(Deserialize, Debug, Eq, PartialEq)] +struct Credentials { + access_key_id: String, + secret_access_key: String, + session_token: Option, + expiry: Option, +} + +/// Convert real credentials to test credentials +/// +/// Comparing equality on real credentials works, but it's a pain because the Debug implementation +/// hides the actual keys +impl From<&aws_types::Credentials> for Credentials { + fn from(credentials: &aws_types::Credentials) -> Self { + Self { + access_key_id: credentials.access_key_id().into(), + secret_access_key: credentials.secret_access_key().into(), + session_token: credentials.session_token().map(ToString::to_string), + expiry: credentials + .expiry() + .map(|t| t.duration_since(UNIX_EPOCH).unwrap().as_secs()), + } + } +} + +impl From for Credentials { + fn from(credentials: aws_types::Credentials) -> Self { + (&credentials).into() + } +} + +/// Credentials test environment +/// +/// A credentials test environment is a directory containing: +/// - an `fs` directory. This is loaded into the test as if it was mounted at `/` +/// - an `env.json` file containing environment variables +/// - an `http-traffic.json` file containing an http traffic log from [`dvr`](aws_smithy_client::dvr) +/// - a `test-case.json` file defining the expected output of the test +pub struct TestEnvironment { + env: Env, + fs: Fs, + network_traffic: NetworkTraffic, + metadata: Metadata, + base_dir: PathBuf, +} + +/// Connector which expects no traffic +pub fn no_traffic_connector() -> DynConnector { + DynConnector::new(ReplayingConnection::new(vec![])) +} + +#[derive(Debug)] +struct InstantSleep; +impl AsyncSleep for InstantSleep { + fn sleep(&self, _duration: Duration) -> Sleep { + Sleep::new(std::future::ready(())) + } +} + +#[derive(Deserialize)] +pub enum GenericTestResult { + Ok(T), + ErrorContains(String), +} + +impl GenericTestResult +where + T: PartialEq + Debug, +{ + pub fn assert_matches(&self, result: Result, impl Error>) { + match (result, &self) { + (Ok(actual), GenericTestResult::Ok(expected)) => { + assert_eq!(expected, &actual.into(), "incorrect result was returned") + } + (Err(err), GenericTestResult::ErrorContains(substr)) => { + assert!( + format!("{}", err).contains(substr), + "`{}` did not contain `{}`", + err, + substr + ) + } + (Err(actual_error), GenericTestResult::Ok(expected_creds)) => panic!( + "expected credentials ({:?}) but an error was returned: {}", + expected_creds, actual_error + ), + (Ok(creds), GenericTestResult::ErrorContains(substr)) => panic!( + "expected an error containing: `{}`, but a result was returned: {:?}", + substr, + creds.into() + ), + } + } +} + +type TestResult = GenericTestResult; + +#[derive(Deserialize)] +pub struct Metadata { + result: TestResult, + docs: String, + name: String, +} + +impl TestEnvironment { + pub fn from_dir(dir: impl AsRef) -> Result> { + let dir = dir.as_ref(); + let env = std::fs::read_to_string(dir.join("env.json")) + .map_err(|e| format!("failed to load env: {}", e))?; + let env: HashMap = + serde_json::from_str(&env).map_err(|e| format!("failed to parse env: {}", e))?; + let env = Env::from(env); + let fs = Fs::from_test_dir(dir.join("fs"), "/"); + let network_traffic = std::fs::read_to_string(dir.join("http-traffic.json")) + .map_err(|e| format!("failed to load http traffic: {}", e))?; + let network_traffic: NetworkTraffic = serde_json::from_str(&network_traffic)?; + + let metadata: Metadata = serde_json::from_str( + &std::fs::read_to_string(dir.join("test-case.json")) + .map_err(|e| format!("failed to load 
test case: {}", e))?, + )?; + Ok(TestEnvironment { + base_dir: dir.into(), + env, + fs, + network_traffic, + metadata, + }) + } + + pub async fn provider_config(&self) -> (ReplayingConnection, ProviderConfig) { + let connector = ReplayingConnection::new(self.network_traffic.events().clone()); + ( + connector.clone(), + ProviderConfig::empty() + .with_fs(self.fs.clone()) + .with_env(self.env.clone()) + .with_http_connector(DynConnector::new(connector.clone())) + .with_sleep(TokioSleep::new()) + .load_default_region() + .await, + ) + } + + #[allow(unused)] + /// Record a test case from live (remote) HTTPS traffic + /// + /// The `default_connector()` from the crate will be used + pub async fn execute_from_live_traffic(&self, make_provider: impl Fn(ProviderConfig) -> F) + where + F: Future, + P: ProvideCredentials, + { + // swap out the connector generated from `http-traffic.json` for a real connector: + let settings = HttpSettings::default(); + let (_test_connector, config) = self.provider_config().await; + let live_connector = default_connector(&settings, config.sleep()).unwrap(); + let live_connector = RecordingConnection::new(live_connector); + let config = config.with_http_connector(DynConnector::new(live_connector.clone())); + let provider = make_provider(config).await; + let result = provider.provide_credentials().await; + std::fs::write( + self.base_dir.join("http-traffic-recorded.json"), + serde_json::to_string(&live_connector.network_traffic()).unwrap(), + ) + .unwrap(); + self.check_results(result); + } + + #[allow(dead_code)] + /// Execute the test suite & record a new traffic log + /// + /// A connector will be created with the factory, then request traffic will be recorded. + /// Response are generated from the existing http-traffic.json. + pub async fn execute_and_update(&self, make_provider: impl Fn(ProviderConfig) -> F) + where + F: Future, + P: ProvideCredentials, + { + let (connector, config) = self.provider_config().await; + let recording_connector = RecordingConnection::new(connector); + let config = config.with_http_connector(DynConnector::new(recording_connector.clone())); + let provider = make_provider(config).await; + let result = provider.provide_credentials().await; + std::fs::write( + self.base_dir.join("http-traffic-recorded.json"), + serde_json::to_string(&recording_connector.network_traffic()).unwrap(), + ) + .unwrap(); + self.check_results(result); + } + + fn log_info(&self) { + eprintln!("test case: {}. {}", self.metadata.name, self.metadata.docs); + } + + /// Execute a test case. Failures lead to panics. + pub async fn execute(&self, make_provider: impl Fn(ProviderConfig) -> F) + where + F: Future, + P: ProvideCredentials, + { + let (connector, conf) = self.provider_config().await; + let provider = make_provider(conf).await; + let result = provider.provide_credentials().await; + tokio::time::pause(); + self.log_info(); + self.check_results(result); + // todo: validate bodies + match connector + .validate( + &["CONTENT-TYPE", "x-aws-ec2-metadata-token"], + |_expected, _actual| Ok(()), + ) + .await + { + Ok(()) => {} + Err(e) => panic!("{}", e), + } + } + + fn check_results(&self, result: credentials::Result) { + self.metadata.result.assert_matches(result); + } +} diff --git a/patch/aws-config/src/web_identity_token.rs b/patch/aws-config/src/web_identity_token.rs new file mode 100644 index 0000000000000..c44452753081f --- /dev/null +++ b/patch/aws-config/src/web_identity_token.rs @@ -0,0 +1,343 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Load Credentials from Web Identity Tokens +//! +//! Web identity tokens can be loaded from file. The path may be set in one of three ways: +//! 1. [Environment Variables](#environment-variable-configuration) +//! 2. [AWS profile](#aws-profile-configuration) defined in `~/.aws/config` +//! 3. Static configuration via [`static_configuration`](Builder::static_configuration) +//! +//! _Note: [WebIdentityTokenCredentialsProvider] is part of the [default provider chain](crate::default_provider). +//! Unless you need specific behavior or configuration overrides, it is recommended to use the +//! default chain instead of using this provider directly. This client should be considered a "low level" +//! client as it does not include caching or profile-file resolution when used in isolation._ +//! +//! ## Environment Variable Configuration +//! WebIdentityTokenCredentialProvider will load the following environment variables: +//! - `AWS_WEB_IDENTITY_TOKEN_FILE`: **required**, location to find the token file containing a JWT token +//! - `AWS_ROLE_ARN`: **required**, role ARN to assume +//! - `AWS_IAM_ROLE_SESSION_NAME`: **optional**: Session name to use when assuming the role +//! +//! ## AWS Profile Configuration +//! _Note: Configuration of the web identity token provider via a shared profile is only supported +//! when using the [`ProfileFileCredentialsProvider`](crate::profile::credentials)._ +//! +//! Web identity token credentials can be loaded from `~/.aws/config` in two ways: +//! 1. Directly: +//! ```ini +//! [profile default] +//! role_arn = arn:aws:iam::1234567890123:role/RoleA +//! web_identity_token_file = /token.jwt +//! ``` +//! +//! 2. As a source profile for another role: +//! +//! ```ini +//! [profile default] +//! role_arn = arn:aws:iam::123456789:role/RoleA +//! source_profile = base +//! +//! [profile base] +//! role_arn = arn:aws:iam::123456789012:role/s3-reader +//! web_identity_token_file = /token.jwt +//! ``` +//! +//! # Examples +//! Web Identity Token providers are part of the [default chain](crate::default_provider::credentials). +//! However, they may be directly constructed if you don't want to use the default provider chain. +//! Unless overridden with [`static_configuration`](Builder::static_configuration), the provider will +//! load configuration from environment variables. +//! +//! ```no_run +//! # async fn test() { +//! use aws_config::web_identity_token::WebIdentityTokenCredentialsProvider; +//! use aws_config::provider_config::ProviderConfig; +//! let provider = WebIdentityTokenCredentialsProvider::builder() +//! .configure(&ProviderConfig::with_default_region().await) +//! .build(); +//! # } +//! 
``` + +use aws_sdk_sts::Region; +use aws_types::os_shim_internal::{Env, Fs}; + +use crate::provider_config::ProviderConfig; +use crate::sts; +use aws_sdk_sts::middleware::DefaultMiddleware; +use aws_smithy_client::erase::DynConnector; +use aws_types::credentials::{self, future, CredentialsError, ProvideCredentials}; +use std::borrow::Cow; +use std::path::{Path, PathBuf}; +use tracing::Instrument; + +const ENV_VAR_TOKEN_FILE: &str = "AWS_WEB_IDENTITY_TOKEN_FILE"; +const ENV_VAR_ROLE_ARN: &str = "AWS_ROLE_ARN"; +const ENV_VAR_SESSION_NAME: &str = "AWS_ROLE_SESSION_NAME"; + +/// Credential provider to load credentials from Web Identity Tokens +/// +/// See Module documentation for more details +#[derive(Debug)] +pub struct WebIdentityTokenCredentialsProvider { + source: Source, + fs: Fs, + client: aws_smithy_client::Client, + region: Option, +} + +impl WebIdentityTokenCredentialsProvider { + /// Builder for this credentials provider + pub fn builder() -> Builder { + Builder::default() + } +} + +#[derive(Debug)] +enum Source { + Env(Env), + Static(StaticConfiguration), +} + +/// Statically configured WebIdentityToken configuration +#[derive(Debug, Clone)] +pub struct StaticConfiguration { + /// Location of the file containing the web identity token + pub web_identity_token_file: PathBuf, + + /// RoleArn to assume + pub role_arn: String, + + /// Session name to use when assuming the role + pub session_name: String, +} + +impl ProvideCredentials for WebIdentityTokenCredentialsProvider { + fn provide_credentials<'a>(&'a self) -> future::ProvideCredentials<'a> + where + Self: 'a, + { + future::ProvideCredentials::new(self.credentials()) + } +} + +impl WebIdentityTokenCredentialsProvider { + fn source(&self) -> Result, CredentialsError> { + match &self.source { + Source::Env(env) => { + let token_file = env.get(ENV_VAR_TOKEN_FILE).map_err(|_| { + CredentialsError::not_loaded(format!("${} was not set", ENV_VAR_TOKEN_FILE)) + })?; + let role_arn = env.get(ENV_VAR_ROLE_ARN).map_err(|_| { + CredentialsError::invalid_configuration( + "AWS_ROLE_ARN environment variable must be set", + ) + })?; + let session_name = env + .get(ENV_VAR_SESSION_NAME) + .unwrap_or_else(|_| sts::util::default_session_name("web-identity-token")); + Ok(Cow::Owned(StaticConfiguration { + web_identity_token_file: token_file.into(), + role_arn, + session_name, + })) + } + Source::Static(conf) => Ok(Cow::Borrowed(conf)), + } + } + async fn credentials(&self) -> credentials::Result { + let conf = self.source()?; + load_credentials( + &self.fs, + &self.client, + &self.region.as_ref().cloned().ok_or_else(|| { + CredentialsError::invalid_configuration( + "region is required for WebIdentityTokenProvider", + ) + })?, + &conf.web_identity_token_file, + &conf.role_arn, + &conf.session_name, + ) + .instrument(tracing::debug_span!( + "load_credentials", + provider = "WebIdentityToken" + )) + .await + } +} + +/// Builder for [`WebIdentityTokenCredentialsProvider`](WebIdentityTokenCredentialsProvider) +#[derive(Default)] +pub struct Builder { + source: Option, + config: Option, +} + +impl Builder { + /// Configure generic options of the [WebIdentityTokenCredentialsProvider] + /// + /// # Examples + /// ```no_run + /// # async fn test() { + /// use aws_config::web_identity_token::WebIdentityTokenCredentialsProvider; + /// use aws_config::provider_config::ProviderConfig; + /// let provider = WebIdentityTokenCredentialsProvider::builder() + /// .configure(&ProviderConfig::with_default_region().await) + /// .build(); + /// # } + /// ``` + pub fn 
configure(mut self, provider_config: &ProviderConfig) -> Self { + self.config = Some(provider_config.clone()); + self + } + + /// Configure this builder to use [`StaticConfiguration`](StaticConfiguration) + /// + /// WebIdentityToken providers load credentials from the file system. The file system path used + /// may either determine be loaded from environment variables (default), or via a statically + /// configured path. + pub fn static_configuration(mut self, config: StaticConfiguration) -> Self { + self.source = Some(Source::Static(config)); + self + } + + /// Build a [`WebIdentityTokenCredentialsProvider`] + /// + /// ## Panics + /// If no connector has been enabled via crate features and no connector has been provided via the + /// builder, this function will panic. + pub fn build(self) -> WebIdentityTokenCredentialsProvider { + let conf = self.config.unwrap_or_default(); + let client = conf.sts_client(); + let source = self.source.unwrap_or_else(|| Source::Env(conf.env())); + WebIdentityTokenCredentialsProvider { + source, + fs: conf.fs(), + client, + region: conf.region(), + } + } +} + +async fn load_credentials( + fs: &Fs, + client: &aws_smithy_client::Client, + region: &Region, + token_file: impl AsRef, + role_arn: &str, + session_name: &str, +) -> credentials::Result { + let token = fs + .read_to_end(token_file) + .await + .map_err(CredentialsError::provider_error)?; + let token = String::from_utf8(token).map_err(|_utf_8_error| { + CredentialsError::unhandled("WebIdentityToken was not valid UTF-8") + })?; + let conf = aws_sdk_sts::Config::builder() + .region(region.clone()) + .build(); + + let operation = aws_sdk_sts::operation::AssumeRoleWithWebIdentity::builder() + .role_arn(role_arn) + .role_session_name(session_name) + .web_identity_token(token) + .build() + .expect("valid operation") + .make_operation(&conf) + .await + .expect("valid operation"); + let resp = client.call(operation).await.map_err(|sdk_error| { + tracing::warn!(error = ?sdk_error, "sts returned an error assuming web identity role"); + CredentialsError::provider_error(sdk_error) + })?; + sts::util::into_credentials(resp.credentials, "WebIdentityToken") +} + +#[cfg(test)] +mod test { + use crate::web_identity_token::{ + Builder, ENV_VAR_ROLE_ARN, ENV_VAR_SESSION_NAME, ENV_VAR_TOKEN_FILE, + }; + + use aws_sdk_sts::Region; + use aws_types::os_shim_internal::{Env, Fs}; + + use crate::provider_config::ProviderConfig; + use crate::test_case::no_traffic_connector; + use aws_types::credentials::CredentialsError; + use std::collections::HashMap; + + #[tokio::test] + async fn unloaded_provider() { + // empty environment + let conf = ProviderConfig::empty() + .with_env(Env::from_slice(&[])) + .with_http_connector(no_traffic_connector()) + .with_region(Some(Region::from_static("us-east-1"))); + + let provider = Builder::default().configure(&conf).build(); + let err = provider + .credentials() + .await + .expect_err("should fail, provider not loaded"); + match err { + CredentialsError::CredentialsNotLoaded { .. 
} => { /* ok */ } + _ => panic!("incorrect error variant"), + } + } + + #[tokio::test] + async fn missing_env_var() { + let env = Env::from_slice(&[(ENV_VAR_TOKEN_FILE, "/token.jwt")]); + let region = Some(Region::new("us-east-1")); + let provider = Builder::default() + .configure( + &ProviderConfig::empty() + .with_region(region) + .with_env(env) + .with_http_connector(no_traffic_connector()), + ) + .build(); + let err = provider + .credentials() + .await + .expect_err("should fail, provider not loaded"); + assert!( + format!("{}", err).contains("AWS_ROLE_ARN"), + "`{}` did not contain expected string", + err + ); + match err { + CredentialsError::InvalidConfiguration { .. } => { /* ok */ } + _ => panic!("incorrect error variant"), + } + } + + #[tokio::test] + async fn fs_missing_file() { + let env = Env::from_slice(&[ + (ENV_VAR_TOKEN_FILE, "/token.jwt"), + (ENV_VAR_ROLE_ARN, "arn:aws:iam::123456789123:role/test-role"), + (ENV_VAR_SESSION_NAME, "test-session"), + ]); + let fs = Fs::from_raw_map(HashMap::new()); + let provider = Builder::default() + .configure( + &ProviderConfig::empty() + .with_http_connector(no_traffic_connector()) + .with_region(Some(Region::new("us-east-1"))) + .with_env(env) + .with_fs(fs), + ) + .build(); + let err = provider.credentials().await.expect_err("no JWT token"); + match err { + CredentialsError::ProviderError { .. } => { /* ok */ } + _ => panic!("incorrect error variant"), + } + } +} diff --git a/patch/aws-sigv4/.cargo-checksum.json b/patch/aws-sigv4/.cargo-checksum.json new file mode 100644 index 0000000000000..98cfadd1c1d25 --- /dev/null +++ b/patch/aws-sigv4/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"b7e5d4567f421b1f149d91668c7767639fc4e799f57af5a0184389be349d78ce","LICENSE":"09e8a9bcec8067104652c168685ab0931e7868f9c8284b66f5ae6edae5f1130b","README.md":"a923eafae61ba2ac310d12783e63004583012fc73e2bf2e9ad5d99dc6735200a","src/date_time.rs":"d4afa179d55382951f33d31e18f93d8f6d70ec09575081cf4b3ad5aa7ef9989d","src/event_stream.rs":"3fb2a123e1152d2049dc71c3346c240ccd6975ac6bb8efad8f816ea84f61beba","src/http_request/canonical_request.rs":"1cb07c07e4884adcae5e14f74f5d6350be423d77ac5023c4f4b62dcc7c4fd533","src/http_request/mod.rs":"a9549a92f9c7ce75a56ef0564cb5d5209358215f86c5710bf7c32c18243b82d2","src/http_request/query_writer.rs":"277acbd30c70d2366edb0aa52425d0de5f9a5a185f779b298609e3e9dc4ed1df","src/http_request/settings.rs":"ba24ed298a7b7bf67fce8a47d9c01dd82d1bfc5233447d9c888961316b38728e","src/http_request/sign.rs":"93b95e2174e9cd7d58a74aca61d517a5e9842737ee46396db7b10b4e33910e70","src/http_request/test.rs":"52c0ac478d09c5fd232fcbfd03a36156e4310026d55872e0fdb3a241272391f6","src/http_request/url_escape.rs":"fc71739062595ab78a27fb9cc613e3c19d2fd8b0b4ea2ed8e0c47d2a8e12bc8b","src/lib.rs":"d37cb8d1eb05b83ca25d59e86b9072e4075746b054fab98b083641d4d9351ec5","src/sign.rs":"55cfbcbabe4b4b50f0d2d265733995cadf3fbded5b9e357cf1a73aa322907685"},"package":"ea07a5a108ee538793d681d608057218df95c5575f6c0699a1973c27a09334b2"} \ No newline at end of file diff --git a/patch/aws-sigv4/Cargo.toml b/patch/aws-sigv4/Cargo.toml new file mode 100644 index 0000000000000..2e946e02e67bd --- /dev/null +++ b/patch/aws-sigv4/Cargo.toml @@ -0,0 +1,91 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in 
this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "aws-sigv4" +version = "0.9.0" +authors = ["AWS Rust SDK Team ", "David Barsky "] +exclude = ["aws-sig-v4-test-suite/*"] +description = "SigV4 signer for HTTP requests and Event Stream messages." +license = "Apache-2.0" +repository = "https://github.com/awslabs/smithy-rs" +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] +targets = ["x86_64-unknown-linux-gnu"] +[dependencies.aws-smithy-eventstream] +version = "0.39.0" +optional = true + +[dependencies.aws-smithy-http] +version = "0.39.0" + +[dependencies.bytes] +version = "1" +optional = true + +[dependencies.form_urlencoded] +version = "1.0" +optional = true + +[dependencies.hex] +version = "0.4" + +[dependencies.http] +version = "0.2" +optional = true + +[dependencies.once_cell] +version = "1.8" + +[dependencies.percent-encoding] +version = "2.1" +optional = true + +[dependencies.regex] +version = "1.5" + +[dependencies.ring] +version = "0.16" +optional = true + +[dependencies.openssl] +version = "0.10.38" +optional = true + +[dependencies.time] +version = "0.3.5" + +[dependencies.tracing] +version = "0.1" + +[dev-dependencies.bytes] +version = "1" + +[dev-dependencies.httparse] +version = "1.5" + +[dev-dependencies.pretty_assertions] +version = "1.0" + +[dev-dependencies.proptest] +version = "1" + +[dev-dependencies.time] +version = "0.3.4" +features = ["parsing"] + +[features] +default = ["sign-http"] +sign-eventstream = ["aws-smithy-eventstream", "bytes"] +sign-http = ["http", "percent-encoding", "form_urlencoded"] diff --git a/patch/aws-sigv4/LICENSE b/patch/aws-sigv4/LICENSE new file mode 100644 index 0000000000000..67db8588217f2 --- /dev/null +++ b/patch/aws-sigv4/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/patch/aws-sigv4/README.md b/patch/aws-sigv4/README.md new file mode 100644 index 0000000000000..6e69eb5e9019f --- /dev/null +++ b/patch/aws-sigv4/README.md @@ -0,0 +1,9 @@ +# aws-sigv4 + +Low-level SigV4 request signing implementations. Customers will not generally need to use this crate directly. If you +need to manually sign requests, [aws-sig-auth](https://crates.io/crates/aws-sig-auth) offers a higher level interface +for signing. + + +This crate is part of the [AWS SDK for Rust](https://awslabs.github.io/aws-sdk-rust/) and the [smithy-rs](https://github.com/awslabs/smithy-rs) code generator. In most cases, it should not be used directly. + diff --git a/patch/aws-sigv4/src/date_time.rs b/patch/aws-sigv4/src/date_time.rs new file mode 100644 index 0000000000000..bf05278ef6618 --- /dev/null +++ b/patch/aws-sigv4/src/date_time.rs @@ -0,0 +1,144 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +// Some of the functions in this file are unused when disabling certain features +#![allow(dead_code)] + +use std::time::SystemTime; +use time::{OffsetDateTime, Time}; + +/// Truncates the subseconds from the given `SystemTime` to zero. +pub(crate) fn truncate_subsecs(time: SystemTime) -> SystemTime { + let date_time = OffsetDateTime::from(time); + let time = date_time.time(); + date_time + .replace_time( + Time::from_hms(time.hour(), time.minute(), time.second()).expect("was already a time"), + ) + .into() +} + +/// Formats a `SystemTime` in `YYYYMMDD` format. +pub(crate) fn format_date(time: SystemTime) -> String { + let time = OffsetDateTime::from(time); + format!( + "{:04}{:02}{:02}", + time.year(), + u8::from(time.month()), + time.day() + ) +} + +/// Formats a `SystemTime` in `YYYYMMDD'T'HHMMSS'Z'` format. +pub(crate) fn format_date_time(time: SystemTime) -> String { + let time = OffsetDateTime::from(time); + format!( + "{:04}{:02}{:02}T{:02}{:02}{:02}Z", + time.year(), + u8::from(time.month()), + time.day(), + time.hour(), + time.minute(), + time.second() + ) +} + +/// Parse functions that are only needed for unit tests. 
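The formatting helpers in `date_time.rs` above emit SigV4's compact `YYYYMMDD` and `YYYYMMDD'T'HHMMSS'Z'` forms. As a quick illustration of their output, here is a hypothetical in-crate test (the functions are the crate-internal helpers defined above; the test name is made up):

```rust
use std::time::UNIX_EPOCH;

// Hypothetical in-crate test: the Unix epoch renders as "19700101" / "19700101T000000Z".
#[test]
fn epoch_formatting_example() {
    assert_eq!("19700101", format_date(UNIX_EPOCH));
    assert_eq!("19700101T000000Z", format_date_time(UNIX_EPOCH));
}
```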
+#[cfg(test)] +pub(crate) mod test_parsers { + use std::{borrow::Cow, error::Error, fmt, time::SystemTime}; + use time::format_description; + use time::{Date, PrimitiveDateTime, Time}; + + const DATE_TIME_FORMAT: &str = "[year][month][day]T[hour][minute][second]Z"; + const DATE_FORMAT: &str = "[year][month][day]"; + + /// Parses `YYYYMMDD'T'HHMMSS'Z'` formatted dates into a `SystemTime`. + pub(crate) fn parse_date_time(date_time_str: &str) -> Result { + let date_time = PrimitiveDateTime::parse( + date_time_str, + &format_description::parse(DATE_TIME_FORMAT).unwrap(), + ) + .map_err(|err| ParseError(err.to_string().into()))? + .assume_utc(); + Ok(date_time.into()) + } + + /// Parses `YYYYMMDD` formatted dates into a `SystemTime`. + pub(crate) fn parse_date(date_str: &str) -> Result { + let date_time = PrimitiveDateTime::new( + Date::parse(date_str, &format_description::parse(DATE_FORMAT).unwrap()) + .map_err(|err| ParseError(err.to_string().into()))?, + Time::from_hms(0, 0, 0).unwrap(), + ) + .assume_utc(); + Ok(date_time.into()) + } + + #[derive(Debug)] + pub(crate) struct ParseError(Cow<'static, str>); + + impl fmt::Display for ParseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "failed to parse time: {}", self.0) + } + } + + impl Error for ParseError {} +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::date_time::test_parsers::{parse_date, parse_date_time}; + use time::format_description::well_known::Rfc3339; + + #[test] + fn date_format() { + let time: SystemTime = OffsetDateTime::parse("2039-02-04T23:01:09.104Z", &Rfc3339) + .unwrap() + .into(); + assert_eq!("20390204", format_date(time)); + let time: SystemTime = OffsetDateTime::parse("0100-01-02T00:00:00.000Z", &Rfc3339) + .unwrap() + .into(); + assert_eq!("01000102", format_date(time)); + } + + #[test] + fn date_time_format() { + let time: SystemTime = OffsetDateTime::parse("2039-02-04T23:01:09.104Z", &Rfc3339) + .unwrap() + .into(); + assert_eq!("20390204T230109Z", format_date_time(time)); + let time: SystemTime = OffsetDateTime::parse("0100-01-02T00:00:00.000Z", &Rfc3339) + .unwrap() + .into(); + assert_eq!("01000102T000000Z", format_date_time(time)); + } + + #[test] + fn date_time_roundtrip() { + let time = parse_date_time("20150830T123600Z").unwrap(); + assert_eq!("20150830T123600Z", format_date_time(time)); + } + + #[test] + fn date_roundtrip() { + let time = parse_date("20150830").unwrap(); + assert_eq!("20150830", format_date(time)); + } + + #[test] + fn test_truncate_subsecs() { + let time: SystemTime = OffsetDateTime::parse("2039-02-04T23:01:09.104Z", &Rfc3339) + .unwrap() + .into(); + let expected: SystemTime = OffsetDateTime::parse("2039-02-04T23:01:09.000Z", &Rfc3339) + .unwrap() + .into(); + assert_eq!(expected, truncate_subsecs(time)); + } +} diff --git a/patch/aws-sigv4/src/event_stream.rs b/patch/aws-sigv4/src/event_stream.rs new file mode 100644 index 0000000000000..48f1249d16ab3 --- /dev/null +++ b/patch/aws-sigv4/src/event_stream.rs @@ -0,0 +1,222 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Utilities to sign Event Stream messages. +//! +//! # Example: Signing an event stream message +//! +//! ```rust +//! use aws_sigv4::event_stream::{sign_message, SigningParams}; +//! use aws_smithy_eventstream::frame::{Header, HeaderValue, Message}; +//! use std::time::SystemTime; +//! +//! // The `last_signature` argument is the previous message's signature, or +//! 
// the signature of the initial HTTP request if a message hasn't been signed yet. +//! let last_signature = "example298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; +//! +//! let message_to_sign = Message::new(&b"example"[..]).add_header(Header::new( +//! "some-header", +//! HeaderValue::String("value".into()), +//! )); +//! +//! let params = SigningParams::builder() +//! .access_key("example access key") +//! .secret_key("example secret key") +//! .region("us-east-1") +//! .service_name("exampleservice") +//! .time(SystemTime::now()) +//! .settings(()) +//! .build() +//! .unwrap(); +//! +//! // Use the returned `signature` to sign the next message. +//! let (signed, signature) = +//! sign_message(&message_to_sign, &last_signature, ¶ms).into_parts(); +//! ``` + +use crate::date_time::{format_date, format_date_time, truncate_subsecs}; +use crate::sign::{calculate_signature, generate_signing_key, sha256_hex_string}; +use crate::SigningOutput; +use aws_smithy_eventstream::frame::{write_headers_to, Header, HeaderValue, Message}; +use bytes::Bytes; +use std::io::Write; +use std::time::SystemTime; + +/// Event stream signing parameters +pub type SigningParams<'a> = super::SigningParams<'a, ()>; + +/// Creates a string to sign for an Event Stream message. +fn calculate_string_to_sign( + message_payload: &[u8], + last_signature: &str, + time: SystemTime, + params: &SigningParams<'_>, +) -> Vec { + // Event Stream string to sign format is documented here: + // https://docs.aws.amazon.com/transcribe/latest/dg/how-streaming.html + let date_time_str = format_date_time(time); + let date_str = format_date(time); + + let mut sts: Vec = Vec::new(); + writeln!(sts, "AWS4-HMAC-SHA256-PAYLOAD").unwrap(); + writeln!(sts, "{}", date_time_str).unwrap(); + writeln!( + sts, + "{}/{}/{}/aws4_request", + date_str, params.region, params.service_name + ) + .unwrap(); + writeln!(sts, "{}", last_signature).unwrap(); + + let date_header = Header::new(":date", HeaderValue::Timestamp(time.into())); + let mut date_buffer = Vec::new(); + write_headers_to(&[date_header], &mut date_buffer).unwrap(); + writeln!(sts, "{}", sha256_hex_string(&date_buffer)).unwrap(); + write!(sts, "{}", sha256_hex_string(&message_payload)).unwrap(); + sts +} + +/// Signs an Event Stream message with the given `credentials`. +/// +/// Each message's signature incorporates the signature of the previous message (`last_signature`). +/// The very first message incorporates the signature of the top-level request +/// for both HTTP 2 and WebSocket. +pub fn sign_message<'a>( + message: &'a Message, + last_signature: &'a str, + params: &'a SigningParams<'a>, +) -> SigningOutput { + let message_payload = { + let mut payload = Vec::new(); + message.write_to(&mut payload).unwrap(); + payload + }; + sign_payload(Some(message_payload), last_signature, params) +} + +/// Returns a signed empty message +/// +/// Empty signed event stream messages differ from normal signed event stream +/// in that the payload is 0-bytes rather than a nested message. There is no way +/// to create a signed empty message using [`sign_message`]. 
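Because each event-stream frame signs over its predecessor's signature, callers have to thread the returned signature forward. A minimal sketch of that chaining, assuming placeholder credentials and an `initial_request_signature` obtained from signing the initial HTTP request:

```rust
use aws_sigv4::event_stream::{sign_message, SigningParams};
use aws_smithy_eventstream::frame::Message;
use std::time::SystemTime;

// Placeholder credentials/region/service, as in the module doc above.
let params = SigningParams::builder()
    .access_key("example access key")
    .secret_key("example secret key")
    .region("us-east-1")
    .service_name("exampleservice")
    .time(SystemTime::now())
    .settings(())
    .build()
    .unwrap();

// Assumed to come from signing the initial HTTP request.
let initial_request_signature = String::from("...");

// Frame 1 signs over the initial request's signature...
let (signed1, sig1) =
    sign_message(&Message::new(&b"frame one"[..]), &initial_request_signature, &params).into_parts();
// ...and frame 2 signs over frame 1's signature, and so on.
let (signed2, _sig2) =
    sign_message(&Message::new(&b"frame two"[..]), &sig1, &params).into_parts();
// `signed1` and `signed2` are the wrapper frames to put on the wire.
```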
+pub fn sign_empty_message<'a>( + last_signature: &'a str, + params: &'a SigningParams<'a>, +) -> SigningOutput { + sign_payload(None, last_signature, params) +} + +fn sign_payload<'a>( + message_payload: Option>, + last_signature: &'a str, + params: &'a SigningParams<'a>, +) -> SigningOutput { + // Truncate the sub-seconds up front since the timestamp written to the signed message header + // needs to exactly match the string formatted timestamp, which doesn't include sub-seconds. + let time = truncate_subsecs(params.time); + + let signing_key = + generate_signing_key(params.secret_key, time, params.region, params.service_name); + let string_to_sign = calculate_string_to_sign( + message_payload.as_ref().map(|v| &v[..]).unwrap_or(&[]), + last_signature, + time, + params, + ); + let signature = calculate_signature(signing_key, &string_to_sign); + + // Generate the signed wrapper event frame + SigningOutput::new( + Message::new(message_payload.map(Bytes::from).unwrap_or_else(Bytes::new)) + .add_header(Header::new( + ":chunk-signature", + HeaderValue::ByteArray(hex::decode(&signature).unwrap().into()), + )) + .add_header(Header::new(":date", HeaderValue::Timestamp(time.into()))), + signature, + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::time::{Duration, UNIX_EPOCH}; + + #[test] + fn string_to_sign() { + let message_to_sign = Message::new(&b"test payload"[..]).add_header(Header::new( + "some-header", + HeaderValue::String("value".into()), + )); + let mut message_payload = Vec::new(); + message_to_sign.write_to(&mut message_payload).unwrap(); + + let params = SigningParams { + access_key: "fake access key", + secret_key: "fake secret key", + security_token: None, + region: "us-east-1", + service_name: "testservice", + time: (UNIX_EPOCH + Duration::new(123_456_789_u64, 1234u32)).into(), + settings: (), + }; + + let expected = "\ + AWS4-HMAC-SHA256-PAYLOAD\n\ + 19731129T213309Z\n\ + 19731129/us-east-1/testservice/aws4_request\n\ + be1f8c7d79ef8e1abc5254a2c70e4da3bfaf4f07328f527444e1fc6ea67273e2\n\ + 0c0e3b3bf66b59b976181bd7d401927bbd624107303c713fd1e5f3d3c8dd1b1e\n\ + f2eba0f2e95967ee9fbc6db5e678d2fd599229c0d04b11e4fc8e0f2a02a806c6\ + "; + + let last_signature = sha256_hex_string(b"last message sts"); + assert_eq!( + expected, + std::str::from_utf8(&calculate_string_to_sign( + &message_payload, + &last_signature, + params.time, + ¶ms + )) + .unwrap() + ); + } + + #[test] + fn sign() { + let message_to_sign = Message::new(&b"test payload"[..]).add_header(Header::new( + "some-header", + HeaderValue::String("value".into()), + )); + let params = SigningParams { + access_key: "fake access key", + secret_key: "fake secret key", + security_token: None, + region: "us-east-1", + service_name: "testservice", + time: (UNIX_EPOCH + Duration::new(123_456_789_u64, 1234u32)).into(), + settings: (), + }; + + let last_signature = sha256_hex_string(b"last message sts"); + let (signed, signature) = + sign_message(&message_to_sign, &last_signature, ¶ms).into_parts(); + assert_eq!(":chunk-signature", signed.headers()[0].name().as_str()); + if let HeaderValue::ByteArray(bytes) = signed.headers()[0].value() { + assert_eq!(signature, hex::encode(bytes)); + } else { + panic!("expected byte array for :chunk-signature header"); + } + assert_eq!(":date", signed.headers()[1].name().as_str()); + if let HeaderValue::Timestamp(value) = signed.headers()[1].value() { + assert_eq!(123_456_789_i64, value.secs()); + // The subseconds should have been truncated off + assert_eq!(0, value.subsec_nanos()); + } else { + 
panic!("expected timestamp for :date header"); + } + } +} diff --git a/patch/aws-sigv4/src/http_request/canonical_request.rs b/patch/aws-sigv4/src/http_request/canonical_request.rs new file mode 100644 index 0000000000000..1d647aaeb1a44 --- /dev/null +++ b/patch/aws-sigv4/src/http_request/canonical_request.rs @@ -0,0 +1,744 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use super::query_writer::QueryWriter; +use super::{Error, PayloadChecksumKind, SignableBody, SignatureLocation, SigningParams}; +use crate::date_time::{format_date, format_date_time}; +use crate::http_request::sign::SignableRequest; +use crate::http_request::url_escape::percent_encode_path; +use crate::http_request::PercentEncodingMode; +use crate::sign::sha256_hex_string; +use http::header::{HeaderName, HOST, USER_AGENT}; +use http::{HeaderMap, HeaderValue, Method, Uri}; +use std::borrow::Cow; +use std::cmp::Ordering; +use std::convert::TryFrom; +use std::fmt; +use std::fmt::Formatter; +use std::str::FromStr; +use std::time::SystemTime; + +pub(crate) mod header { + pub(crate) const X_AMZ_CONTENT_SHA_256: &str = "x-amz-content-sha256"; + pub(crate) const X_AMZ_DATE: &str = "x-amz-date"; + pub(crate) const X_AMZ_SECURITY_TOKEN: &str = "x-amz-security-token"; + pub(crate) const X_AMZ_USER_AGENT: &str = "x-amz-user-agent"; +} + +pub(crate) mod param { + pub(crate) const X_AMZ_ALGORITHM: &str = "X-Amz-Algorithm"; + pub(crate) const X_AMZ_CREDENTIAL: &str = "X-Amz-Credential"; + pub(crate) const X_AMZ_DATE: &str = "X-Amz-Date"; + pub(crate) const X_AMZ_EXPIRES: &str = "X-Amz-Expires"; + pub(crate) const X_AMZ_SECURITY_TOKEN: &str = "X-Amz-Security-Token"; + pub(crate) const X_AMZ_SIGNED_HEADERS: &str = "X-Amz-SignedHeaders"; + pub(crate) const X_AMZ_SIGNATURE: &str = "X-Amz-Signature"; +} + +pub(crate) const HMAC_256: &str = "AWS4-HMAC-SHA256"; + +const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD"; + +#[derive(Debug, PartialEq)] +pub(super) struct HeaderValues<'a> { + pub(super) content_sha256: Cow<'a, str>, + pub(super) date_time: String, + pub(super) security_token: Option<&'a str>, + pub(super) signed_headers: SignedHeaders, +} + +#[derive(Debug, PartialEq)] +pub(super) struct QueryParamValues<'a> { + pub(super) algorithm: &'static str, + pub(super) content_sha256: Cow<'a, str>, + pub(super) credential: String, + pub(super) date_time: String, + pub(super) expires: String, + pub(super) security_token: Option<&'a str>, + pub(super) signed_headers: SignedHeaders, +} + +#[derive(Debug, PartialEq)] +pub(super) enum SignatureValues<'a> { + Headers(HeaderValues<'a>), + QueryParams(QueryParamValues<'a>), +} + +impl<'a> SignatureValues<'a> { + pub(super) fn signed_headers(&self) -> &SignedHeaders { + match self { + SignatureValues::Headers(values) => &values.signed_headers, + SignatureValues::QueryParams(values) => &values.signed_headers, + } + } + + fn content_sha256(&self) -> &str { + match self { + SignatureValues::Headers(values) => &values.content_sha256, + SignatureValues::QueryParams(values) => &values.content_sha256, + } + } + + pub(super) fn as_headers(&self) -> Option<&HeaderValues<'_>> { + match self { + SignatureValues::Headers(values) => Some(values), + _ => None, + } + } + + pub(super) fn into_query_params(self) -> Result, Self> { + match self { + SignatureValues::QueryParams(values) => Ok(values), + _ => Err(self), + } + } +} + +#[derive(Debug, PartialEq)] +pub(super) struct CanonicalRequest<'a> { + pub(super) method: &'a Method, + pub(super) 
path: Cow<'a, str>, + pub(super) params: Option, + pub(super) headers: HeaderMap, + pub(super) values: SignatureValues<'a>, +} + +impl<'a> CanonicalRequest<'a> { + /// Construct a CanonicalRequest from a [`SignableRequest`] and [`SigningParams`]. + /// + /// The returned canonical request includes information required for signing as well + /// as query parameters or header values that go along with the signature in a request. + /// + /// ## Behavior + /// + /// There are several settings which alter signing behavior: + /// - If a `security_token` is provided as part of the credentials it will be included in the signed headers + /// - If `settings.percent_encoding_mode` specifies double encoding, `%` in the URL will be re-encoded as `%25` + /// - If `settings.payload_checksum_kind` is XAmzSha256, add a x-amz-content-sha256 with the body + /// checksum. This is the same checksum used as the "payload_hash" in the canonical request + /// - `settings.signature_location` determines where the signature will be placed in a request, + /// and also alters the kinds of signing values that go along with it in the request. + pub(super) fn from<'b>( + req: &'b SignableRequest<'b>, + params: &'b SigningParams<'b>, + ) -> Result, Error> { + // Path encoding: if specified, re-encode % as %25 + // Set method and path into CanonicalRequest + let path = req.uri().path(); + let path = match params.settings.percent_encoding_mode { + // The string is already URI encoded, we don't need to encode everything again, just `%` + PercentEncodingMode::Double => Cow::Owned(percent_encode_path(path)), + PercentEncodingMode::Single => Cow::Borrowed(path), + }; + let payload_hash = Self::payload_hash(req.body()); + + let date_time = format_date_time(params.time); + let (signed_headers, canonical_headers) = + Self::headers(req, params, &payload_hash, &date_time)?; + let signed_headers = SignedHeaders::new(signed_headers); + let values = match params.settings.signature_location { + SignatureLocation::Headers => SignatureValues::Headers(HeaderValues { + content_sha256: payload_hash, + date_time, + security_token: params.security_token, + signed_headers, + }), + SignatureLocation::QueryParams => SignatureValues::QueryParams(QueryParamValues { + algorithm: "AWS4-HMAC-SHA256", + content_sha256: payload_hash, + credential: format!( + "{}/{}/{}/{}/aws4_request", + params.access_key, + format_date(params.time), + params.region, + params.service_name, + ), + date_time, + expires: params + .settings + .expires_in + .expect("presigning requires expires_in") + .as_secs() + .to_string(), + security_token: params.security_token, + signed_headers, + }), + }; + let creq = CanonicalRequest { + method: req.method(), + path, + params: Self::params(req.uri(), &values), + headers: canonical_headers, + values, + }; + Ok(creq) + } + + fn headers( + req: &SignableRequest<'_>, + params: &SigningParams<'_>, + payload_hash: &str, + date_time: &str, + ) -> Result<(Vec, HeaderMap), Error> { + // Header computation: + // The canonical request will include headers not present in the input. 
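As the presigning branch above shows, `SignatureLocation::QueryParams` requires `expires_in` to be set, otherwise `CanonicalRequest::from` panics with "presigning requires expires_in". A hedged sketch of settings for that path, assuming `SigningSettings` exposes the fields referenced in this hunk:

```rust
use std::time::Duration;

// Sketch: query-parameter (presigned URL) signing settings.
let settings = SigningSettings {
    signature_location: SignatureLocation::QueryParams,
    expires_in: Some(Duration::from_secs(900)), // value is illustrative
    ..Default::default()
};
```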
We need to clone and + // normalize the headers from the original request and add: + // - host + // - x-amz-date + // - x-amz-security-token (if provided) + // - x-amz-content-sha256 (if requested by signing settings) + let mut canonical_headers = HeaderMap::with_capacity(req.headers().len()); + for (name, value) in req.headers().iter() { + // Header names and values need to be normalized according to Step 4 of https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + // Using append instead of insert means this will not clobber headers that have the same lowercased name + canonical_headers.append( + HeaderName::from_str(&name.as_str().to_lowercase())?, + normalize_header_value(value), + ); + } + + Self::insert_host_header(&mut canonical_headers, req.uri()); + + if params.settings.signature_location == SignatureLocation::Headers { + Self::insert_date_header(&mut canonical_headers, date_time); + + if let Some(security_token) = params.security_token { + let mut sec_header = HeaderValue::from_str(security_token)?; + sec_header.set_sensitive(true); + canonical_headers.insert(header::X_AMZ_SECURITY_TOKEN, sec_header); + } + + if params.settings.payload_checksum_kind == PayloadChecksumKind::XAmzSha256 { + let header = HeaderValue::from_str(payload_hash)?; + canonical_headers.insert(header::X_AMZ_CONTENT_SHA_256, header); + } + } + + let mut signed_headers = Vec::with_capacity(canonical_headers.len()); + for (name, _) in &canonical_headers { + // The user agent header should not be signed because it may be altered by proxies + if name == USER_AGENT { + continue; + } + if params.settings.signature_location == SignatureLocation::QueryParams { + // The X-Amz-User-Agent header should not be signed if this is for a presigned URL + if name == HeaderName::from_static(header::X_AMZ_USER_AGENT) { + continue; + } + } + signed_headers.push(CanonicalHeaderName(name.clone())); + } + Ok((signed_headers, canonical_headers)) + } + + fn payload_hash<'b>(body: &'b SignableBody<'b>) -> Cow<'b, str> { + // Payload hash computation + // + // Based on the input body, set the payload_hash of the canonical request: + // Either: + // - compute a hash + // - use the precomputed hash + // - use `UnsignedPayload` + match body { + SignableBody::Bytes(data) => Cow::Owned(sha256_hex_string(data)), + SignableBody::Precomputed(digest) => Cow::Borrowed(digest.as_str()), + SignableBody::UnsignedPayload => Cow::Borrowed(UNSIGNED_PAYLOAD), + } + } + + fn params(uri: &Uri, values: &SignatureValues<'_>) -> Option { + let mut params: Vec<(Cow<'_, str>, Cow<'_, str>)> = + form_urlencoded::parse(uri.query().unwrap_or_default().as_bytes()).collect(); + fn add_param<'a>(params: &mut Vec<(Cow<'a, str>, Cow<'a, str>)>, k: &'a str, v: &'a str) { + params.push((Cow::Borrowed(k), Cow::Borrowed(v))); + } + + if let SignatureValues::QueryParams(values) = values { + add_param(&mut params, param::X_AMZ_DATE, &values.date_time); + add_param(&mut params, param::X_AMZ_EXPIRES, &values.expires); + add_param(&mut params, param::X_AMZ_ALGORITHM, values.algorithm); + add_param(&mut params, param::X_AMZ_CREDENTIAL, &values.credential); + add_param( + &mut params, + param::X_AMZ_SIGNED_HEADERS, + values.signed_headers.as_str(), + ); + if let Some(security_token) = values.security_token { + add_param(&mut params, param::X_AMZ_SECURITY_TOKEN, security_token); + } + } + // Sort by param name, and then by param value + params.sort(); + + let mut query = QueryWriter::new(uri); + query.clear_params(); + for (key, value) in params { + 
query.insert(&key, &value); + } + + let query = query.build_query(); + if query.is_empty() { + None + } else { + Some(query) + } + } + + fn insert_host_header( + canonical_headers: &mut HeaderMap, + uri: &Uri, + ) -> HeaderValue { + match canonical_headers.get(&HOST) { + Some(header) => header.clone(), + None => { + let authority = uri + .authority() + .expect("request uri authority must be set for signing"); + let header = HeaderValue::try_from(authority.as_str()) + .expect("endpoint must contain valid header characters"); + canonical_headers.insert(HOST, header.clone()); + header + } + } + } + + fn insert_date_header( + canonical_headers: &mut HeaderMap, + date_time: &str, + ) -> HeaderValue { + let x_amz_date = HeaderName::from_static(header::X_AMZ_DATE); + let date_header = HeaderValue::try_from(date_time).expect("date is valid header value"); + canonical_headers.insert(x_amz_date, date_header.clone()); + date_header + } +} + +impl<'a> fmt::Display for CanonicalRequest<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "{}", self.method)?; + writeln!(f, "{}", self.path)?; + writeln!(f, "{}", self.params.as_deref().unwrap_or(""))?; + // write out _all_ the headers + for header in &self.values.signed_headers().headers { + // a missing header is a bug, so we should panic. + let value = &self.headers[&header.0]; + write!(f, "{}:", header.0.as_str())?; + writeln!( + f, + "{}", + std::str::from_utf8(value.as_bytes()) + .expect("SDK request header values are valid UTF-8") + )?; + } + writeln!(f)?; + // write out the signed headers + write!(f, "{}", self.values.signed_headers().as_str())?; + writeln!(f)?; + write!(f, "{}", self.values.content_sha256())?; + Ok(()) + } +} + +/// A regex for matching on 2 or more spaces that acts on bytes. +static MULTIPLE_SPACES: once_cell::sync::Lazy = + once_cell::sync::Lazy::new(|| regex::bytes::Regex::new(r" {2,}").unwrap()); + +/// Removes excess spaces before and after a given byte string, and converts multiple sequential +/// spaces to a single space e.g. " Some example text " -> "Some example text". +/// +/// This function ONLY affects spaces and not other kinds of whitespace. +fn trim_all(text: &[u8]) -> Cow<'_, [u8]> { + // The normal trim function will trim non-breaking spaces and other various whitespace chars. + // S3 ONLY trims spaces so we use trim_matches to trim spaces only + let text = trim_spaces_from_byte_string(text); + MULTIPLE_SPACES.replace_all(text, " ".as_bytes()) +} + +/// Removes excess spaces before and after a given byte string by returning a subset of those bytes. +/// Will return an empty slice if a string is composed entirely of whitespace. 
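To make the trimming rules above concrete: only ASCII spaces are stripped from the ends and collapsed internally; other whitespace such as tabs is preserved. A hypothetical in-crate test (the test name is made up; `trim_all` is the private helper defined above):

```rust
// Leading/trailing spaces are removed, interior runs of spaces collapse to one,
// and non-space whitespace is left untouched.
#[test]
fn trim_all_example() {
    assert_eq!(&b"Some example text"[..], trim_all(b"  Some   example  text ").as_ref());
    assert_eq!(&b"a\tb"[..], trim_all(b" a\tb ").as_ref());
}
```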
+fn trim_spaces_from_byte_string(bytes: &[u8]) -> &[u8] { + let starting_index = bytes.iter().position(|b| *b != b' ').unwrap_or(0); + let ending_offset = bytes.iter().rev().position(|b| *b != b' ').unwrap_or(0); + let ending_index = bytes.len() - ending_offset; + &bytes[starting_index..ending_index] +} + +/// Works just like [trim_all] but acts on HeaderValues instead of bytes +fn normalize_header_value(header_value: &HeaderValue) -> HeaderValue { + let trimmed_value = trim_all(header_value.as_bytes()); + // This can't fail because we started with a valid HeaderValue and then only trimmed spaces + HeaderValue::from_bytes(&trimmed_value).unwrap() +} + +#[derive(Debug, PartialEq, Default)] +pub(super) struct SignedHeaders { + headers: Vec, + formatted: String, +} + +impl SignedHeaders { + fn new(mut headers: Vec) -> Self { + headers.sort(); + let formatted = Self::fmt(&headers); + SignedHeaders { headers, formatted } + } + + fn fmt(headers: &[CanonicalHeaderName]) -> String { + let mut value = String::new(); + let mut iter = headers.iter().peekable(); + while let Some(next) = iter.next() { + value += next.0.as_str(); + if iter.peek().is_some() { + value.push(';'); + } + } + value + } + + pub(super) fn as_str(&self) -> &str { + &self.formatted + } +} + +impl fmt::Display for SignedHeaders { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.formatted) + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +struct CanonicalHeaderName(HeaderName); + +impl PartialOrd for CanonicalHeaderName { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for CanonicalHeaderName { + fn cmp(&self, other: &Self) -> Ordering { + self.0.as_str().cmp(other.0.as_str()) + } +} + +#[derive(PartialEq, Debug, Clone)] +pub(super) struct SigningScope<'a> { + pub(super) time: SystemTime, + pub(super) region: &'a str, + pub(super) service: &'a str, +} + +impl<'a> fmt::Display for SigningScope<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{}/{}/{}/aws4_request", + format_date(self.time), + self.region, + self.service + ) + } +} + +#[derive(PartialEq, Debug)] +pub(super) struct StringToSign<'a> { + pub(super) scope: SigningScope<'a>, + pub(super) time: SystemTime, + pub(super) region: &'a str, + pub(super) service: &'a str, + pub(super) hashed_creq: &'a str, +} + +impl<'a> StringToSign<'a> { + pub(crate) fn new( + time: SystemTime, + region: &'a str, + service: &'a str, + hashed_creq: &'a str, + ) -> Self { + let scope = SigningScope { + time, + region, + service, + }; + Self { + scope, + time, + region, + service, + hashed_creq, + } + } +} + +impl<'a> fmt::Display for StringToSign<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "{}\n{}\n{}\n{}", + HMAC_256, + format_date_time(self.time), + self.scope.to_string(), + self.hashed_creq + ) + } +} + +#[cfg(test)] +mod tests { + use crate::date_time::test_parsers::parse_date_time; + use crate::http_request::canonical_request::{ + normalize_header_value, trim_all, CanonicalRequest, SigningScope, StringToSign, + }; + use crate::http_request::query_writer::QueryWriter; + use crate::http_request::test::{test_canonical_request, test_request, test_sts}; + use crate::http_request::{ + PayloadChecksumKind, SignableBody, SignableRequest, SigningSettings, + }; + use crate::http_request::{SignatureLocation, SigningParams}; + use crate::sign::sha256_hex_string; + use http::HeaderValue; + use http::Uri; + use pretty_assertions::assert_eq; + use 
proptest::proptest; + use std::time::Duration; + + fn signing_params(settings: SigningSettings) -> SigningParams<'static> { + SigningParams { + access_key: "test-access-key", + secret_key: "test-secret-key", + security_token: None, + region: "test-region", + service_name: "testservicename", + time: parse_date_time("20210511T154045Z").unwrap(), + settings, + } + } + + #[test] + fn test_set_xamz_sha_256() { + let req = test_request("get-vanilla-query-order-key-case"); + let req = SignableRequest::from(&req); + let settings = SigningSettings { + payload_checksum_kind: PayloadChecksumKind::XAmzSha256, + ..Default::default() + }; + let mut signing_params = signing_params(settings); + let creq = CanonicalRequest::from(&req, &signing_params).unwrap(); + assert_eq!( + creq.values.content_sha256(), + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ); + // assert that the sha256 header was added + assert_eq!( + creq.values.signed_headers().as_str(), + "host;x-amz-content-sha256;x-amz-date" + ); + + signing_params.settings.payload_checksum_kind = PayloadChecksumKind::NoHeader; + let creq = CanonicalRequest::from(&req, &signing_params).unwrap(); + assert_eq!(creq.values.signed_headers().as_str(), "host;x-amz-date"); + } + + #[test] + fn test_unsigned_payload() { + let req = test_request("get-vanilla-query-order-key-case"); + let req = SignableRequest::new( + req.method(), + req.uri(), + req.headers(), + SignableBody::UnsignedPayload, + ); + let settings = SigningSettings { + payload_checksum_kind: PayloadChecksumKind::XAmzSha256, + ..Default::default() + }; + let signing_params = signing_params(settings); + let creq = CanonicalRequest::from(&req, &signing_params).unwrap(); + assert_eq!(creq.values.content_sha256(), "UNSIGNED-PAYLOAD"); + assert!(creq.to_string().ends_with("UNSIGNED-PAYLOAD")); + } + + #[test] + fn test_precomputed_payload() { + let payload_hash = "44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072"; + let req = test_request("get-vanilla-query-order-key-case"); + let req = SignableRequest::new( + req.method(), + req.uri(), + req.headers(), + SignableBody::Precomputed(String::from(payload_hash)), + ); + let settings = SigningSettings { + payload_checksum_kind: PayloadChecksumKind::XAmzSha256, + ..Default::default() + }; + let signing_params = signing_params(settings); + let creq = CanonicalRequest::from(&req, &signing_params).unwrap(); + assert_eq!(creq.values.content_sha256(), payload_hash); + assert!(creq.to_string().ends_with(payload_hash)); + } + + #[test] + fn test_generate_scope() { + let expected = "20150830/us-east-1/iam/aws4_request\n"; + let scope = SigningScope { + time: parse_date_time("20150830T123600Z").unwrap(), + region: "us-east-1", + service: "iam", + }; + assert_eq!(format!("{}\n", scope.to_string()), expected); + } + + #[test] + fn test_string_to_sign() { + let time = parse_date_time("20150830T123600Z").unwrap(); + let creq = test_canonical_request("get-vanilla-query-order-key-case"); + let expected_sts = test_sts("get-vanilla-query-order-key-case"); + let encoded = sha256_hex_string(creq.as_bytes()); + + let actual = StringToSign::new(time, "us-east-1", "service", &encoded); + assert_eq!(expected_sts, actual.to_string()); + } + + #[test] + fn test_digest_of_canonical_request() { + let creq = test_canonical_request("get-vanilla-query-order-key-case"); + let expected = "816cd5b414d056048ba4f7c5386d6e0533120fb1fcfa93762cf0fc39e2cf19e0"; + let actual = sha256_hex_string(creq.as_bytes()); + assert_eq!(expected, actual); + } + + #[test] 
+ fn test_double_url_encode_path() { + let req = test_request("double-encode-path"); + let req = SignableRequest::from(&req); + let signing_params = signing_params(SigningSettings::default()); + let creq = CanonicalRequest::from(&req, &signing_params).unwrap(); + + let expected = test_canonical_request("double-encode-path"); + let actual = format!("{}", creq); + assert_eq!(actual, expected); + } + + #[test] + fn test_double_url_encode() { + let req = test_request("double-url-encode"); + let req = SignableRequest::from(&req); + let signing_params = signing_params(SigningSettings::default()); + let creq = CanonicalRequest::from(&req, &signing_params).unwrap(); + + let expected = test_canonical_request("double-url-encode"); + let actual = format!("{}", creq); + assert_eq!(actual, expected); + } + + #[test] + fn test_tilde_in_uri() { + let req = http::Request::builder() + .uri("https://s3.us-east-1.amazonaws.com/my-bucket?list-type=2&prefix=~objprefix&single&k=&unreserved=-_.~").body("").unwrap(); + let req = SignableRequest::from(&req); + let signing_params = signing_params(SigningSettings::default()); + let creq = CanonicalRequest::from(&req, &signing_params).unwrap(); + assert_eq!( + Some("k=&list-type=2&prefix=~objprefix&single=&unreserved=-_.~"), + creq.params.as_deref(), + ); + } + + #[test] + fn test_signing_urls_with_percent_encoded_query_strings() { + let all_printable_ascii_chars: String = (32u8..127).map(char::from).collect(); + let uri = Uri::from_static("https://s3.us-east-1.amazonaws.com/my-bucket"); + + let mut query_writer = QueryWriter::new(&uri); + query_writer.insert("list-type", "2"); + query_writer.insert("prefix", &all_printable_ascii_chars); + + let req = http::Request::builder() + .uri(query_writer.build_uri()) + .body("") + .unwrap(); + let req = SignableRequest::from(&req); + let signing_params = signing_params(SigningSettings::default()); + let creq = CanonicalRequest::from(&req, &signing_params).unwrap(); + + let expected = "list-type=2&prefix=%20%21%22%23%24%25%26%27%28%29%2A%2B%2C-.%2F0123456789%3A%3B%3C%3D%3E%3F%40ABCDEFGHIJKLMNOPQRSTUVWXYZ%5B%5C%5D%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~"; + let actual = creq.params.unwrap(); + assert_eq!(expected, actual); + } + + // It should exclude user-agent and x-amz-user-agent headers from presigning + #[test] + fn presigning_header_exclusion() { + let request = http::Request::builder() + .uri("https://some-endpoint.some-region.amazonaws.com") + .header("content-type", "application/xml") + .header("content-length", "0") + .header("user-agent", "test-user-agent") + .header("x-amz-user-agent", "test-user-agent") + .body("") + .unwrap(); + let request = SignableRequest::from(&request); + + let settings = SigningSettings { + signature_location: SignatureLocation::QueryParams, + expires_in: Some(Duration::from_secs(30)), + ..Default::default() + }; + + let signing_params = signing_params(settings); + let canonical = CanonicalRequest::from(&request, &signing_params).unwrap(); + + let values = canonical.values.into_query_params().unwrap(); + assert_eq!( + "content-length;content-type;host", + values.signed_headers.as_str() + ); + } + + #[test] + fn test_trim_all_handles_spaces_correctly() { + // Can't compare a byte array to a Cow so we convert both to slices before comparing + let expected = &b"Some example text"[..]; + let actual = &trim_all(b" Some example text ")[..]; + + assert_eq!(expected, actual); + } + + #[test] + fn test_trim_all_ignores_other_forms_of_whitespace() { + // Can't compare a byte array to a Cow so we 
convert both to slices before comparing + let expected = &b"\t\xA0Some\xA0 example \xA0text\xA0\n"[..]; + // \xA0 is a non-breaking space character + let actual = &trim_all(b"\t\xA0Some\xA0 example \xA0text\xA0\n")[..]; + + assert_eq!(expected, actual); + } + + #[test] + fn trim_spaces_works_on_single_characters() { + assert_eq!(trim_all(b"2").as_ref(), b"2"); + } + + proptest! { + #[test] + fn test_trim_all_doesnt_elongate_strings(s in ".*") { + assert!(trim_all(s.as_bytes()).len() <= s.len()) + } + + #[test] + fn test_normalize_header_value_doesnt_panic(v in (".*")) { + if let Ok(header_value) = HeaderValue::from_maybe_shared(v) { + let _ = normalize_header_value(&header_value); + } + } + + #[test] + fn test_trim_all_does_nothing_when_there_are_no_spaces(s in "[^ ]*") { + assert_eq!(trim_all(s.as_bytes()).as_ref(), s.as_bytes()); + } + } +} diff --git a/patch/aws-sigv4/src/http_request/mod.rs b/patch/aws-sigv4/src/http_request/mod.rs new file mode 100644 index 0000000000000..0b80e71137296 --- /dev/null +++ b/patch/aws-sigv4/src/http_request/mod.rs @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Utilities to sign HTTP requests. +//! +//! # Example: Signing an HTTP request +//! +//! ```rust +//! # fn test() -> Result<(), aws_sigv4::http_request::Error> { +//! use aws_sigv4::http_request::{sign, SigningSettings, SigningParams, SignableRequest}; +//! use http; +//! use std::time::SystemTime; +//! +//! // Create the request to sign +//! let mut request = http::Request::builder() +//! .uri("https://some-endpoint.some-region.amazonaws.com") +//! .body("") +//! .unwrap(); +//! +//! // Set up information and settings for the signing +//! let signing_settings = SigningSettings::default(); +//! let signing_params = SigningParams::builder() +//! .access_key("example access key") +//! .secret_key("example secret key") +//! .region("us-east-1") +//! .service_name("exampleservice") +//! .time(SystemTime::now()) +//! .settings(signing_settings) +//! .build() +//! .unwrap(); +//! // Convert the HTTP request into a signable request +//! let signable_request = SignableRequest::from(&request); +//! +//! // Sign and then apply the signature to the request +//! let (signing_instructions, _signature) = sign(signable_request, &signing_params)?.into_parts(); +//! signing_instructions.apply_to_request(&mut request); +//! # Ok(()) +//! # } +//! ``` +//! + +mod canonical_request; +mod query_writer; +mod settings; +mod sign; +mod url_escape; + +#[cfg(test)] +pub(crate) mod test; + +pub use settings::{ + PayloadChecksumKind, PercentEncodingMode, SignatureLocation, SigningParams, SigningSettings, +}; +pub use sign::{sign, Error, SignableBody, SignableRequest}; diff --git a/patch/aws-sigv4/src/http_request/query_writer.rs b/patch/aws-sigv4/src/http_request/query_writer.rs new file mode 100644 index 0000000000000..d67891c44cb57 --- /dev/null +++ b/patch/aws-sigv4/src/http_request/query_writer.rs @@ -0,0 +1,165 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use crate::http_request::url_escape::percent_encode_query; +use http::Uri; + +/// Utility for updating the query string in a [`Uri`]. +pub(super) struct QueryWriter { + base_uri: Uri, + new_path_and_query: String, + prefix: Option, +} + +impl QueryWriter { + /// Creates a new `QueryWriter` based off the given `uri`. 
+ pub(super) fn new(uri: &Uri) -> Self { + let new_path_and_query = uri + .path_and_query() + .map(|pq| pq.to_string()) + .unwrap_or_default(); + let prefix = if uri.query().is_none() { + Some('?') + } else if !uri.query().unwrap_or_default().is_empty() { + Some('&') + } else { + None + }; + QueryWriter { + base_uri: uri.clone(), + new_path_and_query, + prefix, + } + } + + /// Clears all query parameters. + pub(super) fn clear_params(&mut self) { + if let Some(index) = self.new_path_and_query.find('?') { + self.new_path_and_query.truncate(index); + self.prefix = Some('?'); + } + } + + /// Inserts a new query parameter. The key and value are percent encoded + /// by `QueryWriter`. Passing in percent encoded values will result in double encoding. + pub(super) fn insert(&mut self, k: &str, v: &str) { + if let Some(prefix) = self.prefix { + self.new_path_and_query.push(prefix); + } + self.prefix = Some('&'); + self.new_path_and_query.push_str(&percent_encode_query(k)); + self.new_path_and_query.push('='); + + self.new_path_and_query.push_str(&percent_encode_query(v)); + } + + /// Returns just the built query string. + pub(super) fn build_query(self) -> String { + self.build_uri().query().unwrap_or_default().to_string() + } + + /// Returns a full [`Uri`] with the query string updated. + pub(super) fn build_uri(self) -> Uri { + let mut parts = self.base_uri.into_parts(); + parts.path_and_query = Some( + self.new_path_and_query + .parse() + .expect("adding query should not invalidate URI"), + ); + Uri::from_parts(parts).expect("a valid URL in should always produce a valid URL out") + } +} + +#[cfg(test)] +mod test { + use super::QueryWriter; + use http::Uri; + + #[test] + fn empty_uri() { + let uri = Uri::from_static("http://www.example.com"); + let mut query_writer = QueryWriter::new(&uri); + query_writer.insert("key", "val%ue"); + query_writer.insert("another", "value"); + assert_eq!( + query_writer.build_uri(), + Uri::from_static("http://www.example.com?key=val%25ue&another=value") + ); + } + + #[test] + fn uri_with_path() { + let uri = Uri::from_static("http://www.example.com/path"); + let mut query_writer = QueryWriter::new(&uri); + query_writer.insert("key", "val%ue"); + query_writer.insert("another", "value"); + assert_eq!( + query_writer.build_uri(), + Uri::from_static("http://www.example.com/path?key=val%25ue&another=value") + ); + } + + #[test] + fn uri_with_path_and_query() { + let uri = Uri::from_static("http://www.example.com/path?original=here"); + let mut query_writer = QueryWriter::new(&uri); + query_writer.insert("key", "val%ue"); + query_writer.insert("another", "value"); + assert_eq!( + query_writer.build_uri(), + Uri::from_static( + "http://www.example.com/path?original=here&key=val%25ue&another=value" + ) + ); + } + + #[test] + fn build_query() { + let uri = Uri::from_static("http://www.example.com"); + let mut query_writer = QueryWriter::new(&uri); + query_writer.insert("key", "val%ue"); + query_writer.insert("ano%ther", "value"); + assert_eq!("key=val%25ue&ano%25ther=value", query_writer.build_query()); + } + + #[test] + // This test ensures that the percent encoding applied to queries always produces a valid URI if + // the starting URI is valid + fn doesnt_panic_when_adding_query_to_valid_uri() { + let uri = Uri::from_static("http://www.example.com"); + + let mut problematic_chars = Vec::new(); + + for byte in u8::MIN..=u8::MAX { + match std::str::from_utf8(&[byte]) { + // If we can't make a str from the byte then we certainly can't make a URL from it + Err(_) => { + 
continue; + } + Ok(value) => { + let mut query_writer = QueryWriter::new(&uri); + query_writer.insert("key", value); + + if let Err(_) = std::panic::catch_unwind(|| query_writer.build_uri()) { + problematic_chars.push(char::from(byte)); + }; + } + } + } + + if !problematic_chars.is_empty() { + panic!("we got some bad bytes here: {:#?}", problematic_chars) + } + } + + #[test] + fn clear_params() { + let uri = Uri::from_static("http://www.example.com/path?original=here&foo=1"); + let mut query_writer = QueryWriter::new(&uri); + query_writer.clear_params(); + query_writer.insert("new", "value"); + assert_eq!("new=value", query_writer.build_query()); + } +} diff --git a/patch/aws-sigv4/src/http_request/settings.rs b/patch/aws-sigv4/src/http_request/settings.rs new file mode 100644 index 0000000000000..b353326288145 --- /dev/null +++ b/patch/aws-sigv4/src/http_request/settings.rs @@ -0,0 +1,79 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use std::time::Duration; + +/// HTTP signing parameters +pub type SigningParams<'a> = crate::SigningParams<'a, SigningSettings>; + +/// HTTP-specific signing settings +#[derive(Debug, PartialEq)] +#[non_exhaustive] +pub struct SigningSettings { + /// Specifies how to encode the request URL when signing. Some services do not decode + /// the path prior to checking the signature, requiring clients to actually _double-encode_ + /// the URI in creating the canonical request in order to pass a signature check. + pub percent_encoding_mode: PercentEncodingMode, + + /// Add an additional checksum header + pub payload_checksum_kind: PayloadChecksumKind, + + /// Where to put the signature + pub signature_location: SignatureLocation, + + /// For presigned requests, how long the presigned request is valid for + pub expires_in: Option, +} + +/// HTTP payload checksum type +#[non_exhaustive] +#[derive(Debug, Eq, PartialEq)] +pub enum PayloadChecksumKind { + /// Add x-amz-checksum-sha256 to the canonical request + /// + /// This setting is required for S3 + XAmzSha256, + + /// Do not add an additional header when creating the canonical request + /// + /// This is "normal mode" and will work for services other than S3 + NoHeader, +} + +/// Config value to specify how to encode the request URL when signing. +/// +/// We assume the URI will be encoded _once_ prior to transmission. Some services +/// do not decode the path prior to checking the signature, requiring clients to actually +/// _double-encode_ the URI in creating the canonical request in order to pass a signature check. +#[non_exhaustive] +#[derive(Debug, Eq, PartialEq)] +pub enum PercentEncodingMode { + /// Re-encode the resulting URL (e.g. 
%30 becomes `%2530) + Double, + + /// Take the resulting URL as-is + Single, +} + +impl Default for SigningSettings { + fn default() -> Self { + Self { + percent_encoding_mode: PercentEncodingMode::Double, + payload_checksum_kind: PayloadChecksumKind::NoHeader, + signature_location: SignatureLocation::Headers, + expires_in: None, + } + } +} + +/// Where to place signing values in the HTTP request +#[non_exhaustive] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum SignatureLocation { + /// Place the signature in the request headers + Headers, + /// Place the signature in the request query parameters + QueryParams, +} diff --git a/patch/aws-sigv4/src/http_request/sign.rs b/patch/aws-sigv4/src/http_request/sign.rs new file mode 100644 index 0000000000000..3f83779336592 --- /dev/null +++ b/patch/aws-sigv4/src/http_request/sign.rs @@ -0,0 +1,554 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use super::{PayloadChecksumKind, SignatureLocation}; +use crate::http_request::canonical_request::header; +use crate::http_request::canonical_request::param; +use crate::http_request::canonical_request::{CanonicalRequest, StringToSign, HMAC_256}; +use crate::http_request::query_writer::QueryWriter; +use crate::http_request::SigningParams; +use crate::sign::{calculate_signature, generate_signing_key, sha256_hex_string}; +use crate::SigningOutput; +use http::header::HeaderValue; +use http::{HeaderMap, Method, Uri}; +use std::borrow::Cow; +use std::convert::TryFrom; +use std::error::Error as StdError; +use std::str; + +/// Signing error type +pub type Error = Box; + +/// Represents all of the information necessary to sign an HTTP request. +#[derive(Debug)] +#[non_exhaustive] +pub struct SignableRequest<'a> { + method: &'a Method, + uri: &'a Uri, + headers: &'a HeaderMap, + body: SignableBody<'a>, +} + +impl<'a> SignableRequest<'a> { + /// Creates a new `SignableRequest`. If you have an [`http::Request`], then + /// consider using [`SignableRequest::from`] instead of `new`. + pub fn new( + method: &'a Method, + uri: &'a Uri, + headers: &'a HeaderMap, + body: SignableBody<'a>, + ) -> Self { + Self { + method, + uri, + headers, + body, + } + } + + /// Returns the signable URI + pub fn uri(&self) -> &Uri { + self.uri + } + + /// Returns the signable HTTP method + pub fn method(&self) -> &Method { + self.method + } + + /// Returns the request headers + pub fn headers(&self) -> &HeaderMap { + self.headers + } + + /// Returns the signable body + pub fn body(&self) -> &SignableBody<'_> { + &self.body + } +} + +impl<'a, B> From<&'a http::Request> for SignableRequest<'a> +where + B: 'a, + B: AsRef<[u8]>, +{ + fn from(request: &'a http::Request) -> SignableRequest<'a> { + SignableRequest::new( + request.method(), + request.uri(), + request.headers(), + SignableBody::Bytes(request.body().as_ref()), + ) + } +} + +/// A signable HTTP request body +#[derive(Debug, Clone, Eq, PartialEq)] +#[non_exhaustive] +pub enum SignableBody<'a> { + /// A body composed of a slice of bytes + Bytes(&'a [u8]), + + /// An unsigned payload + /// + /// UnsignedPayload is used for streaming requests where the contents of the body cannot be + /// known prior to signing + UnsignedPayload, + + /// A precomputed body checksum. The checksum should be a SHA256 checksum of the body, + /// lowercase hex encoded. 
Eg: + /// `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` + Precomputed(String), +} + +#[derive(Debug)] +pub struct SigningInstructions { + headers: Option>, + params: Option)>>, +} + +impl SigningInstructions { + fn new( + headers: Option>, + params: Option)>>, + ) -> Self { + Self { headers, params } + } + + pub fn headers(&self) -> Option<&HeaderMap> { + self.headers.as_ref() + } + pub fn take_headers(&mut self) -> Option> { + self.headers.take() + } + + pub fn params(&self) -> Option<&Vec<(&'static str, Cow<'static, str>)>> { + self.params.as_ref() + } + pub fn take_params(&mut self) -> Option)>> { + self.params.take() + } + + pub fn apply_to_request(mut self, request: &mut http::Request) { + if let Some(new_headers) = self.take_headers() { + for (name, value) in new_headers.into_iter() { + request.headers_mut().insert(name.unwrap(), value); + } + } + if let Some(params) = self.take_params() { + let mut query = QueryWriter::new(request.uri()); + for (name, value) in params { + query.insert(name, &value); + } + *request.uri_mut() = query.build_uri(); + } + } +} + +/// Produces a signature for the given `request` and returns instructions +/// that can be used to apply that signature to an HTTP request. +pub fn sign<'a>( + request: SignableRequest<'a>, + params: &'a SigningParams<'a>, +) -> Result, Error> { + tracing::trace!(request = ?request, params = ?params, "signing request"); + match params.settings.signature_location { + SignatureLocation::Headers => { + let (signing_headers, signature) = + calculate_signing_headers(&request, params)?.into_parts(); + Ok(SigningOutput::new( + SigningInstructions::new(Some(signing_headers), None), + signature, + )) + } + SignatureLocation::QueryParams => { + let (params, signature) = calculate_signing_params(&request, params)?; + Ok(SigningOutput::new( + SigningInstructions::new(None, Some(params)), + signature, + )) + } + } +} + +type CalculatedParams = Vec<(&'static str, Cow<'static, str>)>; + +fn calculate_signing_params<'a>( + request: &'a SignableRequest<'a>, + params: &'a SigningParams<'a>, +) -> Result<(CalculatedParams, String), Error> { + let creq = CanonicalRequest::from(request, params)?; + tracing::trace!(canonical_request = %creq); + + let encoded_creq = &sha256_hex_string(creq.to_string().as_bytes()); + let sts = StringToSign::new( + params.time, + params.region, + params.service_name, + encoded_creq, + ); + let signing_key = generate_signing_key( + params.secret_key, + params.time, + params.region, + params.service_name, + ); + let signature = calculate_signature(signing_key, sts.to_string().as_bytes()); + + let values = creq.values.into_query_params().expect("signing with query"); + let mut signing_params = vec![ + (param::X_AMZ_ALGORITHM, Cow::Borrowed(values.algorithm)), + (param::X_AMZ_CREDENTIAL, Cow::Owned(values.credential)), + (param::X_AMZ_DATE, Cow::Owned(values.date_time)), + (param::X_AMZ_EXPIRES, Cow::Owned(values.expires)), + ( + param::X_AMZ_SIGNED_HEADERS, + Cow::Owned(values.signed_headers.as_str().into()), + ), + (param::X_AMZ_SIGNATURE, Cow::Owned(signature.clone())), + ]; + if let Some(security_token) = params.security_token { + signing_params.push(( + param::X_AMZ_SECURITY_TOKEN, + Cow::Owned(security_token.to_string()), + )); + } + Ok((signing_params, signature)) +} + +/// Calculates the signature headers that need to get added to the given `request`. 
+/// +/// `request` MUST NOT contain any of the following headers: +/// - x-amz-date +/// - x-amz-content-sha-256 +/// - x-amz-security-token +fn calculate_signing_headers<'a>( + request: &'a SignableRequest<'a>, + params: &'a SigningParams<'a>, +) -> Result>, Error> { + // Step 1: https://docs.aws.amazon.com/en_pv/general/latest/gr/sigv4-create-canonical-request.html. + let creq = CanonicalRequest::from(request, params)?; + tracing::trace!(canonical_request = %creq); + + // Step 2: https://docs.aws.amazon.com/en_pv/general/latest/gr/sigv4-create-string-to-sign.html. + let encoded_creq = &sha256_hex_string(creq.to_string().as_bytes()); + let sts = StringToSign::new( + params.time, + params.region, + params.service_name, + encoded_creq, + ); + + // Step 3: https://docs.aws.amazon.com/en_pv/general/latest/gr/sigv4-calculate-signature.html + let signing_key = generate_signing_key( + params.secret_key, + params.time, + params.region, + params.service_name, + ); + let signature = calculate_signature(signing_key, sts.to_string().as_bytes()); + + // Step 4: https://docs.aws.amazon.com/en_pv/general/latest/gr/sigv4-add-signature-to-request.html + let values = creq.values.as_headers().expect("signing with headers"); + let mut headers = HeaderMap::new(); + add_header(&mut headers, header::X_AMZ_DATE, &values.date_time); + headers.insert( + "authorization", + build_authorization_header(params.access_key, &creq, sts, &signature), + ); + if params.settings.payload_checksum_kind == PayloadChecksumKind::XAmzSha256 { + add_header( + &mut headers, + header::X_AMZ_CONTENT_SHA_256, + &values.content_sha256, + ); + } + if let Some(security_token) = values.security_token { + add_header(&mut headers, header::X_AMZ_SECURITY_TOKEN, security_token); + } + Ok(SigningOutput::new(headers, signature)) +} + +fn add_header(map: &mut HeaderMap, key: &'static str, value: &str) { + map.insert(key, HeaderValue::try_from(value).expect(key)); +} + +// add signature to authorization header +// Authorization: algorithm Credential=access key ID/credential scope, SignedHeaders=SignedHeaders, Signature=signature +fn build_authorization_header( + access_key: &str, + creq: &CanonicalRequest<'_>, + sts: StringToSign<'_>, + signature: &str, +) -> HeaderValue { + let mut value = HeaderValue::try_from(format!( + "{} Credential={}/{}, SignedHeaders={}, Signature={}", + HMAC_256, + access_key, + sts.scope.to_string(), + creq.values.signed_headers().as_str(), + signature + )) + .unwrap(); + value.set_sensitive(true); + value +} + +#[cfg(test)] +mod tests { + use super::{sign, SigningInstructions}; + use crate::date_time::test_parsers::parse_date_time; + use crate::http_request::sign::SignableRequest; + use crate::http_request::test::{ + make_headers_comparable, test_request, test_signed_request, + test_signed_request_query_params, + }; + use crate::http_request::{SignatureLocation, SigningParams, SigningSettings}; + use http::{HeaderMap, HeaderValue}; + use pretty_assertions::assert_eq; + use std::borrow::Cow; + use std::time::Duration; + + macro_rules! 
assert_req_eq { + ($a:tt, $b:tt) => { + make_headers_comparable(&mut $a); + make_headers_comparable(&mut $b); + assert_eq!(format!("{:?}", $a), format!("{:?}", $b)) + }; + } + + #[test] + fn test_sign_vanilla_with_headers() { + let settings = SigningSettings::default(); + let params = SigningParams { + access_key: "AKIDEXAMPLE", + secret_key: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + security_token: None, + region: "us-east-1", + service_name: "service", + time: parse_date_time("20150830T123600Z").unwrap(), + settings, + }; + + let original = test_request("get-vanilla-query-order-key-case"); + let signable = SignableRequest::from(&original); + let out = sign(signable, ¶ms).unwrap(); + assert_eq!( + "b97d918cfa904a5beff61c982a1b6f458b799221646efd99d3219ec94cdf2500", + out.signature + ); + + let mut signed = original; + out.output.apply_to_request(&mut signed); + + let mut expected = test_signed_request("get-vanilla-query-order-key-case"); + assert_req_eq!(expected, signed); + } + + #[test] + fn test_sign_url_escape() { + let test = "double-encode-path"; + let settings = SigningSettings::default(); + let params = SigningParams { + access_key: "AKIDEXAMPLE", + secret_key: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + security_token: None, + region: "us-east-1", + service_name: "service", + time: parse_date_time("20150830T123600Z").unwrap(), + settings, + }; + + let original = test_request(test); + let signable = SignableRequest::from(&original); + let out = sign(signable, ¶ms).unwrap(); + assert_eq!( + "6f871eb157f326fa5f7439eb88ca200048635950ce7d6037deda56f0c95d4364", + out.signature + ); + + let mut signed = original; + out.output.apply_to_request(&mut signed); + + let mut expected = test_signed_request(test); + assert_req_eq!(expected, signed); + } + + #[test] + fn test_sign_vanilla_with_query_params() { + let mut settings = SigningSettings::default(); + settings.signature_location = SignatureLocation::QueryParams; + settings.expires_in = Some(Duration::from_secs(35)); + let params = SigningParams { + access_key: "AKIDEXAMPLE", + secret_key: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + security_token: None, + region: "us-east-1", + service_name: "service", + time: parse_date_time("20150830T123600Z").unwrap(), + settings, + }; + + let original = test_request("get-vanilla-query-order-key-case"); + let signable = SignableRequest::from(&original); + let out = sign(signable, ¶ms).unwrap(); + assert_eq!( + "f25aea20f8c722ece3b363fc5d60cc91add973f9b64c42ba36fa28d57afe9019", + out.signature + ); + + let mut signed = original; + out.output.apply_to_request(&mut signed); + + let mut expected = test_signed_request_query_params("get-vanilla-query-order-key-case"); + assert_req_eq!(expected, signed); + } + + #[test] + fn test_sign_headers_utf8() { + let settings = SigningSettings::default(); + let params = SigningParams { + access_key: "AKIDEXAMPLE", + secret_key: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + security_token: None, + region: "us-east-1", + service_name: "service", + time: parse_date_time("20150830T123600Z").unwrap(), + settings, + }; + + let original = http::Request::builder() + .uri("https://some-endpoint.some-region.amazonaws.com") + .header("some-header", HeaderValue::from_str("テスト").unwrap()) + .body("") + .unwrap(); + let signable = SignableRequest::from(&original); + let out = sign(signable, ¶ms).unwrap(); + assert_eq!( + "4596b207a7fc6bdf18725369bc0cd7022cf20efbd2c19730549f42d1a403648e", + out.signature + ); + + let mut signed = original; + out.output.apply_to_request(&mut 
signed); + + let mut expected = http::Request::builder() + .uri("https://some-endpoint.some-region.amazonaws.com") + .header("some-header", HeaderValue::from_str("テスト").unwrap()) + .header( + "x-amz-date", + HeaderValue::from_str("20150830T123600Z").unwrap(), + ) + .header( + "authorization", + HeaderValue::from_str( + "AWS4-HMAC-SHA256 \ + Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, \ + SignedHeaders=host;some-header;x-amz-date, \ + Signature=4596b207a7fc6bdf18725369bc0cd7022cf20efbd2c19730549f42d1a403648e", + ) + .unwrap(), + ) + .body("") + .unwrap(); + assert_req_eq!(expected, signed); + } + + #[test] + fn test_sign_headers_space_trimming() { + let settings = SigningSettings::default(); + let params = SigningParams { + access_key: "AKIDEXAMPLE", + secret_key: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + security_token: None, + region: "us-east-1", + service_name: "service", + time: parse_date_time("20150830T123600Z").unwrap(), + settings, + }; + + let original = http::Request::builder() + .uri("https://some-endpoint.some-region.amazonaws.com") + .header( + "some-header", + HeaderValue::from_str("  test test ").unwrap(), + ) + .body("") + .unwrap(); + let signable = SignableRequest::from(&original); + let out = sign(signable, ¶ms).unwrap(); + assert_eq!( + "0bd74dbf6f21161f61a1a3a1c313b6a4bc67ec57bf5ea9ae956a63753ca1d7f7", + out.signature + ); + + let mut signed = original; + out.output.apply_to_request(&mut signed); + + let mut expected = http::Request::builder() + .uri("https://some-endpoint.some-region.amazonaws.com") + .header( + "some-header", + HeaderValue::from_str("  test test ").unwrap(), + ) + .header( + "x-amz-date", + HeaderValue::from_str("20150830T123600Z").unwrap(), + ) + .header( + "authorization", + HeaderValue::from_str( + "AWS4-HMAC-SHA256 \ + Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, \ + SignedHeaders=host;some-header;x-amz-date, \ + Signature=0bd74dbf6f21161f61a1a3a1c313b6a4bc67ec57bf5ea9ae956a63753ca1d7f7", + ) + .unwrap(), + ) + .body("") + .unwrap(); + assert_req_eq!(expected, signed); + } + + #[test] + fn apply_signing_instructions_headers() { + let mut headers = HeaderMap::new(); + headers.insert("some-header", HeaderValue::from_static("foo")); + headers.insert("some-other-header", HeaderValue::from_static("bar")); + let instructions = SigningInstructions::new(Some(headers), None); + + let mut request = http::Request::builder() + .uri("https://some-endpoint.some-region.amazonaws.com") + .body("") + .unwrap(); + + instructions.apply_to_request(&mut request); + + let get_header = |n: &str| request.headers().get(n).unwrap().to_str().unwrap(); + assert_eq!("foo", get_header("some-header")); + assert_eq!("bar", get_header("some-other-header")); + } + + #[test] + fn apply_signing_instructions_query_params() { + let params = vec![ + ("some-param", Cow::Borrowed("f&o?o")), + ("some-other-param?", Cow::Borrowed("bar")), + ]; + let instructions = SigningInstructions::new(None, Some(params)); + + let mut request = http::Request::builder() + .uri("https://some-endpoint.some-region.amazonaws.com/some/path") + .body("") + .unwrap(); + + instructions.apply_to_request(&mut request); + + assert_eq!( + "/some/path?some-param=f%26o%3Fo&some-other-param%3F=bar", + request.uri().path_and_query().unwrap().to_string() + ); + } +} diff --git a/patch/aws-sigv4/src/http_request/test.rs b/patch/aws-sigv4/src/http_request/test.rs new file mode 100644 index 0000000000000..c9d26c21676e1 --- /dev/null +++ b/patch/aws-sigv4/src/http_request/test.rs 
@@ -0,0 +1,136 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Functions shared between the tests of several modules. + +use bytes::Bytes; +use http::{Method, Request, Uri, Version}; +use std::error::Error as StdError; + +fn path(name: &str, ext: &str) -> String { + format!("aws-sig-v4-test-suite/{}/{}.{}", name, name, ext) +} + +fn read(path: &str) -> String { + println!("Loading `{}` for test case...", path); + match std::fs::read_to_string(path) { + // This replacement is necessary for tests to pass on Windows, as reading the + // sigv4 snapshots from the file system results in CRLF line endings being inserted. + Ok(value) => value.replace("\r\n", "\n"), + Err(err) => { + panic!("failed to load test case `{}`: {}", path, err); + } + } +} + +pub(crate) fn test_canonical_request(name: &str) -> String { + // Tests fail if there's a trailing newline in the file, and pre-commit requires trailing newlines + read(&path(name, "creq")).trim().to_string() +} + +pub(crate) fn test_sts(name: &str) -> String { + read(&path(name, "sts")) +} + +pub(crate) fn test_request(name: &str) -> Request { + test_parsed_request(name, "req") +} + +pub(crate) fn test_signed_request(name: &str) -> Request { + test_parsed_request(name, "sreq") +} + +pub(crate) fn test_signed_request_query_params(name: &str) -> Request { + test_parsed_request(name, "qpsreq") +} + +fn test_parsed_request(name: &str, ext: &str) -> Request { + let path = path(name, ext); + match parse_request(read(&path).as_bytes()) { + Ok(parsed) => parsed, + Err(err) => panic!("Failed to parse {}: {}", path, err), + } +} + +pub(crate) fn make_headers_comparable(request: &mut Request) { + for (_name, value) in request.headers_mut() { + value.set_sensitive(false); + } +} + +fn parse_request( + s: &[u8], +) -> Result, Box> { + let mut headers = [httparse::EMPTY_HEADER; 64]; + // httparse 1.5 requires two trailing newlines to head the header section. + let mut with_newline = Vec::from(s); + with_newline.push(b'\n'); + let mut req = httparse::Request::new(&mut headers); + let _ = req.parse(&with_newline).unwrap(); + + let version = match req.version.unwrap() { + 1 => Version::HTTP_11, + _ => unimplemented!(), + }; + + let method = match req.method.unwrap() { + "GET" => Method::GET, + "POST" => Method::POST, + _ => unimplemented!(), + }; + + let mut builder = Request::builder(); + builder = builder.version(version); + builder = builder.method(method); + + let mut uri_builder = Uri::builder().scheme("https"); + if let Some(path) = req.path { + uri_builder = uri_builder.path_and_query(path); + } + for header in req.headers { + let name = header.name.to_lowercase(); + if name == "host" { + uri_builder = uri_builder.authority(header.value); + } else if !name.is_empty() { + builder = builder.header(&name, header.value); + } + } + + builder = builder.uri(uri_builder.build()?); + let req = builder.body(bytes::Bytes::new())?; + Ok(req) +} + +#[test] +fn test_parse_headers() { + let buf = b"Host:example.amazonaws.com\nX-Amz-Date:20150830T123600Z\n\nblah blah"; + let mut headers = [httparse::EMPTY_HEADER; 4]; + assert_eq!( + httparse::parse_headers(buf, &mut headers), + Ok(httparse::Status::Complete(( + 56, + &[ + httparse::Header { + name: "Host", + value: b"example.amazonaws.com", + }, + httparse::Header { + name: "X-Amz-Date", + value: b"20150830T123600Z", + } + ][..] 
+ ))) + ); +} + +#[test] +fn test_parse() { + test_request("post-header-key-case"); +} + +#[test] +fn test_read_query_params() { + test_request("get-vanilla-query-order-key-case"); +} diff --git a/patch/aws-sigv4/src/http_request/url_escape.rs b/patch/aws-sigv4/src/http_request/url_escape.rs new file mode 100644 index 0000000000000..a377039bba4d6 --- /dev/null +++ b/patch/aws-sigv4/src/http_request/url_escape.rs @@ -0,0 +1,14 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +use aws_smithy_http::{label, query}; + +pub(super) fn percent_encode_query(value: &str) -> String { + query::fmt_string(value) +} + +pub(super) fn percent_encode_path(value: &str) -> String { + label::fmt_string(value, true) +} diff --git a/patch/aws-sigv4/src/lib.rs b/patch/aws-sigv4/src/lib.rs new file mode 100644 index 0000000000000..232c2bc3b0304 --- /dev/null +++ b/patch/aws-sigv4/src/lib.rs @@ -0,0 +1,226 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Provides functions for calculating Sigv4 signing keys, signatures, and +//! optional utilities for signing HTTP requests and Event Stream messages. + +#![warn( + missing_docs, + rustdoc::missing_crate_level_docs, + missing_debug_implementations, + rust_2018_idioms, + unreachable_pub +)] + +use std::time::SystemTime; + +pub mod sign; + +mod date_time; + +#[cfg(feature = "sign-eventstream")] +pub mod event_stream; + +#[cfg(feature = "sign-http")] +pub mod http_request; + +/// Parameters to use when signing. +#[non_exhaustive] +#[derive(Debug)] +pub struct SigningParams<'a, S> { + /// Access Key ID to use. + pub(crate) access_key: &'a str, + /// Secret access key to use. + pub(crate) secret_key: &'a str, + /// (Optional) Security token to use. + pub(crate) security_token: Option<&'a str>, + + /// Region to sign for. + pub(crate) region: &'a str, + /// AWS Service Name to sign for. + pub(crate) service_name: &'a str, + /// Timestamp to use in the signature (should be `SystemTime::now()` unless testing). + pub(crate) time: SystemTime, + + /// Additional signing settings. These differ between HTTP and Event Stream. + pub(crate) settings: S, +} + +impl<'a, S: Default> SigningParams<'a, S> { + /// Returns a builder that can create new `SigningParams`. + pub fn builder() -> signing_params::Builder<'a, S> { + Default::default() + } +} + +/// Builder and error for creating [`SigningParams`] +pub mod signing_params { + use super::SigningParams; + use std::error::Error; + use std::fmt; + use std::time::SystemTime; + + /// [`SigningParams`] builder error + #[derive(Debug)] + pub struct BuildError { + reason: &'static str, + } + impl BuildError { + fn new(reason: &'static str) -> Self { + Self { reason } + } + } + + impl fmt::Display for BuildError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.reason) + } + } + + impl Error for BuildError {} + + /// Builder that can create new [`SigningParams`] + #[derive(Debug, Default)] + pub struct Builder<'a, S> { + access_key: Option<&'a str>, + secret_key: Option<&'a str>, + security_token: Option<&'a str>, + region: Option<&'a str>, + service_name: Option<&'a str>, + time: Option, + settings: Option, + } + + impl<'a, S> Builder<'a, S> { + /// Sets the access key (required). 
+ pub fn access_key(mut self, access_key: &'a str) -> Self { + self.access_key = Some(access_key); + self + } + /// Sets the access key (required) + pub fn set_access_key(&mut self, access_key: Option<&'a str>) { + self.access_key = access_key; + } + + /// Sets the secret key (required) + pub fn secret_key(mut self, secret_key: &'a str) -> Self { + self.secret_key = Some(secret_key); + self + } + /// Sets the secret key (required) + pub fn set_secret_key(&mut self, secret_key: Option<&'a str>) { + self.secret_key = secret_key; + } + + /// Sets the security token (optional) + pub fn security_token(mut self, security_token: &'a str) -> Self { + self.security_token = Some(security_token); + self + } + /// Sets the security token (optional) + pub fn set_security_token(&mut self, security_token: Option<&'a str>) { + self.security_token = security_token; + } + + /// Sets the region (required) + pub fn region(mut self, region: &'a str) -> Self { + self.region = Some(region); + self + } + /// Sets the region (required) + pub fn set_region(&mut self, region: Option<&'a str>) { + self.region = region; + } + + /// Sets the service name (required) + pub fn service_name(mut self, service_name: &'a str) -> Self { + self.service_name = Some(service_name); + self + } + /// Sets the service name (required) + pub fn set_service_name(&mut self, service_name: Option<&'a str>) { + self.service_name = service_name; + } + + /// Sets the time to be used in the signature (required) + pub fn time(mut self, time: SystemTime) -> Self { + self.time = Some(time); + self + } + /// Sets the time to be used in the signature (required) + pub fn set_time(&mut self, time: Option) { + self.time = time; + } + + /// Sets additional signing settings (required) + pub fn settings(mut self, settings: S) -> Self { + self.settings = Some(settings); + self + } + /// Sets additional signing settings (required) + pub fn set_settings(&mut self, settings: Option) { + self.settings = settings; + } + + /// Builds an instance of [`SigningParams`]. Will yield a [`BuildError`] if + /// a required argument was not given. + pub fn build(self) -> Result, BuildError> { + Ok(SigningParams { + access_key: self + .access_key + .ok_or_else(|| BuildError::new("access key is required"))?, + secret_key: self + .secret_key + .ok_or_else(|| BuildError::new("secret key is required"))?, + security_token: self.security_token, + region: self + .region + .ok_or_else(|| BuildError::new("region is required"))?, + service_name: self + .service_name + .ok_or_else(|| BuildError::new("service name is required"))?, + time: self + .time + .ok_or_else(|| BuildError::new("time is required"))?, + settings: self + .settings + .ok_or_else(|| BuildError::new("settings are required"))?, + }) + } + } +} + +/// Container for the signed output and the signature. +/// +/// This is returned by signing functions, and the signed output will be +/// different based on what is being signed (for example, an event stream +/// message, or an HTTP request). 
+#[derive(Debug)] +pub struct SigningOutput { + output: T, + signature: String, +} + +impl SigningOutput { + /// Creates a new [`SigningOutput`] + pub fn new(output: T, signature: String) -> Self { + Self { output, signature } + } + + /// Returns the signed output + pub fn output(&self) -> &T { + &self.output + } + + /// Returns the signature as a lowercase hex string + pub fn signature(&self) -> &str { + &self.signature + } + + /// Decomposes the `SigningOutput` into a tuple of the signed output and the signature + pub fn into_parts(self) -> (T, String) { + (self.output, self.signature) + } +} diff --git a/patch/aws-sigv4/src/sign.rs b/patch/aws-sigv4/src/sign.rs new file mode 100644 index 0000000000000..f7fd35a75d4d9 --- /dev/null +++ b/patch/aws-sigv4/src/sign.rs @@ -0,0 +1,143 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +//! Functions to create signing keys and calculate signatures. + +use crate::date_time::format_date; + +#[cfg(feature = "ring")] +use ring::{ + hmac::{self, Key, Tag} +}; + +#[cfg(feature = "openssl")] +use openssl::{ + hash::MessageDigest, + pkey::PKey, + sign::Signer +}; + +use std::time::SystemTime; + +/// HashedPayload = Lowercase(HexEncode(Hash(requestPayload))) +#[cfg(feature = "openssl")] +#[allow(dead_code)] // Unused when compiling without certain features +pub(crate) fn sha256_hex_string(bytes: impl AsRef<[u8]>) -> String { + use openssl::sha::sha256; + // hex::encode returns a lowercase string + hex::encode(sha256(bytes.as_ref())) +} + +/// HashedPayload = Lowercase(HexEncode(Hash(requestPayload))) +#[cfg(feature = "ring")] +#[allow(dead_code)] // Unused when compiling without certain features +pub(crate) fn sha256_hex_string(bytes: impl AsRef<[u8]>) -> String { + use ring::digest; + // hex::encode returns a lowercase string + hex::encode(digest::digest(&digest::SHA256, bytes.as_ref())) +} + +/// Calculates a Sigv4 signature +#[cfg(feature = "ring")] +pub fn calculate_signature(signing_key: Tag, string_to_sign: &[u8]) -> String { + let s_key = Key::new(hmac::HMAC_SHA256, signing_key.as_ref()); + let tag = hmac::sign(&s_key, string_to_sign); + hex::encode(tag) +} + +/// Generates a signing key for Sigv4 +#[cfg(feature = "ring")] +pub fn generate_signing_key( + secret: &str, + time: SystemTime, + region: &str, + service: &str, +) -> hmac::Tag { + // kSecret = your secret access key + // kDate = HMAC("AWS4" + kSecret, Date) + // kRegion = HMAC(kDate, Region) + // kService = HMAC(kRegion, Service) + // kSigning = HMAC(kService, "aws4_request") + + let secret = format!("AWS4{}", secret); + let secret = hmac::Key::new(hmac::HMAC_SHA256, secret.as_bytes()); + let tag = hmac::sign(&secret, format_date(time).as_bytes()); + + // sign region + let key = hmac::Key::new(hmac::HMAC_SHA256, tag.as_ref()); + let tag = hmac::sign(&key, region.as_bytes()); + + // sign service + let key = hmac::Key::new(hmac::HMAC_SHA256, tag.as_ref()); + let tag = hmac::sign(&key, service.as_bytes()); + + // sign request + let key = hmac::Key::new(hmac::HMAC_SHA256, tag.as_ref()); + hmac::sign(&key, "aws4_request".as_bytes()) +} + +#[cfg(feature = "openssl")] +fn sign_sha256(secret: &[u8], buf_to_sign: &[u8]) -> Vec { + let key = PKey::hmac(secret).unwrap(); + let mut signer = Signer::new(MessageDigest::sha256(), &key).unwrap(); + signer.update(buf_to_sign).unwrap(); + signer.sign_to_vec().unwrap() +} + +/// Calculates a Sigv4 signature +#[cfg(feature = "openssl")] +pub fn calculate_signature(signing_key: Vec, 
string_to_sign: &[u8]) -> String { + let signature = sign_sha256(signing_key.as_slice(), string_to_sign); + hex::encode(signature) +} + +/// Generates a signing key for Sigv4 +#[cfg(feature = "openssl")] +pub fn generate_signing_key( + secret: &str, + time: SystemTime, + region: &str, + service: &str, +) -> Vec { + // kSecret = your secret access key + // kDate = HMAC("AWS4" + kSecret, Date) + // kRegion = HMAC(kDate, Region) + // kService = HMAC(kRegion, Service) + // kSigning = HMAC(kService, "aws4_request") + + let secret = format!("AWS4{}", secret); + let signature = sign_sha256(secret.as_bytes(), format_date(time).as_bytes()); + let signature = sign_sha256(signature.as_slice(), region.as_bytes()); + let signature = sign_sha256(signature.as_slice(), service.as_bytes()); + sign_sha256(signature.as_slice(), "aws4_request".as_bytes()) +} + +#[cfg(test)] +mod tests { + use super::{calculate_signature, generate_signing_key}; + use crate::date_time::test_parsers::parse_date_time; + use crate::http_request::test::test_canonical_request; + use crate::sign::sha256_hex_string; + + #[test] + fn test_signature_calculation() { + let secret = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"; + let creq = test_canonical_request("iam"); + let time = parse_date_time("20150830T123600Z").unwrap(); + + let derived_key = generate_signing_key(secret, time, "us-east-1", "iam"); + let signature = calculate_signature(derived_key, creq.as_bytes()); + + let expected = "5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"; + assert_eq!(expected, &signature); + } + + #[test] + fn sign_payload_empty_string() { + let expected = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + let actual = sha256_hex_string(&[]); + assert_eq!(expected, actual); + } +} diff --git a/thirdparty/cargo-nextest/cargo-nextest-linux-x86_64 b/thirdparty/cargo-nextest/cargo-nextest-linux-x86_64 new file mode 100755 index 0000000000000..e49c4f55b4888 Binary files /dev/null and b/thirdparty/cargo-nextest/cargo-nextest-linux-x86_64 differ