From 00d53788981d57a8d6495c6b5ebdd32bc7567603 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Sat, 7 Mar 2026 17:04:42 +0100 Subject: [PATCH 01/29] WIP: Refactor tarpc away Signed-off-by: Bob Weinand # Conflicts: # datadog-sidecar-ffi/src/lib.rs # datadog-sidecar/src/service/blocking.rs # datadog-sidecar/src/service/session_info.rs # datadog-sidecar/src/service/sidecar_interface.rs # datadog-sidecar/src/service/sidecar_server.rs --- Cargo.lock | 264 +--- datadog-ipc-macros/Cargo.toml | 4 +- datadog-ipc-macros/src/lib.rs | 532 ++++++- datadog-ipc/Cargo.toml | 10 +- datadog-ipc/benches/ipc.rs | 52 +- datadog-ipc/plugins/Cargo.toml | 36 - datadog-ipc/plugins/LICENSE | 9 - datadog-ipc/plugins/rustfmt.toml | 1 - datadog-ipc/plugins/src/lib.rs | 825 ----------- datadog-ipc/plugins/tests/server.rs | 145 -- datadog-ipc/plugins/tests/service.rs | 85 -- datadog-ipc/src/client.rs | 141 ++ datadog-ipc/src/codec.rs | 62 + datadog-ipc/src/example_interface.rs | 94 +- datadog-ipc/src/handles.rs | 215 +-- datadog-ipc/src/lib.rs | 12 +- datadog-ipc/src/platform/channel/metadata.rs | 33 - datadog-ipc/src/platform/channel/mod.rs | 4 - datadog-ipc/src/platform/mod.rs | 2 - datadog-ipc/src/platform/unix/channel.rs | 173 --- .../platform/unix/channel/async_channel.rs | 146 -- .../src/platform/unix/channel/metadata.rs | 102 -- datadog-ipc/src/platform/unix/mod.rs | 121 +- datadog-ipc/src/platform/unix/sockets.rs | 57 - .../src/platform/unix/sockets/linux.rs | 145 ++ .../src/platform/unix/sockets/macos.rs | 241 ++++ datadog-ipc/src/platform/unix/sockets/mod.rs | 325 +++++ .../src/platform/windows/channel/metadata.rs | 20 +- datadog-ipc/src/platform/windows/mod.rs | 3 + datadog-ipc/src/platform/windows/sockets.rs | 561 ++++++++ datadog-ipc/src/sequential.rs | 150 -- datadog-ipc/src/transport/blocking.rs | 200 --- datadog-ipc/src/transport/mod.rs | 163 --- datadog-ipc/tarpc/Cargo.toml | 118 -- datadog-ipc/tarpc/LICENSE | 9 - datadog-ipc/tarpc/README.md | 1 - datadog-ipc/tarpc/clippy.toml | 
1 - datadog-ipc/tarpc/examples/compression.rs | 122 -- .../tarpc/examples/custom_transport.rs | 48 - datadog-ipc/tarpc/examples/pubsub.rs | 358 ----- datadog-ipc/tarpc/examples/readme.rs | 55 - datadog-ipc/tarpc/examples/tracing.rs | 112 -- datadog-ipc/tarpc/rustfmt.toml | 1 - datadog-ipc/tarpc/src/cancellations.rs | 49 - datadog-ipc/tarpc/src/client.rs | 889 ------------ .../tarpc/src/client/in_flight_requests.rs | 134 -- datadog-ipc/tarpc/src/context.rs | 169 --- datadog-ipc/tarpc/src/lib.rs | 422 ------ datadog-ipc/tarpc/src/serde_transport.rs | 673 --------- datadog-ipc/tarpc/src/server.rs | 1242 ----------------- .../tarpc/src/server/in_flight_requests.rs | 225 --- datadog-ipc/tarpc/src/server/incoming.rs | 51 - datadog-ipc/tarpc/src/server/limits.rs | 5 - .../src/server/limits/channels_per_key.rs | 478 ------- .../src/server/limits/requests_per_channel.rs | 362 ----- datadog-ipc/tarpc/src/server/testing.rs | 146 -- datadog-ipc/tarpc/src/server/tokio.rs | 114 -- datadog-ipc/tarpc/src/trace.rs | 268 ---- datadog-ipc/tarpc/src/transport.rs | 40 - datadog-ipc/tarpc/src/transport/channel.rs | 203 --- datadog-ipc/tarpc/src/util.rs | 71 - datadog-ipc/tarpc/src/util/serde.rs | 73 - datadog-ipc/tarpc/tests/compile_fail.rs | 9 - .../compile_fail/must_use_request_dispatch.rs | 15 - .../must_use_request_dispatch.stderr | 11 - .../serde_transport/must_use_tcp_connect.rs | 9 - .../must_use_tcp_connect.stderr | 11 - .../tarpc_server_missing_async.rs | 15 - .../tarpc_server_missing_async.stderr | 11 - .../compile_fail/tarpc_service_arg_pat.rs | 6 - .../compile_fail/tarpc_service_arg_pat.stderr | 5 - .../compile_fail/tarpc_service_fn_new.rs | 6 - .../compile_fail/tarpc_service_fn_new.stderr | 5 - .../compile_fail/tarpc_service_fn_serve.rs | 6 - .../tarpc_service_fn_serve.stderr | 5 - .../tokio/must_use_channel_executor.rs | 29 - .../tokio/must_use_channel_executor.stderr | 11 - .../tokio/must_use_server_executor.rs | 30 - .../tokio/must_use_server_executor.stderr | 11 - 
datadog-ipc/tarpc/tests/dataservice.rs | 55 - datadog-ipc/tarpc/tests/service_functional.rs | 280 ---- datadog-ipc/tests/blocking_client.rs | 69 +- datadog-sidecar-ffi/src/lib.rs | 93 +- datadog-sidecar/Cargo.toml | 2 +- datadog-sidecar/src/entry.rs | 12 +- datadog-sidecar/src/service/blocking.rs | 518 ++----- datadog-sidecar/src/service/mod.rs | 7 +- datadog-sidecar/src/service/sender.rs | 419 ++++++ datadog-sidecar/src/service/session_info.rs | 8 +- .../src/service/sidecar_interface.rs | 33 +- datadog-sidecar/src/service/sidecar_server.rs | 706 +++++----- datadog-sidecar/src/setup/mod.rs | 4 +- datadog-sidecar/src/setup/unix.rs | 70 +- datadog-sidecar/src/setup/windows.rs | 236 +--- datadog-sidecar/src/unix.rs | 42 +- datadog-sidecar/src/windows.rs | 51 +- tools/docker/Dockerfile.build | 1 - 97 files changed, 3359 insertions(+), 10814 deletions(-) delete mode 100644 datadog-ipc/plugins/Cargo.toml delete mode 100644 datadog-ipc/plugins/LICENSE delete mode 100644 datadog-ipc/plugins/rustfmt.toml delete mode 100644 datadog-ipc/plugins/src/lib.rs delete mode 100644 datadog-ipc/plugins/tests/server.rs delete mode 100644 datadog-ipc/plugins/tests/service.rs create mode 100644 datadog-ipc/src/client.rs create mode 100644 datadog-ipc/src/codec.rs delete mode 100644 datadog-ipc/src/platform/channel/metadata.rs delete mode 100644 datadog-ipc/src/platform/channel/mod.rs delete mode 100644 datadog-ipc/src/platform/unix/channel.rs delete mode 100644 datadog-ipc/src/platform/unix/channel/async_channel.rs delete mode 100644 datadog-ipc/src/platform/unix/channel/metadata.rs delete mode 100644 datadog-ipc/src/platform/unix/sockets.rs create mode 100644 datadog-ipc/src/platform/unix/sockets/linux.rs create mode 100644 datadog-ipc/src/platform/unix/sockets/macos.rs create mode 100644 datadog-ipc/src/platform/unix/sockets/mod.rs create mode 100644 datadog-ipc/src/platform/windows/sockets.rs delete mode 100644 datadog-ipc/src/sequential.rs delete mode 100644 
datadog-ipc/src/transport/blocking.rs delete mode 100644 datadog-ipc/src/transport/mod.rs delete mode 100644 datadog-ipc/tarpc/Cargo.toml delete mode 100644 datadog-ipc/tarpc/LICENSE delete mode 120000 datadog-ipc/tarpc/README.md delete mode 100644 datadog-ipc/tarpc/clippy.toml delete mode 100644 datadog-ipc/tarpc/examples/compression.rs delete mode 100644 datadog-ipc/tarpc/examples/custom_transport.rs delete mode 100644 datadog-ipc/tarpc/examples/pubsub.rs delete mode 100644 datadog-ipc/tarpc/examples/readme.rs delete mode 100644 datadog-ipc/tarpc/examples/tracing.rs delete mode 100644 datadog-ipc/tarpc/rustfmt.toml delete mode 100644 datadog-ipc/tarpc/src/cancellations.rs delete mode 100644 datadog-ipc/tarpc/src/client.rs delete mode 100644 datadog-ipc/tarpc/src/client/in_flight_requests.rs delete mode 100644 datadog-ipc/tarpc/src/context.rs delete mode 100644 datadog-ipc/tarpc/src/lib.rs delete mode 100644 datadog-ipc/tarpc/src/serde_transport.rs delete mode 100644 datadog-ipc/tarpc/src/server.rs delete mode 100644 datadog-ipc/tarpc/src/server/in_flight_requests.rs delete mode 100644 datadog-ipc/tarpc/src/server/incoming.rs delete mode 100644 datadog-ipc/tarpc/src/server/limits.rs delete mode 100644 datadog-ipc/tarpc/src/server/limits/channels_per_key.rs delete mode 100644 datadog-ipc/tarpc/src/server/limits/requests_per_channel.rs delete mode 100644 datadog-ipc/tarpc/src/server/testing.rs delete mode 100644 datadog-ipc/tarpc/src/server/tokio.rs delete mode 100644 datadog-ipc/tarpc/src/trace.rs delete mode 100644 datadog-ipc/tarpc/src/transport.rs delete mode 100644 datadog-ipc/tarpc/src/transport/channel.rs delete mode 100644 datadog-ipc/tarpc/src/util.rs delete mode 100644 datadog-ipc/tarpc/src/util/serde.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/must_use_request_dispatch.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/must_use_request_dispatch.stderr delete mode 100644 
datadog-ipc/tarpc/tests/compile_fail/serde_transport/must_use_tcp_connect.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/serde_transport/must_use_tcp_connect.stderr delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tarpc_server_missing_async.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tarpc_server_missing_async.stderr delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tarpc_service_arg_pat.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tarpc_service_arg_pat.stderr delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_new.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_new.stderr delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_serve.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_serve.stderr delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_channel_executor.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_channel_executor.stderr delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_server_executor.rs delete mode 100644 datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_server_executor.stderr delete mode 100644 datadog-ipc/tarpc/tests/dataservice.rs delete mode 100644 datadog-ipc/tarpc/tests/service_functional.rs create mode 100644 datadog-sidecar/src/service/sender.rs diff --git a/Cargo.lock b/Cargo.lock index 7bc34355a8..d9fdabd98e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addr2line" @@ -162,18 +162,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "assert-type-eq" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd49a41856ee21a0cfb2b1cfbfcca0f1d3e6c257c38939f0d6ecfaf177f2ea47" - -[[package]] -name = "assert_matches" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" - [[package]] name = "assert_no_alloc" version = "1.1.2" @@ -1441,6 +1429,7 @@ name = "datadog-ipc" version = "0.1.0" dependencies = [ "anyhow", + "bincode", "bytes", "criterion", "datadog-ipc-macros", @@ -1458,10 +1447,8 @@ dependencies = [ "sendfd", "serde", "spawn_worker", - "tarpc", "tempfile", "tokio", - "tokio-serde", "tokio-util", "tracing", "tracing-subscriber", @@ -1473,6 +1460,8 @@ dependencies = [ name = "datadog-ipc-macros" version = "0.0.1" dependencies = [ + "heck 0.5.0", + "proc-macro2", "quote", "syn 2.0.87", ] @@ -1780,18 +1769,6 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" -[[package]] -name = "educe" -version = "0.4.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" -dependencies = [ - "enum-ordinalize", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "either" version = "1.13.0" @@ -1819,19 +1796,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "enum-ordinalize" -version = "3.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" -dependencies = [ - "num-bigint", - "num-traits", - "proc-macro2", - "quote", - "syn 2.0.87", -] - [[package]] name = "env_logger" version = "0.10.2" @@ -2092,22 +2056,6 @@ 
version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" -[[package]] -name = "futures-test" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5961fb6311645f46e2cdc2964a8bfae6743fd72315eaec181a71ae3eb2467113" -dependencies = [ - "futures-core", - "futures-executor", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "futures-util", - "pin-project", -] - [[package]] name = "futures-timer" version = "3.0.3" @@ -2801,12 +2749,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "integer-encoding" -version = "3.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" - [[package]] name = "io-lifetimes" version = "1.0.11" @@ -3803,16 +3745,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "num-bigint" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" -dependencies = [ - "num-integer", - "num-traits", -] - [[package]] name = "num-conv" version = "0.1.0" @@ -3830,15 +3762,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "num-integer" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" -dependencies = [ - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.19" @@ -3848,16 +3771,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi 0.3.9", - "libc", -] - [[package]] name = "objc2" version = "0.6.3" @@ -4059,60 +3972,6 @@ version = 
"0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "opentelemetry" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" -dependencies = [ - "async-trait", - "crossbeam-channel", - "futures-channel", - "futures-executor", - "futures-util", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror 1.0.68", - "tokio", - "tokio-stream", -] - -[[package]] -name = "opentelemetry-jaeger" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c0b12cd9e3f9b35b52f6e0dac66866c519b26f424f4bbf96e3fe8bfbdc5229" -dependencies = [ - "async-trait", - "lazy_static", - "opentelemetry", - "opentelemetry-semantic-conventions", - "thiserror 1.0.68", - "thrift", - "tokio", -] - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "985cc35d832d412224b2cffe2f9194b1b89b6aa5d0bef76d080dce09d90e62bd" -dependencies = [ - "opentelemetry", -] - -[[package]] -name = "ordered-float" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" -dependencies = [ - "num-traits", -] - [[package]] name = "os_info" version = "3.14.0" @@ -4430,7 +4289,7 @@ dependencies = [ [[package]] name = "proptest" version = "1.5.0" -source = "git+https://github.com/bantonsson/proptest.git?branch=ban/avoid-libm-in-std#9f623fbab7a1a4da487551128c2bffeee2ed6b87" +source = "git+https://github.com/bantonsson/proptest.git?branch=ban%2Favoid-libm-in-std#9f623fbab7a1a4da487551128c2bffeee2ed6b87" dependencies = [ "bit-set", "bit-vec", @@ -5831,51 +5690,6 @@ version = "0.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790" -[[package]] -name = "tarpc" -version = "0.31.0" -dependencies = [ - "anyhow", - "assert_matches", - "bincode", - "bytes", - "flate2", - "fnv", - "futures", - "futures-test", - "humantime", - "opentelemetry", - "opentelemetry-jaeger", - "pin-project", - "pin-utils", - "rand 0.8.5", - "serde", - "serde_bytes", - "static_assertions", - "tarpc-plugins", - "thiserror 1.0.68", - "tokio", - "tokio-serde", - "tokio-util", - "tracing", - "tracing-opentelemetry", - "tracing-subscriber", - "trybuild", -] - -[[package]] -name = "tarpc-plugins" -version = "0.12.0" -dependencies = [ - "assert-type-eq", - "futures", - "proc-macro2", - "quote", - "serde", - "syn 1.0.109", - "tarpc", -] - [[package]] name = "tempfile" version = "3.23.0" @@ -5981,28 +5795,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] -name = "thrift" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b82ca8f46f95b3ce96081fe3dd89160fdea970c254bb72925255d1b62aae692e" -dependencies = [ - "byteorder", - "integer-encoding", - "log", - "ordered-float", - "threadpool", -] - [[package]] name = "time" version = "0.3.41" @@ -6110,22 +5902,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-serde" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" -dependencies = [ - "bincode", - "bytes", - "educe", - "futures-core", - "futures-sink", - "pin-project", - "serde", - "serde_json", -] - [[package]] name = "tokio-stream" version = "0.1.16" @@ -6149,7 +5925,6 @@ dependencies = [ "futures-util", 
"hashbrown 0.14.5", "pin-project-lite", - "slab", "tokio", ] @@ -6314,7 +6089,6 @@ version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -6364,19 +6138,6 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-opentelemetry" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" -dependencies = [ - "once_cell", - "opentelemetry", - "tracing", - "tracing-core", - "tracing-subscriber", -] - [[package]] name = "tracing-serde" version = "0.2.0" @@ -6414,21 +6175,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" -[[package]] -name = "trybuild" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dcd332a5496c026f1e14b7f3d2b7bd98e509660c04239c58b0ba38a12daded4" -dependencies = [ - "glob", - "serde", - "serde_derive", - "serde_json", - "target-triple", - "termcolor", - "toml", -] - [[package]] name = "twox-hash" version = "1.6.3" diff --git a/datadog-ipc-macros/Cargo.toml b/datadog-ipc-macros/Cargo.toml index ec309d26e7..8fbd138dca 100644 --- a/datadog-ipc-macros/Cargo.toml +++ b/datadog-ipc-macros/Cargo.toml @@ -10,5 +10,7 @@ proc-macro = true bench = false [dependencies] -syn = "^2" +proc-macro2 = "1" quote = "^1" +syn = { version = "^2", features = ["full"] } +heck = "0.5" diff --git a/datadog-ipc-macros/src/lib.rs b/datadog-ipc-macros/src/lib.rs index 5f025ffc12..8a4a0326de 100644 --- a/datadog-ipc-macros/src/lib.rs +++ b/datadog-ipc-macros/src/lib.rs @@ -1,15 +1,14 @@ // Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 +use heck::{ToPascalCase, ToSnakeCase}; use proc_macro::TokenStream; +use proc_macro2::Span; use quote::{format_ident, quote, ToTokens}; -use syn::FnArg::Typed; -use syn::__private::Span; -use syn::{parse_quote, Arm, FieldPat, Ident, ItemTrait, Member, Pat, Stmt, TraitItem}; +use syn::{FnArg, Ident, ItemTrait, ReturnType, TraitItem, Type}; fn snake_to_camel(ident_str: &str) -> String { let mut camel_ty = String::with_capacity(ident_str.len()); - let mut last_char_was_underscore = true; for c in ident_str.chars() { match c { @@ -21,43 +20,57 @@ fn snake_to_camel(ident_str: &str) -> String { c => camel_ty.extend(c.to_lowercase()), } } - camel_ty.shrink_to_fit(); camel_ty } +fn is_unit_type(ty: &Type) -> bool { + matches!(ty, Type::Tuple(t) if t.elems.is_empty()) +} + +fn has_attr(attrs: &[syn::Attribute], name: &str) -> bool { + attrs + .iter() + .any(|a| a.meta.path().to_token_stream().to_string() == name) +} + +// --------------------------------------------------------------------------- +// Old macro — kept during migration to the new #[service] macro. 
+// --------------------------------------------------------------------------- + #[proc_macro_attribute] pub fn impl_transfer_handles(_attr: TokenStream, input: TokenStream) -> TokenStream { let mut item: ItemTrait = syn::parse(input).unwrap(); let req_name = format_ident!("{}Request", item.ident); let res_name = format_ident!("{}Response", item.ident); - let mut arms_req_move: Vec = vec![]; - let mut arms_req_recv: Vec = vec![]; - let mut arms_res_move: Vec = vec![]; - let mut arms_res_recv: Vec = vec![]; + let mut arms_req_move: Vec = vec![]; + let mut arms_req_recv: Vec = vec![]; + let mut arms_res_move: Vec = vec![]; + let mut arms_res_recv: Vec = vec![]; for inner in item.items.iter_mut() { if let TraitItem::Fn(ref mut func) = inner { - let mut params: Vec = vec![]; - let mut stmts_move: Vec = vec![]; - let mut stmts_recv: Vec = vec![]; + let mut params: Vec = vec![]; + let mut stmts_move: Vec = vec![]; + let mut stmts_recv: Vec = vec![]; for any_arg in func.sig.inputs.iter_mut() { - if let Typed(ref mut arg) = any_arg { + if let FnArg::Typed(ref mut arg) = any_arg { let orig_attr_num = arg.attrs.len(); arg.attrs.retain(|attr| { attr.meta.path().to_token_stream().to_string() != "SerializedHandle" }); if orig_attr_num != arg.attrs.len() { - if let Pat::Ident(ref ident) = *arg.pat { - params.push(FieldPat { + if let syn::Pat::Ident(ref ident) = *arg.pat { + params.push(syn::FieldPat { attrs: vec![], - member: Member::Named(ident.ident.clone()), + member: syn::Member::Named(ident.ident.clone()), colon_token: None, - pat: Box::new(parse_quote! { #ident }), + pat: Box::new(syn::parse_quote! { #ident }), }); stmts_move.push( - parse_quote! { __transport.copy_handle(#ident.clone().into())?; }, + syn::parse_quote! { __transport.copy_handle(#ident.clone().into())?; }, ); - stmts_recv.push(parse_quote! { #ident.receive_handles(__transport)?; }); + stmts_recv + .push(syn::parse_quote! 
{ #ident.receive_handles(__transport)?; }); } } } @@ -67,13 +80,13 @@ pub fn impl_transfer_handles(_attr: TokenStream, input: TokenStream) -> TokenStr Span::mixed_site(), ); if !params.is_empty() { - arms_req_move.push(parse_quote! { + arms_req_move.push(syn::parse_quote! { #req_name::#method { #(#params,)* .. } => { #(#stmts_move)* Ok(()) } }); - arms_req_recv.push(parse_quote! { + arms_req_recv.push(syn::parse_quote! { #req_name::#method { #(#params,)* .. } => { #(#stmts_recv)* Ok(()) @@ -85,10 +98,10 @@ pub fn impl_transfer_handles(_attr: TokenStream, input: TokenStream) -> TokenStr attr.meta.path().to_token_stream().to_string() != "SerializedHandle" }); if orig_attr_num != func.attrs.len() { - arms_res_move.push(parse_quote! { + arms_res_move.push(syn::parse_quote! { #res_name::#method(response) => response.copy_handles(transport) }); - arms_res_recv.push(parse_quote! { + arms_res_recv.push(syn::parse_quote! { #res_name::#method(response) => response.receive_handles(transport) }); } @@ -104,9 +117,7 @@ pub fn impl_transfer_handles(_attr: TokenStream, input: TokenStream) -> TokenStr __transport: Transport, ) -> Result<(), Transport::Error> { match self { - #( - #arms_req_move, - )* + #(#arms_req_move,)* _ => Ok(()), } } @@ -116,9 +127,7 @@ pub fn impl_transfer_handles(_attr: TokenStream, input: TokenStream) -> TokenStr __transport: Transport, ) -> Result<(), Transport::Error> { match self { - #( - #arms_req_recv, - )* + #(#arms_req_recv,)* _ => Ok(()), } } @@ -130,9 +139,7 @@ pub fn impl_transfer_handles(_attr: TokenStream, input: TokenStream) -> TokenStr transport: Transport, ) -> Result<(), Transport::Error> { match self { - #( - #arms_res_move, - )* + #(#arms_res_move,)* _ => Ok(()), } } @@ -142,12 +149,467 @@ pub fn impl_transfer_handles(_attr: TokenStream, input: TokenStream) -> TokenStr transport: Transport, ) -> Result<(), Transport::Error> { match self { - #( - #arms_res_recv, - )* + #(#arms_res_recv,)* + _ => Ok(()), + } + } + } + }) +} + +// 
--------------------------------------------------------------------------- +// New #[service] macro +// --------------------------------------------------------------------------- + +// Each param stores: (non-SerializedHandle attrs, name, type, is_handle). +// The attrs include #[cfg(...)], allowing conditional compilation of parameters. +type ParamInfo = (Vec, Ident, Box); + +struct MethodInfo { + name: Ident, + variant: Ident, + discriminant: u32, + is_blocking: bool, + return_type: Option>, + params: Vec, + handle_param_indices: Vec, +} + +fn collect_methods(item: &ItemTrait) -> Vec { + let mut methods = Vec::new(); + let mut discriminant: u32 = 0; + + for trait_item in &item.items { + let TraitItem::Fn(func) = trait_item else { + continue; + }; + + let name = func.sig.ident.clone(); + let variant = Ident::new(&name.to_string().to_pascal_case(), Span::mixed_site()); + let is_blocking = has_attr(&func.attrs, "blocking"); + + let return_type = match &func.sig.output { + ReturnType::Default => None, + ReturnType::Type(_, ty) => { + if is_unit_type(ty) { + None + } else { + Some(ty.clone()) + } + } + }; + + let mut params: Vec = Vec::new(); + let mut handle_param_indices: Vec = Vec::new(); + + for arg in &func.sig.inputs { + let FnArg::Typed(pat_ty) = arg else { + continue; + }; + let syn::Pat::Ident(ident_pat) = &*pat_ty.pat else { + continue; + }; + if has_attr(&pat_ty.attrs, "SerializedHandle") { + handle_param_indices.push(params.len()); + } + // Keep all attrs except #[SerializedHandle] (e.g. #[cfg(...)]). 
+ let pass_through_attrs: Vec = pat_ty + .attrs + .iter() + .filter(|a| a.meta.path().to_token_stream().to_string() != "SerializedHandle") + .cloned() + .collect(); + params.push((pass_through_attrs, ident_pat.ident.clone(), pat_ty.ty.clone())); + } + + methods.push(MethodInfo { + name, + variant, + discriminant, + is_blocking, + return_type, + params, + handle_param_indices, + }); + discriminant += 1; + } + + methods +} + +fn gen_request_enum(enum_name: &Ident, methods: &[MethodInfo]) -> proc_macro2::TokenStream { + let variants: Vec<_> = methods + .iter() + .map(|m| { + let variant = &m.variant; + let fields: Vec<_> = m + .params + .iter() + .map(|(attrs, n, t)| quote! { #(#attrs)* #n: #t }) + .collect(); + quote! { #variant { #(#fields),* } } + }) + .collect(); + + let disc_arms: Vec<_> = methods + .iter() + .map(|m| { + let variant = &m.variant; + let d = m.discriminant; + quote! { Self::#variant { .. } => #d } + }) + .collect(); + + quote! { + #[derive(::serde::Serialize, ::serde::Deserialize)] + pub enum #enum_name { + #(#variants),* + } + + impl #enum_name { + pub fn discriminant(&self) -> u32 { + match self { + #(#disc_arms),* + } + } + } + } +} + +fn gen_transfer_handles(enum_name: &Ident, methods: &[MethodInfo]) -> proc_macro2::TokenStream { + let copy_arms: Vec<_> = methods + .iter() + .filter(|m| !m.handle_param_indices.is_empty()) + .map(|m| { + let variant = &m.variant; + let handle_names: Vec<_> = m + .handle_param_indices + .iter() + .map(|&i| &m.params[i].1) + .collect(); + // One copy_handle call per #[SerializedHandle] param. + // Uses .into() to convert from the param type to PlatformHandle. + let stmts: Vec<_> = handle_names + .iter() + .map(|hn| quote! { __transport.copy_handle(#hn.clone().into())?; }) + .collect(); + quote! { + #enum_name::#variant { #(#handle_names,)* .. 
} => { + #(#stmts)* + Ok(()) + } + } + }) + .collect(); + + let recv_arms: Vec<_> = methods + .iter() + .filter(|m| !m.handle_param_indices.is_empty()) + .map(|m| { + let variant = &m.variant; + let handle_names: Vec<_> = m + .handle_param_indices + .iter() + .map(|&i| &m.params[i].1) + .collect(); + let stmts: Vec<_> = handle_names + .iter() + .map(|hn| quote! { #hn.receive_handles(__transport)?; }) + .collect(); + quote! { + #enum_name::#variant { #(#handle_names,)* .. } => { + #(#stmts)* + Ok(()) + } + } + }) + .collect(); + + quote! { + impl datadog_ipc::handles::TransferHandles for #enum_name { + fn copy_handles( + &self, + __transport: Transport, + ) -> ::std::result::Result<(), Transport::Error> { + match self { + #(#copy_arms,)* + _ => Ok(()), + } + } + + fn receive_handles( + &mut self, + __transport: Transport, + ) -> ::std::result::Result<(), Transport::Error> { + match self { + #(#recv_arms,)* _ => Ok(()), } } } + } +} + +fn gen_handler_trait( + trait_name: &Ident, + vis: &syn::Visibility, + methods: &[MethodInfo], +) -> proc_macro2::TokenStream { + let handler_methods: Vec<_> = methods + .iter() + .map(|m| { + let name = &m.name; + let params: Vec<_> = m + .params + .iter() + .map(|(attrs, n, t)| quote! { #(#attrs)* #n: #t }) + .collect(); + let ret = match &m.return_type { + None => quote! { () }, + Some(ty) => quote! { #ty }, + }; + quote! { + fn #name( + &self, + peer: datadog_ipc::PeerCredentials, + #(#params),* + ) -> impl ::std::future::Future + Send + '_; + } + }) + .collect(); + + quote! 
{ + #vis trait #trait_name: Send + Sync + 'static { + #(#handler_methods)* + } + } +} + +fn gen_serve_fn( + trait_name: &Ident, + enum_name: &Ident, + methods: &[MethodInfo], +) -> proc_macro2::TokenStream { + let snake = trait_name.to_string().to_snake_case(); + let serve_fn = format_ident!("serve_{}_connection", snake); + + let match_arms: Vec<_> = methods + .iter() + .map(|m| { + let variant = &m.variant; + let name = &m.name; + // field_names: includes leading #[cfg(...)] attrs for conditional params. + let field_names: Vec<_> = m + .params + .iter() + .map(|(attrs, n, _)| quote! { #(#attrs)* #n }) + .collect(); + + let response_code = if m.return_type.is_some() { + quote! { + let result = handler.#name(peer, #(#field_names),*).await; + let __resp_data = datadog_ipc::codec::encode_response(&result); + datadog_ipc::send_raw_async(&async_fd, &__resp_data, &[]).await.ok(); + } + } else { + quote! { + handler.#name(peer, #(#field_names),*).await; + // 1-byte ack: distinguishable from EOF (0 bytes from recvmsg on closed socket). + datadog_ipc::send_raw_async(&async_fd, &[0u8], &[]).await.ok(); + } + }; + + quote! { + #enum_name::#variant { #(#field_names),* } => { + #response_code + } + } + }) + .collect(); + + quote! 
{ + pub async fn #serve_fn( + conn: datadog_ipc::SeqpacketConn, + handler: ::std::sync::Arc, + ) { + let peer = conn.peer_credentials().unwrap_or_default(); + let async_fd = match conn.into_async_fd() { + Ok(fd) => fd, + Err(e) => { + ::tracing::error!("IPC serve: into_async_fd failed: {e}"); + return; + } + }; + let mut recv_counter: u64 = 0; + let mut buf = vec![0u8; datadog_ipc::MAX_MESSAGE_SIZE]; + loop { + let (n, fds) = match datadog_ipc::recv_raw_async(&async_fd, &mut buf).await { + Ok(x) => x, + Err(e) => { + ::tracing::trace!("IPC serve: recv (connection closed?): {e}"); + break; + } + }; + let Ok((discriminant, mut req)) = + datadog_ipc::codec::decode::<#enum_name>(&buf[..n]) + else { + ::tracing::warn!("IPC serve: failed to decode request"); + break; + }; + let mut __source = datadog_ipc::handles::FdSource::new(fds); + if datadog_ipc::handles::TransferHandles::receive_handles( + &mut req, + &mut __source, + ).is_err() { + ::tracing::warn!("IPC serve: failed to receive handles"); + break; + } + recv_counter += 1; + ::tracing::trace!(recv_counter, discriminant, pid = peer.pid, "IPC recv"); + + match req { + #(#match_arms)* + } + } + } + } +} + +fn gen_channel( + trait_name: &Ident, + vis: &syn::Visibility, + enum_name: &Ident, + methods: &[MethodInfo], +) -> proc_macro2::TokenStream { + let channel_name = format_ident!("{}Channel", trait_name); + + let channel_methods: Vec<_> = methods + .iter() + .map(|m| { + let name = &m.name; + let params: Vec<_> = m + .params + .iter() + .map(|(attrs, n, t)| quote! { #(#attrs)* #n: #t }) + .collect(); + // field_names includes leading attrs (e.g. #[cfg(windows)]) for struct init + call args. + let field_names: Vec<_> = m + .params + .iter() + .map(|(attrs, n, _)| quote! { #(#attrs)* #n }) + .collect(); + let d = m.discriminant; + let variant = &m.variant; + + // Build the request and collect fds via TransferHandles. + let build_req_and_fds = quote! 
{ + let __req = #enum_name::#variant { #(#field_names),* }; + let mut __sink = datadog_ipc::handles::FdSink::new(); + datadog_ipc::handles::TransferHandles::copy_handles( + &__req, &mut __sink + ).ok(); + let mut __data = datadog_ipc::codec::encode(#d, &__req); + let __fds = __sink.into_fds(); + }; + + if m.return_type.is_none() && !m.is_blocking { + let method_name = format_ident!("try_send_{}", name); + quote! { + pub fn #method_name(&mut self, #(#params),*) -> bool { + #build_req_and_fds + self.0.try_send(&mut __data, &__fds) + } + } + } else if m.return_type.is_none() { + let method_name = format_ident!("call_{}", name); + quote! { + pub fn #method_name(&mut self, #(#params),*) -> ::std::io::Result<()> { + #build_req_and_fds + self.0.call(&mut __data, &__fds)?; + Ok(()) + } + } + } else { + let method_name = format_ident!("call_{}", name); + let ret_ty = m.return_type.as_ref().unwrap(); + quote! { + pub fn #method_name(&mut self, #(#params),*) -> ::std::result::Result<#ret_ty, datadog_ipc::codec::DecodeError> { + #build_req_and_fds + let (__resp, _) = self.0.call(&mut __data, &__fds) + .map_err(datadog_ipc::codec::DecodeError::Io)?; + datadog_ipc::codec::decode_response::<#ret_ty>(&__resp) + } + } + } + }) + .collect(); + + quote! { + #vis struct #channel_name(pub datadog_ipc::IpcClientConn); + + impl #channel_name { + pub fn new(conn: datadog_ipc::SeqpacketConn) -> Self { + Self(datadog_ipc::IpcClientConn::new(conn)) + } + + #(#channel_methods)* + + /// Generic fire-and-forget send (used by SidecarSender outbox drain). + pub fn try_send_request(&mut self, req: &#enum_name) -> bool { + let mut __sink = datadog_ipc::handles::FdSink::new(); + datadog_ipc::handles::TransferHandles::copy_handles(req, &mut __sink).ok(); + let mut __data = datadog_ipc::codec::encode(req.discriminant(), req); + let __fds = __sink.into_fds(); + self.0.try_send(&mut __data, &__fds) + } + + /// Generic blocking send (used by SidecarSender outbox drain). 
+ pub fn send_request_blocking( + &mut self, + req: &#enum_name, + ) -> ::std::io::Result<()> { + let mut __sink = datadog_ipc::handles::FdSink::new(); + datadog_ipc::handles::TransferHandles::copy_handles(req, &mut __sink).ok(); + let mut __data = datadog_ipc::codec::encode(req.discriminant(), req); + let __fds = __sink.into_fds(); + self.0.send_blocking(&mut __data, &__fds) + } + } + } +} + +/// `#[service]` replaces `#[tarpc::service]` + `#[impl_transfer_handles]`. +/// +/// Generates from a `trait` definition: +/// - `{Trait}Request` enum (Clone, Serialize, Deserialize, TransferHandles) +/// - Handler trait with RPIT async methods (no `async_trait`) +/// - `serve_{trait}_connection` async dispatch function (Unix) +/// - `{Trait}Channel` client struct with `try_send_*` / `call_*` methods (Unix) +/// +/// Method attributes recognized (stripped before emission): +/// - `#[blocking]` — `-> ()` method where client waits for ack (vs fire-and-forget) +/// - `#[SerializedHandle]` on a parameter — the value carries an fd via SCM_RIGHTS +#[proc_macro_attribute] +pub fn service(_attr: TokenStream, input: TokenStream) -> TokenStream { + let item: ItemTrait = syn::parse(input).unwrap(); + + let trait_name = item.ident.clone(); + let vis = item.vis.clone(); + let enum_name = format_ident!("{}Request", trait_name); + + let methods = collect_methods(&item); + + let enum_def = gen_request_enum(&enum_name, &methods); + let transfer_handles = gen_transfer_handles(&enum_name, &methods); + let handler_trait = gen_handler_trait(&trait_name, &vis, &methods); + let serve_fn = gen_serve_fn(&trait_name, &enum_name, &methods); + let channel = gen_channel(&trait_name, &vis, &enum_name, &methods); + + TokenStream::from(quote! 
{ + #enum_def + #transfer_handles + #handler_trait + #serve_fn + #channel }) } diff --git a/datadog-ipc/Cargo.toml b/datadog-ipc/Cargo.toml index 8f4fcddf53..bebd519246 100644 --- a/datadog-ipc/Cargo.toml +++ b/datadog-ipc/Cargo.toml @@ -8,6 +8,7 @@ publish = false [dependencies] anyhow = { version = "1.0" } +bincode = { version = "1" } bytes = { version = "1.11.1" } futures = { version = "0.3", default-features = false } io-lifetimes = { version = "1.0" } @@ -15,13 +16,10 @@ page_size = "0.6.0" pin-project = { version = "1" } memfd = { version = "0.6" } serde = { version = "1.0", default-features = false, features = ["derive"] } -tokio-serde = { version = "0.8", features = ["bincode"] } tokio-util = { version = "0.7.11", features = ["codec"] } libc = { version = "0.2" } libdd-tinybytes = { path = "../libdd-tinybytes", optional = true } -# tarpc needed extensions to allow 1 way communication and to export some internal structs -tarpc = { path = "./tarpc", default-features = false, features = ["serde-transport"] } libdd-common = { path = "../libdd-common" } datadog-ipc-macros = { path = "../datadog-ipc-macros" } @@ -44,7 +42,7 @@ tracing-subscriber = { version = "0.3.22" } spawn_worker = { path = "../spawn_worker" } [target.'cfg(not(windows))'.dependencies] -nix = { version = "0.29", features = ["fs", "mman", "process", "poll", "socket"] } +nix = { version = "0.29", features = ["fs", "mman", "process", "poll", "socket", "uio"] } sendfd = { version = "0.4", features = ["tokio"] } tokio = { version = "1.23", features = ["sync", "io-util", "signal"] } @@ -52,8 +50,8 @@ tokio = { version = "1.23", features = ["sync", "io-util", "signal"] } glibc_version = "0.1.2" [target.'cfg(windows)'.dependencies] -winapi = { version = "0.3.9", features = ["handleapi", "memoryapi", "winbase", "winerror"] } -windows-sys = { version = "0.48.0", features = ["Win32_System", "Win32_System_WindowsProgramming", "Win32_Foundation", "Win32_System_Pipes"] } +winapi = { version = "0.3.9", 
features = ["handleapi", "memoryapi", "winbase", "winnt", "winerror", "processthreadsapi", "fileapi", "minwinbase"] } +windows-sys = { version = "0.48.0", features = ["Win32_System", "Win32_System_WindowsProgramming", "Win32_Foundation", "Win32_System_Pipes", "Win32_Storage_FileSystem", "Win32_System_IO", "Win32_System_Threading"] } tokio = { version = "1.23", features = ["sync", "io-util", "signal", "net"] } [lib] diff --git a/datadog-ipc/benches/ipc.rs b/datadog-ipc/benches/ipc.rs index ded759b6b2..8a39978891 100644 --- a/datadog-ipc/benches/ipc.rs +++ b/datadog-ipc/benches/ipc.rs @@ -1,72 +1,52 @@ // Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -#[cfg(not(windows))] +#[cfg(unix)] use criterion::{criterion_group, criterion_main, Criterion}; -#[cfg(all(not(windows), not(target_arch = "aarch64")))] -use datadog_ipc::example_interface::ExampleInterfaceResponse; -#[cfg(not(windows))] -use datadog_ipc::{ - example_interface::{ExampleInterfaceRequest, ExampleServer, ExampleTransport}, - platform::Channel, -}; +#[cfg(unix)] +use datadog_ipc::example_interface::{ExampleInterfaceChannel, ExampleServer}; -#[cfg(not(windows))] -use std::{ - os::unix::net::UnixStream, - thread::{self}, -}; -#[cfg(not(windows))] +#[cfg(unix)] +use std::thread; +#[cfg(unix)] use tokio::runtime; -#[cfg(not(windows))] +#[cfg(unix)] fn criterion_benchmark(c: &mut Criterion) { - let (sock_a, sock_b) = UnixStream::pair().unwrap(); + let (conn_server, conn_client) = datadog_ipc::SeqpacketConn::socketpair().unwrap(); let worker = thread::spawn(move || { let rt = runtime::Builder::new_current_thread() .enable_all() .build() .unwrap(); - let _g = rt.enter(); - sock_a.set_nonblocking(true).unwrap(); let server = ExampleServer::default(); - - rt.block_on(server.accept_connection(Channel::from(sock_a))); + rt.block_on(server.accept_connection(conn_server)); }); - let mut transport = ExampleTransport::from(sock_b); - 
transport.set_nonblocking(false).unwrap(); + let mut channel = ExampleInterfaceChannel::new(conn_client); c.bench_function("write only interface", |b| { - b.iter(|| transport.send(&ExampleInterfaceRequest::Notify {}).unwrap()) + b.iter(|| channel.try_send_notify()) }); // This consistently blocks on aarch64 (both MacOS and Linux), is there an issue with the // optimized code? #[cfg(not(target_arch = "aarch64"))] c.bench_function("two way interface", |b| { - b.iter(|| transport.call(&ExampleInterfaceRequest::ReqCnt {}).unwrap()) + b.iter(|| channel.call_req_cnt().unwrap()) }); - // This consistently blocks on aarch64 (both MacOS and Linux), is there an issue with the - // optimized code? #[cfg(not(target_arch = "aarch64"))] - match transport.call(&ExampleInterfaceRequest::ReqCnt {}).unwrap() { - ExampleInterfaceResponse::ReqCnt(cnt) => { - println!("Total requests handled: {cnt}"); - } - _ => panic!("shouldn't happen"), - }; + println!("Total requests handled: {}", channel.call_req_cnt().unwrap()); - drop(transport); + drop(channel); worker.join().unwrap(); } -#[cfg(not(windows))] +#[cfg(unix)] criterion_group!(benches, criterion_benchmark); - -#[cfg(not(windows))] +#[cfg(unix)] criterion_main!(benches); #[cfg(windows)] diff --git a/datadog-ipc/plugins/Cargo.toml b/datadog-ipc/plugins/Cargo.toml deleted file mode 100644 index 18ed9e5dac..0000000000 --- a/datadog-ipc/plugins/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "tarpc-plugins" -version = "0.12.0" -rust-version = "1.56" -authors = ["Adam Wright ", "Tim Kuehn "] -edition = "2021" -license = "MIT" -documentation = "https://docs.rs/tarpc-plugins" -homepage = "https://github.com/google/tarpc" -repository = "https://github.com/google/tarpc" -keywords = ["rpc", "network", "server", "api", "microservices"] -categories = ["asynchronous", "network-programming"] -readme = "../README.md" -description = "Proc macros for tarpc." 
-publish = false - -[features] -serde1 = [] - -[badges] -travis-ci = { repository = "google/tarpc" } - -[dependencies] -proc-macro2 = "1.0" -quote = "1.0" -syn = { version = "1.0", features = ["full"] } - -[lib] -proc-macro = true -bench = false - -[dev-dependencies] -assert-type-eq = "0.1.0" -futures = "0.3" -serde = { version = "1.0", features = ["derive"] } -tarpc = { path = "../tarpc", features = ["serde1"] } diff --git a/datadog-ipc/plugins/LICENSE b/datadog-ipc/plugins/LICENSE deleted file mode 100644 index 9d6eea67e1..0000000000 --- a/datadog-ipc/plugins/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -The MIT License (MIT) - -Copyright 2016 Google Inc. All Rights Reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/datadog-ipc/plugins/rustfmt.toml b/datadog-ipc/plugins/rustfmt.toml deleted file mode 100644 index 32a9786fa1..0000000000 --- a/datadog-ipc/plugins/rustfmt.toml +++ /dev/null @@ -1 +0,0 @@ -edition = "2018" diff --git a/datadog-ipc/plugins/src/lib.rs b/datadog-ipc/plugins/src/lib.rs deleted file mode 100644 index a449b77aac..0000000000 --- a/datadog-ipc/plugins/src/lib.rs +++ /dev/null @@ -1,825 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -#![recursion_limit = "512"] - -extern crate proc_macro; -extern crate proc_macro2; -extern crate quote; -extern crate syn; - -use proc_macro::TokenStream; -use proc_macro2::{Span, TokenStream as TokenStream2}; -use quote::{format_ident, quote, ToTokens}; -use syn::{ - braced, - ext::IdentExt, - parenthesized, - parse::{Parse, ParseStream}, - parse_macro_input, parse_quote, parse_str, - spanned::Spanned, - token::Comma, - Attribute, FnArg, Ident, ImplItem, ImplItemMethod, ImplItemType, ItemImpl, Lit, LitBool, - MetaNameValue, Pat, PatType, ReturnType, Token, Type, Visibility, -}; - -/// Accumulates multiple errors into a result. -/// Only use this for recoverable errors, i.e. non-parse errors. Fatal errors should early exit to -/// avoid further complications. -macro_rules! 
extend_errors { - ($errors: ident, $e: expr) => { - match $errors { - Ok(_) => $errors = Err($e), - Err(ref mut errors) => errors.extend($e), - } - }; -} - -struct Service { - attrs: Vec, - vis: Visibility, - ident: Ident, - rpcs: Vec, -} - -struct RpcMethod { - attrs: Vec, - ident: Ident, - args: Vec, - output: ReturnType, -} - -impl Parse for Service { - fn parse(input: ParseStream) -> syn::Result { - let attrs = input.call(Attribute::parse_outer)?; - let vis = input.parse()?; - input.parse::()?; - let ident: Ident = input.parse()?; - let content; - braced!(content in input); - let mut rpcs = Vec::::new(); - while !content.is_empty() { - rpcs.push(content.parse()?); - } - let mut ident_errors = Ok(()); - for rpc in &rpcs { - if rpc.ident == "new" { - extend_errors!( - ident_errors, - syn::Error::new( - rpc.ident.span(), - format!( - "method name conflicts with generated fn `{}Client::new`", - ident.unraw() - ) - ) - ); - } - if rpc.ident == "serve" { - extend_errors!( - ident_errors, - syn::Error::new( - rpc.ident.span(), - format!("method name conflicts with generated fn `{ident}::serve`") - ) - ); - } - } - ident_errors?; - - Ok(Self { - attrs, - vis, - ident, - rpcs, - }) - } -} - -impl Parse for RpcMethod { - fn parse(input: ParseStream) -> syn::Result { - let attrs = input.call(Attribute::parse_outer)?; - input.parse::()?; - input.parse::()?; - let ident = input.parse()?; - let content; - parenthesized!(content in input); - let mut args = Vec::new(); - let mut errors = Ok(()); - for arg in content.parse_terminated::(FnArg::parse)? 
{ - match arg { - FnArg::Typed(captured) if matches!(&*captured.pat, Pat::Ident(_)) => { - args.push(captured); - } - FnArg::Typed(captured) => { - extend_errors!( - errors, - syn::Error::new(captured.pat.span(), "patterns aren't allowed in RPC args") - ); - } - FnArg::Receiver(_) => { - extend_errors!( - errors, - syn::Error::new(arg.span(), "method args cannot start with self") - ); - } - } - } - errors?; - let output = input.parse()?; - input.parse::()?; - - Ok(Self { - attrs, - ident, - args, - output, - }) - } -} - -// If `derive_serde` meta item is not present, defaults to cfg!(feature = "serde1"). -// `derive_serde` can only be true when serde1 is enabled. -struct DeriveSerde(bool); - -impl Parse for DeriveSerde { - fn parse(input: ParseStream) -> syn::Result { - let mut result = Ok(None); - let mut derive_serde = Vec::new(); - let meta_items = input.parse_terminated::(MetaNameValue::parse)?; - for meta in meta_items { - if meta.path.segments.len() != 1 { - extend_errors!( - result, - syn::Error::new( - meta.span(), - "tarpc::service does not support this meta item" - ) - ); - continue; - } - let segment = meta.path.segments.first().unwrap(); - if segment.ident != "derive_serde" { - extend_errors!( - result, - syn::Error::new( - meta.span(), - "tarpc::service does not support this meta item" - ) - ); - continue; - } - match meta.lit { - Lit::Bool(LitBool { value: true, .. }) if cfg!(feature = "serde1") => { - result = result.and(Ok(Some(true))) - } - Lit::Bool(LitBool { value: true, .. }) => { - extend_errors!( - result, - syn::Error::new( - meta.span(), - "To enable serde, first enable the `serde1` feature of tarpc" - ) - ); - } - Lit::Bool(LitBool { value: false, .. 
}) => result = result.and(Ok(Some(false))), - _ => extend_errors!( - result, - syn::Error::new( - meta.lit.span(), - "`derive_serde` expects a value of type `bool`" - ) - ), - } - derive_serde.push(meta); - } - if derive_serde.len() > 1 { - for (i, derive_serde) in derive_serde.iter().enumerate() { - extend_errors!( - result, - syn::Error::new( - derive_serde.span(), - format!( - "`derive_serde` appears more than once (occurrence #{})", - i + 1 - ) - ) - ); - } - } - let derive_serde = result?.unwrap_or(cfg!(feature = "serde1")); - Ok(Self(derive_serde)) - } -} - -/// A helper attribute to avoid a direct dependency on Serde. -/// -/// Adds the following annotations to the annotated item: -/// -/// ```rust -/// #[derive(tarpc::serde::Serialize, tarpc::serde::Deserialize)] -/// #[serde(crate = "tarpc::serde")] -/// # struct Foo; -/// ``` -#[proc_macro_attribute] -pub fn derive_serde(_attr: TokenStream, item: TokenStream) -> TokenStream { - let mut gen: proc_macro2::TokenStream = quote! { - #[derive(tarpc::serde::Serialize, tarpc::serde::Deserialize)] - #[serde(crate = "tarpc::serde")] - }; - gen.extend(proc_macro2::TokenStream::from(item)); - proc_macro::TokenStream::from(gen) -} - -/// Generates: -/// - service trait -/// - serve fn -/// - client stub struct -/// - new_stub client factory fn -/// - Request and Response enums -/// - ResponseFut Future -#[proc_macro_attribute] -pub fn service(attr: TokenStream, input: TokenStream) -> TokenStream { - let derive_serde = parse_macro_input!(attr as DeriveSerde); - let unit_type: &Type = &parse_quote!(()); - let Service { - ref attrs, - ref vis, - ref ident, - ref rpcs, - } = parse_macro_input!(input as Service); - - let camel_case_fn_names: &Vec<_> = &rpcs - .iter() - .map(|rpc| snake_to_camel(&rpc.ident.unraw().to_string())) - .collect(); - let args: &[&[PatType]] = &rpcs.iter().map(|rpc| &*rpc.args).collect::>(); - let response_fut_name = &format!("{}ResponseFut", ident.unraw()); - let derive_serialize = if 
derive_serde.0 { - Some( - quote! {#[derive(tarpc::serde::Serialize, tarpc::serde::Deserialize)] - #[serde(crate = "tarpc::serde")]}, - ) - } else { - None - }; - - let methods = rpcs.iter().map(|rpc| &rpc.ident).collect::>(); - let request_names = methods - .iter() - .map(|m| format!("{ident}.{m}")) - .collect::>(); - - ServiceGenerator { - response_fut_name, - service_ident: ident, - server_ident: &format_ident!("Serve{}", ident), - response_fut_ident: &Ident::new(response_fut_name, ident.span()), - client_ident: &format_ident!("{}Client", ident), - request_ident: &format_ident!("{}Request", ident), - response_ident: &format_ident!("{}Response", ident), - vis, - args, - method_attrs: &rpcs.iter().map(|rpc| &*rpc.attrs).collect::>(), - method_idents: &methods, - request_names: &request_names, - attrs, - rpcs, - return_types: &rpcs - .iter() - .map(|rpc| match rpc.output { - ReturnType::Type(_, ref ty) => ty, - ReturnType::Default => unit_type, - }) - .collect::>(), - arg_pats: &args - .iter() - .map(|args| args.iter().map(|arg| &*arg.pat).collect()) - .collect::>(), - camel_case_idents: &rpcs - .iter() - .zip(camel_case_fn_names.iter()) - .map(|(rpc, name)| Ident::new(name, rpc.ident.span())) - .collect::>(), - future_types: &camel_case_fn_names - .iter() - .map(|name| parse_str(&format!("{name}Fut")).unwrap()) - .collect::>(), - derive_serialize: derive_serialize.as_ref(), - } - .into_token_stream() - .into() -} - -/// generate an identifier consisting of the method name to CamelCase with -/// Fut appended to it. -fn associated_type_for_rpc(method: &ImplItemMethod) -> String { - snake_to_camel(&method.sig.ident.unraw().to_string()) + "Fut" -} - -/// Transforms an async function into a sync one, returning a type declaration -/// for the return type (a future). -fn transform_method(method: &mut ImplItemMethod) -> ImplItemType { - method.sig.asyncness = None; - - // get either the return type or (). 
- let ret = match &method.sig.output { - ReturnType::Default => quote!(()), - ReturnType::Type(_, ret) => quote!(#ret), - }; - - let fut_name = associated_type_for_rpc(method); - let fut_name_ident = Ident::new(&fut_name, method.sig.ident.span()); - - // generate the updated return signature. - method.sig.output = parse_quote! { - -> ::core::pin::Pin + ::core::marker::Send - >> - }; - - // transform the body of the method into Box::pin(async move { body }). - let block = method.block.clone(); - method.block = parse_quote! [{ - Box::pin(async move - #block - ) - }]; - - // generate and return type declaration for return type. - let t: ImplItemType = parse_quote! { - type #fut_name_ident = ::core::pin::Pin + ::core::marker::Send>>; - }; - - t -} - -#[proc_macro_attribute] -pub fn server(_attr: TokenStream, input: TokenStream) -> TokenStream { - let mut item = syn::parse_macro_input!(input as ItemImpl); - let span = item.span(); - - // the generated type declarations - let mut types: Vec = Vec::new(); - let mut expected_non_async_types: Vec<(&ImplItemMethod, String)> = Vec::new(); - let mut found_non_async_types: Vec<&ImplItemType> = Vec::new(); - - for inner in &mut item.items { - match inner { - ImplItem::Method(method) => { - if method.sig.asyncness.is_some() { - // if this function is declared async, transform it into a regular function - let typedecl = transform_method(method); - types.push(typedecl); - } else { - // If it's not async, keep track of all required associated types for better - // error reporting. 
- expected_non_async_types.push((method, associated_type_for_rpc(method))); - } - } - ImplItem::Type(typedecl) => found_non_async_types.push(typedecl), - _ => {} - } - } - - if let Err(e) = - verify_types_were_provided(span, &expected_non_async_types, &found_non_async_types) - { - return TokenStream::from(e.to_compile_error()); - } - - // add the type declarations into the impl block - for t in types.into_iter() { - item.items.push(syn::ImplItem::Type(t)); - } - - TokenStream::from(quote!(#item)) -} - -fn verify_types_were_provided( - span: Span, - expected: &[(&ImplItemMethod, String)], - provided: &[&ImplItemType], -) -> syn::Result<()> { - let mut result = Ok(()); - for (method, expected) in expected { - if !provided.iter().any(|typedecl| typedecl.ident == expected) { - let mut e = syn::Error::new( - span, - format!("not all trait items implemented, missing: `{expected}`"), - ); - let fn_span = method.sig.fn_token.span(); - e.extend(syn::Error::new( - fn_span.join(method.sig.ident.span()).unwrap_or(fn_span), - format!( - "hint: `#[tarpc::server]` only rewrites async fns, and `fn {}` is not async", - method.sig.ident - ), - )); - match result { - Ok(_) => result = Err(e), - Err(ref mut error) => error.extend(Some(e)), - } - } - } - result -} - -// Things needed to generate the service items: trait, serve impl, request/response enums, and -// the client stub. 
-struct ServiceGenerator<'a> { - service_ident: &'a Ident, - server_ident: &'a Ident, - response_fut_ident: &'a Ident, - response_fut_name: &'a str, - client_ident: &'a Ident, - request_ident: &'a Ident, - response_ident: &'a Ident, - vis: &'a Visibility, - attrs: &'a [Attribute], - rpcs: &'a [RpcMethod], - camel_case_idents: &'a [Ident], - future_types: &'a [Type], - method_idents: &'a [&'a Ident], - request_names: &'a [String], - method_attrs: &'a [&'a [Attribute]], - args: &'a [&'a [PatType]], - return_types: &'a [&'a Type], - arg_pats: &'a [Vec<&'a Pat>], - derive_serialize: Option<&'a TokenStream2>, -} - -impl ServiceGenerator<'_> { - fn trait_service(&self) -> TokenStream2 { - let &Self { - attrs, - rpcs, - vis, - future_types, - return_types, - service_ident, - server_ident, - .. - } = self; - - let types_and_fns = rpcs - .iter() - .zip(future_types.iter()) - .zip(return_types.iter()) - .map( - |( - ( - RpcMethod { - attrs, ident, args, .. - }, - future_type, - ), - output, - )| { - let ty_doc = format!("The response future returned by [`{service_ident}::{ident}`]."); - quote! { - #[doc = #ty_doc] - type #future_type: std::future::Future; - - #( #attrs )* - fn #ident(self, context: tarpc::context::Context, #( #args ),*) -> Self::#future_type; - } - }, - ); - - quote! { - #( #attrs )* - #vis trait #service_ident: Sized { - #( #types_and_fns )* - - /// Returns a serving function to use with - /// [InFlightRequest::execute](tarpc::server::InFlightRequest::execute). - fn serve(self) -> #server_ident { - #server_ident { service: self } - } - } - } - } - - fn struct_server(&self) -> TokenStream2 { - let &Self { - vis, server_ident, .. - } = self; - - quote! { - /// A serving function to use with [tarpc::server::InFlightRequest::execute]. 
- #[derive(Clone)] - #vis struct #server_ident { - service: S, - } - } - } - - fn impl_serve_for_server(&self) -> TokenStream2 { - let &Self { - request_ident, - server_ident, - service_ident, - response_ident, - response_fut_ident, - camel_case_idents, - arg_pats, - method_idents, - request_names, - .. - } = self; - - quote! { - impl tarpc::server::Serve<#request_ident> for #server_ident - where S: #service_ident - { - type Resp = #response_ident; - type Fut = #response_fut_ident; - - fn method(&self, req: &#request_ident) -> Option<&'static str> { - Some(match req { - #( - #request_ident::#camel_case_idents{..} => { - #request_names - } - )* - }) - } - - fn serve(self, ctx: tarpc::context::Context, req: #request_ident) -> Self::Fut { - match req { - #( - #request_ident::#camel_case_idents{ #( #arg_pats ),* } => { - #response_fut_ident::#camel_case_idents( - #service_ident::#method_idents( - self.service, ctx, #( #arg_pats ),* - ) - ) - } - )* - } - } - } - } - } - - fn enum_request(&self) -> TokenStream2 { - let &Self { - derive_serialize, - vis, - request_ident, - camel_case_idents, - args, - .. - } = self; - - quote! { - /// The request sent over the wire from the client to the server. - #[allow(missing_docs)] - #[derive(Debug)] - #derive_serialize - #vis enum #request_ident { - #( #camel_case_idents{ #( #args ),* } ),* - } - } - } - - fn enum_response(&self) -> TokenStream2 { - let &Self { - derive_serialize, - vis, - response_ident, - camel_case_idents, - return_types, - .. - } = self; - - quote! { - /// The response sent over the wire from the server to the client. - #[allow(missing_docs)] - #[derive(Debug)] - #derive_serialize - #vis enum #response_ident { - #( #camel_case_idents(#return_types) ),* - } - } - } - - fn enum_response_future(&self) -> TokenStream2 { - let &Self { - vis, - service_ident, - response_fut_ident, - camel_case_idents, - future_types, - .. - } = self; - - quote! { - /// A future resolving to a server response. 
- #[allow(missing_docs)] - #vis enum #response_fut_ident { - #( #camel_case_idents(::#future_types) ),* - } - } - } - - fn impl_debug_for_response_future(&self) -> TokenStream2 { - let &Self { - service_ident, - response_fut_ident, - response_fut_name, - .. - } = self; - - quote! { - impl std::fmt::Debug for #response_fut_ident { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct(#response_fut_name).finish() - } - } - } - } - - fn impl_future_for_response_future(&self) -> TokenStream2 { - let &Self { - service_ident, - response_fut_ident, - response_ident, - camel_case_idents, - .. - } = self; - - quote! { - impl std::future::Future for #response_fut_ident { - type Output = #response_ident; - - fn poll(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) - -> std::task::Poll<#response_ident> - { - unsafe { - match std::pin::Pin::get_unchecked_mut(self) { - #( - #response_fut_ident::#camel_case_idents(resp) => - std::pin::Pin::new_unchecked(resp) - .poll(cx) - .map(#response_ident::#camel_case_idents), - )* - } - } - } - } - } - } - - fn struct_client(&self) -> TokenStream2 { - let &Self { - vis, - client_ident, - request_ident, - response_ident, - .. - } = self; - - quote! { - #[allow(unused)] - #[derive(Clone, Debug)] - /// The client stub that makes RPC calls to the server. All request methods return - /// [Futures](std::future::Future). - #vis struct #client_ident(tarpc::client::Channel<#request_ident, #response_ident>); - } - } - - fn impl_client_new(&self) -> TokenStream2 { - let &Self { - client_ident, - vis, - request_ident, - response_ident, - .. - } = self; - - quote! { - impl #client_ident { - /// Returns a new client stub that sends requests over the given transport. 
- #vis fn new(config: tarpc::client::Config, transport: T) - -> tarpc::client::NewClient< - Self, - tarpc::client::RequestDispatch<#request_ident, #response_ident, T> - > - where - T: tarpc::Transport, tarpc::Response<#response_ident>> - { - let new_client = tarpc::client::new(config, transport); - tarpc::client::NewClient { - client: #client_ident(new_client.client), - dispatch: new_client.dispatch, - } - } - - } - } - } - - fn impl_client_rpc_methods(&self) -> TokenStream2 { - let &Self { - client_ident, - request_ident, - response_ident, - method_attrs, - vis, - method_idents, - request_names, - args, - return_types, - arg_pats, - camel_case_idents, - .. - } = self; - - quote! { - impl #client_ident { - #( - #[allow(unused)] - #( #method_attrs )* - #vis fn #method_idents(&self, ctx: tarpc::context::Context, #( #args ),*) - -> impl std::future::Future> + '_ { - let request = #request_ident::#camel_case_idents { #( #arg_pats ),* }; - let resp = self.0.call(ctx, #request_names, request); - async move { - match resp.await? 
{ - #response_ident::#camel_case_idents(msg) => std::result::Result::Ok(msg), - _ => unreachable!(), - } - } - } - )* - } - } - } -} - -impl ToTokens for ServiceGenerator<'_> { - fn to_tokens(&self, output: &mut TokenStream2) { - output.extend(vec![ - self.trait_service(), - self.struct_server(), - self.impl_serve_for_server(), - self.enum_request(), - self.enum_response(), - self.enum_response_future(), - self.impl_debug_for_response_future(), - self.impl_future_for_response_future(), - self.struct_client(), - self.impl_client_new(), - self.impl_client_rpc_methods(), - ]) - } -} - -fn snake_to_camel(ident_str: &str) -> String { - let mut camel_ty = String::with_capacity(ident_str.len()); - - let mut last_char_was_underscore = true; - for c in ident_str.chars() { - match c { - '_' => last_char_was_underscore = true, - c if last_char_was_underscore => { - camel_ty.extend(c.to_uppercase()); - last_char_was_underscore = false; - } - c => camel_ty.extend(c.to_lowercase()), - } - } - - camel_ty.shrink_to_fit(); - camel_ty -} - -#[test] -fn snake_to_camel_basic() { - assert_eq!(snake_to_camel("abc_def"), "AbcDef"); -} - -#[test] -fn snake_to_camel_underscore_suffix() { - assert_eq!(snake_to_camel("abc_def_"), "AbcDef"); -} - -#[test] -fn snake_to_camel_underscore_prefix() { - assert_eq!(snake_to_camel("_abc_def"), "AbcDef"); -} - -#[test] -fn snake_to_camel_underscore_consecutive() { - assert_eq!(snake_to_camel("abc__def"), "AbcDef"); -} - -#[test] -fn snake_to_camel_capital_in_middle() { - assert_eq!(snake_to_camel("aBc_dEf"), "AbcDef"); -} diff --git a/datadog-ipc/plugins/tests/server.rs b/datadog-ipc/plugins/tests/server.rs deleted file mode 100644 index c2e48d866a..0000000000 --- a/datadog-ipc/plugins/tests/server.rs +++ /dev/null @@ -1,145 +0,0 @@ -use assert_type_eq::assert_type_eq; -use futures::Future; -use std::pin::Pin; -use tarpc::context; - -// these need to be out here rather than inside the function so that the -// assert_type_eq macro can pick them up. 
-#[tarpc::service] -trait Foo { - async fn two_part(s: String, i: i32) -> (String, i32); - async fn bar(s: String) -> String; - async fn baz(); -} - -#[test] -fn type_generation_works() { - #[allow(non_local_definitions)] - #[tarpc::server] - impl Foo for () { - async fn two_part(self, _: context::Context, s: String, i: i32) -> (String, i32) { - (s, i) - } - - async fn bar(self, _: context::Context, s: String) -> String { - s - } - - async fn baz(self, _: context::Context) {} - } - - // the assert_type_eq macro can only be used once per block. - { - assert_type_eq!( - <() as Foo>::TwoPartFut, - Pin + Send>> - ); - } - { - assert_type_eq!( - <() as Foo>::BarFut, - Pin + Send>> - ); - } - { - assert_type_eq!( - <() as Foo>::BazFut, - Pin + Send>> - ); - } -} - -#[allow(non_camel_case_types)] -#[test] -fn raw_idents_work() { - type r#yield = String; - - #[tarpc::service] - trait r#trait { - async fn r#await(r#struct: r#yield, r#enum: i32) -> (r#yield, i32); - async fn r#fn(r#impl: r#yield) -> r#yield; - async fn r#async(); - } - - #[tarpc::server] - impl r#trait for () { - async fn r#await( - self, - _: context::Context, - r#struct: r#yield, - r#enum: i32, - ) -> (r#yield, i32) { - (r#struct, r#enum) - } - - async fn r#fn(self, _: context::Context, r#impl: r#yield) -> r#yield { - r#impl - } - - async fn r#async(self, _: context::Context) {} - } -} - -#[test] -fn syntax() { - #[tarpc::service] - trait Syntax { - #[deny(warnings)] - #[allow(non_snake_case)] - async fn TestCamelCaseDoesntConflict(); - async fn hello() -> String; - #[doc = "attr"] - async fn attr(s: String) -> String; - async fn no_args_no_return(); - async fn no_args() -> (); - async fn one_arg(one: String) -> i32; - async fn two_args_no_return(one: String, two: u64); - async fn two_args(one: String, two: u64) -> String; - async fn no_args_ret_error() -> i32; - async fn one_arg_ret_error(one: String) -> String; - async fn no_arg_implicit_return_error(); - #[doc = "attr"] - async fn 
one_arg_implicit_return_error(one: String); - } - - #[tarpc::server] - impl Syntax for () { - #[deny(warnings)] - #[allow(non_snake_case)] - async fn TestCamelCaseDoesntConflict(self, _: context::Context) {} - - async fn hello(self, _: context::Context) -> String { - String::new() - } - - async fn attr(self, _: context::Context, _s: String) -> String { - String::new() - } - - async fn no_args_no_return(self, _: context::Context) {} - - async fn no_args(self, _: context::Context) -> () {} - - async fn one_arg(self, _: context::Context, _one: String) -> i32 { - 0 - } - - async fn two_args_no_return(self, _: context::Context, _one: String, _two: u64) {} - - async fn two_args(self, _: context::Context, _one: String, _two: u64) -> String { - String::new() - } - - async fn no_args_ret_error(self, _: context::Context) -> i32 { - 0 - } - - async fn one_arg_ret_error(self, _: context::Context, _one: String) -> String { - String::new() - } - - async fn no_arg_implicit_return_error(self, _: context::Context) {} - - async fn one_arg_implicit_return_error(self, _: context::Context, _one: String) {} - } -} diff --git a/datadog-ipc/plugins/tests/service.rs b/datadog-ipc/plugins/tests/service.rs deleted file mode 100644 index b37cbcead5..0000000000 --- a/datadog-ipc/plugins/tests/service.rs +++ /dev/null @@ -1,85 +0,0 @@ -use tarpc::context; - -#[test] -fn att_service_trait() { - use futures::future::{ready, Ready}; - - #[tarpc::service] - trait Foo { - async fn two_part(s: String, i: i32) -> (String, i32); - async fn bar(s: String) -> String; - async fn baz(); - } - - impl Foo for () { - type TwoPartFut = Ready<(String, i32)>; - fn two_part(self, _: context::Context, s: String, i: i32) -> Self::TwoPartFut { - ready((s, i)) - } - - type BarFut = Ready; - fn bar(self, _: context::Context, s: String) -> Self::BarFut { - ready(s) - } - - type BazFut = Ready<()>; - fn baz(self, _: context::Context) -> Self::BazFut { - ready(()) - } - } -} - -#[allow(non_camel_case_types)] -#[test] -fn 
raw_idents() { - use futures::future::{ready, Ready}; - - type r#yield = String; - - #[tarpc::service] - trait r#trait { - async fn r#await(r#struct: r#yield, r#enum: i32) -> (r#yield, i32); - async fn r#fn(r#impl: r#yield) -> r#yield; - async fn r#async(); - } - - impl r#trait for () { - type AwaitFut = Ready<(r#yield, i32)>; - fn r#await(self, _: context::Context, r#struct: r#yield, r#enum: i32) -> Self::AwaitFut { - ready((r#struct, r#enum)) - } - - type FnFut = Ready; - fn r#fn(self, _: context::Context, r#impl: r#yield) -> Self::FnFut { - ready(r#impl) - } - - type AsyncFut = Ready<()>; - fn r#async(self, _: context::Context) -> Self::AsyncFut { - ready(()) - } - } -} - -#[test] -fn syntax() { - #[tarpc::service] - trait Syntax { - #[deny(warnings)] - #[allow(non_snake_case)] - async fn TestCamelCaseDoesntConflict(); - async fn hello() -> String; - #[doc = "attr"] - async fn attr(s: String) -> String; - async fn no_args_no_return(); - async fn no_args() -> (); - async fn one_arg(one: String) -> i32; - async fn two_args_no_return(one: String, two: u64); - async fn two_args(one: String, two: u64) -> String; - async fn no_args_ret_error() -> i32; - async fn one_arg_ret_error(one: String) -> String; - async fn no_arg_implicit_return_error(); - #[doc = "attr"] - async fn one_arg_implicit_return_error(one: String); - } -} diff --git a/datadog-ipc/src/client.rs b/datadog-ipc/src/client.rs new file mode 100644 index 0000000000..cefa0ff910 --- /dev/null +++ b/datadog-ipc/src/client.rs @@ -0,0 +1,141 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +//! Generic IPC client connection state shared by all generated channel types. 
+ +use crate::platform::{SeqpacketConn, HANDLE_SUFFIX_SIZE, MAX_MESSAGE_SIZE}; + +#[cfg(unix)] +use std::os::unix::io::{OwnedFd, RawFd}; +#[cfg(windows)] +use std::os::windows::io::{OwnedHandle as OwnedFd, RawHandle as RawFd}; + +use std::io; +use std::time::Duration; + +/// Maximum number of fire-and-forget messages that may be outstanding +/// (sent but not yet acked) before the client blocks or drops new messages. +pub const MAX_OUTSTANDING: u64 = 100; + +/// Client-side state for a single IPC connection. +/// +/// Tracks in-flight message counts for ack-based flow control. +/// `SeqpacketConn` is non-blocking; blocking behavior is implemented via +/// `libc::poll` internally. +pub struct IpcClientConn { + pub conn: SeqpacketConn, + /// Number of messages sent (incremented on each successful send). + send_count: u64, + /// Number of server replies received (acks or typed responses). + ack_count: u64, + /// Maximum allowed `send_count - ack_count` before `try_send` returns false. + pub max_outstanding: u64, + /// Reusable receive buffer. Sized to hold a maximum payload plus the platform wire overhead + /// (`HANDLE_SUFFIX_SIZE`), so that messages can be read directly without an intermediate copy. + recv_buf: Vec, + /// Set to true when a fatal I/O error occurs on send or receive. + closed: bool, +} + +impl IpcClientConn { + pub fn new(conn: SeqpacketConn) -> Self { + Self { + conn, + send_count: 0, + ack_count: 0, + max_outstanding: MAX_OUTSTANDING, + recv_buf: vec![0u8; MAX_MESSAGE_SIZE + HANDLE_SUFFIX_SIZE], + closed: false, + } + } + + pub fn set_read_timeout(&mut self, d: Option) -> io::Result<()> { + self.conn.set_read_timeout(d) + } + + pub fn set_write_timeout(&mut self, d: Option) -> io::Result<()> { + self.conn.set_write_timeout(d) + } + + /// Returns `true` if a fatal I/O error has occurred on this connection. + pub fn is_closed(&self) -> bool { + self.closed + } + + /// Number of sent-but-not-yet-acked messages. 
+ pub fn outstanding(&self) -> u64 { + self.send_count - self.ack_count + } + + /// Non-blocking drain of all pending acks. Updates `ack_count`. + pub fn drain_acks(&mut self) { + loop { + match self.conn.try_recv_raw(&mut self.recv_buf) { + Ok(_) => self.ack_count += 1, + Err(_) => break, + } + } + } + + /// Non-blocking send. + /// + /// First drains pending acks, then checks the outstanding limit. + /// Returns `false` if the socket would block (EAGAIN) or the outstanding + /// limit has been reached. `data` is unmodified after the call. + pub fn try_send(&mut self, data: &mut Vec, fds: &[RawFd]) -> bool { + self.drain_acks(); + if self.outstanding() >= self.max_outstanding { + return false; + } + match self.conn.try_send_raw(data, fds) { + Ok(()) => { + self.send_count += 1; + true + } + Err(e) if e.kind() == io::ErrorKind::WouldBlock => false, + Err(_) => { + // Fatal error (e.g. EPIPE): mark connection as closed. + self.closed = true; + false + } + } + } + + /// Blocking send (no response wait). + /// + /// Used when draining the outbox of state-change messages. + pub fn send_blocking(&mut self, data: &mut Vec, fds: &[RawFd]) -> io::Result<()> { + self.conn.send_raw_blocking(data, fds).map_err(|e| { + self.closed = true; + e + })?; + self.send_count += 1; + Ok(()) + } + + /// Blocking send + blocking receive of response. + /// + /// Sends `data`/`fds` (blocking), then receives in a loop, skipping any + /// intermediate 0-byte acks for prior fire-and-forget messages, until the + /// ack for this specific send arrives. Returns the response bytes and any + /// transferred file descriptors. 
+ pub fn call(&mut self, data: &mut Vec, fds: &[RawFd]) -> io::Result<(Vec, Vec)> { + self.conn.send_raw_blocking(data, fds).map_err(|e| { + self.closed = true; + e + })?; + self.send_count += 1; + let target = self.send_count; + loop { + let (n, resp_fds) = self.conn.recv_raw_blocking(&mut self.recv_buf).map_err(|e| { + self.closed = true; + e + })?; + self.ack_count += 1; + if self.ack_count == target { + return Ok((self.recv_buf[..n].to_vec(), resp_fds)); + } + // Intermediate ack for a prior fire-and-forget message — discard. + } + } +} diff --git a/datadog-ipc/src/codec.rs b/datadog-ipc/src/codec.rs new file mode 100644 index 0000000000..f2c5119b71 --- /dev/null +++ b/datadog-ipc/src/codec.rs @@ -0,0 +1,62 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +//! Codec for IPC messages. +//! +//! Request wire format: `[4 bytes: u32 LE discriminant][N bytes: bincode payload]` +//! Response wire format: `[N bytes: bincode payload]` (no discriminant) +//! Ack wire format: `[0 bytes]` (empty datagram) + +use serde::{de::DeserializeOwned, Serialize}; +use std::fmt; + +pub const DISCRIMINANT_SIZE: usize = 4; + +/// Encode a request: 4-byte LE discriminant prefix + bincode payload. +pub fn encode(discriminant: u32, value: &T) -> Vec { + let payload = bincode::serialize(value).unwrap_or_default(); + let mut buf = Vec::with_capacity(DISCRIMINANT_SIZE + payload.len()); + buf.extend_from_slice(&discriminant.to_le_bytes()); + buf.extend_from_slice(&payload); + buf +} + +/// Decode a request: returns `(discriminant, value)`. 
+pub fn decode(buf: &[u8]) -> Result<(u32, T), DecodeError> { + if buf.len() < DISCRIMINANT_SIZE { + return Err(DecodeError::TooShort); + } + let disc_bytes: [u8; 4] = buf[..DISCRIMINANT_SIZE].try_into().unwrap_or([0u8; 4]); + let discriminant = u32::from_le_bytes(disc_bytes); + let value = bincode::deserialize(&buf[DISCRIMINANT_SIZE..]).map_err(DecodeError::Bincode)?; + Ok((discriminant, value)) +} + +/// Encode a response (no discriminant prefix). +pub fn encode_response(value: &T) -> Vec { + bincode::serialize(value).unwrap_or_default() +} + +/// Decode a response (no discriminant prefix). +pub fn decode_response(buf: &[u8]) -> Result { + bincode::deserialize(buf).map_err(DecodeError::Bincode) +} + +#[derive(Debug)] +pub enum DecodeError { + TooShort, + Bincode(bincode::Error), + Io(std::io::Error), +} + +impl fmt::Display for DecodeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecodeError::TooShort => write!(f, "IPC message too short (missing discriminant)"), + DecodeError::Bincode(e) => write!(f, "IPC bincode decode error: {e}"), + DecodeError::Io(e) => write!(f, "IPC I/O error: {e}"), + } + } +} + +impl std::error::Error for DecodeError {} diff --git a/datadog-ipc/src/example_interface.rs b/datadog-ipc/src/example_interface.rs index ff92b9a009..3b9aa85936 100644 --- a/datadog-ipc/src/example_interface.rs +++ b/datadog-ipc/src/example_interface.rs @@ -9,95 +9,73 @@ use std::{ time::{Duration, Instant}, }; -use futures::future::{pending, ready, Pending, Ready}; -use tarpc::context::Context; -use tarpc::server::Channel; - -use super::{ - platform::PlatformHandle, - transport::{blocking::BlockingTransport, Transport}, -}; +use super::platform::PlatformHandle; extern crate self as datadog_ipc; -#[datadog_ipc_macros::impl_transfer_handles] -#[tarpc::service] +#[datadog_ipc_macros::service] pub trait ExampleInterface { - async fn notify() -> (); - async fn ping() -> (); + async fn notify(); + #[blocking] + async fn ping(); 
async fn time_now() -> Duration; async fn req_cnt() -> u32; - async fn store_file(#[SerializedHandle] file: PlatformHandle) -> (); - #[SerializedHandle] - async fn retrieve_file() -> Option>; + async fn store_file(#[SerializedHandle] file: PlatformHandle); } -pub type ExampleTransport = BlockingTransport; - -#[derive(Default, Clone, Debug)] +#[derive(Default, Clone)] pub struct ExampleServer { req_cnt: Arc, stored_files: Arc>>>, } +#[cfg(unix)] impl ExampleServer { - pub async fn accept_connection(self, channel: crate::platform::Channel) { - #[allow(clippy::unwrap_used)] - let server = tarpc::server::BaseChannel::new( - tarpc::server::Config::default(), - Transport::try_from(channel).unwrap(), - ); - - server.execute(self.serve()).await + pub async fn accept_connection(self, conn: crate::SeqpacketConn) { + serve_example_interface_connection(conn, Arc::new(self)).await } } impl ExampleInterface for ExampleServer { - type PingFut = Ready<()>; - - fn ping(self, _: Context) -> Self::PingFut { + fn notify( + &self, + _peer: datadog_ipc::PeerCredentials, + ) -> impl std::future::Future + Send + '_ { self.req_cnt.fetch_add(1, Ordering::AcqRel); - ready(()) + std::future::ready(()) } - type NotifyFut = Pending<()>; - - fn notify(self, _: Context) -> Self::NotifyFut { + fn ping( + &self, + _peer: datadog_ipc::PeerCredentials, + ) -> impl std::future::Future + Send + '_ { self.req_cnt.fetch_add(1, Ordering::AcqRel); - pending() // returning pending future, ensures the RPC system will not try to return a - // response to the client + std::future::ready(()) } - type TimeNowFut = Ready; - - fn time_now(self, _: Context) -> Self::TimeNowFut { + fn time_now( + &self, + _peer: datadog_ipc::PeerCredentials, + ) -> impl std::future::Future + Send + '_ { self.req_cnt.fetch_add(1, Ordering::AcqRel); - ready(Instant::now().elapsed()) + std::future::ready(Instant::now().elapsed()) } - type ReqCntFut = Ready; - - fn req_cnt(self, _: Context) -> Self::ReqCntFut { - 
ready(self.req_cnt.fetch_add(1, Ordering::AcqRel)) + fn req_cnt( + &self, + _peer: datadog_ipc::PeerCredentials, + ) -> impl std::future::Future + Send + '_ { + std::future::ready(self.req_cnt.fetch_add(1, Ordering::AcqRel)) } - type StoreFileFut = Ready<()>; - - fn store_file(self, _: Context, file: PlatformHandle) -> Self::StoreFileFut { + fn store_file( + &self, + _peer: datadog_ipc::PeerCredentials, + file: PlatformHandle, + ) -> impl std::future::Future + Send + '_ { self.req_cnt.fetch_add(1, Ordering::AcqRel); - #[allow(clippy::unwrap_used)] self.stored_files.lock().unwrap().push(file); - - ready(()) - } - - type RetrieveFileFut = Ready>>; - - fn retrieve_file(self, _: Context) -> Self::RetrieveFileFut { - self.req_cnt.fetch_add(1, Ordering::AcqRel); - - #[allow(clippy::unwrap_used)] - ready(self.stored_files.lock().unwrap().pop()) + std::future::ready(()) } } diff --git a/datadog-ipc/src/handles.rs b/datadog-ipc/src/handles.rs index 369cffb560..0382dafcce 100644 --- a/datadog-ipc/src/handles.rs +++ b/datadog-ipc/src/handles.rs @@ -45,126 +45,141 @@ impl TransferHandles for &T { } } -mod transport_impls { - use super::{HandlesTransport, TransferHandles}; +/// Collects raw file descriptors to be sent via `SCM_RIGHTS` alongside a message. 
+#[cfg(unix)] +pub struct FdSink(Vec); + +#[cfg(unix)] +impl FdSink { + pub fn new() -> Self { + FdSink(Vec::new()) + } - impl TransferHandles for Result - where - T: TransferHandles, - { - fn copy_handles(&self, transport: Transport) -> Result<(), Transport::Error> - where - Transport: HandlesTransport, - { - match self { - Ok(i) => i.copy_handles(transport), - Err(_) => Ok(()), - } - } + pub fn fds(&self) -> &[std::os::unix::io::RawFd] { + &self.0 + } - fn receive_handles( - &mut self, - transport: Transport, - ) -> Result<(), Transport::Error> - where - Transport: HandlesTransport, - { - match self { - Ok(i) => i.receive_handles(transport), - Err(_) => Ok(()), - } - } + pub fn into_fds(self) -> Vec { + self.0 } +} - impl TransferHandles for Option - where - T: TransferHandles, - { - fn copy_handles( - &self, - transport: Transport, - ) -> Result<(), Transport::Error> { - match self { - Some(s) => s.copy_handles(transport), - None => Ok(()), - } - } +#[cfg(unix)] +impl Default for FdSink { + fn default() -> Self { + Self::new() + } +} + +#[cfg(unix)] +impl HandlesTransport for &mut FdSink { + type Error = std::convert::Infallible; - fn receive_handles( - &mut self, - transport: Transport, - ) -> Result<(), Transport::Error> { - match self { - Some(s) => s.receive_handles(transport), - #[allow(clippy::todo)] - None => todo!(), - } + fn copy_handle(self, handle: super::platform::PlatformHandle) -> Result<(), Self::Error> { + if let Some(owned) = &handle.inner { + use std::os::unix::io::AsRawFd; + self.0.push(owned.as_raw_fd()); } + Ok(()) + } + + fn provide_handle( + self, + _hint: &super::platform::PlatformHandle, + ) -> Result, Self::Error> { + unreachable!("FdSink::provide_handle should never be called") } +} - use tarpc::{ClientMessage, Request, Response}; +/// Distributes received `SCM_RIGHTS` file descriptors into `PlatformHandle` fields. +/// +/// Created fresh for each received message — no global fd queue, no fd stranding. 
+#[cfg(unix)] +pub struct FdSource(std::collections::VecDeque); - impl TransferHandles for Response { - fn copy_handles( - &self, - transport: Transport, - ) -> Result<(), Transport::Error> { - self.message.copy_handles(transport) - } +#[cfg(unix)] +impl FdSource { + pub fn new(fds: Vec) -> Self { + FdSource(fds.into_iter().collect()) + } +} - fn receive_handles( - &mut self, - transport: Transport, - ) -> Result<(), Transport::Error> { - self.message.receive_handles(transport) - } +#[cfg(unix)] +impl HandlesTransport for &mut FdSource { + type Error = std::io::Error; + + fn copy_handle( + self, + _handle: super::platform::PlatformHandle, + ) -> Result<(), Self::Error> { + Ok(()) } - impl TransferHandles for ClientMessage + fn provide_handle( + self, + _hint: &super::platform::PlatformHandle, + ) -> Result, Self::Error> { + use std::os::unix::io::{FromRawFd, IntoRawFd}; + let fd = self.0.pop_front().ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::UnexpectedEof, + "no more SCM_RIGHTS file descriptors available", + ) + })?; + Ok(unsafe { super::platform::PlatformHandle::from_raw_fd(fd.into_raw_fd()) }) + } +} + +impl TransferHandles for Result +where + T: TransferHandles, +{ + fn copy_handles(&self, transport: Transport) -> Result<(), Transport::Error> where - T: TransferHandles, + Transport: HandlesTransport, { - fn copy_handles(&self, mover: M) -> Result<(), M::Error> - where - M: HandlesTransport, - { - match self { - ClientMessage::Request(r) => r.copy_handles(mover), - ClientMessage::Cancel { - trace_context: _, - request_id: _, - } => Ok(()), - _ => Ok(()), - } + match self { + Ok(i) => i.copy_handles(transport), + Err(_) => Ok(()), } - fn receive_handles

(&mut self, provider: P) -> Result<(), P::Error> - where - P: HandlesTransport, - { - match self { - ClientMessage::Request(r) => r.receive_handles(provider), - ClientMessage::Cancel { - trace_context: _, - request_id: _, - } => Ok(()), - _ => Ok(()), - } + } + + fn receive_handles( + &mut self, + transport: Transport, + ) -> Result<(), Transport::Error> + where + Transport: HandlesTransport, + { + match self { + Ok(i) => i.receive_handles(transport), + Err(_) => Ok(()), } } +} - impl TransferHandles for Request { - fn receive_handles

(&mut self, provider: P) -> Result<(), P::Error> - where - P: HandlesTransport, - { - self.message.receive_handles(provider) +impl TransferHandles for Option +where + T: TransferHandles, +{ + fn copy_handles( + &self, + transport: Transport, + ) -> Result<(), Transport::Error> { + match self { + Some(s) => s.copy_handles(transport), + None => Ok(()), } + } - fn copy_handles(&self, mover: M) -> Result<(), M::Error> - where - M: HandlesTransport, - { - self.message.copy_handles(mover) + fn receive_handles( + &mut self, + transport: Transport, + ) -> Result<(), Transport::Error> { + match self { + Some(s) => s.receive_handles(transport), + #[allow(clippy::todo)] + None => todo!(), } } } diff --git a/datadog-ipc/src/lib.rs b/datadog-ipc/src/lib.rs index d9ba538911..e19925616e 100644 --- a/datadog-ipc/src/lib.rs +++ b/datadog-ipc/src/lib.rs @@ -9,10 +9,16 @@ pub mod example_interface; pub mod handles; -pub mod transport; pub mod platform; pub mod rate_limiter; -pub mod sequential; -pub use tarpc; +pub mod codec; +pub mod client; + +pub use platform::{ + PeerCredentials, SeqpacketConn, SeqpacketListener, HANDLE_SUFFIX_SIZE, MAX_MESSAGE_SIZE, +}; +#[cfg(unix)] +pub use platform::{recv_raw_async, send_raw_async}; +pub use client::IpcClientConn; diff --git a/datadog-ipc/src/platform/channel/metadata.rs b/datadog-ipc/src/platform/channel/metadata.rs deleted file mode 100644 index 164025da56..0000000000 --- a/datadog-ipc/src/platform/channel/metadata.rs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use crate::handles::HandlesTransport; -use crate::platform::metadata::ChannelMetadata; -use crate::platform::PlatformHandle; -use std::io; -#[cfg(unix)] -use std::os::unix::prelude::AsRawFd; -#[cfg(windows)] -use std::os::windows::io::AsRawHandle; - -impl HandlesTransport for &mut ChannelMetadata { - type Error = io::Error; - - fn copy_handle<'h, T>(self, handle: PlatformHandle) -> Result<(), Self::Error> { - self.enqueue_for_sending(handle); - - Ok(()) - } - - fn provide_handle(self, hint: &PlatformHandle) -> Result, Self::Error> { - self.find_handle(hint).ok_or_else(|| { - #[cfg(unix)] - let handle = hint.as_raw_fd(); - #[cfg(windows)] - let handle = hint.as_raw_handle(); - io::Error::other(format!( - "can't provide expected handle for hint: {handle:?}" - )) - }) - } -} diff --git a/datadog-ipc/src/platform/channel/mod.rs b/datadog-ipc/src/platform/channel/mod.rs deleted file mode 100644 index 33f26a0578..0000000000 --- a/datadog-ipc/src/platform/channel/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -mod metadata; diff --git a/datadog-ipc/src/platform/mod.rs b/datadog-ipc/src/platform/mod.rs index 3d57f73075..95498c594e 100644 --- a/datadog-ipc/src/platform/mod.rs +++ b/datadog-ipc/src/platform/mod.rs @@ -16,6 +16,4 @@ pub use unix::*; #[cfg(windows)] pub use windows::*; -mod channel; - mod message; diff --git a/datadog-ipc/src/platform/unix/channel.rs b/datadog-ipc/src/platform/unix/channel.rs deleted file mode 100644 index 9af122168c..0000000000 --- a/datadog-ipc/src/platform/unix/channel.rs +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use crate::handles::TransferHandles; -use crate::platform::{Message, PlatformHandle}; -use nix::poll::{poll, PollFd, PollFlags, PollTimeout}; -use std::{ - io::{self, ErrorKind, Read, Write}, - os::{ - fd::AsFd, - unix::{ - net::UnixStream, - prelude::{AsRawFd, RawFd}, - }, - }, - time::Duration, -}; - -pub mod async_channel; -pub use async_channel::*; -pub mod metadata; - -use sendfd::{RecvWithFd, SendWithFd}; - -use self::metadata::ChannelMetadata; - -use super::MAX_FDS; - -#[derive(Debug)] -pub struct Channel { - inner: PlatformHandle, - pub metadata: ChannelMetadata, -} - -impl Clone for Channel { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - metadata: Default::default(), - } - } -} - -impl Channel { - pub fn set_read_timeout(&mut self, timeout: Option) -> io::Result<()> { - let sock = self.inner.as_socketlike_view()?; - sock.set_read_timeout(timeout) - } - - pub fn set_write_timeout(&mut self, timeout: Option) -> io::Result<()> { - let sock = self.inner.as_socketlike_view()?; - sock.set_write_timeout(timeout) - } - - pub fn set_nonblocking(&mut self, nonblocking: bool) -> io::Result<()> { - let sock = self.inner.as_socketlike_view()?; - sock.set_nonblocking(nonblocking) - } - - pub fn probe_readable(&self) -> bool { - #[allow(clippy::unwrap_used)] - let raw_fd = self.inner.as_owned_fd().unwrap().as_fd(); - - let mut fds = [PollFd::new(raw_fd, PollFlags::POLLIN)]; - poll(&mut fds, PollTimeout::ZERO).is_err() - || fds[0] - .revents() - .unwrap_or(PollFlags::empty()) - .intersects(PollFlags::POLLIN | PollFlags::POLLHUP | PollFlags::POLLERR) - } - - pub fn create_message(&mut self, item: T) -> Result, io::Error> - where - T: TransferHandles, - { - self.metadata.create_message(item) - } -} - -impl Read for Channel { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let mut fds = [0; MAX_FDS]; - let socket = self.inner.as_socketlike_view()?; - let (n, fd_cnt) = 
socket.recv_with_fd(buf, &mut fds)?; - self.metadata.receive_fds(&fds[..fd_cnt]); - Ok(n) - } -} - -impl Write for Channel { - fn write_all(&mut self, mut buf: &[u8]) -> Result<(), io::Error> { - let mut socket = &*self.inner.as_socketlike_view()?; - - while !buf.is_empty() { - let handles = self.metadata.drain_to_send(); - if handles.is_empty() { - break; - } - - let fds: Vec = handles.iter().map(AsRawFd::as_raw_fd).collect(); - loop { - match socket.send_with_fd(buf, &fds) { - Ok(0) => { - self.metadata.reenqueue_for_sending(handles); - return Err(io::Error::new( - ErrorKind::WriteZero, - "failed to write whole buffer", - )); - } - Ok(n) => { - buf = &buf[n..]; - break; - } - Err(ref e) if e.kind() == ErrorKind::Interrupted => { /* retry */ } - Err(e) => { - self.metadata.reenqueue_for_sending(handles); - - return Err(e); - } - } - } - } - - while !buf.is_empty() { - match socket.write(buf) { - Ok(0) => { - return Err(io::Error::new( - ErrorKind::WriteZero, - "failed to write whole buffer", - )); - } - Ok(n) => buf = &buf[n..], - Err(ref e) if e.kind() == ErrorKind::Interrupted => {} - Err(e) => return Err(e), - } - } - Ok(()) - } - - fn write(&mut self, buf: &[u8]) -> io::Result { - //TODO implement partial writes - self.write_all(buf).map(|_| buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - let mut socket = &*self.inner.as_socketlike_view()?; - socket.flush() - } -} - -impl From for PlatformHandle { - fn from(c: Channel) -> Self { - c.inner - } -} - -impl From> for Channel { - fn from(h: PlatformHandle) -> Self { - Channel { - inner: h, - metadata: Default::default(), - } - } -} - -impl From for Channel { - fn from(stream: UnixStream) -> Self { - Channel { - inner: PlatformHandle::from(stream), - metadata: Default::default(), - } - } -} diff --git a/datadog-ipc/src/platform/unix/channel/async_channel.rs b/datadog-ipc/src/platform/unix/channel/async_channel.rs deleted file mode 100644 index 5f545a2c5a..0000000000 --- 
a/datadog-ipc/src/platform/unix/channel/async_channel.rs +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use pin_project::pin_project; -use sendfd::{RecvWithFd, SendWithFd}; -use std::os::unix::net::UnixStream as StdUnixStream; -use std::{ - io, - os::unix::prelude::{AsRawFd, RawFd}, - sync::{Arc, Mutex}, - task::Poll, -}; -use tokio::{ - io::{AsyncRead, AsyncWrite}, - net::UnixStream, -}; - -use super::{Channel, ChannelMetadata, MAX_FDS}; -use crate::platform::PlatformHandle; - -#[derive(Debug)] -#[pin_project] -pub struct AsyncChannel { - #[pin] - inner: UnixStream, - pub metadata: Arc>, -} - -impl From for AsyncChannel { - fn from(stream: UnixStream) -> Self { - AsyncChannel { - inner: stream, - metadata: Arc::new(Mutex::new(ChannelMetadata::default())), - } - } -} - -impl TryFrom for AsyncChannel { - type Error = io::Error; - - fn try_from(value: Channel) -> Result { - let fd = PlatformHandle::::from(value).into_instance()?; - - fd.set_nonblocking(true)?; - Ok(AsyncChannel { - inner: UnixStream::from_std(fd)?, - metadata: Arc::new(Mutex::new(ChannelMetadata::default())), - }) - } -} - -impl AsyncWrite for AsyncChannel { - fn poll_write( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &[u8], - ) -> Poll> { - let project = self.project(); - #[allow(clippy::unwrap_used)] - let handles = project.metadata.lock().unwrap().drain_to_send(); - - if !handles.is_empty() { - let fds: Vec = handles.iter().map(AsRawFd::as_raw_fd).collect(); - match project.inner.send_with_fd(buf, &fds) { - Ok(sent) => Poll::Ready(Ok(sent)), - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - #[allow(clippy::unwrap_used)] - project - .metadata - .lock() - .unwrap() - .reenqueue_for_sending(handles); - project.inner.poll_write_ready(cx).map_ok(|_| 0) - } - Err(err) => Poll::Ready(Err(err)), - } - } else { - project.inner.poll_write(cx, buf) - } - } - - fn 
poll_flush( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - self.project().inner.poll_flush(cx) - } - - fn poll_shutdown( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - self.project().inner.poll_shutdown(cx) - } -} - -impl AsyncRead for AsyncChannel { - fn poll_read( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> Poll> { - let project = self.project(); - let mut fds = [0; MAX_FDS]; - - // Safety: this implementation is based on Tokio async read implementation, - // it is performing an UB operation by using uninitiallized memory - although in practice - // its somewhat defined there are still some unknowns WRT to future behaviors - // TODO: make sure this optimization is really needed - once BenchPlatform is connected to - // libdatadog benchmark unfilled_mut vs initialize_unfilled - and if the difference - // is negligible - then lets switch to implementation that doesn't use UB. 
- unsafe { - let b = &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]); - loop { - break match project.inner.recv_with_fd(b, &mut fds) { - Ok((bytes_received, descriptors_received)) => { - #[allow(clippy::unwrap_used)] - project - .metadata - .lock() - .unwrap() - .receive_fds(&fds[..descriptors_received]); - - buf.assume_init(bytes_received); - buf.advance(bytes_received); - - Poll::Ready(Ok(())) - } - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - match project.inner.poll_read_ready(cx) { - Poll::Ready(Ok(())) => continue, - poll => poll, - } - } - Err(err) => Poll::Ready(Err(err)), - }; - } - } - } -} - -impl AsyncChannel { - pub fn handle(&self) -> i32 { - self.inner.as_raw_fd() - } -} diff --git a/datadog-ipc/src/platform/unix/channel/metadata.rs b/datadog-ipc/src/platform/unix/channel/metadata.rs deleted file mode 100644 index ce736dda31..0000000000 --- a/datadog-ipc/src/platform/unix/channel/metadata.rs +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use std::{ - collections::VecDeque, - io, - os::fd::IntoRawFd, - os::unix::prelude::{AsRawFd, FromRawFd, RawFd}, -}; - -use io_lifetimes::OwnedFd; - -use crate::{ - handles::TransferHandles, - platform::{Message, PlatformHandle, MAX_FDS}, -}; - -#[derive(Debug)] -pub struct ChannelMetadata { - fds_to_send: Vec>, - fds_received: VecDeque, // Store as OwnedFd to prevent leaking them - pid: libc::pid_t, // must always be set to current Process ID -} - -impl Default for ChannelMetadata { - fn default() -> Self { - Self { - fds_to_send: Default::default(), - fds_received: Default::default(), - pid: nix::unistd::getpid().as_raw(), - } - } -} - -impl ChannelMetadata { - pub fn unwrap_message(&mut self, message: Message) -> Result - where - T: TransferHandles, - { - let mut item = message.item; - - item.receive_handles(self)?; - Ok(item) - } - - pub fn create_message(&mut self, item: T) -> Result, io::Error> - where - T: TransferHandles, - { - item.copy_handles(&mut *self)?; - - let message = Message { - item, - pid: self.pid, - }; - - Ok(message) - } - - pub(crate) fn enqueue_for_sending(&mut self, handle: PlatformHandle) { - self.fds_to_send.push(handle.to_untyped()) - } - - pub(crate) fn reenqueue_for_sending(&mut self, mut handles: Vec>) { - handles.append(&mut self.fds_to_send); - self.fds_to_send = handles; - } - - pub(crate) fn drain_to_send(&mut self) -> Vec> { - let drain = self.fds_to_send.drain(..); - - let mut cnt: i32 = MAX_FDS.try_into().unwrap_or(i32::MAX); - - let (to_send, leftover) = drain.partition(|_| { - cnt -= 1; - cnt >= 0 - }); - self.reenqueue_for_sending(leftover); - - to_send - } - - pub(crate) fn receive_fds(&mut self, fds: &[RawFd]) { - self.fds_received.append( - &mut fds - .iter() - .map(|fd| unsafe { OwnedFd::from_raw_fd(*fd) }) - .collect::>() - .into(), - ); - } - - pub(crate) fn find_handle(&mut self, hint: &PlatformHandle) -> Option> { - if hint.as_raw_fd() < 0 { - 
return Some(hint.clone()); - } - - let fd = self.fds_received.pop_front(); - - fd.map(|fd| unsafe { PlatformHandle::from_raw_fd(fd.into_raw_fd()) }) - } -} diff --git a/datadog-ipc/src/platform/unix/mod.rs b/datadog-ipc/src/platform/unix/mod.rs index eef432d5cc..f94115d8e6 100644 --- a/datadog-ipc/src/platform/unix/mod.rs +++ b/datadog-ipc/src/platform/unix/mod.rs @@ -3,11 +3,9 @@ mod platform_handle; -mod channel; -pub use channel::*; - pub mod locks; pub mod sockets; +pub use sockets::*; mod message; pub use message::*; @@ -29,120 +27,3 @@ pub unsafe extern "C" fn memfd_create(name: libc::c_void, flags: libc::c_uint) - libc::syscall(libc::SYS_memfd_create, name, flags) as libc::c_int } -#[cfg(test)] -mod single_threaded_tests { - use io_lifetimes::OwnedFd; - use pretty_assertions::assert_eq; - use std::{ - collections::BTreeMap, - fs::File, - io::{self, Read, Seek, Write}, - os::unix::prelude::{AsRawFd, RawFd}, - }; - - use crate::platform::{metadata::ChannelMetadata, unix::message::MAX_FDS}; - - use super::super::PlatformHandle; - - fn assert_platform_handle_is_valid_file( - handle: PlatformHandle, - ) -> PlatformHandle { - let mut file: File = unsafe { handle.to_any_type().into_instance().unwrap() }; - - write!(file, "test_string").unwrap(); - file.rewind().unwrap(); - - let mut data = String::new(); - file.read_to_string(&mut data).unwrap(); - assert_eq!("test_string", data); - - file.rewind().unwrap(); - PlatformHandle::from(file).to_untyped() - } - #[cfg(not(target_os = "macos"))] - fn get_open_file_descriptors( - pid: Option, - ) -> Result, io::Error> { - let proc = pid.map(|p| format!("{p}")).unwrap_or_else(|| "self".into()); - - let fds_path = std::path::Path::new("/proc").join(proc).join("fd"); - let fds = std::fs::read_dir(fds_path)? 
- .filter_map(|r| r.ok()) - .filter_map(|r| { - let link = std::fs::read_link(r.path()).unwrap_or_default(); - let link = link.into_os_string().into_string().ok().unwrap_or_default(); - let fd = r.file_name().into_string().ok().unwrap_or_default(); - fd.parse().ok().map(|fd| (fd, link)) - }) - .collect(); - - Ok(fds) - } - - #[cfg(target_os = "macos")] - fn get_open_file_descriptors( - _: Option, - ) -> Result, io::Error> { - //TODO implement this check for macos - Ok(BTreeMap::default()) - } - - fn assert_file_descriptors_unchanged( - reference_meta: &BTreeMap, - pid: Option, - ) { - let current_meta = get_open_file_descriptors(pid).unwrap(); - - assert_eq!(reference_meta, ¤t_meta); - } - - // tests checks global FD state - so it needs to run single-threaded - #[test] - fn test_channel_metadata_only_provides_valid_owned() { - let reference = get_open_file_descriptors(None).unwrap(); - let mut meta = ChannelMetadata::default(); - - // create real handles - let files: Vec = (0..) - .map(|_| tempfile::tempfile().unwrap()) - .take(MAX_FDS * 2) - .collect(); - let reference_open_files = get_open_file_descriptors(None).unwrap(); - - // used for checking order of reenqueue behaviour - let file_fds: Vec = files.iter().map(AsRawFd::as_raw_fd).collect(); - - files - .into_iter() - .for_each(|f| meta.enqueue_for_sending(f.into())); - - let first_batch: Vec> = meta - .drain_to_send() - .into_iter() - .map(assert_platform_handle_is_valid_file) - .collect(); - - assert_eq!(MAX_FDS, first_batch.len()); - - meta.reenqueue_for_sending(first_batch); - - let mut handles = meta.drain_to_send(); - let second_batch = meta.drain_to_send(); - - handles.extend(second_batch); - assert_eq!(MAX_FDS * 2, handles.len()); - assert_eq!(0, meta.drain_to_send().len()); - - let final_ordered_fds_list: Vec = handles.iter().map(AsRawFd::as_raw_fd).collect(); - assert_eq!(file_fds, final_ordered_fds_list); - - assert_file_descriptors_unchanged(&reference_open_files, None); - - // test and dispose of 
all handles - for handle in handles { - assert_platform_handle_is_valid_file(handle); - } - - assert_file_descriptors_unchanged(&reference, None); - } -} diff --git a/datadog-ipc/src/platform/unix/sockets.rs b/datadog-ipc/src/platform/unix/sockets.rs deleted file mode 100644 index b43b7097af..0000000000 --- a/datadog-ipc/src/platform/unix/sockets.rs +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use std::{io, os::unix::net::UnixStream, path::Path}; -pub fn is_listening>(path: P) -> io::Result { - if !path.as_ref().exists() { - return Ok(false); - } - Ok(UnixStream::connect(path).is_ok()) -} -#[cfg(target_os = "linux")] -mod linux { - use std::{ - io, - os::unix::{ - net::{UnixListener, UnixStream}, - prelude::{AsRawFd, OsStrExt}, - }, - path::Path, - }; - - use io_lifetimes::OwnedFd; - use nix::sys::socket::{ - bind, connect, listen, socket, AddressFamily, Backlog, SockFlag, SockType, UnixAddr, - }; - - fn socket_stream() -> nix::Result { - socket( - AddressFamily::Unix, - SockType::Stream, - SockFlag::SOCK_CLOEXEC, - None, - ) - } - - pub fn connect_abstract>(path: P) -> io::Result { - let sock = socket_stream()?; - let addr = UnixAddr::new_abstract(path.as_ref().as_os_str().as_bytes())?; - connect(sock.as_raw_fd(), &addr)?; - Ok(sock.into()) - } - - pub fn bind_abstract>(path: P) -> io::Result { - let sock = socket_stream()?; - let addr = UnixAddr::new_abstract(path.as_ref().as_os_str().as_bytes())?; - bind(sock.as_raw_fd(), &addr)?; - // This was previously 128, but due to this bug in 0.29.0 which has - // been fixed but not released, we're using 127: - // https://github.com/nix-rust/nix/pull/2500 - const SOMAXCONN: i32 = 127; - listen(&sock, Backlog::new(SOMAXCONN)?)?; - Ok(sock.into()) - } -} - -#[cfg(target_os = "linux")] -pub use linux::*; diff --git a/datadog-ipc/src/platform/unix/sockets/linux.rs b/datadog-ipc/src/platform/unix/sockets/linux.rs new file mode 
100644 index 0000000000..e7bca05b1d --- /dev/null +++ b/datadog-ipc/src/platform/unix/sockets/linux.rs @@ -0,0 +1,145 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +//! Linux-specific IPC socket implementation using `AF_UNIX SOCK_SEQPACKET`. + +use super::{ + create_unix_socket, set_nonblocking, SeqpacketConn, SeqpacketListener, PeerCredentials, +}; +use nix::sys::socket::{accept, bind, connect, listen, AddressFamily, Backlog, SockType, UnixAddr}; +use std::{ + io, + os::unix::{ + io::{AsRawFd, FromRawFd, OwnedFd}, + prelude::OsStrExt, + }, + path::Path, +}; +use std::os::fd::RawFd; + +fn create_seqpacket_socket() -> io::Result { + create_unix_socket(SockType::SeqPacket) +} + +impl SeqpacketListener { + /// Bind to a filesystem path and start listening (SEQPACKET, backlog 128). + /// + /// Removes any stale socket file before binding (standard Unix practice). + pub fn bind(path: impl AsRef) -> io::Result { + let _ = std::fs::remove_file(path.as_ref()); + let fd = create_seqpacket_socket()?; + let addr = UnixAddr::new(path.as_ref()).map_err(io::Error::from)?; + bind(fd.as_raw_fd(), &addr).map_err(io::Error::from)?; + listen(&fd, Backlog::new(128).map_err(io::Error::from)?).map_err(io::Error::from)?; + Ok(Self { inner: fd }) + } + + /// Bind to a Linux abstract socket name and start listening. + pub fn bind_abstract(name: &[u8]) -> io::Result { + let fd = create_seqpacket_socket()?; + let addr = UnixAddr::new_abstract(name).map_err(io::Error::from)?; + bind(fd.as_raw_fd(), &addr).map_err(io::Error::from)?; + listen(&fd, Backlog::new(128).map_err(io::Error::from)?).map_err(io::Error::from)?; + Ok(Self { inner: fd }) + } + + /// Accept a new connection (non-blocking in non-blocking mode). + /// + /// Skips intermittent connections left by `is_listening` probes: after `accept()`, peek to check + /// if the peer has already closed the connection (EOF). If so, discard and loop. 
+ pub fn try_accept(&self) -> io::Result { + loop { + let new_fd = accept(self.inner.as_raw_fd()).map_err(io::Error::from)?; + let owned = unsafe { OwnedFd::from_raw_fd(new_fd) }; + let conn = SeqpacketConn::from_owned(owned)?; + // Peek to detect EOF phantom connections left by is_listening probes. + let mut probe = [0u8; 1]; + let n = unsafe { + libc::recv( + conn.inner.as_raw_fd(), + probe.as_mut_ptr() as *mut libc::c_void, + 1, + libc::MSG_PEEK | libc::MSG_DONTWAIT, + ) + }; + if n == 0 { + // EOF: peer closed before sending anything; discard this phantom connection. + continue; + } + return Ok(conn); + } + } +} + +impl SeqpacketConn { + /// Create a connected pair (SEQPACKET, for testing / in-process use). + pub fn socketpair() -> io::Result<(Self, Self)> { + let mut fds = [0i32; 2]; + if unsafe { + libc::socketpair(libc::AF_UNIX, libc::SOCK_SEQPACKET, 0, fds.as_mut_ptr()) + } == -1 + { + return Err(io::Error::last_os_error()); + } + let fd0 = unsafe { OwnedFd::from_raw_fd(fds[0]) }; + let fd1 = unsafe { OwnedFd::from_raw_fd(fds[1]) }; + Ok((Self::from_owned(fd0)?, Self::from_owned(fd1)?)) + } + + /// Connect to a filesystem socket path. + pub fn connect(path: impl AsRef) -> io::Result { + let fd = create_seqpacket_socket()?; + let addr = UnixAddr::new(path.as_ref()).map_err(io::Error::from)?; + connect(fd.as_raw_fd(), &addr).map_err(io::Error::from)?; + Self::from_owned(fd) + } + + /// Connect to a Linux abstract socket name. + pub fn connect_abstract(name: &[u8]) -> io::Result { + let fd = create_seqpacket_socket()?; + let addr = UnixAddr::new_abstract(name).map_err(io::Error::from)?; + connect(fd.as_raw_fd(), &addr).map_err(io::Error::from)?; + Self::from_owned(fd) + } +} + +/// Returns `true` if a SEQPACKET server is listening on `path`. +/// +/// Attempts `connect()` — succeeds only if a server is actively `accept()`-ing. 
+pub fn is_listening>(path: P) -> io::Result { + if !path.as_ref().exists() { + return Ok(false); + } + Ok(SeqpacketConn::connect(path).is_ok()) +} + +/// Connect to a Linux abstract socket (path used as name bytes). +pub fn connect_abstract>(path: P) -> io::Result { + SeqpacketConn::connect_abstract(path.as_ref().as_os_str().as_bytes()) +} + +/// Bind an abstract socket (path used as name bytes). +pub fn bind_abstract>(path: P) -> io::Result { + SeqpacketListener::bind_abstract(path.as_ref().as_os_str().as_bytes()) +} + +pub fn get_peer_credentials(fd: RawFd) -> io::Result { + let mut cred: libc::ucred = unsafe { std::mem::zeroed() }; + let mut len = std::mem::size_of::() as libc::socklen_t; + if unsafe { + libc::getsockopt( + fd, + libc::SOL_SOCKET, + libc::SO_PEERCRED, + &mut cred as *mut _ as *mut libc::c_void, + &mut len, + ) + } < 0 + { + return Err(io::Error::last_os_error()); + } + Ok(PeerCredentials { + pid: cred.pid as u32, + uid: cred.uid as u32, + }) +} diff --git a/datadog-ipc/src/platform/unix/sockets/macos.rs b/datadog-ipc/src/platform/unix/sockets/macos.rs new file mode 100644 index 0000000000..ba5e793df2 --- /dev/null +++ b/datadog-ipc/src/platform/unix/sockets/macos.rs @@ -0,0 +1,241 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +//! macOS IPC socket implementation using `AF_UNIX SOCK_DGRAM` with an fd-passing handshake. +//! +//! ## Connection protocol +//! +//! macOS does not support `AF_UNIX SOCK_SEQPACKET`, so we emulate the same semantics: +//! +//! **Server side** (`SeqpacketListener`): +//! - Binds a `SOCK_DGRAM` socket to a filesystem path (the "rendezvous" socket). +//! - Calls `try_accept()` which does `recvmsg()` and extracts the client fd from SCM_RIGHTS. +//! Messages without SCM_RIGHTS (liveness probes) are silently discarded. +//! +//! **Client side** (`SeqpacketConn::connect`): +//! - Creates a `socketpair(AF_UNIX, SOCK_DGRAM)` with 4 MiB send/recv buffers. +//! 
- Sends one socketpair end to the server's rendezvous path via a **fresh, unconnected** +//! DGRAM socket (using `sendmsg` with `SCM_RIGHTS`). The client retains the other end. +//! +//! **Liveness probe** (`is_listening`): +//! - Sends a 1-byte datagram **without** SCM_RIGHTS to the rendezvous socket. +//! - Success → live server. `ECONNRESET` → stale socket file. + +use super::{ + create_unix_socket, sendmsg, set_nonblocking, ControlMessage, MsgFlags, SeqpacketConn, + SeqpacketListener, UnixAddr, MAX_MESSAGE_SIZE, +}; +use nix::sys::socket::{bind, AddressFamily, SockFlag, SockType}; +use std::{ + io, + os::unix::io::{AsRawFd, FromRawFd, OwnedFd}, + path::Path, +}; +use std::os::fd::RawFd; +use crate::PeerCredentials; + +fn create_dgram_socket() -> io::Result { + create_unix_socket(SockType::Datagram) +} + +fn set_dgram_buffers(fd: i32) -> io::Result<()> { + let size = MAX_MESSAGE_SIZE as libc::c_int; + let len = std::mem::size_of::() as libc::socklen_t; + for opt in [libc::SO_SNDBUF, libc::SO_RCVBUF] { + if unsafe { + libc::setsockopt( + fd, + libc::SOL_SOCKET, + opt, + &size as *const _ as *const libc::c_void, + len, + ) + } < 0 + { + return Err(io::Error::last_os_error()); + } + } + Ok(()) +} + +// ── SeqpacketListener ──────────────────────────────────────────────────────── + +impl SeqpacketListener { + /// Bind to a filesystem path (DGRAM rendezvous socket; no `listen()` needed). + /// + /// Removes any stale socket file before binding (standard Unix practice). + pub fn bind(path: impl AsRef) -> io::Result { + let _ = std::fs::remove_file(path.as_ref()); + let fd = create_dgram_socket()?; + let addr = UnixAddr::new(path.as_ref()).map_err(io::Error::from)?; + bind(fd.as_raw_fd(), &addr).map_err(io::Error::from)?; + Ok(Self { inner: fd }) + } + + /// Accept a connection by receiving a client fd via SCM_RIGHTS. + /// + /// Returns `Err(WouldBlock)` when no connection is pending. 
+ /// Silently discards messages without SCM_RIGHTS (liveness probes from `is_listening`). + pub fn try_accept(&self) -> io::Result { + loop { + let mut buf = [0u8; 1]; + let (_, owned_fds) = + super::recvmsg_raw(self.inner.as_raw_fd(), &mut buf, MsgFlags::MSG_DONTWAIT)?; + if let Some(client_fd) = owned_fds.into_iter().next() { + return SeqpacketConn::from_owned(client_fd); + } + // No SCM_RIGHTS: liveness probe — discard and try the next message. + // If the socket is empty, the next recvmsg call returns WouldBlock. + } + } +} + +impl SeqpacketConn { + /// Create a connected pair (SOCK_DGRAM with 4 MiB buffers, for testing / in-process use). + pub fn socketpair() -> io::Result<(Self, Self)> { + let mut fds = [0i32; 2]; + if unsafe { + libc::socketpair(libc::AF_UNIX, libc::SOCK_DGRAM, 0, fds.as_mut_ptr()) + } == -1 + { + return Err(io::Error::last_os_error()); + } + let fd0 = unsafe { OwnedFd::from_raw_fd(fds[0]) }; + let fd1 = unsafe { OwnedFd::from_raw_fd(fds[1]) }; + set_dgram_buffers(fd0.as_raw_fd())?; + set_dgram_buffers(fd1.as_raw_fd())?; + Ok((Self::from_owned(fd0)?, Self::from_owned(fd1)?)) + } + + /// Connect to a server at the given filesystem path using the fd-passing handshake. + /// + /// Creates a `SOCK_DGRAM` socketpair with 4 MiB buffers, then sends the server end + /// to the rendezvous socket via SCM_RIGHTS using a fresh unconnected DGRAM socket. + /// Returns the client end of the socketpair. + pub fn connect(path: impl AsRef) -> io::Result { + let mut fds = [0i32; 2]; + if unsafe { + libc::socketpair(libc::AF_UNIX, libc::SOCK_DGRAM, 0, fds.as_mut_ptr()) + } == -1 + { + return Err(io::Error::last_os_error()); + } + let fd_client = unsafe { OwnedFd::from_raw_fd(fds[0]) }; + let fd_server = unsafe { OwnedFd::from_raw_fd(fds[1]) }; + set_dgram_buffers(fd_client.as_raw_fd())?; + set_dgram_buffers(fd_server.as_raw_fd())?; + + // A fresh unconnected DGRAM socket is required for the handshake sendmsg. 
+ // fd_client is already "connected" to fd_server and cannot reach the rendezvous path. + let handshake_fd = create_dgram_socket()?; + let addr = UnixAddr::new(path.as_ref()).map_err(io::Error::from)?; + let server_raw = fd_server.as_raw_fd(); + let iov = [std::io::IoSlice::new(&[0u8])]; + sendmsg::( + handshake_fd.as_raw_fd(), + &iov, + &[ControlMessage::ScmRights(&[server_raw])], + MsgFlags::empty(), + Some(&addr), + ) + .map_err(io::Error::from)?; + // Do NOT drop fd_server here. On macOS, closing any local fd that references + // the peer end of a SOCK_DGRAM socketpair immediately disconnects this end + // (fd_client), even if fd_server is still alive in another process via + // SCM_RIGHTS. Keep fd_server alive in `_peer` for the lifetime of this + // SeqpacketConn so that sendmsg on fd_client continues to work. + Self::from_owned_pair(fd_client, fd_server) + } +} + +// ── Free functions ─────────────────────────────────────────────────────────── + +/// Returns `true` if a live server is listening at the given socket path. +/// +/// Sends a 1-byte probe datagram (no SCM_RIGHTS) to the path. +/// - Success → live server (the server's `try_accept` silently discards the probe). +/// - `ECONNRESET` → stale socket file (no live receiver). +pub fn is_listening>(path: P) -> io::Result { + if !path.as_ref().exists() { + return Ok(false); + } + let probe = nix::sys::socket::socket( + AddressFamily::Unix, + SockType::Datagram, + SockFlag::empty(), + None, + ) + .map_err(io::Error::from)?; + let addr = UnixAddr::new(path.as_ref()).map_err(io::Error::from)?; + let iov = [std::io::IoSlice::new(&[0u8])]; + Ok( + sendmsg::(probe.as_raw_fd(), &iov, &[], MsgFlags::empty(), Some(&addr)) + .is_ok(), + ) +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Verify that connect/accept round-trip works for both directions. 
+ #[test] + fn test_connect_accept_send_recv() { + let tmpdir = tempfile::tempdir().expect("tempdir"); + let path = tmpdir.path().join("test.sock"); + let listener = SeqpacketListener::bind(&path).expect("bind"); + let client = SeqpacketConn::connect(&path).expect("connect"); + let server = listener.try_accept().expect("try_accept"); + + // Client → server + client.try_send_raw(&mut vec![1u8; 10], &[]).expect("client send"); + let mut buf = vec![0u8; 64]; + let (n, _) = server.try_recv_raw(&mut buf).expect("server recv"); + assert_eq!(&buf[..n], &[1u8; 10]); + + // Server → client (use a large enough buffer for 220 bytes) + let mut buf220 = vec![0u8; 256]; + server.try_send_raw(&mut vec![2u8; 220], &[]).expect("server send 220B"); + let (n, _) = client.try_recv_raw(&mut buf220).expect("client recv"); + assert_eq!(n, 220); + } + + /// Confirm macOS-specific SOCK_DGRAM socketpair behaviour: closing one end of a + /// socketpair in the same process disconnects the other end. This documents why + /// `SeqpacketConn::connect` keeps `fd_server` alive in `_peer`. + #[test] + fn test_socketpair_peer_drop_disconnects() { + let (conn0, conn1) = SeqpacketConn::socketpair().expect("socketpair"); + + // Both ends alive: send must succeed. + conn0.try_send_raw(&mut vec![42u8; 10], &[]).expect("send with peer alive"); + + // Drop the peer: on macOS this disconnects conn0. 
+ drop(conn1); + assert!( + conn0.try_send_raw(&mut vec![42u8; 10], &[]).is_err(), + "expected send error after dropping peer on macOS" + ); + } +} + +pub fn get_peer_credentials(fd: RawFd) -> io::Result { + let mut pid: libc::pid_t = 0; + let mut len = std::mem::size_of::() as libc::socklen_t; + if unsafe { + libc::getsockopt( + fd, + libc::SOL_LOCAL, + libc::LOCAL_PEERPID, + &mut pid as *mut _ as *mut libc::c_void, + &mut len, + ) + } < 0 + { + return Err(io::Error::last_os_error()); + } + Ok(PeerCredentials { + pid: pid as u32, + uid: 0, + }) +} diff --git a/datadog-ipc/src/platform/unix/sockets/mod.rs b/datadog-ipc/src/platform/unix/sockets/mod.rs new file mode 100644 index 0000000000..5c39632fbb --- /dev/null +++ b/datadog-ipc/src/platform/unix/sockets/mod.rs @@ -0,0 +1,325 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +//! Message-boundary-preserving IPC sockets for Unix. +//! +//! - Linux: `AF_UNIX SOCK_SEQPACKET` with `SCM_RIGHTS` for fd transfer. +//! - macOS: `AF_UNIX SOCK_DGRAM` with an fd-passing connection handshake. This emulates the +//! semantics which SOCK_SEQPACKET provides us on Linux. + +pub use nix::sys::socket::{ControlMessage, ControlMessageOwned, MsgFlags, UnixAddr}; +use nix::sys::socket::{recvmsg, sendmsg, AddressFamily, SockFlag, SockType}; +use std::{ + io, + os::unix::io::{AsRawFd, FromRawFd, OwnedFd, RawFd}, + time::Duration, +}; + +#[cfg(target_os = "linux")] +mod linux; +#[cfg(not(target_os = "linux"))] +mod macos; + +#[cfg(target_os = "linux")] +pub use linux::{bind_abstract, connect_abstract, is_listening}; +#[cfg(not(target_os = "linux"))] +pub use macos::is_listening; + +#[cfg(not(target_os = "macos"))] +use linux::get_peer_credentials; +#[cfg(target_os = "macos")] +use macos::get_peer_credentials; + +/// Maximum file descriptors transferable in a single message. +pub const MAX_FDS: usize = 20; + +/// Maximum IPC message payload size (4 MiB). 
+pub const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; + +/// Extra receive-buffer overhead for the wire format. Zero on Unix because fds are +/// transferred out-of-band via `SCM_RIGHTS`; non-zero on Windows (see `sockets.rs`). +pub const HANDLE_SUFFIX_SIZE: usize = 0; + +/// Credentials of the connected peer, obtained once at connection time. +#[derive(Debug, Clone, Copy, Default)] +pub struct PeerCredentials { + pub pid: u32, + pub uid: u32, +} + +// ── Shared socket helpers ──────────────────────────────────────────────────── + +pub(super) fn create_unix_socket(sock_type: SockType) -> io::Result { + let fd = nix::sys::socket::socket(AddressFamily::Unix, sock_type, SockFlag::empty(), None) + .map_err(io::Error::from)?; + // Set close-on-exec (portable across Linux and macOS). + let flags = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETFD) }; + if flags >= 0 { + unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_SETFD, flags | libc::FD_CLOEXEC) }; + } + Ok(fd) +} + +pub(super) fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> { + let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) }; + if flags < 0 { + return Err(io::Error::last_os_error()); + } + let new_flags = if nonblocking { + flags | libc::O_NONBLOCK + } else { + flags & !libc::O_NONBLOCK + }; + if unsafe { libc::fcntl(fd, libc::F_SETFL, new_flags) } < 0 { + return Err(io::Error::last_os_error()); + } + Ok(()) +} + +pub(super) fn sendmsg_raw(fd: RawFd, data: &[u8], fds: &[RawFd], flags: MsgFlags) -> io::Result<()> { + let iov = [std::io::IoSlice::new(data)]; + if fds.is_empty() { + sendmsg::(fd, &iov, &[], flags, None) + } else { + sendmsg::(fd, &iov, &[ControlMessage::ScmRights(fds)], flags, None) + } + .map(|_| ()) + .map_err(io::Error::from) +} + +pub(super) fn recvmsg_raw( + fd: RawFd, + buf: &mut [u8], + flags: MsgFlags, +) -> io::Result<(usize, Vec)> { + let cmsg_space = unsafe { + libc::CMSG_SPACE((std::mem::size_of::() * MAX_FDS) as libc::c_uint) + } as usize; + let mut cmsg_buf = 
vec![0u8; cmsg_space]; + let mut iov = [std::io::IoSliceMut::new(buf)]; + + let msg = recvmsg::(fd, &mut iov, Some(&mut cmsg_buf), flags) + .map_err(io::Error::from)?; + + let bytes = msg.bytes; + if bytes == 0 { + // 0 bytes means EOF (peer closed connection), not a valid datagram. + // Legitimate acks are always at least 1 byte. + return Err(io::Error::from(io::ErrorKind::BrokenPipe)); + } + let mut owned_fds = Vec::new(); + for cmsg in msg.cmsgs().map_err(io::Error::from)? { + if let ControlMessageOwned::ScmRights(raw_fds) = cmsg { + for raw_fd in raw_fds { + owned_fds.push(unsafe { OwnedFd::from_raw_fd(raw_fd) }); + } + } + } + Ok((bytes, owned_fds)) +} + +pub(super) fn poll_with_timeout(fd: RawFd, event: libc::c_short, timeout: Option) -> io::Result<()> { + let timeout_ms: i32 = match timeout { + None => -1, + Some(d) => d.as_millis().min(i32::MAX as u128) as i32, + }; + let mut pfd = libc::pollfd { fd, events: event, revents: 0 }; + loop { + let ret = unsafe { libc::poll(&mut pfd, 1, timeout_ms) }; + if ret > 0 { + return Ok(()); + } + if ret == 0 { + return Err(io::Error::from(io::ErrorKind::TimedOut)); + } + let e = io::Error::last_os_error(); + if e.kind() != io::ErrorKind::Interrupted { + return Err(e); + } + } +} + +pub(super) fn poll_until_ready(fd: RawFd, event: libc::c_short) -> io::Result<()> { + poll_with_timeout(fd, event, None) +} + +// ── Types ──────────────────────────────────────────────────────────────────── + +/// A listening socket for accepting IPC connections. +/// +/// - Linux: `AF_UNIX SOCK_SEQPACKET` with `listen`/`accept`. +/// - macOS: `AF_UNIX SOCK_DGRAM` rendezvous socket; clients connect via fd-passing handshake. +/// +/// Also constructable from a pre-bound fd (e.g. received from a parent process). +/// Implements `IntoRawFd` so the fd can be transferred to a child process via `spawn_worker`. +pub struct SeqpacketListener { + pub inner: OwnedFd, +} + +impl SeqpacketListener { + /// Construct from a pre-bound fd (e.g. 
received from a parent process via `spawn_worker`). + pub fn from_owned_fd(fd: OwnedFd) -> Self { + Self { inner: fd } + } + + /// Wrap in a Tokio `AsyncFd` for use in async server accept loops. + /// + /// Sets the socket to non-blocking mode, then wraps in `AsyncFd`. + /// Requires a running Tokio runtime. + pub fn into_async_listener(self) -> io::Result> { + set_nonblocking(self.inner.as_raw_fd(), true)?; + tokio::io::unix::AsyncFd::new(self) + } + + pub fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +impl std::os::unix::io::AsRawFd for SeqpacketListener { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +impl std::os::unix::io::IntoRawFd for SeqpacketListener { + fn into_raw_fd(self) -> RawFd { + self.inner.into_raw_fd() + } +} + +/// A connected socket providing message-boundary-preserving IPC. +/// +/// - Linux: `AF_UNIX SOCK_SEQPACKET`. +/// - macOS: `AF_UNIX SOCK_DGRAM` socketpair endpoint (4 MiB buffers). +pub struct SeqpacketConn { + pub(super) inner: OwnedFd, + /// On macOS, closing any local fd for the peer end of a SOCK_DGRAM socketpair + /// immediately disconnects this socket, even if the peer is still alive in another + /// process. Keep `_peer` alive here so the connection remains valid until this + /// `SeqpacketConn` is dropped. + _peer: Option, + read_timeout: Option, + write_timeout: Option, +} + +impl SeqpacketConn { + pub(super) fn from_owned(fd: OwnedFd) -> io::Result { + set_nonblocking(fd.as_raw_fd(), true)?; + Ok(Self { inner: fd, _peer: None, read_timeout: None, write_timeout: None }) + } + + /// Create from a connected fd plus a peer fd that must be kept alive. + /// + /// On macOS, the peer fd must be kept open locally to maintain the SOCK_DGRAM + /// socketpair connection on this end. It is stored in `_peer` and closed when + /// this `SeqpacketConn` is dropped. 
+ pub(super) fn from_owned_pair(client: OwnedFd, peer: OwnedFd) -> io::Result { + set_nonblocking(client.as_raw_fd(), true)?; + Ok(Self { inner: client, _peer: Some(peer), read_timeout: None, write_timeout: None }) + } + + /// Retrieve the peer process's credentials (pid, uid). + pub fn peer_credentials(&self) -> io::Result { + get_peer_credentials(self.inner.as_raw_fd()) + } + + /// Non-blocking send. Returns `Err(WouldBlock)` if the socket buffer is full. + /// + /// `data` is passed as `&mut Vec` for API symmetry with the Windows implementation + /// (which appends handle bytes in-place and truncates back after the write). On Unix the + /// Vec is never modified. + /// + /// Note: `O_NONBLOCK` is always set on `SeqpacketConn` sockets (via `from_owned`), so + /// `MSG_DONTWAIT` is not needed and is intentionally omitted — on macOS `AF_UNIX SOCK_DGRAM` + /// socketpairs, `MSG_DONTWAIT` can return EINVAL instead of EAGAIN. + pub fn try_send_raw(&self, data: &mut Vec, fds: &[RawFd]) -> io::Result<()> { + sendmsg_raw(self.inner.as_raw_fd(), data, fds, MsgFlags::empty()) + } + + /// Blocking send. Polls for writability (respecting write_timeout), then sends. + pub fn send_raw_blocking(&self, data: &mut Vec, fds: &[RawFd]) -> io::Result<()> { + let fd = self.inner.as_raw_fd(); + loop { + match sendmsg_raw(fd, data, fds, MsgFlags::empty()) { + Ok(()) => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + poll_with_timeout(fd, libc::POLLOUT, self.write_timeout)?; + } + Err(e) => return Err(e), + } + } + } + + /// Non-blocking receive. Returns `Err(WouldBlock)` if no message available. + pub fn try_recv_raw(&self, buf: &mut [u8]) -> io::Result<(usize, Vec)> { + recvmsg_raw(self.inner.as_raw_fd(), buf, MsgFlags::empty()) + } + + /// Blocking receive. Polls for readability (respecting read_timeout), then receives. 
+ pub fn recv_raw_blocking(&self, buf: &mut [u8]) -> io::Result<(usize, Vec)> { + let fd = self.inner.as_raw_fd(); + loop { + match recvmsg_raw(fd, buf, MsgFlags::empty()) { + Ok(r) => return Ok(r), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + poll_with_timeout(fd, libc::POLLIN, self.read_timeout)?; + } + Err(e) => return Err(e), + } + } + } + + pub fn set_read_timeout(&mut self, d: Option) -> io::Result<()> { + self.read_timeout = d; + Ok(()) + } + + pub fn set_write_timeout(&mut self, d: Option) -> io::Result<()> { + self.write_timeout = d; + Ok(()) + } + + /// Wrap in a Tokio `AsyncFd` for async server dispatch loops. + pub fn into_async_fd(self) -> io::Result> { + tokio::io::unix::AsyncFd::new(self.inner) + } + + pub fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +/// Async receive on a Tokio `AsyncFd`-wrapped IPC connection. +/// +/// Used by the server dispatch loop (generated by `#[service]` macro). +pub async fn recv_raw_async( + fd: &tokio::io::unix::AsyncFd, + buf: &mut [u8], +) -> io::Result<(usize, Vec)> { + loop { + let mut guard = fd.readable().await?; + match guard.try_io(|inner| recvmsg_raw(inner.as_raw_fd(), buf, MsgFlags::empty())) { + Ok(result) => return result, + Err(_would_block) => continue, + } + } +} + +/// Async send on a Tokio `AsyncFd`-wrapped IPC connection. +/// +/// Used by the server dispatch loop (generated by `#[service]` macro) to send acks and +/// responses without blocking the async thread. 
+pub async fn send_raw_async( + fd: &tokio::io::unix::AsyncFd, + data: &[u8], + fds: &[RawFd], +) -> io::Result<()> { + loop { + let mut guard = fd.writable().await?; + match guard.try_io(|inner| sendmsg_raw(inner.as_raw_fd(), data, fds, MsgFlags::empty())) { + Ok(result) => return result, + Err(_would_block) => continue, + } + } +} diff --git a/datadog-ipc/src/platform/windows/channel/metadata.rs b/datadog-ipc/src/platform/windows/channel/metadata.rs index dca43161f3..65b0ef6026 100644 --- a/datadog-ipc/src/platform/windows/channel/metadata.rs +++ b/datadog-ipc/src/platform/windows/channel/metadata.rs @@ -13,7 +13,7 @@ use winapi::um::processthreadsapi::{GetCurrentProcess, OpenProcess}; use winapi::um::winnt::{DUPLICATE_SAME_ACCESS, HANDLE, PROCESS_DUP_HANDLE}; use crate::{ - handles::TransferHandles, + handles::{HandlesTransport, TransferHandles}, platform::{Message, PlatformHandle}, }; @@ -76,6 +76,24 @@ impl ProcessHandle { } } +impl HandlesTransport for &mut ChannelMetadata { + type Error = io::Error; + + fn copy_handle(self, handle: PlatformHandle) -> Result<(), Self::Error> { + self.enqueue_for_sending(handle); + Ok(()) + } + + fn provide_handle(self, hint: &PlatformHandle) -> Result, Self::Error> { + self.find_handle(hint).ok_or_else(|| { + io::Error::new( + io::ErrorKind::NotFound, + "handle not found in received handles map", + ) + }) + } +} + impl Debug for ProcessHandle { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { diff --git a/datadog-ipc/src/platform/windows/mod.rs b/datadog-ipc/src/platform/windows/mod.rs index 919e0354c4..3500bad286 100644 --- a/datadog-ipc/src/platform/windows/mod.rs +++ b/datadog-ipc/src/platform/windows/mod.rs @@ -15,3 +15,6 @@ pub(crate) use mem_handle::*; mod named_pipe; pub use named_pipe::*; + +pub mod sockets; +pub use sockets::*; diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs new file mode 100644 index 0000000000..14b95de100 --- /dev/null 
+++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -0,0 +1,561 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +//! Windows IPC implementation using named pipes in message mode. +//! +//! ## Connection protocol +//! +//! Named pipes with `PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE` preserve message boundaries, +//! giving semantics equivalent to `AF_UNIX SOCK_SEQPACKET` on Linux. +//! +//! ## Handle transfer +//! +//! Windows has no `SCM_RIGHTS`. Handles are duplicated into the peer process before sending, +//! and the duplicated values are embedded as a wire-format suffix after the payload: +//! +//! ```text +//! [payload bytes] [u64 LE × handle_count: handle values in receiver] [u32 LE: handle_count] +//! ``` +//! +//! Because `PIPE_READMODE_MESSAGE` delivers the entire message in one `ReadFile` call, the +//! receiver can read directly into the caller-provided buffer, then strip the suffix in-place — +//! no intermediate copy needed. The caller's buffer must have at least `HANDLE_SUFFIX_SIZE` +//! bytes beyond the maximum expected payload size. 
+ +use std::io; +use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, OwnedHandle, RawHandle}; +use std::path::Path; +use std::ptr::{null, null_mut}; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Mutex, +}; + +// winapi – only used for things not cleanly available in windows-sys +use winapi::shared::minwindef::ULONG; +use winapi::shared::winerror::ERROR_PIPE_CONNECTED; +use winapi::um::handleapi::{CloseHandle, DuplicateHandle, INVALID_HANDLE_VALUE}; +use winapi::um::processthreadsapi::{GetCurrentProcess, GetCurrentProcessId, OpenProcess}; +use winapi::um::winbase::{GetNamedPipeClientProcessId, GetNamedPipeServerProcessId}; +use winapi::um::winnt::{DUPLICATE_SAME_ACCESS, HANDLE, PROCESS_DUP_HANDLE}; + +// windows-sys – used for all pipe/IO/threading syscalls +use windows_sys::Win32::Foundation::{HANDLE as SysHANDLE, WAIT_OBJECT_0, WAIT_TIMEOUT}; +use windows_sys::Win32::Storage::FileSystem::{ReadFile, WriteFile}; +use windows_sys::Win32::System::IO::{CancelIo, GetOverlappedResult, OVERLAPPED, OVERLAPPED_0}; +use windows_sys::Win32::System::Pipes::{ + ConnectNamedPipe, CreateNamedPipeA, PeekNamedPipe, SetNamedPipeHandleState, + PIPE_NOWAIT, PIPE_READMODE_MESSAGE, PIPE_TYPE_MESSAGE, PIPE_UNLIMITED_INSTANCES, PIPE_WAIT, +}; +use windows_sys::Win32::System::Threading::{CreateEventA, WaitForSingleObject, INFINITE}; + +// Named-pipe open-mode bits not available in windows-sys 0.48 +const PIPE_ACCESS_DUPLEX: u32 = 0x0000_0003; // PIPE_ACCESS_INBOUND | PIPE_ACCESS_OUTBOUND +const FILE_FLAG_OVERLAPPED_: u32 = 0x4000_0000; +const FILE_FLAG_FIRST_PIPE_INSTANCE_: u32 = 0x0008_0000; + +/// Maximum file descriptors (handles) transferable in a single message. +pub const MAX_FDS: usize = 20; + +/// Maximum IPC message payload size (4 MiB). +pub const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; + +/// Wire-format suffix overhead: 4-byte count + 8 bytes per handle slot. 
+/// +/// Receive buffers must be at least `expected_payload_max + HANDLE_SUFFIX_SIZE` bytes. +pub const HANDLE_SUFFIX_SIZE: usize = 4 + 8 * MAX_FDS; + +/// Credentials of the connected peer. +#[derive(Debug, Clone, Copy, Default)] +pub struct PeerCredentials { + pub pid: u32, + pub uid: u32, +} + +/// Append `handles` (duplicated into `peer_pid`) followed by the 4-byte count to `data`. +/// +/// On error the function returns without having fully appended. The caller is responsible +/// for truncating `data` back to the pre-call length if it wishes to restore the original. +fn append_handle_suffix( + data: &mut Vec, + handles: &[RawHandle], + peer_pid: u32, +) -> io::Result<()> { + let count = handles.len(); + + if count > 0 { + let peer_proc = unsafe { OpenProcess(PROCESS_DUP_HANDLE, 0, peer_pid) }; + if peer_proc.is_null() { + return Err(io::Error::last_os_error()); + } + for &h in handles { + let mut dup: HANDLE = null_mut(); + let ok = unsafe { + DuplicateHandle( + GetCurrentProcess(), + h as HANDLE, + peer_proc, + &mut dup, + 0, + 0, + DUPLICATE_SAME_ACCESS, + ) + }; + if ok == 0 { + let err = io::Error::last_os_error(); + unsafe { CloseHandle(peer_proc) }; + return Err(err); + } + data.extend_from_slice(&(dup as u64).to_le_bytes()); + } + unsafe { CloseHandle(peer_proc) }; + } + + data.extend_from_slice(&(count as u32).to_le_bytes()); + Ok(()) +} + +/// Read one message from `h` directly into `buf`. +/// +/// `buf` must be large enough to hold the entire wire message +/// (payload + `HANDLE_SUFFIX_SIZE`). If the message is larger than `buf`, `ReadFile` +/// returns `ERROR_MORE_DATA` and this function propagates the error. +/// +/// Returns `(payload_len, owned_handles)`. 
+fn pipe_read( + h: SysHANDLE, + buf: &mut [u8], + blocking: bool, +) -> io::Result<(usize, Vec)> { + if !blocking { + let mut avail: u32 = 0; + if unsafe { PeekNamedPipe(h, null_mut(), 0, null_mut(), &mut avail, null_mut()) } == 0 { + return Err(io::Error::last_os_error()); + } + if avail == 0 { + return Err(io::ErrorKind::WouldBlock.into()); + } + } + + let mut read: u32 = 0; + if unsafe { ReadFile(h, buf.as_mut_ptr() as _, buf.len() as u32, &mut read, null_mut()) } == 0 + { + return Err(io::Error::last_os_error()); + } + let n = read as usize; + + // Parse the suffix: last 4 bytes are handle_count (LE u32). + if n < 4 { + return Err(io::Error::from(io::ErrorKind::UnexpectedEof)); + } + let count_bytes: [u8; 4] = buf[n - 4..n] + .try_into() + .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?; + let count = u32::from_le_bytes(count_bytes) as usize; + + // Before the count are 8 bytes × count handle values. + let handles_start = n + .checked_sub(4 + 8 * count) + .ok_or_else(|| io::Error::from(io::ErrorKind::InvalidData))?; + + let mut handles = Vec::with_capacity(count); + for i in 0..count { + let off = handles_start + 8 * i; + let val_bytes: [u8; 8] = buf[off..off + 8] + .try_into() + .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?; + let val = u64::from_le_bytes(val_bytes); + handles.push(unsafe { OwnedHandle::from_raw_handle(val as RawHandle) }); + } + + // payload occupies buf[0..handles_start] + Ok((handles_start, handles)) +} + +fn pipe_write(h: SysHANDLE, data: &[u8], blocking: bool) -> io::Result<()> { + if !blocking { + let mode = PIPE_NOWAIT; + unsafe { SetNamedPipeHandleState(h, &mode, null(), null()) }; + } + + let mut written: u32 = 0; + let ok = + unsafe { WriteFile(h, data.as_ptr() as _, data.len() as u32, &mut written, null_mut()) }; + + if !blocking { + let mode = PIPE_WAIT; + unsafe { SetNamedPipeHandleState(h, &mode, null(), null()) }; + } + + if ok == 0 { + let err = io::Error::last_os_error(); + if !blocking + && 
err.raw_os_error() + == Some(windows_sys::Win32::Foundation::ERROR_NO_DATA as i32) + { + return Err(io::ErrorKind::WouldBlock.into()); + } + return Err(err); + } + Ok(()) +} + +fn create_pipe_server(name: &[u8], first_instance: bool) -> io::Result { + let open_mode = PIPE_ACCESS_DUPLEX + | FILE_FLAG_OVERLAPPED_ + | if first_instance { + FILE_FLAG_FIRST_PIPE_INSTANCE_ + } else { + 0 + }; + + let h = unsafe { + CreateNamedPipeA( + name.as_ptr(), + open_mode, + PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, + PIPE_UNLIMITED_INSTANCES, + (MAX_MESSAGE_SIZE + HANDLE_SUFFIX_SIZE) as u32, + (MAX_MESSAGE_SIZE + HANDLE_SUFFIX_SIZE) as u32, + 0, + null_mut(), + ) + }; + + if h == INVALID_HANDLE_VALUE as SysHANDLE { + return Err(io::Error::last_os_error()); + } + Ok(unsafe { OwnedHandle::from_raw_handle(h as RawHandle) }) +} + +fn path_to_null_terminated(path: &Path) -> Vec { + let s = path.to_string_lossy(); + let mut bytes = s.as_bytes().to_vec(); + bytes.push(0); + bytes +} + +fn make_overlapped(event: SysHANDLE) -> OVERLAPPED { + OVERLAPPED { + Internal: 0, + InternalHigh: 0, + Anonymous: OVERLAPPED_0 { Pointer: null_mut() }, + hEvent: event, + } +} + +// ── SeqpacketListener ───────────────────────────────────────────────────────── + +/// A named-pipe server that accepts message-mode IPC connections. +/// +/// `try_accept` swaps the connected pipe instance for a fresh server instance so the listener +/// remains ready for the next client. Interior mutability (`Mutex`) allows `&self` in +/// `try_accept`. +pub struct SeqpacketListener { + inner: Mutex, + name: Vec, // NUL-terminated ANSI pipe name, e.g. `\\.\\pipe\\…` +} + +unsafe impl Send for SeqpacketListener {} +unsafe impl Sync for SeqpacketListener {} + +impl SeqpacketListener { + /// Bind to a named pipe derived from `path` and prepare to accept connections. 
+ /// + /// Uses `FILE_FLAG_FIRST_PIPE_INSTANCE` so that a second concurrent `bind` to the same path + /// fails with `ERROR_ACCESS_DENIED` — the signal used by `attempt_listen` to detect that + /// another process is already serving. + pub fn bind(path: impl AsRef) -> io::Result { + let name = path_to_null_terminated(path.as_ref()); + let handle = create_pipe_server(&name, true)?; + Ok(Self { + inner: Mutex::new(handle), + name, + }) + } + + /// Construct from a pre-bound handle received from a parent process. + /// + /// Reconstructs the pipe name via `NtQueryObject`. + pub fn from_owned_fd(fd: OwnedHandle) -> Self { + use crate::platform::named_pipe_name_from_raw_handle; + let name = named_pipe_name_from_raw_handle(fd.as_raw_handle()) + .map(|s| { let mut b = s.into_bytes(); b.push(0); b }) + .unwrap_or_default(); + Self { + inner: Mutex::new(fd), + name, + } + } + + /// Try to accept a pending connection (non-blocking). + /// + /// Returns `Err(WouldBlock)` when no client is waiting. + /// On success, the current pipe instance is handed to the `SeqpacketConn` and a fresh + /// server instance replaces it in the listener. + pub fn try_accept(&self) -> io::Result { + // Create the replacement server handle *before* taking the lock so that on failure + // we haven't mutated anything. + let new_server = create_pipe_server(&self.name, false)?; + + let mut guard = self + .inner + .lock() + .map_err(|_| io::Error::from(io::ErrorKind::Other))?; + let raw: SysHANDLE = guard.as_raw_handle() as SysHANDLE; + + // Use overlapped ConnectNamedPipe with a 0-ms wait for non-blocking behaviour. 
+ let event = unsafe { CreateEventA(null_mut(), 1, 0, null_mut()) }; + if event == 0 { + return Err(io::Error::last_os_error()); + } + let mut ov = make_overlapped(event); + + let result = unsafe { ConnectNamedPipe(raw, &mut ov) }; + let connect_err = io::Error::last_os_error(); + + let connected = if result != 0 { + true + } else { + match connect_err.raw_os_error().map(|e| e as u32) { + Some(e) if e == ERROR_PIPE_CONNECTED => true, + Some(e) if e == windows_sys::Win32::Foundation::ERROR_IO_PENDING => { + match unsafe { WaitForSingleObject(event, 0) } { + WAIT_OBJECT_0 => { + let mut transferred: u32 = 0; + unsafe { GetOverlappedResult(raw, &ov, &mut transferred, 0) != 0 } + } + WAIT_TIMEOUT => { + unsafe { + CancelIo(raw); + // Wait for the cancellation to complete so the handle is clean. + let mut transferred: u32 = 0; + GetOverlappedResult(raw, &ov, &mut transferred, 1); + CloseHandle(event as HANDLE); + } + return Err(io::ErrorKind::WouldBlock.into()); + } + _ => { + unsafe { CloseHandle(event as HANDLE) }; + return Err(io::Error::last_os_error()); + } + } + } + _ => { + unsafe { CloseHandle(event as HANDLE) }; + return Err(connect_err); + } + } + }; + unsafe { CloseHandle(event as HANDLE) }; + + if !connected { + return Err(io::Error::last_os_error()); + } + + let mut client_pid: ULONG = 0; + unsafe { GetNamedPipeClientProcessId(guard.as_raw_handle() as HANDLE, &mut client_pid) }; + + // Swap: the connected handle goes to the SeqpacketConn; the fresh server replaces it. 
+ let conn_handle = std::mem::replace(&mut *guard, new_server); + + Ok(SeqpacketConn { + inner: conn_handle, + peer_pid: client_pid, + read_timeout: None, + write_timeout: None, + }) + } + + pub fn as_raw_handle(&self) -> RawHandle { + self.inner + .lock() + .map(|g| g.as_raw_handle()) + .unwrap_or(null_mut()) + } +} + +impl std::os::windows::io::AsRawHandle for SeqpacketListener { + fn as_raw_handle(&self) -> RawHandle { + SeqpacketListener::as_raw_handle(self) + } +} + +impl std::os::windows::io::IntoRawHandle for SeqpacketListener { + fn into_raw_handle(self) -> RawHandle { + self.inner + .into_inner() + .map(|h| h.into_raw_handle()) + .unwrap_or(null_mut()) + } +} + +/// A connected named pipe providing message-boundary-preserving IPC. +pub struct SeqpacketConn { + pub(crate) inner: OwnedHandle, + peer_pid: u32, + read_timeout: Option, + write_timeout: Option, +} + +unsafe impl Send for SeqpacketConn {} + +impl SeqpacketConn { + /// Connect to a server at the given named pipe path (e.g. `\\\\.\\pipe\\…`). + pub fn connect(path: impl AsRef) -> io::Result { + use winapi::shared::winerror::ERROR_PIPE_BUSY; + use winapi::um::fileapi::{CreateFileA, OPEN_EXISTING}; + use winapi::um::winnt::{GENERIC_READ, GENERIC_WRITE}; + + let name = path_to_null_terminated(path.as_ref()); + let h = unsafe { + CreateFileA( + name.as_ptr() as *const i8, + GENERIC_READ | GENERIC_WRITE, + 0, + null_mut(), + OPEN_EXISTING, + 0, // synchronous, non-overlapped + null_mut(), + ) + }; + if h == INVALID_HANDLE_VALUE { + let err = io::Error::last_os_error(); + if err.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) { + return Err(io::ErrorKind::ConnectionRefused.into()); + } + return Err(err); + } + + // Upgrade to message read-mode. 
+ let mode = PIPE_READMODE_MESSAGE; + unsafe { SetNamedPipeHandleState(h as SysHANDLE, &mode, null(), null()) }; + + let inner = unsafe { OwnedHandle::from_raw_handle(h as RawHandle) }; + let mut server_pid: ULONG = 0; + unsafe { + GetNamedPipeServerProcessId(inner.as_raw_handle() as HANDLE, &mut server_pid); + } + Ok(Self { + inner, + peer_pid: server_pid, + read_timeout: None, + write_timeout: None, + }) + } + + /// Create an in-process connected pair (for testing). + pub fn socketpair() -> io::Result<(Self, Self)> { + static COUNTER: AtomicU64 = AtomicU64::new(0); + let n = COUNTER.fetch_add(1, Ordering::Relaxed); + let pid = unsafe { GetCurrentProcessId() }; + let name_str = format!(r"\\.\pipe\datadog-ipc-pair-{}-{}", pid, n); + let name = path_to_null_terminated(Path::new(&name_str)); + + let server_handle = create_pipe_server(&name, true)?; + + // Start ConnectNamedPipe asynchronously so we can connect from the same thread. + let event = unsafe { CreateEventA(null_mut(), 1, 0, null_mut()) }; + if event == 0 { + return Err(io::Error::last_os_error()); + } + let mut ov = make_overlapped(event); + let srv_raw = server_handle.as_raw_handle() as SysHANDLE; + unsafe { ConnectNamedPipe(srv_raw, &mut ov) }; + + let client = Self::connect(&name_str)?; + + // Wait for the server-side accept to complete. + unsafe { + WaitForSingleObject(event, INFINITE); + CloseHandle(event as HANDLE); + } + + let server = Self { + inner: server_handle, + peer_pid: pid, + read_timeout: None, + write_timeout: None, + }; + Ok((server, client)) + } + + /// Build a `SeqpacketConn` from a server-side pipe handle (after `ConnectNamedPipe`). + pub fn from_server_handle(handle: OwnedHandle, client_pid: u32) -> Self { + Self { + inner: handle, + peer_pid: client_pid, + read_timeout: None, + write_timeout: None, + } + } + + /// Retrieve the peer process's credentials (pid, uid). 
+ pub fn peer_credentials(&self) -> io::Result { + Ok(PeerCredentials { + pid: self.peer_pid, + uid: 0, + }) + } + + /// Non-blocking send. + /// + /// Appends the handle suffix to `data` in-place, writes the message, then truncates `data` + /// back to its original length — whether the write succeeded or failed. On `WouldBlock` + /// the caller can retry without re-encoding `data`. + pub fn try_send_raw(&self, data: &mut Vec, handles: &[RawHandle]) -> io::Result<()> { + let orig_len = data.len(); + if let Err(e) = append_handle_suffix(data, handles, self.peer_pid) { + data.truncate(orig_len); + return Err(e); + } + let result = pipe_write(self.inner.as_raw_handle() as SysHANDLE, data, false); + data.truncate(orig_len); + result + } + + /// Blocking send. + pub fn send_raw_blocking(&self, data: &mut Vec, handles: &[RawHandle]) -> io::Result<()> { + let orig_len = data.len(); + if let Err(e) = append_handle_suffix(data, handles, self.peer_pid) { + data.truncate(orig_len); + return Err(e); + } + let result = pipe_write(self.inner.as_raw_handle() as SysHANDLE, data, true); + data.truncate(orig_len); + result + } + + /// Non-blocking receive. Returns `Err(WouldBlock)` when no message is available. + /// + /// `buf` must be at least `payload_max + HANDLE_SUFFIX_SIZE` bytes. + pub fn try_recv_raw(&self, buf: &mut [u8]) -> io::Result<(usize, Vec)> { + pipe_read(self.inner.as_raw_handle() as SysHANDLE, buf, false) + } + + /// Blocking receive. + /// + /// `buf` must be at least `payload_max + HANDLE_SUFFIX_SIZE` bytes. 
+ pub fn recv_raw_blocking(&self, buf: &mut [u8]) -> io::Result<(usize, Vec)> { + pipe_read(self.inner.as_raw_handle() as SysHANDLE, buf, true) + } + + pub fn as_raw_handle(&self) -> RawHandle { + self.inner.as_raw_handle() + } + + pub fn set_read_timeout(&mut self, d: Option) -> io::Result<()> { + self.read_timeout = d; + Ok(()) + } + + pub fn set_write_timeout(&mut self, d: Option) -> io::Result<()> { + self.write_timeout = d; + Ok(()) + } +} + +/// Returns `true` if a server is listening at the given named pipe path. +pub fn is_listening>(path: P) -> io::Result { + Ok(SeqpacketConn::connect(path).is_ok()) +} diff --git a/datadog-ipc/src/sequential.rs b/datadog-ipc/src/sequential.rs deleted file mode 100644 index 05bff05a4f..0000000000 --- a/datadog-ipc/src/sequential.rs +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use futures::{Future, Stream}; -use std::fmt::Debug; -use std::{ - pin::Pin, - task::{Context, Poll}, -}; -use tarpc::server::{Channel, InFlightRequest, Requests, Serve}; -use tokio::sync::mpsc::error::SendError; -use tokio::sync::mpsc::OwnedPermit; - -#[allow(type_alias_bounds)] -type Request = (S, InFlightRequest); - -type PendingPermit = Pin< - Box>, SendError<()>>> + Send + 'static>, ->; - -/// Replaces tarpc::server::Channel::execute which spawns one task per message with an executor -/// that spawns a single worker and queues requests for this task. -/// -/// If the queue is full, the request is dropped and will be cancelled by tarpc unless -/// `with_backpressure` is configured for that request type. 
-pub fn execute_sequential( - reqs: Requests, - serve: S, - max_requests: usize, -) -> SequentialExecutor -where - C: Channel, - S: Serve + Send + 'static, - C::Req: Send + Debug + 'static, - C::Resp: Send + 'static, - S::Fut: Send, -{ - let (tx, mut rx) = tokio::sync::mpsc::channel::>(max_requests); - - tokio::spawn(async move { - loop { - let (serve, req) = match rx.recv().await { - None => return, - Some(s) => s, - }; - req.execute(serve).await; - } - }); - SequentialExecutor { - inner: reqs, - serve, - tx, - backpressure: |_| false, - pending: None, - } -} - -#[pin_project::pin_project] -pub struct SequentialExecutor -where - C: Channel + 'static, -{ - #[pin] - inner: Requests, - serve: S, - tx: tokio::sync::mpsc::Sender>, - /// Returns true for requests that must not be dropped when the queue is full. - /// The executor will pause reading new requests and wait for channel space instead. - backpressure: fn(&C::Req) -> bool, - /// Pending channel-space reservation for a backpressure request. - pending: Option<(PendingPermit, Request)>, -} - -impl Future for SequentialExecutor -where - C: Channel + 'static, - C::Req: Send + Debug + 'static, - C::Resp: Send + 'static, - S: Serve + Send + 'static + Clone, - S::Fut: Send, -{ - type Output = anyhow::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - loop { - // First flush any pending backpressure send before reading new requests. - { - let this = self.as_mut().project(); - if let Some((fut, _)) = this.pending.as_mut() { - match fut.as_mut().poll(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(_)) => return Poll::Ready(Ok(())), // worker dropped - Poll::Ready(Ok(permit)) => { - #[allow(clippy::unwrap_used)] // we've just checked this - let (_, item) = this.pending.take().unwrap(); - permit.send(item); - // fall through to poll next request - } - } - } - } - - // Read the next request off the transport. 
- match self.as_mut().project().inner.poll_next(cx) { - Poll::Ready(Some(Ok(resp))) => { - let backpressured = (self.backpressure)(&resp.get().message); - match self.as_ref().tx.try_send((self.serve.clone(), resp)) { - Ok(()) => {} // loop to pick up the next request - Err(err) => { - let (_, resp) = err.into_inner(); - if backpressured { - let fut = Box::pin(self.as_ref().tx.clone().reserve_owned()); - *self.as_mut().project().pending = - Some((fut, (self.serve.clone(), resp))); - } else { - tracing::warn!( - "Dropping {:?}: sequential executor queue is full", - resp.get().message - ); - } - } - } - } - Poll::Ready(Some(Err(e))) => return Poll::Ready(Err(e.into())), - Poll::Ready(None) => return Poll::Ready(Ok(())), - Poll::Pending => return Poll::Pending, - } - } - } -} - -impl SequentialExecutor -where - C: Channel + 'static, -{ - pub fn swap_sender( - &mut self, - mut sender: tokio::sync::mpsc::Sender>, - ) -> tokio::sync::mpsc::Sender> { - std::mem::swap(&mut self.tx, &mut sender); - sender - } - - /// Configures a predicate that identifies requests which must not be dropped when the queue - /// is full. For those requests the executor will pause reading and wait for channel space. - pub fn with_backpressure(mut self, backpressure: fn(&C::Req) -> bool) -> Self { - self.backpressure = backpressure; - self - } -} diff --git a/datadog-ipc/src/transport/blocking.rs b/datadog-ipc/src/transport/blocking.rs deleted file mode 100644 index 0299b02c40..0000000000 --- a/datadog-ipc/src/transport/blocking.rs +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use bytes::{BufMut, Bytes, BytesMut}; -use serde::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::pin::pin; -use std::{ - io::{self, Read, Write}, - mem::MaybeUninit, - sync::{atomic::AtomicU64, Arc}, - time::Duration, -}; -use tarpc::{context::Context, ClientMessage, Request, Response}; - -use tokio_serde::{Deserializer, Serializer}; - -use tokio_util::codec::{Decoder, Encoder, LengthDelimitedCodec}; - -use crate::{handles::TransferHandles, platform::Channel}; - -use super::DefaultCodec; - -pub struct BlockingTransport { - requests_id: Arc, - codec: LengthDelimitedCodec, - read_buffer: BytesMut, - channel: Channel, - _phantom: PhantomData<(IncomingItem, OutgoingItem)>, -} - -impl From for BlockingTransport { - fn from(channel: Channel) -> Self { - BlockingTransport { - requests_id: Arc::from(AtomicU64::new(0)), - codec: Default::default(), - read_buffer: BytesMut::with_capacity(4000), - channel, - _phantom: Default::default(), - } - } -} - -#[cfg(unix)] -impl From - for BlockingTransport -{ - fn from(s: std::os::unix::net::UnixStream) -> Self { - BlockingTransport { - requests_id: Arc::from(AtomicU64::new(0)), - codec: Default::default(), - read_buffer: BytesMut::with_capacity(4000), - channel: Channel::from(s), - _phantom: Default::default(), - } - } -} - -impl BlockingTransport -where - IncomingItem: for<'de> Deserialize<'de> + TransferHandles, - OutgoingItem: Serialize + TransferHandles, -{ - fn read_item(&mut self) -> Result, io::Error> { - let buf = &mut self.read_buffer; - while buf.has_remaining_mut() { - buf.reserve(1); - match self.codec.decode(buf)? 
{ - Some(frame) => { - let message = pin!(DefaultCodec::<_, ()>::default()).deserialize(&frame)?; - let item = self.channel.metadata.unwrap_message(message)?; - return Ok(item); - } - None => { - let n = unsafe { - let dst = buf.chunk_mut(); - let dst = &mut *(dst as *mut _ as *mut [MaybeUninit]); - let mut buf_window = tokio::io::ReadBuf::uninit(dst); - // this implementation is based on Tokio async read implementation, - // it is performing an UB operation by using uninitiallized memory - - // although in practice its somewhat defined - // there are still some unknowns WRT to future behaviors - - // TODO: make sure this optimization is really needed - once BenchPlatform - // is connected to libdatadog benchmark unfilled_mut - // vs initialize_unfilled - and if the difference is negligible - then lets - // switch to implementation that doesn't use UB. - let b = &mut *(buf_window.unfilled_mut() as *mut [MaybeUninit] - as *mut [u8]); - - let n = self.channel.read(b)?; - buf_window.assume_init(n); - buf_window.advance(n); - - buf_window.filled().len() - }; - - // Safety: This is guaranteed to be the number of initialized (and read) - // bytes due to the invariants provided by `ReadBuf::filled`. 
- unsafe { - buf.advance_mut(n); - } - } - } - } - Err(io::Error::other("couldn't read entire item")) - } - - fn do_send(&mut self, req: &ClientMessage<&OutgoingItem>) -> Result<(), io::Error> { - let msg = self.channel.create_message(req)?; - - let mut buf = BytesMut::new(); - let data = pin!(DefaultCodec::<(), _>::default()).serialize(&msg)?; - - // TODO: inefficient, copies the data twice, once to serialize and once with length before - self.codec.encode(data, &mut buf)?; - self.channel.write_all(&buf) - } - - fn new_client_message<'a>( - &self, - item: &'a OutgoingItem, - context: Context, - ) -> (u64, ClientMessage<&'a OutgoingItem>) { - let request_id = self - .requests_id - .fetch_add(1, std::sync::atomic::Ordering::Relaxed); - - ( - request_id, - ClientMessage::Request(Request { - context, - id: request_id, - message: item, - }), - ) - } - - pub fn set_nonblocking(&mut self, nonblocking: bool) -> io::Result<()> { - self.channel.set_nonblocking(nonblocking) - } - - pub fn set_read_timeout(&mut self, timeout: Option) -> io::Result<()> { - self.channel.set_read_timeout(timeout) - } - - pub fn set_write_timeout(&mut self, timeout: Option) -> io::Result<()> { - self.channel.set_write_timeout(timeout) - } - - pub fn is_closed(&self) -> bool { - // The blocking transport is not supposed to be readable on the client side unless it's a - // response. So, outside of waiting for a response, it will never be readable unless - // the server side closed its socket. 
- self.channel.probe_readable() - } - - pub fn send(&mut self, item: &OutgoingItem) -> io::Result<()> { - let mut ctx = Context::current(); - ctx.discard_response = true; - let (_, req) = self.new_client_message(item, ctx); - self.do_send(&req) - } - - pub fn call(&mut self, item: &OutgoingItem) -> io::Result { - let (request_id, req) = self.new_client_message(item, Context::current()); - self.do_send(&req)?; - - for resp in self { - let resp = resp?; - if resp.request_id == request_id { - return resp.message.map_err(|e| io::Error::new(e.kind, e.detail)); - } - } - Err(io::Error::other("Request is without a response")) - } - - /// This function allows testing a broken pipe - pub fn send_garbage(&mut self) -> io::Result<()> { - let mut buf = BytesMut::new(); - self.codec.encode(Bytes::from(vec![1u8; 100]), &mut buf)?; - self.channel.write_all(&buf)?; - loop { - std::thread::sleep(Duration::from_millis(1)); - self.channel.write_all(&[0])?; // write byte by byte until broken pipe - } - } -} - -impl Iterator for BlockingTransport -where - IncomingItem: for<'de> Deserialize<'de> + TransferHandles, - OutgoingItem: Serialize + TransferHandles, -{ - type Item = io::Result>; - - fn next(&mut self) -> Option>> { - Some(self.read_item()) - } -} diff --git a/datadog-ipc/src/transport/mod.rs b/datadog-ipc/src/transport/mod.rs deleted file mode 100644 index 81d9cda6c4..0000000000 --- a/datadog-ipc/src/transport/mod.rs +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -pub mod blocking; - -use std::{ - io, - pin::Pin, - sync::{Arc, Mutex}, - task::{Context, Poll}, -}; - -// TODO keep Json for now, however MessagePack seems to fail at deserialization - -use pin_project::pin_project; -use tokio_serde::formats::Bincode; - -use tokio_serde::Framed as SerdeFramed; - -use futures::{Sink, Stream}; -use serde::{Deserialize, Serialize}; - -use tokio_util::codec::{Framed, LengthDelimitedCodec}; - -use super::{ - handles::TransferHandles, - platform::{metadata::ChannelMetadata, AsyncChannel, Channel, Message}, -}; - -pub type DefaultCodec = Bincode; - -type DefaultSerdeFramed = SerdeFramed< - Framed, - Message, - Message, - DefaultCodec, Message>, ->; - -/// A transport that serializes to, and deserializes from, a byte stream. -#[pin_project] -pub struct Transport { - #[pin] - inner: DefaultSerdeFramed, - - channel_metadata: Arc>, -} - -impl Transport { - /// Returns the inner transport over which messages are sent and received. 
- pub fn get_ref(&self) -> &AsyncChannel { - self.inner.get_ref().get_ref() - } -} - -impl Stream for Transport -where - Item: for<'a> Deserialize<'a> + TransferHandles, - CodecError: Into>, - DefaultSerdeFramed: Stream, CodecError>>, -{ - type Item = io::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>> { - let this = self.project(); - this.inner - .poll_next(cx) - .map(|res| match res { - Some(Ok(message)) => Some( - #[allow(clippy::unwrap_used)] - this.channel_metadata - .lock() - .unwrap() - .unwrap_message(message) - .map_err(Into::into), - ), - Some(Err(e)) => Some(Err(e.into())), - None => None, - }) - .map_err(io::Error::other) - } -} - -impl Sink for Transport -where - SinkItem: Serialize + TransferHandles, - CodecError: Into>, - DefaultSerdeFramed: Sink, Error = CodecError>, -{ - type Error = io::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .poll_ready(cx) - .map_err(io::Error::other) - } - - fn start_send(self: Pin<&mut Self>, item: SinkItem) -> io::Result<()> { - let this = self.project(); - #[allow(clippy::unwrap_used)] - let mut message = this.channel_metadata.lock().unwrap(); - let message = message.create_message(item)?; - - this.inner.start_send(message).map_err(io::Error::other) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .poll_flush(cx) - .map_err(io::Error::other) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .poll_close(cx) - .map_err(io::Error::other) - } -} - -/// Constructs a new transport from a framed transport and a serialization codec. 
-fn new( - io: AsyncChannel, - codec: DefaultCodec, Message>, -) -> Transport -where - Item: for<'de> Deserialize<'de>, - SinkItem: Serialize, -{ - let channel_metadata = io.metadata.clone(); - let mut length_delimited = LengthDelimitedCodec::new(); - length_delimited.set_max_frame_length(100_000_000); - Transport { - inner: SerdeFramed::new(Framed::new(io, length_delimited), codec), - channel_metadata, - } -} - -pub type SymmetricalTransport = Transport; - -impl From for Transport -where - Item: for<'de> Deserialize<'de> + TransferHandles, - SinkItem: Serialize + TransferHandles, -{ - fn from(channel: AsyncChannel) -> Self { - let codec = DefaultCodec::default(); - new(channel, codec) - } -} - -impl TryFrom for Transport -where - Item: for<'de> Deserialize<'de> + TransferHandles, - SinkItem: Serialize + TransferHandles, -{ - type Error = >::Error; - - fn try_from(channel: Channel) -> Result { - Ok(Self::from(AsyncChannel::try_from(channel)?)) - } -} diff --git a/datadog-ipc/tarpc/Cargo.toml b/datadog-ipc/tarpc/Cargo.toml deleted file mode 100644 index 06d5144ca7..0000000000 --- a/datadog-ipc/tarpc/Cargo.toml +++ /dev/null @@ -1,118 +0,0 @@ -[package] -name = "tarpc" -version = "0.31.0" -rust-version = "1.58.0" -authors = [ - "Adam Wright ", - "Tim Kuehn ", -] -edition = "2021" -license = "MIT" -documentation = "https://docs.rs/tarpc" -homepage = "https://github.com/google/tarpc" -repository = "https://github.com/google/tarpc" -keywords = ["rpc", "network", "server", "api", "microservices"] -categories = ["asynchronous", "network-programming"] -readme = "README.md" -description = "An RPC framework for Rust with a focus on ease of use." 
-publish = false - -[lib] -bench = false - -[features] -default = [] - -serde1 = ["tarpc-plugins/serde1", "serde", "serde/derive"] -tokio1 = ["tokio/rt"] -serde-transport = ["serde1", "tokio1", "tokio-serde", "tokio-util/codec"] -serde-transport-json = ["tokio-serde/json"] -serde-transport-bincode = ["tokio-serde/bincode"] -tcp = ["tokio/net"] -unix = ["tokio/net"] -otel = ["opentelemetry", "tracing-opentelemetry"] - -full = [ - "serde1", - "tokio1", - "serde-transport", - "serde-transport-json", - "serde-transport-bincode", - "tcp", - "unix", - "otel" -] - -[badges] -travis-ci = { repository = "google/tarpc" } - -[dependencies] -anyhow = "1.0" -fnv = "1.0" -futures = "0.3" -humantime = "2.0" -pin-project = "1.0" -rand = "0.8" -serde = { optional = true, version = "1.0", features = ["derive"] } -static_assertions = "1.1.0" -tarpc-plugins = { path = "../plugins", version = "0.12" } -thiserror = "1.0" -tokio = { version = "1", features = ["time"] } -tokio-util = { version = "0.7.3", features = ["time"] } -tokio-serde = { optional = true, version = "0.8" } -tracing = { version = "0.1", default-features = false, features = [ - "attributes", - "log", -] } -tracing-opentelemetry = { version = "0.17.2", default-features = false, optional = true } -opentelemetry = { version = "0.17.0", default-features = false, optional = true } - - -[dev-dependencies] -assert_matches = "1.4" -bincode = "1.3" -bytes = { version = "1.11.1", features = ["serde"] } -flate2 = "1.0" -futures-test = "0.3" -opentelemetry = { version = "0.17.0", default-features = false, features = [ - "rt-tokio", -] } -opentelemetry-jaeger = { version = "0.16.0", features = ["rt-tokio"] } -pin-utils = "0.1.0-alpha" -serde_bytes = "0.11" -tracing-subscriber = { version = "0.3.22", features = ["env-filter"] } -tokio = { version = "1", features = ["full", "test-util"] } -tokio-serde = { version = "0.8", features = ["json", "bincode"] } -trybuild = "1.0" - -[package.metadata.docs.rs] -all-features = true 
-rustdoc-args = ["--cfg", "docsrs"] - -[[example]] -name = "compression" -required-features = ["serde-transport", "tcp"] - -[[example]] -name = "tracing" -required-features = ["full"] - -[[example]] -name = "readme" -required-features = ["full"] - -[[example]] -name = "pubsub" -required-features = ["full"] - -[[example]] -name = "custom_transport" -required-features = ["serde1", "tokio1", "serde-transport"] - -[[test]] -name = "service_functional" -required-features = ["serde-transport"] - -[[test]] -name = "dataservice" -required-features = ["serde-transport", "tcp"] diff --git a/datadog-ipc/tarpc/LICENSE b/datadog-ipc/tarpc/LICENSE deleted file mode 100644 index 9d6eea67e1..0000000000 --- a/datadog-ipc/tarpc/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -The MIT License (MIT) - -Copyright 2016 Google Inc. All Rights Reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/datadog-ipc/tarpc/README.md b/datadog-ipc/tarpc/README.md deleted file mode 120000 index 32d46ee883..0000000000 --- a/datadog-ipc/tarpc/README.md +++ /dev/null @@ -1 +0,0 @@ -../README.md \ No newline at end of file diff --git a/datadog-ipc/tarpc/clippy.toml b/datadog-ipc/tarpc/clippy.toml deleted file mode 100644 index 2d90603c67..0000000000 --- a/datadog-ipc/tarpc/clippy.toml +++ /dev/null @@ -1 +0,0 @@ -doc-valid-idents = ["gRPC"] diff --git a/datadog-ipc/tarpc/examples/compression.rs b/datadog-ipc/tarpc/examples/compression.rs deleted file mode 100644 index 8a23cd52a3..0000000000 --- a/datadog-ipc/tarpc/examples/compression.rs +++ /dev/null @@ -1,122 +0,0 @@ -use flate2::{read::DeflateDecoder, write::DeflateEncoder, Compression}; -use futures::{Sink, SinkExt, Stream, StreamExt, TryStreamExt}; -use serde::{Deserialize, Serialize}; -use serde_bytes::ByteBuf; -use std::{io, io::Read, io::Write}; -use tarpc::{ - client, context, - serde_transport::tcp, - server::{BaseChannel, Channel}, - tokio_serde::formats::Bincode, -}; - -/// Type of compression that should be enabled on the request. The transport is free to ignore this. 
-#[derive(Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize)] -pub enum CompressionAlgorithm { - Deflate, -} - -#[derive(Debug, Deserialize, Serialize)] -pub enum CompressedMessage { - Uncompressed(T), - Compressed { - algorithm: CompressionAlgorithm, - payload: ByteBuf, - }, -} - -async fn compress(message: T) -> io::Result> -where - T: Serialize, -{ - let message = serialize(message)?; - let mut encoder = DeflateEncoder::new(Vec::new(), Compression::default()); - encoder.write_all(&message).unwrap(); - let compressed = encoder.finish()?; - Ok(CompressedMessage::Compressed { - algorithm: CompressionAlgorithm::Deflate, - payload: ByteBuf::from(compressed), - }) -} - -async fn decompress(message: CompressedMessage) -> io::Result -where - for<'a> T: Deserialize<'a>, -{ - match message { - CompressedMessage::Compressed { algorithm, payload } => { - if algorithm != CompressionAlgorithm::Deflate { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("Compression algorithm {algorithm:?} not supported"), - )); - } - let mut deflater = DeflateDecoder::new(payload.as_slice()); - let mut payload = ByteBuf::new(); - deflater.read_to_end(&mut payload)?; - let message = deserialize(payload)?; - Ok(message) - } - CompressedMessage::Uncompressed(message) => Ok(message), - } -} - -fn serialize(t: T) -> io::Result { - bincode::serialize(&t) - .map(ByteBuf::from) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) -} - -fn deserialize(message: ByteBuf) -> io::Result -where - for<'a> D: Deserialize<'a>, -{ - bincode::deserialize(message.as_ref()).map_err(|e| io::Error::new(io::ErrorKind::Other, e)) -} - -fn add_compression( - transport: impl Stream>> - + Sink, Error = io::Error>, -) -> impl Stream> + Sink -where - Out: Serialize, - for<'a> In: Deserialize<'a>, -{ - transport.with(compress).and_then(decompress) -} - -#[tarpc::service] -pub trait World { - async fn hello(name: String) -> String; -} - -#[derive(Clone, Debug)] -struct HelloServer; - 
-#[tarpc::server] -impl World for HelloServer { - async fn hello(self, _: context::Context, name: String) -> String { - format!("Hey, {name}!") - } -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let mut incoming = tcp::listen("localhost:0", Bincode::default).await?; - let addr = incoming.local_addr(); - tokio::spawn(async move { - let transport = incoming.next().await.unwrap().unwrap(); - BaseChannel::with_defaults(add_compression(transport)) - .execute(HelloServer.serve()) - .await; - }); - - let transport = tcp::connect(addr, Bincode::default).await?; - let client = WorldClient::new(client::Config::default(), add_compression(transport)).spawn(); - - println!( - "{}", - client.hello(context::current(), "friend".into()).await? - ); - Ok(()) -} diff --git a/datadog-ipc/tarpc/examples/custom_transport.rs b/datadog-ipc/tarpc/examples/custom_transport.rs deleted file mode 100644 index 40bfa3388e..0000000000 --- a/datadog-ipc/tarpc/examples/custom_transport.rs +++ /dev/null @@ -1,48 +0,0 @@ -// use tarpc::context::Context; -// use tarpc::serde_transport as transport; -// use tarpc::server::{BaseChannel, Channel}; -// use tarpc::tokio_serde::formats::Bincode; -// use tarpc::tokio_util::codec::length_delimited::LengthDelimitedCodec; -// use tokio::net::{UnixListener, UnixStream}; -// -// #[tarpc::service] -// pub trait PingService { -// async fn ping(); -// } -// -// #[derive(Clone)] -// struct Service; -// -// #[tarpc::server] -// impl PingService for Service { -// async fn ping(self, _: Context) {} -// } -// -#[tokio::main] -async fn main() -> anyhow::Result<()> { - // let bind_addr = "/tmp/tarpc_on_unix_example.sock"; - // - // let _ = std::fs::remove_file(bind_addr); - // - // let listener = UnixListener::bind(bind_addr).unwrap(); - // let codec_builder = LengthDelimitedCodec::builder(); - // tokio::spawn(async move { - // loop { - // let (conn, _addr) = listener.accept().await.unwrap(); - // let framed = codec_builder.new_framed(conn); - // let 
transport = transport::new(framed, Bincode::default()); - // - // let fut = BaseChannel::with_defaults(transport).execute(Service.serve()); - // tokio::spawn(fut); - // } - // }); - // - // let conn = UnixStream::connect(bind_addr).await?; - // let transport = transport::new(codec_builder.new_framed(conn), Bincode::default()); - // PingServiceClient::new(Default::default(), transport) - // .spawn() - // .ping(tarpc::context::current()) - // .await?; - // - Ok(()) -} diff --git a/datadog-ipc/tarpc/examples/pubsub.rs b/datadog-ipc/tarpc/examples/pubsub.rs deleted file mode 100644 index d5bef34a80..0000000000 --- a/datadog-ipc/tarpc/examples/pubsub.rs +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -/// - The PubSub server sets up TCP listeners on 2 ports, the "subscriber" port and the "publisher" -/// port. Because both publishers and subscribers initiate their connections to the PubSub -/// server, the server requires no prior knowledge of either publishers or subscribers. -/// -/// - Subscribers connect to the server on the server's "subscriber" port. Once a connection is -/// established, the server acts as the client of the Subscriber service, initially requesting -/// the topics the subscriber is interested in, and subsequently sending topical messages to the -/// subscriber. -/// -/// - Publishers connect to the server on the "publisher" port and, once connected, they send -/// topical messages via Publisher service to the server. The server then broadcasts each -/// messages to all clients subscribed to the topic of that message. 
-/// -/// Subscriber Publisher PubSub Server -/// T1 | | | -/// T2 |-----Connect------------------------------------------------------>| -/// T3 | | | -/// T2 |<-------------------------------------------------------Topics-----| -/// T2 |-----(OK) Topics-------------------------------------------------->| -/// T3 | | | -/// T4 | |-----Connect-------------------->| -/// T5 | | | -/// T6 | |-----Publish-------------------->| -/// T7 | | | -/// T8 |<------------------------------------------------------Receive-----| -/// T9 |-----(OK) Receive------------------------------------------------->| -/// T10 | | | -/// T11 | |<--------------(OK) Publish------| -use anyhow::anyhow; -use futures::{ - channel::oneshot, - future::{self, AbortHandle}, - prelude::*, -}; -use publisher::Publisher as _; -use std::{ - collections::HashMap, - env, - error::Error, - io, - net::SocketAddr, - sync::{Arc, Mutex, RwLock}, -}; -use subscriber::Subscriber as _; -use tarpc::{ - client, context, - serde_transport::tcp, - server::{self, Channel}, - tokio_serde::formats::Json, -}; -use tokio::net::ToSocketAddrs; -use tracing::info; -use tracing_subscriber::prelude::*; - -pub mod subscriber { - #[tarpc::service] - pub trait Subscriber { - async fn topics() -> Vec; - async fn receive(topic: String, message: String); - } -} - -pub mod publisher { - #[tarpc::service] - pub trait Publisher { - async fn publish(topic: String, message: String); - } -} - -#[derive(Clone, Debug)] -struct Subscriber { - local_addr: SocketAddr, - topics: Vec, -} - -#[tarpc::server] -impl subscriber::Subscriber for Subscriber { - async fn topics(self, _: context::Context) -> Vec { - self.topics.clone() - } - - async fn receive(self, _: context::Context, topic: String, message: String) { - info!(local_addr = %self.local_addr, %topic, %message, "ReceivedMessage") - } -} - -struct SubscriberHandle(AbortHandle); - -impl Drop for SubscriberHandle { - fn drop(&mut self) { - self.0.abort(); - } -} - -impl Subscriber { - async fn 
connect( - publisher_addr: impl ToSocketAddrs, - topics: Vec, - ) -> anyhow::Result { - let publisher = tcp::connect(publisher_addr, Json::default).await?; - let local_addr = publisher.local_addr()?; - let mut handler = server::BaseChannel::with_defaults(publisher).requests(); - let subscriber = Subscriber { local_addr, topics }; - // The first request is for the topics being subscribed to. - match handler.next().await { - Some(init_topics) => init_topics?.execute(subscriber.clone().serve()).await, - None => { - return Err(anyhow!( - "[{}] Server never initialized the subscriber.", - local_addr - )) - } - }; - let (handler, abort_handle) = future::abortable(handler.execute(subscriber.serve())); - tokio::spawn(async move { - match handler.await { - Ok(()) | Err(future::Aborted) => info!(?local_addr, "subscriber shutdown."), - } - }); - Ok(SubscriberHandle(abort_handle)) - } -} - -#[derive(Debug)] -struct Subscription { - topics: Vec, -} - -#[derive(Clone, Debug)] -struct Publisher { - clients: Arc>>, - subscriptions: Arc>>>, -} - -struct PublisherAddrs { - publisher: SocketAddr, - subscriptions: SocketAddr, -} - -impl Publisher { - async fn start(self) -> io::Result { - let mut connecting_publishers = tcp::listen("localhost:0", Json::default).await?; - - let publisher_addrs = PublisherAddrs { - publisher: connecting_publishers.local_addr(), - subscriptions: self.clone().start_subscription_manager().await?, - }; - - info!(publisher_addr = %publisher_addrs.publisher, "listening for publishers.",); - tokio::spawn(async move { - // Because this is just an example, we know there will only be one publisher. In more - // realistic code, this would be a loop to continually accept new publisher - // connections. 
- let publisher = connecting_publishers.next().await.unwrap().unwrap(); - info!(publisher.peer_addr = ?publisher.peer_addr(), "publisher connected."); - - server::BaseChannel::with_defaults(publisher) - .execute(self.serve()) - .await - }); - - Ok(publisher_addrs) - } - - async fn start_subscription_manager(mut self) -> io::Result { - let mut connecting_subscribers = tcp::listen("localhost:0", Json::default) - .await? - .filter_map(|r| future::ready(r.ok())); - let new_subscriber_addr = connecting_subscribers.get_ref().local_addr(); - info!(?new_subscriber_addr, "listening for subscribers."); - - tokio::spawn(async move { - while let Some(conn) = connecting_subscribers.next().await { - let subscriber_addr = conn.peer_addr().unwrap(); - - let tarpc::client::NewClient { - client: subscriber, - dispatch, - } = subscriber::SubscriberClient::new(client::Config::default(), conn); - let (ready_tx, ready) = oneshot::channel(); - self.clone() - .start_subscriber_gc(subscriber_addr, dispatch, ready); - - // Populate the topics - self.initialize_subscription(subscriber_addr, subscriber) - .await; - - // Signal that initialization is done. 
- ready_tx.send(()).unwrap(); - } - }); - - Ok(new_subscriber_addr) - } - - async fn initialize_subscription( - &mut self, - subscriber_addr: SocketAddr, - subscriber: subscriber::SubscriberClient, - ) { - // Populate the topics - if let Ok(topics) = subscriber.topics(context::current()).await { - self.clients.lock().unwrap().insert( - subscriber_addr, - Subscription { - topics: topics.clone(), - }, - ); - - info!(%subscriber_addr, ?topics, "subscribed to new topics"); - let mut subscriptions = self.subscriptions.write().unwrap(); - for topic in topics { - subscriptions - .entry(topic) - .or_default() - .insert(subscriber_addr, subscriber.clone()); - } - } - } - - fn start_subscriber_gc( - self, - subscriber_addr: SocketAddr, - client_dispatch: impl Future> + Send + 'static, - subscriber_ready: oneshot::Receiver<()>, - ) { - tokio::spawn(async move { - if let Err(e) = client_dispatch.await { - info!( - %subscriber_addr, - error = %e, - "subscriber connection broken"); - } - // Don't clean up the subscriber until initialization is done. 
- let _ = subscriber_ready.await; - if let Some(subscription) = self.clients.lock().unwrap().remove(&subscriber_addr) { - info!( - "[{} unsubscribing from topics: {:?}", - subscriber_addr, subscription.topics - ); - let mut subscriptions = self.subscriptions.write().unwrap(); - for topic in subscription.topics { - let subscribers = subscriptions.get_mut(&topic).unwrap(); - subscribers.remove(&subscriber_addr); - if subscribers.is_empty() { - subscriptions.remove(&topic); - } - } - } - }); - } -} - -#[tarpc::server] -impl publisher::Publisher for Publisher { - async fn publish(self, _: context::Context, topic: String, message: String) { - info!("received message to publish."); - let mut subscribers = match self.subscriptions.read().unwrap().get(&topic) { - None => return, - Some(subscriptions) => subscriptions.clone(), - }; - let mut publications = Vec::new(); - for client in subscribers.values_mut() { - publications.push(client.receive(context::current(), topic.clone(), message.clone())); - } - // Ignore failing subscribers. In a real pubsub, you'd want to continually retry until - // subscribers ack. Of course, a lot would be different in a real pubsub :) - for response in future::join_all(publications).await { - if let Err(e) = response { - info!("failed to broadcast to subscriber: {}", e); - } - } - } -} - -/// Initializes an OpenTelemetry tracing subscriber with a Jaeger backend. 
-fn init_tracing(service_name: &str) -> anyhow::Result<()> { - env::set_var("OTEL_BSP_MAX_EXPORT_BATCH_SIZE", "12"); - let tracer = opentelemetry_jaeger::new_pipeline() - .with_service_name(service_name) - .with_max_packet_size(2usize.pow(13)) - .install_batch(opentelemetry::runtime::Tokio)?; - - tracing_subscriber::registry() - .with(tracing_subscriber::filter::EnvFilter::from_default_env()) - .with(tracing_subscriber::fmt::layer()) - .with(tracing_opentelemetry::layer().with_tracer(tracer)) - .try_init()?; - - Ok(()) -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - init_tracing("Pub/Sub")?; - - let addrs = Publisher { - clients: Arc::new(Mutex::new(HashMap::new())), - subscriptions: Arc::new(RwLock::new(HashMap::new())), - } - .start() - .await?; - - let _subscriber0 = Subscriber::connect( - addrs.subscriptions, - vec!["calculus".into(), "cool shorts".into()], - ) - .await?; - - let _subscriber1 = Subscriber::connect( - addrs.subscriptions, - vec!["cool shorts".into(), "history".into()], - ) - .await?; - - let publisher = publisher::PublisherClient::new( - client::Config::default(), - tcp::connect(addrs.publisher, Json::default).await?, - ) - .spawn(); - - publisher - .publish(context::current(), "calculus".into(), "sqrt(2)".into()) - .await?; - - publisher - .publish( - context::current(), - "cool shorts".into(), - "hello to all".into(), - ) - .await?; - - publisher - .publish(context::current(), "history".into(), "napoleon".to_string()) - .await?; - - drop(_subscriber0); - - publisher - .publish( - context::current(), - "cool shorts".into(), - "hello to who?".into(), - ) - .await?; - - opentelemetry::global::shutdown_tracer_provider(); - info!("done."); - - Ok(()) -} diff --git a/datadog-ipc/tarpc/examples/readme.rs b/datadog-ipc/tarpc/examples/readme.rs deleted file mode 100644 index 80792314f2..0000000000 --- a/datadog-ipc/tarpc/examples/readme.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is 
governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -use futures::future::{self, Ready}; -use tarpc::{ - client, context, - server::{self, Channel}, -}; - -/// This is the service definition. It looks a lot like a trait definition. -/// It defines one RPC, hello, which takes one arg, name, and returns a String. -#[tarpc::service] -pub trait World { - async fn hello(name: String) -> String; -} - -/// This is the type that implements the generated World trait. It is the business logic -/// and is used to start the server. -#[derive(Clone)] -struct HelloServer; - -impl World for HelloServer { - // Each defined rpc generates two items in the trait, a fn that serves the RPC, and - // an associated type representing the future output by the fn. - - type HelloFut = Ready; - - fn hello(self, _: context::Context, name: String) -> Self::HelloFut { - future::ready(format!("Hello, {name}!")) - } -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let (client_transport, server_transport) = tarpc::transport::channel::unbounded(); - - let server = server::BaseChannel::with_defaults(server_transport); - tokio::spawn(server.execute(HelloServer.serve())); - - // WorldClient is generated by the #[tarpc::service] attribute. It has a constructor `new` - // that takes a config and any Transport as input. - let client = WorldClient::new(client::Config::default(), client_transport).spawn(); - - // The client has an RPC method for each RPC defined in the annotated trait. It takes the same - // args as defined, with the addition of a Context, which is always the first arg. The Context - // specifies a deadline and trace information which can be helpful in debugging requests. 
- let hello = client.hello(context::current(), "Stim".to_string()).await?; - - println!("{hello}"); - - Ok(()) -} diff --git a/datadog-ipc/tarpc/examples/tracing.rs b/datadog-ipc/tarpc/examples/tracing.rs deleted file mode 100644 index 5b4b8fd5b5..0000000000 --- a/datadog-ipc/tarpc/examples/tracing.rs +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -use crate::{add::Add as AddService, double::Double as DoubleService}; -use futures::{future, prelude::*}; -use tarpc::{ - client, context, - server::{incoming::Incoming, BaseChannel}, - tokio_serde::formats::Json, -}; -use tracing_subscriber::prelude::*; - -pub mod add { - #[tarpc::service] - pub trait Add { - /// Add two ints together. - async fn add(x: i32, y: i32) -> i32; - } -} - -pub mod double { - #[tarpc::service] - pub trait Double { - /// 2 * x - async fn double(x: i32) -> Result; - } -} - -#[derive(Clone)] -struct AddServer; - -#[tarpc::server] -impl AddService for AddServer { - async fn add(self, _: context::Context, x: i32, y: i32) -> i32 { - x + y - } -} - -#[derive(Clone)] -struct DoubleServer { - add_client: add::AddClient, -} - -#[tarpc::server] -impl DoubleService for DoubleServer { - async fn double(self, _: context::Context, x: i32) -> Result { - self.add_client - .add(context::current(), x, x) - .await - .map_err(|e| e.to_string()) - } -} - -fn init_tracing(service_name: &str) -> anyhow::Result<()> { - let tracer = opentelemetry_jaeger::new_pipeline() - .with_service_name(service_name) - .with_auto_split_batch(true) - .with_max_packet_size(2usize.pow(13)) - .install_batch(opentelemetry::runtime::Tokio)?; - - tracing_subscriber::registry() - .with(tracing_subscriber::EnvFilter::from_default_env()) - .with(tracing_subscriber::fmt::layer()) - .with(tracing_opentelemetry::layer().with_tracer(tracer)) - .try_init()?; - - Ok(()) -} - 
-#[tokio::main] -async fn main() -> anyhow::Result<()> { - init_tracing("tarpc_tracing_example")?; - - let add_listener = tarpc::serde_transport::tcp::listen("localhost:0", Json::default) - .await? - .filter_map(|r| future::ready(r.ok())); - let addr = add_listener.get_ref().local_addr(); - let add_server = add_listener - .map(BaseChannel::with_defaults) - .take(1) - .execute(AddServer.serve()); - tokio::spawn(add_server); - - let to_add_server = tarpc::serde_transport::tcp::connect(addr, Json::default).await?; - let add_client = add::AddClient::new(client::Config::default(), to_add_server).spawn(); - - let double_listener = tarpc::serde_transport::tcp::listen("localhost:0", Json::default) - .await? - .filter_map(|r| future::ready(r.ok())); - let addr = double_listener.get_ref().local_addr(); - let double_server = double_listener - .map(BaseChannel::with_defaults) - .take(1) - .execute(DoubleServer { add_client }.serve()); - tokio::spawn(double_server); - - let to_double_server = tarpc::serde_transport::tcp::connect(addr, Json::default).await?; - let double_client = - double::DoubleClient::new(client::Config::default(), to_double_server).spawn(); - - let ctx = context::current(); - for _ in 1..=5 { - tracing::info!("{:?}", double_client.double(ctx, 1).await?); - } - - opentelemetry::global::shutdown_tracer_provider(); - - Ok(()) -} diff --git a/datadog-ipc/tarpc/rustfmt.toml b/datadog-ipc/tarpc/rustfmt.toml deleted file mode 100644 index 32a9786fa1..0000000000 --- a/datadog-ipc/tarpc/rustfmt.toml +++ /dev/null @@ -1 +0,0 @@ -edition = "2018" diff --git a/datadog-ipc/tarpc/src/cancellations.rs b/datadog-ipc/tarpc/src/cancellations.rs deleted file mode 100644 index 631c7b1cf8..0000000000 --- a/datadog-ipc/tarpc/src/cancellations.rs +++ /dev/null @@ -1,49 +0,0 @@ -use futures::{prelude::*, task::*}; -use std::pin::Pin; -use tokio::sync::mpsc; - -/// Sends request cancellation signals. 
-#[derive(Debug, Clone)] -pub struct RequestCancellation(mpsc::UnboundedSender); - -/// A stream of IDs of requests that have been canceled. -#[derive(Debug)] -pub struct CanceledRequests(mpsc::UnboundedReceiver); - -/// Returns a channel to send request cancellation messages. -pub fn cancellations() -> (RequestCancellation, CanceledRequests) { - // Unbounded because messages are sent in the drop fn. This is fine, because it's still - // bounded by the number of in-flight requests. - let (tx, rx) = mpsc::unbounded_channel(); - (RequestCancellation(tx), CanceledRequests(rx)) -} - -impl RequestCancellation { - /// Cancels the request with ID `request_id`. - /// - /// No validation is done of `request_id`. There is no way to know if the request id provided - /// corresponds to a request actually tracked by the backing channel. `RequestCancellation` is - /// a one-way communication channel. - /// - /// Once request data is cleaned up, a response will never be received by the client. This is - /// useful primarily when request processing ends prematurely for requests with long deadlines - /// which would otherwise continue to be tracked by the backing channel—a kind of leak. - pub fn cancel(&self, request_id: u64) { - let _ = self.0.send(request_id); - } -} - -impl CanceledRequests { - /// Polls for a cancelled request. - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.0.poll_recv(cx) - } -} - -impl Stream for CanceledRequests { - type Item = u64; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_recv(cx) - } -} diff --git a/datadog-ipc/tarpc/src/client.rs b/datadog-ipc/tarpc/src/client.rs deleted file mode 100644 index a9c3c2d41f..0000000000 --- a/datadog-ipc/tarpc/src/client.rs +++ /dev/null @@ -1,889 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -//! 
Provides a client that connects to a server and sends multiplexed requests. - -mod in_flight_requests; - -use crate::{ - cancellations::{cancellations, CanceledRequests, RequestCancellation}, - context, trace, ClientMessage, Request, Response, ServerError, Transport, -}; -use futures::{prelude::*, ready, stream::Fuse, task::*}; -use in_flight_requests::{DeadlineExceededError, InFlightRequests}; -use pin_project::pin_project; -use std::{ - error::Error, - fmt, - pin::Pin, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, -}; -use tokio::sync::{mpsc, oneshot}; -use tracing::Span; - -/// Settings that control the behavior of the client. -#[derive(Clone, Debug)] -#[non_exhaustive] -pub struct Config { - /// The number of requests that can be in flight at once. - /// `max_in_flight_requests` controls the size of the map used by the client - /// for storing pending requests. - pub max_in_flight_requests: usize, - /// The number of requests that can be buffered client-side before being sent. - /// `pending_requests_buffer` controls the size of the channel clients use - /// to communicate with the request dispatch task. - pub pending_request_buffer: usize, -} - -impl Default for Config { - fn default() -> Self { - Config { - max_in_flight_requests: 1_000, - pending_request_buffer: 100, - } - } -} - -/// A channel and dispatch pair. The dispatch drives the sending and receiving of requests -/// and must be polled continuously or spawned. -pub struct NewClient { - /// The new client. - pub client: C, - /// The client's dispatch. - pub dispatch: D, -} - -impl NewClient -where - D: Future> + Send + 'static, - E: std::error::Error + Send + Sync + 'static, -{ - /// Helper method to spawn the dispatch on the default executor. 
- #[cfg(feature = "tokio1")] - #[cfg_attr(docsrs, doc(cfg(feature = "tokio1")))] - pub fn spawn(self) -> C { - let dispatch = self.dispatch.unwrap_or_else(move |e| { - let e = anyhow::Error::new(e); - tracing::warn!("Connection broken: {:?}", e); - }); - tokio::spawn(dispatch); - self.client - } -} - -impl fmt::Debug for NewClient { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "NewClient") - } -} - -const _CHECK_USIZE: () = assert!( - std::mem::size_of::() <= std::mem::size_of::(), - "usize is too big to fit in u64" -); - -/// Handles communication from the client to request dispatch. -#[derive(Debug)] -pub struct Channel { - to_dispatch: mpsc::Sender>, - /// Channel to send a cancel message to the dispatcher. - cancellation: RequestCancellation, - /// The ID to use for the next request to stage. - next_request_id: Arc, -} - -impl Clone for Channel { - fn clone(&self) -> Self { - Self { - to_dispatch: self.to_dispatch.clone(), - cancellation: self.cancellation.clone(), - next_request_id: self.next_request_id.clone(), - } - } -} - -impl Channel { - /// Sends a request to the dispatch task to forward to the server, returning a [`Future`] that - /// resolves to the response. - #[tracing::instrument( - name = "RPC", - skip(self, ctx, request_name, request), - fields( - rpc.trace_id = tracing::field::Empty, - rpc.deadline = %humantime::format_rfc3339(ctx.deadline), - otel.kind = "client", - otel.name = request_name) - )] - pub async fn call( - &self, - mut ctx: context::Context, - request_name: &'static str, - request: Req, - ) -> Result { - let span = Span::current(); - ctx.trace_context = trace::Context::try_from(&span).unwrap_or_else(|_| { - tracing::trace!( - "OpenTelemetry subscriber not installed; making unsampled child context." 
- ); - ctx.trace_context.new_child() - }); - span.record("rpc.trace_id", tracing::field::display(ctx.trace_id())); - let (response_completion, mut response) = oneshot::channel(); - let request_id = - u64::try_from(self.next_request_id.fetch_add(1, Ordering::Relaxed)).unwrap(); - - // ResponseGuard impls Drop to cancel in-flight requests. It should be created before - // sending out the request; otherwise, the response future could be dropped after the - // request is sent out but before ResponseGuard is created, rendering the cancellation - // logic inactive. - let response_guard = ResponseGuard { - response: &mut response, - request_id, - cancellation: &self.cancellation, - cancel: true, - }; - self.to_dispatch - .send(DispatchRequest { - ctx, - span, - request_id, - request, - response_completion, - }) - .await - .map_err(|mpsc::error::SendError(_)| RpcError::Disconnected)?; - response_guard.response().await - } -} - -/// A server response that is completed by request dispatch when the corresponding response -/// arrives off the wire. -struct ResponseGuard<'a, Resp> { - response: &'a mut oneshot::Receiver, DeadlineExceededError>>, - cancellation: &'a RequestCancellation, - request_id: u64, - cancel: bool, -} - -/// An error that can occur in the processing of an RPC. This is not request-specific errors but -/// rather cross-cutting errors that can always occur. -#[derive(thiserror::Error, Clone, Debug, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -pub enum RpcError { - /// The client disconnected from the server. - #[error("the client disconnected from the server")] - Disconnected, - /// The request exceeded its deadline. - #[error("the request exceeded its deadline")] - DeadlineExceeded, - /// The server aborted request processing. 
- #[error("the server aborted request processing")] - Server(#[from] ServerError), -} - -impl From for RpcError { - fn from(_: DeadlineExceededError) -> Self { - RpcError::DeadlineExceeded - } -} - -impl ResponseGuard<'_, Resp> { - async fn response(mut self) -> Result { - let response = (&mut self.response).await; - // Cancel drop logic once a response has been received. - self.cancel = false; - match response { - Ok(resp) => Ok(resp?.message?), - Err(oneshot::error::RecvError { .. }) => { - // The oneshot is Canceled when the dispatch task ends. In that case, - // there's nothing listening on the other side, so there's no point in - // propagating cancellation. - Err(RpcError::Disconnected) - } - } - } -} - -// Cancels the request when dropped, if not already complete. -impl Drop for ResponseGuard<'_, Resp> { - fn drop(&mut self) { - // The receiver needs to be closed to handle the edge case that the request has not - // yet been received by the dispatch task. It is possible for the cancel message to - // arrive before the request itself, in which case the request could get stuck in the - // dispatch map forever if the server never responds (e.g. if the server dies while - // responding). Even if the server does respond, it will have unnecessarily done work - // for a client no longer waiting for a response. To avoid this, the dispatch task - // checks if the receiver is closed before inserting the request in the map. By - // closing the receiver before sending the cancel message, it is guaranteed that if the - // dispatch task misses an early-arriving cancellation message, then it will see the - // receiver as closed. - self.response.close(); - if self.cancel { - self.cancellation.cancel(self.request_id); - } - } -} - -/// Returns a channel and dispatcher that manages the lifecycle of requests initiated by the -/// channel. 
-pub fn new( - config: Config, - transport: C, -) -> NewClient, RequestDispatch> -where - C: Transport, Response>, -{ - let (to_dispatch, pending_requests) = mpsc::channel(config.pending_request_buffer); - let (cancellation, canceled_requests) = cancellations(); - - NewClient { - client: Channel { - to_dispatch, - cancellation, - next_request_id: Arc::new(AtomicUsize::new(0)), - }, - dispatch: RequestDispatch { - config, - canceled_requests, - transport: transport.fuse(), - in_flight_requests: InFlightRequests::default(), - pending_requests, - }, - } -} - -/// Handles the lifecycle of requests, writing requests to the wire, managing cancellations, -/// and dispatching responses to the appropriate channel. -#[must_use] -#[pin_project] -#[derive(Debug)] -pub struct RequestDispatch { - /// Writes requests to the wire and reads responses off the wire. - #[pin] - transport: Fuse, - /// Requests waiting to be written to the wire. - pending_requests: mpsc::Receiver>, - /// Requests that were dropped. - canceled_requests: CanceledRequests, - /// Requests already written to the wire that haven't yet received responses. - in_flight_requests: InFlightRequests, - /// Configures limits to prevent unlimited resource usage. - config: Config, -} - -/// Critical errors that result in a Channel disconnecting. -#[derive(thiserror::Error, Debug)] -pub enum ChannelError -where - E: Error + Send + Sync + 'static, -{ - /// Could not read from the transport. - #[error("could not read from the transport")] - Read(#[source] E), - /// Could not ready the transport for writes. - #[error("could not ready the transport for writes")] - Ready(#[source] E), - /// Could not write to the transport. - #[error("could not write to the transport")] - Write(#[source] E), - /// Could not flush the transport. - #[error("could not flush the transport")] - Flush(#[source] E), - /// Could not close the write end of the transport. 
- #[error("could not close the write end of the transport")] - Close(#[source] E), - /// Could not poll expired requests. - #[error("could not poll expired requests")] - Timer(#[source] tokio::time::error::Error), -} - -impl RequestDispatch -where - C: Transport, Response>, -{ - fn in_flight_requests<'a>(self: &'a mut Pin<&mut Self>) -> &'a mut InFlightRequests { - self.as_mut().project().in_flight_requests - } - - fn transport_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut Fuse> { - self.as_mut().project().transport - } - - fn poll_ready<'a>( - self: &'a mut Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.transport_pin_mut() - .poll_ready(cx) - .map_err(ChannelError::Ready) - } - - fn start_send( - self: &mut Pin<&mut Self>, - message: ClientMessage, - ) -> Result<(), ChannelError> { - self.transport_pin_mut() - .start_send(message) - .map_err(ChannelError::Write) - } - - fn poll_flush<'a>( - self: &'a mut Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.transport_pin_mut() - .poll_flush(cx) - .map_err(ChannelError::Flush) - } - - fn poll_close<'a>( - self: &'a mut Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.transport_pin_mut() - .poll_close(cx) - .map_err(ChannelError::Close) - } - - fn canceled_requests_mut<'a>(self: &'a mut Pin<&mut Self>) -> &'a mut CanceledRequests { - self.as_mut().project().canceled_requests - } - - fn pending_requests_mut<'a>( - self: &'a mut Pin<&mut Self>, - ) -> &'a mut mpsc::Receiver> { - self.as_mut().project().pending_requests - } - - fn pump_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>> { - self.transport_pin_mut() - .poll_next(cx) - .map_err(ChannelError::Read) - .map_ok(|response| { - self.complete(response); - }) - } - - fn pump_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>> { - enum ReceiverStatus { - Pending, - Closed, - } - - let pending_requests_status = match self.as_mut().poll_write_request(cx)? 
{ - Poll::Ready(Some(())) => return Poll::Ready(Some(Ok(()))), - Poll::Ready(None) => ReceiverStatus::Closed, - Poll::Pending => ReceiverStatus::Pending, - }; - - let canceled_requests_status = match self.as_mut().poll_write_cancel(cx)? { - Poll::Ready(Some(())) => return Poll::Ready(Some(Ok(()))), - Poll::Ready(None) => ReceiverStatus::Closed, - Poll::Pending => ReceiverStatus::Pending, - }; - - // Receiving Poll::Ready(None) when polling expired requests never indicates "Closed", - // because there can temporarily be zero in-flight rquests. Therefore, there is no need to - // track the status like is done with pending and cancelled requests. - if let Poll::Ready(Some(_)) = self.in_flight_requests().poll_expired(cx) { - // Expired requests are considered complete; there is no compelling reason to send a - // cancellation message to the server, since it will have already exhausted its - // allotted processing time. - return Poll::Ready(Some(Ok(()))); - } - - match (pending_requests_status, canceled_requests_status) { - (ReceiverStatus::Closed, ReceiverStatus::Closed) => { - ready!(self.poll_close(cx)?); - Poll::Ready(None) - } - (ReceiverStatus::Pending, _) | (_, ReceiverStatus::Pending) => { - // No more messages to process, so flush any messages buffered in the transport. - ready!(self.poll_flush(cx)?); - - // Even if we fully-flush, we return Pending, because we have no more requests - // or cancellations right now. - Poll::Pending - } - } - } - - /// Yields the next pending request, if one is ready to be sent. - /// - /// Note that a request will only be yielded if the transport is *ready* to be written to (i.e. - /// start_send would succeed). 
- fn poll_next_request( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, ChannelError>>> { - if self.in_flight_requests().len() >= self.config.max_in_flight_requests { - tracing::info!( - "At in-flight request capacity ({}/{}).", - self.in_flight_requests().len(), - self.config.max_in_flight_requests - ); - - // No need to schedule a wakeup, because timers and responses are responsible - // for clearing out in-flight requests. - return Poll::Pending; - } - - ready!(self.ensure_writeable(cx)?); - - loop { - match ready!(self.pending_requests_mut().poll_recv(cx)) { - Some(request) => { - if request.response_completion.is_closed() { - let _entered = request.span.enter(); - tracing::info!("AbortRequest"); - continue; - } - - return Poll::Ready(Some(Ok(request))); - } - None => return Poll::Ready(None), - } - } - } - - /// Yields the next pending cancellation, and, if one is ready, cancels the associated request. - /// - /// Note that a request to cancel will only be yielded if the transport is *ready* to be - /// written to (i.e. start_send would succeed). - fn poll_next_cancellation( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>> { - ready!(self.ensure_writeable(cx)?); - - loop { - match ready!(self.canceled_requests_mut().poll_next_unpin(cx)) { - Some(request_id) => { - if let Some((ctx, span)) = self.in_flight_requests().cancel_request(request_id) - { - return Poll::Ready(Some(Ok((ctx, span, request_id)))); - } - } - None => return Poll::Ready(None), - } - } - } - - /// Returns Ready if writing a message to the transport (i.e. via write_request or - /// write_cancel) would not fail due to a full buffer. If the transport is not ready to be - /// written to, flushes it until it is ready. 
- fn ensure_writeable<'a>( - self: &'a mut Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>> { - while self.poll_ready(cx)?.is_pending() { - ready!(self.poll_flush(cx)?); - } - Poll::Ready(Some(Ok(()))) - } - - fn poll_write_request<'a>( - self: &'a mut Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>> { - let DispatchRequest { - ctx, - span, - request_id, - request, - response_completion, - } = match ready!(self.as_mut().poll_next_request(cx)?) { - Some(dispatch_request) => dispatch_request, - None => return Poll::Ready(None), - }; - let entered = span.enter(); - // poll_next_request only returns Ready if there is room to buffer another request. - // Therefore, we can call write_request without fear of erroring due to a full - // buffer. - let request = ClientMessage::Request(Request { - id: request_id, - message: request, - context: context::Context { - deadline: ctx.deadline, - discard_response: false, - trace_context: ctx.trace_context, - }, - }); - self.start_send(request)?; - tracing::info!("SendRequest"); - drop(entered); - - self.in_flight_requests() - .insert_request(request_id, ctx, span, response_completion) - .expect("Request IDs should be unique"); - Poll::Ready(Some(Ok(()))) - } - - fn poll_write_cancel<'a>( - self: &'a mut Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>> { - let (context, span, request_id) = match ready!(self.as_mut().poll_next_cancellation(cx)?) { - Some(triple) => triple, - None => return Poll::Ready(None), - }; - let _entered = span.enter(); - - let cancel = ClientMessage::Cancel { - trace_context: context.trace_context, - request_id, - }; - self.start_send(cancel)?; - tracing::info!("CancelRequest"); - Poll::Ready(Some(Ok(()))) - } - - /// Sends a server response to the client task that initiated the associated request. 
- fn complete(mut self: Pin<&mut Self>, response: Response) -> bool { - self.in_flight_requests().complete_request(response) - } -} - -impl Future for RequestDispatch -where - C: Transport, Response>, -{ - type Output = Result<(), ChannelError>; - - fn poll( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - loop { - match (self.as_mut().pump_read(cx)?, self.as_mut().pump_write(cx)?) { - (Poll::Ready(None), _) => { - tracing::info!("Shutdown: read half closed, so shutting down."); - return Poll::Ready(Ok(())); - } - (read, Poll::Ready(None)) => { - if self.in_flight_requests.is_empty() { - tracing::info!("Shutdown: write half closed, and no requests in flight."); - return Poll::Ready(Ok(())); - } - tracing::info!( - "Shutdown: write half closed, and {} requests in flight.", - self.in_flight_requests().len() - ); - match read { - Poll::Ready(Some(())) => continue, - _ => return Poll::Pending, - } - } - (Poll::Ready(Some(())), _) | (_, Poll::Ready(Some(()))) => {} - _ => return Poll::Pending, - } - } - } -} - -/// A server-bound request sent from a [`Channel`] to request dispatch, which will then manage -/// the lifecycle of the request. 
-#[derive(Debug)] -struct DispatchRequest { - pub ctx: context::Context, - pub span: Span, - pub request_id: u64, - pub request: Req, - pub response_completion: oneshot::Sender, DeadlineExceededError>>, -} - -#[cfg(test)] -mod tests { - use super::{cancellations, Channel, DispatchRequest, RequestDispatch, ResponseGuard}; - use crate::{ - client::{ - in_flight_requests::{DeadlineExceededError, InFlightRequests}, - Config, - }, - context, - transport::{self, channel::UnboundedChannel}, - ClientMessage, Response, - }; - use assert_matches::assert_matches; - use futures::{prelude::*, task::*}; - use std::{ - convert::TryFrom, - pin::Pin, - sync::atomic::{AtomicUsize, Ordering}, - sync::Arc, - }; - use tokio::sync::{mpsc, oneshot}; - use tracing::Span; - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn response_completes_request_future() { - let (mut dispatch, mut _channel, mut server_channel) = set_up(); - let cx = &mut Context::from_waker(noop_waker_ref()); - let (tx, mut rx) = oneshot::channel(); - - dispatch - .in_flight_requests - .insert_request(0, context::current(), Span::current(), tx) - .unwrap(); - server_channel - .send(Response { - request_id: 0, - message: Ok("Resp".into()), - }) - .await - .unwrap(); - assert_matches!(dispatch.as_mut().poll(cx), Poll::Pending); - assert_matches!(rx.try_recv(), Ok(Ok(Response { request_id: 0, message: Ok(resp) })) if resp == "Resp"); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn dispatch_response_cancels_on_drop() { - let (cancellation, mut canceled_requests) = cancellations(); - let (_, mut response) = oneshot::channel(); - drop(ResponseGuard:: { - response: &mut response, - cancellation: &cancellation, - request_id: 3, - cancel: true, - }); - // resp's drop() is run, which should send a cancel message. 
- let cx = &mut Context::from_waker(noop_waker_ref()); - assert_eq!(canceled_requests.poll_recv(cx), Poll::Ready(Some(3))); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn dispatch_response_doesnt_cancel_after_complete() { - let (cancellation, mut canceled_requests) = cancellations(); - let (tx, mut response) = oneshot::channel(); - tx.send(Ok(Response { - request_id: 0, - message: Ok("well done"), - })) - .unwrap(); - // resp's drop() is run, but should not send a cancel message. - ResponseGuard { - response: &mut response, - cancellation: &cancellation, - request_id: 3, - cancel: true, - } - .response() - .await - .unwrap(); - drop(cancellation); - let cx = &mut Context::from_waker(noop_waker_ref()); - assert_eq!(canceled_requests.poll_recv(cx), Poll::Ready(None)); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn stage_request() { - let (mut dispatch, mut channel, _server_channel) = set_up(); - let cx = &mut Context::from_waker(noop_waker_ref()); - let (tx, mut rx) = oneshot::channel(); - - let _resp = send_request(&mut channel, "hi", tx, &mut rx).await; - - #[allow(unstable_name_collisions)] - let req = dispatch.as_mut().poll_next_request(cx).ready(); - assert!(req.is_some()); - - let req = req.unwrap(); - assert_eq!(req.request_id, 0); - assert_eq!(req.request, "hi".to_string()); - } - - // Regression test for https://github.com/google/tarpc/issues/220 - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn stage_request_channel_dropped_doesnt_panic() { - let (mut dispatch, mut channel, mut server_channel) = set_up(); - let cx = &mut Context::from_waker(noop_waker_ref()); - let (tx, mut rx) = oneshot::channel(); - - let _ = send_request(&mut channel, "hi", tx, &mut rx).await; - drop(channel); - - assert!(dispatch.as_mut().poll(cx).is_ready()); - send_response( - &mut server_channel, - Response { - request_id: 0, - message: Ok("hello".into()), - }, - ) - .await; - dispatch.await.unwrap(); - } - - #[allow(unstable_name_collisions)] - 
#[tokio::test] - #[cfg_attr(miri, ignore)] - async fn stage_request_response_future_dropped_is_canceled_before_sending() { - let (mut dispatch, mut channel, _server_channel) = set_up(); - let cx = &mut Context::from_waker(noop_waker_ref()); - let (tx, mut rx) = oneshot::channel(); - - let _ = send_request(&mut channel, "hi", tx, &mut rx).await; - - // Drop the channel so polling returns none if no requests are currently ready. - drop(channel); - // Test that a request future dropped before it's processed by dispatch will cause the request - // to not be added to the in-flight request map. - assert!(dispatch.as_mut().poll_next_request(cx).ready().is_none()); - } - - #[allow(unstable_name_collisions)] - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn stage_request_response_future_dropped_is_canceled_after_sending() { - let (mut dispatch, mut channel, _server_channel) = set_up(); - let cx = &mut Context::from_waker(noop_waker_ref()); - let (tx, mut rx) = oneshot::channel(); - - let req = send_request(&mut channel, "hi", tx, &mut rx).await; - - assert!(dispatch.as_mut().pump_write(cx).ready().is_some()); - assert!(!dispatch.in_flight_requests.is_empty()); - - // Test that a request future dropped after it's processed by dispatch will cause the request - // to be removed from the in-flight request map. - drop(req); - assert_matches!( - dispatch.as_mut().poll_next_cancellation(cx), - Poll::Ready(Some(Ok(_))) - ); - assert!(dispatch.in_flight_requests.is_empty()); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn stage_request_response_closed_skipped() { - let (mut dispatch, mut channel, _server_channel) = set_up(); - let cx = &mut Context::from_waker(noop_waker_ref()); - let (tx, mut rx) = oneshot::channel(); - - // Test that a request future that's closed its receiver but not yet canceled its request -- - // i.e. still in `drop fn` -- will cause the request to not be added to the in-flight request - // map. 
- let resp = send_request(&mut channel, "hi", tx, &mut rx).await; - resp.response.close(); - - assert!(dispatch.as_mut().poll_next_request(cx).is_pending()); - } - - fn set_up() -> ( - Pin< - Box< - RequestDispatch< - String, - String, - UnboundedChannel, ClientMessage>, - >, - >, - >, - Channel, - UnboundedChannel, Response>, - ) { - let _ = tracing_subscriber::fmt().with_test_writer().try_init(); - - let (to_dispatch, pending_requests) = mpsc::channel(1); - let (cancellation, canceled_requests) = cancellations(); - let (client_channel, server_channel) = transport::channel::unbounded(); - - let dispatch = RequestDispatch:: { - transport: client_channel.fuse(), - pending_requests, - canceled_requests, - in_flight_requests: InFlightRequests::default(), - config: Config::default(), - }; - - let channel = Channel { - to_dispatch, - cancellation, - next_request_id: Arc::new(AtomicUsize::new(0)), - }; - - (Box::pin(dispatch), channel, server_channel) - } - - async fn send_request<'a>( - channel: &'a mut Channel, - request: &str, - response_completion: oneshot::Sender, DeadlineExceededError>>, - response: &'a mut oneshot::Receiver, DeadlineExceededError>>, - ) -> ResponseGuard<'a, String> { - let request_id = - u64::try_from(channel.next_request_id.fetch_add(1, Ordering::Relaxed)).unwrap(); - let request = DispatchRequest { - ctx: context::current(), - span: Span::current(), - request_id, - request: request.to_string(), - response_completion, - }; - let response_guard = ResponseGuard { - response, - cancellation: &channel.cancellation, - request_id, - cancel: true, - }; - channel.to_dispatch.send(request).await.unwrap(); - response_guard - } - - async fn send_response( - channel: &mut UnboundedChannel, Response>, - response: Response, - ) { - channel.send(response).await.unwrap(); - } - - trait PollTest { - type T; - #[allow(dead_code)] - fn unwrap(self) -> Poll; - fn ready(self) -> Self::T; - } - - impl PollTest for Poll>> - where - E: ::std::fmt::Display, - { - type T 
= Option; - - fn unwrap(self) -> Poll> { - match self { - Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(t)), - Poll::Ready(None) => Poll::Ready(None), - Poll::Ready(Some(Err(e))) => panic!("{}", e.to_string()), - Poll::Pending => Poll::Pending, - } - } - - fn ready(self) -> Option { - match self { - Poll::Ready(Some(Ok(t))) => Some(t), - Poll::Ready(None) => None, - Poll::Ready(Some(Err(e))) => panic!("{}", e.to_string()), - Poll::Pending => panic!("Pending"), - } - } - } -} diff --git a/datadog-ipc/tarpc/src/client/in_flight_requests.rs b/datadog-ipc/tarpc/src/client/in_flight_requests.rs deleted file mode 100644 index a7e5fb53b2..0000000000 --- a/datadog-ipc/tarpc/src/client/in_flight_requests.rs +++ /dev/null @@ -1,134 +0,0 @@ -use crate::{ - context, - util::{Compact, TimeUntil}, - Response, -}; -use fnv::FnvHashMap; -use std::{ - collections::hash_map, - task::{Context, Poll}, -}; -use tokio::sync::oneshot; -use tokio_util::time::delay_queue::{self, DelayQueue}; -use tracing::Span; - -/// Requests already written to the wire that haven't yet received responses. -#[derive(Debug)] -pub struct InFlightRequests { - request_data: FnvHashMap>, - deadlines: DelayQueue, -} - -impl Default for InFlightRequests { - fn default() -> Self { - Self { - request_data: Default::default(), - deadlines: Default::default(), - } - } -} - -/// The request exceeded its deadline. -#[derive(thiserror::Error, Debug)] -#[non_exhaustive] -#[error("the request exceeded its deadline")] -pub struct DeadlineExceededError; - -#[derive(Debug)] -struct RequestData { - ctx: context::Context, - span: Span, - response_completion: oneshot::Sender, DeadlineExceededError>>, - /// The key to remove the timer for the request's deadline. - deadline_key: delay_queue::Key, -} - -/// An error returned when an attempt is made to insert a request with an ID that is already in -/// use. -#[derive(Debug)] -pub struct AlreadyExistsError; - -impl InFlightRequests { - /// Returns the number of in-flight requests. 
- pub fn len(&self) -> usize { - self.request_data.len() - } - - /// Returns true iff there are no requests in flight. - pub fn is_empty(&self) -> bool { - self.request_data.is_empty() - } - - /// Starts a request, unless a request with the same ID is already in flight. - pub fn insert_request( - &mut self, - request_id: u64, - ctx: context::Context, - span: Span, - response_completion: oneshot::Sender, DeadlineExceededError>>, - ) -> Result<(), AlreadyExistsError> { - match self.request_data.entry(request_id) { - hash_map::Entry::Vacant(vacant) => { - let timeout = ctx.deadline.time_until(); - let deadline_key = self.deadlines.insert(request_id, timeout); - vacant.insert(RequestData { - ctx, - span, - response_completion, - deadline_key, - }); - Ok(()) - } - hash_map::Entry::Occupied(_) => Err(AlreadyExistsError), - } - } - - /// Removes a request without aborting. Returns true iff the request was found. - pub fn complete_request(&mut self, response: Response) -> bool { - if let Some(request_data) = self.request_data.remove(&response.request_id) { - let _entered = request_data.span.enter(); - tracing::info!("ReceiveResponse"); - self.request_data.compact(0.1); - self.deadlines.remove(&request_data.deadline_key); - let _ = request_data.response_completion.send(Ok(response)); - return true; - } - - tracing::debug!( - "No in-flight request found for request_id = {}.", - response.request_id - ); - - // If the response completion was absent, then the request was already canceled. - false - } - - /// Cancels a request without completing (typically used when a request handle was dropped - /// before the request completed). 
- pub fn cancel_request(&mut self, request_id: u64) -> Option<(context::Context, Span)> { - if let Some(request_data) = self.request_data.remove(&request_id) { - self.request_data.compact(0.1); - self.deadlines.remove(&request_data.deadline_key); - Some((request_data.ctx, request_data.span)) - } else { - None - } - } - - /// Yields a request that has expired, completing it with a TimedOut error. - /// The caller should send cancellation messages for any yielded request ID. - pub fn poll_expired(&mut self, cx: &mut Context) -> Poll> { - self.deadlines.poll_expired(cx).map(|expired| { - let request_id = expired?.into_inner(); - if let Some(request_data) = self.request_data.remove(&request_id) { - let _entered = request_data.span.enter(); - tracing::error!("DeadlineExceeded"); - self.request_data.compact(0.1); - let _ = request_data - .response_completion - .send(Err(DeadlineExceededError)); - } - Some(request_id) - }) - } -} diff --git a/datadog-ipc/tarpc/src/context.rs b/datadog-ipc/tarpc/src/context.rs deleted file mode 100644 index 3ffd34f2c6..0000000000 --- a/datadog-ipc/tarpc/src/context.rs +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -//! Provides a request context that carries a deadline and trace context. This context is sent from -//! client to server and is used by the server to enforce response deadlines. - -use crate::trace::{self, TraceId}; -#[cfg(feature = "opentelemetry")] -use opentelemetry::trace::TraceContextExt; -use static_assertions::assert_impl_all; -use std::time::{Duration, SystemTime}; -#[cfg(feature = "opentelemetry")] -use tracing_opentelemetry::OpenTelemetrySpanExt; - -/// A request context that carries request-scoped information like deadlines and trace information. -/// It is sent from client to server and is used by the server to enforce response deadlines. 
-/// -/// The context should not be stored directly in a server implementation, because the context will -/// be different for each request in scope. -#[derive(Clone, Copy, Debug)] -#[non_exhaustive] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -pub struct Context { - /// When the client expects the request to be complete by. The server should cancel the request - /// if it is not complete by this time. - #[cfg_attr(feature = "serde1", serde(default = "ten_seconds_from_now"))] - // Serialized as a Duration to prevent clock skew issues. - #[cfg_attr(feature = "serde1", serde(with = "absolute_to_relative_time"))] - pub deadline: SystemTime, - - /// A client might not care about response at all, in this case server must discard reponse object - pub discard_response: bool, - /// Uniquely identifies requests originating from the same source. - /// When a service handles a request by making requests itself, those requests should - /// include the same `trace_id` as that included on the original request. This way, - /// users can trace related actions across a distributed system. 
- pub trace_context: trace::Context, -} - -#[cfg(feature = "serde1")] -mod absolute_to_relative_time { - pub use serde::{Deserialize, Deserializer, Serialize, Serializer}; - pub use std::time::{Duration, SystemTime}; - - pub fn serialize(deadline: &SystemTime, serializer: S) -> Result - where - S: Serializer, - { - let deadline = deadline - .duration_since(SystemTime::now()) - .unwrap_or(Duration::ZERO); - deadline.serialize(serializer) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let deadline = Duration::deserialize(deserializer)?; - Ok(SystemTime::now() + deadline) - } - - #[cfg(test)] - #[derive(serde::Serialize, serde::Deserialize)] - struct AbsoluteToRelative(#[serde(with = "self")] SystemTime); - - #[test] - fn test_serialize() { - let now = SystemTime::now(); - let deadline = now + Duration::from_secs(10); - let serialized_deadline = bincode::serialize(&AbsoluteToRelative(deadline)).unwrap(); - let deserialized_deadline: Duration = bincode::deserialize(&serialized_deadline).unwrap(); - // TODO: how to avoid flakiness? - assert!(deserialized_deadline > Duration::from_secs(9)); - } - - #[test] - fn test_deserialize() { - let deadline = Duration::from_secs(10); - let serialized_deadline = bincode::serialize(&deadline).unwrap(); - let AbsoluteToRelative(deserialized_deadline) = - bincode::deserialize(&serialized_deadline).unwrap(); - // TODO: how to avoid flakiness? - assert!(deserialized_deadline > SystemTime::now() + Duration::from_secs(9)); - } -} - -assert_impl_all!(Context: Send, Sync); - -fn ten_seconds_from_now() -> SystemTime { - SystemTime::now() + Duration::from_secs(10) -} - -/// Returns the context for the current request, or a default Context if no request is active. 
-pub fn current() -> Context { - Context::current() -} - -#[derive(Clone)] -struct Deadline(SystemTime); - -impl Default for Deadline { - fn default() -> Self { - Self(ten_seconds_from_now()) - } -} - -impl Context { - /// Returns the context for the current request, or a default Context if no request is active. - #[cfg(feature = "opentelemetry")] - pub fn current() -> Self { - let span = tracing::Span::current(); - Self { - trace_context: trace::Context::try_from(&span) - .unwrap_or_else(|_| trace::Context::default()), - discard_response: false, - deadline: span - .context() - .get::() - .cloned() - .unwrap_or_default() - .0, - } - } - /// Returns the context for the current request, or a default Context if no request is active. - #[cfg(not(feature = "opentelemetry"))] - pub fn current() -> Self { - let span = tracing::Span::current(); - Self { - trace_context: trace::Context::try_from(&span) - .unwrap_or_else(|_| trace::Context::default()), - discard_response: false, - deadline: Deadline::default().0, - } - } - - /// Returns the ID of the request-scoped trace. - pub fn trace_id(&self) -> &TraceId { - &self.trace_context.trace_id - } -} - -/// An extension trait for [`tracing::Span`] for propagating tarpc Contexts. -#[allow(dead_code)] -pub(crate) trait SpanExt { - /// Sets the given context on this span. Newly-created spans will be children of the given - /// context's trace context. 
- fn set_context(&self, context: &Context); -} - -#[cfg(feature = "opentelemetry")] -impl SpanExt for tracing::Span { - fn set_context(&self, context: &Context) { - self.set_parent( - opentelemetry::Context::new() - .with_remote_span_context(opentelemetry::trace::SpanContext::new( - opentelemetry::trace::TraceId::from(context.trace_context.trace_id), - opentelemetry::trace::SpanId::from(context.trace_context.span_id), - opentelemetry::trace::TraceFlags::from(context.trace_context.sampling_decision), - true, - opentelemetry::trace::TraceState::default(), - )) - .with_value(Deadline(context.deadline)), - ); - } -} diff --git a/datadog-ipc/tarpc/src/lib.rs b/datadog-ipc/tarpc/src/lib.rs deleted file mode 100644 index 131d1205ae..0000000000 --- a/datadog-ipc/tarpc/src/lib.rs +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. -//! *Disclaimer*: This is not an official Google product. -//! -//! tarpc is an RPC framework for rust with a focus on ease of use. Defining a -//! service can be done in just a few lines of code, and most of the boilerplate of -//! writing a server is taken care of for you. -//! -//! [Documentation](https://docs.rs/crate/tarpc/) -//! -//! ## What is an RPC framework? -//! "RPC" stands for "Remote Procedure Call," a function call where the work of -//! producing the return value is being done somewhere else. When an rpc function is -//! invoked, behind the scenes the function contacts some other process somewhere -//! and asks them to evaluate the function instead. The original function then -//! returns the value produced by the other process. -//! -//! RPC frameworks are a fundamental building block of most microservices-oriented -//! architectures. Two well-known ones are [gRPC](http://www.grpc.io) and -//! [Cap'n Proto](https://capnproto.org/). -//! -//! 
tarpc differentiates itself from other RPC frameworks by defining the schema in code, -//! rather than in a separate language such as .proto. This means there's no separate compilation -//! process, and no context switching between different languages. -//! -//! Some other features of tarpc: -//! - Pluggable transport: any type implementing `Stream + Sink` can be -//! used as a transport to connect the client and server. -//! - `Send + 'static` optional: if the transport doesn't require it, neither does tarpc! -//! - Cascading cancellation: dropping a request will send a cancellation message to the server. -//! The server will cease any unfinished work on the request, subsequently cancelling any of its -//! own requests, repeating for the entire chain of transitive dependencies. -//! - Configurable deadlines and deadline propagation: request deadlines default to 10s if -//! unspecified. The server will automatically cease work when the deadline has passed. Any -//! requests sent by the server that use the request context will propagate the request deadline. -//! For example, if a server is handling a request with a 10s deadline, does 2s of work, then -//! sends a request to another server, that server will see an 8s deadline. -//! - Distributed tracing: tarpc is instrumented with -//! [tracing](https://github.com/tokio-rs/tracing) primitives extended with -//! [OpenTelemetry](https://opentelemetry.io/) traces. Using a compatible tracing subscriber like -//! [Jaeger](https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger), -//! each RPC can be traced through the client, server, and other dependencies downstream of the -//! server. Even for applications not connected to a distributed tracing collector, the -//! instrumentation can also be ingested by regular loggers like -//! [env_logger](https://github.com/env-logger-rs/env_logger/). -//! - Serde serialization: enabling the `serde1` Cargo feature will make service requests and -//! 
responses `Serialize + Deserialize`. It's entirely optional, though: in-memory transports can -//! be used, as well, so the price of serialization doesn't have to be paid when it's not needed. -//! -//! ## Usage -//! Add to your `Cargo.toml` dependencies: -//! -//! ```toml -//! tarpc = "0.29" -//! ``` -//! -//! The `tarpc::service` attribute expands to a collection of items that form an rpc service. -//! These generated types make it easy and ergonomic to write servers with less boilerplate. -//! Simply implement the generated service trait, and you're off to the races! -//! -//! ## Example -//! -//! This example uses [tokio](https://tokio.rs), so add the following dependencies to -//! your `Cargo.toml`: -//! -//! ```toml -//! anyhow = "1.0" -//! futures = "0.3" -//! tarpc = { version = "0.29", features = ["tokio1"] } -//! tokio = { version = "1.0", features = ["macros"] } -//! ``` -//! -//! In the following example, we use an in-process channel for communication between -//! client and server. In real code, you will likely communicate over the network. -//! For a more real-world example, see [example-service](example-service). -//! -//! First, let's set up the dependencies and service definition. -//! -//! ```rust -//! # extern crate futures; -//! -//! use futures::{ -//! future::{self, Ready}, -//! prelude::*, -//! }; -//! use tarpc::{ -//! client, context, -//! server::{self, incoming::Incoming, Channel}, -//! }; -//! -//! // This is the service definition. It looks a lot like a trait definition. -//! // It defines one RPC, hello, which takes one arg, name, and returns a String. -//! #[tarpc::service] -//! trait World { -//! /// Returns a greeting for name. -//! async fn hello(name: String) -> String; -//! } -//! ``` -//! -//! This service definition generates a trait called `World`. Next we need to -//! implement it for our Server struct. -//! -//! ```rust -//! # extern crate futures; -//! # use futures::{ -//! # future::{self, Ready}, -//! # prelude::*, -//! 
# }; -//! # use tarpc::{ -//! # client, context, -//! # server::{self, incoming::Incoming}, -//! # }; -//! # // This is the service definition. It looks a lot like a trait definition. -//! # // It defines one RPC, hello, which takes one arg, name, and returns a String. -//! # #[tarpc::service] -//! # trait World { -//! # /// Returns a greeting for name. -//! # async fn hello(name: String) -> String; -//! # } -//! // This is the type that implements the generated World trait. It is the business logic -//! // and is used to start the server. -//! #[derive(Clone)] -//! struct HelloServer; -//! -//! impl World for HelloServer { -//! // Each defined rpc generates two items in the trait, a fn that serves the RPC, and -//! // an associated type representing the future output by the fn. -//! -//! type HelloFut = Ready; -//! -//! fn hello(self, _: context::Context, name: String) -> Self::HelloFut { -//! future::ready(format!("Hello, {name}!")) -//! } -//! } -//! ``` -//! -//! Lastly let's write our `main` that will start the server. While this example uses an -//! [in-process channel](transport::channel), tarpc also ships a generic [`serde_transport`] -//! behind the `serde-transport` feature, with additional [TCP](serde_transport::tcp) functionality -//! available behind the `tcp` feature. -//! -//! ```rust -//! # extern crate futures; -//! # use futures::{ -//! # future::{self, Ready}, -//! # prelude::*, -//! # }; -//! # use tarpc::{ -//! # client, context, -//! # server::{self, Channel}, -//! # }; -//! # // This is the service definition. It looks a lot like a trait definition. -//! # // It defines one RPC, hello, which takes one arg, name, and returns a String. -//! # #[tarpc::service] -//! # trait World { -//! # /// Returns a greeting for name. -//! # async fn hello(name: String) -> String; -//! # } -//! # // This is the type that implements the generated World trait. It is the business logic -//! # // and is used to start the server. -//! # #[derive(Clone)] -//! 
# struct HelloServer; -//! # impl World for HelloServer { -//! # // Each defined rpc generates two items in the trait, a fn that serves the RPC, and -//! # // an associated type representing the future output by the fn. -//! # type HelloFut = Ready; -//! # fn hello(self, _: context::Context, name: String) -> Self::HelloFut { -//! # future::ready(format!("Hello, {name}!")) -//! # } -//! # } -//! # #[cfg(any(not(feature = "tokio1"), miri))] -//! # fn main() {} -//! # #[cfg(all(feature = "tokio1", not(miri)))] -//! #[tokio::main] -//! async fn main() -> anyhow::Result<()> { -//! let (client_transport, server_transport) = tarpc::transport::channel::unbounded(); -//! -//! let server = server::BaseChannel::with_defaults(server_transport); -//! tokio::spawn(server.execute(HelloServer.serve())); -//! -//! // WorldClient is generated by the #[tarpc::service] attribute. It has a constructor `new` -//! // that takes a config and any Transport as input. -//! let mut client = WorldClient::new(client::Config::default(), client_transport).spawn(); -//! -//! // The client has an RPC method for each RPC defined in the annotated trait. It takes the same -//! // args as defined, with the addition of a Context, which is always the first arg. The Context -//! // specifies a deadline and trace information which can be helpful in debugging requests. -//! let hello = client.hello(context::current(), "Stim".to_string()).await?; -//! -//! println!("{hello}"); -//! -//! Ok(()) -//! } -//! ``` -//! -//! ## Service Documentation -//! -//! Use `cargo doc` as you normally would to see the documentation created for all -//! items expanded by a `service!` invocation. 
-#![deny(missing_docs)] -#![allow(clippy::type_complexity)] -#![cfg_attr(docsrs, feature(doc_cfg))] - -#[cfg(feature = "serde1")] -#[doc(hidden)] -pub use serde; - -#[cfg(feature = "serde-transport")] -pub use {tokio_serde, tokio_util}; - -#[cfg(feature = "serde-transport")] -#[cfg_attr(docsrs, doc(cfg(feature = "serde-transport")))] -pub mod serde_transport; - -pub mod trace; - -#[cfg(feature = "serde1")] -pub use tarpc_plugins::derive_serde; - -/// The main macro that creates RPC services. -/// -/// Rpc methods are specified, mirroring trait syntax: -/// -/// ``` -/// #[tarpc::service] -/// trait Service { -/// /// Say hello -/// async fn hello(name: String) -> String; -/// } -/// ``` -/// -/// Attributes can be attached to each rpc. These attributes -/// will then be attached to the generated service traits' -/// corresponding `fn`s, as well as to the client stubs' RPCs. -/// -/// The following items are expanded in the enclosing module: -/// -/// * `trait Service` -- defines the RPC service. -/// * `fn serve` -- turns a service impl into a request handler. -/// * `Client` -- a client stub with a fn for each RPC. -/// * `fn new_stub` -- creates a new Client stub. -pub use tarpc_plugins::service; - -/// A utility macro that can be used for RPC server implementations. -/// -/// Syntactic sugar to make using async functions in the server implementation -/// easier. It does this by rewriting code like this, which would normally not -/// compile because async functions are disallowed in trait implementations: -/// -/// ```rust -/// # use tarpc::context; -/// # use std::net::SocketAddr; -/// #[tarpc::service] -/// trait World { -/// async fn hello(name: String) -> String; -/// } -/// -/// #[derive(Clone)] -/// struct HelloServer(SocketAddr); -/// -/// #[tarpc::server] -/// impl World for HelloServer { -/// async fn hello(self, _: context::Context, name: String) -> String { -/// format!("Hello, {name}! 
You are connected from {:?}.", self.0) -/// } -/// } -/// ``` -/// -/// Into code like this, which matches the service trait definition: -/// -/// ```rust -/// # use tarpc::context; -/// # use std::pin::Pin; -/// # use futures::Future; -/// # use std::net::SocketAddr; -/// #[derive(Clone)] -/// struct HelloServer(SocketAddr); -/// -/// #[tarpc::service] -/// trait World { -/// async fn hello(name: String) -> String; -/// } -/// -/// impl World for HelloServer { -/// type HelloFut = Pin + Send>>; -/// -/// fn hello(self, _: context::Context, name: String) -> Pin -/// + Send>> { -/// Box::pin(async move { -/// format!("Hello, {name}! You are connected from {:?}.", self.0) -/// }) -/// } -/// } -/// ``` -/// -/// Note that this won't touch functions unless they have been annotated with -/// `async`, meaning that this should not break existing code. -pub use tarpc_plugins::server; - -pub(crate) mod cancellations; -pub mod client; -pub mod context; -pub mod server; -pub mod transport; -pub(crate) mod util; - -pub use crate::transport::sealed::Transport; - -use anyhow::Context as _; -use futures::task::*; -use std::{error::Error, fmt::Display, io, time::SystemTime}; - -/// A message from a client to a server. -#[derive(Debug)] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -#[non_exhaustive] -pub enum ClientMessage { - /// A request initiated by a user. The server responds to a request by invoking a - /// service-provided request handler. The handler completes with a [`response`](Response), which - /// the server sends back to the client. - Request(Request), - /// A command to cancel an in-flight request, automatically sent by the client when a response - /// future is dropped. - /// - /// When received, the server will immediately cancel the main task (top-level future) of the - /// request handler for the associated request. 
Any tasks spawned by the request handler will - /// not be canceled, because the framework layer does not - /// know about them. - Cancel { - /// The trace context associates the message with a specific chain of causally-related actions, - /// possibly orchestrated across many distributed systems. - #[cfg_attr(feature = "serde1", serde(default))] - trace_context: trace::Context, - /// The ID of the request to cancel. - request_id: u64, - }, -} - -/// A request from a client to a server. -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -pub struct Request { - /// Trace context, deadline, and other cross-cutting concerns. - pub context: context::Context, - /// Uniquely identifies the request across all requests sent over a single channel. - pub id: u64, - /// The request body. - pub message: T, -} - -/// A response from a server to a client. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -#[non_exhaustive] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -pub struct Response { - /// The ID of the request being responded to. - pub request_id: u64, - /// The response body, or an error if the request failed. - pub message: Result, -} - -/// An error indicating the server aborted the request early, e.g., due to request throttling. -#[derive(thiserror::Error, Clone, Debug, PartialEq, Eq, Hash)] -#[error("{kind:?}: {detail}")] -#[non_exhaustive] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -pub struct ServerError { - #[cfg_attr( - feature = "serde1", - serde(serialize_with = "util::serde::serialize_io_error_kind_as_u32") - )] - #[cfg_attr( - feature = "serde1", - serde(deserialize_with = "util::serde::deserialize_io_error_kind_from_u32") - )] - /// The type of error that occurred to fail the request. - pub kind: io::ErrorKind, - /// A message describing more detail about the error that occurred. 
- pub detail: String, -} - -impl Request { - /// Returns the deadline for this request. - pub fn deadline(&self) -> &SystemTime { - &self.context.deadline - } -} - -#[allow(dead_code)] -pub(crate) trait PollContext { - fn context(self, context: C) -> Poll>> - where - C: Display + Send + Sync + 'static; - - fn with_context(self, f: F) -> Poll>> - where - C: Display + Send + Sync + 'static, - F: FnOnce() -> C; -} - -impl PollContext for Poll>> -where - E: Error + Send + Sync + 'static, -{ - fn context(self, context: C) -> Poll>> - where - C: Display + Send + Sync + 'static, - { - self.map(|o| o.map(|r| r.context(context))) - } - - fn with_context(self, f: F) -> Poll>> - where - C: Display + Send + Sync + 'static, - F: FnOnce() -> C, - { - self.map(|o| o.map(|r| r.with_context(f))) - } -} diff --git a/datadog-ipc/tarpc/src/serde_transport.rs b/datadog-ipc/tarpc/src/serde_transport.rs deleted file mode 100644 index 05ee81155e..0000000000 --- a/datadog-ipc/tarpc/src/serde_transport.rs +++ /dev/null @@ -1,673 +0,0 @@ -// Copyright 2019 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -//! A generic Serde-based `Transport` that can serialize anything supported by `tokio-serde` via any medium that implements `AsyncRead` and `AsyncWrite`. - -#![deny(missing_docs)] - -use futures::{prelude::*, task::*}; -use pin_project::pin_project; -use serde::{Deserialize, Serialize}; -use std::{error::Error, io, pin::Pin}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_serde::{Framed as SerdeFramed, *}; -use tokio_util::codec::{length_delimited::LengthDelimitedCodec, Framed}; - -/// A transport that serializes to, and deserializes from, a byte stream. -#[pin_project] -pub struct Transport { - #[pin] - inner: SerdeFramed, Item, SinkItem, Codec>, -} - -impl Transport { - /// Returns the inner transport over which messages are sent and received. 
- pub fn get_ref(&self) -> &S { - self.inner.get_ref().get_ref() - } -} - -impl Stream for Transport -where - S: AsyncWrite + AsyncRead, - Item: for<'a> Deserialize<'a>, - Codec: Deserializer, - CodecError: Into>, - SerdeFramed, Item, SinkItem, Codec>: - Stream>, -{ - type Item = io::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>> { - self.project() - .inner - .poll_next(cx) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) - } -} - -impl Sink for Transport -where - S: AsyncWrite, - SinkItem: Serialize, - Codec: Serializer, - CodecError: Into>, - SerdeFramed, Item, SinkItem, Codec>: - Sink, -{ - type Error = io::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .poll_ready(cx) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) - } - - fn start_send(self: Pin<&mut Self>, item: SinkItem) -> io::Result<()> { - self.project() - .inner - .start_send(item) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .poll_flush(cx) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .poll_close(cx) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) - } -} - -/// Constructs a new transport from a framed transport and a serialization codec. 
-pub fn new( - framed_io: Framed, - codec: Codec, -) -> Transport -where - S: AsyncWrite + AsyncRead, - Item: for<'de> Deserialize<'de>, - SinkItem: Serialize, - Codec: Serializer + Deserializer, -{ - Transport { - inner: SerdeFramed::new(framed_io, codec), - } -} - -impl From<(S, Codec)> for Transport -where - S: AsyncWrite + AsyncRead, - Item: for<'de> Deserialize<'de>, - SinkItem: Serialize, - Codec: Serializer + Deserializer, -{ - fn from((io, codec): (S, Codec)) -> Self { - new(Framed::new(io, LengthDelimitedCodec::new()), codec) - } -} - -#[cfg(feature = "tcp")] -#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))] -/// TCP support for generic transport using Tokio. -pub mod tcp { - use { - super::*, - futures::ready, - std::{marker::PhantomData, net::SocketAddr}, - tokio::net::{TcpListener, TcpStream, ToSocketAddrs}, - tokio_util::codec::length_delimited, - }; - - impl Transport { - /// Returns the peer address of the underlying TcpStream. - pub fn peer_addr(&self) -> io::Result { - self.inner.get_ref().get_ref().peer_addr() - } - /// Returns the local address of the underlying TcpStream. - pub fn local_addr(&self) -> io::Result { - self.inner.get_ref().get_ref().local_addr() - } - } - - /// A connection Future that also exposes the length-delimited framing config. - #[must_use] - #[pin_project] - pub struct Connect { - #[pin] - inner: T, - codec_fn: CodecFn, - config: length_delimited::Builder, - ghost: PhantomData<(fn(SinkItem), fn() -> Item)>, - } - - impl Future for Connect - where - T: Future>, - Item: for<'de> Deserialize<'de>, - SinkItem: Serialize, - Codec: Serializer + Deserializer, - CodecFn: Fn() -> Codec, - { - type Output = io::Result>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let io = ready!(self.as_mut().project().inner.poll(cx))?; - Poll::Ready(Ok(new(self.config.new_framed(io), (self.codec_fn)()))) - } - } - - impl Connect { - /// Returns an immutable reference to the length-delimited codec's config. 
- pub fn config(&self) -> &length_delimited::Builder { - &self.config - } - - /// Returns a mutable reference to the length-delimited codec's config. - pub fn config_mut(&mut self) -> &mut length_delimited::Builder { - &mut self.config - } - } - - /// Connects to `addr`, wrapping the connection in a TCP transport. - pub fn connect( - addr: A, - codec_fn: CodecFn, - ) -> Connect>, Item, SinkItem, CodecFn> - where - A: ToSocketAddrs, - Item: for<'de> Deserialize<'de>, - SinkItem: Serialize, - Codec: Serializer + Deserializer, - CodecFn: Fn() -> Codec, - { - Connect { - inner: TcpStream::connect(addr), - codec_fn, - config: LengthDelimitedCodec::builder(), - ghost: PhantomData, - } - } - - /// Listens on `addr`, wrapping accepted connections in TCP transports. - pub async fn listen( - addr: A, - codec_fn: CodecFn, - ) -> io::Result> - where - A: ToSocketAddrs, - Item: for<'de> Deserialize<'de>, - Codec: Serializer + Deserializer, - CodecFn: Fn() -> Codec, - { - let listener = TcpListener::bind(addr).await?; - let local_addr = listener.local_addr()?; - Ok(Incoming { - listener, - codec_fn, - local_addr, - config: LengthDelimitedCodec::builder(), - ghost: PhantomData, - }) - } - - /// A [`TcpListener`] that wraps connections in [transports](Transport). - #[pin_project] - #[derive(Debug)] - pub struct Incoming { - listener: TcpListener, - local_addr: SocketAddr, - codec_fn: CodecFn, - config: length_delimited::Builder, - ghost: PhantomData<(fn() -> Item, fn(SinkItem), Codec)>, - } - - impl Incoming { - /// Returns the address being listened on. - pub fn local_addr(&self) -> SocketAddr { - self.local_addr - } - - /// Returns an immutable reference to the length-delimited codec's config. - pub fn config(&self) -> &length_delimited::Builder { - &self.config - } - - /// Returns a mutable reference to the length-delimited codec's config. 
- pub fn config_mut(&mut self) -> &mut length_delimited::Builder { - &mut self.config - } - } - - impl Stream for Incoming - where - Item: for<'de> Deserialize<'de>, - SinkItem: Serialize, - Codec: Serializer + Deserializer, - CodecFn: Fn() -> Codec, - { - type Item = io::Result>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let conn: TcpStream = - ready!(Pin::new(&mut self.as_mut().project().listener).poll_accept(cx)?).0; - Poll::Ready(Some(Ok(new( - self.config.new_framed(conn), - (self.codec_fn)(), - )))) - } - } -} - -#[cfg(all(unix, feature = "unix"))] -#[cfg_attr(docsrs, doc(cfg(all(unix, feature = "unix"))))] -/// Unix Domain Socket support for generic transport using Tokio. -pub mod unix { - use { - super::*, - futures::ready, - std::{marker::PhantomData, path::Path}, - tokio::net::{unix::SocketAddr, UnixListener, UnixStream}, - tokio_util::codec::length_delimited, - }; - - impl Transport { - /// Returns the socket address of the remote half of the underlying [`UnixStream`]. - pub fn peer_addr(&self) -> io::Result { - self.inner.get_ref().get_ref().peer_addr() - } - /// Returns the socket address of the local half of the underlying [`UnixStream`]. - pub fn local_addr(&self) -> io::Result { - self.inner.get_ref().get_ref().local_addr() - } - } - - /// A connection Future that also exposes the length-delimited framing config. 
- #[must_use] - #[pin_project] - pub struct Connect { - #[pin] - inner: T, - codec_fn: CodecFn, - config: length_delimited::Builder, - ghost: PhantomData<(fn(SinkItem), fn() -> Item)>, - } - - impl Future for Connect - where - T: Future>, - Item: for<'de> Deserialize<'de>, - SinkItem: Serialize, - Codec: Serializer + Deserializer, - CodecFn: Fn() -> Codec, - { - type Output = io::Result>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let io = ready!(self.as_mut().project().inner.poll(cx))?; - Poll::Ready(Ok(new(self.config.new_framed(io), (self.codec_fn)()))) - } - } - - impl Connect { - /// Returns an immutable reference to the length-delimited codec's config. - pub fn config(&self) -> &length_delimited::Builder { - &self.config - } - - /// Returns a mutable reference to the length-delimited codec's config. - pub fn config_mut(&mut self) -> &mut length_delimited::Builder { - &mut self.config - } - } - - /// Connects to socket named by `path`, wrapping the connection in a Unix Domain Socket - /// transport. - pub fn connect( - path: P, - codec_fn: CodecFn, - ) -> Connect>, Item, SinkItem, CodecFn> - where - P: AsRef, - Item: for<'de> Deserialize<'de>, - SinkItem: Serialize, - Codec: Serializer + Deserializer, - CodecFn: Fn() -> Codec, - { - Connect { - inner: UnixStream::connect(path), - codec_fn, - config: LengthDelimitedCodec::builder(), - ghost: PhantomData, - } - } - - /// Listens on the socket named by `path`, wrapping accepted connections in Unix Domain Socket - /// transports. 
- pub async fn listen( - path: P, - codec_fn: CodecFn, - ) -> io::Result> - where - P: AsRef, - Item: for<'de> Deserialize<'de>, - Codec: Serializer + Deserializer, - CodecFn: Fn() -> Codec, - { - let listener = UnixListener::bind(path)?; - let local_addr = listener.local_addr()?; - Ok(Incoming { - listener, - codec_fn, - local_addr, - config: LengthDelimitedCodec::builder(), - ghost: PhantomData, - }) - } - - /// A [`UnixListener`] that wraps connections in [transports](Transport). - #[pin_project] - #[derive(Debug)] - pub struct Incoming { - listener: UnixListener, - local_addr: SocketAddr, - codec_fn: CodecFn, - config: length_delimited::Builder, - ghost: PhantomData<(fn() -> Item, fn(SinkItem), Codec)>, - } - - impl Incoming { - /// Returns the the socket address being listened on. - pub fn local_addr(&self) -> &SocketAddr { - &self.local_addr - } - - /// Returns an immutable reference to the length-delimited codec's config. - pub fn config(&self) -> &length_delimited::Builder { - &self.config - } - - /// Returns a mutable reference to the length-delimited codec's config. - pub fn config_mut(&mut self) -> &mut length_delimited::Builder { - &mut self.config - } - } - - impl Stream for Incoming - where - Item: for<'de> Deserialize<'de>, - SinkItem: Serialize, - Codec: Serializer + Deserializer, - CodecFn: Fn() -> Codec, - { - type Item = io::Result>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let conn: UnixStream = ready!(self.as_mut().project().listener.poll_accept(cx)?).0; - Poll::Ready(Some(Ok(new( - self.config.new_framed(conn), - (self.codec_fn)(), - )))) - } - } - - /// A temporary `PathBuf` that lives in `std::env::temp_dir` and is removed on drop. 
- pub struct TempPathBuf(std::path::PathBuf); - - impl TempPathBuf { - /// A named socket that results in `/` - pub fn new>(name: S) -> Self { - let mut sock = std::env::temp_dir(); - sock.push(name.as_ref()); - Self(sock) - } - - /// Appends a random hex string to the socket name resulting in - /// `/_` - pub fn with_random>(name: S) -> Self { - Self::new(format!("{}_{:016x}", name.as_ref(), rand::random::())) - } - } - - impl AsRef for TempPathBuf { - fn as_ref(&self) -> &std::path::Path { - self.0.as_path() - } - } - - impl Drop for TempPathBuf { - fn drop(&mut self) { - // This will remove the file pointed to by this PathBuf if it exists, however Err's can - // be returned such as attempting to remove a non-existing file, or one which we don't - // have permission to remove. In these cases the Err is swallowed - let _ = std::fs::remove_file(&self.0); - } - } - - #[cfg(test)] - mod tests { - use super::*; - use tokio_serde::formats::SymmetricalJson; - - #[test] - fn temp_path_buf_non_random() { - let sock = TempPathBuf::new("test_non_random"); - let mut good = std::env::temp_dir(); - good.push("test_non_random"); - assert_eq!(sock.as_ref(), good); - assert_eq!(sock.as_ref().file_name().unwrap(), "test_non_random"); - } - - #[test] - fn temp_path_buf_random() { - let sock = TempPathBuf::with_random("test"); - let good = std::env::temp_dir(); - assert!(sock.as_ref().starts_with(good)); - // Since there are 16 random characters we just assert the file_name has the right name - // and starts with the correct string 'test_' - // file name: test_xxxxxxxxxxxxxxxx - // test = 4 - // _ = 1 - // = 16 - // total = 21 - let fname = sock.as_ref().file_name().unwrap().to_string_lossy(); - assert!(fname.starts_with("test_")); - assert_eq!(fname.len(), 21); - } - - #[test] - fn temp_path_buf_non_existing() { - let sock = TempPathBuf::with_random("test"); - let sock_path = std::path::PathBuf::from(sock.as_ref()); - - // No actual file has been created yet - 
assert!(!sock_path.exists()); - // Should not panic - std::mem::drop(sock); - assert!(!sock_path.exists()); - } - - #[test] - fn temp_path_buf_existing_file() { - let sock = TempPathBuf::with_random("test"); - let sock_path = std::path::PathBuf::from(sock.as_ref()); - let _file = std::fs::File::create(&sock).unwrap(); - assert!(sock_path.exists()); - std::mem::drop(sock); - assert!(!sock_path.exists()); - } - - #[test] - fn temp_path_buf_preexisting_file() { - let mut pre_existing = std::env::temp_dir(); - pre_existing.push("test_preexisting"); - let _file = std::fs::File::create(&pre_existing).unwrap(); - let sock = TempPathBuf::new("test_preexisting"); - let sock_path = std::path::PathBuf::from(sock.as_ref()); - assert!(sock_path.exists()); - std::mem::drop(sock); - assert!(!sock_path.exists()); - } - - #[tokio::test] - async fn temp_path_buf_for_socket() { - let sock = TempPathBuf::with_random("test"); - // Save path for testing after drop - let sock_path = std::path::PathBuf::from(sock.as_ref()); - // create the actual socket - let _ = listen(&sock, SymmetricalJson::::default).await; - assert!(sock_path.exists()); - std::mem::drop(sock); - assert!(!sock_path.exists()); - } - } -} - -#[cfg(test)] -mod tests { - use super::Transport; - use assert_matches::assert_matches; - use futures::{task::*, Sink, Stream}; - use pin_utils::pin_mut; - use std::{ - io::{self, Cursor}, - pin::Pin, - }; - use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; - use tokio_serde::formats::SymmetricalJson; - - fn ctx() -> Context<'static> { - Context::from_waker(noop_waker_ref()) - } - - struct TestIo(Cursor>); - - impl AsyncRead for TestIo { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - AsyncRead::poll_read(Pin::new(&mut self.0), cx, buf) - } - } - - impl AsyncWrite for TestIo { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - AsyncWrite::poll_write(Pin::new(&mut self.0), 
cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - AsyncWrite::poll_flush(Pin::new(&mut self.0), cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - AsyncWrite::poll_shutdown(Pin::new(&mut self.0), cx) - } - } - - #[test] - fn close() { - let (tx, _rx) = crate::transport::channel::bounded::<(), ()>(0); - pin_mut!(tx); - assert_matches!(tx.as_mut().poll_close(&mut ctx()), Poll::Ready(Ok(()))); - assert_matches!(tx.as_mut().start_send(()), Err(_)); - } - - #[test] - fn test_stream() { - let data: &[u8] = b"\x00\x00\x00\x18\"Test one, check check.\""; - let transport = Transport::from(( - TestIo(Cursor::new(Vec::from(data))), - SymmetricalJson::::default(), - )); - pin_mut!(transport); - - assert_matches!( - transport.as_mut().poll_next(&mut ctx()), - Poll::Ready(Some(Ok(ref s))) if s == "Test one, check check."); - assert_matches!(transport.as_mut().poll_next(&mut ctx()), Poll::Ready(None)); - } - - #[test] - fn test_sink() { - let writer = Cursor::new(vec![]); - let mut transport = Box::pin(Transport::from(( - TestIo(writer), - SymmetricalJson::::default(), - ))); - - assert_matches!( - transport.as_mut().poll_ready(&mut ctx()), - Poll::Ready(Ok(())) - ); - assert_matches!( - transport - .as_mut() - .start_send("Test one, check check.".into()), - Ok(()) - ); - assert_matches!( - transport.as_mut().poll_flush(&mut ctx()), - Poll::Ready(Ok(())) - ); - assert_eq!( - transport.get_ref().0.get_ref(), - b"\x00\x00\x00\x18\"Test one, check check.\"" - ); - } - - #[cfg(feature = "tcp")] - #[tokio::test] - async fn tcp() -> io::Result<()> { - use super::tcp; - use futures::{SinkExt, StreamExt}; - - let mut listener = tcp::listen("0.0.0.0:0", SymmetricalJson::::default).await?; - let addr = listener.local_addr(); - tokio::spawn(async move { - let mut transport = listener.next().await.unwrap().unwrap(); - let message = transport.next().await.unwrap().unwrap(); - 
transport.send(message).await.unwrap(); - }); - let mut transport = tcp::connect(addr, SymmetricalJson::::default).await?; - transport.send(String::from("test")).await?; - assert_matches!(transport.next().await, Some(Ok(s)) if s == "test"); - assert_matches!(transport.next().await, None); - Ok(()) - } - - #[cfg(all(unix, feature = "unix"))] - #[tokio::test] - async fn uds() -> io::Result<()> { - use super::unix; - use super::*; - - let sock = unix::TempPathBuf::with_random("uds"); - let mut listener = unix::listen(&sock, SymmetricalJson::::default).await?; - tokio::spawn(async move { - let mut transport = listener.next().await.unwrap().unwrap(); - let message = transport.next().await.unwrap().unwrap(); - transport.send(message).await.unwrap(); - }); - let mut transport = unix::connect(&sock, SymmetricalJson::::default).await?; - transport.send(String::from("test")).await?; - assert_matches!(transport.next().await, Some(Ok(s)) if s == "test"); - assert_matches!(transport.next().await, None); - Ok(()) - } -} diff --git a/datadog-ipc/tarpc/src/server.rs b/datadog-ipc/tarpc/src/server.rs deleted file mode 100644 index f6692a7e8d..0000000000 --- a/datadog-ipc/tarpc/src/server.rs +++ /dev/null @@ -1,1242 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -//! Provides a server that concurrently handles many connections sending multiplexed requests. 
- -#[cfg(feature = "opentelemetry")] -use crate::context::SpanExt; -use crate::{ - cancellations::{cancellations, CanceledRequests, RequestCancellation}, - context::{self}, - trace, ClientMessage, Request, Response, Transport, -}; -use ::tokio::sync::mpsc; -use futures::{ - future::{AbortRegistration, Abortable}, - prelude::*, - ready, - stream::Fuse, - task::*, -}; -use in_flight_requests::{AlreadyExistsError, InFlightRequests}; -use pin_project::pin_project; -use std::fmt::Debug; -use std::{error::Error, fmt, marker::PhantomData, pin::Pin}; -use tracing::{info_span, instrument::Instrument, Span}; - -mod in_flight_requests; -#[cfg(test)] -mod testing; - -/// Provides functionality to apply server limits. -pub mod limits; - -/// Provides helper methods for streams of Channels. -pub mod incoming; - -/// Provides convenience functionality for tokio-enabled applications. -#[cfg(feature = "tokio1")] -#[cfg_attr(docsrs, doc(cfg(feature = "tokio1")))] -pub mod tokio; - -/// Settings that control the behavior of [channels](Channel). -#[derive(Clone, Debug)] -pub struct Config { - /// Controls the buffer size of the in-process channel over which a server's handlers send - /// responses to the [`Channel`]. In other words, this is the number of responses that can sit - /// in the outbound queue before request handlers begin blocking. - pub pending_response_buffer: usize, -} - -impl Default for Config { - fn default() -> Self { - Config { - pending_response_buffer: 100, - } - } -} - -impl Config { - /// Returns a channel backed by `transport` and configured with `self`. - pub fn channel(self, transport: T) -> BaseChannel - where - T: Transport, ClientMessage>, - { - BaseChannel::new(self, transport) - } -} - -/// Equivalent to a `FnOnce(Req) -> impl Future`. -pub trait Serve { - /// Type of response. - type Resp; - - /// Type of response future. - type Fut: Future; - - /// Extracts a method name from the request. 
- fn method(&self, _request: &Req) -> Option<&'static str> { - None - } - - /// Responds to a single request. - fn serve(self, ctx: context::Context, req: Req) -> Self::Fut; -} - -impl Serve for F -where - F: FnOnce(context::Context, Req) -> Fut, - Fut: Future, -{ - type Resp = Resp; - type Fut = Fut; - - fn serve(self, ctx: context::Context, req: Req) -> Self::Fut { - self(ctx, req) - } -} - -/// BaseChannel is the standard implementation of a [`Channel`]. -/// -/// BaseChannel manages a [`Transport`](Transport) of client [`messages`](ClientMessage) and -/// implements a [`Stream`] of [requests](TrackedRequest). See the [`Channel`] documentation for -/// how to use channels. -/// -/// Besides requests, the other type of client message handled by `BaseChannel` is [cancellation -/// messages](ClientMessage::Cancel). `BaseChannel` does not allow direct access to cancellation -/// messages. Instead, it internally handles them by cancelling corresponding requests (removing -/// the corresponding in-flight requests and aborting their handlers). -#[pin_project] -pub struct BaseChannel { - config: Config, - /// Writes responses to the wire and reads requests off the wire. - #[pin] - transport: Fuse, - /// In-flight requests that were dropped by the server before completion. - #[pin] - canceled_requests: CanceledRequests, - /// Notifies `canceled_requests` when a request is canceled. - request_cancellation: RequestCancellation, - /// Holds data necessary to clean up in-flight requests. - in_flight_requests: InFlightRequests, - /// Types the request and response. - ghost: PhantomData<(Req, Resp)>, -} - -impl BaseChannel -where - T: Transport, ClientMessage>, -{ - /// Creates a new channel backed by `transport` and configured with `config`. 
- pub fn new(config: Config, transport: T) -> Self { - let (request_cancellation, canceled_requests) = cancellations(); - BaseChannel { - config, - transport: transport.fuse(), - canceled_requests, - request_cancellation, - in_flight_requests: InFlightRequests::default(), - ghost: PhantomData, - } - } - - /// Creates a new channel backed by `transport` and configured with the defaults. - pub fn with_defaults(transport: T) -> Self { - Self::new(Config::default(), transport) - } - - /// Returns the inner transport over which messages are sent and received. - pub fn get_ref(&self) -> &T { - self.transport.get_ref() - } - - /// Returns the inner transport over which messages are sent and received. - pub fn get_pin_ref(self: Pin<&mut Self>) -> Pin<&mut T> { - self.project().transport.get_pin_mut() - } - - fn in_flight_requests_mut<'a>(self: &'a mut Pin<&mut Self>) -> &'a mut InFlightRequests { - self.as_mut().project().in_flight_requests - } - - fn canceled_requests_pin_mut<'a>( - self: &'a mut Pin<&mut Self>, - ) -> Pin<&'a mut CanceledRequests> { - self.as_mut().project().canceled_requests - } - - fn transport_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut Fuse> { - self.as_mut().project().transport - } - - fn start_request( - mut self: Pin<&mut Self>, - mut request: Request, - ) -> Result, AlreadyExistsError> { - let span = info_span!( - "RPC", - rpc.trace_id = %request.context.trace_id(), - rpc.deadline = %humantime::format_rfc3339(request.context.deadline), - otel.kind = "server", - otel.name = tracing::field::Empty, - ); - #[cfg(feature = "opentelemetry")] - span.set_context(&request.context); - request.context.trace_context = trace::Context::try_from(&span).unwrap_or_else(|_| { - tracing::trace!( - "OpenTelemetry subscriber not installed; making unsampled \ - child context." 
- ); - request.context.trace_context.new_child() - }); - let entered = span.enter(); - tracing::debug!("ReceiveRequest {request:?}"); - let start = self.in_flight_requests_mut().start_request( - request.id, - request.context.deadline, - span.clone(), - ); - match start { - Ok(abort_registration) => { - drop(entered); - Ok(TrackedRequest { - abort_registration, - span, - response_guard: ResponseGuard { - request_id: request.id, - request_cancellation: self.request_cancellation.clone(), - cancel: false, - }, - request, - }) - } - Err(AlreadyExistsError) => { - tracing::trace!("DuplicateRequest"); - Err(AlreadyExistsError) - } - } - } -} - -impl fmt::Debug for BaseChannel { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "BaseChannel") - } -} - -/// A request response which may be discarded. -#[derive(Debug)] -pub enum RequestResponse { - /// Indicates nothing is going to be sent back to the sender. - Discarded { - /// The id of the request to discard. - request_id: u64, - }, - /// The response to be sent back to the sender. - Response(Response), -} - -/// A request tracked by a [`Channel`]. -#[derive(Debug)] -pub struct TrackedRequest { - /// The request sent by the client. - pub request: Request, - /// A registration to abort a future when the [`Channel`] that produced this request stops - /// tracking it. - pub abort_registration: AbortRegistration, - /// A span representing the server processing of this request. - pub span: Span, - /// An inert response guard. Becomes active in an InFlightRequest. - pub response_guard: ResponseGuard, -} - -/// The server end of an open connection with a client, receiving requests from, and sending -/// responses to, the client. `Channel` is a [`Transport`] with request lifecycle management. -/// -/// The ways to use a Channel, in order of simplest to most complex, is: -/// 1. [`Channel::execute`] - Requires the `tokio1` feature. 
This method is best for those who -/// do not have specific scheduling needs and whose services are `Send + 'static`. -/// 2. [`Channel::requests`] - This method is best for those who need direct access to individual -/// requests, or are not using `tokio`, or want control over [futures](Future) scheduling. -/// [`Requests`] is a stream of [`InFlightRequests`](InFlightRequest), each which has an -/// [`execute`](InFlightRequest::execute) method. If using `execute`, request processing will -/// automatically cease when either the request deadline is reached or when a corresponding -/// cancellation message is received by the Channel. -/// 3. [`Stream::next`](futures::stream::StreamExt::next) / -/// [`Sink::send`](futures::sink::SinkExt::send) - A user is free to manually read requests -/// from, and send responses into, a Channel in lieu of the previous methods. Channels stream -/// [`TrackedRequests`](TrackedRequest), which, in addition to the request itself, contains the -/// server [`Span`], request lifetime [`AbortRegistration`], and an inert [`ResponseGuard`]. -/// Wrapping response logic in an [`Abortable`] future using the abort registration will ensure -/// that the response does not execute longer than the request deadline. The `Channel` itself -/// will clean up request state once either the deadline expires, or the response guard is -/// dropped, or a response is sent. -/// -/// Channels must be implemented using the decorator pattern: the only way to create a -/// `TrackedRequest` is to get one from another `Channel`. Ultimately, all `TrackedRequests` are -/// created by [`BaseChannel`]. -pub trait Channel -where - Self: - Transport::Resp>, TrackedRequest<::Req>>, -{ - /// Type of request item. - type Req; - - /// Type of response sink item. - type Resp; - - /// The wrapped transport. - type Transport; - - /// Configuration of the channel. - fn config(&self) -> &Config; - - /// Returns the number of in-flight requests over this channel. 
- fn in_flight_requests(&self) -> usize; - - /// Returns the transport underlying the channel. - fn transport(&self) -> &Self::Transport; - - /// Caps the number of concurrent requests to `limit`. An error will be returned for requests - /// over the concurrency limit. - /// - /// Note that this is a very - /// simplistic throttling heuristic. It is easy to set a number that is too low for the - /// resources available to the server. For production use cases, a more advanced throttler is - /// likely needed. - fn max_concurrent_requests( - self, - limit: usize, - ) -> limits::requests_per_channel::MaxRequests - where - Self: Sized, - { - limits::requests_per_channel::MaxRequests::new(self, limit) - } - - /// Returns a stream of requests that automatically handle request cancellation and response - /// routing. - /// - /// This is a terminal operation. After calling `requests`, the channel cannot be retrieved, - /// and the only way to complete requests is via [`Requests::execute`] or - /// [`InFlightRequest::execute`]. - fn requests(self) -> Requests - where - Self: Sized, - { - let (responses_tx, responses) = mpsc::channel(self.config().pending_response_buffer); - - Requests { - channel: self, - pending_responses: responses, - responses_tx, - } - } - - /// Runs the channel until completion by executing all requests using the given service - /// function. Request handlers are run concurrently by [spawning](::tokio::spawn) on tokio's - /// default executor. - #[cfg(feature = "tokio1")] - #[cfg_attr(docsrs, doc(cfg(feature = "tokio1")))] - fn execute(self, serve: S) -> self::tokio::TokioChannelExecutor, S> - where - Self: Sized, - S: Serve + Send + 'static, - S::Fut: Send, - Self::Req: Send + Debug + 'static, - Self::Resp: Send + 'static, - { - self.requests().execute(serve) - } -} - -/// Critical errors that result in a Channel disconnecting. 
-#[derive(thiserror::Error, Debug)] -pub enum ChannelError -where - E: Error + Send + Sync + 'static, -{ - /// An error occurred reading from, or writing to, the transport. - #[error("an error occurred in the transport: {0}")] - Transport(#[source] E), - /// An error occurred while polling expired requests. - #[error("an error occurred while polling expired requests: {0}")] - Timer(#[source] ::tokio::time::error::Error), -} - -impl Stream for BaseChannel -where - T: Transport, ClientMessage>, -{ - type Item = Result, ChannelError>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - #[derive(Clone, Copy, Debug)] - enum ReceiverStatus { - Ready, - Pending, - Closed, - } - - impl ReceiverStatus { - fn combine(self, other: Self) -> Self { - use ReceiverStatus::*; - match (self, other) { - (Ready, _) | (_, Ready) => Ready, - (Closed, Closed) => Closed, - (Pending, Closed) | (Closed, Pending) | (Pending, Pending) => Pending, - } - } - } - - use ReceiverStatus::*; - - loop { - let cancellation_status = match self.canceled_requests_pin_mut().poll_recv(cx) { - Poll::Ready(Some(request_id)) => { - if let Some(span) = self.in_flight_requests_mut().remove_request(request_id) { - let _entered = span.enter(); - tracing::info!("ResponseCancelled"); - } - Ready - } - // Pending cancellations don't block Channel closure, because all they do is ensure - // the Channel's internal state is cleaned up. But Channel closure also cleans up - // the Channel state, so there's no reason to wait on a cancellation before - // closing. - // - // Ready(None) can't happen, since `self` holds a Cancellation. - Poll::Pending | Poll::Ready(None) => Closed, - }; - - let expiration_status = match self.in_flight_requests_mut().poll_expired(cx) { - // No need to send a response, since the client wouldn't be waiting for one - // anymore. 
- Poll::Ready(Some(_)) => Ready, - Poll::Ready(None) => Closed, - Poll::Pending => Pending, - }; - - let request_status = match self - .transport_pin_mut() - .poll_next(cx) - .map_err(ChannelError::Transport)? - { - Poll::Ready(Some(message)) => match message { - ClientMessage::Request(request) => { - match self.as_mut().start_request(request) { - Ok(request) => return Poll::Ready(Some(Ok(request))), - Err(AlreadyExistsError) => { - // Instead of closing the channel if a duplicate request is sent, - // just ignore it, since it's already being processed. Note that we - // cannot return Poll::Pending here, since nothing has scheduled a - // wakeup yet. - continue; - } - } - } - ClientMessage::Cancel { - trace_context, - request_id, - } => { - if !self.in_flight_requests_mut().cancel_request(request_id) { - tracing::trace!( - rpc.trace_id = %trace_context.trace_id, - "Received cancellation, but response handler is already complete.", - ); - } - Ready - } - }, - Poll::Ready(None) => Closed, - Poll::Pending => Pending, - }; - - tracing::trace!( - "Expired requests: {:?}, Inbound: {:?}", - expiration_status, - request_status - ); - match cancellation_status - .combine(expiration_status) - .combine(request_status) - { - Ready => continue, - Closed => return Poll::Ready(None), - Pending => return Poll::Pending, - } - } - } -} - -impl Sink> for BaseChannel -where - T: Transport, ClientMessage>, - T::Error: Error, -{ - type Error = ChannelError; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.project() - .transport - .poll_ready(cx) - .map_err(ChannelError::Transport) - } - - fn start_send( - mut self: Pin<&mut Self>, - response: RequestResponse, - ) -> Result<(), Self::Error> { - match response { - RequestResponse::Response(response) => { - if let Some(span) = self - .in_flight_requests_mut() - .remove_request(response.request_id) - { - let _entered = span.enter(); - tracing::info!("SendResponse"); - self.project() - .transport - 
.start_send(response) - .map_err(ChannelError::Transport) - } else { - // If the request isn't tracked anymore, there's no need to send the response. - Ok(()) - } - } - RequestResponse::Discarded { request_id } => { - self.in_flight_requests_mut().remove_request(request_id); - Ok(()) - } - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - tracing::trace!("poll_flush"); - self.project() - .transport - .poll_flush(cx) - .map_err(ChannelError::Transport) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.project() - .transport - .poll_close(cx) - .map_err(ChannelError::Transport) - } -} - -impl AsRef for BaseChannel { - fn as_ref(&self) -> &T { - self.transport.get_ref() - } -} - -impl Channel for BaseChannel -where - T: Transport, ClientMessage>, -{ - type Req = Req; - type Resp = Resp; - type Transport = T; - - fn config(&self) -> &Config { - &self.config - } - - fn in_flight_requests(&self) -> usize { - self.in_flight_requests.len() - } - - fn transport(&self) -> &Self::Transport { - self.get_ref() - } -} - -/// A stream of requests coming over a channel. `Requests` also drives the sending of responses, so -/// it must be continually polled to ensure progress. -#[pin_project] -pub struct Requests -where - C: Channel, -{ - #[pin] - channel: C, - /// Responses waiting to be written to the wire. - pending_responses: mpsc::Receiver>, - /// Handed out to request handlers to fan in responses. - responses_tx: mpsc::Sender>, -} - -impl Requests -where - C: Channel, -{ - /// Returns a reference to the inner channel over which messages are sent and received. - pub fn channel(&self) -> &C { - &self.channel - } - - /// Returns the inner channel over which messages are sent and received. - pub fn channel_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut C> { - self.as_mut().project().channel - } - - /// Returns the inner channel over which messages are sent and received. 
- pub fn pending_responses_mut<'a>( - self: &'a mut Pin<&mut Self>, - ) -> &'a mut mpsc::Receiver> { - self.as_mut().project().pending_responses - } - - fn pump_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, C::Error>>> { - self.channel_pin_mut().poll_next(cx).map_ok( - |TrackedRequest { - request, - abort_registration, - span, - mut response_guard, - }| { - // The response guard becomes active once in an InFlightRequest. - response_guard.cancel = true; - InFlightRequest { - request, - abort_registration, - span, - response_guard, - response_tx: self.responses_tx.clone(), - } - }, - ) - } - - fn pump_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - read_half_closed: bool, - ) -> Poll>> { - match self.as_mut().poll_next_response(cx)? { - Poll::Ready(Some(response)) => { - // A Ready result from poll_next_response means the Channel is ready to be written - // to. Therefore, we can call start_send without worry of a full buffer. - self.channel_pin_mut().start_send(response)?; - Poll::Ready(Some(Ok(()))) - } - Poll::Ready(None) => { - // Shutdown can't be done before we finish pumping out remaining responses. - ready!(self.channel_pin_mut().poll_flush(cx)?); - Poll::Ready(None) - } - Poll::Pending => { - // No more requests to process, so flush any requests buffered in the transport. - ready!(self.channel_pin_mut().poll_flush(cx)?); - - // Being here means there are no staged requests and all written responses are - // fully flushed. So, if the read half is closed and there are no in-flight - // requests, then we can close the write half. - if read_half_closed && self.channel.in_flight_requests() == 0 { - Poll::Ready(None) - } else { - Poll::Pending - } - } - } - } - - /// Yields a response ready to be written to the Channel sink. - /// - /// Note that a response will only be yielded if the Channel is *ready* to be written to (i.e. - /// start_send would succeed). 
- fn poll_next_response( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, C::Error>>> { - ready!(self.ensure_writeable(cx)?); - - match ready!(self.pending_responses_mut().poll_recv(cx)) { - Some(response) => Poll::Ready(Some(Ok(response))), - None => { - // This branch likely won't happen, since the Requests stream is holding a Sender. - Poll::Ready(None) - } - } - } - - /// Returns Ready if writing a message to the Channel would not fail due to a full buffer. If - /// the Channel is not ready to be written to, flushes it until it is ready. - fn ensure_writeable<'a>( - self: &'a mut Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - while self.channel_pin_mut().poll_ready(cx)?.is_pending() { - ready!(self.channel_pin_mut().poll_flush(cx)?); - } - Poll::Ready(Some(Ok(()))) - } -} - -impl fmt::Debug for Requests -where - C: Channel, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "Requests") - } -} - -/// A fail-safe to ensure requests are properly canceled if request processing is aborted before -/// completing. -#[derive(Debug)] -pub struct ResponseGuard { - request_cancellation: RequestCancellation, - request_id: u64, - cancel: bool, -} - -impl Drop for ResponseGuard { - fn drop(&mut self) { - if self.cancel { - self.request_cancellation.cancel(self.request_id); - } - } -} - -/// A request produced by [Channel::requests]. -/// -/// If dropped without calling [`execute`](InFlightRequest::execute), a cancellation message will -/// be sent to the Channel to clean up associated request state. -#[derive(Debug)] -pub struct InFlightRequest { - request: Request, - abort_registration: AbortRegistration, - response_guard: ResponseGuard, - span: Span, - response_tx: mpsc::Sender>, -} - -impl InFlightRequest { - /// Returns a reference to the request. - pub fn get(&self) -> &Request { - &self.request - } - - /// Returns a [future](Future) that executes the request using the given [service - /// function](Serve). 
The service function's output is automatically sent back to the [Channel] - /// that yielded this request. The request will be executed in the scope of this request's - /// context. - /// - /// The returned future will stop executing when the first of the following conditions is met: - /// - /// 1. The channel that yielded this request receives a [cancellation - /// message](ClientMessage::Cancel) for this request. - /// 2. The request [deadline](crate::context::Context::deadline) is reached. - /// 3. The service function completes. - /// - /// If the returned Future is dropped before completion, a cancellation message will be sent to - /// the Channel to clean up associated request state. - pub async fn execute(self, serve: S) - where - S: Serve, - { - let Self { - response_tx, - mut response_guard, - abort_registration, - span, - request: - Request { - context, - message, - id: request_id, - }, - } = self; - let method = serve.method(&message); - // TODO(https://github.com/rust-lang/rust-clippy/issues/9111) - // remove when clippy is fixed - #[allow(unknown_lints)] - #[allow(clippy::needless_borrow)] - #[allow(clippy::needless_borrows_for_generic_args)] - span.record("otel.name", &method.unwrap_or("")); - let _ = Abortable::new( - async move { - tracing::info!("BeginRequest for request {request_id}"); - let response = serve.serve(context, message).await; - - tracing::info!("CompleteRequest"); - if context.discard_response { - let response = RequestResponse::Discarded { request_id }; - let _ = response_tx.send(response).await; - tracing::debug!("DiscardingResponse for request {request_id}"); - } else { - let response = RequestResponse::Response(Response { - request_id, - message: Ok(response), - }); - let _ = response_tx.send(response).await; - tracing::debug!("BufferResponse for request {request_id}"); - } - }, - abort_registration, - ) - .instrument(span) - .await; - // Request processing has completed, meaning either the channel canceled the request or - // a 
request was sent back to the channel. Either way, the channel will clean up the - // request data, so the request does not need to be canceled. - response_guard.cancel = false; - } -} - -impl Stream for Requests -where - C: Channel, -{ - type Item = Result, C::Error>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - let read = self.as_mut().pump_read(cx)?; - let read_closed = matches!(read, Poll::Ready(None)); - match (read, self.as_mut().pump_write(cx, read_closed)?) { - (Poll::Ready(None), Poll::Ready(None)) => { - return Poll::Ready(None); - } - (Poll::Ready(Some(request_handler)), _) => { - return Poll::Ready(Some(Ok(request_handler))); - } - (_, Poll::Ready(Some(()))) => {} - _ => { - return Poll::Pending; - } - } - } - } -} - -#[cfg(test)] -mod tests { - use super::{in_flight_requests::AlreadyExistsError, BaseChannel, Channel, Config, Requests}; - use crate::server::RequestResponse; - use crate::{ - context, trace, - transport::channel::{self, UnboundedChannel}, - ClientMessage, Request, Response, - }; - use assert_matches::assert_matches; - use futures::{ - future::{pending, AbortRegistration, Abortable, Aborted}, - prelude::*, - Future, - }; - use futures_test::task::noop_context; - use std::{fmt::Debug, pin::Pin, task::Poll}; - - fn test_channel() -> ( - Pin, Response>>>>, - UnboundedChannel, ClientMessage>, - ) { - let (tx, rx) = crate::transport::channel::unbounded(); - (Box::pin(BaseChannel::new(Config::default(), rx)), tx) - } - - fn test_requests() -> ( - Pin< - Box< - Requests< - BaseChannel, Response>>, - >, - >, - >, - UnboundedChannel, ClientMessage>, - ) { - let (tx, rx) = crate::transport::channel::unbounded(); - ( - Box::pin(BaseChannel::new(Config::default(), rx).requests()), - tx, - ) - } - - fn test_bounded_requests( - capacity: usize, - ) -> ( - Pin< - Box< - Requests< - BaseChannel, Response>>, - >, - >, - >, - channel::Channel, ClientMessage>, - ) { - let (tx, rx) = 
crate::transport::channel::bounded(capacity); - // Add 1 because capacity 0 is not supported (but is supported by transport::channel::bounded). - let config = Config { - pending_response_buffer: capacity + 1, - }; - (Box::pin(BaseChannel::new(config, rx).requests()), tx) - } - - fn fake_request(req: Req) -> ClientMessage { - ClientMessage::Request(Request { - context: context::current(), - id: 0, - message: req, - }) - } - - fn test_abortable( - abort_registration: AbortRegistration, - ) -> impl Future> { - Abortable::new(pending(), abort_registration) - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn base_channel_start_send_duplicate_request_returns_error() { - let (mut channel, _tx) = test_channel::<(), ()>(); - - channel - .as_mut() - .start_request(Request { - id: 0, - context: context::current(), - message: (), - }) - .unwrap(); - assert_matches!( - channel.as_mut().start_request(Request { - id: 0, - context: context::current(), - message: () - }), - Err(AlreadyExistsError) - ); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn base_channel_poll_next_aborts_multiple_requests() { - let (mut channel, _tx) = test_channel::<(), ()>(); - - tokio::time::pause(); - let req0 = channel - .as_mut() - .start_request(Request { - id: 0, - context: context::current(), - message: (), - }) - .unwrap(); - let req1 = channel - .as_mut() - .start_request(Request { - id: 1, - context: context::current(), - message: (), - }) - .unwrap(); - tokio::time::advance(std::time::Duration::from_secs(1000)).await; - - assert_matches!( - channel.as_mut().poll_next(&mut noop_context()), - Poll::Pending - ); - assert_matches!(test_abortable(req0.abort_registration).await, Err(Aborted)); - assert_matches!(test_abortable(req1.abort_registration).await, Err(Aborted)); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn base_channel_poll_next_aborts_canceled_request() { - let (mut channel, mut tx) = test_channel::<(), ()>(); - - tokio::time::pause(); - let req = 
channel - .as_mut() - .start_request(Request { - id: 0, - context: context::current(), - message: (), - }) - .unwrap(); - - tx.send(ClientMessage::Cancel { - trace_context: trace::Context::default(), - request_id: 0, - }) - .await - .unwrap(); - - assert_matches!( - channel.as_mut().poll_next(&mut noop_context()), - Poll::Pending - ); - - assert_matches!(test_abortable(req.abort_registration).await, Err(Aborted)); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn base_channel_with_closed_transport_and_in_flight_request_returns_pending() { - let (mut channel, tx) = test_channel::<(), ()>(); - - tokio::time::pause(); - let _abort_registration = channel - .as_mut() - .start_request(Request { - id: 0, - context: context::current(), - message: (), - }) - .unwrap(); - - drop(tx); - assert_matches!( - channel.as_mut().poll_next(&mut noop_context()), - Poll::Pending - ); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn base_channel_with_closed_transport_and_no_in_flight_requests_returns_closed() { - let (mut channel, tx) = test_channel::<(), ()>(); - drop(tx); - assert_matches!( - channel.as_mut().poll_next(&mut noop_context()), - Poll::Ready(None) - ); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn base_channel_poll_next_yields_request() { - let (mut channel, mut tx) = test_channel::<(), ()>(); - tx.send(fake_request(())).await.unwrap(); - - assert_matches!( - channel.as_mut().poll_next(&mut noop_context()), - Poll::Ready(Some(Ok(_))) - ); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn base_channel_poll_next_aborts_request_and_yields_request() { - let (mut channel, mut tx) = test_channel::<(), ()>(); - - tokio::time::pause(); - let req = channel - .as_mut() - .start_request(Request { - id: 0, - context: context::current(), - message: (), - }) - .unwrap(); - tokio::time::advance(std::time::Duration::from_secs(1000)).await; - - tx.send(fake_request(())).await.unwrap(); - - assert_matches!( - 
channel.as_mut().poll_next(&mut noop_context()), - Poll::Ready(Some(Ok(_))) - ); - assert_matches!(test_abortable(req.abort_registration).await, Err(Aborted)); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn base_channel_start_send_removes_in_flight_request() { - let (mut channel, _tx) = test_channel::<(), ()>(); - - channel - .as_mut() - .start_request(Request { - id: 0, - context: context::current(), - message: (), - }) - .unwrap(); - assert_eq!(channel.in_flight_requests(), 1); - channel - .as_mut() - .start_send(RequestResponse::Response(Response { - request_id: 0, - message: Ok(()), - })) - .unwrap(); - assert_eq!(channel.in_flight_requests(), 0); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn in_flight_request_drop_cancels_request() { - let (mut requests, mut tx) = test_requests::<(), ()>(); - tx.send(fake_request(())).await.unwrap(); - - let request = match requests.as_mut().poll_next(&mut noop_context()) { - Poll::Ready(Some(Ok(request))) => request, - result => panic!("Unexpected result: {result:?}"), - }; - drop(request); - - let poll = requests - .as_mut() - .channel_pin_mut() - .poll_next(&mut noop_context()); - assert!(poll.is_pending()); - let in_flight_requests = requests.channel().in_flight_requests(); - assert_eq!(in_flight_requests, 0); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn in_flight_requests_successful_execute_doesnt_cancel_request() { - let (mut requests, mut tx) = test_requests::<(), ()>(); - tx.send(fake_request(())).await.unwrap(); - - let request = match requests.as_mut().poll_next(&mut noop_context()) { - Poll::Ready(Some(Ok(request))) => request, - result => panic!("Unexpected result: {result:?}"), - }; - request.execute(|_, _| async {}).await; - assert!(requests - .as_mut() - .channel_pin_mut() - .canceled_requests - .poll_recv(&mut noop_context()) - .is_pending()); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn 
requests_poll_next_response_returns_pending_when_buffer_full() { - let (mut requests, _tx) = test_bounded_requests::<(), ()>(0); - - // Response written to the transport. - requests - .as_mut() - .channel_pin_mut() - .start_request(Request { - id: 0, - context: context::current(), - message: (), - }) - .unwrap(); - requests - .as_mut() - .channel_pin_mut() - .start_send(RequestResponse::Response(Response { - request_id: 0, - message: Ok(()), - })) - .unwrap(); - - // Response waiting to be written. - requests - .as_mut() - .project() - .responses_tx - .send(RequestResponse::Response(Response { - request_id: 1, - message: Ok(()), - })) - .await - .unwrap(); - - requests - .as_mut() - .channel_pin_mut() - .start_request(Request { - id: 1, - context: context::current(), - message: (), - }) - .unwrap(); - - assert_matches!( - requests.as_mut().poll_next_response(&mut noop_context()), - Poll::Pending - ); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn requests_pump_write_returns_pending_when_buffer_full() { - let (mut requests, _tx) = test_bounded_requests::<(), ()>(0); - - // Response written to the transport. - requests - .as_mut() - .channel_pin_mut() - .start_request(Request { - id: 0, - context: context::current(), - message: (), - }) - .unwrap(); - requests - .as_mut() - .channel_pin_mut() - .start_send(RequestResponse::Response(Response { - request_id: 0, - message: Ok(()), - })) - .unwrap(); - - // Response waiting to be written. - requests - .as_mut() - .channel_pin_mut() - .start_request(Request { - id: 1, - context: context::current(), - message: (), - }) - .unwrap(); - requests - .as_mut() - .project() - .responses_tx - .send(RequestResponse::Response(Response { - request_id: 1, - message: Ok(()), - })) - .await - .unwrap(); - - assert_matches!( - requests.as_mut().pump_write(&mut noop_context(), true), - Poll::Pending - ); - // Assert that the pending response was not polled while the channel was blocked. 
- assert_matches!( - requests.as_mut().pending_responses_mut().recv().await, - Some(_) - ); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn requests_pump_read() { - let (mut requests, mut tx) = test_requests::<(), ()>(); - - // Response written to the transport. - tx.send(fake_request(())).await.unwrap(); - - assert_matches!( - requests.as_mut().pump_read(&mut noop_context()), - Poll::Ready(Some(Ok(_))) - ); - assert_eq!(requests.channel.in_flight_requests(), 1); - } -} diff --git a/datadog-ipc/tarpc/src/server/in_flight_requests.rs b/datadog-ipc/tarpc/src/server/in_flight_requests.rs deleted file mode 100644 index 471ae69e12..0000000000 --- a/datadog-ipc/tarpc/src/server/in_flight_requests.rs +++ /dev/null @@ -1,225 +0,0 @@ -use crate::util::{Compact, TimeUntil}; -use fnv::FnvHashMap; -use futures::future::{AbortHandle, AbortRegistration}; -use std::{ - collections::hash_map, - task::{Context, Poll}, - time::SystemTime, -}; -use tokio_util::time::delay_queue::{self, DelayQueue}; -use tracing::Span; - -/// A data structure that tracks in-flight requests. It aborts requests, -/// either on demand or when a request deadline expires. -#[derive(Debug, Default)] -pub struct InFlightRequests { - request_data: FnvHashMap, - deadlines: DelayQueue, -} - -/// Data needed to clean up a single in-flight request. -#[derive(Debug)] -struct RequestData { - /// Aborts the response handler for the associated request. - abort_handle: AbortHandle, - /// The key to remove the timer for the request's deadline. - deadline_key: delay_queue::Key, - /// The client span. - span: Span, -} - -/// An error returned when a request attempted to start with the same ID as a request already -/// in flight. -#[derive(Debug)] -pub struct AlreadyExistsError; - -impl InFlightRequests { - /// Returns the number of in-flight requests. - pub fn len(&self) -> usize { - self.request_data.len() - } - - /// Starts a request, unless a request with the same ID is already in flight. 
- pub fn start_request( - &mut self, - request_id: u64, - deadline: SystemTime, - span: Span, - ) -> Result { - match self.request_data.entry(request_id) { - hash_map::Entry::Vacant(vacant) => { - let timeout = deadline.time_until(); - let (abort_handle, abort_registration) = AbortHandle::new_pair(); - let deadline_key = self.deadlines.insert(request_id, timeout); - vacant.insert(RequestData { - abort_handle, - deadline_key, - span, - }); - Ok(abort_registration) - } - hash_map::Entry::Occupied(_) => Err(AlreadyExistsError), - } - } - - /// Cancels an in-flight request. Returns true iff the request was found. - pub fn cancel_request(&mut self, request_id: u64) -> bool { - if let Some(RequestData { - span, - abort_handle, - deadline_key, - }) = self.request_data.remove(&request_id) - { - let _entered = span.enter(); - self.request_data.compact(0.1); - abort_handle.abort(); - self.deadlines.remove(&deadline_key); - tracing::info!("ReceiveCancel for request {request_id}"); - true - } else { - false - } - } - - /// Removes a request without aborting. Returns true iff the request was found. - /// This method should be used when a response is being sent. - pub fn remove_request(&mut self, request_id: u64) -> Option { - if let Some(request_data) = self.request_data.remove(&request_id) { - self.request_data.compact(0.1); - self.deadlines.remove(&request_data.deadline_key); - Some(request_data.span) - } else { - None - } - } - - /// Yields a request that has expired, aborting any ongoing processing of that request. - pub fn poll_expired(&mut self, cx: &mut Context) -> Poll> { - if self.deadlines.is_empty() { - // TODO(https://github.com/tokio-rs/tokio/issues/4161) - // This is a workaround for DelayQueue not always treating this case correctly. - return Poll::Ready(None); - } - self.deadlines.poll_expired(cx).map(|expired| { - let expired = expired?; - if let Some(RequestData { - abort_handle, span, .. 
- }) = self.request_data.remove(expired.get_ref()) - { - let _entered = span.enter(); - self.request_data.compact(0.1); - abort_handle.abort(); - tracing::error!("DeadlineExceeded for request {}", expired.get_ref()); - } - Some(expired.into_inner()) - }) - } -} - -/// When InFlightRequests is dropped, any outstanding requests are aborted. -impl Drop for InFlightRequests { - fn drop(&mut self) { - self.request_data - .values() - .for_each(|request_data| request_data.abort_handle.abort()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use assert_matches::assert_matches; - use futures::{ - future::{pending, Abortable}, - FutureExt, - }; - use futures_test::task::noop_context; - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn start_request_increases_len() { - let mut in_flight_requests = InFlightRequests::default(); - assert_eq!(in_flight_requests.len(), 0); - in_flight_requests - .start_request(0, SystemTime::now(), Span::current()) - .unwrap(); - assert_eq!(in_flight_requests.len(), 1); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn polling_expired_aborts() { - let mut in_flight_requests = InFlightRequests::default(); - let abort_registration = in_flight_requests - .start_request(0, SystemTime::now(), Span::current()) - .unwrap(); - let mut abortable_future = Box::new(Abortable::new(pending::<()>(), abort_registration)); - - tokio::time::pause(); - tokio::time::advance(std::time::Duration::from_secs(1000)).await; - - assert_matches!( - in_flight_requests.poll_expired(&mut noop_context()), - Poll::Ready(Some(_)) - ); - assert_matches!( - abortable_future.poll_unpin(&mut noop_context()), - Poll::Ready(Err(_)) - ); - assert_eq!(in_flight_requests.len(), 0); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn cancel_request_aborts() { - let mut in_flight_requests = InFlightRequests::default(); - let abort_registration = in_flight_requests - .start_request(0, SystemTime::now(), Span::current()) - .unwrap(); - let mut 
abortable_future = Box::new(Abortable::new(pending::<()>(), abort_registration)); - - assert!(in_flight_requests.cancel_request(0)); - assert_matches!( - abortable_future.poll_unpin(&mut noop_context()), - Poll::Ready(Err(_)) - ); - assert_eq!(in_flight_requests.len(), 0); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn remove_request_doesnt_abort() { - let mut in_flight_requests = InFlightRequests::default(); - assert!(in_flight_requests.deadlines.is_empty()); - - let abort_registration = in_flight_requests - .start_request( - 0, - SystemTime::now() + std::time::Duration::from_secs(10), - Span::current(), - ) - .unwrap(); - let mut abortable_future = Box::new(Abortable::new(pending::<()>(), abort_registration)); - - // Precondition: Pending expiration - assert_matches!( - in_flight_requests.poll_expired(&mut noop_context()), - Poll::Pending - ); - assert!(!in_flight_requests.deadlines.is_empty()); - - assert_matches!(in_flight_requests.remove_request(0), Some(_)); - // Postcondition: No pending expirations - assert!(in_flight_requests.deadlines.is_empty()); - assert_matches!( - in_flight_requests.poll_expired(&mut noop_context()), - Poll::Ready(None) - ); - assert_matches!( - abortable_future.poll_unpin(&mut noop_context()), - Poll::Pending - ); - assert_eq!(in_flight_requests.len(), 0); - } -} diff --git a/datadog-ipc/tarpc/src/server/incoming.rs b/datadog-ipc/tarpc/src/server/incoming.rs deleted file mode 100644 index c1107658f6..0000000000 --- a/datadog-ipc/tarpc/src/server/incoming.rs +++ /dev/null @@ -1,51 +0,0 @@ -use super::{ - limits::{channels_per_key::MaxChannelsPerKey, requests_per_channel::MaxRequestsPerChannel}, - Channel, -}; -#[cfg(feature = "tokio1")] -use super::{tokio::TokioServerExecutor, Serve}; -use futures::prelude::*; -use std::fmt::Debug; -use std::{fmt, hash::Hash}; - -/// An extension trait for [streams](futures::prelude::Stream) of [`Channels`](Channel). 
-pub trait Incoming -where - Self: Sized + Stream, - C: Channel, - C::Req: Debug, -{ - /// Enforces channel per-key limits. - fn max_channels_per_key(self, n: u32, keymaker: KF) -> MaxChannelsPerKey - where - K: fmt::Display + Eq + Hash + Clone + Unpin, - KF: Fn(&C) -> K, - { - MaxChannelsPerKey::new(self, n, keymaker) - } - - /// Caps the number of concurrent requests per channel. - fn max_concurrent_requests_per_channel(self, n: usize) -> MaxRequestsPerChannel { - MaxRequestsPerChannel::new(self, n) - } - - /// [Executes](Channel::execute) each incoming channel. Each channel will be handled - /// concurrently by spawning on tokio's default executor, and each request will be also - /// be spawned on tokio's default executor. - #[cfg(feature = "tokio1")] - #[cfg_attr(docsrs, doc(cfg(feature = "tokio1")))] - fn execute(self, serve: S) -> TokioServerExecutor - where - S: Serve, - { - TokioServerExecutor::new(self, serve) - } -} - -impl Incoming for S -where - S: Sized + Stream, - C: Channel, - C::Req: Debug, -{ -} diff --git a/datadog-ipc/tarpc/src/server/limits.rs b/datadog-ipc/tarpc/src/server/limits.rs deleted file mode 100644 index c74dba91ba..0000000000 --- a/datadog-ipc/tarpc/src/server/limits.rs +++ /dev/null @@ -1,5 +0,0 @@ -/// Provides functionality to limit the number of active channels. -pub mod channels_per_key; - -/// Provides a [channel](crate::server::Channel) that limits the number of in-flight requests. -pub mod requests_per_channel; diff --git a/datadog-ipc/tarpc/src/server/limits/channels_per_key.rs b/datadog-ipc/tarpc/src/server/limits/channels_per_key.rs deleted file mode 100644 index 06b6a1fcf3..0000000000 --- a/datadog-ipc/tarpc/src/server/limits/channels_per_key.rs +++ /dev/null @@ -1,478 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. 
- -use crate::{ - server::{self, Channel}, - util::Compact, -}; -use fnv::FnvHashMap; -use futures::{prelude::*, ready, stream::Fuse, task::*}; -use pin_project::pin_project; -use std::sync::{Arc, Weak}; -use std::{collections::hash_map::Entry, fmt, hash::Hash, pin::Pin}; -use tokio::sync::mpsc; -use tracing::{debug, info, trace}; - -/// An [`Incoming`](crate::server::incoming::Incoming) stream that drops new channels based on -/// per-key limits. -/// -/// The decision to drop a Channel is made once at the time the Channel materializes. Once a -/// Channel is yielded, it will not be prematurely dropped. -#[pin_project] -#[derive(Debug)] -pub struct MaxChannelsPerKey -where - K: Eq + Hash, -{ - #[pin] - listener: Fuse, - channels_per_key: u32, - dropped_keys: mpsc::UnboundedReceiver, - dropped_keys_tx: mpsc::UnboundedSender, - key_counts: FnvHashMap>>, - keymaker: F, -} - -/// A channel that is tracked by [`MaxChannelsPerKey`]. -#[pin_project] -#[derive(Debug)] -pub struct TrackedChannel { - #[pin] - inner: C, - tracker: Arc>, -} - -#[derive(Debug)] -struct Tracker { - key: Option, - dropped_keys: mpsc::UnboundedSender, -} - -impl Drop for Tracker { - fn drop(&mut self) { - // Don't care if the listener is dropped. 
- let _ = self.dropped_keys.send(self.key.take().unwrap()); - } -} - -impl Stream for TrackedChannel -where - C: Stream, -{ - type Item = ::Item; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.inner_pin_mut().poll_next(cx) - } -} - -impl Sink for TrackedChannel -where - C: Sink, -{ - type Error = C::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.inner_pin_mut().poll_ready(cx) - } - - fn start_send(mut self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - self.inner_pin_mut().start_send(item) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.inner_pin_mut().poll_flush(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.inner_pin_mut().poll_close(cx) - } -} - -impl AsRef for TrackedChannel { - fn as_ref(&self) -> &C { - &self.inner - } -} - -impl Channel for TrackedChannel -where - C: Channel, -{ - type Req = C::Req; - type Resp = C::Resp; - type Transport = C::Transport; - - fn config(&self) -> &server::Config { - self.inner.config() - } - - fn in_flight_requests(&self) -> usize { - self.inner.in_flight_requests() - } - - fn transport(&self) -> &Self::Transport { - self.inner.transport() - } -} - -impl TrackedChannel { - /// Returns the inner channel. - pub fn get_ref(&self) -> &C { - &self.inner - } - - /// Returns the pinned inner channel. - fn inner_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut C> { - self.as_mut().project().inner - } -} - -impl MaxChannelsPerKey -where - K: Eq + Hash, - S: Stream, - F: Fn(&S::Item) -> K, -{ - /// Sheds new channels to stay under configured limits. 
- pub(crate) fn new(listener: S, channels_per_key: u32, keymaker: F) -> Self { - let (dropped_keys_tx, dropped_keys) = mpsc::unbounded_channel(); - MaxChannelsPerKey { - listener: listener.fuse(), - channels_per_key, - dropped_keys, - dropped_keys_tx, - key_counts: FnvHashMap::default(), - keymaker, - } - } -} - -impl MaxChannelsPerKey -where - S: Stream, - K: fmt::Display + Eq + Hash + Clone + Unpin, - F: Fn(&S::Item) -> K, -{ - fn listener_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut Fuse> { - self.as_mut().project().listener - } - - fn handle_new_channel( - mut self: Pin<&mut Self>, - stream: S::Item, - ) -> Result, K> { - let key = (self.as_mut().keymaker)(&stream); - let tracker = self.as_mut().increment_channels_for_key(key.clone())?; - - trace!( - channel_filter_key = %key, - open_channels = Arc::strong_count(&tracker), - max_open_channels = self.channels_per_key, - "Opening channel"); - - Ok(TrackedChannel { - tracker, - inner: stream, - }) - } - - fn increment_channels_for_key(self: Pin<&mut Self>, key: K) -> Result>, K> { - let self_ = self.project(); - let dropped_keys = self_.dropped_keys_tx; - match self_.key_counts.entry(key.clone()) { - Entry::Vacant(vacant) => { - let tracker = Arc::new(Tracker { - key: Some(key), - dropped_keys: dropped_keys.clone(), - }); - - vacant.insert(Arc::downgrade(&tracker)); - Ok(tracker) - } - Entry::Occupied(mut o) => { - let count = o.get().strong_count(); - if count >= TryFrom::try_from(*self_.channels_per_key).unwrap() { - info!( - channel_filter_key = %key, - open_channels = count, - max_open_channels = *self_.channels_per_key, - "At open channel limit"); - Err(key) - } else { - Ok(o.get().upgrade().unwrap_or_else(|| { - let tracker = Arc::new(Tracker { - key: Some(key), - dropped_keys: dropped_keys.clone(), - }); - - *o.get_mut() = Arc::downgrade(&tracker); - tracker - })) - } - } - } - } - - fn poll_listener( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, K>>> { - match 
ready!(self.listener_pin_mut().poll_next_unpin(cx)) { - Some(codec) => Poll::Ready(Some(self.handle_new_channel(codec))), - None => Poll::Ready(None), - } - } - - fn poll_closed_channels(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - let self_ = self.project(); - match ready!(self_.dropped_keys.poll_recv(cx)) { - Some(key) => { - debug!( - channel_filter_key = %key, - "All channels dropped"); - self_.key_counts.remove(&key); - self_.key_counts.compact(0.1); - Poll::Ready(()) - } - None => unreachable!("Holding a copy of closed_channels and didn't close it."), - } - } -} - -impl Stream for MaxChannelsPerKey -where - S: Stream, - K: fmt::Display + Eq + Hash + Clone + Unpin, - F: Fn(&S::Item) -> K, -{ - type Item = TrackedChannel; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - loop { - match ( - self.as_mut().poll_listener(cx), - self.as_mut().poll_closed_channels(cx), - ) { - (Poll::Ready(Some(Ok(channel))), _) => { - return Poll::Ready(Some(channel)); - } - (Poll::Ready(Some(Err(_))), _) => { - continue; - } - (_, Poll::Ready(())) => continue, - (Poll::Pending, Poll::Pending) => return Poll::Pending, - (Poll::Ready(None), Poll::Pending) => { - trace!("Shutting down listener."); - return Poll::Ready(None); - } - } - } - } -} -#[cfg(test)] -fn ctx() -> Context<'static> { - use futures::task::*; - - Context::from_waker(noop_waker_ref()) -} - -#[test] -fn tracker_drop() { - use assert_matches::assert_matches; - - let (tx, mut rx) = mpsc::unbounded_channel(); - Tracker { - key: Some(1), - dropped_keys: tx, - }; - assert_matches!(rx.poll_recv(&mut ctx()), Poll::Ready(Some(1))); -} - -#[test] -fn tracked_channel_stream() { - use assert_matches::assert_matches; - use pin_utils::pin_mut; - - let (chan_tx, chan) = futures::channel::mpsc::unbounded(); - let (dropped_keys, _) = mpsc::unbounded_channel(); - let channel = TrackedChannel { - inner: chan, - tracker: Arc::new(Tracker { - key: Some(1), - dropped_keys, - }), - }; - 
- chan_tx.unbounded_send("test").unwrap(); - pin_mut!(channel); - assert_matches!(channel.poll_next(&mut ctx()), Poll::Ready(Some("test"))); -} - -#[test] -fn tracked_channel_sink() { - use assert_matches::assert_matches; - use pin_utils::pin_mut; - - let (chan, mut chan_rx) = futures::channel::mpsc::unbounded(); - let (dropped_keys, _) = mpsc::unbounded_channel(); - let channel = TrackedChannel { - inner: chan, - tracker: Arc::new(Tracker { - key: Some(1), - dropped_keys, - }), - }; - - pin_mut!(channel); - assert_matches!(channel.as_mut().poll_ready(&mut ctx()), Poll::Ready(Ok(()))); - assert_matches!(channel.as_mut().start_send("test"), Ok(())); - assert_matches!(channel.as_mut().poll_flush(&mut ctx()), Poll::Ready(Ok(()))); - assert_matches!(chan_rx.try_next(), Ok(Some("test"))); -} - -#[test] -fn channel_filter_increment_channels_for_key() { - use assert_matches::assert_matches; - use pin_utils::pin_mut; - - struct TestChannel { - key: &'static str, - } - let (_, listener) = futures::channel::mpsc::unbounded(); - let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key); - pin_mut!(filter); - let tracker1 = filter.as_mut().increment_channels_for_key("key").unwrap(); - assert_eq!(Arc::strong_count(&tracker1), 1); - let tracker2 = filter.as_mut().increment_channels_for_key("key").unwrap(); - assert_eq!(Arc::strong_count(&tracker1), 2); - assert_matches!(filter.increment_channels_for_key("key"), Err("key")); - drop(tracker2); - assert_eq!(Arc::strong_count(&tracker1), 1); -} - -#[test] -fn channel_filter_handle_new_channel() { - use assert_matches::assert_matches; - use pin_utils::pin_mut; - - #[derive(Debug)] - struct TestChannel { - key: &'static str, - } - let (_, listener) = futures::channel::mpsc::unbounded(); - let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key); - pin_mut!(filter); - let channel1 = filter - .as_mut() - .handle_new_channel(TestChannel { key: "key" }) - .unwrap(); - 
assert_eq!(Arc::strong_count(&channel1.tracker), 1); - - let channel2 = filter - .as_mut() - .handle_new_channel(TestChannel { key: "key" }) - .unwrap(); - assert_eq!(Arc::strong_count(&channel1.tracker), 2); - - assert_matches!( - filter.handle_new_channel(TestChannel { key: "key" }), - Err("key") - ); - drop(channel2); - assert_eq!(Arc::strong_count(&channel1.tracker), 1); -} - -#[test] -fn channel_filter_poll_listener() { - use assert_matches::assert_matches; - use pin_utils::pin_mut; - - #[derive(Debug)] - struct TestChannel { - key: &'static str, - } - let (new_channels, listener) = futures::channel::mpsc::unbounded(); - let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key); - pin_mut!(filter); - - new_channels - .unbounded_send(TestChannel { key: "key" }) - .unwrap(); - let channel1 = - assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Ok(c))) => c); - assert_eq!(Arc::strong_count(&channel1.tracker), 1); - - new_channels - .unbounded_send(TestChannel { key: "key" }) - .unwrap(); - let _channel2 = - assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Ok(c))) => c); - assert_eq!(Arc::strong_count(&channel1.tracker), 2); - - new_channels - .unbounded_send(TestChannel { key: "key" }) - .unwrap(); - let key = - assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Err(k))) => k); - assert_eq!(key, "key"); - assert_eq!(Arc::strong_count(&channel1.tracker), 2); -} - -#[test] -fn channel_filter_poll_closed_channels() { - use assert_matches::assert_matches; - use pin_utils::pin_mut; - - #[derive(Debug)] - struct TestChannel { - key: &'static str, - } - let (new_channels, listener) = futures::channel::mpsc::unbounded(); - let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key); - pin_mut!(filter); - - new_channels - .unbounded_send(TestChannel { key: "key" }) - .unwrap(); - let channel = - assert_matches!(filter.as_mut().poll_listener(&mut ctx()), 
Poll::Ready(Some(Ok(c))) => c); - assert_eq!(filter.key_counts.len(), 1); - - drop(channel); - assert_matches!( - filter.as_mut().poll_closed_channels(&mut ctx()), - Poll::Ready(()) - ); - assert!(filter.key_counts.is_empty()); -} - -#[test] -fn channel_filter_stream() { - use assert_matches::assert_matches; - use pin_utils::pin_mut; - - #[derive(Debug)] - struct TestChannel { - key: &'static str, - } - let (new_channels, listener) = futures::channel::mpsc::unbounded(); - let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key); - pin_mut!(filter); - - new_channels - .unbounded_send(TestChannel { key: "key" }) - .unwrap(); - let channel = assert_matches!(filter.as_mut().poll_next(&mut ctx()), Poll::Ready(Some(c)) => c); - assert_eq!(filter.key_counts.len(), 1); - - drop(channel); - assert_matches!(filter.as_mut().poll_next(&mut ctx()), Poll::Pending); - assert!(filter.key_counts.is_empty()); -} diff --git a/datadog-ipc/tarpc/src/server/limits/requests_per_channel.rs b/datadog-ipc/tarpc/src/server/limits/requests_per_channel.rs deleted file mode 100644 index 6882a506ef..0000000000 --- a/datadog-ipc/tarpc/src/server/limits/requests_per_channel.rs +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2020 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -use crate::server::RequestResponse; -use crate::{ - server::{Channel, Config}, - Response, ServerError, -}; -use futures::{prelude::*, ready, task::*}; -use pin_project::pin_project; -use std::{io, pin::Pin}; - -/// A [`Channel`] that limits the number of concurrent requests by throttling. -/// -/// Note that this is a very basic throttling heuristic. It is easy to set a number that is too low -/// for the resources available to the server. For production use cases, a more advanced throttler -/// is likely needed. 
-#[pin_project] -#[derive(Debug)] -pub struct MaxRequests { - max_in_flight_requests: usize, - #[pin] - inner: C, -} - -impl MaxRequests { - /// Returns the inner channel. - pub fn get_ref(&self) -> &C { - &self.inner - } -} - -impl MaxRequests -where - C: Channel, -{ - /// Returns a new `MaxRequests` that wraps the given channel and limits concurrent requests to - /// `max_in_flight_requests`. - pub fn new(inner: C, max_in_flight_requests: usize) -> Self { - MaxRequests { - max_in_flight_requests, - inner, - } - } -} - -impl Stream for MaxRequests -where - C: Channel, -{ - type Item = ::Item; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - while self.as_mut().in_flight_requests() >= *self.as_mut().project().max_in_flight_requests - { - ready!(self.as_mut().project().inner.poll_ready(cx)?); - - match ready!(self.as_mut().project().inner.poll_next(cx)?) { - Some(r) => { - let _entered = r.span.enter(); - tracing::info!( - in_flight_requests = self.as_mut().in_flight_requests(), - "ThrottleRequest", - ); - - self.as_mut() - .start_send(RequestResponse::Response(Response { - request_id: r.request.id, - message: Err(ServerError { - kind: io::ErrorKind::WouldBlock, - detail: "server throttled the request.".into(), - }), - }))?; - } - None => return Poll::Ready(None), - } - } - self.project().inner.poll_next(cx) - } -} - -impl Sink::Resp>> for MaxRequests -where - C: Channel, -{ - type Error = C::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.project().inner.poll_ready(cx) - } - - fn start_send( - self: Pin<&mut Self>, - item: RequestResponse<::Resp>, - ) -> Result<(), Self::Error> { - self.project().inner.start_send(item) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.project().inner.poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.project().inner.poll_close(cx) - } -} - -impl AsRef for MaxRequests { - fn as_ref(&self) -> &C { - 
&self.inner - } -} - -impl Channel for MaxRequests -where - C: Channel, -{ - type Req = ::Req; - type Resp = ::Resp; - type Transport = ::Transport; - - fn in_flight_requests(&self) -> usize { - self.inner.in_flight_requests() - } - - fn config(&self) -> &Config { - self.inner.config() - } - - fn transport(&self) -> &Self::Transport { - self.inner.transport() - } -} - -/// An [`Incoming`](crate::server::incoming::Incoming) stream of channels that enforce limits on -/// the number of in-flight requests. -#[pin_project] -#[derive(Debug)] -pub struct MaxRequestsPerChannel { - #[pin] - inner: S, - max_in_flight_requests: usize, -} - -impl MaxRequestsPerChannel -where - S: Stream, - ::Item: Channel, -{ - pub(crate) fn new(inner: S, max_in_flight_requests: usize) -> Self { - Self { - inner, - max_in_flight_requests, - } - } -} - -impl Stream for MaxRequestsPerChannel -where - S: Stream, - ::Item: Channel, -{ - type Item = MaxRequests<::Item>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - match ready!(self.as_mut().project().inner.poll_next(cx)) { - Some(channel) => Poll::Ready(Some(MaxRequests::new( - channel, - *self.project().max_in_flight_requests, - ))), - None => Poll::Ready(None), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use crate::server::{ - testing::{self, FakeChannel, PollExt}, - RequestResponse, TrackedRequest, - }; - use pin_utils::pin_mut; - use std::{ - marker::PhantomData, - time::{Duration, SystemTime}, - }; - use tracing::Span; - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn throttler_in_flight_requests() { - let throttler = MaxRequests { - max_in_flight_requests: 0, - inner: FakeChannel::default::(), - }; - - pin_mut!(throttler); - for i in 0..5 { - throttler - .inner - .in_flight_requests - .start_request( - i, - SystemTime::now() + Duration::from_secs(1), - Span::current(), - ) - .unwrap(); - } - assert_eq!(throttler.as_mut().in_flight_requests(), 5); - } - - #[test] - fn 
throttler_poll_next_done() { - let throttler = MaxRequests { - max_in_flight_requests: 0, - inner: FakeChannel::default::(), - }; - - pin_mut!(throttler); - assert!(throttler.as_mut().poll_next(&mut testing::cx()).is_done()); - } - - #[test] - fn throttler_poll_next_some() -> io::Result<()> { - let throttler = MaxRequests { - max_in_flight_requests: 1, - inner: FakeChannel::default::(), - }; - - pin_mut!(throttler); - throttler.inner.push_req(0, 1); - assert!(throttler.as_mut().poll_ready(&mut testing::cx()).is_ready()); - assert_eq!( - throttler - .as_mut() - .poll_next(&mut testing::cx())? - .map(|r| r.map(|r| (r.request.id, r.request.message))), - Poll::Ready(Some((0, 1))) - ); - Ok(()) - } - - #[test] - fn throttler_poll_next_throttled() { - let throttler = MaxRequests { - max_in_flight_requests: 0, - inner: FakeChannel::default::(), - }; - - pin_mut!(throttler); - throttler.inner.push_req(1, 1); - assert!(throttler.as_mut().poll_next(&mut testing::cx()).is_done()); - assert_eq!(throttler.inner.sink.len(), 1); - match throttler.inner.sink.front().unwrap() { - RequestResponse::Response(resp) => { - assert_eq!(resp.request_id, 1); - assert!(resp.message.is_err()); - } - _ => unimplemented!(), - } - } - - #[test] - fn throttler_poll_next_throttled_sink_not_ready() { - let throttler = MaxRequests { - max_in_flight_requests: 0, - inner: PendingSink::default::(), - }; - pin_mut!(throttler); - assert!(throttler.poll_next(&mut testing::cx()).is_pending()); - - struct PendingSink { - ghost: PhantomData In>, - } - impl PendingSink<(), ()> { - pub fn default( - ) -> PendingSink>, RequestResponse> { - PendingSink { ghost: PhantomData } - } - } - impl Stream for PendingSink { - type Item = In; - fn poll_next(self: Pin<&mut Self>, _: &mut Context) -> Poll> { - unimplemented!() - } - } - impl Sink for PendingSink { - type Error = io::Error; - fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { - Poll::Pending - } - fn start_send(self: Pin<&mut Self>, _: Out) -> 
Result<(), Self::Error> { - Err(io::Error::from(io::ErrorKind::WouldBlock)) - } - fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll> { - Poll::Pending - } - fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll> { - Poll::Pending - } - } - impl Channel for PendingSink>, RequestResponse> { - type Req = Req; - type Resp = Resp; - type Transport = (); - fn config(&self) -> &Config { - unimplemented!() - } - fn in_flight_requests(&self) -> usize { - 0 - } - fn transport(&self) -> &() { - &() - } - } - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn throttler_start_send() { - let throttler = MaxRequests { - max_in_flight_requests: 0, - inner: FakeChannel::default::(), - }; - - pin_mut!(throttler); - throttler - .inner - .in_flight_requests - .start_request( - 0, - SystemTime::now() + Duration::from_secs(1), - Span::current(), - ) - .unwrap(); - throttler - .as_mut() - .start_send(RequestResponse::Response(Response { - request_id: 0, - message: Ok(1), - })) - .unwrap(); - assert_eq!(throttler.inner.in_flight_requests.len(), 0); - match throttler.inner.sink.front().unwrap() { - RequestResponse::Response(resp) => { - assert_eq!( - resp, - &Response { - request_id: 0, - message: Ok(1), - } - ); - } - _ => unimplemented!(), - } - } -} diff --git a/datadog-ipc/tarpc/src/server/testing.rs b/datadog-ipc/tarpc/src/server/testing.rs deleted file mode 100644 index 1d2dbbab8f..0000000000 --- a/datadog-ipc/tarpc/src/server/testing.rs +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2020 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. 
- -use crate::{ - cancellations::{cancellations, CanceledRequests, RequestCancellation}, - context, - server::{Channel, Config, RequestResponse, ResponseGuard, TrackedRequest}, - Request, -}; -use futures::{task::*, Sink, Stream}; -use pin_project::pin_project; -use std::{collections::VecDeque, io, pin::Pin, time::SystemTime}; -use tracing::Span; - -#[pin_project] -pub(crate) struct FakeChannel { - #[pin] - pub stream: VecDeque, - #[pin] - pub sink: VecDeque, - pub config: Config, - pub in_flight_requests: super::in_flight_requests::InFlightRequests, - pub request_cancellation: RequestCancellation, - pub canceled_requests: CanceledRequests, -} - -impl Stream for FakeChannel -where - In: Unpin, -{ - type Item = In; - - fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { - Poll::Ready(self.project().stream.pop_front()) - } -} - -impl Sink> for FakeChannel> { - type Error = io::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.project().sink.poll_ready(cx).map_err(|e| match e {}) - } - - fn start_send( - mut self: Pin<&mut Self>, - response: RequestResponse, - ) -> Result<(), Self::Error> { - if let RequestResponse::Response(ref response) = response { - self.as_mut() - .project() - .in_flight_requests - .remove_request(response.request_id); - } - self.project() - .sink - .start_send(response) - .map_err(|e| match e {}) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.project().sink.poll_flush(cx).map_err(|e| match e {}) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - self.project().sink.poll_close(cx).map_err(|e| match e {}) - } -} - -impl Channel for FakeChannel>, RequestResponse> -where - Req: Unpin, -{ - type Req = Req; - type Resp = Resp; - type Transport = (); - - fn config(&self) -> &Config { - &self.config - } - - fn in_flight_requests(&self) -> usize { - self.in_flight_requests.len() - } - - fn transport(&self) -> &() { - &() - } -} - -impl FakeChannel>, 
RequestResponse> { - pub fn push_req(&mut self, id: u64, message: Req) { - let (_, abort_registration) = futures::future::AbortHandle::new_pair(); - let (request_cancellation, _) = cancellations(); - self.stream.push_back(Ok(TrackedRequest { - request: Request { - context: context::Context { - deadline: SystemTime::UNIX_EPOCH, - trace_context: Default::default(), - discard_response: false, - }, - id, - message, - }, - abort_registration, - span: Span::none(), - response_guard: ResponseGuard { - request_cancellation, - request_id: id, - cancel: false, - }, - })); - } -} - -impl FakeChannel<(), ()> { - pub fn default( - ) -> FakeChannel>, RequestResponse> { - let (request_cancellation, canceled_requests) = cancellations(); - FakeChannel { - stream: Default::default(), - sink: Default::default(), - config: Default::default(), - in_flight_requests: Default::default(), - request_cancellation, - canceled_requests, - } - } -} - -pub trait PollExt { - fn is_done(&self) -> bool; -} - -impl PollExt for Poll> { - fn is_done(&self) -> bool { - matches!(self, Poll::Ready(None)) - } -} - -pub fn cx() -> Context<'static> { - Context::from_waker(noop_waker_ref()) -} diff --git a/datadog-ipc/tarpc/src/server/tokio.rs b/datadog-ipc/tarpc/src/server/tokio.rs deleted file mode 100644 index e62f3df225..0000000000 --- a/datadog-ipc/tarpc/src/server/tokio.rs +++ /dev/null @@ -1,114 +0,0 @@ -use super::{Channel, Requests, Serve}; -use futures::{prelude::*, ready, task::*}; -use pin_project::pin_project; -use std::fmt::Debug; -use std::pin::Pin; - -/// A future that drives the server by [spawning](tokio::spawn) a [`TokioChannelExecutor`](TokioChannelExecutor) -/// for each new channel. Returned by -/// [`Incoming::execute`](crate::server::incoming::Incoming::execute). 
-#[must_use] -#[pin_project] -#[derive(Debug)] -pub struct TokioServerExecutor { - #[pin] - inner: T, - serve: S, -} - -impl TokioServerExecutor { - pub(crate) fn new(inner: T, serve: S) -> Self { - Self { inner, serve } - } -} - -/// A future that drives the server by [spawning](tokio::spawn) each [response -/// handler](super::InFlightRequest::execute) on tokio's default executor. Returned by -/// [`Channel::execute`](crate::server::Channel::execute). -#[must_use] -#[pin_project] -#[derive(Debug)] -pub struct TokioChannelExecutor { - #[pin] - inner: T, - serve: S, -} - -impl TokioServerExecutor { - fn inner_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut T> { - self.as_mut().project().inner - } -} - -impl TokioChannelExecutor { - fn inner_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut T> { - self.as_mut().project().inner - } -} - -// Send + 'static execution helper methods. - -impl Requests -where - C: Channel, - C::Req: Send + Debug + 'static, - C::Resp: Send + 'static, -{ - /// Executes all requests using the given service function. Requests are handled concurrently - /// by [spawning](::tokio::spawn) each handler on tokio's default executor. 
- pub fn execute(self, serve: S) -> TokioChannelExecutor - where - S: Serve + Send + 'static, - { - TokioChannelExecutor { inner: self, serve } - } -} - -impl Future for TokioServerExecutor -where - St: Sized + Stream, - C: Channel + Send + 'static, - C::Req: Send + Debug + 'static, - C::Resp: Send + 'static, - Se: Serve + Send + 'static + Clone, - Se::Fut: Send, -{ - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - while let Some(channel) = ready!(self.inner_pin_mut().poll_next(cx)) { - tokio::spawn(channel.execute(self.serve.clone())); - } - tracing::info!("Server shutting down."); - Poll::Ready(()) - } -} - -impl Future for TokioChannelExecutor, S> -where - C: Channel + 'static, - C::Req: Send + Debug + 'static, - C::Resp: Send + 'static, - S: Serve + Send + 'static + Clone, - S::Fut: Send, -{ - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - while let Some(response_handler) = ready!(self.inner_pin_mut().poll_next(cx)) { - match response_handler { - Ok(resp) => { - let server = self.serve.clone(); - tokio::spawn(async move { - resp.execute(server).await; - }); - } - Err(e) => { - tracing::warn!("Requests stream errored out: {}", e); - break; - } - } - } - Poll::Ready(()) - } -} diff --git a/datadog-ipc/tarpc/src/trace.rs b/datadog-ipc/tarpc/src/trace.rs deleted file mode 100644 index b3bc326ea4..0000000000 --- a/datadog-ipc/tarpc/src/trace.rs +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -#![deny(missing_docs, missing_debug_implementations)] - -//! Provides building blocks for tracing distributed programs. -//! -//! A trace is logically a tree of causally-related events called spans. Traces are tracked via a -//! [context](Context) that identifies the current trace, span, and parent of the current span. 
In -//! distributed systems, a context can be sent from client to server to connect events occurring on -//! either side. -//! -//! This crate's design is based on [opencensus -//! tracing](https://opencensus.io/core-concepts/tracing/). - -#[cfg(feature = "opentelemetry")] -use opentelemetry::trace::TraceContextExt; -use rand::Rng; -use std::{ - fmt::{self, Formatter}, - num::{NonZeroU128, NonZeroU64}, -}; -#[cfg(feature = "opentelemetry")] -use tracing_opentelemetry::OpenTelemetrySpanExt; - -/// A context for tracing the execution of processes, distributed or otherwise. -/// -/// Consists of a span identifying an event, an optional parent span identifying a causal event -/// that triggered the current span, and a trace with which all related spans are associated. -#[derive(Debug, Default, PartialEq, Eq, Hash, Clone, Copy)] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -pub struct Context { - /// An identifier of the trace associated with the current context. A trace ID is typically - /// created at a root span and passed along through all causal events. - pub trace_id: TraceId, - /// An identifier of the current span. In typical RPC usage, a span is created by a client - /// before making an RPC, and the span ID is sent to the server. The server is free to create - /// its own spans, for which it sets the client's span as the parent span. - pub span_id: SpanId, - /// Indicates whether a sampler has already decided whether or not to sample the trace - /// associated with the Context. If `sampling_decision` is None, then a decision has not yet - /// been made. Downstream samplers do not need to abide by "no sample" decisions--for example, - /// an upstream client may choose to never sample, which may not make sense for the client's - /// dependencies. On the other hand, if an upstream process has chosen to sample this trace, - /// then the downstream samplers are expected to respect that decision and also sample the - /// trace. 
Otherwise, the full trace would not be able to be reconstructed. - pub sampling_decision: SamplingDecision, -} - -/// A 128-bit UUID identifying a trace. All spans caused by the same originating span share the -/// same trace ID. -#[derive(Default, PartialEq, Eq, Hash, Clone, Copy)] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -pub struct TraceId(#[cfg_attr(feature = "serde1", serde(with = "u128_serde"))] u128); - -/// A 64-bit identifier of a span within a trace. The identifier is unique within the span's trace. -#[derive(Default, PartialEq, Eq, Hash, Clone, Copy)] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -pub struct SpanId(u64); - -/// Indicates whether a sampler has decided whether or not to sample the trace associated with the -/// Context. Downstream samplers do not need to abide by "no sample" decisions--for example, an -/// upstream client may choose to never sample, which may not make sense for the client's -/// dependencies. On the other hand, if an upstream process has chosen to sample this trace, then -/// the downstream samplers are expected to respect that decision and also sample the trace. -/// Otherwise, the full trace would not be able to be reconstructed reliably. -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] -#[repr(u8)] -pub enum SamplingDecision { - /// The associated span was sampled by its creating process. Child spans must also be sampled. - Sampled, - /// The associated span was not sampled by its creating process. - Unsampled, -} - -impl Context { - /// Constructs a new context with the trace ID and sampling decision inherited from the parent. 
- pub(crate) fn new_child(&self) -> Self { - Self { - trace_id: self.trace_id, - span_id: SpanId::random(&mut rand::thread_rng()), - sampling_decision: self.sampling_decision, - } - } -} - -impl TraceId { - /// Returns a random trace ID that can be assumed to be globally unique if `rng` generates - /// actually-random numbers. - pub fn random(rng: &mut R) -> Self { - TraceId(rng.gen::().get()) - } - - /// Returns true iff the trace ID is 0. - pub fn is_none(&self) -> bool { - self.0 == 0 - } -} - -impl SpanId { - /// Returns a random span ID that can be assumed to be unique within a single trace. - pub fn random(rng: &mut R) -> Self { - SpanId(rng.gen::().get()) - } - - /// Returns true iff the span ID is 0. - pub fn is_none(&self) -> bool { - self.0 == 0 - } -} - -impl From for u128 { - fn from(trace_id: TraceId) -> Self { - trace_id.0 - } -} - -impl From for TraceId { - fn from(trace_id: u128) -> Self { - Self(trace_id) - } -} - -impl From for u64 { - fn from(span_id: SpanId) -> Self { - span_id.0 - } -} - -impl From for SpanId { - fn from(span_id: u64) -> Self { - Self(span_id) - } -} - -#[cfg(feature = "opentelemetry")] -impl From for TraceId { - fn from(trace_id: opentelemetry::trace::TraceId) -> Self { - Self::from(u128::from_be_bytes(trace_id.to_bytes())) - } -} -#[cfg(feature = "opentelemetry")] -impl From for opentelemetry::trace::TraceId { - fn from(trace_id: TraceId) -> Self { - Self::from_bytes(u128::from(trace_id).to_be_bytes()) - } -} -#[cfg(feature = "opentelemetry")] -impl From for SpanId { - fn from(span_id: opentelemetry::trace::SpanId) -> Self { - Self::from(u64::from_be_bytes(span_id.to_bytes())) - } -} -#[cfg(feature = "opentelemetry")] -impl From for opentelemetry::trace::SpanId { - fn from(span_id: SpanId) -> Self { - Self::from_bytes(u64::from(span_id).to_be_bytes()) - } -} - -impl TryFrom<&tracing::Span> for Context { - type Error = NoActiveSpan; - #[cfg(feature = "opentelemetry")] - fn try_from(span: &tracing::Span) -> Result { - let 
context = span.context(); - if context.has_active_span() { - Ok(Self::from(context.span())) - } else { - Err(NoActiveSpan) - } - } - #[cfg(not(feature = "opentelemetry"))] - fn try_from(_span: &tracing::Span) -> Result { - Err(NoActiveSpan) - } -} - -#[cfg(feature = "opentelemetry")] -impl From> for Context { - fn from(span: opentelemetry::trace::SpanRef<'_>) -> Self { - let otel_ctx = span.span_context(); - Self { - trace_id: TraceId::from(otel_ctx.trace_id()), - span_id: SpanId::from(otel_ctx.span_id()), - sampling_decision: SamplingDecision::from(otel_ctx), - } - } -} -#[cfg(feature = "opentelemetry")] -impl From for opentelemetry::trace::TraceFlags { - fn from(decision: SamplingDecision) -> Self { - match decision { - SamplingDecision::Sampled => opentelemetry::trace::TraceFlags::SAMPLED, - SamplingDecision::Unsampled => opentelemetry::trace::TraceFlags::default(), - } - } -} -#[cfg(feature = "opentelemetry")] -impl From<&opentelemetry::trace::SpanContext> for SamplingDecision { - fn from(context: &opentelemetry::trace::SpanContext) -> Self { - if context.is_sampled() { - SamplingDecision::Sampled - } else { - SamplingDecision::Unsampled - } - } -} - -impl Default for SamplingDecision { - fn default() -> Self { - Self::Unsampled - } -} - -/// Returned when a [`Context`] cannot be constructed from a [`Span`](tracing::Span). 
-#[derive(Debug)] -pub struct NoActiveSpan; - -impl fmt::Display for TraceId { - fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { - write!(f, "{:02x}", self.0)?; - Ok(()) - } -} - -impl fmt::Debug for TraceId { - fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { - write!(f, "{:02x}", self.0)?; - Ok(()) - } -} - -impl fmt::Display for SpanId { - fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { - write!(f, "{:02x}", self.0)?; - Ok(()) - } -} - -impl fmt::Debug for SpanId { - fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { - write!(f, "{:02x}", self.0)?; - Ok(()) - } -} - -#[cfg(feature = "serde1")] -mod u128_serde { - pub fn serialize(u: &u128, serializer: S) -> Result - where - S: serde::Serializer, - { - serde::Serialize::serialize(&u.to_le_bytes(), serializer) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - Ok(u128::from_le_bytes(serde::Deserialize::deserialize( - deserializer, - )?)) - } -} diff --git a/datadog-ipc/tarpc/src/transport.rs b/datadog-ipc/tarpc/src/transport.rs deleted file mode 100644 index 7d47ab7c10..0000000000 --- a/datadog-ipc/tarpc/src/transport.rs +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -//! Provides a [`Transport`](sealed::Transport) trait as well as implementations. -//! -//! The rpc crate is transport- and protocol-agnostic. Any transport that impls [`Transport`](sealed::Transport) -//! can be plugged in, using whatever protocol it wants. - -pub mod channel; - -pub(crate) mod sealed { - use futures::prelude::*; - use std::error::Error; - - /// A bidirectional stream ([`Sink`] + [`Stream`]) of messages. 
- pub trait Transport - where - Self: Stream>::Error>>, - Self: Sink>::TransportError>, - >::Error: Error, - { - /// Associated type where clauses are not elaborated; this associated type allows users - /// bounding types by Transport to avoid having to explicitly add `T::Error: Error` to their - /// bounds. - type TransportError: Error + Send + Sync + 'static; - } - - impl Transport for T - where - T: ?Sized, - T: Stream>, - T: Sink, - T::Error: Error + Send + Sync + 'static, - { - type TransportError = E; - } -} diff --git a/datadog-ipc/tarpc/src/transport/channel.rs b/datadog-ipc/tarpc/src/transport/channel.rs deleted file mode 100644 index 55d5fc506a..0000000000 --- a/datadog-ipc/tarpc/src/transport/channel.rs +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -//! Transports backed by in-memory channels. - -use futures::{task::*, Sink, Stream}; -use pin_project::pin_project; -use std::{error::Error, pin::Pin}; -use tokio::sync::mpsc; - -/// Errors that occur in the sending or receiving of messages over a channel. -#[derive(thiserror::Error, Debug)] -pub enum ChannelError { - /// An error occurred sending over the channel. - #[error("an error occurred sending over the channel")] - Send(#[source] Box), -} - -/// Returns two unbounded channel peers. Each [`Stream`] yields items sent through the other's -/// [`Sink`]. -pub fn unbounded() -> ( - UnboundedChannel, - UnboundedChannel, -) { - let (tx1, rx2) = mpsc::unbounded_channel(); - let (tx2, rx1) = mpsc::unbounded_channel(); - ( - UnboundedChannel { tx: tx1, rx: rx1 }, - UnboundedChannel { tx: tx2, rx: rx2 }, - ) -} - -/// A bi-directional channel backed by an [`UnboundedSender`](mpsc::UnboundedSender) -/// and [`UnboundedReceiver`](mpsc::UnboundedReceiver). 
-#[derive(Debug)] -pub struct UnboundedChannel { - rx: mpsc::UnboundedReceiver, - tx: mpsc::UnboundedSender, -} - -impl Stream for UnboundedChannel { - type Item = Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.rx.poll_recv(cx).map(|option| option.map(Ok)) - } -} - -const CLOSED_MESSAGE: &str = "the channel is closed and cannot accept new items for sending"; - -impl Sink for UnboundedChannel { - type Error = ChannelError; - - fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(if self.tx.is_closed() { - Err(ChannelError::Send(CLOSED_MESSAGE.into())) - } else { - Ok(()) - }) - } - - fn start_send(self: Pin<&mut Self>, item: SinkItem) -> Result<(), Self::Error> { - self.tx - .send(item) - .map_err(|_| ChannelError::Send(CLOSED_MESSAGE.into())) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // UnboundedSender requires no flushing. - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // UnboundedSender can't initiate closure. - Poll::Ready(Ok(())) - } -} - -/// Returns two channel peers with buffer equal to `capacity`. Each [`Stream`] yields items sent -/// through the other's [`Sink`]. -pub fn bounded( - capacity: usize, -) -> (Channel, Channel) { - let (tx1, rx2) = futures::channel::mpsc::channel(capacity); - let (tx2, rx1) = futures::channel::mpsc::channel(capacity); - (Channel { tx: tx1, rx: rx1 }, Channel { tx: tx2, rx: rx2 }) -} - -/// A bi-directional channel backed by a [`Sender`](futures::channel::mpsc::Sender) -/// and [`Receiver`](futures::channel::mpsc::Receiver). 
-#[pin_project] -#[derive(Debug)] -pub struct Channel { - #[pin] - rx: futures::channel::mpsc::Receiver, - #[pin] - tx: futures::channel::mpsc::Sender, -} - -impl Stream for Channel { - type Item = Result; - - fn poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.project().rx.poll_next(cx).map(|option| option.map(Ok)) - } -} - -impl Sink for Channel { - type Error = ChannelError; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .tx - .poll_ready(cx) - .map_err(|e| ChannelError::Send(Box::new(e))) - } - - fn start_send(self: Pin<&mut Self>, item: SinkItem) -> Result<(), Self::Error> { - self.project() - .tx - .start_send(item) - .map_err(|e| ChannelError::Send(Box::new(e))) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .tx - .poll_flush(cx) - .map_err(|e| ChannelError::Send(Box::new(e))) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .tx - .poll_close(cx) - .map_err(|e| ChannelError::Send(Box::new(e))) - } -} - -#[cfg(test)] -#[cfg(feature = "tokio1")] -mod tests { - use crate::{ - client, context, - server::{incoming::Incoming, BaseChannel}, - transport::{ - self, - channel::{Channel, UnboundedChannel}, - }, - }; - use assert_matches::assert_matches; - use futures::{prelude::*, stream}; - use std::io; - use tracing::trace; - - #[test] - fn ensure_is_transport() { - fn is_transport>() {} - is_transport::<(), (), UnboundedChannel<(), ()>>(); - is_transport::<(), (), Channel<(), ()>>(); - } - - #[tokio::test] - #[cfg_attr(miri, ignore)] - async fn integration() -> anyhow::Result<()> { - let _ = tracing_subscriber::fmt::try_init(); - - let (client_channel, server_channel) = transport::channel::unbounded(); - tokio::spawn( - stream::once(future::ready(server_channel)) - .map(BaseChannel::with_defaults) - .execute(|_ctx, request: String| { - future::ready(request.parse::().map_err(|_| { - 
io::Error::new( - io::ErrorKind::InvalidInput, - format!("{request:?} is not an int"), - ) - })) - }), - ); - - let client = client::new(client::Config::default(), client_channel).spawn(); - - let response1 = client.call(context::current(), "", "123".into()).await?; - let response2 = client.call(context::current(), "", "abc".into()).await?; - - trace!("response1: {:?}, response2: {:?}", response1, response2); - - assert_matches!(response1, Ok(123)); - assert_matches!(response2, Err(ref e) if e.kind() == io::ErrorKind::InvalidInput); - - Ok(()) - } -} diff --git a/datadog-ipc/tarpc/src/util.rs b/datadog-ipc/tarpc/src/util.rs deleted file mode 100644 index ad04236a0d..0000000000 --- a/datadog-ipc/tarpc/src/util.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -use std::{ - collections::HashMap, - hash::{BuildHasher, Hash}, - time::{Duration, SystemTime}, -}; - -#[cfg(feature = "serde1")] -#[cfg_attr(docsrs, doc(cfg(feature = "serde1")))] -pub mod serde; - -/// Extension trait for [SystemTimes](SystemTime) in the future, i.e. deadlines. -pub trait TimeUntil { - /// How much time from now until this time is reached. - fn time_until(&self) -> Duration; -} - -impl TimeUntil for SystemTime { - fn time_until(&self) -> Duration { - self.duration_since(SystemTime::now()).unwrap_or_default() - } -} - -/// Collection compaction; configurable `shrink_to_fit`. -pub trait Compact { - /// Compacts space if the ratio of length : capacity is less than `usage_ratio_threshold`. 
- fn compact(&mut self, usage_ratio_threshold: f64); -} - -impl Compact for HashMap -where - K: Eq + Hash, - H: BuildHasher, -{ - fn compact(&mut self, usage_ratio_threshold: f64) { - let usage_ratio_threshold = usage_ratio_threshold.clamp(f64::MIN_POSITIVE, 1.); - let cap = f64::max(1000., self.len() as f64 / usage_ratio_threshold); - self.shrink_to(cap as usize); - } -} - -#[test] -fn test_compact() { - let mut map = HashMap::with_capacity(2048); - assert_eq!(map.capacity(), 3584); - - // Make usage ratio 25% - for i in 0..896 { - map.insert(format!("k{i}"), "v"); - } - - map.compact(-1.0); - assert_eq!(map.capacity(), 3584); - - map.compact(0.25); - assert_eq!(map.capacity(), 3584); - - map.compact(0.50); - assert_eq!(map.capacity(), 1792); - - map.compact(1.0); - assert_eq!(map.capacity(), 1792); - - map.compact(2.0); - assert_eq!(map.capacity(), 1792); -} diff --git a/datadog-ipc/tarpc/src/util/serde.rs b/datadog-ipc/tarpc/src/util/serde.rs deleted file mode 100644 index ca23fe0315..0000000000 --- a/datadog-ipc/tarpc/src/util/serde.rs +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 Google LLC -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file or at -// https://opensource.org/licenses/MIT. - -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::io; - -/// Serializes [`io::ErrorKind`] as a `u32`. 
-#[allow(clippy::trivially_copy_pass_by_ref)] // Exact fn signature required by serde derive -pub fn serialize_io_error_kind_as_u32( - kind: &io::ErrorKind, - serializer: S, -) -> Result -where - S: Serializer, -{ - use std::io::ErrorKind::*; - match *kind { - NotFound => 0, - PermissionDenied => 1, - ConnectionRefused => 2, - ConnectionReset => 3, - ConnectionAborted => 4, - NotConnected => 5, - AddrInUse => 6, - AddrNotAvailable => 7, - BrokenPipe => 8, - AlreadyExists => 9, - WouldBlock => 10, - InvalidInput => 11, - InvalidData => 12, - TimedOut => 13, - WriteZero => 14, - Interrupted => 15, - Other => 16, - UnexpectedEof => 17, - _ => 16, - } - .serialize(serializer) -} - -/// Deserializes [`io::ErrorKind`] from a `u32`. -pub fn deserialize_io_error_kind_from_u32<'de, D>( - deserializer: D, -) -> Result -where - D: Deserializer<'de>, -{ - use std::io::ErrorKind::*; - Ok(match u32::deserialize(deserializer)? { - 0 => NotFound, - 1 => PermissionDenied, - 2 => ConnectionRefused, - 3 => ConnectionReset, - 4 => ConnectionAborted, - 5 => NotConnected, - 6 => AddrInUse, - 7 => AddrNotAvailable, - 8 => BrokenPipe, - 9 => AlreadyExists, - 10 => WouldBlock, - 11 => InvalidInput, - 12 => InvalidData, - 13 => TimedOut, - 14 => WriteZero, - 15 => Interrupted, - 16 => Other, - 17 => UnexpectedEof, - _ => Other, - }) -} diff --git a/datadog-ipc/tarpc/tests/compile_fail.rs b/datadog-ipc/tarpc/tests/compile_fail.rs deleted file mode 100644 index 23c8982f18..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail.rs +++ /dev/null @@ -1,9 +0,0 @@ -// #[test] -// fn ui() { -// let t = trybuild::TestCases::new(); -// t.compile_fail("tests/compile_fail/*.rs"); -// #[cfg(feature = "tokio1")] -// t.compile_fail("tests/compile_fail/tokio/*.rs"); -// #[cfg(all(feature = "serde-transport", feature = "tcp"))] -// t.compile_fail("tests/compile_fail/serde_transport/*.rs"); -// } diff --git a/datadog-ipc/tarpc/tests/compile_fail/must_use_request_dispatch.rs 
b/datadog-ipc/tarpc/tests/compile_fail/must_use_request_dispatch.rs deleted file mode 100644 index 2915d32377..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/must_use_request_dispatch.rs +++ /dev/null @@ -1,15 +0,0 @@ -use tarpc::client; - -#[tarpc::service] -trait World { - async fn hello(name: String) -> String; -} - -fn main() { - let (client_transport, _) = tarpc::transport::channel::unbounded(); - - #[deny(unused_must_use)] - { - WorldClient::new(client::Config::default(), client_transport).dispatch; - } -} diff --git a/datadog-ipc/tarpc/tests/compile_fail/must_use_request_dispatch.stderr b/datadog-ipc/tarpc/tests/compile_fail/must_use_request_dispatch.stderr deleted file mode 100644 index 823ac5bfd9..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/must_use_request_dispatch.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: unused `RequestDispatch` that must be used - --> tests/compile_fail/must_use_request_dispatch.rs:13:9 - | -13 | WorldClient::new(client::Config::default(), client_transport).dispatch; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | -note: the lint level is defined here - --> tests/compile_fail/must_use_request_dispatch.rs:11:12 - | -11 | #[deny(unused_must_use)] - | ^^^^^^^^^^^^^^^ diff --git a/datadog-ipc/tarpc/tests/compile_fail/serde_transport/must_use_tcp_connect.rs b/datadog-ipc/tarpc/tests/compile_fail/serde_transport/must_use_tcp_connect.rs deleted file mode 100644 index 1f437b17d5..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/serde_transport/must_use_tcp_connect.rs +++ /dev/null @@ -1,9 +0,0 @@ -use tarpc::serde_transport; -use tokio_serde::formats::Json; - -fn main() { - #[deny(unused_must_use)] - { - serde_transport::tcp::connect::<_, (), (), _, _>("0.0.0.0:0", Json::default); - } -} diff --git a/datadog-ipc/tarpc/tests/compile_fail/serde_transport/must_use_tcp_connect.stderr b/datadog-ipc/tarpc/tests/compile_fail/serde_transport/must_use_tcp_connect.stderr deleted file mode 100644 
index b1be874ab1..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/serde_transport/must_use_tcp_connect.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: unused `tarpc::serde_transport::tcp::Connect` that must be used - --> tests/compile_fail/serde_transport/must_use_tcp_connect.rs:7:9 - | -7 | serde_transport::tcp::connect::<_, (), (), _, _>("0.0.0.0:0", Json::default); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | -note: the lint level is defined here - --> tests/compile_fail/serde_transport/must_use_tcp_connect.rs:5:12 - | -5 | #[deny(unused_must_use)] - | ^^^^^^^^^^^^^^^ diff --git a/datadog-ipc/tarpc/tests/compile_fail/tarpc_server_missing_async.rs b/datadog-ipc/tarpc/tests/compile_fail/tarpc_server_missing_async.rs deleted file mode 100644 index 99d858b6d8..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tarpc_server_missing_async.rs +++ /dev/null @@ -1,15 +0,0 @@ -#[tarpc::service(derive_serde = false)] -trait World { - async fn hello(name: String) -> String; -} - -struct HelloServer; - -#[tarpc::server] -impl World for HelloServer { - fn hello(name: String) -> String { - format!("Hello, {name}!", name) - } -} - -fn main() {} diff --git a/datadog-ipc/tarpc/tests/compile_fail/tarpc_server_missing_async.stderr b/datadog-ipc/tarpc/tests/compile_fail/tarpc_server_missing_async.stderr deleted file mode 100644 index 28106e63f8..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tarpc_server_missing_async.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: not all trait items implemented, missing: `HelloFut` - --> $DIR/tarpc_server_missing_async.rs:9:1 - | -9 | impl World for HelloServer { - | ^^^^ - -error: hint: `#[tarpc::server]` only rewrites async fns, and `fn hello` is not async - --> $DIR/tarpc_server_missing_async.rs:10:5 - | -10 | fn hello(name: String) -> String { - | ^^ diff --git a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_arg_pat.rs b/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_arg_pat.rs 
deleted file mode 100644 index 800af364e7..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_arg_pat.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[tarpc::service] -trait World { - async fn pat((a, b): (u8, u32)); -} - -fn main() {} diff --git a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_arg_pat.stderr b/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_arg_pat.stderr deleted file mode 100644 index 36c1e86128..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_arg_pat.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: patterns aren't allowed in RPC args - --> $DIR/tarpc_service_arg_pat.rs:3:18 - | -3 | async fn pat((a, b): (u8, u32)); - | ^^^^^^ diff --git a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_new.rs b/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_new.rs deleted file mode 100644 index 5fbd6a09d6..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_new.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[tarpc::service] -trait World { - async fn new(); -} - -fn main() {} diff --git a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_new.stderr b/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_new.stderr deleted file mode 100644 index 52161ce3f8..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_new.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: method name conflicts with generated fn `WorldClient::new` - --> $DIR/tarpc_service_fn_new.rs:3:14 - | -3 | async fn new(); - | ^^^ diff --git a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_serve.rs b/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_serve.rs deleted file mode 100644 index 11f996bdb2..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_serve.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[tarpc::service] -trait World { - async fn serve(); -} - -fn main() {} diff --git a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_serve.stderr b/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_serve.stderr 
deleted file mode 100644 index 705e0e75e9..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tarpc_service_fn_serve.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: method name conflicts with generated fn `World::serve` - --> $DIR/tarpc_service_fn_serve.rs:3:14 - | -3 | async fn serve(); - | ^^^^^ diff --git a/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_channel_executor.rs b/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_channel_executor.rs deleted file mode 100644 index 6fc2f2bf3d..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_channel_executor.rs +++ /dev/null @@ -1,29 +0,0 @@ -use tarpc::{ - context, - server::{self, Channel}, -}; - -#[tarpc::service] -trait World { - async fn hello(name: String) -> String; -} - -#[derive(Clone)] -struct HelloServer; - -#[tarpc::server] -impl World for HelloServer { - async fn hello(self, _: context::Context, name: String) -> String { - format!("Hello, {name}!") - } -} - -fn main() { - let (_, server_transport) = tarpc::transport::channel::unbounded(); - let server = server::BaseChannel::with_defaults(server_transport); - - #[deny(unused_must_use)] - { - server.execute(HelloServer.serve()); - } -} diff --git a/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_channel_executor.stderr b/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_channel_executor.stderr deleted file mode 100644 index 5b5adf0c23..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_channel_executor.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: unused `TokioChannelExecutor` that must be used - --> tests/compile_fail/tokio/must_use_channel_executor.rs:27:9 - | -27 | server.execute(HelloServer.serve()); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | -note: the lint level is defined here - --> tests/compile_fail/tokio/must_use_channel_executor.rs:25:12 - | -25 | #[deny(unused_must_use)] - | ^^^^^^^^^^^^^^^ diff --git a/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_server_executor.rs 
b/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_server_executor.rs deleted file mode 100644 index 950cf74e61..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_server_executor.rs +++ /dev/null @@ -1,30 +0,0 @@ -use futures::stream::once; -use tarpc::{ - context, - server::{self, incoming::Incoming}, -}; - -#[tarpc::service] -trait World { - async fn hello(name: String) -> String; -} - -#[derive(Clone)] -struct HelloServer; - -#[tarpc::server] -impl World for HelloServer { - async fn hello(self, _: context::Context, name: String) -> String { - format!("Hello, {name}!") - } -} - -fn main() { - let (_, server_transport) = tarpc::transport::channel::unbounded(); - let server = once(async move { server::BaseChannel::with_defaults(server_transport) }); - - #[deny(unused_must_use)] - { - server.execute(HelloServer.serve()); - } -} diff --git a/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_server_executor.stderr b/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_server_executor.stderr deleted file mode 100644 index 57daf90638..0000000000 --- a/datadog-ipc/tarpc/tests/compile_fail/tokio/must_use_server_executor.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: unused `TokioServerExecutor` that must be used - --> tests/compile_fail/tokio/must_use_server_executor.rs:28:9 - | -28 | server.execute(HelloServer.serve()); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | -note: the lint level is defined here - --> tests/compile_fail/tokio/must_use_server_executor.rs:26:12 - | -26 | #[deny(unused_must_use)] - | ^^^^^^^^^^^^^^^ diff --git a/datadog-ipc/tarpc/tests/dataservice.rs b/datadog-ipc/tarpc/tests/dataservice.rs deleted file mode 100644 index 365594bd45..0000000000 --- a/datadog-ipc/tarpc/tests/dataservice.rs +++ /dev/null @@ -1,55 +0,0 @@ -use futures::prelude::*; -use tarpc::serde_transport; -use tarpc::{ - client, context, - server::{incoming::Incoming, BaseChannel}, -}; -use tokio_serde::formats::Json; - -#[tarpc::derive_serde] -#[derive(Debug, 
PartialEq, Eq)] -pub enum TestData { - Black, - White, -} - -#[tarpc::service] -pub trait ColorProtocol { - async fn get_opposite_color(color: TestData) -> TestData; -} - -#[derive(Clone)] -struct ColorServer; - -#[tarpc::server] -impl ColorProtocol for ColorServer { - async fn get_opposite_color(self, _: context::Context, color: TestData) -> TestData { - match color { - TestData::White => TestData::Black, - TestData::Black => TestData::White, - } - } -} - -#[tokio::test] -async fn test_call() -> anyhow::Result<()> { - let transport = tarpc::serde_transport::tcp::listen("localhost:56797", Json::default).await?; - let addr = transport.local_addr(); - tokio::spawn( - transport - .take(1) - .filter_map(|r| async { r.ok() }) - .map(BaseChannel::with_defaults) - .execute(ColorServer.serve()), - ); - - let transport = serde_transport::tcp::connect(addr, Json::default).await?; - let client = ColorProtocolClient::new(client::Config::default(), transport).spawn(); - - let color = client - .get_opposite_color(context::current(), TestData::White) - .await?; - assert_eq!(color, TestData::Black); - - Ok(()) -} diff --git a/datadog-ipc/tarpc/tests/service_functional.rs b/datadog-ipc/tarpc/tests/service_functional.rs deleted file mode 100644 index 0504f45b27..0000000000 --- a/datadog-ipc/tarpc/tests/service_functional.rs +++ /dev/null @@ -1,280 +0,0 @@ -use assert_matches::assert_matches; -use futures::{ - future::{join_all, ready, Ready}, - prelude::*, -}; -use std::time::{Duration, SystemTime}; -use tarpc::{ - client::{self}, - context, - server::{self, incoming::Incoming, BaseChannel, Channel}, - transport::channel, -}; -use tokio::join; - -#[tarpc_plugins::service] -trait Service { - async fn add(x: i32, y: i32) -> i32; - async fn hey(name: String) -> String; -} - -#[derive(Clone)] -struct Server; - -impl Service for Server { - type AddFut = Ready; - - fn add(self, _: context::Context, x: i32, y: i32) -> Self::AddFut { - ready(x + y) - } - - type HeyFut = Ready; - - fn 
hey(self, _: context::Context, name: String) -> Self::HeyFut { - ready(format!("Hey, {name}.")) - } -} - -#[tokio::test] -#[cfg_attr(miri, ignore)] -async fn sequential() -> anyhow::Result<()> { - let _ = tracing_subscriber::fmt::try_init(); - - let (tx, rx) = channel::unbounded(); - - tokio::spawn( - BaseChannel::new(server::Config::default(), rx) - .requests() - .execute(Server.serve()), - ); - - let client = ServiceClient::new(client::Config::default(), tx).spawn(); - - assert_matches!(client.add(context::current(), 1, 2).await, Ok(3)); - assert_matches!( - client.hey(context::current(), "Tim".into()).await, - Ok(ref s) if s == "Hey, Tim."); - - Ok(()) -} - -#[tokio::test] -#[cfg_attr(miri, ignore)] -async fn dropped_channel_aborts_in_flight_requests() -> anyhow::Result<()> { - #[tarpc_plugins::service] - trait Loop { - async fn r#loop(); - } - - #[derive(Clone)] - struct LoopServer; - - #[tarpc::server] - impl Loop for LoopServer { - async fn r#loop(self, _: context::Context) { - loop { - futures::pending!(); - } - } - } - - let _ = tracing_subscriber::fmt::try_init(); - - let (tx, rx) = channel::unbounded(); - - // Set up a client that initiates a long-lived request. - // The request will complete in error when the server drops the connection. - tokio::spawn(async move { - let client = LoopClient::new(client::Config::default(), tx).spawn(); - - let mut ctx = context::current(); - ctx.deadline = SystemTime::now() + Duration::from_secs(60 * 60); - let _ = client.r#loop(ctx).await; - }); - - let mut requests = BaseChannel::with_defaults(rx).requests(); - // Reading a request should trigger the request being registered with BaseChannel. - let first_request = requests.next().await.unwrap()?; - // Dropping the channel should trigger cleanup of outstanding requests. - drop(requests); - // In-flight requests should be aborted by channel cleanup. 
- // The first and only request sent by the client is `loop`, which is an infinite loop - // on the server side, so if cleanup was not triggered, this line should hang indefinitely. - first_request.execute(LoopServer.serve()).await; - - Ok(()) -} - -#[cfg(all(feature = "serde-transport", feature = "tcp"))] -#[tokio::test] -#[cfg_attr(miri, ignore)] -async fn serde_tcp() -> anyhow::Result<()> { - use tarpc::serde_transport; - use tokio_serde::formats::Json; - - let _ = tracing_subscriber::fmt::try_init(); - - let transport = tarpc::serde_transport::tcp::listen("localhost:56789", Json::default).await?; - let addr = transport.local_addr(); - tokio::spawn( - transport - .take(1) - .filter_map(|r| async { r.ok() }) - .map(BaseChannel::with_defaults) - .execute(Server.serve()), - ); - - let transport = serde_transport::tcp::connect(addr, Json::default).await?; - let client = ServiceClient::new(client::Config::default(), transport).spawn(); - - assert_matches!(client.add(context::current(), 1, 2).await, Ok(3)); - assert_matches!( - client.hey(context::current(), "Tim".to_string()).await, - Ok(ref s) if s == "Hey, Tim." 
- ); - - Ok(()) -} - -#[cfg(all(feature = "serde-transport", feature = "unix", unix))] -#[tokio::test] -#[cfg_attr(miri, ignore)] -async fn serde_uds() -> anyhow::Result<()> { - use tarpc::serde_transport; - use tokio_serde::formats::Json; - - let _ = tracing_subscriber::fmt::try_init(); - - let sock = tarpc::serde_transport::unix::TempPathBuf::with_random("uds"); - let transport = tarpc::serde_transport::unix::listen(&sock, Json::default).await?; - tokio::spawn( - transport - .take(1) - .filter_map(|r| async { r.ok() }) - .map(BaseChannel::with_defaults) - .execute(Server.serve()), - ); - - let transport = serde_transport::unix::connect(&sock, Json::default).await?; - let client = ServiceClient::new(client::Config::default(), transport).spawn(); - - // Save results using socket so we can clean the socket even if our test assertions fail - let res1 = client.add(context::current(), 1, 2).await; - let res2 = client.hey(context::current(), "Tim".to_string()).await; - - assert_matches!(res1, Ok(3)); - assert_matches!(res2, Ok(ref s) if s == "Hey, Tim."); - - Ok(()) -} - -#[tokio::test] -#[cfg_attr(miri, ignore)] -async fn concurrent() -> anyhow::Result<()> { - let _ = tracing_subscriber::fmt::try_init(); - - let (tx, rx) = channel::unbounded(); - tokio::spawn( - stream::once(ready(rx)) - .map(BaseChannel::with_defaults) - .execute(Server.serve()), - ); - - let client = ServiceClient::new(client::Config::default(), tx).spawn(); - - let req1 = client.add(context::current(), 1, 2); - let req2 = client.add(context::current(), 3, 4); - let req3 = client.hey(context::current(), "Tim".to_string()); - - assert_matches!(req1.await, Ok(3)); - assert_matches!(req2.await, Ok(7)); - assert_matches!(req3.await, Ok(ref s) if s == "Hey, Tim."); - - Ok(()) -} - -#[tokio::test] -#[cfg_attr(miri, ignore)] -async fn concurrent_join() -> anyhow::Result<()> { - let _ = tracing_subscriber::fmt::try_init(); - - let (tx, rx) = channel::unbounded(); - tokio::spawn( - stream::once(ready(rx)) - 
.map(BaseChannel::with_defaults) - .execute(Server.serve()), - ); - - let client = ServiceClient::new(client::Config::default(), tx).spawn(); - - let req1 = client.add(context::current(), 1, 2); - let req2 = client.add(context::current(), 3, 4); - let req3 = client.hey(context::current(), "Tim".to_string()); - - let (resp1, resp2, resp3) = join!(req1, req2, req3); - assert_matches!(resp1, Ok(3)); - assert_matches!(resp2, Ok(7)); - assert_matches!(resp3, Ok(ref s) if s == "Hey, Tim."); - - Ok(()) -} - -#[tokio::test] -#[cfg_attr(miri, ignore)] -async fn concurrent_join_all() -> anyhow::Result<()> { - let _ = tracing_subscriber::fmt::try_init(); - - let (tx, rx) = channel::unbounded(); - tokio::spawn( - stream::once(ready(rx)) - .map(BaseChannel::with_defaults) - .execute(Server.serve()), - ); - - let client = ServiceClient::new(client::Config::default(), tx).spawn(); - - let req1 = client.add(context::current(), 1, 2); - let req2 = client.add(context::current(), 3, 4); - - let responses = join_all(vec![req1, req2]).await; - assert_matches!(responses[0], Ok(3)); - assert_matches!(responses[1], Ok(7)); - - Ok(()) -} - -#[tokio::test] -#[cfg_attr(miri, ignore)] -async fn counter() -> anyhow::Result<()> { - #[tarpc::service] - trait Counter { - async fn count() -> u32; - } - - struct CountService(u32); - - impl Counter for &mut CountService { - type CountFut = futures::future::Ready; - - fn count(self, _: context::Context) -> Self::CountFut { - self.0 += 1; - futures::future::ready(self.0) - } - } - - let (tx, rx) = channel::unbounded(); - tokio::spawn(async { - let mut requests = BaseChannel::with_defaults(rx).requests(); - let mut counter = CountService(0); - - while let Some(Ok(request)) = requests.next().await { - request.execute(counter.serve()).await; - } - }); - - let client = CounterClient::new(client::Config::default(), tx).spawn(); - assert_matches!(client.count(context::current()).await, Ok(1)); - assert_matches!(client.count(context::current()).await, 
Ok(2)); - - Ok(()) -} diff --git a/datadog-ipc/tests/blocking_client.rs b/datadog-ipc/tests/blocking_client.rs index d42ae89dd1..c9a6005b9b 100644 --- a/datadog-ipc/tests/blocking_client.rs +++ b/datadog-ipc/tests/blocking_client.rs @@ -3,78 +3,43 @@ #![cfg(unix)] use std::{ io::Write, - os::unix::net::UnixStream, time::{Duration, Instant}, }; use tokio::runtime; -use datadog_ipc::example_interface::{ - ExampleInterfaceRequest, ExampleInterfaceResponse, ExampleServer, ExampleTransport, -}; -use datadog_ipc::platform::Channel; +use datadog_ipc::example_interface::{ExampleInterfaceChannel, ExampleServer}; +use datadog_ipc::SeqpacketConn; #[test] #[cfg_attr(miri, ignore)] fn test_blocking_client() { - let (sock_a, sock_b) = UnixStream::pair().unwrap(); + let (conn_server, conn_client) = SeqpacketConn::socketpair().unwrap(); + // Setup async server let rt = runtime::Builder::new_multi_thread() .worker_threads(1) .enable_all() .build() .unwrap(); - { - // drop guard at the end of the code - let _g = rt.enter(); - sock_a.set_nonblocking(true).unwrap(); - - let socket = Channel::from(sock_a); + rt.spawn({ let server = ExampleServer::default(); + async move { server.accept_connection(conn_server).await } + }); - rt.spawn(server.accept_connection(socket)); - } + let mut channel = ExampleInterfaceChannel::new(conn_client); - // Test blocking sync code - let mut transport = ExampleTransport::from(sock_b); - transport.set_nonblocking(true).unwrap(); // sending one-way messages should be instantaineous, even if the RPC worker is not fully up - transport.send(&ExampleInterfaceRequest::Ping {}).unwrap(); - transport.set_nonblocking(false).unwrap(); // write should still be quick, but we'll have to block waiting for RPC worker to come up + // Fire-and-forget ping (blocking variant that waits for ack) + channel.call_ping().unwrap(); - transport - .set_write_timeout(Some(Duration::from_millis(100))) - .unwrap(); - match transport - .call(&ExampleInterfaceRequest::TimeNow {}) - 
.unwrap() - { - ExampleInterfaceResponse::TimeNow(time) => { - assert!(Instant::now().elapsed().saturating_sub(time) < Duration::from_millis(10)); - } - _ => panic!("shouldn't happen"), - } + // Blocking call with response + let time = channel.call_time_now().unwrap(); + assert!(Instant::now().elapsed().saturating_sub(time) < Duration::from_millis(10)); - transport - .set_read_timeout(Some(Duration::from_millis(100))) - .unwrap(); // the RPC worker is up at this point - the read should be very quick - - match transport.call(&ExampleInterfaceRequest::ReqCnt {}).unwrap() { - ExampleInterfaceResponse::ReqCnt(cnt) => assert_eq!(2, cnt), - _ => panic!("shouldn't happen"), - } + // req_cnt should be 2 (ping + time_now) + assert_eq!(2, channel.call_req_cnt().unwrap()); + // Store a file via handle transfer let f = tempfile::tempfile().unwrap(); - transport - .call(&ExampleInterfaceRequest::StoreFile { file: f.into() }) - .unwrap(); - - let f = match transport - .call(&ExampleInterfaceRequest::RetrieveFile {}) - .unwrap() - { - ExampleInterfaceResponse::RetrieveFile(f) => f.unwrap(), - _ => panic!("shouldn't happen"), - }; - let mut f = f.into_instance().unwrap(); - writeln!(f, "test").unwrap(); // file should still be writeable + channel.try_send_store_file(f.into()); } diff --git a/datadog-sidecar-ffi/src/lib.rs b/datadog-sidecar-ffi/src/lib.rs index 2978dc953a..a3d1c5db07 100644 --- a/datadog-sidecar-ffi/src/lib.rs +++ b/datadog-sidecar-ffi/src/lib.rs @@ -581,8 +581,7 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( force_drop_size: usize, log_level: ffi::CharSlice, log_path: ffi::CharSlice, - #[allow(unused)] // On FFI layer we cannot conditionally compile, so we need the arg - remote_config_notify_function: *mut c_void, + _remote_config_notify_function: *mut c_void, remote_config_products: *const RemoteConfigProduct, remote_config_products_count: usize, remote_config_capabilities: *const RemoteConfigCapabilities, @@ -591,51 +590,59 @@ pub unsafe extern "C" 
fn ddog_sidecar_session_set_config( is_fork: bool, process_tags: &libdd_common_ffi::Vec, ) -> MaybeError { + let session_id_str: String = session_id.to_utf8_lossy().into(); + let session_config = SessionConfig { + endpoint: agent_endpoint.clone(), + dogstatsd_endpoint: dogstatsd_endpoint.clone(), + language: language.to_utf8_lossy().into(), + language_version: language_version.to_utf8_lossy().into(), + tracer_version: tracer_version.to_utf8_lossy().into(), + flush_interval: Duration::from_millis(flush_interval_milliseconds as u64), + remote_config_poll_interval: Duration::from_millis( + remote_config_poll_interval_millis as u64 + ), + telemetry_heartbeat_interval: Duration::from_millis( + telemetry_heartbeat_interval_millis as u64 + ), + force_flush_size, + force_drop_size, + log_level: log_level.to_utf8_lossy().into(), + log_file: if log_path.is_empty() { + config::FromEnv::log_method() + } else { + LogMethod::File(String::from(log_path.to_utf8_lossy()).into()) + }, + remote_config_products: ffi::Slice::from_raw_parts( + remote_config_products, + remote_config_products_count + ) + .as_slice() + .to_vec(), + remote_config_capabilities: ffi::Slice::from_raw_parts( + remote_config_capabilities, + remote_config_capabilities_count + ) + .as_slice() + .to_vec(), + remote_config_enabled, + process_tags: process_tags.to_vec(), + }; #[cfg(unix)] - let remote_config_notify_target = libc::getpid(); + try_c!(blocking::set_session_config( + transport, + session_id_str, + &session_config, + is_fork, + )); #[cfg(windows)] - let remote_config_notify_target = remote_config_notify_function; try_c!(blocking::set_session_config( transport, - remote_config_notify_target, - session_id.to_utf8_lossy().into(), - &SessionConfig { - endpoint: agent_endpoint.clone(), - dogstatsd_endpoint: dogstatsd_endpoint.clone(), - language: language.to_utf8_lossy().into(), - language_version: language_version.to_utf8_lossy().into(), - tracer_version: tracer_version.to_utf8_lossy().into(), - 
flush_interval: Duration::from_millis(flush_interval_milliseconds as u64), - remote_config_poll_interval: Duration::from_millis( - remote_config_poll_interval_millis as u64 - ), - telemetry_heartbeat_interval: Duration::from_millis( - telemetry_heartbeat_interval_millis as u64 - ), - force_flush_size, - force_drop_size, - log_level: log_level.to_utf8_lossy().into(), - log_file: if log_path.is_empty() { - config::FromEnv::log_method() - } else { - LogMethod::File(String::from(log_path.to_utf8_lossy()).into()) - }, - remote_config_products: ffi::Slice::from_raw_parts( - remote_config_products, - remote_config_products_count - ) - .as_slice() - .to_vec(), - remote_config_capabilities: ffi::Slice::from_raw_parts( - remote_config_capabilities, - remote_config_capabilities_count - ) - .as_slice() - .to_vec(), - remote_config_enabled, - process_tags: process_tags.to_vec(), - }, - is_fork + session_id_str, + datadog_sidecar::service::remote_configs::RemoteConfigNotifyFunction( + _remote_config_notify_function, + ), + &session_config, + is_fork, )); MaybeError::None diff --git a/datadog-sidecar/Cargo.toml b/datadog-sidecar/Cargo.toml index e0194393af..e0837e8f37 100644 --- a/datadog-sidecar/Cargo.toml +++ b/datadog-sidecar/Cargo.toml @@ -95,7 +95,7 @@ sendfd = { version = "0.4", features = ["tokio"] } [target.'cfg(windows)'.dependencies] libdd-common-ffi = { path = "../libdd-common-ffi", default-features = false } libdd-crashtracker-ffi = { path = "../libdd-crashtracker-ffi", default-features = false, features = ["collector", "collector_windows"] } -winapi = { version = "0.3.9", features = ["securitybaseapi", "sddl"] } +winapi = { version = "0.3.9", features = ["securitybaseapi", "sddl", "winerror", "winbase"] } windows-sys = { version = "0.52.0", features = ["Win32_System_SystemInformation"] } [target.'cfg(windows_seh_wrapper)'.dependencies] diff --git a/datadog-sidecar/src/entry.rs b/datadog-sidecar/src/entry.rs index 218ec23252..9137c8826f 100644 --- 
a/datadog-sidecar/src/entry.rs +++ b/datadog-sidecar/src/entry.rs @@ -21,7 +21,6 @@ use tokio::sync::mpsc; use crate::crashtracker::crashtracker_unix_socket_path; use crate::service::blocking::SidecarTransport; use crate::service::SidecarServer; -use datadog_ipc::platform::AsyncChannel; use crate::setup::{self, IpcClient, IpcServer, Liaison}; @@ -120,7 +119,7 @@ where let server = server.clone(); let shutdown_complete_tx = shutdown_complete_tx.clone(); tokio::spawn(async move { - server.accept_connection(AsyncChannel::from(socket)).await; + server.accept_connection(socket).await; cloned_counter.fetch_add(-1, Ordering::AcqRel); tracing::info!("connection closed"); @@ -235,8 +234,9 @@ pub fn start_or_connect_to_sidecar(cfg: Config) -> anyhow::Result err.context("Error starting sidecar").err(), }; - Ok(liaison - .connect_to_server() - .map_err(|e| err.unwrap_or(e.into()))? - .into()) + Ok(SidecarTransport::from( + liaison + .connect_to_server() + .map_err(|e| err.unwrap_or(e.into()))?, + )) } diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index 8ea858e8f5..55952f3c84 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ b/datadog-sidecar/src/service/blocking.rs @@ -3,36 +3,32 @@ use super::{ DynamicInstrumentationConfigState, InstanceId, QueueId, SerializedTracerHeaderTags, - SessionConfig, SidecarAction, SidecarInterfaceRequest, SidecarInterfaceResponse, + SessionConfig, SidecarAction, }; -use datadog_ipc::platform::{Channel, FileBackedHandle, ShmHandle}; -use datadog_ipc::transport::blocking::BlockingTransport; +use crate::service::sender::SidecarSender; +use crate::service::sidecar_interface::{SidecarInterfaceChannel, SidecarInterfaceRequest}; +use datadog_ipc::platform::{FileBackedHandle, ShmHandle}; +use datadog_ipc::SeqpacketConn; use datadog_live_debugger::debugger_defs::DebuggerPayload; use datadog_live_debugger::sender::DebuggerType; use libdd_common::tag::Tag; -use libdd_common::MutexExt; use 
libdd_dogstatsd_client::DogStatsDActionOwned; use serde::Serialize; -use std::sync::{Mutex, MutexGuard}; +use std::sync::Mutex; use std::{ io, time::{Duration, Instant}, }; use tracing::{info, warn}; -/// `SidecarTransport` is a wrapper around a BlockingTransport struct from the `datadog_ipc` crate -/// that handles transparent reconnection. -/// It is used for sending `SidecarInterfaceRequest` and receiving `SidecarInterfaceResponse`. +/// `SidecarTransport` wraps a [`SidecarSender`] with transparent reconnection support. /// /// This transport is used for communication between different parts of the sidecar service. -/// It is a blocking transport, meaning that it will block the current thread until the operation is -/// complete. +/// It is a blocking transport (all operations block the current thread). pub struct SidecarTransport { - pub inner: Mutex>, - /// If the reconnect_fn is given, whenever a broken pipe is encountered, the connection will be + pub inner: Mutex, + /// If provided, whenever a connection error is encountered, the connection will be /// attempted to be re-established by calling this function. - /// Note that reconnecting only changes the transport (i.e. inner), but keeps the original - /// reconnect_fn. pub reconnect_fn: Option Option>>>, } @@ -47,7 +43,7 @@ impl SidecarTransport { }; #[allow(clippy::unwrap_used)] - if transport.is_closed() { + if transport.channel.0.is_closed() { info!("The sidecar transport is closed. 
Reconnecting..."); let new = match factory() { None => return, @@ -60,46 +56,56 @@ impl SidecarTransport { } } - pub fn set_read_timeout(&mut self, timeout: Option) -> io::Result<()> { - match self.inner.lock() { - Ok(mut t) => t.set_read_timeout(timeout), - Err(e) => Err(io::Error::other(e.to_string())), - } + pub fn set_read_timeout(&mut self, d: Option) -> io::Result<()> { + lock_sender(self)?.set_read_timeout(d) } - pub fn set_write_timeout(&mut self, timeout: Option) -> io::Result<()> { - match self.inner.lock() { - Ok(mut t) => t.set_write_timeout(timeout), - Err(e) => Err(io::Error::other(e.to_string())), + pub fn set_write_timeout(&mut self, d: Option) -> io::Result<()> { + lock_sender(self)?.set_write_timeout(d) + } + + pub fn ensure_alive(&mut self) { + let closed = match self.inner.lock() { + Ok(guard) => guard.channel.0.is_closed(), + Err(_) => return, + }; + if closed { + if let Some(ref reconnect) = self.reconnect_fn { + warn!("The sidecar transport is closed. Reconnecting... This generally indicates a problem with the sidecar, most likely a crash. Check the logs / core dump locations and possibly report a bug."); + if let Some(n) = reconnect() { + if let Ok(mut guard) = self.inner.lock() { + if let Ok(new) = n.inner.into_inner() { + *guard = new; + } + } + } + } } } pub fn is_closed(&self) -> bool { match self.inner.lock() { - Ok(t) => t.is_closed(), + Ok(t) => t.channel.0.is_closed(), // Should happen only during the "reconnection" phase. During this phase the transport - // is always closed. + // is always considered closed. 
Err(_) => true, } } - fn with_retry< - F: Fn( - &mut MutexGuard>, - ) -> io::Result, - V, - >( - &mut self, - f: F, - ) -> io::Result { + fn with_retry(&mut self, f: F) -> io::Result + where + F: Fn(&mut SidecarSender) -> io::Result, + { let mut inner = match self.inner.lock() { Ok(t) => t, Err(e) => return Err(io::Error::other(e.to_string())), }; - match f(&mut inner) { + match f(&mut *inner) { Ok(ret) => Ok(ret), Err(e) => { - if e.kind() == io::ErrorKind::BrokenPipe { + if e.kind() == io::ErrorKind::BrokenPipe + || e.kind() == io::ErrorKind::ConnectionReset + { if let Some(ref reconnect) = self.reconnect_fn { warn!("The sidecar transport is closed. Reconnecting... This generally indicates a problem with the sidecar, most likely a crash. Check the logs / core dump locations and possibly report a bug."); *inner = match reconnect() { @@ -107,7 +113,7 @@ impl SidecarTransport { #[allow(clippy::unwrap_used)] Some(n) => n.inner.into_inner().unwrap(), }; - f(&mut inner) + f(&mut *inner) } else { Err(e) } @@ -118,191 +124,106 @@ impl SidecarTransport { } } - pub fn send(&mut self, item: SidecarInterfaceRequest) -> io::Result<()> { - self.with_retry(|t| t.send(&item)) - } - - pub fn call(&mut self, item: SidecarInterfaceRequest) -> io::Result { - self.with_retry(|t| t.call(&item)) - } - - pub fn call_noretry( - &mut self, - item: SidecarInterfaceRequest, - ) -> io::Result { - let mut inner = match self.inner.lock() { - Ok(t) => t, - Err(e) => return Err(io::Error::other(e.to_string())), - }; - inner.call(&item) - } - + /// Send garbage data (used in tests to verify error handling). 
pub fn send_garbage(&mut self) -> io::Result<()> { - self.inner.lock_or_panic().send_garbage() + match self.inner.lock() { + Ok(mut c) => c.channel.0.send_blocking(&mut vec![0xDE, 0xAD, 0xBE, 0xEF], &[]), + Err(e) => Err(io::Error::other(e.to_string())), + } } } -impl From for SidecarTransport { - fn from(c: Channel) -> Self { +impl From for SidecarTransport { + fn from(conn: SeqpacketConn) -> Self { SidecarTransport { - inner: Mutex::new(c.into()), + inner: Mutex::new(SidecarSender::new(SidecarInterfaceChannel::new(conn))), reconnect_fn: None, } } } +fn lock_sender(transport: &mut SidecarTransport) -> io::Result> { + transport.ensure_alive(); + transport.inner.lock().map_err(|e| io::Error::other(e.to_string())) +} + /// Shuts down a runtime. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. pub fn shutdown_runtime( transport: &mut SidecarTransport, instance_id: &InstanceId, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::ShutdownRuntime { - instance_id: instance_id.clone(), - }) + lock_sender(transport)?.shutdown_runtime(instance_id.clone()); + Ok(()) } /// Shuts down a session. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `session_id` - The ID of the session. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. pub fn shutdown_session(transport: &mut SidecarTransport, session_id: String) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::ShutdownSession { session_id }) + lock_sender(transport)?.shutdown_session(session_id); + Ok(()) } /// Enqueues a list of actions to be performed. /// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `queue_id` - The unique identifier for the action in the queue. 
-/// * `actions` - The action type being enqueued. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. +/// Uses `with_retry`: if the connection is broken the transport reconnects and the actions +/// are retried once on the new connection, so that telemetry/lifecycle events are not lost +/// when the sidecar crashes and restarts. pub fn enqueue_actions( transport: &mut SidecarTransport, instance_id: &InstanceId, queue_id: &QueueId, actions: Vec, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::EnqueueActions { + // Pre-serialize once so the Fn closure can borrow the bytes for both the initial + // attempt and the reconnect retry without needing SidecarAction: Clone. + let req = SidecarInterfaceRequest::EnqueueActions { instance_id: instance_id.clone(), queue_id: *queue_id, actions, - }) + }; + let data = datadog_ipc::codec::encode(req.discriminant(), &req); + transport.with_retry(|s| s.drain_and_send_raw_blocking(&data)) } /// Sets the configuration for a session. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `remote_config_notify_function` (windows): a function pointer to be invoked -/// * `pid` (unix): the pid of the remote process -/// * `session_id` - The ID of the session. -/// * `config` - The configuration to be set. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. 
pub fn set_session_config( transport: &mut SidecarTransport, - #[cfg(unix)] pid: libc::pid_t, - #[cfg(windows)] remote_config_notify_function: *mut libc::c_void, session_id: String, + #[cfg(windows)] remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, config: &SessionConfig, is_fork: bool, ) -> io::Result<()> { - #[cfg(unix)] - let remote_config_notify_target = pid; - #[cfg(windows)] - let remote_config_notify_target = - crate::service::remote_configs::RemoteConfigNotifyFunction(remote_config_notify_function); - transport.send(SidecarInterfaceRequest::SetSessionConfig { + lock_sender(transport)?.set_session_config( session_id, - remote_config_notify_target, - config: config.clone(), + #[cfg(windows)] + remote_config_notify_function, + config.clone(), is_fork, - }) + ); + Ok(()) } /// Updates the process tags for an existing session. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `session_id` - The ID of the session. -/// * `process_tags` - The process tags to set. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. pub fn set_session_process_tags( transport: &mut SidecarTransport, session_id: String, process_tags: Vec, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SetSessionProcessTags { - session_id, - process_tags, - }) + lock_sender(transport)?.set_session_process_tags(session_id, process_tags); + Ok(()) } /// Sends a trace as bytes. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `data` - The trace data serialized as bytes. -/// * `headers` - The serialized headers from the tracer. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. 
pub fn send_trace_v04_bytes( transport: &mut SidecarTransport, instance_id: &InstanceId, data: Vec, headers: SerializedTracerHeaderTags, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SendTraceV04Bytes { - instance_id: instance_id.clone(), - data, - headers, - }) + lock_sender(transport)?.send_trace_v04_bytes(instance_id.clone(), data, headers); + Ok(()) } /// Sends a trace via shared memory. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `handle` - The handle to the shared memory. -/// * `len` - The size of the shared memory data. -/// * `headers` - The serialized headers from the tracer. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. pub fn send_trace_v04_shm( transport: &mut SidecarTransport, instance_id: &InstanceId, @@ -310,27 +231,11 @@ pub fn send_trace_v04_shm( len: usize, headers: SerializedTracerHeaderTags, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SendTraceV04Shm { - instance_id: instance_id.clone(), - handle, - len, - headers, - }) + lock_sender(transport)?.send_trace_v04_shm(instance_id.clone(), handle, len, headers); + Ok(()) } /// Sends raw data from shared memory to the debugger endpoint. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `queue_id` - The unique identifier for the trace context. -/// * `handle` - The handle to the shared memory. -/// * `debugger_type` - Whether it's log or diagnostic data. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. 
pub fn send_debugger_data_shm( transport: &mut SidecarTransport, instance_id: &InstanceId, @@ -338,26 +243,11 @@ pub fn send_debugger_data_shm( handle: ShmHandle, debugger_type: DebuggerType, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SendDebuggerDataShm { - instance_id: instance_id.clone(), - queue_id, - handle, - debugger_type, - }) + lock_sender(transport)?.send_debugger_data_shm(instance_id.clone(), queue_id, handle, debugger_type); + Ok(()) } -/// Sends a collection of debugger payloads to the debugger endpoint. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `queue_id` - The unique identifier for the trace context. -/// * `payloads` - The payloads to be sent -/// -/// # Returns -/// -/// An `anyhow::Result<()>` indicating the result of the operation. +/// Sends a collection of debugger payloads to the debugger endpoint via shared memory. pub fn send_debugger_data_shm_vec( transport: &mut SidecarTransport, instance_id: &InstanceId, @@ -399,65 +289,31 @@ pub fn send_debugger_data_shm_vec( } /// Submits debugger diagnostics. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `queue_id` - The unique identifier for the trace context. -/// * `handle` - The handle to the shared memory. -/// * `diagnostics_payload` - The diagnostics data to send. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. 
pub fn send_debugger_diagnostics( transport: &mut SidecarTransport, instance_id: &InstanceId, queue_id: QueueId, diagnostics_payload: DebuggerPayload, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SendDebuggerDiagnostics { - instance_id: instance_id.clone(), + lock_sender(transport)?.send_debugger_diagnostics( + instance_id.clone(), queue_id, - diagnostics_payload: serde_json::to_vec(&diagnostics_payload)?, - }) + serde_json::to_vec(&diagnostics_payload)?, + ); + Ok(()) } /// Acquire an exception hash rate limiter -/// -/// # Arguments -/// * `exception_hash` - the ID -/// * `granularity` - how much time needs to pass between two exceptions pub fn acquire_exception_hash_rate_limiter( transport: &mut SidecarTransport, exception_hash: u64, granularity: Duration, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::AcquireExceptionHashRateLimiter { - exception_hash, - granularity, - }) + lock_sender(transport)?.acquire_exception_hash_rate_limiter(exception_hash, granularity); + Ok(()) } /// Sets the state of the current remote config operation. -/// The queue id is shared with telemetry and the associated data will be freed upon a -/// `Lifecycle::Stop` event. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `queue_id` - The unique identifier for the action in the queue. -/// * `service_name` - The name of the service. -/// * `env_name` - The name of the environment. -/// * `app_version` - The metadata of the runtime. -/// * `global_tags` - Global tags. -/// * `dynamic_instrumentation_state` - Whether dynamic instrumentation is enabled. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. 
#[allow(clippy::too_many_arguments)] pub fn set_universal_service_tags( transport: &mut SidecarTransport, @@ -469,146 +325,72 @@ pub fn set_universal_service_tags( global_tags: Vec, dynamic_instrumentation_state: DynamicInstrumentationConfigState, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SetUniversalServiceTags { - instance_id: instance_id.clone(), - queue_id: *queue_id, + lock_sender(transport)?.set_universal_service_tags( + instance_id.clone(), + *queue_id, service_name, env_name, app_version, global_tags, dynamic_instrumentation_state, - }) + ); + Ok(()) } /// Sets request state which do not directly affect the RC connection. -/// The queue id is shared with telemetry and the associated data will be freed upon a -/// `Lifecycle::Stop` event. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `queue_id` - The unique identifier for the action in the queue. -/// * `dynamic_instrumentation_state` - Whether dynamic instrumentation is enabled. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. pub fn set_request_config( transport: &mut SidecarTransport, instance_id: &InstanceId, queue_id: &QueueId, dynamic_instrumentation_state: DynamicInstrumentationConfigState, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SetRequestConfig { - instance_id: instance_id.clone(), - queue_id: *queue_id, + lock_sender(transport)?.set_request_config( + instance_id.clone(), + *queue_id, dynamic_instrumentation_state, - }) + ); + Ok(()) } /// Sends DogStatsD actions. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `actions` - The DogStatsD actions to send. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. 
pub fn send_dogstatsd_actions( transport: &mut SidecarTransport, instance_id: &InstanceId, actions: Vec, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SendDogstatsdActions { - instance_id: instance_id.clone(), - actions, - }) + lock_sender(transport)?.send_dogstatsd_actions(instance_id.clone(), actions); + Ok(()) } /// Sets x-datadog-test-session-token on all requests for the given session. -/// -/// # Arguments -/// -/// * `session_id` - The ID of the session. -/// * `token` - The session token. -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. pub fn set_test_session_token( transport: &mut SidecarTransport, session_id: String, token: String, ) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SetTestSessionToken { session_id, token }) + lock_sender(transport)?.set_test_session_token(session_id, token); + Ok(()) } /// Dumps the current state of the service. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// -/// # Returns -/// -/// An `io::Result` representing the current state of the service. pub fn dump(transport: &mut SidecarTransport) -> io::Result { - let res = transport.call(SidecarInterfaceRequest::Dump {})?; - if let SidecarInterfaceResponse::Dump(dump) = res { - Ok(dump) - } else { - Ok(String::default()) - } + transport.with_retry(|s| s.dump().map_err(|e| io::Error::other(e.to_string()))) } /// Retrieves the current statistics of the service. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// -/// # Returns -/// -/// An `io::Result` representing the current statistics of the service. 
pub fn stats(transport: &mut SidecarTransport) -> io::Result { - let res = transport.call(SidecarInterfaceRequest::Stats {})?; - if let SidecarInterfaceResponse::Stats(stats) = res { - Ok(stats) - } else { - Ok(String::default()) - } + transport.with_retry(|s| s.stats().map_err(|e| io::Error::other(e.to_string()))) } /// Flushes the outstanding traces. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. pub fn flush_traces(transport: &mut SidecarTransport) -> io::Result<()> { - transport.call_noretry(SidecarInterfaceRequest::FlushTraces {})?; - Ok(()) + transport.with_retry(|s| s.flush_traces()) } /// Sends a ping to the service. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// -/// # Returns -/// -/// An `io::Result` representing the round-trip time of the ping. pub fn ping(transport: &mut SidecarTransport) -> io::Result { let start = Instant::now(); - transport.call(SidecarInterfaceRequest::Ping {})?; - + transport.with_retry(|s| s.ping())?; Ok(start.elapsed()) } @@ -616,71 +398,53 @@ pub fn ping(transport: &mut SidecarTransport) -> io::Result { #[cfg(unix)] mod tests { use crate::service::blocking::SidecarTransport; - use datadog_ipc::platform::Channel; - use std::net::Shutdown; - use std::os::unix::net::{UnixListener, UnixStream}; + use datadog_ipc::{SeqpacketConn, SeqpacketListener}; use std::time::Duration; + use tempfile::tempdir; #[test] #[cfg_attr(miri, ignore)] fn test_reconnect() { - let bind_addr = "/tmp/test_reconnect.sock"; - let _ = std::fs::remove_file(bind_addr); + let tmpdir = tempdir().unwrap(); + let socket_path = tmpdir.path().join("test.sock"); - let listener = UnixListener::bind(bind_addr).expect("Cannot bind"); - let sock = UnixStream::connect_addr(&listener.local_addr().unwrap()).unwrap(); + let listener = SeqpacketListener::bind(&socket_path).expect("Cannot bind"); + let conn 
= SeqpacketConn::connect(&socket_path).unwrap(); + // Accept so the server holds liveness_read; dropping server_conn triggers POLLHUP. + let server_conn = listener.try_accept().expect("try_accept"); - let mut transport = SidecarTransport::from(Channel::from(sock.try_clone().unwrap())); + let mut transport = SidecarTransport::from(conn); assert!(!transport.is_closed()); - sock.shutdown(Shutdown::Both) - .expect("shutdown function failed"); + // Drop the accepted conn: closes liveness_read → POLLHUP on liveness_write. + drop(server_conn); + drop(listener); + // Force close detection by triggering an I/O operation. + let _ = transport.send_garbage(); assert!(transport.is_closed()); + let socket_path2 = socket_path.clone(); + let listener2 = SeqpacketListener::bind(&socket_path2).expect("Cannot rebind"); transport.reconnect(|| { - let new_sock = UnixStream::connect_addr(&listener.local_addr().unwrap()).unwrap(); - Some(Box::new(SidecarTransport::from(Channel::from(new_sock)))) + let new_conn = SeqpacketConn::connect(&socket_path2).ok()?; + Some(Box::new(SidecarTransport::from(new_conn))) }); assert!(!transport.is_closed()); - - let _ = std::fs::remove_file(bind_addr); + drop(listener2); } #[test] #[cfg_attr(miri, ignore)] - fn test_set_timeout() { - let bind_addr = "/tmp/test_set_timeout.sock"; - let _ = std::fs::remove_file(bind_addr); - - let listener = UnixListener::bind(bind_addr).expect("Cannot bind"); - let sock = UnixStream::connect_addr(&listener.local_addr().unwrap()).unwrap(); - - let mut transport = SidecarTransport::from(Channel::from(sock.try_clone().unwrap())); - assert_eq!( - Duration::default(), - sock.read_timeout().unwrap().unwrap_or_default() - ); - assert_eq!( - Duration::default(), - sock.write_timeout().unwrap().unwrap_or_default() - ); - - transport - .set_read_timeout(Some(Duration::from_millis(200))) - .expect("set_read_timeout function failed"); - transport - .set_write_timeout(Some(Duration::from_millis(300))) - .expect("set_write_timeout 
function failed"); - - assert_eq!( - Duration::from_millis(200), - sock.read_timeout().unwrap().unwrap_or_default() - ); - assert_eq!( - Duration::from_millis(300), - sock.write_timeout().unwrap().unwrap_or_default() - ); - - let _ = std::fs::remove_file(bind_addr); + fn test_connection_basic() { + let tmpdir = tempdir().unwrap(); + let socket_path = tmpdir.path().join("test_basic.sock"); + + let listener = SeqpacketListener::bind(&socket_path).expect("Cannot bind"); + let conn = SeqpacketConn::connect(&socket_path).unwrap(); + + let transport = SidecarTransport::from(conn); + assert!(!transport.is_closed()); + drop(transport); + drop(listener); } } diff --git a/datadog-sidecar/src/service/mod.rs b/datadog-sidecar/src/service/mod.rs index b5e3ddb081..133c019fab 100644 --- a/datadog-sidecar/src/service/mod.rs +++ b/datadog-sidecar/src/service/mod.rs @@ -19,12 +19,11 @@ pub use runtime_metadata::RuntimeMetadata; pub use serialized_tracer_header_tags::SerializedTracerHeaderTags; // public to crate types we want to bring up to top level of service:: scope -pub(crate) use request_identification::{RequestIdentification, RequestIdentifier}; pub(crate) use sidecar_server::SidecarServer; use runtime_info::RuntimeInfo; use session_info::SessionInfo; -use sidecar_interface::{SidecarInterface, SidecarInterfaceRequest, SidecarInterfaceResponse}; +pub(crate) use sidecar_interface::SidecarInterface; pub mod agent_info; pub mod blocking; @@ -33,12 +32,12 @@ pub mod exception_hash_rate_limiter; mod instance_id; mod queue_id; mod remote_configs; -mod request_identification; mod runtime_info; mod runtime_metadata; mod serialized_tracer_header_tags; mod session_info; -mod sidecar_interface; +pub mod sender; +pub mod sidecar_interface; pub(crate) mod sidecar_server; pub mod telemetry; pub(crate) mod tracing; diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs new file mode 100644 index 0000000000..035b2b4d06 --- /dev/null +++ 
b/datadog-sidecar/src/service/sender.rs @@ -0,0 +1,419 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +//! Higher-level sender with priority outbox and telemetry load-shedding. +//! +//! Wraps [`SidecarInterfaceChannel`] with: +//! - A **priority outbox** for state-change messages: coalesced and drained before +//! fire-and-forget sends. +//! - **Telemetry load-shedding**: when `outstanding > max_outstanding / 2`, 90% of +//! `EnqueueActions` calls are dropped (telemetry is low priority). +//! +//! `SidecarSender` takes `&mut self`; the caller is responsible for exclusive access. + +use crate::service::{ + sidecar_interface::{DynamicInstrumentationConfigState, SidecarInterfaceChannel, SidecarInterfaceRequest}, + InstanceId, QueueId, SerializedTracerHeaderTags, SessionConfig, SidecarAction, +}; +use datadog_ipc::platform::ShmHandle; +use datadog_live_debugger::sender::DebuggerType; +use libdd_common::tag::Tag; +use libdd_dogstatsd_client::DogStatsDActionOwned; +use std::{io, time::Duration}; + +// --------------------------------------------------------------------------- +// Outbox +// --------------------------------------------------------------------------- + +/// Priority outbox for state-change (coalesced) messages. +/// +/// Each slot holds the most recent pending message of its kind. +/// Slots are drained in field order (priority order) before fire-and-forget sends. 
+#[derive(Default)] +struct SidecarOutbox { + set_session_config: Option, + set_session_process_tags: Option, + set_universal_service_tags: Option, + set_request_config: Option, + shutdown_runtime: Option, + shutdown_session: Option, +} + +impl SidecarOutbox { + fn slots_mut(&mut self) -> [&mut Option; 6] { + [ + &mut self.set_session_config, + &mut self.set_session_process_tags, + &mut self.set_universal_service_tags, + &mut self.set_request_config, + &mut self.shutdown_runtime, + &mut self.shutdown_session, + ] + } +} + +// --------------------------------------------------------------------------- +// Outbox coalescing helpers +// --------------------------------------------------------------------------- + +fn cancel_if_instance(slot: &mut Option, instance_id: &InstanceId) { + let should_cancel = match slot { + Some(SidecarInterfaceRequest::SetUniversalServiceTags { instance_id: id, .. }) => { + id == instance_id + } + Some(SidecarInterfaceRequest::SetRequestConfig { instance_id: id, .. }) => { + id == instance_id + } + _ => false, + }; + if should_cancel { + *slot = None; + } +} + +fn cancel_if_session(slot: &mut Option, session_id: &str) { + let should_cancel = match slot { + Some(SidecarInterfaceRequest::SetSessionConfig { session_id: id, .. }) => { + id.as_str() == session_id + } + _ => false, + }; + if should_cancel { + *slot = None; + } +} + +fn coalesce(outbox: &mut SidecarOutbox, incoming: SidecarInterfaceRequest) { + // For messages that trigger cancellations, do the cancellation first using a + // borrow, then move `incoming` into the slot. 
+ if let SidecarInterfaceRequest::ShutdownRuntime { ref instance_id } = incoming { + let id = instance_id.clone(); + cancel_if_instance(&mut outbox.set_request_config, &id); + cancel_if_instance(&mut outbox.set_universal_service_tags, &id); + } + if let SidecarInterfaceRequest::ShutdownSession { ref session_id } = incoming { + let id = session_id.clone(); + cancel_if_session(&mut outbox.set_session_config, &id); + } + + match incoming { + SidecarInterfaceRequest::SetSessionConfig { .. } => { + outbox.set_session_config = Some(incoming); + } + SidecarInterfaceRequest::SetSessionProcessTags { .. } => { + outbox.set_session_process_tags = Some(incoming); + } + SidecarInterfaceRequest::SetUniversalServiceTags { .. } => { + outbox.set_universal_service_tags = Some(incoming); + } + SidecarInterfaceRequest::SetRequestConfig { .. } => { + outbox.set_request_config = Some(incoming); + } + SidecarInterfaceRequest::ShutdownRuntime { .. } => { + outbox.shutdown_runtime = Some(incoming); + } + SidecarInterfaceRequest::ShutdownSession { .. } => { + outbox.shutdown_session = Some(incoming); + } + _ => { + // Non-outbox messages should not be routed here. + } + } +} + +// --------------------------------------------------------------------------- +// SidecarSender +// --------------------------------------------------------------------------- + +/// Higher-level IPC sender with outbox coalescing and telemetry load-shedding. +/// +/// Takes `&mut self` — callers are responsible for exclusive access. +pub struct SidecarSender { + pub channel: SidecarInterfaceChannel, + outbox: SidecarOutbox, + /// Cycles 0–9; used to implement 90% telemetry drop under backpressure. + enqueue_actions_counter: u8, +} + +impl SidecarSender { + pub fn new(channel: SidecarInterfaceChannel) -> Self { + Self { + channel, + outbox: SidecarOutbox::default(), + enqueue_actions_counter: 0, + } + } + + /// Non-blocking drain of the outbox. Returns `true` if all messages were sent. 
+ fn try_drain_outbox(&mut self) -> bool { + self.channel.0.drain_acks(); + for slot in self.outbox.slots_mut() { + if let Some(msg) = slot { + if self.channel.0.outstanding() >= self.channel.0.max_outstanding { + return false; + } + if !self.channel.try_send_request(msg) { + return false; + } + *slot = None; + } + } + true + } + + /// Blocking drain of the outbox (used before blocking calls). + fn drain_outbox_blocking(&mut self) { + self.channel.0.drain_acks(); + for slot in self.outbox.slots_mut() { + if let Some(msg) = slot.take() { + self.channel.send_request_blocking(&msg).ok(); + } + } + } + + /// Drain outbox blocking, then send pre-serialized bytes blocking (no fds). + /// + /// Returns `Err(BrokenPipe)` (or another I/O error) when the connection is broken, + /// allowing callers to detect failure and trigger reconnect via `SidecarTransport::with_retry`. + /// Only suitable for requests that transfer no file descriptors (e.g. `enqueue_actions`). + pub fn drain_and_send_raw_blocking(&mut self, data: &[u8]) -> io::Result<()> { + self.drain_outbox_blocking(); + self.channel.0.send_blocking(&mut data.to_vec(), &[]) + } + + // ------------------------------------------------------------------------- + // Outbox-coalesced state-change methods + // ------------------------------------------------------------------------- + + pub fn set_session_config( + &mut self, + session_id: String, + #[cfg(windows)] remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, + config: SessionConfig, + is_fork: bool, + ) { + coalesce( + &mut self.outbox, + SidecarInterfaceRequest::SetSessionConfig { + session_id, + #[cfg(windows)] remote_config_notify_function, + config, + is_fork, + }, + ); + self.try_drain_outbox(); + } + + pub fn set_session_process_tags(&mut self, session_id: String, process_tags: String) { + coalesce( + &mut self.outbox, + SidecarInterfaceRequest::SetSessionProcessTags { + session_id, + process_tags, + }, + ); + 
self.try_drain_outbox(); + } + + pub fn set_universal_service_tags( + &mut self, + instance_id: InstanceId, + queue_id: QueueId, + service_name: String, + env_name: String, + app_version: String, + global_tags: Vec, + dynamic_instrumentation_state: DynamicInstrumentationConfigState, + ) { + coalesce( + &mut self.outbox, + SidecarInterfaceRequest::SetUniversalServiceTags { + instance_id, + queue_id, + service_name, + env_name, + app_version, + global_tags, + dynamic_instrumentation_state, + }, + ); + self.try_drain_outbox(); + } + + pub fn set_request_config( + &mut self, + instance_id: InstanceId, + queue_id: QueueId, + dynamic_instrumentation_state: DynamicInstrumentationConfigState, + ) { + coalesce( + &mut self.outbox, + SidecarInterfaceRequest::SetRequestConfig { + instance_id, + queue_id, + dynamic_instrumentation_state, + }, + ); + self.try_drain_outbox(); + } + + pub fn shutdown_runtime(&mut self, instance_id: InstanceId) { + coalesce( + &mut self.outbox, + SidecarInterfaceRequest::ShutdownRuntime { instance_id }, + ); + self.try_drain_outbox(); + } + + pub fn shutdown_session(&mut self, session_id: String) { + coalesce( + &mut self.outbox, + SidecarInterfaceRequest::ShutdownSession { session_id }, + ); + self.try_drain_outbox(); + } + + // ------------------------------------------------------------------------- + // Fire-and-forget methods (drain outbox first, then send; drop on EAGAIN) + // ------------------------------------------------------------------------- + + /// Enqueue telemetry actions. + /// + /// When `outstanding > max_outstanding / 2`, 90% of calls are dropped to shed load. + pub fn enqueue_actions( + &mut self, + instance_id: InstanceId, + queue_id: QueueId, + actions: Vec, + ) { + if !self.try_drain_outbox() { + return; + } + // Load-shed: drop 90% when buffer is more than half full. 
+ if self.channel.0.outstanding() > self.channel.0.max_outstanding / 2 { + self.enqueue_actions_counter = self.enqueue_actions_counter.wrapping_add(1) % 10; + if self.enqueue_actions_counter != 0 { + return; + } + // The 1-in-10 that passes through falls to the try_send below. + } + self.channel.try_send_enqueue_actions(instance_id, queue_id, actions); + } + + pub fn send_trace_v04_shm( + &mut self, + instance_id: InstanceId, + handle: ShmHandle, + len: usize, + headers: SerializedTracerHeaderTags, + ) { + if !self.try_drain_outbox() { + return; + } + self.channel + .try_send_send_trace_v04_shm(instance_id, handle, len, headers); + } + + pub fn send_trace_v04_bytes( + &mut self, + instance_id: InstanceId, + data: Vec, + headers: SerializedTracerHeaderTags, + ) { + if !self.try_drain_outbox() { + return; + } + self.channel + .try_send_send_trace_v04_bytes(instance_id, data, headers); + } + + pub fn send_debugger_data_shm( + &mut self, + instance_id: InstanceId, + queue_id: QueueId, + handle: ShmHandle, + debugger_type: DebuggerType, + ) { + if !self.try_drain_outbox() { + return; + } + self.channel + .try_send_send_debugger_data_shm(instance_id, queue_id, handle, debugger_type); + } + + pub fn send_debugger_diagnostics( + &mut self, + instance_id: InstanceId, + queue_id: QueueId, + diagnostics_payload: Vec, + ) { + if !self.try_drain_outbox() { + return; + } + self.channel + .try_send_send_debugger_diagnostics(instance_id, queue_id, diagnostics_payload); + } + + pub fn acquire_exception_hash_rate_limiter( + &mut self, + exception_hash: u64, + granularity: Duration, + ) { + if !self.try_drain_outbox() { + return; + } + self.channel + .try_send_acquire_exception_hash_rate_limiter(exception_hash, granularity); + } + + pub fn send_dogstatsd_actions( + &mut self, + instance_id: InstanceId, + actions: Vec, + ) { + if !self.try_drain_outbox() { + return; + } + self.channel + .try_send_send_dogstatsd_actions(instance_id, actions); + } + + pub fn set_test_session_token(&mut 
self, session_id: String, token: String) { + if !self.try_drain_outbox() { + return; + } + self.channel.try_send_set_test_session_token(session_id, token); + } + + pub fn set_read_timeout(&mut self, d: Option) -> io::Result<()> { + self.channel.0.set_read_timeout(d) + } + + pub fn set_write_timeout(&mut self, d: Option) -> io::Result<()> { + self.channel.0.set_write_timeout(d) + } + + // ------------------------------------------------------------------------- + // Blocking methods (drain outbox blocking first, then call) + // ------------------------------------------------------------------------- + + pub fn flush_traces(&mut self) -> io::Result<()> { + self.drain_outbox_blocking(); + self.channel.call_flush_traces() + } + + pub fn ping(&mut self) -> io::Result<()> { + self.drain_outbox_blocking(); + self.channel.call_ping() + } + + pub fn dump(&mut self) -> Result { + self.drain_outbox_blocking(); + self.channel.call_dump() + } + + pub fn stats(&mut self) -> Result { + self.drain_outbox_blocking(); + self.channel.call_stats() + } +} diff --git a/datadog-sidecar/src/service/session_info.rs b/datadog-sidecar/src/service/session_info.rs index 34495cb67a..0118257cac 100644 --- a/datadog-sidecar/src/service/session_info.rs +++ b/datadog-sidecar/src/service/session_info.rs @@ -15,8 +15,7 @@ use crate::{spawn_map_err, tracer}; use datadog_live_debugger::sender::{DebuggerType, PayloadSender}; use datadog_remote_config::fetch::ConfigOptions; use libdd_common::{tag::Tag, MutexExt}; -use tracing::log::warn; -use tracing::{debug, error, info, trace}; +use tracing::{debug, error, info, trace, warn}; use crate::service::agent_info::AgentInfoGuard; use crate::service::{InstanceId, QueueId, RuntimeInfo}; @@ -38,6 +37,9 @@ pub(crate) struct SessionInfo { #[cfg(windows)] pub(crate) remote_config_notify_function: Arc>, + #[cfg(windows)] + pub(crate) process_handle: + Arc>>, pub(crate) log_guard: Arc, MultiWriterGuard<'static>)>>>, pub(crate) session_id: String, @@ -59,6 +61,8 @@ 
impl Clone for SessionInfo { remote_config_interval: self.remote_config_interval.clone(), #[cfg(windows)] remote_config_notify_function: self.remote_config_notify_function.clone(), + #[cfg(windows)] + process_handle: self.process_handle.clone(), log_guard: self.log_guard.clone(), session_id: self.session_id.clone(), pid: self.pid.clone(), diff --git a/datadog-sidecar/src/service/sidecar_interface.rs b/datadog-sidecar/src/service/sidecar_interface.rs index eeddcde5e9..a3c6719d19 100644 --- a/datadog-sidecar/src/service/sidecar_interface.rs +++ b/datadog-sidecar/src/service/sidecar_interface.rs @@ -3,27 +3,14 @@ #![allow(clippy::too_many_arguments)] -use crate::service::{ - InstanceId, QueueId, RequestIdentification, RequestIdentifier, SerializedTracerHeaderTags, - SessionConfig, SidecarAction, -}; -use anyhow::Result; +use crate::service::{InstanceId, QueueId, SerializedTracerHeaderTags, SessionConfig, SidecarAction}; use datadog_ipc::platform::ShmHandle; -use datadog_ipc::tarpc; use datadog_live_debugger::sender::DebuggerType; use libdd_common::tag::Tag; use libdd_dogstatsd_client::DogStatsDActionOwned; use serde::{Deserialize, Serialize}; use std::time::Duration; -// This is a bit weird, but depending on the OS we're interested in different things... -// and the macro expansion is not going to be happy with #[cfg()] instructions inside them. -// So we'll just define a type, a pid on unix, a function pointer on windows. -#[cfg(unix)] -type RemoteConfigNotifyTarget = libc::pid_t; -#[cfg(windows)] -type RemoteConfigNotifyTarget = crate::service::remote_configs::RemoteConfigNotifyFunction; - #[repr(C)] #[derive(Debug, Eq, PartialEq, Copy, Clone, Serialize, Deserialize)] pub enum DynamicInstrumentationConfigState { @@ -36,9 +23,7 @@ pub enum DynamicInstrumentationConfigState { /// /// These methods include operations such as enqueueing actions, registering services, setting /// session configurations, and sending traces. 
-#[datadog_sidecar_macros::extract_request_id] -#[datadog_ipc_macros::impl_transfer_handles] -#[tarpc::service] +#[datadog_ipc_macros::service] pub trait SidecarInterface { /// Enqueues a list of actions to be performed. /// @@ -60,10 +45,9 @@ pub trait SidecarInterface { /// * `session_id` - The ID of the session. /// * `pid` - The pid of the sidecar client. /// * `config` - The configuration to be set. - #[force_backpressure] async fn set_session_config( session_id: String, - remote_config_notify_target: RemoteConfigNotifyTarget, + #[cfg(windows)] remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, config: SessionConfig, is_fork: bool, ); @@ -74,7 +58,6 @@ pub trait SidecarInterface { /// /// * `session_id` - The ID of the session. /// * `process_tags` - The process tags. - #[force_backpressure] async fn set_session_process_tags(session_id: String, process_tags: Vec); /// Shuts down a runtime. @@ -82,7 +65,6 @@ pub trait SidecarInterface { /// # Arguments /// /// * `instance_id` - The ID of the instance. - #[force_backpressure] async fn shutdown_runtime(instance_id: InstanceId); /// Shuts down a session. @@ -90,7 +72,6 @@ pub trait SidecarInterface { /// # Arguments /// /// * `session_id` - The ID of the session. - #[force_backpressure] async fn shutdown_session(session_id: String); /// Sends a trace via shared memory. @@ -169,7 +150,6 @@ pub trait SidecarInterface { /// * `global_tags` - Global tags which need to be propagated. /// * `dynamic_instrumentation_state` - Whether dynamic instrumentation is enabled, disabled or /// not set. - #[force_backpressure] async fn set_universal_service_tags( instance_id: InstanceId, queue_id: QueueId, @@ -187,7 +167,6 @@ pub trait SidecarInterface { /// * `queue_id` - The unique identifier for the trace context. /// * `dynamic_instrumentation_state` - Whether dynamic instrumentation is enabled, disabled or /// not set. 
- #[force_backpressure] async fn set_request_config( instance_id: InstanceId, queue_id: QueueId, @@ -203,7 +182,7 @@ pub trait SidecarInterface { async fn send_dogstatsd_actions(instance_id: InstanceId, actions: Vec); /// Flushes any outstanding traces queued for sending. - #[force_backpressure] + #[blocking] async fn flush_traces(); /// Sets x-datadog-test-session-token on all requests for the given session. @@ -215,7 +194,7 @@ pub trait SidecarInterface { async fn set_test_session_token(session_id: String, token: String); /// Sends a ping to the service. - #[force_backpressure] + #[blocking] async fn ping(); /// Dumps the current state of the service. @@ -223,7 +202,6 @@ pub trait SidecarInterface { /// # Returns /// /// A string representation of the current state of the service. - #[force_backpressure] async fn dump() -> String; /// Retrieves the current statistics of the service. @@ -231,6 +209,5 @@ pub trait SidecarInterface { /// # Returns /// /// A string representation of the current statistics of the service. 
- #[force_backpressure] async fn stats() -> String; } diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index d3655f6bcb..01da311573 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -3,20 +3,14 @@ use crate::log::{TemporarilyRetainedMapStats, MULTI_LOG_FILTER, MULTI_LOG_WRITER}; use crate::service::{ - sidecar_interface::ServeSidecarInterface, + sidecar_interface::serve_sidecar_interface_connection, telemetry::{TelemetryCachedClient, TelemetryCachedClientSet}, tracing::TraceFlusher, - DynamicInstrumentationConfigState, InstanceId, QueueId, RequestIdentification, - RequestIdentifier, RuntimeInfo, RuntimeMetadata, SerializedTracerHeaderTags, SessionConfig, - SessionInfo, SidecarAction, SidecarInterface, SidecarInterfaceRequest, - SidecarInterfaceResponse, + DynamicInstrumentationConfigState, InstanceId, QueueId, RuntimeInfo, RuntimeMetadata, + SerializedTracerHeaderTags, SessionConfig, SessionInfo, SidecarAction, SidecarInterface, }; -use datadog_ipc::platform::{AsyncChannel, ShmHandle}; -use datadog_ipc::tarpc; -use datadog_ipc::tarpc::context::Context; -use datadog_ipc::transport::Transport; -use futures::future; -use futures::future::Ready; +use datadog_ipc::platform::{FileBackedHandle, ShmHandle}; +use datadog_ipc::{PeerCredentials, SeqpacketConn}; use libdd_common::{Endpoint, MutexExt}; use libdd_telemetry::worker::{LifecycleAction, TelemetryActions, TelemetryWorkerStats}; use libdd_trace_utils::trace_utils::SendData; @@ -25,16 +19,13 @@ use libdd_trace_utils::tracer_payload::TraceEncoding; use manual_future::ManualFutureCompleter; use std::borrow::Cow; use std::collections::hash_map::Entry; -use std::collections::{HashMap, HashSet}; -use std::pin::Pin; +use std::collections::HashMap; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::{Duration, SystemTime}; use tracing::{debug, error, info, trace, 
warn}; -use futures::FutureExt; use serde::{Deserialize, Serialize}; -use tokio::task::{JoinError, JoinHandle}; use crate::config::get_product_endpoint; use crate::service::agent_info::AgentInfos; @@ -45,8 +36,6 @@ use crate::service::exception_hash_rate_limiter::EXCEPTION_HASH_LIMITER; use crate::service::remote_configs::{RemoteConfigNotifyTarget, RemoteConfigs}; use crate::service::tracing::trace_flusher::TraceFlusherStats; use crate::tokio_util::run_or_spawn_shared; -use datadog_ipc::platform::FileBackedHandle; -use datadog_ipc::tarpc::server::{Channel, InFlightRequest}; use datadog_live_debugger::sender::{agent_info_supports_debugger_v2_endpoint, DebuggerType}; use datadog_remote_config::fetch::{ConfigInvariants, ConfigOptions, MultiTargetStats}; use libdd_common::tag::Tag; @@ -55,12 +44,44 @@ use libdd_telemetry::config::Config; use libdd_tinybytes as tinybytes; use libdd_trace_utils::tracer_header_tags::TracerHeaderTags; -type NoResponse = Ready<()>; +/// A Windows process handle used for remote config notification. +/// +/// Wraps a raw `HANDLE` value (from `OpenProcess`). The handle is intentionally not +/// closed on drop — it is valid for the lifetime of the session. 
+#[cfg(windows)] +#[derive(Clone)] +pub struct ProcessHandle(pub winapi::um::winnt::HANDLE); + +#[cfg(windows)] +impl std::fmt::Debug for ProcessHandle { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ProcessHandle({:p})", self.0) + } +} + +#[cfg(windows)] +impl PartialEq for ProcessHandle { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +#[cfg(windows)] +impl Eq for ProcessHandle {} -fn no_response() -> NoResponse { - future::ready(()) +#[cfg(windows)] +impl std::hash::Hash for ProcessHandle { + fn hash(&self, state: &mut H) { + (self.0 as usize).hash(state); + } } +#[cfg(windows)] +unsafe impl Send for ProcessHandle {} + +#[cfg(windows)] +unsafe impl Sync for ProcessHandle {} + #[derive(Debug, Serialize, Deserialize)] pub struct SidecarStats { trace_flusher: TraceFlusherStats, @@ -79,15 +100,6 @@ pub struct SidecarStats { log_filter: TemporarilyRetainedMapStats, } -#[cfg(windows)] -#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] -pub struct ProcessHandle(pub winapi::um::winnt::HANDLE); - -#[cfg(windows)] -unsafe impl Send for ProcessHandle {} -#[cfg(windows)] -unsafe impl Sync for ProcessHandle {} - /// The `SidecarServer` struct represents a server that handles sidecar operations. /// /// It maintains a list of active sessions and a counter for each session. @@ -114,62 +126,92 @@ pub struct SidecarServer { remote_configs: RemoteConfigs, /// Diagnostics bookkeeper debugger_diagnostics_bookkeeper: Arc, - /// The ProcessHandle tied to the connection - #[cfg(windows)] - process_handle: Option, +} + +/// Per-connection handler wrapper that tracks sessions/instances for cleanup on disconnect. 
+struct ConnectionSidecarHandler { + server: SidecarServer, + sessions: std::sync::Mutex>, + instances: std::sync::Mutex>, +} + +impl ConnectionSidecarHandler { + fn new(server: SidecarServer) -> Self { + Self { + server, + sessions: Default::default(), + instances: Default::default(), + } + } + + fn track_session(&self, session_id: &str) { + if self.sessions.lock_or_panic().insert(session_id.to_owned()) { + let mut counter = self.server.session_counter.lock_or_panic(); + match counter.entry(session_id.to_owned()) { + Entry::Occupied(mut e) => { + e.insert(e.get() + 1); + } + Entry::Vacant(e) => { + e.insert(1); + } + } + } + } + + fn track_instance(&self, instance_id: &InstanceId) { + self.instances.lock_or_panic().insert(instance_id.clone()); + } + + async fn cleanup(&self) { + let sessions: Vec = self.sessions.lock_or_panic().iter().cloned().collect(); + let instances: Vec = self.instances.lock_or_panic().iter().cloned().collect(); + + for session_id in &sessions { + let stop = { + let mut counter = self.server.session_counter.lock_or_panic(); + if let Entry::Occupied(mut entry) = counter.entry(session_id.clone()) { + if entry.insert(entry.get() - 1) == 1 { + entry.remove(); + true + } else { + false + } + } else { + false + } + }; + if stop { + self.server.stop_session(session_id).await; + } + } + for instance_id in instances { + let maybe_session = self + .server + .sessions + .lock_or_panic() + .get(&instance_id.session_id) + .cloned(); + if let Some(session) = maybe_session { + session.shutdown_runtime(&instance_id.runtime_id).await; + } + } + } } impl SidecarServer { /// Accepts a new connection and starts processing requests. /// - /// This function creates a new `tarpc` server with the provided `async_channel` and starts - /// processing incoming requests. It also starts a session interceptor to keep track of active - /// sessions and submitted payload counts. 
+ /// This function creates a per-connection `ConnectionSidecarHandler` and serves the connection, + /// then runs cleanup when the connection closes. /// /// # Arguments /// - /// * `async_channel`: An `AsyncChannel` that represents the connection to the client. - #[cfg_attr(not(windows), allow(unused_mut))] - pub async fn accept_connection(mut self, async_channel: AsyncChannel) { - let handle = async_channel.handle(); - #[cfg(windows)] - #[allow(clippy::unwrap_used)] - { - self.process_handle = async_channel - .metadata - .lock() - .unwrap() - .process_handle() - .map(|p| ProcessHandle(p as winapi::um::winnt::HANDLE)); - } - let server = tarpc::server::BaseChannel::new( - tarpc::server::Config { - pending_response_buffer: 10000, - }, - Transport::from(async_channel), - ); - let mut executor = datadog_ipc::sequential::execute_sequential( - server.requests(), - self.clone().serve(), - 500, - ) - .with_backpressure(SidecarInterfaceRequest::requires_backpressure); - let (tx, rx) = tokio::sync::mpsc::channel::<_>(100); - let tx = executor.swap_sender(tx); - - let session_interceptor = tokio::spawn(session_interceptor( - self.session_counter.clone(), - self.submitted_payloads.clone(), - rx, - tx, - )); - - if let Err(e) = executor.await { - warn!("Error from executor for handle {handle}: {e:?}"); - } - - self.process_interceptor_response(session_interceptor.await) - .await; + /// * `conn`: A `SeqpacketConn` that represents the connection to the client. + pub async fn accept_connection(self, conn: SeqpacketConn) { + let handler = Arc::new(ConnectionSidecarHandler::new(self)); + let handler_for_cleanup = handler.clone(); + serve_sidecar_interface_connection(conn, handler).await; + handler_for_cleanup.cleanup().await; } /// Returns the number of active sidecar sessions. 
@@ -181,48 +223,6 @@ impl SidecarServer { self.session_counter.lock_or_panic().len() } - async fn process_interceptor_response( - &self, - result: Result<(HashSet, HashSet), JoinError>, - ) { - match result { - Ok((sessions, instances)) => { - for session in sessions { - let stop = { - let mut counter = self.session_counter.lock_or_panic(); - if let Entry::Occupied(mut entry) = counter.entry(session.to_owned()) { - if entry.insert(entry.get() - 1) == 1 { - entry.remove(); - true - } else { - false - } - } else { - false - } - }; - if stop { - self.stop_session(&session).await; - } - } - for instance_id in instances { - let maybe_session = self - .sessions - .lock_or_panic() - .get(&instance_id.session_id) - .cloned(); - if let Some(session) = maybe_session { - session.shutdown_runtime(&instance_id.runtime_id).await; - } - } - } - Err(e) => { - // TODO: APMSP-1076 - Do we need to do more than just log this error? - debug!("session interceptor encountered an error: {:?}", e); - } - } - } - pub(crate) fn get_session(&self, session_id: &String) -> SessionInfo { let mut sessions = self.sessions.lock_or_panic(); match sessions.get(session_id) { @@ -297,11 +297,17 @@ impl SidecarServer { #[cfg(windows)] #[allow(clippy::unwrap_used)] fn get_notify_target(&self, session: &SessionInfo) -> Option { - self.process_handle.map(|handle| RemoteConfigNotifyTarget { - process_handle: handle, - notify_function: *session.remote_config_notify_function.lock().unwrap(), + let notify_function = *session.remote_config_notify_function.lock().unwrap(); + if notify_function.0.is_null() { + return None; + } + let process_handle = session.process_handle.lock_or_panic().clone()?; + Some(RemoteConfigNotifyTarget { + process_handle, + notify_function, }) } + #[cfg(unix)] fn get_notify_target(&self, session: &SessionInfo) -> Option { Some(RemoteConfigNotifyTarget { @@ -381,18 +387,13 @@ impl SidecarServer { pub fn shutdown(&self) { self.remote_configs.shutdown(); } -} -impl SidecarInterface for 
SidecarServer { - type EnqueueActionsFut = NoResponse; - - fn enqueue_actions( - self, - _context: Context, + fn enqueue_actions_impl( + &self, instance_id: InstanceId, queue_id: QueueId, actions: Vec, - ) -> Self::EnqueueActionsFut { + ) { let session = self.get_session(&instance_id.session_id); let trace_config = session.get_trace_config(); let runtime_metadata = RuntimeMetadata::new( @@ -409,8 +410,7 @@ impl SidecarInterface for SidecarServer { if actions.len() == 1 && matches!(actions[0], SidecarAction::ClearQueueId) { info!("Removing queue_id {queue_id:?} from instance {instance_id:?}"); entry.remove(); - - return no_response(); + return; } let service = entry @@ -538,33 +538,34 @@ impl SidecarInterface for SidecarServer { } else { info!("No application found for instance {instance_id:?} and queue_id {queue_id:?}"); } - - no_response() } - type SetSessionConfigFut = Pin>>; - - fn set_session_config( - self, - _: Context, + async fn set_session_config_impl( + &self, session_id: String, - #[cfg(unix)] pid: libc::pid_t, - #[cfg(windows)] - remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, + peer_pid: u32, + #[cfg(windows)] remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, config: SessionConfig, is_fork: bool, - ) -> Self::SetSessionConfigFut { + ) { debug!("Set session config for {session_id} to {config:?}"); let session = self.get_session(&session_id); - #[cfg(unix)] - { - session.pid.store(pid, Ordering::Relaxed); - } + session.pid.store(peer_pid as i32, Ordering::Relaxed); #[cfg(windows)] #[allow(clippy::unwrap_used)] { *session.remote_config_notify_function.lock().unwrap() = remote_config_notify_function; + let handle = unsafe { + winapi::um::processthreadsapi::OpenProcess( + winapi::um::winnt::PROCESS_ALL_ACCESS, + 0, + peer_pid, + ) + }; + if !handle.is_null() { + *session.process_handle.lock_or_panic() = Some(ProcessHandle(handle)); + } } 
*session.remote_config_enabled.lock_or_panic() = config.remote_config_enabled; *session.process_tags.lock_or_panic() = config.process_tags.clone(); @@ -649,64 +650,45 @@ impl SidecarInterface for SidecarServer { }); } - Box::pin(async move { - if !is_fork { - session.shutdown_running_instances().await; - } - no_response().await - }) + if !is_fork { + session.shutdown_running_instances().await; + } } - type SetSessionProcessTagsFut = NoResponse; - - fn set_session_process_tags( - self, - _: Context, - session_id: String, - process_tags: Vec, - ) -> Self::SetSessionProcessTagsFut { + fn set_session_process_tags_impl(&self, session_id: String, process_tags: Vec) { let session = self.get_session(&session_id); *session.process_tags.lock_or_panic() = process_tags; - no_response() } - type ShutdownRuntimeFut = NoResponse; - - fn shutdown_runtime(self, _: Context, instance_id: InstanceId) -> Self::ShutdownRuntimeFut { + fn shutdown_runtime_impl(&self, instance_id: InstanceId) { let session = self.get_session(&instance_id.session_id); tokio::spawn(async move { session.shutdown_runtime(&instance_id.runtime_id).await }); - - no_response() } - type ShutdownSessionFut = NoResponse; - - fn shutdown_session(self, _: Context, session_id: String) -> Self::ShutdownSessionFut { - tokio::spawn(async move { SidecarServer::stop_session(&self, &session_id).await }); - no_response() + fn shutdown_session_impl(&self, session_id: String) { + let server = self.clone(); + tokio::spawn(async move { server.stop_session(&session_id).await }); } - type SendTraceV04ShmFut = NoResponse; - - fn send_trace_v04_shm( - self, - _: Context, + fn send_trace_v04_shm_impl( + &self, instance_id: InstanceId, handle: ShmHandle, _len: usize, headers: SerializedTracerHeaderTags, - ) -> Self::SendTraceV04ShmFut { + ) { if let Some(endpoint) = self .get_session(&instance_id.session_id) .get_trace_config() .endpoint .clone() { + let server = self.clone(); tokio::spawn(async move { match handle.map() { Ok(mapped) => 
{ let bytes = tinybytes::Bytes::from(mapped); - self.send_trace_v04(&headers, bytes, &endpoint); + server.send_trace_v04(&headers, bytes, &endpoint); } Err(e) => error!("Failed mapping shared trace data memory: {}", e), } @@ -717,28 +699,24 @@ impl SidecarInterface for SidecarServer { instance_id.session_id ); } - - no_response() } - type SendTraceV04BytesFut = NoResponse; - - fn send_trace_v04_bytes( - self, - _: Context, + fn send_trace_v04_bytes_impl( + &self, instance_id: InstanceId, data: Vec, headers: SerializedTracerHeaderTags, - ) -> Self::SendTraceV04BytesFut { + ) { if let Some(endpoint) = self .get_session(&instance_id.session_id) .get_trace_config() .endpoint .clone() { + let server = self.clone(); tokio::spawn(async move { let bytes = tinybytes::Bytes::from(data); - self.send_trace_v04(&headers, bytes, &endpoint); + server.send_trace_v04(&headers, bytes, &endpoint); }); } else { warn!( @@ -746,20 +724,15 @@ impl SidecarInterface for SidecarServer { instance_id.session_id ); } - - no_response() } - type SendDebuggerDataShmFut = NoResponse; - - fn send_debugger_data_shm( - self, - _: Context, + fn send_debugger_data_shm_impl( + &self, instance_id: InstanceId, queue_id: QueueId, handle: ShmHandle, debugger_type: DebuggerType, - ) -> Self::SendDebuggerDataShmFut { + ) { let session = self.get_session(&instance_id.session_id); match handle.map() { Ok(mapped) => { @@ -772,19 +745,14 @@ impl SidecarInterface for SidecarServer { } Err(e) => error!("Failed mapping shared debugger data memory: {}", e), } - - no_response() } - type SendDebuggerDiagnosticsFut = NoResponse; - - fn send_debugger_diagnostics( - self, - _: Context, + fn send_debugger_diagnostics_impl( + &self, instance_id: InstanceId, queue_id: QueueId, diagnostics_payload: Vec, - ) -> Self::SendDebuggerDiagnosticsFut { + ) { let session = self.get_session(&instance_id.session_id); #[allow(clippy::unwrap_used)] let payload = serde_json::from_slice(diagnostics_payload.as_slice()).unwrap(); @@ -799,30 
+767,16 @@ impl SidecarInterface for SidecarServer { serde_json::to_vec(&vec![payload]).unwrap(), ); } - - no_response() } - type AcquireExceptionHashRateLimiterFut = NoResponse; - - fn acquire_exception_hash_rate_limiter( - self, - _: Context, - exception_hash: u64, - granularity: Duration, - ) -> Self::AcquireExceptionHashRateLimiterFut { + fn acquire_exception_hash_rate_limiter_impl(&self, exception_hash: u64, granularity: Duration) { EXCEPTION_HASH_LIMITER .lock_or_panic() .add(exception_hash, granularity); - - no_response() } - type SetUniversalServiceTagsFut = NoResponse; - - fn set_universal_service_tags( - self, - _: Context, + fn set_universal_service_tags_impl( + &self, instance_id: InstanceId, queue_id: QueueId, service_name: String, @@ -830,7 +784,7 @@ impl SidecarInterface for SidecarServer { app_version: String, global_tags: Vec, dynamic_instrumentation_state: DynamicInstrumentationConfigState, - ) -> Self::SetUniversalServiceTagsFut { + ) { debug!("Registered remote config metadata: instance {instance_id:?}, queue_id: {queue_id:?}, service: {service_name}, env: {env_name}, version: {app_version}"); let session = self.get_session(&instance_id.session_id); @@ -839,7 +793,7 @@ impl SidecarInterface for SidecarServer { let app = applications.entry(queue_id).or_default(); app.set_metadata(env_name, app_version, service_name, global_tags); let Some(notify_target) = self.get_notify_target(&session) else { - return no_response(); + return; }; app.update_remote_config( &self.remote_configs, @@ -848,25 +802,20 @@ impl SidecarInterface for SidecarServer { notify_target, dynamic_instrumentation_state, ); - - no_response() } - type SetRequestConfigFut = NoResponse; - - fn set_request_config( - self, - _: Context, + fn set_request_config_impl( + &self, instance_id: InstanceId, queue_id: QueueId, dynamic_instrumentation_state: DynamicInstrumentationConfigState, - ) -> Self::SetRequestConfigFut { + ) { let session = self.get_session(&instance_id.session_id); let 
runtime_info = session.get_runtime(&instance_id.runtime_id); let mut applications = runtime_info.lock_applications(); let app = applications.entry(queue_id).or_default(); let Some(notify_target) = self.get_notify_target(&session) else { - return no_response(); + return; }; app.update_remote_config( &self.remote_configs, @@ -875,48 +824,27 @@ impl SidecarInterface for SidecarServer { notify_target, dynamic_instrumentation_state, ); - - no_response() } - type SendDogstatsdActionsFut = NoResponse; - - fn send_dogstatsd_actions( - self, - _: Context, - instance_id: InstanceId, - actions: Vec, - ) -> Self::SendDogstatsdActionsFut { + fn send_dogstatsd_actions_impl(&self, instance_id: InstanceId, actions: Vec) { + let server = self.clone(); tokio::spawn(async move { - self.get_session(&instance_id.session_id) + server + .get_session(&instance_id.session_id) .get_dogstatsd() .as_ref() .inspect(|f| f.send_owned(actions)); }); - - no_response() } - type FlushTracesFut = future::Map, fn(Result<(), JoinError>)>; - - fn flush_traces(self, _: Context) -> Self::FlushTracesFut { + async fn flush_traces_impl(&self) { let flusher = self.trace_flusher.clone(); - fn report_result(result: Result<(), JoinError>) { - if let Err(e) = result { - error!("Failed flushing traces: {e:?}"); - } + if let Err(e) = tokio::spawn(async move { flusher.flush().await }).await { + error!("Failed flushing traces: {e:?}"); } - tokio::spawn(async move { flusher.flush().await }).map(report_result) } - type SetTestSessionTokenFut = NoResponse; - - fn set_test_session_token( - self, - _: Context, - session_id: String, - token: String, - ) -> Self::SetTestSessionTokenFut { + fn set_test_session_token_impl(&self, session_id: String, token: String) { let session = self.get_session(&session_id); let token = if token.is_empty() { None @@ -934,78 +862,218 @@ impl SidecarInterface for SidecarServer { // session.configure_dogstatsd(|cfg| { // update_cfg(cfg.endpoint.take(), |e| cfg.set_endpoint(e), &token); // }); + 
} - no_response() + async fn dump_impl(&self) -> String { + crate::dump::dump().await } - type PingFut = Ready<()>; + async fn stats_impl(&self) -> String { + let stats = self.compute_stats().await; + #[allow(clippy::expect_used)] + simd_json::serde::to_string(&stats).expect("unable to serialize stats to string") + } +} - fn ping(self, _: Context) -> Ready<()> { - future::ready(()) +impl SidecarInterface for ConnectionSidecarHandler { + async fn enqueue_actions( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + queue_id: QueueId, + actions: Vec, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + self.server.enqueue_actions_impl(instance_id, queue_id, actions); } - type DumpFut = Pin>>; + async fn set_session_config( + &self, + peer: PeerCredentials, + session_id: String, + #[cfg(windows)] remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, + config: SessionConfig, + is_fork: bool, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_session(&session_id); + self.server + .set_session_config_impl( + session_id, + peer.pid, + #[cfg(windows)] remote_config_notify_function, + config, + is_fork, + ) + .await; + } - fn dump(self, _: Context) -> Self::DumpFut { - Box::pin(crate::dump::dump()) + async fn set_session_process_tags( + &self, + _peer: PeerCredentials, + session_id: String, + process_tags: String, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_session(&session_id); + self.server + .set_session_process_tags_impl(session_id, process_tags); } - type StatsFut = Pin>>; + async fn shutdown_runtime(&self, _peer: PeerCredentials, instance_id: InstanceId) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + self.server.shutdown_runtime_impl(instance_id); + } - fn stats(self, _: Context) -> Self::StatsFut { - let this = self.clone(); - 
#[allow(clippy::expect_used)] - Box::pin(async move { - let stats = this.compute_stats().await; - simd_json::serde::to_string(&stats).expect("unable to serialize stats to string") - }) + async fn shutdown_session(&self, _peer: PeerCredentials, session_id: String) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_session(&session_id); + self.server.shutdown_session_impl(session_id); } -} -// The session_interceptor function keeps track of session counts and submitted payload counts. It -// also keeps track of RequestIdentifiers and returns hashsets of session and instance ids when the -// rx channel is closed. -async fn session_interceptor( - session_counter: Arc>>, - submitted_payload_count: Arc, - mut rx: tokio::sync::mpsc::Receiver<( - ServeSidecarInterface, - InFlightRequest, - )>, - tx: tokio::sync::mpsc::Sender<( - ServeSidecarInterface, - InFlightRequest, - )>, -) -> (HashSet, HashSet) { - let mut sessions = HashSet::new(); - let mut instances = HashSet::new(); - loop { - let (serve, req) = match rx.recv().await { - None => return (sessions, instances), - Some(s) => s, - }; + async fn send_trace_v04_shm( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + handle: ShmHandle, + len: usize, + headers: SerializedTracerHeaderTags, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + self.server + .send_trace_v04_shm_impl(instance_id, handle, len, headers); + } - submitted_payload_count.fetch_add(1, Ordering::Relaxed); + async fn send_trace_v04_bytes( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + data: Vec, + headers: SerializedTracerHeaderTags, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + self.server + .send_trace_v04_bytes_impl(instance_id, data, headers); + } - let instance: RequestIdentifier = req.get().extract_identifier(); - if tx.send((serve, req)).await.is_ok() { - if let 
RequestIdentifier::InstanceId(ref instance_id) = instance { - instances.insert(instance_id.clone()); - } - if let RequestIdentifier::SessionId(session) - | RequestIdentifier::InstanceId(InstanceId { - session_id: session, - .. - }) = instance - { - if sessions.insert(session.clone()) { - match session_counter.lock_or_panic().entry(session) { - Entry::Occupied(mut entry) => entry.insert(entry.get() + 1), - Entry::Vacant(entry) => *entry.insert(1), - }; - } - } - } + async fn send_debugger_data_shm( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + queue_id: QueueId, + handle: ShmHandle, + debugger_type: DebuggerType, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + self.server + .send_debugger_data_shm_impl(instance_id, queue_id, handle, debugger_type); + } + + async fn send_debugger_diagnostics( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + queue_id: QueueId, + diagnostics_payload: Vec, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + self.server + .send_debugger_diagnostics_impl(instance_id, queue_id, diagnostics_payload); + } + + async fn acquire_exception_hash_rate_limiter( + &self, + _peer: PeerCredentials, + exception_hash: u64, + granularity: Duration, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .acquire_exception_hash_rate_limiter_impl(exception_hash, granularity); + } + + async fn set_universal_service_tags( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + queue_id: QueueId, + service_name: String, + env_name: String, + app_version: String, + global_tags: Vec, + dynamic_instrumentation_state: DynamicInstrumentationConfigState, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + self.server.set_universal_service_tags_impl( + instance_id, + queue_id, + service_name, + env_name, + 
app_version, + global_tags, + dynamic_instrumentation_state, + ); + } + + async fn set_request_config( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + queue_id: QueueId, + dynamic_instrumentation_state: DynamicInstrumentationConfigState, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + self.server + .set_request_config_impl(instance_id, queue_id, dynamic_instrumentation_state); + } + + async fn send_dogstatsd_actions( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + actions: Vec, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + self.server.send_dogstatsd_actions_impl(instance_id, actions); + } + + async fn flush_traces(&self, _peer: PeerCredentials) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server.flush_traces_impl().await; + } + + async fn set_test_session_token( + &self, + _peer: PeerCredentials, + session_id: String, + token: String, + ) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.track_session(&session_id); + self.server.set_test_session_token_impl(session_id, token); + } + + async fn ping(&self, _peer: PeerCredentials) { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + } + + async fn dump(&self, _peer: PeerCredentials) -> String { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server.dump_impl().await + } + + async fn stats(&self, _peer: PeerCredentials) -> String { + self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server.stats_impl().await } } diff --git a/datadog-sidecar/src/setup/mod.rs b/datadog-sidecar/src/setup/mod.rs index 07c837aab0..ea4f37479f 100644 --- a/datadog-sidecar/src/setup/mod.rs +++ b/datadog-sidecar/src/setup/mod.rs @@ -12,14 +12,14 @@ mod windows; #[cfg(windows)] pub use self::windows::*; -use datadog_ipc::platform::Channel; +use datadog_ipc::SeqpacketConn; 
use std::io; /// Implementations of this interface must provide behavior repeatable across processes with the /// same version of library. /// Allowing all instances of the same version of the library to establish a shared connection pub trait Liaison: Sized { - fn connect_to_server(&self) -> io::Result; + fn connect_to_server(&self) -> io::Result; fn attempt_listen(&self) -> io::Result>; fn ipc_shared() -> Self; fn ipc_per_process() -> Self; diff --git a/datadog-sidecar/src/setup/unix.rs b/datadog-sidecar/src/setup/unix.rs index 589602b9b4..3eab09657e 100644 --- a/datadog-sidecar/src/setup/unix.rs +++ b/datadog-sidecar/src/setup/unix.rs @@ -4,24 +4,22 @@ use std::sync::LazyLock; use std::{ env, fs, io, - os::unix::{ - net::{UnixListener, UnixStream}, - prelude::PermissionsExt, - }, + os::unix::prelude::PermissionsExt, path::{Path, PathBuf}, }; use crate::primary_sidecar_identifier; use crate::setup::Liaison; -use datadog_ipc::platform::{self, locks::FLock, Channel}; +use datadog_ipc::platform::locks::FLock; +use datadog_ipc::{SeqpacketConn, SeqpacketListener}; #[cfg(feature = "logging")] use log::{debug, warn}; #[cfg(not(feature = "logging"))] use tracing::{debug, warn}; -pub type IpcClient = tokio::net::UnixStream; -pub type IpcServer = UnixListener; +pub type IpcClient = SeqpacketConn; +pub type IpcServer = SeqpacketListener; fn ensure_dir_world_writable>(path: P) -> io::Result<()> { let mut perm = path.as_ref().metadata()?.permissions(); @@ -46,11 +44,11 @@ pub struct SharedDirLiaison { } impl Liaison for SharedDirLiaison { - fn connect_to_server(&self) -> io::Result { - Ok(Channel::from(UnixStream::connect(&self.socket_path)?)) + fn connect_to_server(&self) -> io::Result { + SeqpacketConn::connect(&self.socket_path) } - fn attempt_listen(&self) -> io::Result> { + fn attempt_listen(&self) -> io::Result> { let dir = self.socket_path.parent().unwrap_or_else(|| Path::new("/")); ensure_dir_exists(dir)?; @@ -66,7 +64,7 @@ impl Liaison for SharedDirLiaison { if 
self.socket_path.exists() { // if socket is already listening, then creating listener is not available - if platform::sockets::is_listening(&self.socket_path)? { + if datadog_ipc::platform::sockets::is_listening(&self.socket_path)? { debug!( "The sidecar's socket is already listening ({})", self.socket_path.as_path().display() @@ -75,7 +73,7 @@ impl Liaison for SharedDirLiaison { } fs::remove_file(&self.socket_path)?; } - Ok(Some(UnixListener::bind(&self.socket_path)?)) + Ok(Some(SeqpacketListener::bind(&self.socket_path)?)) } fn ipc_shared() -> Self { @@ -129,12 +127,12 @@ impl Default for SharedDirLiaison { // In particular, when using different mount namespaces, but a shared network namespace, the // processes don't necessarily see the same things. mod linux { - use std::{io, os::unix::net::UnixListener, path::PathBuf}; + use std::{io, path::PathBuf}; use spawn_worker::getpid; use datadog_ipc::platform; - use datadog_ipc::platform::Channel; + use datadog_ipc::{SeqpacketConn, SeqpacketListener}; use super::Liaison; @@ -144,13 +142,11 @@ mod linux { pub type DefaultLiason = AbstractUnixSocketLiaison; impl Liaison for AbstractUnixSocketLiaison { - fn connect_to_server(&self) -> io::Result { - Ok(Channel::from(platform::sockets::connect_abstract( - &self.path, - )?)) + fn connect_to_server(&self) -> io::Result { + platform::sockets::connect_abstract(&self.path) } - fn attempt_listen(&self) -> io::Result> { + fn attempt_listen(&self) -> io::Result> { match platform::sockets::bind_abstract(&self.path) { Ok(l) => Ok(Some(l)), Err(ref e) if e.kind() == io::ErrorKind::AddrInUse => Ok(None), @@ -197,14 +193,12 @@ pub type DefaultLiason = SharedDirLiaison; #[cfg(test)] mod tests { - use std::{ - io::{self, Read, Write}, - thread, - time::Duration, - }; + use std::{thread, time::Duration}; use tempfile::tempdir; + use datadog_ipc::{SeqpacketConn, SeqpacketListener}; + use super::Liaison; #[test] @@ -223,29 +217,17 @@ mod tests { T: Liaison, { { - let listener = 
liaison.attempt_listen().unwrap().unwrap(); + let listener: SeqpacketListener = liaison.attempt_listen().unwrap().unwrap(); // can't listen twice when some listener is active assert!(liaison.attempt_listen().unwrap().is_none()); - // a liaison can try connecting to existing socket to ensure its valid, adding - // connection to accept queue but we can drain any preexisting connections - // in the queue - listener.set_nonblocking(true).unwrap(); - loop { - match listener.accept() { - Ok(_) => continue, - Err(e) => { - assert_eq!(io::ErrorKind::WouldBlock, e.kind()); - break; - } - } - } - listener.set_nonblocking(false).unwrap(); - let mut client = liaison.connect_to_server().unwrap(); - let (mut srv, _) = listener.accept().unwrap(); - assert_eq!(1, client.write(&[255]).unwrap()); - let mut buf = [0; 1]; - assert_eq!(1, srv.read(&mut buf).unwrap()); + let mut client: SeqpacketConn = liaison.connect_to_server().unwrap(); + let mut srv: SeqpacketConn = listener.try_accept().unwrap(); + client.send_raw_blocking(&mut vec![255], &[]).unwrap(); + let mut buf = [0u8; 4]; + let (n, _) = srv.recv_raw_blocking(&mut buf).unwrap(); + assert_eq!(n, 1); + assert_eq!(buf[0], 255); drop(listener); drop(client); } diff --git a/datadog-sidecar/src/setup/windows.rs b/datadog-sidecar/src/setup/windows.rs index 49c39caaf1..521783cf42 100644 --- a/datadog-sidecar/src/setup/windows.rs +++ b/datadog-sidecar/src/setup/windows.rs @@ -1,161 +1,36 @@ // Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use crate::one_way_shared_memory::open_named_shm; use crate::primary_sidecar_identifier; use crate::setup::Liaison; -use arrayref::array_ref; -use datadog_ipc::platform::metadata::ProcessHandle; -use datadog_ipc::platform::{Channel, PIPE_PATH}; +use datadog_ipc::platform::PIPE_PATH; +use datadog_ipc::{SeqpacketConn, SeqpacketListener}; use libc::getpid; -use std::error::Error; use std::ffi::CString; -use std::os::windows::io::{FromRawHandle, OwnedHandle, RawHandle}; -use std::ptr::null_mut; -use std::time::{Duration, Instant}; -use std::{env, io, mem}; -use tokio::net::windows::named_pipe::NamedPipeServer; -use tracing::warn; -use winapi::{ - shared::{ - minwindef::DWORD, - winerror::{ERROR_ACCESS_DENIED, ERROR_PIPE_BUSY}, - }, - um::{ - fileapi::{CreateFileA, OPEN_EXISTING}, - handleapi::INVALID_HANDLE_VALUE, - minwinbase::SECURITY_ATTRIBUTES, - winbase::{ - CreateNamedPipeA, FILE_FLAG_FIRST_PIPE_INSTANCE, FILE_FLAG_OVERLAPPED, - PIPE_ACCESS_INBOUND, PIPE_ACCESS_OUTBOUND, PIPE_READMODE_BYTE, PIPE_TYPE_BYTE, - PIPE_UNLIMITED_INSTANCES, - }, - winnt::{GENERIC_READ, GENERIC_WRITE}, - }, -}; +use std::io; -pub type IpcClient = NamedPipeServer; -pub type IpcServer = OwnedHandle; +pub type IpcClient = SeqpacketConn; +pub type IpcServer = SeqpacketListener; pub struct NamedPipeLiaison { - socket_path: CString, -} - -pub fn pid_shm_path(pipe_path: &str) -> CString { - #[allow(clippy::unwrap_used)] - CString::new(&pipe_path[PIPE_PATH.len() - 1..]).unwrap() + socket_path: String, } impl Liaison for NamedPipeLiaison { - fn connect_to_server(&self) -> io::Result { - let timeout_end = Instant::now() + Duration::from_secs(2); - let pipe = loop { - let h = unsafe { - CreateFileA( - self.socket_path.as_ptr(), - GENERIC_READ | GENERIC_WRITE, - 0, - null_mut(), - OPEN_EXISTING, - FILE_FLAG_OVERLAPPED, - null_mut(), - ) - }; - if h == INVALID_HANDLE_VALUE { - let error = io::Error::last_os_error(); - if 
error.raw_os_error() != Some(ERROR_PIPE_BUSY as i32) { - return Err(error); - } - } else { - break h; - } - - if Instant::now() > timeout_end { - return Err(io::Error::from(io::ErrorKind::TimedOut)); - } - std::thread::yield_now(); - }; - - let socket_path = self.socket_path.clone(); - // Have a ProcessHandle::Getter() so that we don't immediately block in case the sidecar is - // still starting up, but only the first time we want to submit shared memory - Ok(Channel::from_client_handle_and_pid( - unsafe { OwnedHandle::from_raw_handle(pipe as RawHandle) }, - ProcessHandle::Getter(Box::new(move || { - // Await the shared memory handle which will contain the pid of the sidecar - // As it may not be immediately available during startup - let timeout_end = Instant::now() + Duration::from_secs(2); - let mut last_error: Option> = None; - let pid_path = pid_shm_path(&String::from_utf8_lossy(socket_path.as_bytes())); - loop { - match open_named_shm(&pid_path) { - Ok(shm) => { - #[cfg(windows_seh_wrapper)] - let pid = { - let mut pid = 0; - if let Err(e) = microseh::try_seh(|| { - pid = u32::from_ne_bytes(*array_ref![shm.as_slice(), 0, 4]) - }) { - last_error = Some(Box::new(e)); - } - pid - }; - - #[cfg(not(windows_seh_wrapper))] - let pid = u32::from_ne_bytes(*array_ref![shm.as_slice(), 0, 4]); - - if pid != 0 { - return Ok(ProcessHandle::Pid(pid)); - } - } - Err(e) => last_error = Some(Box::new(e)), - } - if Instant::now() > timeout_end { - warn!("Reading the sidecar pid from {} timed out after {:?}. 
(last error: {:?})", - pid_path.to_string_lossy(), timeout_end, last_error); - return Err(io::Error::from(io::ErrorKind::TimedOut)); - } - std::thread::yield_now(); - } - })), - )) + fn connect_to_server(&self) -> io::Result { + SeqpacketConn::connect(&self.socket_path) } - fn attempt_listen(&self) -> io::Result> { - let mut sec_attributes = SECURITY_ATTRIBUTES { - nLength: mem::size_of::() as DWORD, - lpSecurityDescriptor: null_mut(), - bInheritHandle: 1, // We want this one to be inherited - }; - match unsafe { - CreateNamedPipeA( - self.socket_path.as_ptr(), - FILE_FLAG_OVERLAPPED - | PIPE_ACCESS_OUTBOUND - | PIPE_ACCESS_INBOUND - | FILE_FLAG_FIRST_PIPE_INSTANCE, - PIPE_TYPE_BYTE | PIPE_READMODE_BYTE, - PIPE_UNLIMITED_INSTANCES, - 65536, - 65536, - 0, - &mut sec_attributes, - ) - } { - INVALID_HANDLE_VALUE => { - let error = io::Error::last_os_error(); - if match error.raw_os_error() { - Some(code) => code as u32 == ERROR_ACCESS_DENIED, - None => true, - } { - Ok(None) - } else { - Err(error) - } + fn attempt_listen(&self) -> io::Result> { + match SeqpacketListener::bind(&self.socket_path) { + Ok(listener) => Ok(Some(listener)), + Err(ref e) + if e.raw_os_error() + == Some(winapi::shared::winerror::ERROR_ACCESS_DENIED as i32) => + { + Ok(None) } - h => Ok(Some(unsafe { - OwnedHandle::from_raw_handle(h as RawHandle) - })), + Err(e) => Err(e), } } @@ -170,19 +45,14 @@ impl Liaison for NamedPipeLiaison { impl NamedPipeLiaison { pub fn new>(prefix: P) -> Self { - // Due to the restriction on Global\ namespace for shared memory we have to distinguish - // individual sidecar sessions. Fetch the session_id to effectively namespace the - // Named Pipe names too. 
- #[allow(clippy::unwrap_used)] Self { - socket_path: CString::new(format!( + socket_path: format!( "{}{}{}-libdd.{}", PIPE_PATH, prefix.as_ref(), primary_sidecar_identifier(), crate::sidecar_version!() - )) - .unwrap(), + ), } } @@ -199,58 +69,48 @@ impl Default for NamedPipeLiaison { pub type DefaultLiason = NamedPipeLiaison; +/// Helper: derive the shared-memory path used to publish the sidecar PID. +pub fn pid_shm_path(pipe_path: &str) -> CString { + #[allow(clippy::unwrap_used)] + CString::new(&pipe_path[PIPE_PATH.len() - 1..]).unwrap() +} + #[cfg(test)] mod tests { use super::Liaison; - use futures::future; - use rand::distributions::Alphanumeric; - use rand::{thread_rng, Rng}; - use std::io::Write; - use std::os::windows::io::IntoRawHandle; - use tokio::io::AsyncReadExt; - use tokio::net::windows::named_pipe::NamedPipeServer; - use winapi::um::{handleapi::CloseHandle, winnt::HANDLE}; + use datadog_ipc::{SeqpacketConn, SeqpacketListener}; - #[tokio::test] - async fn test_shared_dir_can_connect_to_socket() -> anyhow::Result<()> { + #[test] + fn test_shared_dir_can_connect_to_socket() -> anyhow::Result<()> { + use rand::distributions::Alphanumeric; + use rand::{thread_rng, Rng}; let random_prefix: Vec = thread_rng().sample_iter(&Alphanumeric).take(8).collect(); let liaison = super::NamedPipeLiaison::new(String::from_utf8_lossy(&random_prefix)); - basic_liaison_connection_test(liaison).await.unwrap(); + basic_liaison_connection_test(&liaison)?; Ok(()) } - pub async fn basic_liaison_connection_test(liaison: T) -> Result<(), anyhow::Error> + pub fn basic_liaison_connection_test(liaison: &T) -> Result<(), anyhow::Error> where - T: Liaison + Send + Sync + 'static, + T: Liaison, { - let liaison = { - let raw_handle = liaison.attempt_listen().unwrap().unwrap().into_raw_handle(); - let mut srv = unsafe { NamedPipeServer::from_raw_handle(raw_handle) }.unwrap(); - + { + let listener: SeqpacketListener = liaison.attempt_listen().unwrap().unwrap(); // can't listen twice 
when some listener is active - //assert!(liaison.attempt_listen().unwrap().is_none()); - // a liaison can try connecting to existing socket to ensure its valid, adding - // connection to accept queue but we can drain any preexisting connections - // in the queue - let (_, result) = future::join( - srv.connect(), - tokio::spawn(async move { (liaison.connect_to_server().unwrap(), liaison) }), - ) - .await; - let (mut client, liaison) = result.unwrap(); - assert_eq!(1, client.write(&[255]).unwrap()); - let mut buf = [0; 1]; - assert_eq!(1, srv.read(&mut buf).await.unwrap()); - - // for this test: Somehow, NamedPipeServer remains tangled with the event-loop and won't - // free itself in time - unsafe { CloseHandle(raw_handle as HANDLE) }; - std::mem::forget(srv); - - liaison - }; + assert!(liaison.attempt_listen().unwrap().is_none()); + + let mut client: SeqpacketConn = liaison.connect_to_server().unwrap(); + let mut srv: SeqpacketConn = listener.try_accept().unwrap(); + client.send_raw_blocking(&mut vec![255], &[]).unwrap(); + let mut buf = vec![0u8; datadog_ipc::MAX_MESSAGE_SIZE + datadog_ipc::HANDLE_SUFFIX_SIZE]; + let (n, _) = srv.recv_raw_blocking(&mut buf).unwrap(); + assert_eq!(n, 1); + assert_eq!(buf[0], 255); + drop(listener); + drop(client); + } - // we should be able to open new listener now + // we should be able to open a new listener now let _listener = liaison.attempt_listen().unwrap().unwrap(); Ok(()) } diff --git a/datadog-sidecar/src/unix.rs b/datadog-sidecar/src/unix.rs index 7eff3314e5..80b11aa7c0 100644 --- a/datadog-sidecar/src/unix.rs +++ b/datadog-sidecar/src/unix.rs @@ -4,17 +4,16 @@ use spawn_worker::{getpid, SpawnWorker, Stdio, TrampolineData}; use std::ffi::CString; -use std::os::unix::net::UnixListener as StdUnixListener; use crate::config::Config; use crate::enter_listener_loop; +use datadog_ipc::{SeqpacketConn, SeqpacketListener}; use nix::fcntl::{fcntl, OFlag, F_GETFL, F_SETFL}; use nix::sys::socket::{shutdown, Shutdown}; use std::io; 
use std::os::fd::RawFd; use std::os::unix::prelude::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd}; use std::time::Instant; -use tokio::net::{UnixListener, UnixStream}; use tokio::select; use tokio::signal::unix::{signal, SignalKind}; use tracing::{error, info}; @@ -55,20 +54,20 @@ pub extern "C" fn ddog_daemon_entry_point(trampoline_data: &TrampolineData) { let appsec_started = maybe_start_appsec(); if let Some(fd) = spawn_worker::recv_passed_fd() { - let listener: StdUnixListener = fd.into(); + let seqpacket_listener = SeqpacketListener::from_owned_fd(fd); info!("Starting sidecar, pid: {}", getpid()); let acquire_listener = move || { - listener.set_nonblocking(true)?; - let listener = UnixListener::from_std(listener)?; + // Convert to async listener (also sets non-blocking mode). + let async_listener = seqpacket_listener.into_async_listener()?; // shutdown to gracefully dequeue, and immediately relinquish ownership of the socket // while shutting down let cancel = { - let listener_fd = listener.as_raw_fd(); + let listener_fd = async_listener.as_raw_fd(); move || stop_listening(listener_fd) }; - Ok((|handler| accept_socket_loop(listener, handler), cancel)) + Ok((move |handler| accept_socket_loop(async_listener, handler), cancel)) }; if let Err(err) = enter_listener_loop(acquire_listener) { error!("Error: {err}") @@ -96,22 +95,33 @@ fn stop_listening(listener_fd: RawFd) { } async fn accept_socket_loop( - listener: UnixListener, - handler: Box, + async_listener: tokio::io::unix::AsyncFd, + handler: Box, ) -> io::Result<()> { #[allow(clippy::unwrap_used)] let mut termsig = signal(SignalKind::terminate()).unwrap(); loop { select! 
{ _ = termsig.recv() => { - stop_listening(listener.as_raw_fd()); + stop_listening(async_listener.as_raw_fd()); break; } - accept = listener.accept() => { - if let Ok((socket, _)) = accept { - handler(socket); - } else { - break; + ready = async_listener.readable() => { + match ready { + Ok(mut guard) => { + match guard.try_io(|inner| inner.get_ref().try_accept()) { + Ok(Ok(conn)) => handler(conn), + Ok(Err(e)) => { + error!("IPC accept error: {e}"); + break; + } + Err(_would_block) => continue, + } + } + Err(e) => { + error!("IPC listener error: {e}"); + break; + } } } } @@ -120,7 +130,7 @@ async fn accept_socket_loop( } pub fn setup_daemon_process( - listener: StdUnixListener, + listener: SeqpacketListener, spawn_cfg: &mut SpawnWorker, ) -> io::Result<()> { spawn_cfg diff --git a/datadog-sidecar/src/windows.rs b/datadog-sidecar/src/windows.rs index 9080bcfedc..1b32348904 100644 --- a/datadog-sidecar/src/windows.rs +++ b/datadog-sidecar/src/windows.rs @@ -6,6 +6,7 @@ use crate::setup::pid_shm_path; use datadog_ipc::platform::{ named_pipe_name_from_raw_handle, FileBackedHandle, MappedMem, NamedShmHandle, }; +use datadog_ipc::{SeqpacketConn, SeqpacketListener}; use futures::FutureExt; use libdd_common::Endpoint; @@ -16,7 +17,7 @@ use manual_future::ManualFuture; use spawn_worker::{write_crashtracking_trampoline, SpawnWorker, Stdio, TrampolineData}; use std::ffi::CStr; use std::io::{self, Error}; -use std::os::windows::io::{AsRawHandle, IntoRawHandle, OwnedHandle}; +use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, OwnedHandle}; use std::ptr::null_mut; use std::sync::LazyLock; use std::sync::{Arc, Mutex}; @@ -24,6 +25,9 @@ use std::time::Instant; use tokio::net::windows::named_pipe::{NamedPipeServer, ServerOptions}; use tokio::select; use tracing::{error, info}; +use winapi::shared::minwindef::ULONG; +use winapi::um::winbase::GetNamedPipeClientProcessId; +use winapi::um::winnt::HANDLE; use winapi::{ shared::{ sddl::ConvertSidToStringSidA, @@ -36,7 
+40,7 @@ use winapi::{ }, securitybaseapi::GetTokenInformation, winbase::LocalFree, - winnt::{TokenUser, HANDLE, TOKEN_QUERY, TOKEN_USER}, + winnt::{TokenUser, TOKEN_QUERY, TOKEN_USER}, }, }; @@ -66,10 +70,10 @@ pub extern "C" fn ddog_daemon_entry_point(_trampoline_data: &TrampolineData) { info!("Starting sidecar, pid: {}", pid); - let acquire_listener = move || unsafe { + let acquire_listener = move || { let (closed_future, close_completer) = ManualFuture::new(); let close_completer = Arc::from(Mutex::new(Some(close_completer))); - let pipe = NamedPipeServer::from_raw_handle(handle.into_raw_handle())?; + let listener = SeqpacketListener::from_owned_fd(handle); let cancel = move || { if let Some(completer) = close_completer.lock_or_panic().take() { @@ -80,7 +84,7 @@ pub extern "C" fn ddog_daemon_entry_point(_trampoline_data: &TrampolineData) { // We pass the shm to ensure we drop the shm handle with the pid immediately after // cancellation To avoid actual race conditions Ok(( - |handler| accept_socket_loop(pipe, closed_future, handler, shm), + |handler| accept_socket_loop(listener, closed_future, handler, shm), cancel, )) }; @@ -98,14 +102,20 @@ pub extern "C" fn ddog_daemon_entry_point(_trampoline_data: &TrampolineData) { } async fn accept_socket_loop( - mut pipe: NamedPipeServer, + listener: SeqpacketListener, cancellation: ManualFuture<()>, - handler: Box, + handler: Box, _: MappedMem, ) -> io::Result<()> { - let name = named_pipe_name_from_raw_handle(pipe.as_raw_handle()) + // Wrap the first server instance as a Tokio NamedPipeServer for async connect polling. + // After each accepted connection we create a fresh Tokio server for the next client. + let name = named_pipe_name_from_raw_handle(listener.as_raw_handle()) .ok_or(io::Error::from(io::ErrorKind::InvalidInput))?; + // Transfer the listener's handle into a Tokio NamedPipeServer. 
+ let mut pipe = + unsafe { NamedPipeServer::from_raw_handle(listener.into_raw_handle()) }?; + let cancellation = cancellation.shared(); loop { select! { @@ -113,22 +123,39 @@ async fn accept_socket_loop( result = pipe.connect() => result?, } let connected_pipe = pipe; - pipe = ServerOptions::new().create(&name)?; - handler(connected_pipe); + pipe = ServerOptions::new() + .pipe_mode(tokio::net::windows::named_pipe::PipeMode::Message) + .create(&name)?; + + // Convert the connected NamedPipeServer into a SeqpacketConn. + let raw = connected_pipe.as_raw_handle(); + let mut client_pid: ULONG = 0; + unsafe { GetNamedPipeClientProcessId(raw as HANDLE, &mut client_pid) }; + // Transfer ownership: forget the Tokio wrapper (which doesn't implement IntoRawHandle) + // and take the handle ourselves. + std::mem::forget(connected_pipe); + let owned = unsafe { OwnedHandle::from_raw_handle(raw) }; + let conn = SeqpacketConn::from_server_handle(owned, client_pid); + handler(conn); } // drops pipe and shm here Ok(()) } -pub fn setup_daemon_process(listener: OwnedHandle, spawn_cfg: &mut SpawnWorker) -> io::Result<()> { +pub fn setup_daemon_process( + listener: SeqpacketListener, + spawn_cfg: &mut SpawnWorker, +) -> io::Result<()> { // Ensure unique process names - we spawn one sidecar per console session id (see // setup/windows.rs for the reasoning) + let raw = listener.into_raw_handle(); + let owned = unsafe { OwnedHandle::from_raw_handle(raw) }; spawn_cfg .process_name(format!( "datadog-ipc-helper-{}", primary_sidecar_identifier() )) - .pass_handle(listener) + .pass_handle(owned) .stdin(Stdio::Null); Ok(()) diff --git a/tools/docker/Dockerfile.build b/tools/docker/Dockerfile.build index b6871a1101..b6858b6952 100644 --- a/tools/docker/Dockerfile.build +++ b/tools/docker/Dockerfile.build @@ -113,7 +113,6 @@ COPY "tests/spawn_from_lib/Cargo.toml" "tests/spawn_from_lib/" COPY "datadog-ipc/Cargo.toml" "datadog-ipc/" COPY "datadog-ipc-macros/Cargo.toml" "datadog-ipc-macros/" COPY 
"datadog-ipc/tarpc/Cargo.toml" "datadog-ipc/tarpc/" -COPY "datadog-ipc/plugins/Cargo.toml" "datadog-ipc/plugins/" COPY "libdd-data-pipeline/Cargo.toml" "libdd-data-pipeline/" COPY "libdd-data-pipeline-ffi/Cargo.toml" "libdd-data-pipeline-ffi/" COPY "bin_tests/Cargo.toml" "bin_tests/" From 401824c5480d7097a222451c590d4e108fce3fb3 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Tue, 10 Mar 2026 17:21:32 +0100 Subject: [PATCH 02/29] Fixup windows --- datadog-ipc-macros/src/lib.rs | 10 +-- datadog-ipc/src/handles.rs | 89 +------------------- datadog-ipc/src/lib.rs | 1 - datadog-ipc/src/platform/unix/handles.rs | 84 ++++++++++++++++++ datadog-ipc/src/platform/unix/mod.rs | 3 + datadog-ipc/src/platform/unix/sockets/mod.rs | 18 ++-- datadog-ipc/src/platform/windows/handles.rs | 74 ++++++++++++++++ datadog-ipc/src/platform/windows/mod.rs | 3 + datadog-ipc/src/platform/windows/sockets.rs | 33 +++++++- datadog-sidecar-ffi/src/lib.rs | 2 +- datadog-sidecar/src/service/mod.rs | 2 + 11 files changed, 215 insertions(+), 104 deletions(-) create mode 100644 datadog-ipc/src/platform/unix/handles.rs create mode 100644 datadog-ipc/src/platform/windows/handles.rs diff --git a/datadog-ipc-macros/src/lib.rs b/datadog-ipc-macros/src/lib.rs index 8a4a0326de..44fb11c8ee 100644 --- a/datadog-ipc-macros/src/lib.rs +++ b/datadog-ipc-macros/src/lib.rs @@ -409,13 +409,13 @@ fn gen_serve_fn( quote! { let result = handler.#name(peer, #(#field_names),*).await; let __resp_data = datadog_ipc::codec::encode_response(&result); - datadog_ipc::send_raw_async(&async_fd, &__resp_data, &[]).await.ok(); + datadog_ipc::send_raw_async(&async_fd, &__resp_data).await.ok(); } } else { quote! { handler.#name(peer, #(#field_names),*).await; // 1-byte ack: distinguishable from EOF (0 bytes from recvmsg on closed socket). 
- datadog_ipc::send_raw_async(&async_fd, &[0u8], &[]).await.ok(); + datadog_ipc::send_raw_async(&async_fd, &[0u8]).await.ok(); } }; @@ -433,15 +433,15 @@ fn gen_serve_fn( handler: ::std::sync::Arc, ) { let peer = conn.peer_credentials().unwrap_or_default(); - let async_fd = match conn.into_async_fd() { + let async_fd = match conn.into_async_conn() { Ok(fd) => fd, Err(e) => { - ::tracing::error!("IPC serve: into_async_fd failed: {e}"); + ::tracing::error!("IPC serve: into_async_conn failed: {e}"); return; } }; let mut recv_counter: u64 = 0; - let mut buf = vec![0u8; datadog_ipc::MAX_MESSAGE_SIZE]; + let mut buf = vec![0u8; datadog_ipc::MAX_MESSAGE_SIZE + datadog_ipc::HANDLE_SUFFIX_SIZE]; loop { let (n, fds) = match datadog_ipc::recv_raw_async(&async_fd, &mut buf).await { Ok(x) => x, diff --git a/datadog-ipc/src/handles.rs b/datadog-ipc/src/handles.rs index 0382dafcce..c6fa4f764e 100644 --- a/datadog-ipc/src/handles.rs +++ b/datadog-ipc/src/handles.rs @@ -1,13 +1,11 @@ // Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use std::error::Error; - use super::platform::PlatformHandle; // Access ability to transport handles between processes pub trait HandlesTransport { - type Error: Error; + type Error: std::error::Error; /// Move handle out of an object, to send it to remote process fn copy_handle(self, handle: PlatformHandle) -> Result<(), Self::Error>; @@ -45,90 +43,7 @@ impl TransferHandles for &T { } } -/// Collects raw file descriptors to be sent via `SCM_RIGHTS` alongside a message. 
-#[cfg(unix)] -pub struct FdSink(Vec); - -#[cfg(unix)] -impl FdSink { - pub fn new() -> Self { - FdSink(Vec::new()) - } - - pub fn fds(&self) -> &[std::os::unix::io::RawFd] { - &self.0 - } - - pub fn into_fds(self) -> Vec { - self.0 - } -} - -#[cfg(unix)] -impl Default for FdSink { - fn default() -> Self { - Self::new() - } -} - -#[cfg(unix)] -impl HandlesTransport for &mut FdSink { - type Error = std::convert::Infallible; - - fn copy_handle(self, handle: super::platform::PlatformHandle) -> Result<(), Self::Error> { - if let Some(owned) = &handle.inner { - use std::os::unix::io::AsRawFd; - self.0.push(owned.as_raw_fd()); - } - Ok(()) - } - - fn provide_handle( - self, - _hint: &super::platform::PlatformHandle, - ) -> Result, Self::Error> { - unreachable!("FdSink::provide_handle should never be called") - } -} - -/// Distributes received `SCM_RIGHTS` file descriptors into `PlatformHandle` fields. -/// -/// Created fresh for each received message — no global fd queue, no fd stranding. -#[cfg(unix)] -pub struct FdSource(std::collections::VecDeque); - -#[cfg(unix)] -impl FdSource { - pub fn new(fds: Vec) -> Self { - FdSource(fds.into_iter().collect()) - } -} - -#[cfg(unix)] -impl HandlesTransport for &mut FdSource { - type Error = std::io::Error; - - fn copy_handle( - self, - _handle: super::platform::PlatformHandle, - ) -> Result<(), Self::Error> { - Ok(()) - } - - fn provide_handle( - self, - _hint: &super::platform::PlatformHandle, - ) -> Result, Self::Error> { - use std::os::unix::io::{FromRawFd, IntoRawFd}; - let fd = self.0.pop_front().ok_or_else(|| { - std::io::Error::new( - std::io::ErrorKind::UnexpectedEof, - "no more SCM_RIGHTS file descriptors available", - ) - })?; - Ok(unsafe { super::platform::PlatformHandle::from_raw_fd(fd.into_raw_fd()) }) - } -} +pub use crate::platform::{FdSink, FdSource}; impl TransferHandles for Result where diff --git a/datadog-ipc/src/lib.rs b/datadog-ipc/src/lib.rs index e19925616e..086f5e6aa5 100644 --- a/datadog-ipc/src/lib.rs 
+++ b/datadog-ipc/src/lib.rs @@ -19,6 +19,5 @@ pub mod client; pub use platform::{ PeerCredentials, SeqpacketConn, SeqpacketListener, HANDLE_SUFFIX_SIZE, MAX_MESSAGE_SIZE, }; -#[cfg(unix)] pub use platform::{recv_raw_async, send_raw_async}; pub use client::IpcClientConn; diff --git a/datadog-ipc/src/platform/unix/handles.rs b/datadog-ipc/src/platform/unix/handles.rs new file mode 100644 index 0000000000..623853fc88 --- /dev/null +++ b/datadog-ipc/src/platform/unix/handles.rs @@ -0,0 +1,84 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use crate::handles::HandlesTransport; +use crate::platform::PlatformHandle; +use io_lifetimes::OwnedFd; +use std::collections::VecDeque; +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd}; + +/// Collects raw file descriptors to be sent via `SCM_RIGHTS` alongside a message. +pub struct FdSink(Vec); + +impl FdSink { + pub fn new() -> Self { + FdSink(Vec::new()) + } + + pub fn fds(&self) -> &[std::os::unix::io::RawFd] { + &self.0 + } + + pub fn into_fds(self) -> Vec { + self.0 + } +} + +impl Default for FdSink { + fn default() -> Self { + Self::new() + } +} + +impl HandlesTransport for &mut FdSink { + type Error = std::convert::Infallible; + + fn copy_handle(self, handle: PlatformHandle) -> Result<(), Self::Error> { + if let Some(owned) = &handle.inner { + self.0.push(owned.as_raw_fd()); + } + Ok(()) + } + + fn provide_handle( + self, + _hint: &PlatformHandle, + ) -> Result, Self::Error> { + unreachable!("FdSink::provide_handle should never be called") + } +} + +/// Distributes received `SCM_RIGHTS` file descriptors into `PlatformHandle` fields. +/// +/// Created fresh for each received message — no global fd queue, no fd stranding. 
+pub struct FdSource(VecDeque); + +impl FdSource { + pub fn new(fds: Vec) -> Self { + FdSource(fds.into_iter().collect()) + } +} + +impl HandlesTransport for &mut FdSource { + type Error = std::io::Error; + + fn copy_handle( + self, + _handle: PlatformHandle, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn provide_handle( + self, + _hint: &PlatformHandle, + ) -> Result, Self::Error> { + let fd = self.0.pop_front().ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::UnexpectedEof, + "no more SCM_RIGHTS file descriptors available", + ) + })?; + Ok(unsafe { PlatformHandle::from_raw_fd(fd.into_raw_fd()) }) + } +} diff --git a/datadog-ipc/src/platform/unix/mod.rs b/datadog-ipc/src/platform/unix/mod.rs index f94115d8e6..eace1efdf0 100644 --- a/datadog-ipc/src/platform/unix/mod.rs +++ b/datadog-ipc/src/platform/unix/mod.rs @@ -7,6 +7,9 @@ pub mod locks; pub mod sockets; pub use sockets::*; +mod handles; +pub use handles::*; + mod message; pub use message::*; diff --git a/datadog-ipc/src/platform/unix/sockets/mod.rs b/datadog-ipc/src/platform/unix/sockets/mod.rs index 5c39632fbb..fa3ff3529f 100644 --- a/datadog-ipc/src/platform/unix/sockets/mod.rs +++ b/datadog-ipc/src/platform/unix/sockets/mod.rs @@ -280,8 +280,8 @@ impl SeqpacketConn { Ok(()) } - /// Wrap in a Tokio `AsyncFd` for async server dispatch loops. - pub fn into_async_fd(self) -> io::Result> { + /// Convert to an async connection for use in async server dispatch loops. + pub fn into_async_conn(self) -> io::Result { tokio::io::unix::AsyncFd::new(self.inner) } @@ -290,11 +290,14 @@ impl SeqpacketConn { } } +/// The async connection type on Unix: a Tokio `AsyncFd` wrapping the raw fd. +pub type AsyncConn = tokio::io::unix::AsyncFd; + /// Async receive on a Tokio `AsyncFd`-wrapped IPC connection. /// /// Used by the server dispatch loop (generated by `#[service]` macro). 
pub async fn recv_raw_async( - fd: &tokio::io::unix::AsyncFd, + fd: &AsyncConn, buf: &mut [u8], ) -> io::Result<(usize, Vec)> { loop { @@ -310,14 +313,11 @@ pub async fn recv_raw_async( /// /// Used by the server dispatch loop (generated by `#[service]` macro) to send acks and /// responses without blocking the async thread. -pub async fn send_raw_async( - fd: &tokio::io::unix::AsyncFd, - data: &[u8], - fds: &[RawFd], -) -> io::Result<()> { +/// Server responses never carry fds (fds flow client→server only via SCM_RIGHTS). +pub async fn send_raw_async(fd: &AsyncConn, data: &[u8]) -> io::Result<()> { loop { let mut guard = fd.writable().await?; - match guard.try_io(|inner| sendmsg_raw(inner.as_raw_fd(), data, fds, MsgFlags::empty())) { + match guard.try_io(|inner| sendmsg_raw(inner.as_raw_fd(), data, &[], MsgFlags::empty())) { Ok(result) => return result, Err(_would_block) => continue, } diff --git a/datadog-ipc/src/platform/windows/handles.rs b/datadog-ipc/src/platform/windows/handles.rs new file mode 100644 index 0000000000..8dfc36fd9b --- /dev/null +++ b/datadog-ipc/src/platform/windows/handles.rs @@ -0,0 +1,74 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use crate::handles::HandlesTransport; +use crate::platform::PlatformHandle; +use std::collections::VecDeque; +use std::os::windows::io::{FromRawHandle, IntoRawHandle, OwnedHandle}; + +/// No-op sink — Windows handles are transferred in-band via message suffix, not out-of-band. 
+pub struct FdSink; + +impl FdSink { + pub fn new() -> Self { + FdSink + } + + pub fn into_fds(self) -> Vec { + Vec::new() + } +} + +impl Default for FdSink { + fn default() -> Self { + Self::new() + } +} + +impl HandlesTransport for &mut FdSink { + type Error = std::convert::Infallible; + + fn copy_handle(self, _handle: PlatformHandle) -> Result<(), Self::Error> { + Ok(()) + } + + fn provide_handle( + self, + _hint: &PlatformHandle, + ) -> Result, Self::Error> { + unreachable!("FdSink::provide_handle should never be called") + } +} + +/// Distributes handles extracted from the in-band wire suffix into `PlatformHandle` fields. +pub struct FdSource(VecDeque); + +impl FdSource { + pub fn new(handles: Vec) -> Self { + FdSource(handles.into_iter().collect()) + } +} + +impl HandlesTransport for &mut FdSource { + type Error = std::io::Error; + + fn copy_handle( + self, + _handle: PlatformHandle, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn provide_handle( + self, + _hint: &PlatformHandle, + ) -> Result, Self::Error> { + let handle = self.0.pop_front().ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::UnexpectedEof, + "no more handles available", + ) + })?; + Ok(unsafe { PlatformHandle::from_raw_handle(handle.into_raw_handle()) }) + } +} diff --git a/datadog-ipc/src/platform/windows/mod.rs b/datadog-ipc/src/platform/windows/mod.rs index 3500bad286..bc0aff2cbd 100644 --- a/datadog-ipc/src/platform/windows/mod.rs +++ b/datadog-ipc/src/platform/windows/mod.rs @@ -18,3 +18,6 @@ pub use named_pipe::*; pub mod sockets; pub use sockets::*; + +mod handles; +pub use handles::*; diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 14b95de100..f0dc6de021 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -23,7 +23,7 @@ //! bytes beyond the maximum expected payload size. 
use std::io; -use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, OwnedHandle, RawHandle}; +use std::os::windows::io::{AsRawHandle, FromRawHandle, OwnedHandle, RawHandle}; use std::path::Path; use std::ptr::{null, null_mut}; use std::sync::{ @@ -559,3 +559,34 @@ impl SeqpacketConn { pub fn is_listening>(path: P) -> io::Result { Ok(SeqpacketConn::connect(path).is_ok()) } + +/// The async connection type on Windows is the synchronous `SeqpacketConn` itself; +/// blocking I/O is wrapped in `tokio::task::block_in_place` for async compatibility. +pub type AsyncConn = SeqpacketConn; + +impl SeqpacketConn { + /// Convert to an async connection (identity on Windows). + pub fn into_async_conn(self) -> io::Result { + Ok(self) + } +} + +/// Async receive on a Windows named pipe IPC connection. +/// +/// Wraps `recv_raw_blocking` in `tokio::task::block_in_place` so it can be awaited +/// without blocking the Tokio thread pool. Requires a multi-thread Tokio runtime. +pub async fn recv_raw_async( + conn: &AsyncConn, + buf: &mut [u8], +) -> io::Result<(usize, Vec)> { + tokio::task::block_in_place(|| conn.recv_raw_blocking(buf)) +} + +/// Async send on a Windows named pipe IPC connection. +/// +/// Wraps `send_raw_blocking` in `tokio::task::block_in_place`. +/// Server responses never carry handles (handles flow client→server only via in-band suffix). 
+pub async fn send_raw_async(conn: &AsyncConn, data: &[u8]) -> io::Result<()> { + let mut data_vec = data.to_vec(); + tokio::task::block_in_place(|| conn.send_raw_blocking(&mut data_vec, &[])) +} diff --git a/datadog-sidecar-ffi/src/lib.rs b/datadog-sidecar-ffi/src/lib.rs index a3d1c5db07..48c92c0323 100644 --- a/datadog-sidecar-ffi/src/lib.rs +++ b/datadog-sidecar-ffi/src/lib.rs @@ -638,7 +638,7 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( try_c!(blocking::set_session_config( transport, session_id_str, - datadog_sidecar::service::remote_configs::RemoteConfigNotifyFunction( + datadog_sidecar::service::RemoteConfigNotifyFunction( _remote_config_notify_function, ), &session_config, diff --git a/datadog-sidecar/src/service/mod.rs b/datadog-sidecar/src/service/mod.rs index 133c019fab..4bb2e9ee0b 100644 --- a/datadog-sidecar/src/service/mod.rs +++ b/datadog-sidecar/src/service/mod.rs @@ -43,6 +43,8 @@ pub mod telemetry; pub(crate) mod tracing; pub use sidecar_interface::DynamicInstrumentationConfigState; +#[cfg(windows)] +pub use remote_configs::RemoteConfigNotifyFunction; pub use telemetry::{get_telemetry_action_sender, InternalTelemetryActions}; pub(crate) use telemetry::{init_telemetry_sender, telemetry_action_receiver_task}; From f5f040be94fc7b2aec53c523b7ae0c737a6528d8 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Tue, 10 Mar 2026 17:49:13 +0100 Subject: [PATCH 03/29] macos is a special kind of snowflake Signed-off-by: Bob Weinand --- datadog-ipc/src/platform/unix/handles.rs | 2 +- .../src/platform/unix/sockets/macos.rs | 83 +++++++++++++++---- datadog-ipc/src/platform/unix/sockets/mod.rs | 53 ++++++------ datadog-ipc/src/platform/windows/sockets.rs | 2 - 4 files changed, 97 insertions(+), 43 deletions(-) diff --git a/datadog-ipc/src/platform/unix/handles.rs b/datadog-ipc/src/platform/unix/handles.rs index 623853fc88..8defacec1f 100644 --- a/datadog-ipc/src/platform/unix/handles.rs +++ b/datadog-ipc/src/platform/unix/handles.rs @@ -50,7 +50,7 @@ 
impl HandlesTransport for &mut FdSink { /// Distributes received `SCM_RIGHTS` file descriptors into `PlatformHandle` fields. /// -/// Created fresh for each received message — no global fd queue, no fd stranding. +/// Every message should have its own FdSource. pub struct FdSource(VecDeque); impl FdSource { diff --git a/datadog-ipc/src/platform/unix/sockets/macos.rs b/datadog-ipc/src/platform/unix/sockets/macos.rs index ba5e793df2..68d7fb00a3 100644 --- a/datadog-ipc/src/platform/unix/sockets/macos.rs +++ b/datadog-ipc/src/platform/unix/sockets/macos.rs @@ -58,8 +58,6 @@ fn set_dgram_buffers(fd: i32) -> io::Result<()> { Ok(()) } -// ── SeqpacketListener ──────────────────────────────────────────────────────── - impl SeqpacketListener { /// Bind to a filesystem path (DGRAM rendezvous socket; no `listen()` needed). /// @@ -81,8 +79,12 @@ impl SeqpacketListener { let mut buf = [0u8; 1]; let (_, owned_fds) = super::recvmsg_raw(self.inner.as_raw_fd(), &mut buf, MsgFlags::MSG_DONTWAIT)?; - if let Some(client_fd) = owned_fds.into_iter().next() { - return SeqpacketConn::from_owned(client_fd); + let mut it = owned_fds.into_iter(); + if let Some(client_fd) = it.next() { + // The second fd (if present) is the liveness pipe read end from `connect()`. + // Holding it alive lets the client detect when we drop this connection. + // Unlike socketpairs, pipes aren't autoclosed when the transferred end is closed locally. + return SeqpacketConn::from_owned(client_fd, it.next()); } // No SCM_RIGHTS: liveness probe — discard and try the next message. // If the socket is empty, the next recvmsg call returns WouldBlock. @@ -109,9 +111,13 @@ impl SeqpacketConn { /// Connect to a server at the given filesystem path using the fd-passing handshake. /// - /// Creates a `SOCK_DGRAM` socketpair with 4 MiB buffers, then sends the server end - /// to the rendezvous socket via SCM_RIGHTS using a fresh unconnected DGRAM socket. - /// Returns the client end of the socketpair. 
+ /// Creates a `SOCK_DGRAM` socketpair with 4 MiB buffers and a liveness pipe, then + /// sends the server end of the socketpair **and** the read end of the liveness pipe + /// to the rendezvous socket via SCM_RIGHTS. Returns the client end of the socketpair. + /// + /// The liveness pipe enables disconnect detection: when the daemon drops its + /// `SeqpacketConn` (closing `liveness_read`), `POLLHUP` appears on `liveness_write` + /// and subsequent sends return `BrokenPipe`. pub fn connect(path: impl AsRef) -> io::Result { let mut fds = [0i32; 2]; if unsafe { @@ -125,30 +131,75 @@ impl SeqpacketConn { set_dgram_buffers(fd_client.as_raw_fd())?; set_dgram_buffers(fd_server.as_raw_fd())?; + // Create a liveness pipe. The read end is sent to the daemon; we keep the + // write end. When the daemon drops its connection (closing liveness_read), + // poll on liveness_write returns POLLHUP — enabling disconnect detection even + // though _peer keeps the socketpair alive to prevent EINVAL on sendmsg. + let mut pipe_fds = [-1i32; 2]; + if unsafe { libc::pipe(pipe_fds.as_mut_ptr()) } == -1 { + return Err(io::Error::last_os_error()); + } + let liveness_read = unsafe { OwnedFd::from_raw_fd(pipe_fds[0]) }; + let liveness_write = unsafe { OwnedFd::from_raw_fd(pipe_fds[1]) }; + // Set FD_CLOEXEC on both pipe ends so they are not inherited across exec(). + for &fd in &[liveness_read.as_raw_fd(), liveness_write.as_raw_fd()] { + let flags = unsafe { libc::fcntl(fd, libc::F_GETFD) }; + if flags >= 0 { + unsafe { libc::fcntl(fd, libc::F_SETFD, flags | libc::FD_CLOEXEC) }; + } + } + // A fresh unconnected DGRAM socket is required for the handshake sendmsg. // fd_client is already "connected" to fd_server and cannot reach the rendezvous path. 
let handshake_fd = create_dgram_socket()?; let addr = UnixAddr::new(path.as_ref()).map_err(io::Error::from)?; let server_raw = fd_server.as_raw_fd(); + let liveness_r_raw = liveness_read.as_raw_fd(); let iov = [std::io::IoSlice::new(&[0u8])]; sendmsg::( handshake_fd.as_raw_fd(), &iov, - &[ControlMessage::ScmRights(&[server_raw])], + &[ControlMessage::ScmRights(&[server_raw, liveness_r_raw])], MsgFlags::empty(), Some(&addr), ) .map_err(io::Error::from)?; - // Do NOT drop fd_server here. On macOS, closing any local fd that references - // the peer end of a SOCK_DGRAM socketpair immediately disconnects this end - // (fd_client), even if fd_server is still alive in another process via - // SCM_RIGHTS. Keep fd_server alive in `_peer` for the lifetime of this - // SeqpacketConn so that sendmsg on fd_client continues to work. - Self::from_owned_pair(fd_client, fd_server) + // liveness_read was sent via SCM_RIGHTS; drop our local copy (daemon has the reference). + drop(liveness_read); + // Keep fd_server (_peer) to prevent EINVAL: on macOS, closing the local fd for the + // peer end of a SOCK_DGRAM socketpair disconnects this end even when the peer socket + // is alive in the daemon via SCM_RIGHTS. + // Keep liveness_w (liveness_write) to detect daemon death via POLLHUP. + Self::from_owned_pair(fd_client, fd_server, Some(liveness_write)) } -} -// ── Free functions ─────────────────────────────────────────────────────────── + fn poll_liveness_pipe(&self) -> io::Result<()> { + if let Some(ref lw) = self.liveness { + let mut pfd = libc::pollfd { fd: lw.as_raw_fd(), events: libc::POLLHUP as libc::c_short, revents: 0 }; + let ret = unsafe { libc::poll(&mut pfd, 1, 0) }; + if ret > 0 && pfd.revents & (libc::POLLHUP | libc::POLLERR) != 0 { + return Err(io::Error::from(io::ErrorKind::BrokenPipe)); + } + } + Ok(()) + } + + /// Create from a connected fd plus a peer fd that must be kept alive. 
+ /// + /// On macOS, the peer fd must be kept open locally to maintain the SOCK_DGRAM + /// socketpair connection on this end. It is stored in `_peer` and closed when + /// this `SeqpacketConn` is dropped. + pub(super) fn from_owned_pair(client: OwnedFd, peer: OwnedFd, liveness: Option) -> io::Result { + set_nonblocking(client.as_raw_fd(), true)?; + Ok(Self { + inner: client, + _peer: Some(peer), + liveness, + read_timeout: None, + write_timeout: None, + }) + } +} /// Returns `true` if a live server is listening at the given socket path. /// diff --git a/datadog-ipc/src/platform/unix/sockets/mod.rs b/datadog-ipc/src/platform/unix/sockets/mod.rs index fa3ff3529f..35c85e887d 100644 --- a/datadog-ipc/src/platform/unix/sockets/mod.rs +++ b/datadog-ipc/src/platform/unix/sockets/mod.rs @@ -14,6 +14,7 @@ use std::{ os::unix::io::{AsRawFd, FromRawFd, OwnedFd, RawFd}, time::Duration, }; +use tokio::io::unix::AsyncFd; #[cfg(target_os = "linux")] mod linux; @@ -47,8 +48,6 @@ pub struct PeerCredentials { pub uid: u32, } -// ── Shared socket helpers ──────────────────────────────────────────────────── - pub(super) fn create_unix_socket(sock_type: SockType) -> io::Result { let fd = nix::sys::socket::socket(AddressFamily::Unix, sock_type, SockFlag::empty(), None) .map_err(io::Error::from)?; @@ -77,7 +76,7 @@ pub(super) fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> { } pub(super) fn sendmsg_raw(fd: RawFd, data: &[u8], fds: &[RawFd], flags: MsgFlags) -> io::Result<()> { - let iov = [std::io::IoSlice::new(data)]; + let iov = [io::IoSlice::new(data)]; if fds.is_empty() { sendmsg::(fd, &iov, &[], flags, None) } else { @@ -93,10 +92,10 @@ pub(super) fn recvmsg_raw( flags: MsgFlags, ) -> io::Result<(usize, Vec)> { let cmsg_space = unsafe { - libc::CMSG_SPACE((std::mem::size_of::() * MAX_FDS) as libc::c_uint) + libc::CMSG_SPACE((size_of::() * MAX_FDS) as libc::c_uint) } as usize; let mut cmsg_buf = vec![0u8; cmsg_space]; - let mut iov = 
[std::io::IoSliceMut::new(buf)]; + let mut iov = [io::IoSliceMut::new(buf)]; let msg = recvmsg::(fd, &mut iov, Some(&mut cmsg_buf), flags) .map_err(io::Error::from)?; @@ -143,8 +142,6 @@ pub(super) fn poll_until_ready(fd: RawFd, event: libc::c_short) -> io::Result<() poll_with_timeout(fd, event, None) } -// ── Types ──────────────────────────────────────────────────────────────────── - /// A listening socket for accepting IPC connections. /// /// - Linux: `AF_UNIX SOCK_SEQPACKET` with `listen`/`accept`. @@ -166,9 +163,9 @@ impl SeqpacketListener { /// /// Sets the socket to non-blocking mode, then wraps in `AsyncFd`. /// Requires a running Tokio runtime. - pub fn into_async_listener(self) -> io::Result> { + pub fn into_async_listener(self) -> io::Result> { set_nonblocking(self.inner.as_raw_fd(), true)?; - tokio::io::unix::AsyncFd::new(self) + AsyncFd::new(self) } pub fn as_raw_fd(&self) -> RawFd { @@ -176,7 +173,7 @@ impl SeqpacketListener { } } -impl std::os::unix::io::AsRawFd for SeqpacketListener { +impl AsRawFd for SeqpacketListener { fn as_raw_fd(&self) -> RawFd { self.inner.as_raw_fd() } @@ -198,25 +195,29 @@ pub struct SeqpacketConn { /// immediately disconnects this socket, even if the peer is still alive in another /// process. Keep `_peer` alive here so the connection remains valid until this /// `SeqpacketConn` is dropped. + #[cfg(target_os = "macos")] _peer: Option, + /// macOS only: one end of a liveness pipe. Client holds the write end, server holds + /// the read end. Polling either end for `POLLHUP` detects peer disconnection: + /// write-end POLLHUP ← server closed read end; read-end POLLHUP ← client closed write end. 
+ #[cfg(target_os = "macos")] + liveness: Option, read_timeout: Option, write_timeout: Option, } impl SeqpacketConn { - pub(super) fn from_owned(fd: OwnedFd) -> io::Result { + pub(super) fn from_owned(fd: OwnedFd, #[cfg(target_os = "macos")] liveness: Option) -> io::Result { set_nonblocking(fd.as_raw_fd(), true)?; - Ok(Self { inner: fd, _peer: None, read_timeout: None, write_timeout: None }) - } - - /// Create from a connected fd plus a peer fd that must be kept alive. - /// - /// On macOS, the peer fd must be kept open locally to maintain the SOCK_DGRAM - /// socketpair connection on this end. It is stored in `_peer` and closed when - /// this `SeqpacketConn` is dropped. - pub(super) fn from_owned_pair(client: OwnedFd, peer: OwnedFd) -> io::Result { - set_nonblocking(client.as_raw_fd(), true)?; - Ok(Self { inner: client, _peer: Some(peer), read_timeout: None, write_timeout: None }) + Ok(Self { + inner: fd, + #[cfg(target_os = "macos")] + _peer: None, + #[cfg(target_os = "macos")] + liveness, + read_timeout: None, + write_timeout: None, + }) } /// Retrieve the peer process's credentials (pid, uid). @@ -234,11 +235,15 @@ impl SeqpacketConn { /// `MSG_DONTWAIT` is not needed and is intentionally omitted — on macOS `AF_UNIX SOCK_DGRAM` /// socketpairs, `MSG_DONTWAIT` can return EINVAL instead of EAGAIN. pub fn try_send_raw(&self, data: &mut Vec, fds: &[RawFd]) -> io::Result<()> { + #[cfg(target_os = "macos")] + self.poll_liveness_pipe(); sendmsg_raw(self.inner.as_raw_fd(), data, fds, MsgFlags::empty()) } /// Blocking send. Polls for writability (respecting write_timeout), then sends. pub fn send_raw_blocking(&self, data: &mut Vec, fds: &[RawFd]) -> io::Result<()> { + #[cfg(target_os = "macos")] + self.poll_liveness_pipe(); let fd = self.inner.as_raw_fd(); loop { match sendmsg_raw(fd, data, fds, MsgFlags::empty()) { @@ -282,7 +287,7 @@ impl SeqpacketConn { /// Convert to an async connection for use in async server dispatch loops. 
pub fn into_async_conn(self) -> io::Result { - tokio::io::unix::AsyncFd::new(self.inner) + AsyncFd::new(self.inner) } pub fn as_raw_fd(&self) -> RawFd { @@ -291,7 +296,7 @@ impl SeqpacketConn { } /// The async connection type on Unix: a Tokio `AsyncFd` wrapping the raw fd. -pub type AsyncConn = tokio::io::unix::AsyncFd; +pub type AsyncConn = AsyncFd; /// Async receive on a Tokio `AsyncFd`-wrapped IPC connection. /// diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index f0dc6de021..1e07aed43b 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -244,8 +244,6 @@ fn make_overlapped(event: SysHANDLE) -> OVERLAPPED { } } -// ── SeqpacketListener ───────────────────────────────────────────────────────── - /// A named-pipe server that accepts message-mode IPC connections. /// /// `try_accept` swaps the connected pipe instance for a fresh server instance so the listener From 474efaa10157957918614dfe80abd29c88c2119e Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Tue, 10 Mar 2026 18:22:39 +0100 Subject: [PATCH 04/29] Clippy Signed-off-by: Bob Weinand --- datadog-ipc/src/client.rs | 16 ++----- datadog-ipc/src/platform/message.rs | 4 ++ datadog-ipc/src/platform/unix/message.rs | 5 -- .../src/platform/unix/sockets/macos.rs | 46 +++++++++---------- datadog-ipc/src/platform/unix/sockets/mod.rs | 16 +++---- datadog-ipc/src/platform/windows/sockets.rs | 4 +- datadog-ipc/tests/blocking_client.rs | 5 +- datadog-sidecar/src/service/blocking.rs | 8 ++-- datadog-sidecar/src/service/sender.rs | 1 + datadog-sidecar/src/service/sidecar_server.rs | 1 + datadog-sidecar/src/setup/unix.rs | 4 +- 11 files changed, 48 insertions(+), 62 deletions(-) diff --git a/datadog-ipc/src/client.rs b/datadog-ipc/src/client.rs index cefa0ff910..8ec5d01746 100644 --- a/datadog-ipc/src/client.rs +++ b/datadog-ipc/src/client.rs @@ -69,11 +69,8 @@ impl IpcClientConn { /// Non-blocking drain of all 
pending acks. Updates `ack_count`. pub fn drain_acks(&mut self) { - loop { - match self.conn.try_recv_raw(&mut self.recv_buf) { - Ok(_) => self.ack_count += 1, - Err(_) => break, - } + while self.conn.try_recv_raw(&mut self.recv_buf).is_ok() { + self.ack_count += 1; } } @@ -105,9 +102,8 @@ impl IpcClientConn { /// /// Used when draining the outbox of state-change messages. pub fn send_blocking(&mut self, data: &mut Vec, fds: &[RawFd]) -> io::Result<()> { - self.conn.send_raw_blocking(data, fds).map_err(|e| { + self.conn.send_raw_blocking(data, fds).inspect_err(|_| { self.closed = true; - e })?; self.send_count += 1; Ok(()) @@ -120,16 +116,14 @@ impl IpcClientConn { /// ack for this specific send arrives. Returns the response bytes and any /// transferred file descriptors. pub fn call(&mut self, data: &mut Vec, fds: &[RawFd]) -> io::Result<(Vec, Vec)> { - self.conn.send_raw_blocking(data, fds).map_err(|e| { + self.conn.send_raw_blocking(data, fds).inspect_err(|_| { self.closed = true; - e })?; self.send_count += 1; let target = self.send_count; loop { - let (n, resp_fds) = self.conn.recv_raw_blocking(&mut self.recv_buf).map_err(|e| { + let (n, resp_fds) = self.conn.recv_raw_blocking(&mut self.recv_buf).inspect_err(|_| { self.closed = true; - e })?; self.ack_count += 1; if self.ack_count == target { diff --git a/datadog-ipc/src/platform/message.rs b/datadog-ipc/src/platform/message.rs index e8df4285bd..dfbea23b22 100644 --- a/datadog-ipc/src/platform/message.rs +++ b/datadog-ipc/src/platform/message.rs @@ -4,6 +4,10 @@ use crate::handles::{HandlesTransport, TransferHandles}; use crate::platform::Message; +/// Maximum file descriptors transferable in a single message. 
+pub const MAX_FDS: usize = 20; + + impl Message { pub fn ref_item(&self) -> &Item { &self.item diff --git a/datadog-ipc/src/platform/unix/message.rs b/datadog-ipc/src/platform/unix/message.rs index 0e583169c9..cb0336ff4b 100644 --- a/datadog-ipc/src/platform/unix/message.rs +++ b/datadog-ipc/src/platform/unix/message.rs @@ -3,11 +3,6 @@ use serde::{Deserialize, Serialize}; -/// sendfd crate's API is not able to resize the received FD container. -/// limiting the max number of sent FDs should allow help lower a chance of surprise -/// TODO: sendfd should be rewriten, fixed to handle cases like these better. -pub const MAX_FDS: usize = 20; - #[derive(Deserialize, Serialize)] pub struct Message { pub item: Item, diff --git a/datadog-ipc/src/platform/unix/sockets/macos.rs b/datadog-ipc/src/platform/unix/sockets/macos.rs index 68d7fb00a3..8c304de2f6 100644 --- a/datadog-ipc/src/platform/unix/sockets/macos.rs +++ b/datadog-ipc/src/platform/unix/sockets/macos.rs @@ -106,7 +106,7 @@ impl SeqpacketConn { let fd1 = unsafe { OwnedFd::from_raw_fd(fds[1]) }; set_dgram_buffers(fd0.as_raw_fd())?; set_dgram_buffers(fd1.as_raw_fd())?; - Ok((Self::from_owned(fd0)?, Self::from_owned(fd1)?)) + Ok((Self::from_owned(fd0, None)?, Self::from_owned(fd1, None)?)) } /// Connect to a server at the given filesystem path using the fd-passing handshake. 
@@ -173,7 +173,7 @@ impl SeqpacketConn { Self::from_owned_pair(fd_client, fd_server, Some(liveness_write)) } - fn poll_liveness_pipe(&self) -> io::Result<()> { + pub(super) fn poll_liveness_pipe(&self) -> io::Result<()> { if let Some(ref lw) = self.liveness { let mut pfd = libc::pollfd { fd: lw.as_raw_fd(), events: libc::POLLHUP as libc::c_short, revents: 0 }; let ret = unsafe { libc::poll(&mut pfd, 1, 0) }; @@ -225,6 +225,27 @@ pub fn is_listening>(path: P) -> io::Result { ) } +pub fn get_peer_credentials(fd: RawFd) -> io::Result { + let mut pid: libc::pid_t = 0; + let mut len = std::mem::size_of::() as libc::socklen_t; + if unsafe { + libc::getsockopt( + fd, + libc::SOL_LOCAL, + libc::LOCAL_PEERPID, + &mut pid as *mut _ as *mut libc::c_void, + &mut len, + ) + } < 0 + { + return Err(io::Error::last_os_error()); + } + Ok(PeerCredentials { + pid: pid as u32, + uid: 0, + }) +} + #[cfg(test)] mod tests { use super::*; @@ -269,24 +290,3 @@ mod tests { ); } } - -pub fn get_peer_credentials(fd: RawFd) -> io::Result { - let mut pid: libc::pid_t = 0; - let mut len = std::mem::size_of::() as libc::socklen_t; - if unsafe { - libc::getsockopt( - fd, - libc::SOL_LOCAL, - libc::LOCAL_PEERPID, - &mut pid as *mut _ as *mut libc::c_void, - &mut len, - ) - } < 0 - { - return Err(io::Error::last_os_error()); - } - Ok(PeerCredentials { - pid: pid as u32, - uid: 0, - }) -} diff --git a/datadog-ipc/src/platform/unix/sockets/mod.rs b/datadog-ipc/src/platform/unix/sockets/mod.rs index 35c85e887d..9688e4b506 100644 --- a/datadog-ipc/src/platform/unix/sockets/mod.rs +++ b/datadog-ipc/src/platform/unix/sockets/mod.rs @@ -5,7 +5,7 @@ //! //! - Linux: `AF_UNIX SOCK_SEQPACKET` with `SCM_RIGHTS` for fd transfer. //! - macOS: `AF_UNIX SOCK_DGRAM` with an fd-passing connection handshake. This emulates the -//! semantics which SOCK_SEQPACKET provides us on Linux. +//! semantics which SOCK_SEQPACKET provides us on Linux. 
pub use nix::sys::socket::{ControlMessage, ControlMessageOwned, MsgFlags, UnixAddr}; use nix::sys::socket::{recvmsg, sendmsg, AddressFamily, SockFlag, SockType}; @@ -30,9 +30,7 @@ pub use macos::is_listening; use linux::get_peer_credentials; #[cfg(target_os = "macos")] use macos::get_peer_credentials; - -/// Maximum file descriptors transferable in a single message. -pub const MAX_FDS: usize = 20; +use crate::platform::message::MAX_FDS; /// Maximum IPC message payload size (4 MiB). pub const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; @@ -138,10 +136,6 @@ pub(super) fn poll_with_timeout(fd: RawFd, event: libc::c_short, timeout: Option } } -pub(super) fn poll_until_ready(fd: RawFd, event: libc::c_short) -> io::Result<()> { - poll_with_timeout(fd, event, None) -} - /// A listening socket for accepting IPC connections. /// /// - Linux: `AF_UNIX SOCK_SEQPACKET` with `listen`/`accept`. @@ -234,16 +228,18 @@ impl SeqpacketConn { /// Note: `O_NONBLOCK` is always set on `SeqpacketConn` sockets (via `from_owned`), so /// `MSG_DONTWAIT` is not needed and is intentionally omitted — on macOS `AF_UNIX SOCK_DGRAM` /// socketpairs, `MSG_DONTWAIT` can return EINVAL instead of EAGAIN. + #[allow(clippy::ptr_arg)] // windows interface compat pub fn try_send_raw(&self, data: &mut Vec, fds: &[RawFd]) -> io::Result<()> { #[cfg(target_os = "macos")] - self.poll_liveness_pipe(); + self.poll_liveness_pipe()?; sendmsg_raw(self.inner.as_raw_fd(), data, fds, MsgFlags::empty()) } /// Blocking send. Polls for writability (respecting write_timeout), then sends. 
+ #[allow(clippy::ptr_arg)] // windows interface compat pub fn send_raw_blocking(&self, data: &mut Vec, fds: &[RawFd]) -> io::Result<()> { #[cfg(target_os = "macos")] - self.poll_liveness_pipe(); + self.poll_liveness_pipe()?; let fd = self.inner.as_raw_fd(); loop { match sendmsg_raw(fd, data, fds, MsgFlags::empty()) { diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 1e07aed43b..2a554822e5 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -30,6 +30,7 @@ use std::sync::{ atomic::{AtomicU64, Ordering}, Mutex, }; +use crate::platform::message::MAX_FDS; // winapi – only used for things not cleanly available in windows-sys use winapi::shared::minwindef::ULONG; @@ -54,9 +55,6 @@ const PIPE_ACCESS_DUPLEX: u32 = 0x0000_0003; // PIPE_ACCESS_INBOUND | PIPE_ACCES const FILE_FLAG_OVERLAPPED_: u32 = 0x4000_0000; const FILE_FLAG_FIRST_PIPE_INSTANCE_: u32 = 0x0008_0000; -/// Maximum file descriptors (handles) transferable in a single message. -pub const MAX_FDS: usize = 20; - /// Maximum IPC message payload size (4 MiB). pub const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; diff --git a/datadog-ipc/tests/blocking_client.rs b/datadog-ipc/tests/blocking_client.rs index c9a6005b9b..35e4efdc1e 100644 --- a/datadog-ipc/tests/blocking_client.rs +++ b/datadog-ipc/tests/blocking_client.rs @@ -1,10 +1,7 @@ // Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 #![cfg(unix)] -use std::{ - io::Write, - time::{Duration, Instant}, -}; +use std::time::{Duration, Instant}; use tokio::runtime; diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index 55952f3c84..a17e22af19 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ b/datadog-sidecar/src/service/blocking.rs @@ -100,7 +100,7 @@ impl SidecarTransport { Ok(t) => t, Err(e) => return Err(io::Error::other(e.to_string())), }; - match f(&mut *inner) { + match f(&mut inner) { Ok(ret) => Ok(ret), Err(e) => { if e.kind() == io::ErrorKind::BrokenPipe @@ -113,7 +113,7 @@ impl SidecarTransport { #[allow(clippy::unwrap_used)] Some(n) => n.inner.into_inner().unwrap(), }; - f(&mut *inner) + f(&mut inner) } else { Err(e) } @@ -142,7 +142,7 @@ impl From for SidecarTransport { } } -fn lock_sender(transport: &mut SidecarTransport) -> io::Result> { +fn lock_sender(transport: &mut SidecarTransport) -> io::Result> { transport.ensure_alive(); transport.inner.lock().map_err(|e| io::Error::other(e.to_string())) } @@ -399,7 +399,7 @@ pub fn ping(transport: &mut SidecarTransport) -> io::Result { mod tests { use crate::service::blocking::SidecarTransport; use datadog_ipc::{SeqpacketConn, SeqpacketListener}; - use std::time::Duration; + use tempfile::tempdir; #[test] diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index 035b2b4d06..c37d701d10 100644 --- a/datadog-sidecar/src/service/sender.rs +++ b/datadog-sidecar/src/service/sender.rs @@ -215,6 +215,7 @@ impl SidecarSender { self.try_drain_outbox(); } + #[allow(clippy::too_many_arguments)] pub fn set_universal_service_tags( &mut self, instance_id: InstanceId, diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 01da311573..0dddb9d998 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ 
b/datadog-sidecar/src/service/sidecar_server.rs @@ -775,6 +775,7 @@ impl SidecarServer { .add(exception_hash, granularity); } + #[allow(clippy::too_many_arguments)] fn set_universal_service_tags_impl( &self, instance_id: InstanceId, diff --git a/datadog-sidecar/src/setup/unix.rs b/datadog-sidecar/src/setup/unix.rs index 3eab09657e..32cddd2f3a 100644 --- a/datadog-sidecar/src/setup/unix.rs +++ b/datadog-sidecar/src/setup/unix.rs @@ -221,8 +221,8 @@ mod tests { // can't listen twice when some listener is active assert!(liaison.attempt_listen().unwrap().is_none()); - let mut client: SeqpacketConn = liaison.connect_to_server().unwrap(); - let mut srv: SeqpacketConn = listener.try_accept().unwrap(); + let client: SeqpacketConn = liaison.connect_to_server().unwrap(); + let srv: SeqpacketConn = listener.try_accept().unwrap(); client.send_raw_blocking(&mut vec![255], &[]).unwrap(); let mut buf = [0u8; 4]; let (n, _) = srv.recv_raw_blocking(&mut buf).unwrap(); From 51e66bbb3478fb615258c27da2a9a1f2a290b21a Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Tue, 10 Mar 2026 18:58:21 +0100 Subject: [PATCH 05/29] Fixup windows --- .../src/platform/unix/sockets/linux.rs | 8 +- datadog-ipc/src/platform/windows/sockets.rs | 169 ++++++++++++------ datadog-sidecar/src/setup/windows.rs | 4 +- 3 files changed, 119 insertions(+), 62 deletions(-) diff --git a/datadog-ipc/src/platform/unix/sockets/linux.rs b/datadog-ipc/src/platform/unix/sockets/linux.rs index e7bca05b1d..ed2444a6ec 100644 --- a/datadog-ipc/src/platform/unix/sockets/linux.rs +++ b/datadog-ipc/src/platform/unix/sockets/linux.rs @@ -3,10 +3,8 @@ //! Linux-specific IPC socket implementation using `AF_UNIX SOCK_SEQPACKET`. 
-use super::{ - create_unix_socket, set_nonblocking, SeqpacketConn, SeqpacketListener, PeerCredentials, -}; -use nix::sys::socket::{accept, bind, connect, listen, AddressFamily, Backlog, SockType, UnixAddr}; +use super::{create_unix_socket, SeqpacketConn, SeqpacketListener, PeerCredentials}; +use nix::sys::socket::{accept, bind, connect, listen, Backlog, SockType, UnixAddr}; use std::{ io, os::unix::{ @@ -140,6 +138,6 @@ pub fn get_peer_credentials(fd: RawFd) -> io::Result { } Ok(PeerCredentials { pid: cred.pid as u32, - uid: cred.uid as u32, + uid: cred.uid, }) } diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 2a554822e5..12deca3e4c 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -23,13 +23,14 @@ //! bytes beyond the maximum expected payload size. use std::io; -use std::os::windows::io::{AsRawHandle, FromRawHandle, OwnedHandle, RawHandle}; +use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, OwnedHandle, RawHandle}; use std::path::Path; use std::ptr::{null, null_mut}; use std::sync::{ atomic::{AtomicU64, Ordering}, Mutex, }; +use tokio::net::windows::named_pipe::{NamedPipeClient, NamedPipeServer}; use crate::platform::message::MAX_FDS; // winapi – only used for things not cleanly available in windows-sys @@ -42,7 +43,7 @@ use winapi::um::winnt::{DUPLICATE_SAME_ACCESS, HANDLE, PROCESS_DUP_HANDLE}; // windows-sys – used for all pipe/IO/threading syscalls use windows_sys::Win32::Foundation::{HANDLE as SysHANDLE, WAIT_OBJECT_0, WAIT_TIMEOUT}; -use windows_sys::Win32::Storage::FileSystem::{ReadFile, WriteFile}; +use windows_sys::Win32::Storage::FileSystem::{ReadFile, WriteFile, FILE_FLAG_FIRST_PIPE_INSTANCE, FILE_FLAG_OVERLAPPED, PIPE_ACCESS_DUPLEX}; use windows_sys::Win32::System::IO::{CancelIo, GetOverlappedResult, OVERLAPPED, OVERLAPPED_0}; use windows_sys::Win32::System::Pipes::{ ConnectNamedPipe, CreateNamedPipeA, 
PeekNamedPipe, SetNamedPipeHandleState, @@ -50,11 +51,6 @@ use windows_sys::Win32::System::Pipes::{ }; use windows_sys::Win32::System::Threading::{CreateEventA, WaitForSingleObject, INFINITE}; -// Named-pipe open-mode bits not available in windows-sys 0.48 -const PIPE_ACCESS_DUPLEX: u32 = 0x0000_0003; // PIPE_ACCESS_INBOUND | PIPE_ACCESS_OUTBOUND -const FILE_FLAG_OVERLAPPED_: u32 = 0x4000_0000; -const FILE_FLAG_FIRST_PIPE_INSTANCE_: u32 = 0x0008_0000; - /// Maximum IPC message payload size (4 MiB). pub const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; @@ -113,6 +109,36 @@ fn append_handle_suffix( Ok(()) } +/// Parse the handle-suffix wire format from a received message. +/// +/// `buf[..n]` contains the raw bytes received from the pipe. +/// Returns `(payload_len, owned_handles)`. +fn parse_message(buf: &[u8], n: usize) -> io::Result<(usize, Vec)> { + if n < 4 { + return Err(io::Error::from(io::ErrorKind::UnexpectedEof)); + } + let count_bytes: [u8; 4] = buf[n - 4..n] + .try_into() + .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?; + let count = u32::from_le_bytes(count_bytes) as usize; + + let handles_start = n + .checked_sub(4 + 8 * count) + .ok_or_else(|| io::Error::from(io::ErrorKind::InvalidData))?; + + let mut handles = Vec::with_capacity(count); + for i in 0..count { + let off = handles_start + 8 * i; + let val_bytes: [u8; 8] = buf[off..off + 8] + .try_into() + .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?; + let val = u64::from_le_bytes(val_bytes); + handles.push(unsafe { OwnedHandle::from_raw_handle(val as RawHandle) }); + } + + Ok((handles_start, handles)) +} + /// Read one message from `h` directly into `buf`. /// /// `buf` must be large enough to hold the entire wire message @@ -140,34 +166,7 @@ fn pipe_read( { return Err(io::Error::last_os_error()); } - let n = read as usize; - - // Parse the suffix: last 4 bytes are handle_count (LE u32). 
- if n < 4 { - return Err(io::Error::from(io::ErrorKind::UnexpectedEof)); - } - let count_bytes: [u8; 4] = buf[n - 4..n] - .try_into() - .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?; - let count = u32::from_le_bytes(count_bytes) as usize; - - // Before the count are 8 bytes × count handle values. - let handles_start = n - .checked_sub(4 + 8 * count) - .ok_or_else(|| io::Error::from(io::ErrorKind::InvalidData))?; - - let mut handles = Vec::with_capacity(count); - for i in 0..count { - let off = handles_start + 8 * i; - let val_bytes: [u8; 8] = buf[off..off + 8] - .try_into() - .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?; - let val = u64::from_le_bytes(val_bytes); - handles.push(unsafe { OwnedHandle::from_raw_handle(val as RawHandle) }); - } - - // payload occupies buf[0..handles_start] - Ok((handles_start, handles)) + parse_message(buf, read as usize) } fn pipe_write(h: SysHANDLE, data: &[u8], blocking: bool) -> io::Result<()> { @@ -200,12 +199,8 @@ fn pipe_write(h: SysHANDLE, data: &[u8], blocking: bool) -> io::Result<()> { fn create_pipe_server(name: &[u8], first_instance: bool) -> io::Result { let open_mode = PIPE_ACCESS_DUPLEX - | FILE_FLAG_OVERLAPPED_ - | if first_instance { - FILE_FLAG_FIRST_PIPE_INSTANCE_ - } else { - 0 - }; + | FILE_FLAG_OVERLAPPED + | if first_instance { FILE_FLAG_FIRST_PIPE_INSTANCE } else { 0 }; let h = unsafe { CreateNamedPipeA( @@ -358,6 +353,7 @@ impl SeqpacketListener { Ok(SeqpacketConn { inner: conn_handle, peer_pid: client_pid, + is_server: true, read_timeout: None, write_timeout: None, }) @@ -371,13 +367,13 @@ impl SeqpacketListener { } } -impl std::os::windows::io::AsRawHandle for SeqpacketListener { +impl AsRawHandle for SeqpacketListener { fn as_raw_handle(&self) -> RawHandle { SeqpacketListener::as_raw_handle(self) } } -impl std::os::windows::io::IntoRawHandle for SeqpacketListener { +impl IntoRawHandle for SeqpacketListener { fn into_raw_handle(self) -> RawHandle { self.inner .into_inner() @@ 
-390,6 +386,9 @@ impl std::os::windows::io::IntoRawHandle for SeqpacketListener { pub struct SeqpacketConn { pub(crate) inner: OwnedHandle, peer_pid: u32, + /// True for server-side handles (opened with `FILE_FLAG_OVERLAPPED` via `CreateNamedPipeA`); + /// these can be converted to a Tokio async pipe via `into_async_conn`. + is_server: bool, read_timeout: Option, write_timeout: Option, } @@ -435,6 +434,7 @@ impl SeqpacketConn { Ok(Self { inner, peer_pid: server_pid, + is_server: false, read_timeout: None, write_timeout: None, }) @@ -470,6 +470,7 @@ impl SeqpacketConn { let server = Self { inner: server_handle, peer_pid: pid, + is_server: true, read_timeout: None, write_timeout: None, }; @@ -481,6 +482,7 @@ impl SeqpacketConn { Self { inner: handle, peer_pid: client_pid, + is_server: true, read_timeout: None, write_timeout: None, } @@ -556,33 +558,90 @@ pub fn is_listening>(path: P) -> io::Result { Ok(SeqpacketConn::connect(path).is_ok()) } -/// The async connection type on Windows is the synchronous `SeqpacketConn` itself; -/// blocking I/O is wrapped in `tokio::task::block_in_place` for async compatibility. -pub type AsyncConn = SeqpacketConn; +/// Internal: wraps either a `NamedPipeServer` or `NamedPipeClient` for dispatch. +enum AsyncPipe { + Server(NamedPipeServer), + Client(NamedPipeClient), +} + +macro_rules! async_pipe { + ($pipe:expr, $method:ident($($args:expr),*)$($trailing:tt)*) => { + match &$pipe { + AsyncPipe::Server(s) => s.$method($($args),*)$($trailing)*, + AsyncPipe::Client(c) => c.$method($($args),*)$($trailing)*, + } + }; +} + +/// Async connection type for Windows named-pipe IPC. +/// +/// Wraps a Tokio `NamedPipeServer` or `NamedPipeClient` registered with the IOCP reactor, +/// enabling fully async recv/send without blocking any Tokio thread. 
+pub struct AsyncSeqpacketConn { + inner: AsyncPipe, + pub(crate) peer_pid: u32, +} + +impl AsyncSeqpacketConn { + pub fn peer_credentials(&self) -> io::Result { + Ok(PeerCredentials { pid: self.peer_pid, uid: 0 }) + } +} + +pub type AsyncConn = AsyncSeqpacketConn; impl SeqpacketConn { - /// Convert to an async connection (identity on Windows). + /// Convert to an async connection for use in async server dispatch loops. + /// + /// Requires a running Tokio runtime with IOCP support. + /// Only works for server-side handles (created with `FILE_FLAG_OVERLAPPED` via + /// `CreateNamedPipeA`). Client handles from `connect()` are synchronous and will + /// return an error. pub fn into_async_conn(self) -> io::Result { - Ok(self) + let raw = self.inner.into_raw_handle(); + let inner = if self.is_server { + AsyncPipe::Server(unsafe { NamedPipeServer::from_raw_handle(raw)? }) + } else { + AsyncPipe::Client(unsafe { NamedPipeClient::from_raw_handle(raw)? }) + }; + Ok(AsyncSeqpacketConn { inner, peer_pid: self.peer_pid }) } } /// Async receive on a Windows named pipe IPC connection. /// -/// Wraps `recv_raw_blocking` in `tokio::task::block_in_place` so it can be awaited -/// without blocking the Tokio thread pool. Requires a multi-thread Tokio runtime. +/// Waits for the pipe to become readable (via IOCP), then reads one complete message. +/// The handle-count suffix is stripped and any transferred handles are returned. pub async fn recv_raw_async( conn: &AsyncConn, buf: &mut [u8], ) -> io::Result<(usize, Vec)> { - tokio::task::block_in_place(|| conn.recv_raw_blocking(buf)) + loop { + async_pipe!(conn.inner, readable().await)?; + match async_pipe!(conn.inner, try_read(buf)) { + Ok(0) => return Err(io::Error::from(io::ErrorKind::BrokenPipe)), + Ok(n) => return parse_message(buf, n), + Err(e) if e.kind() == io::ErrorKind::WouldBlock => continue, + Err(e) => return Err(e), + } + } } /// Async send on a Windows named pipe IPC connection. 
/// -/// Wraps `send_raw_blocking` in `tokio::task::block_in_place`. -/// Server responses never carry handles (handles flow client→server only via in-band suffix). +/// Server responses never carry handles; a zero-handle-count suffix is appended automatically. +/// Waits for writability (via IOCP) and writes the message atomically. pub async fn send_raw_async(conn: &AsyncConn, data: &[u8]) -> io::Result<()> { - let mut data_vec = data.to_vec(); - tokio::task::block_in_place(|| conn.send_raw_blocking(&mut data_vec, &[])) + // Server responses never carry handles; append a 0-handle-count suffix (4 bytes). + let mut buf = data.to_vec(); + buf.extend_from_slice(&0u32.to_le_bytes()); + loop { + async_pipe!(conn.inner, writable().await)?; + match async_pipe!(conn.inner, try_write(&buf)) { + Ok(n) if n == buf.len() => return Ok(()), + Ok(_) => return Err(io::Error::from(io::ErrorKind::WriteZero)), + Err(e) if e.kind() == io::ErrorKind::WouldBlock => continue, + Err(e) => return Err(e), + } + } } diff --git a/datadog-sidecar/src/setup/windows.rs b/datadog-sidecar/src/setup/windows.rs index 521783cf42..0327a016d5 100644 --- a/datadog-sidecar/src/setup/windows.rs +++ b/datadog-sidecar/src/setup/windows.rs @@ -99,8 +99,8 @@ mod tests { // can't listen twice when some listener is active assert!(liaison.attempt_listen().unwrap().is_none()); - let mut client: SeqpacketConn = liaison.connect_to_server().unwrap(); - let mut srv: SeqpacketConn = listener.try_accept().unwrap(); + let client: SeqpacketConn = liaison.connect_to_server().unwrap(); + let srv: SeqpacketConn = listener.try_accept().unwrap(); client.send_raw_blocking(&mut vec![255], &[]).unwrap(); let mut buf = vec![0u8; datadog_ipc::MAX_MESSAGE_SIZE + datadog_ipc::HANDLE_SUFFIX_SIZE]; let (n, _) = srv.recv_raw_blocking(&mut buf).unwrap(); From 960b2f6204d11b5a8a5435ae85ed526f472b104e Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Tue, 10 Mar 2026 19:19:10 +0100 Subject: [PATCH 06/29] rm -rf / (not quite) Signed-off-by: 
Bob Weinand --- Cargo.lock | 3 - datadog-ipc-macros/src/lib.rs | 150 +------------ datadog-ipc/Cargo.toml | 3 - datadog-ipc/src/platform/message.rs | 29 --- datadog-ipc/src/platform/unix/message.rs | 10 - datadog-ipc/src/platform/unix/mod.rs | 3 - datadog-ipc/src/platform/unix/sockets/mod.rs | 42 ++-- datadog-ipc/src/platform/windows/channel.rs | 212 ------------------ .../platform/windows/channel/async_channel.rs | 149 ------------ .../src/platform/windows/channel/metadata.rs | 174 -------------- datadog-ipc/src/platform/windows/message.rs | 13 -- datadog-ipc/src/platform/windows/mod.rs | 6 - tools/docker/Dockerfile.build | 1 - 13 files changed, 32 insertions(+), 763 deletions(-) delete mode 100644 datadog-ipc/src/platform/unix/message.rs delete mode 100644 datadog-ipc/src/platform/windows/channel.rs delete mode 100644 datadog-ipc/src/platform/windows/channel/async_channel.rs delete mode 100644 datadog-ipc/src/platform/windows/channel/metadata.rs delete mode 100644 datadog-ipc/src/platform/windows/message.rs diff --git a/Cargo.lock b/Cargo.lock index d9fdabd98e..9b51b6ae26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1430,7 +1430,6 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", - "bytes", "criterion", "datadog-ipc-macros", "futures", @@ -1442,14 +1441,12 @@ dependencies = [ "memfd", "nix 0.29.0", "page_size", - "pin-project", "pretty_assertions", "sendfd", "serde", "spawn_worker", "tempfile", "tokio", - "tokio-util", "tracing", "tracing-subscriber", "winapi 0.3.9", diff --git a/datadog-ipc-macros/src/lib.rs b/datadog-ipc-macros/src/lib.rs index 44fb11c8ee..8c544965c0 100644 --- a/datadog-ipc-macros/src/lib.rs +++ b/datadog-ipc-macros/src/lib.rs @@ -7,23 +7,6 @@ use proc_macro2::Span; use quote::{format_ident, quote, ToTokens}; use syn::{FnArg, Ident, ItemTrait, ReturnType, TraitItem, Type}; -fn snake_to_camel(ident_str: &str) -> String { - let mut camel_ty = String::with_capacity(ident_str.len()); - let mut last_char_was_underscore = true; - for c in 
ident_str.chars() { - match c { - '_' => last_char_was_underscore = true, - c if last_char_was_underscore => { - camel_ty.extend(c.to_uppercase()); - last_char_was_underscore = false; - } - c => camel_ty.extend(c.to_lowercase()), - } - } - camel_ty.shrink_to_fit(); - camel_ty -} - fn is_unit_type(ty: &Type) -> bool { matches!(ty, Type::Tuple(t) if t.elems.is_empty()) } @@ -34,133 +17,6 @@ fn has_attr(attrs: &[syn::Attribute], name: &str) -> bool { .any(|a| a.meta.path().to_token_stream().to_string() == name) } -// --------------------------------------------------------------------------- -// Old macro — kept during migration to the new #[service] macro. -// --------------------------------------------------------------------------- - -#[proc_macro_attribute] -pub fn impl_transfer_handles(_attr: TokenStream, input: TokenStream) -> TokenStream { - let mut item: ItemTrait = syn::parse(input).unwrap(); - let req_name = format_ident!("{}Request", item.ident); - let res_name = format_ident!("{}Response", item.ident); - let mut arms_req_move: Vec = vec![]; - let mut arms_req_recv: Vec = vec![]; - let mut arms_res_move: Vec = vec![]; - let mut arms_res_recv: Vec = vec![]; - for inner in item.items.iter_mut() { - if let TraitItem::Fn(ref mut func) = inner { - let mut params: Vec = vec![]; - let mut stmts_move: Vec = vec![]; - let mut stmts_recv: Vec = vec![]; - for any_arg in func.sig.inputs.iter_mut() { - if let FnArg::Typed(ref mut arg) = any_arg { - let orig_attr_num = arg.attrs.len(); - arg.attrs.retain(|attr| { - attr.meta.path().to_token_stream().to_string() != "SerializedHandle" - }); - if orig_attr_num != arg.attrs.len() { - if let syn::Pat::Ident(ref ident) = *arg.pat { - params.push(syn::FieldPat { - attrs: vec![], - member: syn::Member::Named(ident.ident.clone()), - colon_token: None, - pat: Box::new(syn::parse_quote! { #ident }), - }); - stmts_move.push( - syn::parse_quote! 
{ __transport.copy_handle(#ident.clone().into())?; }, - ); - stmts_recv - .push(syn::parse_quote! { #ident.receive_handles(__transport)?; }); - } - } - } - } - let method = Ident::new( - &snake_to_camel(&func.sig.ident.to_string()), - Span::mixed_site(), - ); - if !params.is_empty() { - arms_req_move.push(syn::parse_quote! { - #req_name::#method { #(#params,)* .. } => { - #(#stmts_move)* - Ok(()) - } - }); - arms_req_recv.push(syn::parse_quote! { - #req_name::#method { #(#params,)* .. } => { - #(#stmts_recv)* - Ok(()) - } - }); - } - let orig_attr_num = func.attrs.len(); - func.attrs.retain(|attr| { - attr.meta.path().to_token_stream().to_string() != "SerializedHandle" - }); - if orig_attr_num != func.attrs.len() { - arms_res_move.push(syn::parse_quote! { - #res_name::#method(response) => response.copy_handles(transport) - }); - arms_res_recv.push(syn::parse_quote! { - #res_name::#method(response) => response.receive_handles(transport) - }); - } - } - } - - TokenStream::from(quote! { - #item - - impl datadog_ipc::handles::TransferHandles for #req_name { - fn copy_handles( - &self, - __transport: Transport, - ) -> Result<(), Transport::Error> { - match self { - #(#arms_req_move,)* - _ => Ok(()), - } - } - - fn receive_handles( - &mut self, - __transport: Transport, - ) -> Result<(), Transport::Error> { - match self { - #(#arms_req_recv,)* - _ => Ok(()), - } - } - } - - impl datadog_ipc::handles::TransferHandles for #res_name { - fn copy_handles( - &self, - transport: Transport, - ) -> Result<(), Transport::Error> { - match self { - #(#arms_res_move,)* - _ => Ok(()), - } - } - - fn receive_handles( - &mut self, - transport: Transport, - ) -> Result<(), Transport::Error> { - match self { - #(#arms_res_recv,)* - _ => Ok(()), - } - } - } - }) -} - -// --------------------------------------------------------------------------- -// New #[service] macro -// --------------------------------------------------------------------------- - // Each param stores: 
(non-SerializedHandle attrs, name, type, is_handle). // The attrs include #[cfg(...)], allowing conditional compilation of parameters. type ParamInfo = (Vec, Ident, Box); @@ -219,7 +75,11 @@ fn collect_methods(item: &ItemTrait) -> Vec { .filter(|a| a.meta.path().to_token_stream().to_string() != "SerializedHandle") .cloned() .collect(); - params.push((pass_through_attrs, ident_pat.ident.clone(), pat_ty.ty.clone())); + params.push(( + pass_through_attrs, + ident_pat.ident.clone(), + pat_ty.ty.clone(), + )); } methods.push(MethodInfo { diff --git a/datadog-ipc/Cargo.toml b/datadog-ipc/Cargo.toml index bebd519246..8ccd808bc9 100644 --- a/datadog-ipc/Cargo.toml +++ b/datadog-ipc/Cargo.toml @@ -9,14 +9,11 @@ publish = false [dependencies] anyhow = { version = "1.0" } bincode = { version = "1" } -bytes = { version = "1.11.1" } futures = { version = "0.3", default-features = false } io-lifetimes = { version = "1.0" } page_size = "0.6.0" -pin-project = { version = "1" } memfd = { version = "0.6" } serde = { version = "1.0", default-features = false, features = ["derive"] } -tokio-util = { version = "0.7.11", features = ["codec"] } libc = { version = "0.2" } libdd-tinybytes = { path = "../libdd-tinybytes", optional = true } diff --git a/datadog-ipc/src/platform/message.rs b/datadog-ipc/src/platform/message.rs index dfbea23b22..df3b95ee08 100644 --- a/datadog-ipc/src/platform/message.rs +++ b/datadog-ipc/src/platform/message.rs @@ -1,34 +1,5 @@ // Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use crate::handles::{HandlesTransport, TransferHandles}; -use crate::platform::Message; - /// Maximum file descriptors transferable in a single message. 
pub const MAX_FDS: usize = 20; - - -impl Message { - pub fn ref_item(&self) -> &Item { - &self.item - } -} - -impl TransferHandles for Message -where - T: TransferHandles, -{ - fn copy_handles(&self, mover: M) -> Result<(), M::Error> - where - M: HandlesTransport, - { - self.item.copy_handles(mover) - } - - fn receive_handles

(&mut self, provider: P) -> Result<(), P::Error> - where - P: HandlesTransport, - { - self.item.receive_handles(provider) - } -} diff --git a/datadog-ipc/src/platform/unix/message.rs b/datadog-ipc/src/platform/unix/message.rs deleted file mode 100644 index cb0336ff4b..0000000000 --- a/datadog-ipc/src/platform/unix/message.rs +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use serde::{Deserialize, Serialize}; - -#[derive(Deserialize, Serialize)] -pub struct Message { - pub item: Item, - pub pid: libc::pid_t, -} diff --git a/datadog-ipc/src/platform/unix/mod.rs b/datadog-ipc/src/platform/unix/mod.rs index eace1efdf0..3bfda6011d 100644 --- a/datadog-ipc/src/platform/unix/mod.rs +++ b/datadog-ipc/src/platform/unix/mod.rs @@ -10,9 +10,6 @@ pub use sockets::*; mod handles; pub use handles::*; -mod message; -pub use message::*; - #[cfg(target_os = "macos")] mod mem_handle_macos; #[cfg(target_os = "macos")] diff --git a/datadog-ipc/src/platform/unix/sockets/mod.rs b/datadog-ipc/src/platform/unix/sockets/mod.rs index 9688e4b506..8e064acf43 100644 --- a/datadog-ipc/src/platform/unix/sockets/mod.rs +++ b/datadog-ipc/src/platform/unix/sockets/mod.rs @@ -7,8 +7,8 @@ //! - macOS: `AF_UNIX SOCK_DGRAM` with an fd-passing connection handshake. This emulates the //! semantics which SOCK_SEQPACKET provides us on Linux. 
-pub use nix::sys::socket::{ControlMessage, ControlMessageOwned, MsgFlags, UnixAddr}; use nix::sys::socket::{recvmsg, sendmsg, AddressFamily, SockFlag, SockType}; +pub use nix::sys::socket::{ControlMessage, ControlMessageOwned, MsgFlags, UnixAddr}; use std::{ io, os::unix::io::{AsRawFd, FromRawFd, OwnedFd, RawFd}, @@ -26,11 +26,11 @@ pub use linux::{bind_abstract, connect_abstract, is_listening}; #[cfg(not(target_os = "linux"))] pub use macos::is_listening; +use crate::platform::message::MAX_FDS; #[cfg(not(target_os = "macos"))] use linux::get_peer_credentials; #[cfg(target_os = "macos")] use macos::get_peer_credentials; -use crate::platform::message::MAX_FDS; /// Maximum IPC message payload size (4 MiB). pub const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; @@ -73,7 +73,12 @@ pub(super) fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> { Ok(()) } -pub(super) fn sendmsg_raw(fd: RawFd, data: &[u8], fds: &[RawFd], flags: MsgFlags) -> io::Result<()> { +pub(super) fn sendmsg_raw( + fd: RawFd, + data: &[u8], + fds: &[RawFd], + flags: MsgFlags, +) -> io::Result<()> { let iov = [io::IoSlice::new(data)]; if fds.is_empty() { sendmsg::(fd, &iov, &[], flags, None) @@ -89,14 +94,13 @@ pub(super) fn recvmsg_raw( buf: &mut [u8], flags: MsgFlags, ) -> io::Result<(usize, Vec)> { - let cmsg_space = unsafe { - libc::CMSG_SPACE((size_of::() * MAX_FDS) as libc::c_uint) - } as usize; + let cmsg_space = + unsafe { libc::CMSG_SPACE((size_of::() * MAX_FDS) as libc::c_uint) } as usize; let mut cmsg_buf = vec![0u8; cmsg_space]; let mut iov = [io::IoSliceMut::new(buf)]; - let msg = recvmsg::(fd, &mut iov, Some(&mut cmsg_buf), flags) - .map_err(io::Error::from)?; + let msg = + recvmsg::(fd, &mut iov, Some(&mut cmsg_buf), flags).map_err(io::Error::from)?; let bytes = msg.bytes; if bytes == 0 { @@ -115,12 +119,20 @@ pub(super) fn recvmsg_raw( Ok((bytes, owned_fds)) } -pub(super) fn poll_with_timeout(fd: RawFd, event: libc::c_short, timeout: Option) -> io::Result<()> { 
+pub(super) fn poll_with_timeout( + fd: RawFd, + event: libc::c_short, + timeout: Option, +) -> io::Result<()> { let timeout_ms: i32 = match timeout { None => -1, Some(d) => d.as_millis().min(i32::MAX as u128) as i32, }; - let mut pfd = libc::pollfd { fd, events: event, revents: 0 }; + let mut pfd = libc::pollfd { + fd, + events: event, + revents: 0, + }; loop { let ret = unsafe { libc::poll(&mut pfd, 1, timeout_ms) }; if ret > 0 { @@ -201,7 +213,10 @@ pub struct SeqpacketConn { } impl SeqpacketConn { - pub(super) fn from_owned(fd: OwnedFd, #[cfg(target_os = "macos")] liveness: Option) -> io::Result { + pub(super) fn from_owned( + fd: OwnedFd, + #[cfg(target_os = "macos")] liveness: Option, + ) -> io::Result { set_nonblocking(fd.as_raw_fd(), true)?; Ok(Self { inner: fd, @@ -297,10 +312,7 @@ pub type AsyncConn = AsyncFd; /// Async receive on a Tokio `AsyncFd`-wrapped IPC connection. /// /// Used by the server dispatch loop (generated by `#[service]` macro). -pub async fn recv_raw_async( - fd: &AsyncConn, - buf: &mut [u8], -) -> io::Result<(usize, Vec)> { +pub async fn recv_raw_async(fd: &AsyncConn, buf: &mut [u8]) -> io::Result<(usize, Vec)> { loop { let mut guard = fd.readable().await?; match guard.try_io(|inner| recvmsg_raw(inner.as_raw_fd(), buf, MsgFlags::empty())) { diff --git a/datadog-ipc/src/platform/windows/channel.rs b/datadog-ipc/src/platform/windows/channel.rs deleted file mode 100644 index b565c5ba6d..0000000000 --- a/datadog-ipc/src/platform/windows/channel.rs +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use crate::handles::TransferHandles; -use crate::platform::metadata::ProcessHandle; -use crate::platform::Message; -use std::ffi::c_void; -use std::fmt::{Debug, Formatter, Pointer}; -use std::os::windows::io::AsRawHandle; -use std::os::windows::prelude::OwnedHandle; -use std::ptr::{null, null_mut}; -use std::{ - io::{self, Read, Write}, - time::Duration, -}; -use winapi::shared::winerror::ERROR_IO_PENDING; -use winapi::um::winbase::INFINITE; -use windows_sys::Win32::Foundation::{HANDLE, WAIT_OBJECT_0}; -use windows_sys::Win32::Storage::FileSystem::{ReadFile, WriteFile}; -use windows_sys::Win32::System::Pipes::{ - PeekNamedPipe, SetNamedPipeHandleState, PIPE_NOWAIT, PIPE_WAIT, -}; -use windows_sys::Win32::System::Threading::{CreateEventA, WaitForSingleObject}; -use windows_sys::Win32::System::IO::{GetOverlappedResult, OVERLAPPED, OVERLAPPED_0}; - -pub mod async_channel; -pub use async_channel::*; -pub mod metadata; - -use self::metadata::ChannelMetadata; - -struct Inner { - overlapped: OVERLAPPED, - handle: OwnedHandle, - read_timeout: Option, - write_timeout: Option, - blocking: bool, - client: bool, -} - -unsafe impl Send for Inner {} - -impl Debug for Inner { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - Pointer::fmt(&self.handle.as_raw_handle(), f) - } -} - -#[derive(Debug)] -pub struct Channel { - inner: Inner, - pub metadata: ChannelMetadata, -} - -impl Channel { - pub fn set_read_timeout(&mut self, timeout: Option) -> io::Result<()> { - self.inner.read_timeout = timeout; - Ok(()) - } - - pub fn set_write_timeout(&mut self, timeout: Option) -> io::Result<()> { - self.inner.write_timeout = timeout; - Ok(()) - } - - pub fn set_nonblocking(&mut self, nonblocking: bool) -> io::Result<()> { - self.inner.blocking = !nonblocking; - let mode = if nonblocking { PIPE_NOWAIT } else { PIPE_WAIT }; - if unsafe { - SetNamedPipeHandleState( - self.inner.handle.as_raw_handle() as HANDLE, - 
&mode, - null(), - null(), - ) - } != 0 - { - Ok(()) - } else { - Err(io::Error::last_os_error()) - } - } - - pub fn probe_readable(&self) -> bool { - let mut available_bytes = 0; - if unsafe { - PeekNamedPipe( - self.inner.handle.as_raw_handle() as HANDLE, - null_mut(), - 0, - null_mut(), - &mut available_bytes, - null_mut(), - ) - } != 0 - { - available_bytes > 0 - } else { - true - } - } - - fn wait_io_overlapped(&mut self, duration: Option) -> Result { - match unsafe { - WaitForSingleObject( - self.inner.overlapped.hEvent, - duration.map(|d| d.as_millis() as u32).unwrap_or(INFINITE), - ) - } { - WAIT_OBJECT_0 => { - let mut transferred: u32 = 0; - if unsafe { - GetOverlappedResult( - self.inner.handle.as_raw_handle() as HANDLE, - &self.inner.overlapped, - &mut transferred, - 1, - ) - } == 0 - { - Err(io::Error::last_os_error()) - } else { - Ok(transferred as usize) - } - } - e => Err(io::Error::from_raw_os_error(e as i32)), - } - } - - pub fn create_message(&mut self, item: T) -> Result, io::Error> - where - T: TransferHandles, - { - self.metadata.create_message(item) - } - - pub fn from_client_handle_and_pid(h: OwnedHandle, pid: ProcessHandle) -> Channel { - Channel { - inner: Inner { - overlapped: OVERLAPPED { - Internal: 0, - InternalHigh: 0, - Anonymous: OVERLAPPED_0 { - Pointer: null_mut(), - }, - hEvent: unsafe { CreateEventA(null_mut(), 1, 0, null_mut()) }, - }, - handle: h, - read_timeout: None, - write_timeout: None, - blocking: true, - client: true, - }, - metadata: ChannelMetadata::from_process_handle(pid), - } - } -} - -impl Read for Channel { - fn read<'a>(&mut self, buf: &mut [u8]) -> io::Result { - let mut bytes_read: u32 = 0; - if unsafe { - ReadFile( - self.inner.handle.as_raw_handle() as HANDLE, - buf.as_mut_ptr() as *mut c_void, - buf.len() as u32, - &mut bytes_read, - &mut self.inner.overlapped as *mut OVERLAPPED, - ) - } != 0 - { - Ok(bytes_read as usize) - } else { - let error = io::Error::last_os_error(); - if Some(ERROR_IO_PENDING as 
i32) == error.raw_os_error() { - self.wait_io_overlapped(self.inner.read_timeout) - } else { - Err(error) - } - } - } -} - -impl Write for Channel { - fn write(&mut self, buf: &[u8]) -> io::Result { - let mut bytes_written: u32 = 0; - if unsafe { - WriteFile( - self.inner.handle.as_raw_handle() as HANDLE, - buf.as_ptr(), - buf.len() as u32, - &mut bytes_written, - &mut self.inner.overlapped as *mut OVERLAPPED, - ) - } != 0 - { - Ok(bytes_written as usize) - } else { - let error = io::Error::last_os_error(); - if Some(ERROR_IO_PENDING as i32) == error.raw_os_error() { - self.wait_io_overlapped(self.inner.write_timeout) - } else { - Err(error) - } - } - } - - fn flush(&mut self) -> io::Result<()> { - // No-op on windows named pipes - Ok(()) - } -} diff --git a/datadog-ipc/src/platform/windows/channel/async_channel.rs b/datadog-ipc/src/platform/windows/channel/async_channel.rs deleted file mode 100644 index 95741376df..0000000000 --- a/datadog-ipc/src/platform/windows/channel/async_channel.rs +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use crate::platform::metadata::ProcessHandle; -use crate::platform::Channel; -use pin_project::pin_project; -use std::fmt::Debug; -use std::os::windows::io::AsRawHandle; -use std::{ - io, - sync::{Arc, Mutex}, - task::Poll, -}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio::net::windows::named_pipe::{NamedPipeClient, NamedPipeServer}; -use winapi::shared::wtypesbase::ULONG; -use winapi::um::winbase::{GetNamedPipeClientProcessId, GetNamedPipeServerProcessId}; -use winapi::um::winnt::HANDLE; - -use super::ChannelMetadata; - -#[derive(Debug)] -// Note: needs to be #[pin] because impls on AsyncChannel require #[pin] -#[pin_project(project = NamedPipeProject)] -pub enum NamedPipe { - Server(#[pin] NamedPipeServer), - Client(#[pin] NamedPipeClient), -} - -#[derive(Debug)] -#[pin_project] -pub struct AsyncChannel { - #[pin] - inner: NamedPipe, - pub metadata: Arc>, -} - -macro_rules! use_inner { - ($base:expr, $method:ident($($args:expr),*)) => { - match $base.inner { - NamedPipe::Client(ref client) => client.$method($($args),*), - NamedPipe::Server(ref server) => server.$method($($args),*), - } - } -} - -impl AsyncChannel { - pub fn from_raw_and_process(pipe: NamedPipe, process_handle: ProcessHandle) -> AsyncChannel { - AsyncChannel { - inner: pipe, - metadata: Arc::new(Mutex::new(ChannelMetadata::from_process_handle( - process_handle, - ))), - } - } - - pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - use_inner!(self, try_read(buf)) - } - - pub fn try_write(&self, buf: &[u8]) -> io::Result { - use_inner!(self, try_write(buf)) - } - - pub fn handle(&self) -> i32 { - use_inner!(self, as_raw_handle()) as i32 - } -} - -impl From for AsyncChannel { - fn from(pipe: NamedPipeServer) -> Self { - let mut pid: ULONG = 0; - unsafe { - GetNamedPipeClientProcessId(pipe.as_raw_handle() as HANDLE, &mut pid); - } - AsyncChannel::from_raw_and_process(NamedPipe::Server(pipe), ProcessHandle::Pid(pid)) - } 
-} - -impl From for AsyncChannel { - fn from(pipe: NamedPipeClient) -> Self { - let mut pid: ULONG = 0; - unsafe { - GetNamedPipeServerProcessId(pipe.as_raw_handle() as HANDLE, &mut pid); - } - AsyncChannel::from_raw_and_process(NamedPipe::Client(pipe), ProcessHandle::Pid(pid)) - } -} - -impl TryFrom for AsyncChannel { - type Error = io::Error; - - fn try_from(value: Channel) -> Result { - Ok(AsyncChannel { - inner: unsafe { - let handle = value.inner.handle.as_raw_handle(); - if value.inner.client { - NamedPipe::Client(NamedPipeClient::from_raw_handle(handle)?) - } else { - NamedPipe::Server(NamedPipeServer::from_raw_handle(handle)?) - } - }, - metadata: Arc::new(Mutex::new(value.metadata)), - }) - } -} - -macro_rules! pipe_inner { - ($pin:expr, $method:ident($($args:expr),+)) => { - match $pin.project().inner.project() { - NamedPipeProject::Client(ref mut client) => client.as_mut().$method($($args),+), - NamedPipeProject::Server(ref mut server) => server.as_mut().$method($($args),+), - } - } -} - -impl AsyncWrite for AsyncChannel { - fn poll_write( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &[u8], - ) -> Poll> { - pipe_inner!(self, poll_write(cx, buf)) - } - - fn poll_flush( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - pipe_inner!(self, poll_flush(cx)) - } - - fn poll_shutdown( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - pipe_inner!(self, poll_shutdown(cx)) - } -} - -impl AsyncRead for AsyncChannel { - fn poll_read( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> Poll> { - pipe_inner!(self, poll_read(cx, buf)) - } -} diff --git a/datadog-ipc/src/platform/windows/channel/metadata.rs b/datadog-ipc/src/platform/windows/channel/metadata.rs deleted file mode 100644 index 65b0ef6026..0000000000 --- a/datadog-ipc/src/platform/windows/channel/metadata.rs +++ /dev/null @@ -1,174 +0,0 @@ -// 
Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use std::collections::HashMap; -use std::fmt::{Debug, Formatter, Pointer}; -use std::io; -use std::os::windows::io::{AsRawHandle, FromRawHandle, OwnedHandle}; -use std::os::windows::prelude::RawHandle; -use std::ptr::null_mut; -use winapi::shared::minwindef::ULONG; -use winapi::um::handleapi::{CloseHandle, DuplicateHandle}; -use winapi::um::processthreadsapi::{GetCurrentProcess, OpenProcess}; -use winapi::um::winnt::{DUPLICATE_SAME_ACCESS, HANDLE, PROCESS_DUP_HANDLE}; - -use crate::{ - handles::{HandlesTransport, TransferHandles}, - platform::{Message, PlatformHandle}, -}; - -// A small HANDLE wrapper, so that it can have impl Drop. -// We cannot impl Drop for ProcessHandle, otherwise it's closed during moving of ProcessHandle. -pub struct WrappedHANDLE(HANDLE); - -impl Drop for WrappedHANDLE { - fn drop(&mut self) { - unsafe { - CloseHandle(self.0); - } - } -} - -// Deferred ProcessHandle getter -pub enum ProcessHandle { - Handle(WrappedHANDLE), - Pid(ULONG), - Getter(Box io::Result>), -} - -unsafe impl Send for ProcessHandle {} - -impl ProcessHandle { - pub fn get(&mut self) -> io::Result { - match self { - ProcessHandle::Handle(handle) => { - return Ok(handle.0); - } - ProcessHandle::Pid(pid) => { - let handle = unsafe { OpenProcess(PROCESS_DUP_HANDLE, 0, *pid) }; - if handle.is_null() { - return Err(io::Error::last_os_error()); - } - *self = ProcessHandle::Handle(WrappedHANDLE(handle)); - } - ProcessHandle::Getter(getter) => *self = getter()?, - }; - self.get() - } - - pub fn send_file_handle(&mut self, handle: RawHandle) -> io::Result { - let mut dup_handle: HANDLE = null_mut(); - unsafe { - if DuplicateHandle( - GetCurrentProcess(), - handle as HANDLE, - self.get()?, - &mut dup_handle, - 0, - 0, - DUPLICATE_SAME_ACCESS, - ) == 0 - { - return Err(io::Error::last_os_error()); - } - } - Ok(dup_handle as RawHandle) - } -} - -impl HandlesTransport for &mut 
ChannelMetadata { - type Error = io::Error; - - fn copy_handle(self, handle: PlatformHandle) -> Result<(), Self::Error> { - self.enqueue_for_sending(handle); - Ok(()) - } - - fn provide_handle(self, hint: &PlatformHandle) -> Result, Self::Error> { - self.find_handle(hint).ok_or_else(|| { - io::Error::new( - io::ErrorKind::NotFound, - "handle not found in received handles map", - ) - }) - } -} - -impl Debug for ProcessHandle { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - ProcessHandle::Handle(handle) => Pointer::fmt(&handle.0, f), - ProcessHandle::Pid(pid) => pid.fmt(f), - ProcessHandle::Getter(_) => "".fmt(f), - } - } -} - -#[derive(Debug)] -pub struct ChannelMetadata { - handles_to_send: Vec>, - handles_received: HashMap, - process_handle: ProcessHandle, -} - -impl ChannelMetadata { - pub fn from_process_handle(process_handle: ProcessHandle) -> Self { - Self { - handles_to_send: Default::default(), - handles_received: Default::default(), - process_handle, - } - } - - pub fn unwrap_message(&mut self, message: Message) -> Result - where - T: TransferHandles, - { - let mut item = message.item; - self.handles_received = message.handles; - - item.receive_handles(self)?; - Ok(item) - } - - pub fn create_message(&mut self, item: T) -> Result, io::Error> - where - T: TransferHandles, - { - item.copy_handles(&mut *self)?; - - let mut handle_map = HashMap::new(); - for handle in self.handles_to_send.drain(..) { - handle_map.insert( - handle.fd as u64, - self.process_handle - .send_file_handle(handle.as_raw_handle())? 
as u64, - ); - } - - let message = Message { - item, - handles: handle_map, - }; - - Ok(message) - } - - pub(crate) fn enqueue_for_sending(&mut self, handle: PlatformHandle) { - self.handles_to_send.push(handle.to_untyped()) - } - - pub(crate) fn find_handle(&mut self, hint: &PlatformHandle) -> Option> { - if hint.as_raw_handle() < 0 as RawHandle { - return Some(hint.clone()); - } - - let fd = self.handles_received.get(&(hint.as_raw_handle() as u64)); - - fd.map(|handle| unsafe { PlatformHandle::from_raw_handle(*handle as RawHandle) }) - } - - pub fn process_handle(&mut self) -> Option { - self.process_handle.get().ok() - } -} diff --git a/datadog-ipc/src/platform/windows/message.rs b/datadog-ipc/src/platform/windows/message.rs deleted file mode 100644 index 3134bec61e..0000000000 --- a/datadog-ipc/src/platform/windows/message.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -#[derive(Deserialize, Serialize)] -pub struct Message { - pub item: Item, - // The handles are to be sent before via DuplicateHandle - post-transfer reassigns the correct - // handle - pub handles: HashMap, -} diff --git a/datadog-ipc/src/platform/windows/mod.rs b/datadog-ipc/src/platform/windows/mod.rs index bc0aff2cbd..7dcc12a8ab 100644 --- a/datadog-ipc/src/platform/windows/mod.rs +++ b/datadog-ipc/src/platform/windows/mod.rs @@ -1,15 +1,9 @@ // Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -mod channel; -pub use channel::*; - mod platform_handle; pub use platform_handle::*; -mod message; -pub use message::*; - mod mem_handle; pub(crate) use mem_handle::*; diff --git a/tools/docker/Dockerfile.build b/tools/docker/Dockerfile.build index b6858b6952..7eb6456b83 100644 --- a/tools/docker/Dockerfile.build +++ b/tools/docker/Dockerfile.build @@ -112,7 +112,6 @@ COPY "spawn_worker/Cargo.toml" "spawn_worker/" COPY "tests/spawn_from_lib/Cargo.toml" "tests/spawn_from_lib/" COPY "datadog-ipc/Cargo.toml" "datadog-ipc/" COPY "datadog-ipc-macros/Cargo.toml" "datadog-ipc-macros/" -COPY "datadog-ipc/tarpc/Cargo.toml" "datadog-ipc/tarpc/" COPY "libdd-data-pipeline/Cargo.toml" "libdd-data-pipeline/" COPY "libdd-data-pipeline-ffi/Cargo.toml" "libdd-data-pipeline-ffi/" COPY "bin_tests/Cargo.toml" "bin_tests/" From cf553a97665846c2f9cab8da064a64cf5e3565f8 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Wed, 11 Mar 2026 20:44:56 +0100 Subject: [PATCH 07/29] fmt Signed-off-by: Bob Weinand --- datadog-ipc/benches/ipc.rs | 5 +- datadog-ipc/src/client.rs | 15 +++- datadog-ipc/src/handles.rs | 5 +- datadog-ipc/src/lib.rs | 6 +- datadog-ipc/src/platform/unix/handles.rs | 5 +- datadog-ipc/src/platform/unix/mod.rs | 1 - .../src/platform/unix/sockets/linux.rs | 13 ++- .../src/platform/unix/sockets/macos.rs | 50 ++++++----- datadog-ipc/src/platform/unix/sockets/mod.rs | 4 +- datadog-ipc/src/platform/windows/handles.rs | 5 +- datadog-ipc/src/platform/windows/sockets.rs | 62 +++++++++---- datadog-sidecar-ffi/src/lib.rs | 12 ++- datadog-sidecar/src/service/blocking.rs | 26 ++++-- datadog-sidecar/src/service/mod.rs | 4 +- datadog-sidecar/src/service/sender.rs | 33 ++++--- datadog-sidecar/src/service/session_info.rs | 3 +- .../src/service/sidecar_interface.rs | 4 +- datadog-sidecar/src/service/sidecar_server.rs | 87 ++++++++++++++----- datadog-sidecar/src/setup/windows.rs | 3 +- datadog-sidecar/src/unix.rs | 5 +- 
datadog-sidecar/src/windows.rs | 3 +- 21 files changed, 226 insertions(+), 125 deletions(-) diff --git a/datadog-ipc/benches/ipc.rs b/datadog-ipc/benches/ipc.rs index 8a39978891..929906fd89 100644 --- a/datadog-ipc/benches/ipc.rs +++ b/datadog-ipc/benches/ipc.rs @@ -38,7 +38,10 @@ fn criterion_benchmark(c: &mut Criterion) { }); #[cfg(not(target_arch = "aarch64"))] - println!("Total requests handled: {}", channel.call_req_cnt().unwrap()); + println!( + "Total requests handled: {}", + channel.call_req_cnt().unwrap() + ); drop(channel); worker.join().unwrap(); diff --git a/datadog-ipc/src/client.rs b/datadog-ipc/src/client.rs index 8ec5d01746..42dc5b34ff 100644 --- a/datadog-ipc/src/client.rs +++ b/datadog-ipc/src/client.rs @@ -115,16 +115,23 @@ impl IpcClientConn { /// intermediate 0-byte acks for prior fire-and-forget messages, until the /// ack for this specific send arrives. Returns the response bytes and any /// transferred file descriptors. - pub fn call(&mut self, data: &mut Vec, fds: &[RawFd]) -> io::Result<(Vec, Vec)> { + pub fn call( + &mut self, + data: &mut Vec, + fds: &[RawFd], + ) -> io::Result<(Vec, Vec)> { self.conn.send_raw_blocking(data, fds).inspect_err(|_| { self.closed = true; })?; self.send_count += 1; let target = self.send_count; loop { - let (n, resp_fds) = self.conn.recv_raw_blocking(&mut self.recv_buf).inspect_err(|_| { - self.closed = true; - })?; + let (n, resp_fds) = self + .conn + .recv_raw_blocking(&mut self.recv_buf) + .inspect_err(|_| { + self.closed = true; + })?; self.ack_count += 1; if self.ack_count == target { return Ok((self.recv_buf[..n].to_vec(), resp_fds)); diff --git a/datadog-ipc/src/handles.rs b/datadog-ipc/src/handles.rs index c6fa4f764e..88aa2f3994 100644 --- a/datadog-ipc/src/handles.rs +++ b/datadog-ipc/src/handles.rs @@ -59,10 +59,7 @@ where } } - fn receive_handles( - &mut self, - transport: Transport, - ) -> Result<(), Transport::Error> + fn receive_handles(&mut self, transport: Transport) -> Result<(), 
Transport::Error> where Transport: HandlesTransport, { diff --git a/datadog-ipc/src/lib.rs b/datadog-ipc/src/lib.rs index 086f5e6aa5..a9bbfe2756 100644 --- a/datadog-ipc/src/lib.rs +++ b/datadog-ipc/src/lib.rs @@ -13,11 +13,11 @@ pub mod handles; pub mod platform; pub mod rate_limiter; -pub mod codec; pub mod client; +pub mod codec; +pub use client::IpcClientConn; +pub use platform::{recv_raw_async, send_raw_async}; pub use platform::{ PeerCredentials, SeqpacketConn, SeqpacketListener, HANDLE_SUFFIX_SIZE, MAX_MESSAGE_SIZE, }; -pub use platform::{recv_raw_async, send_raw_async}; -pub use client::IpcClientConn; diff --git a/datadog-ipc/src/platform/unix/handles.rs b/datadog-ipc/src/platform/unix/handles.rs index 8defacec1f..482597f396 100644 --- a/datadog-ipc/src/platform/unix/handles.rs +++ b/datadog-ipc/src/platform/unix/handles.rs @@ -62,10 +62,7 @@ impl FdSource { impl HandlesTransport for &mut FdSource { type Error = std::io::Error; - fn copy_handle( - self, - _handle: PlatformHandle, - ) -> Result<(), Self::Error> { + fn copy_handle(self, _handle: PlatformHandle) -> Result<(), Self::Error> { Ok(()) } diff --git a/datadog-ipc/src/platform/unix/mod.rs b/datadog-ipc/src/platform/unix/mod.rs index 3bfda6011d..c20ce44540 100644 --- a/datadog-ipc/src/platform/unix/mod.rs +++ b/datadog-ipc/src/platform/unix/mod.rs @@ -26,4 +26,3 @@ pub(crate) use mem_handle::*; pub unsafe extern "C" fn memfd_create(name: libc::c_void, flags: libc::c_uint) -> libc::c_int { libc::syscall(libc::SYS_memfd_create, name, flags) as libc::c_int } - diff --git a/datadog-ipc/src/platform/unix/sockets/linux.rs b/datadog-ipc/src/platform/unix/sockets/linux.rs index ed2444a6ec..45a80aa245 100644 --- a/datadog-ipc/src/platform/unix/sockets/linux.rs +++ b/datadog-ipc/src/platform/unix/sockets/linux.rs @@ -3,8 +3,9 @@ //! Linux-specific IPC socket implementation using `AF_UNIX SOCK_SEQPACKET`. 
-use super::{create_unix_socket, SeqpacketConn, SeqpacketListener, PeerCredentials}; +use super::{create_unix_socket, PeerCredentials, SeqpacketConn, SeqpacketListener}; use nix::sys::socket::{accept, bind, connect, listen, Backlog, SockType, UnixAddr}; +use std::os::fd::RawFd; use std::{ io, os::unix::{ @@ -13,7 +14,6 @@ use std::{ }, path::Path, }; -use std::os::fd::RawFd; fn create_seqpacket_socket() -> io::Result { create_unix_socket(SockType::SeqPacket) @@ -43,8 +43,8 @@ impl SeqpacketListener { /// Accept a new connection (non-blocking in non-blocking mode). /// - /// Skips intermittent connections left by `is_listening` probes: after `accept()`, peek to check - /// if the peer has already closed the connection (EOF). If so, discard and loop. + /// Skips intermittent connections left by `is_listening` probes: after `accept()`, peek to + /// check if the peer has already closed the connection (EOF). If so, discard and loop. pub fn try_accept(&self) -> io::Result { loop { let new_fd = accept(self.inner.as_raw_fd()).map_err(io::Error::from)?; @@ -73,9 +73,8 @@ impl SeqpacketConn { /// Create a connected pair (SEQPACKET, for testing / in-process use). pub fn socketpair() -> io::Result<(Self, Self)> { let mut fds = [0i32; 2]; - if unsafe { - libc::socketpair(libc::AF_UNIX, libc::SOCK_SEQPACKET, 0, fds.as_mut_ptr()) - } == -1 + if unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_SEQPACKET, 0, fds.as_mut_ptr()) } + == -1 { return Err(io::Error::last_os_error()); } diff --git a/datadog-ipc/src/platform/unix/sockets/macos.rs b/datadog-ipc/src/platform/unix/sockets/macos.rs index 8c304de2f6..8f93293b78 100644 --- a/datadog-ipc/src/platform/unix/sockets/macos.rs +++ b/datadog-ipc/src/platform/unix/sockets/macos.rs @@ -14,8 +14,8 @@ //! //! **Client side** (`SeqpacketConn::connect`): //! - Creates a `socketpair(AF_UNIX, SOCK_DGRAM)` with 4 MiB send/recv buffers. -//! - Sends one socketpair end to the server's rendezvous path via a **fresh, unconnected** -//! 
DGRAM socket (using `sendmsg` with `SCM_RIGHTS`). The client retains the other end. +//! - Sends one socketpair end to the server's rendezvous path via a **fresh, unconnected** DGRAM +//! socket (using `sendmsg` with `SCM_RIGHTS`). The client retains the other end. //! //! **Liveness probe** (`is_listening`): //! - Sends a 1-byte datagram **without** SCM_RIGHTS to the rendezvous socket. @@ -25,14 +25,14 @@ use super::{ create_unix_socket, sendmsg, set_nonblocking, ControlMessage, MsgFlags, SeqpacketConn, SeqpacketListener, UnixAddr, MAX_MESSAGE_SIZE, }; +use crate::PeerCredentials; use nix::sys::socket::{bind, AddressFamily, SockFlag, SockType}; +use std::os::fd::RawFd; use std::{ io, os::unix::io::{AsRawFd, FromRawFd, OwnedFd}, path::Path, }; -use std::os::fd::RawFd; -use crate::PeerCredentials; fn create_dgram_socket() -> io::Result { create_unix_socket(SockType::Datagram) @@ -83,7 +83,8 @@ impl SeqpacketListener { if let Some(client_fd) = it.next() { // The second fd (if present) is the liveness pipe read end from `connect()`. // Holding it alive lets the client detect when we drop this connection. - // Unlike socketpairs, pipes aren't autoclosed when the transferred end is closed locally. + // Unlike socketpairs, pipes aren't autoclosed when the transferred end is closed + // locally. return SeqpacketConn::from_owned(client_fd, it.next()); } // No SCM_RIGHTS: liveness probe — discard and try the next message. @@ -96,10 +97,7 @@ impl SeqpacketConn { /// Create a connected pair (SOCK_DGRAM with 4 MiB buffers, for testing / in-process use). 
pub fn socketpair() -> io::Result<(Self, Self)> { let mut fds = [0i32; 2]; - if unsafe { - libc::socketpair(libc::AF_UNIX, libc::SOCK_DGRAM, 0, fds.as_mut_ptr()) - } == -1 - { + if unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_DGRAM, 0, fds.as_mut_ptr()) } == -1 { return Err(io::Error::last_os_error()); } let fd0 = unsafe { OwnedFd::from_raw_fd(fds[0]) }; @@ -120,10 +118,7 @@ impl SeqpacketConn { /// and subsequent sends return `BrokenPipe`. pub fn connect(path: impl AsRef) -> io::Result { let mut fds = [0i32; 2]; - if unsafe { - libc::socketpair(libc::AF_UNIX, libc::SOCK_DGRAM, 0, fds.as_mut_ptr()) - } == -1 - { + if unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_DGRAM, 0, fds.as_mut_ptr()) } == -1 { return Err(io::Error::last_os_error()); } let fd_client = unsafe { OwnedFd::from_raw_fd(fds[0]) }; @@ -175,7 +170,11 @@ impl SeqpacketConn { pub(super) fn poll_liveness_pipe(&self) -> io::Result<()> { if let Some(ref lw) = self.liveness { - let mut pfd = libc::pollfd { fd: lw.as_raw_fd(), events: libc::POLLHUP as libc::c_short, revents: 0 }; + let mut pfd = libc::pollfd { + fd: lw.as_raw_fd(), + events: libc::POLLHUP as libc::c_short, + revents: 0, + }; let ret = unsafe { libc::poll(&mut pfd, 1, 0) }; if ret > 0 && pfd.revents & (libc::POLLHUP | libc::POLLERR) != 0 { return Err(io::Error::from(io::ErrorKind::BrokenPipe)); @@ -189,7 +188,11 @@ impl SeqpacketConn { /// On macOS, the peer fd must be kept open locally to maintain the SOCK_DGRAM /// socketpair connection on this end. It is stored in `_peer` and closed when /// this `SeqpacketConn` is dropped. 
- pub(super) fn from_owned_pair(client: OwnedFd, peer: OwnedFd, liveness: Option) -> io::Result { + pub(super) fn from_owned_pair( + client: OwnedFd, + peer: OwnedFd, + liveness: Option, + ) -> io::Result { set_nonblocking(client.as_raw_fd(), true)?; Ok(Self { inner: client, @@ -219,10 +222,7 @@ pub fn is_listening>(path: P) -> io::Result { .map_err(io::Error::from)?; let addr = UnixAddr::new(path.as_ref()).map_err(io::Error::from)?; let iov = [std::io::IoSlice::new(&[0u8])]; - Ok( - sendmsg::(probe.as_raw_fd(), &iov, &[], MsgFlags::empty(), Some(&addr)) - .is_ok(), - ) + Ok(sendmsg::(probe.as_raw_fd(), &iov, &[], MsgFlags::empty(), Some(&addr)).is_ok()) } pub fn get_peer_credentials(fd: RawFd) -> io::Result { @@ -260,14 +260,18 @@ mod tests { let server = listener.try_accept().expect("try_accept"); // Client → server - client.try_send_raw(&mut vec![1u8; 10], &[]).expect("client send"); + client + .try_send_raw(&mut vec![1u8; 10], &[]) + .expect("client send"); let mut buf = vec![0u8; 64]; let (n, _) = server.try_recv_raw(&mut buf).expect("server recv"); assert_eq!(&buf[..n], &[1u8; 10]); // Server → client (use a large enough buffer for 220 bytes) let mut buf220 = vec![0u8; 256]; - server.try_send_raw(&mut vec![2u8; 220], &[]).expect("server send 220B"); + server + .try_send_raw(&mut vec![2u8; 220], &[]) + .expect("server send 220B"); let (n, _) = client.try_recv_raw(&mut buf220).expect("client recv"); assert_eq!(n, 220); } @@ -280,7 +284,9 @@ mod tests { let (conn0, conn1) = SeqpacketConn::socketpair().expect("socketpair"); // Both ends alive: send must succeed. - conn0.try_send_raw(&mut vec![42u8; 10], &[]).expect("send with peer alive"); + conn0 + .try_send_raw(&mut vec![42u8; 10], &[]) + .expect("send with peer alive"); // Drop the peer: on macOS this disconnects conn0. 
drop(conn1); diff --git a/datadog-ipc/src/platform/unix/sockets/mod.rs b/datadog-ipc/src/platform/unix/sockets/mod.rs index 8e064acf43..5a9f30b60e 100644 --- a/datadog-ipc/src/platform/unix/sockets/mod.rs +++ b/datadog-ipc/src/platform/unix/sockets/mod.rs @@ -11,7 +11,7 @@ use nix::sys::socket::{recvmsg, sendmsg, AddressFamily, SockFlag, SockType}; pub use nix::sys::socket::{ControlMessage, ControlMessageOwned, MsgFlags, UnixAddr}; use std::{ io, - os::unix::io::{AsRawFd, FromRawFd, OwnedFd, RawFd}, + os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd}, time::Duration, }; use tokio::io::unix::AsyncFd; @@ -185,7 +185,7 @@ impl AsRawFd for SeqpacketListener { } } -impl std::os::unix::io::IntoRawFd for SeqpacketListener { +impl IntoRawFd for SeqpacketListener { fn into_raw_fd(self) -> RawFd { self.inner.into_raw_fd() } diff --git a/datadog-ipc/src/platform/windows/handles.rs b/datadog-ipc/src/platform/windows/handles.rs index 8dfc36fd9b..3bfa17a83c 100644 --- a/datadog-ipc/src/platform/windows/handles.rs +++ b/datadog-ipc/src/platform/windows/handles.rs @@ -52,10 +52,7 @@ impl FdSource { impl HandlesTransport for &mut FdSource { type Error = std::io::Error; - fn copy_handle( - self, - _handle: PlatformHandle, - ) -> Result<(), Self::Error> { + fn copy_handle(self, _handle: PlatformHandle) -> Result<(), Self::Error> { Ok(()) } diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 12deca3e4c..095d84a46e 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -22,6 +22,7 @@ //! no intermediate copy needed. The caller's buffer must have at least `HANDLE_SUFFIX_SIZE` //! bytes beyond the maximum expected payload size. 
+use crate::platform::message::MAX_FDS; use std::io; use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, OwnedHandle, RawHandle}; use std::path::Path; @@ -31,7 +32,6 @@ use std::sync::{ Mutex, }; use tokio::net::windows::named_pipe::{NamedPipeClient, NamedPipeServer}; -use crate::platform::message::MAX_FDS; // winapi – only used for things not cleanly available in windows-sys use winapi::shared::minwindef::ULONG; @@ -43,13 +43,15 @@ use winapi::um::winnt::{DUPLICATE_SAME_ACCESS, HANDLE, PROCESS_DUP_HANDLE}; // windows-sys – used for all pipe/IO/threading syscalls use windows_sys::Win32::Foundation::{HANDLE as SysHANDLE, WAIT_OBJECT_0, WAIT_TIMEOUT}; -use windows_sys::Win32::Storage::FileSystem::{ReadFile, WriteFile, FILE_FLAG_FIRST_PIPE_INSTANCE, FILE_FLAG_OVERLAPPED, PIPE_ACCESS_DUPLEX}; -use windows_sys::Win32::System::IO::{CancelIo, GetOverlappedResult, OVERLAPPED, OVERLAPPED_0}; +use windows_sys::Win32::Storage::FileSystem::{ + ReadFile, WriteFile, FILE_FLAG_FIRST_PIPE_INSTANCE, FILE_FLAG_OVERLAPPED, PIPE_ACCESS_DUPLEX, +}; use windows_sys::Win32::System::Pipes::{ - ConnectNamedPipe, CreateNamedPipeA, PeekNamedPipe, SetNamedPipeHandleState, - PIPE_NOWAIT, PIPE_READMODE_MESSAGE, PIPE_TYPE_MESSAGE, PIPE_UNLIMITED_INSTANCES, PIPE_WAIT, + ConnectNamedPipe, CreateNamedPipeA, PeekNamedPipe, SetNamedPipeHandleState, PIPE_NOWAIT, + PIPE_READMODE_MESSAGE, PIPE_TYPE_MESSAGE, PIPE_UNLIMITED_INSTANCES, PIPE_WAIT, }; use windows_sys::Win32::System::Threading::{CreateEventA, WaitForSingleObject, INFINITE}; +use windows_sys::Win32::System::IO::{CancelIo, GetOverlappedResult, OVERLAPPED, OVERLAPPED_0}; /// Maximum IPC message payload size (4 MiB). 
pub const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; @@ -162,7 +164,15 @@ fn pipe_read( } let mut read: u32 = 0; - if unsafe { ReadFile(h, buf.as_mut_ptr() as _, buf.len() as u32, &mut read, null_mut()) } == 0 + if unsafe { + ReadFile( + h, + buf.as_mut_ptr() as _, + buf.len() as u32, + &mut read, + null_mut(), + ) + } == 0 { return Err(io::Error::last_os_error()); } @@ -176,8 +186,15 @@ fn pipe_write(h: SysHANDLE, data: &[u8], blocking: bool) -> io::Result<()> { } let mut written: u32 = 0; - let ok = - unsafe { WriteFile(h, data.as_ptr() as _, data.len() as u32, &mut written, null_mut()) }; + let ok = unsafe { + WriteFile( + h, + data.as_ptr() as _, + data.len() as u32, + &mut written, + null_mut(), + ) + }; if !blocking { let mode = PIPE_WAIT; @@ -187,8 +204,7 @@ fn pipe_write(h: SysHANDLE, data: &[u8], blocking: bool) -> io::Result<()> { if ok == 0 { let err = io::Error::last_os_error(); if !blocking - && err.raw_os_error() - == Some(windows_sys::Win32::Foundation::ERROR_NO_DATA as i32) + && err.raw_os_error() == Some(windows_sys::Win32::Foundation::ERROR_NO_DATA as i32) { return Err(io::ErrorKind::WouldBlock.into()); } @@ -200,7 +216,11 @@ fn pipe_write(h: SysHANDLE, data: &[u8], blocking: bool) -> io::Result<()> { fn create_pipe_server(name: &[u8], first_instance: bool) -> io::Result { let open_mode = PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED - | if first_instance { FILE_FLAG_FIRST_PIPE_INSTANCE } else { 0 }; + | if first_instance { + FILE_FLAG_FIRST_PIPE_INSTANCE + } else { + 0 + }; let h = unsafe { CreateNamedPipeA( @@ -232,7 +252,9 @@ fn make_overlapped(event: SysHANDLE) -> OVERLAPPED { OVERLAPPED { Internal: 0, InternalHigh: 0, - Anonymous: OVERLAPPED_0 { Pointer: null_mut() }, + Anonymous: OVERLAPPED_0 { + Pointer: null_mut(), + }, hEvent: event, } } @@ -271,7 +293,11 @@ impl SeqpacketListener { pub fn from_owned_fd(fd: OwnedHandle) -> Self { use crate::platform::named_pipe_name_from_raw_handle; let name = 
named_pipe_name_from_raw_handle(fd.as_raw_handle()) - .map(|s| { let mut b = s.into_bytes(); b.push(0); b }) + .map(|s| { + let mut b = s.into_bytes(); + b.push(0); + b + }) .unwrap_or_default(); Self { inner: Mutex::new(fd), @@ -584,7 +610,10 @@ pub struct AsyncSeqpacketConn { impl AsyncSeqpacketConn { pub fn peer_credentials(&self) -> io::Result { - Ok(PeerCredentials { pid: self.peer_pid, uid: 0 }) + Ok(PeerCredentials { + pid: self.peer_pid, + uid: 0, + }) } } @@ -604,7 +633,10 @@ impl SeqpacketConn { } else { AsyncPipe::Client(unsafe { NamedPipeClient::from_raw_handle(raw)? }) }; - Ok(AsyncSeqpacketConn { inner, peer_pid: self.peer_pid }) + Ok(AsyncSeqpacketConn { + inner, + peer_pid: self.peer_pid, + }) } } diff --git a/datadog-sidecar-ffi/src/lib.rs b/datadog-sidecar-ffi/src/lib.rs index 48c92c0323..5e8a70debf 100644 --- a/datadog-sidecar-ffi/src/lib.rs +++ b/datadog-sidecar-ffi/src/lib.rs @@ -599,10 +599,10 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( tracer_version: tracer_version.to_utf8_lossy().into(), flush_interval: Duration::from_millis(flush_interval_milliseconds as u64), remote_config_poll_interval: Duration::from_millis( - remote_config_poll_interval_millis as u64 + remote_config_poll_interval_millis as u64, ), telemetry_heartbeat_interval: Duration::from_millis( - telemetry_heartbeat_interval_millis as u64 + telemetry_heartbeat_interval_millis as u64, ), force_flush_size, force_drop_size, @@ -614,13 +614,13 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( }, remote_config_products: ffi::Slice::from_raw_parts( remote_config_products, - remote_config_products_count + remote_config_products_count, ) .as_slice() .to_vec(), remote_config_capabilities: ffi::Slice::from_raw_parts( remote_config_capabilities, - remote_config_capabilities_count + remote_config_capabilities_count, ) .as_slice() .to_vec(), @@ -638,9 +638,7 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( try_c!(blocking::set_session_config( transport, 
session_id_str, - datadog_sidecar::service::RemoteConfigNotifyFunction( - _remote_config_notify_function, - ), + datadog_sidecar::service::RemoteConfigNotifyFunction(_remote_config_notify_function,), &session_config, is_fork, )); diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index a17e22af19..032598b55f 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ b/datadog-sidecar/src/service/blocking.rs @@ -127,7 +127,10 @@ impl SidecarTransport { /// Send garbage data (used in tests to verify error handling). pub fn send_garbage(&mut self) -> io::Result<()> { match self.inner.lock() { - Ok(mut c) => c.channel.0.send_blocking(&mut vec![0xDE, 0xAD, 0xBE, 0xEF], &[]), + Ok(mut c) => c + .channel + .0 + .send_blocking(&mut vec![0xDE, 0xAD, 0xBE, 0xEF], &[]), Err(e) => Err(io::Error::other(e.to_string())), } } @@ -142,9 +145,14 @@ impl From for SidecarTransport { } } -fn lock_sender(transport: &mut SidecarTransport) -> io::Result> { +fn lock_sender( + transport: &mut SidecarTransport, +) -> io::Result> { transport.ensure_alive(); - transport.inner.lock().map_err(|e| io::Error::other(e.to_string())) + transport + .inner + .lock() + .map_err(|e| io::Error::other(e.to_string())) } /// Shuts down a runtime. 
@@ -188,7 +196,8 @@ pub fn enqueue_actions( pub fn set_session_config( transport: &mut SidecarTransport, session_id: String, - #[cfg(windows)] remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, + #[cfg(windows)] + remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, config: &SessionConfig, is_fork: bool, ) -> io::Result<()> { @@ -243,7 +252,12 @@ pub fn send_debugger_data_shm( handle: ShmHandle, debugger_type: DebuggerType, ) -> io::Result<()> { - lock_sender(transport)?.send_debugger_data_shm(instance_id.clone(), queue_id, handle, debugger_type); + lock_sender(transport)?.send_debugger_data_shm( + instance_id.clone(), + queue_id, + handle, + debugger_type, + ); Ok(()) } @@ -399,7 +413,7 @@ pub fn ping(transport: &mut SidecarTransport) -> io::Result { mod tests { use crate::service::blocking::SidecarTransport; use datadog_ipc::{SeqpacketConn, SeqpacketListener}; - + use tempfile::tempdir; #[test] diff --git a/datadog-sidecar/src/service/mod.rs b/datadog-sidecar/src/service/mod.rs index 4bb2e9ee0b..09fa13d27e 100644 --- a/datadog-sidecar/src/service/mod.rs +++ b/datadog-sidecar/src/service/mod.rs @@ -34,17 +34,17 @@ mod queue_id; mod remote_configs; mod runtime_info; mod runtime_metadata; +pub mod sender; mod serialized_tracer_header_tags; mod session_info; -pub mod sender; pub mod sidecar_interface; pub(crate) mod sidecar_server; pub mod telemetry; pub(crate) mod tracing; -pub use sidecar_interface::DynamicInstrumentationConfigState; #[cfg(windows)] pub use remote_configs::RemoteConfigNotifyFunction; +pub use sidecar_interface::DynamicInstrumentationConfigState; pub use telemetry::{get_telemetry_action_sender, InternalTelemetryActions}; pub(crate) use telemetry::{init_telemetry_sender, telemetry_action_receiver_task}; diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index c37d701d10..33eb96ee99 100644 --- a/datadog-sidecar/src/service/sender.rs 
+++ b/datadog-sidecar/src/service/sender.rs @@ -4,15 +4,17 @@ //! Higher-level sender with priority outbox and telemetry load-shedding. //! //! Wraps [`SidecarInterfaceChannel`] with: -//! - A **priority outbox** for state-change messages: coalesced and drained before -//! fire-and-forget sends. -//! - **Telemetry load-shedding**: when `outstanding > max_outstanding / 2`, 90% of -//! `EnqueueActions` calls are dropped (telemetry is low priority). +//! - A **priority outbox** for state-change messages: coalesced and drained before fire-and-forget +//! sends. +//! - **Telemetry load-shedding**: when `outstanding > max_outstanding / 2`, 90% of `EnqueueActions` +//! calls are dropped (telemetry is low priority). //! //! `SidecarSender` takes `&mut self`; the caller is responsible for exclusive access. use crate::service::{ - sidecar_interface::{DynamicInstrumentationConfigState, SidecarInterfaceChannel, SidecarInterfaceRequest}, + sidecar_interface::{ + DynamicInstrumentationConfigState, SidecarInterfaceChannel, SidecarInterfaceRequest, + }, InstanceId, QueueId, SerializedTracerHeaderTags, SessionConfig, SidecarAction, }; use datadog_ipc::platform::ShmHandle; @@ -58,12 +60,12 @@ impl SidecarOutbox { fn cancel_if_instance(slot: &mut Option, instance_id: &InstanceId) { let should_cancel = match slot { - Some(SidecarInterfaceRequest::SetUniversalServiceTags { instance_id: id, .. }) => { - id == instance_id - } - Some(SidecarInterfaceRequest::SetRequestConfig { instance_id: id, .. }) => { - id == instance_id - } + Some(SidecarInterfaceRequest::SetUniversalServiceTags { + instance_id: id, .. + }) => id == instance_id, + Some(SidecarInterfaceRequest::SetRequestConfig { + instance_id: id, .. 
+ }) => id == instance_id, _ => false, }; if should_cancel { @@ -196,7 +198,8 @@ impl SidecarSender { &mut self.outbox, SidecarInterfaceRequest::SetSessionConfig { session_id, - #[cfg(windows)] remote_config_notify_function, + #[cfg(windows)] + remote_config_notify_function, config, is_fork, }, @@ -298,7 +301,8 @@ impl SidecarSender { } // The 1-in-10 that passes through falls to the try_send below. } - self.channel.try_send_enqueue_actions(instance_id, queue_id, actions); + self.channel + .try_send_enqueue_actions(instance_id, queue_id, actions); } pub fn send_trace_v04_shm( @@ -383,7 +387,8 @@ impl SidecarSender { if !self.try_drain_outbox() { return; } - self.channel.try_send_set_test_session_token(session_id, token); + self.channel + .try_send_set_test_session_token(session_id, token); } pub fn set_read_timeout(&mut self, d: Option) -> io::Result<()> { diff --git a/datadog-sidecar/src/service/session_info.rs b/datadog-sidecar/src/service/session_info.rs index 0118257cac..c81b73ca45 100644 --- a/datadog-sidecar/src/service/session_info.rs +++ b/datadog-sidecar/src/service/session_info.rs @@ -38,8 +38,7 @@ pub(crate) struct SessionInfo { pub(crate) remote_config_notify_function: Arc>, #[cfg(windows)] - pub(crate) process_handle: - Arc>>, + pub(crate) process_handle: Arc>>, pub(crate) log_guard: Arc, MultiWriterGuard<'static>)>>>, pub(crate) session_id: String, diff --git a/datadog-sidecar/src/service/sidecar_interface.rs b/datadog-sidecar/src/service/sidecar_interface.rs index a3c6719d19..bab13c064e 100644 --- a/datadog-sidecar/src/service/sidecar_interface.rs +++ b/datadog-sidecar/src/service/sidecar_interface.rs @@ -3,7 +3,9 @@ #![allow(clippy::too_many_arguments)] -use crate::service::{InstanceId, QueueId, SerializedTracerHeaderTags, SessionConfig, SidecarAction}; +use crate::service::{ + InstanceId, QueueId, SerializedTracerHeaderTags, SessionConfig, SidecarAction, +}; use datadog_ipc::platform::ShmHandle; use datadog_live_debugger::sender::DebuggerType; use 
libdd_common::tag::Tag; diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 0dddb9d998..4f2b2dc908 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -827,7 +827,11 @@ impl SidecarServer { ); } - fn send_dogstatsd_actions_impl(&self, instance_id: InstanceId, actions: Vec) { + fn send_dogstatsd_actions_impl( + &self, + instance_id: InstanceId, + actions: Vec, + ) { let server = self.clone(); tokio::spawn(async move { server @@ -884,9 +888,12 @@ impl SidecarInterface for ConnectionSidecarHandler { queue_id: QueueId, actions: Vec, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); - self.server.enqueue_actions_impl(instance_id, queue_id, actions); + self.server + .enqueue_actions_impl(instance_id, queue_id, actions); } async fn set_session_config( @@ -897,13 +904,16 @@ impl SidecarInterface for ConnectionSidecarHandler { config: SessionConfig, is_fork: bool, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_session(&session_id); self.server .set_session_config_impl( session_id, peer.pid, - #[cfg(windows)] remote_config_notify_function, + #[cfg(windows)] + remote_config_notify_function, config, is_fork, ) @@ -916,20 +926,26 @@ impl SidecarInterface for ConnectionSidecarHandler { session_id: String, process_tags: String, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_session(&session_id); self.server .set_session_process_tags_impl(session_id, process_tags); } async fn shutdown_runtime(&self, _peer: PeerCredentials, instance_id: InstanceId) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server 
+ .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server.shutdown_runtime_impl(instance_id); } async fn shutdown_session(&self, _peer: PeerCredentials, session_id: String) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_session(&session_id); self.server.shutdown_session_impl(session_id); } @@ -942,7 +958,9 @@ impl SidecarInterface for ConnectionSidecarHandler { len: usize, headers: SerializedTracerHeaderTags, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .send_trace_v04_shm_impl(instance_id, handle, len, headers); @@ -955,7 +973,9 @@ impl SidecarInterface for ConnectionSidecarHandler { data: Vec, headers: SerializedTracerHeaderTags, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .send_trace_v04_bytes_impl(instance_id, data, headers); @@ -969,7 +989,9 @@ impl SidecarInterface for ConnectionSidecarHandler { handle: ShmHandle, debugger_type: DebuggerType, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .send_debugger_data_shm_impl(instance_id, queue_id, handle, debugger_type); @@ -982,7 +1004,9 @@ impl SidecarInterface for ConnectionSidecarHandler { queue_id: QueueId, diagnostics_payload: Vec, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .send_debugger_diagnostics_impl(instance_id, queue_id, diagnostics_payload); @@ -994,7 +1018,9 @@ impl SidecarInterface for 
ConnectionSidecarHandler { exception_hash: u64, granularity: Duration, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.server .acquire_exception_hash_rate_limiter_impl(exception_hash, granularity); } @@ -1010,7 +1036,9 @@ impl SidecarInterface for ConnectionSidecarHandler { global_tags: Vec, dynamic_instrumentation_state: DynamicInstrumentationConfigState, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server.set_universal_service_tags_impl( instance_id, @@ -1030,7 +1058,9 @@ impl SidecarInterface for ConnectionSidecarHandler { queue_id: QueueId, dynamic_instrumentation_state: DynamicInstrumentationConfigState, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .set_request_config_impl(instance_id, queue_id, dynamic_instrumentation_state); @@ -1042,13 +1072,18 @@ impl SidecarInterface for ConnectionSidecarHandler { instance_id: InstanceId, actions: Vec, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); - self.server.send_dogstatsd_actions_impl(instance_id, actions); + self.server + .send_dogstatsd_actions_impl(instance_id, actions); } async fn flush_traces(&self, _peer: PeerCredentials) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.server.flush_traces_impl().await; } @@ -1058,22 +1093,30 @@ impl SidecarInterface for ConnectionSidecarHandler { session_id: String, token: String, ) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + 
.submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.track_session(&session_id); self.server.set_test_session_token_impl(session_id, token); } async fn ping(&self, _peer: PeerCredentials) { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); } async fn dump(&self, _peer: PeerCredentials) -> String { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.server.dump_impl().await } async fn stats(&self, _peer: PeerCredentials) -> String { - self.server.submitted_payloads.fetch_add(1, Ordering::Relaxed); + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); self.server.stats_impl().await } } diff --git a/datadog-sidecar/src/setup/windows.rs b/datadog-sidecar/src/setup/windows.rs index 0327a016d5..9e8c40cc01 100644 --- a/datadog-sidecar/src/setup/windows.rs +++ b/datadog-sidecar/src/setup/windows.rs @@ -102,7 +102,8 @@ mod tests { let client: SeqpacketConn = liaison.connect_to_server().unwrap(); let srv: SeqpacketConn = listener.try_accept().unwrap(); client.send_raw_blocking(&mut vec![255], &[]).unwrap(); - let mut buf = vec![0u8; datadog_ipc::MAX_MESSAGE_SIZE + datadog_ipc::HANDLE_SUFFIX_SIZE]; + let mut buf = + vec![0u8; datadog_ipc::MAX_MESSAGE_SIZE + datadog_ipc::HANDLE_SUFFIX_SIZE]; let (n, _) = srv.recv_raw_blocking(&mut buf).unwrap(); assert_eq!(n, 1); assert_eq!(buf[0], 255); diff --git a/datadog-sidecar/src/unix.rs b/datadog-sidecar/src/unix.rs index 80b11aa7c0..0ec2be0f2b 100644 --- a/datadog-sidecar/src/unix.rs +++ b/datadog-sidecar/src/unix.rs @@ -67,7 +67,10 @@ pub extern "C" fn ddog_daemon_entry_point(trampoline_data: &TrampolineData) { move || stop_listening(listener_fd) }; - Ok((move |handler| accept_socket_loop(async_listener, handler), cancel)) + Ok(( + move |handler| accept_socket_loop(async_listener, handler), + cancel, + )) }; if let Err(err) = 
enter_listener_loop(acquire_listener) { error!("Error: {err}") diff --git a/datadog-sidecar/src/windows.rs b/datadog-sidecar/src/windows.rs index 1b32348904..b8c42c2356 100644 --- a/datadog-sidecar/src/windows.rs +++ b/datadog-sidecar/src/windows.rs @@ -113,8 +113,7 @@ async fn accept_socket_loop( .ok_or(io::Error::from(io::ErrorKind::InvalidInput))?; // Transfer the listener's handle into a Tokio NamedPipeServer. - let mut pipe = - unsafe { NamedPipeServer::from_raw_handle(listener.into_raw_handle()) }?; + let mut pipe = unsafe { NamedPipeServer::from_raw_handle(listener.into_raw_handle()) }?; let cancellation = cancellation.shared(); loop { From 88c3f07398816c5000ab7634b7ccaf2f09fd341b Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Thu, 12 Mar 2026 13:18:35 +0100 Subject: [PATCH 08/29] Add configurable size Signed-off-by: Bob Weinand --- datadog-ipc-macros/src/lib.rs | 2 +- datadog-ipc/src/client.rs | 4 +- datadog-ipc/src/lib.rs | 2 +- .../src/platform/unix/sockets/macos.rs | 6 +-- datadog-ipc/src/platform/unix/sockets/mod.rs | 41 ++++++++++++++++++- datadog-ipc/src/platform/windows/sockets.rs | 41 ++++++++++++++++--- datadog-sidecar/src/config.rs | 19 +++++++++ datadog-sidecar/src/entry.rs | 9 ++++ datadog-sidecar/src/service/blocking.rs | 10 +++++ datadog-sidecar/src/service/sender.rs | 32 ++------------- datadog-sidecar/src/setup/windows.rs | 2 +- datadog-sidecar/src/unix.rs | 13 +++++- datadog-sidecar/src/windows.rs | 7 ++++ 13 files changed, 143 insertions(+), 45 deletions(-) diff --git a/datadog-ipc-macros/src/lib.rs b/datadog-ipc-macros/src/lib.rs index 8c544965c0..ebfdfc0aeb 100644 --- a/datadog-ipc-macros/src/lib.rs +++ b/datadog-ipc-macros/src/lib.rs @@ -301,7 +301,7 @@ fn gen_serve_fn( } }; let mut recv_counter: u64 = 0; - let mut buf = vec![0u8; datadog_ipc::MAX_MESSAGE_SIZE + datadog_ipc::HANDLE_SUFFIX_SIZE]; + let mut buf = vec![0u8; datadog_ipc::max_message_size() + datadog_ipc::HANDLE_SUFFIX_SIZE]; loop { let (n, fds) = match 
datadog_ipc::recv_raw_async(&async_fd, &mut buf).await { Ok(x) => x, diff --git a/datadog-ipc/src/client.rs b/datadog-ipc/src/client.rs index 42dc5b34ff..f3445640af 100644 --- a/datadog-ipc/src/client.rs +++ b/datadog-ipc/src/client.rs @@ -3,7 +3,7 @@ //! Generic IPC client connection state shared by all generated channel types. -use crate::platform::{SeqpacketConn, HANDLE_SUFFIX_SIZE, MAX_MESSAGE_SIZE}; +use crate::platform::{max_message_size, SeqpacketConn, HANDLE_SUFFIX_SIZE}; #[cfg(unix)] use std::os::unix::io::{OwnedFd, RawFd}; @@ -44,7 +44,7 @@ impl IpcClientConn { send_count: 0, ack_count: 0, max_outstanding: MAX_OUTSTANDING, - recv_buf: vec![0u8; MAX_MESSAGE_SIZE + HANDLE_SUFFIX_SIZE], + recv_buf: vec![0u8; max_message_size() + HANDLE_SUFFIX_SIZE], closed: false, } } diff --git a/datadog-ipc/src/lib.rs b/datadog-ipc/src/lib.rs index a9bbfe2756..047eb7b383 100644 --- a/datadog-ipc/src/lib.rs +++ b/datadog-ipc/src/lib.rs @@ -19,5 +19,5 @@ pub mod codec; pub use client::IpcClientConn; pub use platform::{recv_raw_async, send_raw_async}; pub use platform::{ - PeerCredentials, SeqpacketConn, SeqpacketListener, HANDLE_SUFFIX_SIZE, MAX_MESSAGE_SIZE, + max_message_size, PeerCredentials, SeqpacketConn, SeqpacketListener, HANDLE_SUFFIX_SIZE, }; diff --git a/datadog-ipc/src/platform/unix/sockets/macos.rs b/datadog-ipc/src/platform/unix/sockets/macos.rs index 8f93293b78..fa8c83c79f 100644 --- a/datadog-ipc/src/platform/unix/sockets/macos.rs +++ b/datadog-ipc/src/platform/unix/sockets/macos.rs @@ -22,8 +22,8 @@ //! - Success → live server. `ECONNRESET` → stale socket file. 
use super::{ - create_unix_socket, sendmsg, set_nonblocking, ControlMessage, MsgFlags, SeqpacketConn, - SeqpacketListener, UnixAddr, MAX_MESSAGE_SIZE, + create_unix_socket, max_message_size, sendmsg, set_nonblocking, ControlMessage, MsgFlags, + SeqpacketConn, SeqpacketListener, UnixAddr, }; use crate::PeerCredentials; use nix::sys::socket::{bind, AddressFamily, SockFlag, SockType}; @@ -39,7 +39,7 @@ fn create_dgram_socket() -> io::Result { } fn set_dgram_buffers(fd: i32) -> io::Result<()> { - let size = MAX_MESSAGE_SIZE as libc::c_int; + let size = max_message_size() as libc::c_int; let len = std::mem::size_of::() as libc::socklen_t; for opt in [libc::SO_SNDBUF, libc::SO_RCVBUF] { if unsafe { diff --git a/datadog-ipc/src/platform/unix/sockets/mod.rs b/datadog-ipc/src/platform/unix/sockets/mod.rs index 5a9f30b60e..8062cc3373 100644 --- a/datadog-ipc/src/platform/unix/sockets/mod.rs +++ b/datadog-ipc/src/platform/unix/sockets/mod.rs @@ -12,6 +12,7 @@ pub use nix::sys::socket::{ControlMessage, ControlMessageOwned, MsgFlags, UnixAd use std::{ io, os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd}, + sync::atomic::{AtomicUsize, Ordering}, time::Duration, }; use tokio::io::unix::AsyncFd; @@ -32,8 +33,21 @@ use linux::get_peer_credentials; #[cfg(target_os = "macos")] use macos::get_peer_credentials; -/// Maximum IPC message payload size (4 MiB). -pub const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; +/// Global socket buffer size. Determines `max_message_size()` and is applied to new connections. +static SOCKET_BUFFER_SIZE: AtomicUsize = AtomicUsize::new(4 * 1024 * 1024); + +/// Set the socket send/receive buffer size used for all future connections. +/// +/// This also determines [`max_message_size()`]. Call before creating connections for the new +/// size to take effect on `socketpair`/`connect` calls (macOS). 
+pub fn set_socket_buffer_size(size: usize) { + SOCKET_BUFFER_SIZE.store(size, Ordering::Relaxed); +} + +/// Maximum IPC message payload size, equal to the configured socket buffer size. +pub fn max_message_size() -> usize { + SOCKET_BUFFER_SIZE.load(Ordering::Relaxed) +} /// Extra receive-buffer overhead for the wire format. Zero on Unix because fds are /// transferred out-of-band via `SCM_RIGHTS`; non-zero on Windows (see `sockets.rs`). @@ -296,6 +310,29 @@ impl SeqpacketConn { Ok(()) } + fn setsockopt_int(&self, optname: libc::c_int, size: usize) -> io::Result<()> { + let size_c = size as libc::c_int; + let ret = unsafe { + libc::setsockopt( + self.inner.as_raw_fd(), + libc::SOL_SOCKET, + optname, + &size_c as *const _ as *const libc::c_void, + std::mem::size_of::() as libc::socklen_t, + ) + }; + if ret < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } + } + + pub fn set_sndbuf_size(&self, size: usize) -> io::Result<()> { + set_socket_buffer_size(size); + self.setsockopt_int(libc::SO_SNDBUF, size) + } + + pub fn set_rcvbuf_size(&self, size: usize) -> io::Result<()> { + self.setsockopt_int(libc::SO_RCVBUF, size) + } + /// Convert to an async connection for use in async server dispatch loops. 
pub fn into_async_conn(self) -> io::Result { AsyncFd::new(self.inner) diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 095d84a46e..c61fb0a771 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -28,7 +28,7 @@ use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, OwnedHandl use std::path::Path; use std::ptr::{null, null_mut}; use std::sync::{ - atomic::{AtomicU64, Ordering}, + atomic::{AtomicU64, AtomicUsize, Ordering}, Mutex, }; use tokio::net::windows::named_pipe::{NamedPipeClient, NamedPipeServer}; @@ -53,14 +53,31 @@ use windows_sys::Win32::System::Pipes::{ use windows_sys::Win32::System::Threading::{CreateEventA, WaitForSingleObject, INFINITE}; use windows_sys::Win32::System::IO::{CancelIo, GetOverlappedResult, OVERLAPPED, OVERLAPPED_0}; -/// Maximum IPC message payload size (4 MiB). -pub const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; - /// Wire-format suffix overhead: 4-byte count + 8 bytes per handle slot. /// /// Receive buffers must be at least `expected_payload_max + HANDLE_SUFFIX_SIZE` bytes. pub const HANDLE_SUFFIX_SIZE: usize = 4 + 8 * MAX_FDS; +/// Global pipe buffer size used by `create_pipe_server`. +/// +/// Defaults to 4 MiB payload + handle suffix. Changed via [`set_pipe_buffer_size`] +/// before binding a listener or creating a socketpair. +static PIPE_BUFFER_SIZE: AtomicUsize = AtomicUsize::new(4 * 1024 * 1024 + HANDLE_SUFFIX_SIZE); + +/// Maximum IPC message payload size, equal to the pipe buffer minus the handle suffix. +pub fn max_message_size() -> usize { + PIPE_BUFFER_SIZE.load(Ordering::Relaxed) - HANDLE_SUFFIX_SIZE +} + +/// Set the named-pipe send/receive buffer size used for all future [`SeqpacketListener::bind`] +/// and [`SeqpacketConn::socketpair`] calls. 
+/// +/// Named-pipe buffer sizes are fixed at creation time on Windows; this must be called *before* +/// creating a listener or socketpair to take effect on new connections. +pub fn set_pipe_buffer_size(size: usize) { + PIPE_BUFFER_SIZE.store(size, Ordering::Relaxed); +} + /// Credentials of the connected peer. #[derive(Debug, Clone, Copy, Default)] pub struct PeerCredentials { @@ -223,13 +240,14 @@ fn create_pipe_server(name: &[u8], first_instance: bool) -> io::Result io::Result<()> { + set_pipe_buffer_size(size); + Ok(()) + } } /// Returns `true` if a server is listening at the given named pipe path. diff --git a/datadog-sidecar/src/config.rs b/datadog-sidecar/src/config.rs index c423c95847..aa82ce0d34 100644 --- a/datadog-sidecar/src/config.rs +++ b/datadog-sidecar/src/config.rs @@ -36,6 +36,8 @@ const ENV_SIDECAR_APPSEC_LOCK_FILE_PATH: &str = "_DD_SIDECAR_APPSEC_LOCK_FILE_PA const ENV_SIDECAR_APPSEC_LOG_FILE_PATH: &str = "_DD_SIDECAR_APPSEC_LOG_FILE_PATH"; const ENV_SIDECAR_APPSEC_LOG_LEVEL: &str = "_DD_SIDECAR_APPSEC_LOG_LEVEL"; +const ENV_SIDECAR_PIPE_BUFFER_SIZE: &str = "_DD_SIDECAR_PIPE_BUFFER_SIZE"; + #[derive(Debug, Copy, Clone, Default)] pub enum IpcMode { #[default] @@ -84,6 +86,9 @@ pub struct Config { pub crashtracker_endpoint: Option, pub appsec_config: Option, pub max_memory: usize, + /// Socket/pipe buffer size for IPC connections (bytes). + /// 0 means use the platform default. 
+ pub pipe_buffer_size: usize, } #[derive(Debug, Clone)] @@ -127,6 +132,12 @@ impl Config { format!("{}", self.max_memory).into(), ); } + if self.pipe_buffer_size != 0 { + res.insert( + ENV_SIDECAR_PIPE_BUFFER_SIZE, + format!("{}", self.pipe_buffer_size).into(), + ); + } res } } @@ -222,6 +233,13 @@ impl FromEnv { .unwrap_or(0) } + fn pipe_buffer_size() -> usize { + std::env::var(ENV_SIDECAR_PIPE_BUFFER_SIZE) + .unwrap_or_default() + .parse() + .unwrap_or(0) + } + fn crashtracker_endpoint() -> Option { std::env::var(ENV_SIDECAR_CRASHTRACKER_ENDPOINT) .ok() @@ -240,6 +258,7 @@ impl FromEnv { crashtracker_endpoint: Self::crashtracker_endpoint(), appsec_config: Self::appsec_config(), max_memory: Self::max_memory(), + pipe_buffer_size: Self::pipe_buffer_size(), } } diff --git a/datadog-sidecar/src/entry.rs b/datadog-sidecar/src/entry.rs index 9137c8826f..aebe9a5d82 100644 --- a/datadog-sidecar/src/entry.rs +++ b/datadog-sidecar/src/entry.rs @@ -220,6 +220,15 @@ pub fn daemonize(listener: IpcServer, mut cfg: Config) -> anyhow::Result<()> { } pub fn start_or_connect_to_sidecar(cfg: Config) -> anyhow::Result { + // On Windows, named-pipe buffer sizes are fixed at creation time. Set the global before + // attempt_listen so that the initial server pipe (created by this process and handed to the + // daemon) uses the configured size. The daemon restores the same value at startup so that + // subsequent try_accept calls also use the right size. 
+ #[cfg(windows)] + if cfg.pipe_buffer_size > 0 { + datadog_ipc::platform::set_pipe_buffer_size(cfg.pipe_buffer_size); + } + let liaison = match cfg.ipc_mode { config::IpcMode::Shared => setup::DefaultLiason::ipc_shared(), config::IpcMode::InstancePerProcess => setup::DefaultLiason::ipc_per_process(), diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index 032598b55f..db03efba7b 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ b/datadog-sidecar/src/service/blocking.rs @@ -64,6 +64,16 @@ impl SidecarTransport { lock_sender(self)?.set_write_timeout(d) } + pub fn set_backpressure(&mut self, max_bytes: usize, max_queue: u64) -> io::Result<()> { + let mut sender = lock_sender(self)?; + sender.channel.0.max_outstanding = max_queue; + #[cfg(unix)] + sender.channel.0.conn.set_sndbuf_size(max_bytes)?; + #[cfg(not(unix))] + let _ = max_bytes; // handled on pipe creation + Ok(()) + } + pub fn ensure_alive(&mut self) { let closed = match self.inner.lock() { Ok(guard) => guard.channel.0.is_closed(), diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index 33eb96ee99..005777b4e7 100644 --- a/datadog-sidecar/src/service/sender.rs +++ b/datadog-sidecar/src/service/sender.rs @@ -23,10 +23,6 @@ use libdd_common::tag::Tag; use libdd_dogstatsd_client::DogStatsDActionOwned; use std::{io, time::Duration}; -// --------------------------------------------------------------------------- -// Outbox -// --------------------------------------------------------------------------- - /// Priority outbox for state-change (coalesced) messages. /// /// Each slot holds the most recent pending message of its kind. 
@@ -54,10 +50,6 @@ impl SidecarOutbox { } } -// --------------------------------------------------------------------------- -// Outbox coalescing helpers -// --------------------------------------------------------------------------- - fn cancel_if_instance(slot: &mut Option, instance_id: &InstanceId) { let should_cancel = match slot { Some(SidecarInterfaceRequest::SetUniversalServiceTags { @@ -86,16 +78,12 @@ fn cancel_if_session(slot: &mut Option, session_id: &st } fn coalesce(outbox: &mut SidecarOutbox, incoming: SidecarInterfaceRequest) { - // For messages that trigger cancellations, do the cancellation first using a - // borrow, then move `incoming` into the slot. if let SidecarInterfaceRequest::ShutdownRuntime { ref instance_id } = incoming { - let id = instance_id.clone(); - cancel_if_instance(&mut outbox.set_request_config, &id); - cancel_if_instance(&mut outbox.set_universal_service_tags, &id); + cancel_if_instance(&mut outbox.set_request_config, instance_id); + cancel_if_instance(&mut outbox.set_universal_service_tags, instance_id); } if let SidecarInterfaceRequest::ShutdownSession { ref session_id } = incoming { - let id = session_id.clone(); - cancel_if_session(&mut outbox.set_session_config, &id); + cancel_if_session(&mut outbox.set_session_config, session_id); } match incoming { @@ -118,15 +106,11 @@ fn coalesce(outbox: &mut SidecarOutbox, incoming: SidecarInterfaceRequest) { outbox.shutdown_session = Some(incoming); } _ => { - // Non-outbox messages should not be routed here. + unreachable!("Not in outbox"); } } } -// --------------------------------------------------------------------------- -// SidecarSender -// --------------------------------------------------------------------------- - /// Higher-level IPC sender with outbox coalescing and telemetry load-shedding. /// /// Takes `&mut self` — callers are responsible for exclusive access. 
@@ -183,10 +167,6 @@ impl SidecarSender { self.channel.0.send_blocking(&mut data.to_vec(), &[]) } - // ------------------------------------------------------------------------- - // Outbox-coalesced state-change methods - // ------------------------------------------------------------------------- - pub fn set_session_config( &mut self, session_id: String, @@ -399,10 +379,6 @@ impl SidecarSender { self.channel.0.set_write_timeout(d) } - // ------------------------------------------------------------------------- - // Blocking methods (drain outbox blocking first, then call) - // ------------------------------------------------------------------------- - pub fn flush_traces(&mut self) -> io::Result<()> { self.drain_outbox_blocking(); self.channel.call_flush_traces() diff --git a/datadog-sidecar/src/setup/windows.rs b/datadog-sidecar/src/setup/windows.rs index 9e8c40cc01..3249709a69 100644 --- a/datadog-sidecar/src/setup/windows.rs +++ b/datadog-sidecar/src/setup/windows.rs @@ -103,7 +103,7 @@ mod tests { let srv: SeqpacketConn = listener.try_accept().unwrap(); client.send_raw_blocking(&mut vec![255], &[]).unwrap(); let mut buf = - vec![0u8; datadog_ipc::MAX_MESSAGE_SIZE + datadog_ipc::HANDLE_SUFFIX_SIZE]; + vec![0u8; datadog_ipc::max_message_size() + datadog_ipc::HANDLE_SUFFIX_SIZE]; let (n, _) = srv.recv_raw_blocking(&mut buf).unwrap(); assert_eq!(n, 1); assert_eq!(buf[0], 255); diff --git a/datadog-sidecar/src/unix.rs b/datadog-sidecar/src/unix.rs index 0ec2be0f2b..9a913f4d24 100644 --- a/datadog-sidecar/src/unix.rs +++ b/datadog-sidecar/src/unix.rs @@ -49,6 +49,11 @@ pub extern "C" fn ddog_daemon_entry_point(trampoline_data: &TrampolineData) { warn!("Failed to initialize crashtracker: {e}"); } + let buf_size = Config::get().pipe_buffer_size; + if buf_size > 0 { + datadog_ipc::platform::set_socket_buffer_size(buf_size); + } + let now = Instant::now(); let appsec_started = maybe_start_appsec(); @@ -113,7 +118,13 @@ async fn accept_socket_loop( match ready { Ok(mut 
guard) => { match guard.try_io(|inner| inner.get_ref().try_accept()) { - Ok(Ok(conn)) => handler(conn), + Ok(Ok(conn)) => { + let buf_size = Config::get().pipe_buffer_size; + if buf_size > 0 { + let _ = conn.set_rcvbuf_size(buf_size); + } + handler(conn); + } Ok(Err(e)) => { error!("IPC accept error: {e}"); break; diff --git a/datadog-sidecar/src/windows.rs b/datadog-sidecar/src/windows.rs index b8c42c2356..c1ed17aad9 100644 --- a/datadog-sidecar/src/windows.rs +++ b/datadog-sidecar/src/windows.rs @@ -50,6 +50,13 @@ pub extern "C" fn ddog_daemon_entry_point(_trampoline_data: &TrampolineData) { #[cfg(feature = "tracing")] crate::log::enable_logging().ok(); + // Restore the pipe buffer size the PHP parent process configured before spawning us, + // so subsequent try_accept calls use the same buffer size. + let buf_size = crate::config::Config::get().pipe_buffer_size; + if buf_size > 0 { + datadog_ipc::platform::set_pipe_buffer_size(buf_size); + } + let now = Instant::now(); let pid = unsafe { libc::getpid() }; From 1903e4324c6338322b3781722e88a37a268aa985 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Thu, 12 Mar 2026 13:58:56 +0100 Subject: [PATCH 09/29] Add log message for size Signed-off-by: Bob Weinand --- datadog-ipc-macros/src/lib.rs | 30 +++++++++++++++++++++++++ datadog-sidecar/src/service/blocking.rs | 11 ++------- datadog-sidecar/src/service/sender.rs | 4 ---- 3 files changed, 32 insertions(+), 13 deletions(-) diff --git a/datadog-ipc-macros/src/lib.rs b/datadog-ipc-macros/src/lib.rs index ebfdfc0aeb..48fbe5c7c8 100644 --- a/datadog-ipc-macros/src/lib.rs +++ b/datadog-ipc-macros/src/lib.rs @@ -120,6 +120,15 @@ fn gen_request_enum(enum_name: &Ident, methods: &[MethodInfo]) -> proc_macro2::T }) .collect(); + let name_arms: Vec<_> = methods + .iter() + .map(|m| { + let variant = &m.variant; + let name_str = m.name.to_string(); + quote! { Self::#variant { .. } => #name_str } + }) + .collect(); + quote! 
{ #[derive(::serde::Serialize, ::serde::Deserialize)] pub enum #enum_name { @@ -132,6 +141,12 @@ fn gen_request_enum(enum_name: &Ident, methods: &[MethodInfo]) -> proc_macro2::T #(#disc_arms),* } } + + pub fn variant_name(&self) -> &'static str { + match self { + #(#name_arms),* + } + } } } } @@ -361,6 +376,7 @@ fn gen_channel( let d = m.discriminant; let variant = &m.variant; + let name_str = m.name.to_string(); // Build the request and collect fds via TransferHandles. let build_req_and_fds = quote! { let __req = #enum_name::#variant { #(#field_names),* }; @@ -370,6 +386,12 @@ fn gen_channel( ).ok(); let mut __data = datadog_ipc::codec::encode(#d, &__req); let __fds = __sink.into_fds(); + { + let __max = datadog_ipc::max_message_size(); + if __data.len() > __max { + ::tracing::warn!(method = #name_str, len = __data.len(), max = __max, "IPC message too large"); + } + } }; if m.return_type.is_none() && !m.is_blocking { @@ -420,6 +442,10 @@ fn gen_channel( datadog_ipc::handles::TransferHandles::copy_handles(req, &mut __sink).ok(); let mut __data = datadog_ipc::codec::encode(req.discriminant(), req); let __fds = __sink.into_fds(); + let __max = datadog_ipc::max_message_size(); + if __data.len() > __max { + ::tracing::warn!(method = req.variant_name(), len = __data.len(), max = __max, "IPC message too large"); + } self.0.try_send(&mut __data, &__fds) } @@ -432,6 +458,10 @@ fn gen_channel( datadog_ipc::handles::TransferHandles::copy_handles(req, &mut __sink).ok(); let mut __data = datadog_ipc::codec::encode(req.discriminant(), req); let __fds = __sink.into_fds(); + let __max = datadog_ipc::max_message_size(); + if __data.len() > __max { + ::tracing::warn!(method = req.variant_name(), len = __data.len(), max = __max, "IPC message too large"); + } self.0.send_blocking(&mut __data, &__fds) } } diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index db03efba7b..14766ffa67 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ 
b/datadog-sidecar/src/service/blocking.rs @@ -191,15 +191,8 @@ pub fn enqueue_actions( queue_id: &QueueId, actions: Vec, ) -> io::Result<()> { - // Pre-serialize once so the Fn closure can borrow the bytes for both the initial - // attempt and the reconnect retry without needing SidecarAction: Clone. - let req = SidecarInterfaceRequest::EnqueueActions { - instance_id: instance_id.clone(), - queue_id: *queue_id, - actions, - }; - let data = datadog_ipc::codec::encode(req.discriminant(), &req); - transport.with_retry(|s| s.drain_and_send_raw_blocking(&data)) + lock_sender(transport)?.enqueue_actions(instance_id.clone(), *queue_id, actions); + Ok(()) } /// Sets the configuration for a session. diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index 005777b4e7..f3c6528fb7 100644 --- a/datadog-sidecar/src/service/sender.rs +++ b/datadog-sidecar/src/service/sender.rs @@ -257,10 +257,6 @@ impl SidecarSender { self.try_drain_outbox(); } - // ------------------------------------------------------------------------- - // Fire-and-forget methods (drain outbox first, then send; drop on EAGAIN) - // ------------------------------------------------------------------------- - /// Enqueue telemetry actions. /// /// When `outstanding > max_outstanding / 2`, 90% of calls are dropped to shed load. 
From 16d7a002b65ada2e980e39c043f144007bca68d4 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Thu, 12 Mar 2026 14:29:17 +0100 Subject: [PATCH 10/29] Refactor ClearQueueId Signed-off-by: Bob Weinand --- datadog-sidecar-ffi/src/lib.rs | 15 ++---- datadog-sidecar/src/service/blocking.rs | 10 ++++ datadog-sidecar/src/service/mod.rs | 1 - datadog-sidecar/src/service/sender.rs | 49 ++++++++++++++++++- .../src/service/sidecar_interface.rs | 8 +++ datadog-sidecar/src/service/sidecar_server.rs | 33 +++++++------ datadog-sidecar/src/service/telemetry.rs | 1 - 7 files changed, 89 insertions(+), 28 deletions(-) diff --git a/datadog-sidecar-ffi/src/lib.rs b/datadog-sidecar-ffi/src/lib.rs index 5e8a70debf..472749c974 100644 --- a/datadog-sidecar-ffi/src/lib.rs +++ b/datadog-sidecar-ffi/src/lib.rs @@ -507,11 +507,11 @@ pub unsafe extern "C" fn ddog_sidecar_lifecycle_end( transport, instance_id, queue_id, - vec![ - SidecarAction::Telemetry(TelemetryActions::Lifecycle(LifecycleAction::Stop)), - SidecarAction::ClearQueueId - ], + vec![SidecarAction::Telemetry(TelemetryActions::Lifecycle( + LifecycleAction::Stop, + ))], )); + try_c!(blocking::clear_queue_id(transport, instance_id, queue_id)); MaybeError::None } @@ -524,12 +524,7 @@ pub unsafe extern "C" fn ddog_sidecar_application_remove( instance_id: &InstanceId, queue_id: &QueueId, ) -> MaybeError { - try_c!(blocking::enqueue_actions( - transport, - instance_id, - queue_id, - vec![SidecarAction::ClearQueueId], - )); + try_c!(blocking::clear_queue_id(transport, instance_id, queue_id)); MaybeError::None } diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index 14766ffa67..9287076bf8 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ b/datadog-sidecar/src/service/blocking.rs @@ -195,6 +195,16 @@ pub fn enqueue_actions( Ok(()) } +/// Removes the application entry for the given queue ID from the instance. 
+pub fn clear_queue_id( + transport: &mut SidecarTransport, + instance_id: &InstanceId, + queue_id: &QueueId, +) -> io::Result<()> { + lock_sender(transport)?.clear_queue_id(instance_id.clone(), *queue_id); + Ok(()) +} + /// Sets the configuration for a session. pub fn set_session_config( transport: &mut SidecarTransport, diff --git a/datadog-sidecar/src/service/mod.rs b/datadog-sidecar/src/service/mod.rs index 09fa13d27e..c0bc0516aa 100644 --- a/datadog-sidecar/src/service/mod.rs +++ b/datadog-sidecar/src/service/mod.rs @@ -74,5 +74,4 @@ pub enum SidecarAction { RegisterTelemetryMetric(MetricContext), AddTelemetryMetricPoint((String, f64, Vec)), PhpComposerTelemetryFile(PathBuf), - ClearQueueId, } diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index f3c6528fb7..58570eeacd 100644 --- a/datadog-sidecar/src/service/sender.rs +++ b/datadog-sidecar/src/service/sender.rs @@ -33,17 +33,19 @@ struct SidecarOutbox { set_session_process_tags: Option, set_universal_service_tags: Option, set_request_config: Option, + clear_queue_id: Option, shutdown_runtime: Option, shutdown_session: Option, } impl SidecarOutbox { - fn slots_mut(&mut self) -> [&mut Option; 6] { + fn slots_mut(&mut self) -> [&mut Option; 7] { [ &mut self.set_session_config, &mut self.set_session_process_tags, &mut self.set_universal_service_tags, &mut self.set_request_config, + &mut self.clear_queue_id, &mut self.shutdown_runtime, &mut self.shutdown_session, ] @@ -77,6 +79,29 @@ fn cancel_if_session(slot: &mut Option, session_id: &st } } +fn cancel_if_queue( + slot: &mut Option, + instance_id: &InstanceId, + queue_id: &QueueId, +) { + let should_cancel = match slot { + Some(SidecarInterfaceRequest::SetUniversalServiceTags { + instance_id: id, + queue_id: q, + .. + }) => id == instance_id && q == queue_id, + Some(SidecarInterfaceRequest::SetRequestConfig { + instance_id: id, + queue_id: q, + .. 
+ }) => id == instance_id && q == queue_id, + _ => false, + }; + if should_cancel { + *slot = None; + } +} + fn coalesce(outbox: &mut SidecarOutbox, incoming: SidecarInterfaceRequest) { if let SidecarInterfaceRequest::ShutdownRuntime { ref instance_id } = incoming { cancel_if_instance(&mut outbox.set_request_config, instance_id); @@ -85,6 +110,14 @@ fn coalesce(outbox: &mut SidecarOutbox, incoming: SidecarInterfaceRequest) { if let SidecarInterfaceRequest::ShutdownSession { ref session_id } = incoming { cancel_if_session(&mut outbox.set_session_config, session_id); } + if let SidecarInterfaceRequest::ClearQueueId { + ref instance_id, + ref queue_id, + } = incoming + { + cancel_if_queue(&mut outbox.set_request_config, instance_id, queue_id); + cancel_if_queue(&mut outbox.set_universal_service_tags, instance_id, queue_id); + } match incoming { SidecarInterfaceRequest::SetSessionConfig { .. } => { @@ -99,6 +132,9 @@ fn coalesce(outbox: &mut SidecarOutbox, incoming: SidecarInterfaceRequest) { SidecarInterfaceRequest::SetRequestConfig { .. } => { outbox.set_request_config = Some(incoming); } + SidecarInterfaceRequest::ClearQueueId { .. } => { + outbox.clear_queue_id = Some(incoming); + } SidecarInterfaceRequest::ShutdownRuntime { .. 
} => { outbox.shutdown_runtime = Some(incoming); } @@ -241,6 +277,17 @@ impl SidecarSender { self.try_drain_outbox(); } + pub fn clear_queue_id(&mut self, instance_id: InstanceId, queue_id: QueueId) { + coalesce( + &mut self.outbox, + SidecarInterfaceRequest::ClearQueueId { + instance_id, + queue_id, + }, + ); + self.try_drain_outbox(); + } + pub fn shutdown_runtime(&mut self, instance_id: InstanceId) { coalesce( &mut self.outbox, diff --git a/datadog-sidecar/src/service/sidecar_interface.rs b/datadog-sidecar/src/service/sidecar_interface.rs index bab13c064e..f5ca0ddabb 100644 --- a/datadog-sidecar/src/service/sidecar_interface.rs +++ b/datadog-sidecar/src/service/sidecar_interface.rs @@ -62,6 +62,14 @@ pub trait SidecarInterface { /// * `process_tags` - The process tags. async fn set_session_process_tags(session_id: String, process_tags: Vec); + /// Removes the application entry for the given queue ID from the instance. + /// + /// # Arguments + /// + /// * `instance_id` - The ID of the instance. + /// * `queue_id` - The queue ID to clear. + async fn clear_queue_id(instance_id: InstanceId, queue_id: QueueId); + /// Shuts down a runtime. 
/// /// # Arguments diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 4f2b2dc908..0e56fe5a1e 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -406,13 +406,6 @@ impl SidecarServer { let mut applications = rt_info.lock_applications(); if let Entry::Occupied(entry) = applications.entry(queue_id) { - // Avoid materializing a telemetry client just to clear it - if actions.len() == 1 && matches!(actions[0], SidecarAction::ClearQueueId) { - info!("Removing queue_id {queue_id:?} from instance {instance_id:?}"); - entry.remove(); - return; - } - let service = entry .get() .service_name @@ -446,7 +439,6 @@ impl SidecarServer { let mut actions_to_process: Vec = vec![]; let mut composer_paths_to_process = vec![]; let mut buffered_info_changed = false; - let mut remove_entry = false; let mut remove_client = false; for action in actions { @@ -468,9 +460,6 @@ impl SidecarServer { buffered_info_changed = true; actions_to_process.push(action); } - SidecarAction::ClearQueueId => { - remove_entry = true; - } SidecarAction::Telemetry(TelemetryActions::AddEndpoint(_)) => { telemetry.last_endpoints_push = SystemTime::now(); buffered_info_changed = true; @@ -531,10 +520,6 @@ impl SidecarServer { self.telemetry_clients.remove_telemetry_client(service, env); } - if remove_entry { - info!("Removing queue_id {queue_id:?} from instance {instance_id:?}"); - entry.remove(); - } } else { info!("No application found for instance {instance_id:?} and queue_id {queue_id:?}"); } @@ -896,6 +881,24 @@ impl SidecarInterface for ConnectionSidecarHandler { .enqueue_actions_impl(instance_id, queue_id, actions); } + async fn clear_queue_id( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + queue_id: QueueId, + ) { + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); + self.track_instance(&instance_id); + let rt_info = 
self.server.get_runtime(&instance_id); + let mut applications = rt_info.lock_applications(); + if let Entry::Occupied(entry) = applications.entry(queue_id) { + info!("Removing queue_id {queue_id:?} from instance {instance_id:?}"); + entry.remove(); + } + } + async fn set_session_config( &self, peer: PeerCredentials, diff --git a/datadog-sidecar/src/service/telemetry.rs b/datadog-sidecar/src/service/telemetry.rs index 64c125b701..6741cdd2f8 100644 --- a/datadog-sidecar/src/service/telemetry.rs +++ b/datadog-sidecar/src/service/telemetry.rs @@ -171,7 +171,6 @@ impl TelemetryCachedClient { } } SidecarAction::PhpComposerTelemetryFile(_) => {} // handled separately - SidecarAction::ClearQueueId => {} // handled separately } } actions From 754b72bd41133ccd298edf4bd83add9bc3e19b9d Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Thu, 12 Mar 2026 14:55:41 +0100 Subject: [PATCH 11/29] Cleanup outstanding Signed-off-by: Bob Weinand --- datadog-ipc/src/client.rs | 31 +++++++++++-------------- datadog-sidecar/src/service/blocking.rs | 2 +- datadog-sidecar/src/service/sender.rs | 12 ++++++---- 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/datadog-ipc/src/client.rs b/datadog-ipc/src/client.rs index f3445640af..e8b30a2323 100644 --- a/datadog-ipc/src/client.rs +++ b/datadog-ipc/src/client.rs @@ -13,10 +13,6 @@ use std::os::windows::io::{OwnedHandle as OwnedFd, RawHandle as RawFd}; use std::io; use std::time::Duration; -/// Maximum number of fire-and-forget messages that may be outstanding -/// (sent but not yet acked) before the client blocks or drops new messages. -pub const MAX_OUTSTANDING: u64 = 100; - /// Client-side state for a single IPC connection. /// /// Tracks in-flight message counts for ack-based flow control. @@ -28,13 +24,13 @@ pub struct IpcClientConn { send_count: u64, /// Number of server replies received (acks or typed responses). ack_count: u64, - /// Maximum allowed `send_count - ack_count` before `try_send` returns false. 
- pub max_outstanding: u64, /// Reusable receive buffer. Sized to hold a maximum payload plus the platform wire overhead /// (`HANDLE_SUFFIX_SIZE`), so that messages can be read directly without an intermediate copy. recv_buf: Vec, /// Set to true when a fatal I/O error occurs on send or receive. closed: bool, + /// Skip draining when the caller already drained + drained_acks_since_send: bool, } impl IpcClientConn { @@ -43,9 +39,9 @@ impl IpcClientConn { conn, send_count: 0, ack_count: 0, - max_outstanding: MAX_OUTSTANDING, recv_buf: vec![0u8; max_message_size() + HANDLE_SUFFIX_SIZE], closed: false, + drained_acks_since_send: true, } } @@ -62,28 +58,29 @@ impl IpcClientConn { self.closed } - /// Number of sent-but-not-yet-acked messages. + /// Number of sent-but-not-yet-acked messages on client side. pub fn outstanding(&self) -> u64 { self.send_count - self.ack_count } - /// Non-blocking drain of all pending acks. Updates `ack_count`. + /// Non-blocking drain of all pending acks for client side. Updates `ack_count`. pub fn drain_acks(&mut self) { + self.drained_acks_since_send = true; while self.conn.try_recv_raw(&mut self.recv_buf).is_ok() { self.ack_count += 1; } } - /// Non-blocking send. + /// Attempt a non-blocking send. /// - /// First drains pending acks, then checks the outstanding limit. - /// Returns `false` if the socket would block (EAGAIN) or the outstanding - /// limit has been reached. `data` is unmodified after the call. + /// Returns `false` if the socket would block (EAGAIN). + /// `data` is unmodified after the call. 
pub fn try_send(&mut self, data: &mut Vec, fds: &[RawFd]) -> bool { - self.drain_acks(); - if self.outstanding() >= self.max_outstanding { - return false; + if !self.drained_acks_since_send { + self.drain_acks(); } + self.drained_acks_since_send = false; + match self.conn.try_send_raw(data, fds) { Ok(()) => { self.send_count += 1; @@ -136,7 +133,7 @@ impl IpcClientConn { if self.ack_count == target { return Ok((self.recv_buf[..n].to_vec(), resp_fds)); } - // Intermediate ack for a prior fire-and-forget message — discard. + // Intermediate ack for a prior fire-and-forget message — continue. } } } diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index 9287076bf8..4ee1ea6148 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ b/datadog-sidecar/src/service/blocking.rs @@ -66,7 +66,7 @@ impl SidecarTransport { pub fn set_backpressure(&mut self, max_bytes: usize, max_queue: u64) -> io::Result<()> { let mut sender = lock_sender(self)?; - sender.channel.0.max_outstanding = max_queue; + sender.max_outstanding = max_queue; #[cfg(unix)] sender.channel.0.conn.set_sndbuf_size(max_bytes)?; #[cfg(not(unix))] diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index 58570eeacd..2489e4f068 100644 --- a/datadog-sidecar/src/service/sender.rs +++ b/datadog-sidecar/src/service/sender.rs @@ -148,11 +148,12 @@ fn coalesce(outbox: &mut SidecarOutbox, incoming: SidecarInterfaceRequest) { } /// Higher-level IPC sender with outbox coalescing and telemetry load-shedding. -/// -/// Takes `&mut self` — callers are responsible for exclusive access. pub struct SidecarSender { pub channel: SidecarInterfaceChannel, outbox: SidecarOutbox, + /// Maximum allowed outstanding (sent-but-not-acked) messages before outbox drain is skipped + /// and fire-and-forget sends are blocked. + pub max_outstanding: u64, /// Cycles 0–9; used to implement 90% telemetry drop under backpressure. 
enqueue_actions_counter: u8, } @@ -162,6 +163,7 @@ impl SidecarSender { Self { channel, outbox: SidecarOutbox::default(), + max_outstanding: 100, enqueue_actions_counter: 0, } } @@ -171,7 +173,7 @@ impl SidecarSender { self.channel.0.drain_acks(); for slot in self.outbox.slots_mut() { if let Some(msg) = slot { - if self.channel.0.outstanding() >= self.channel.0.max_outstanding { + if self.channel.0.outstanding() >= self.max_outstanding { return false; } if !self.channel.try_send_request(msg) { @@ -317,12 +319,12 @@ impl SidecarSender { return; } // Load-shed: drop 90% when buffer is more than half full. - if self.channel.0.outstanding() > self.channel.0.max_outstanding / 2 { + if self.channel.0.outstanding() > self.max_outstanding / 2 { self.enqueue_actions_counter = self.enqueue_actions_counter.wrapping_add(1) % 10; if self.enqueue_actions_counter != 0 { return; } - // The 1-in-10 that passes through falls to the try_send below. + // The 10% that passes through falls to the try_send below. 
} self.channel .try_send_enqueue_actions(instance_id, queue_id, actions); From 6207930e62340b60943e94dd81f0247b1f54092a Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 13 Mar 2026 19:31:48 +0100 Subject: [PATCH 12/29] Avoid re-registering metrics all the time Signed-off-by: Bob Weinand --- datadog-sidecar/src/service/blocking.rs | 99 ++++++++++--------- datadog-sidecar/src/service/mod.rs | 2 - datadog-sidecar/src/service/sender.rs | 20 ++++ .../src/service/sidecar_interface.rs | 13 +++ datadog-sidecar/src/service/sidecar_server.rs | 40 +++++++- datadog-sidecar/src/service/telemetry.rs | 1 - libdd-telemetry/src/metrics.rs | 2 +- 7 files changed, 125 insertions(+), 52 deletions(-) diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index 4ee1ea6148..e0ed7b775a 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ b/datadog-sidecar/src/service/blocking.rs @@ -5,8 +5,9 @@ use super::{ DynamicInstrumentationConfigState, InstanceId, QueueId, SerializedTracerHeaderTags, SessionConfig, SidecarAction, }; +use libdd_telemetry::metrics::MetricContext; use crate::service::sender::SidecarSender; -use crate::service::sidecar_interface::{SidecarInterfaceChannel, SidecarInterfaceRequest}; +use crate::service::sidecar_interface::SidecarInterfaceChannel; use datadog_ipc::platform::{FileBackedHandle, ShmHandle}; use datadog_ipc::SeqpacketConn; use datadog_live_debugger::debugger_defs::DebuggerPayload; @@ -19,7 +20,8 @@ use std::{ io, time::{Duration, Instant}, }; -use tracing::{info, warn}; +use tracing::warn; +use libdd_common::MutexExt; /// `SidecarTransport` wraps a [`SidecarSender`] with transparent reconnection support. 
/// @@ -37,23 +39,38 @@ impl SidecarTransport { where F: FnOnce() -> Option>, { - let mut transport = match self.inner.lock() { + Self::do_reconnect(&mut self.inner, factory, false); + } + + pub fn do_reconnect(transport: &mut Mutex, factory: F, force_reconnect: bool) -> bool + where + F: FnOnce() -> Option>, + { + let mut transport = match transport.lock() { Ok(t) => t, - Err(_) => return, + Err(_) => return false, }; #[allow(clippy::unwrap_used)] - if transport.channel.0.is_closed() { - info!("The sidecar transport is closed. Reconnecting..."); + if force_reconnect || transport.channel.0.is_closed() { + warn!("The sidecar transport is closed. Reconnecting... This generally indicates a problem with the sidecar, most likely a crash. Check the logs / core dump locations and possibly report a bug."); let new = match factory() { - None => return, + None => return false, Some(n) => n.inner.into_inner(), }; if new.is_err() { - return; + return false; } + let registrations = std::mem::take(&mut transport.metric_registrations); + *transport = new.unwrap(); + + // Replay all registered metrics after a reconnect + for metric in registrations.into_values() { + transport.register_telemetry_metric(metric); + } } + true } pub fn set_read_timeout(&mut self, d: Option) -> io::Result<()> { @@ -75,21 +92,8 @@ impl SidecarTransport { } pub fn ensure_alive(&mut self) { - let closed = match self.inner.lock() { - Ok(guard) => guard.channel.0.is_closed(), - Err(_) => return, - }; - if closed { - if let Some(ref reconnect) = self.reconnect_fn { - warn!("The sidecar transport is closed. Reconnecting... This generally indicates a problem with the sidecar, most likely a crash. 
Check the logs / core dump locations and possibly report a bug."); - if let Some(n) = reconnect() { - if let Ok(mut guard) = self.inner.lock() { - if let Ok(new) = n.inner.into_inner() { - *guard = new; - } - } - } - } + if let Some(ref reconnect) = self.reconnect_fn { + Self::do_reconnect(&mut self.inner, reconnect, false); } } @@ -106,32 +110,26 @@ impl SidecarTransport { where F: Fn(&mut SidecarSender) -> io::Result, { - let mut inner = match self.inner.lock() { - Ok(t) => t, - Err(e) => return Err(io::Error::other(e.to_string())), + let e = { + let mut inner = match self.inner.lock() { + Ok(t) => t, + Err(e) => return Err(io::Error::other(e.to_string())), + }; + match f(&mut inner) { + Ok(ret) => return Ok(ret), + Err(e) => e, + } }; - match f(&mut inner) { - Ok(ret) => Ok(ret), - Err(e) => { - if e.kind() == io::ErrorKind::BrokenPipe - || e.kind() == io::ErrorKind::ConnectionReset - { - if let Some(ref reconnect) = self.reconnect_fn { - warn!("The sidecar transport is closed. Reconnecting... This generally indicates a problem with the sidecar, most likely a crash. Check the logs / core dump locations and possibly report a bug."); - *inner = match reconnect() { - None => return Err(e), - #[allow(clippy::unwrap_used)] - Some(n) => n.inner.into_inner().unwrap(), - }; - f(&mut inner) - } else { - Err(e) - } - } else { - Err(e) + if e.kind() == io::ErrorKind::BrokenPipe + || e.kind() == io::ErrorKind::ConnectionReset + { + if let Some(ref reconnect) = self.reconnect_fn { + if Self::do_reconnect(&mut self.inner, reconnect, true) { + return f(&mut self.inner.lock_or_panic()); } } } + Err(e) } /// Send garbage data (used in tests to verify error handling). @@ -205,6 +203,17 @@ pub fn clear_queue_id( Ok(()) } +/// Registers a telemetry metric context on this connection. +/// +/// Connection-bound: deduplicated per connection, never dropped, replayed after reconnect. 
+pub fn register_telemetry_metric( + transport: &mut SidecarTransport, + metric: MetricContext, +) -> io::Result<()> { + lock_sender(transport)?.register_telemetry_metric(metric); + Ok(()) +} + /// Sets the configuration for a session. pub fn set_session_config( transport: &mut SidecarTransport, diff --git a/datadog-sidecar/src/service/mod.rs b/datadog-sidecar/src/service/mod.rs index c0bc0516aa..e25e1f159f 100644 --- a/datadog-sidecar/src/service/mod.rs +++ b/datadog-sidecar/src/service/mod.rs @@ -6,7 +6,6 @@ use crate::config; use datadog_remote_config::{RemoteConfigCapabilities, RemoteConfigProduct}; use libdd_common::tag::Tag; use libdd_common::Endpoint; -use libdd_telemetry::metrics::MetricContext; use libdd_telemetry::worker::TelemetryActions; use serde::{Deserialize, Serialize}; use std::path::PathBuf; @@ -71,7 +70,6 @@ pub struct SessionConfig { #[derive(Debug, Deserialize, Serialize)] pub enum SidecarAction { Telemetry(TelemetryActions), - RegisterTelemetryMetric(MetricContext), AddTelemetryMetricPoint((String, f64, Vec)), PhpComposerTelemetryFile(PathBuf), } diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index 2489e4f068..d0d2384cef 100644 --- a/datadog-sidecar/src/service/sender.rs +++ b/datadog-sidecar/src/service/sender.rs @@ -21,6 +21,8 @@ use datadog_ipc::platform::ShmHandle; use datadog_live_debugger::sender::DebuggerType; use libdd_common::tag::Tag; use libdd_dogstatsd_client::DogStatsDActionOwned; +use libdd_telemetry::metrics::MetricContext; +use std::collections::HashMap; use std::{io, time::Duration}; /// Priority outbox for state-change (coalesced) messages. @@ -156,6 +158,9 @@ pub struct SidecarSender { pub max_outstanding: u64, /// Cycles 0–9; used to implement 90% telemetry drop under backpressure. enqueue_actions_counter: u8, + /// All metric registrations ever sent on this transport (keyed by name). + /// Persisted across reconnects; replayed on new connections before any metric points. 
+ pub metric_registrations: HashMap, } impl SidecarSender { @@ -165,6 +170,7 @@ impl SidecarSender { outbox: SidecarOutbox::default(), max_outstanding: 100, enqueue_actions_counter: 0, + metric_registrations: HashMap::new(), } } @@ -279,6 +285,20 @@ impl SidecarSender { self.try_drain_outbox(); } + /// Registers a telemetry metric context on this connection. + /// + /// Deduplicates by name: if already registered on this connection, the call is a no-op. + /// Sends the registration blocking (bypasses load-shedding). The registration is stored + /// and replayed automatically after any reconnect, before the next `enqueue_actions` call. + pub fn register_telemetry_metric(&mut self, metric: MetricContext) { + if self.metric_registrations.contains_key(&metric.name) { + return; + } + self.metric_registrations.insert(metric.name.clone(), metric.clone()); + let req = SidecarInterfaceRequest::RegisterTelemetryMetric { metric }; + self.channel.send_request_blocking(&req).ok(); + } + pub fn clear_queue_id(&mut self, instance_id: InstanceId, queue_id: QueueId) { coalesce( &mut self.outbox, diff --git a/datadog-sidecar/src/service/sidecar_interface.rs b/datadog-sidecar/src/service/sidecar_interface.rs index f5ca0ddabb..9c80ca19d3 100644 --- a/datadog-sidecar/src/service/sidecar_interface.rs +++ b/datadog-sidecar/src/service/sidecar_interface.rs @@ -6,6 +6,7 @@ use crate::service::{ InstanceId, QueueId, SerializedTracerHeaderTags, SessionConfig, SidecarAction, }; +use libdd_telemetry::metrics::MetricContext; use datadog_ipc::platform::ShmHandle; use datadog_live_debugger::sender::DebuggerType; use libdd_common::tag::Tag; @@ -70,6 +71,18 @@ pub trait SidecarInterface { /// * `queue_id` - The queue ID to clear. async fn clear_queue_id(instance_id: InstanceId, queue_id: QueueId); + /// Registers a telemetry metric context on this connection.
+ /// + /// Registrations are connection-bound: tracked per connection, never dropped, + /// and automatically replayed after a reconnect. + /// + /// # Arguments + /// + /// * `metric` - The metric context to register on this connection. + async fn register_telemetry_metric( + metric: MetricContext, + ); + /// Shuts down a runtime. /// /// # Arguments diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 0e56fe5a1e..56e819c977 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -12,6 +12,7 @@ use crate::service::{ use datadog_ipc::platform::{FileBackedHandle, ShmHandle}; use datadog_ipc::{PeerCredentials, SeqpacketConn}; use libdd_common::{Endpoint, MutexExt}; +use libdd_telemetry::metrics::MetricContext; use libdd_telemetry::worker::{LifecycleAction, TelemetryActions, TelemetryWorkerStats}; use libdd_trace_utils::trace_utils::SendData; use libdd_trace_utils::tracer_payload::decode_to_trace_chunks; @@ -131,8 +132,12 @@ pub struct SidecarServer { /// Per-connection handler wrapper that tracks sessions/instances for cleanup on disconnect. struct ConnectionSidecarHandler { server: SidecarServer, - sessions: std::sync::Mutex>, - instances: std::sync::Mutex>, + sessions: Mutex>, + instances: Mutex>, + /// All telemetry metric registrations received on this connection, keyed by metric name. + /// Used to auto-register metrics in newly-created telemetry clients when a metric point + /// for a previously registered metric arrives for a new (service, env) combination. 
+ metric_registrations: Mutex>, } impl ConnectionSidecarHandler { @@ -141,6 +146,7 @@ impl ConnectionSidecarHandler { server, sessions: Default::default(), instances: Default::default(), + metric_registrations: Default::default(), } } @@ -393,6 +399,7 @@ impl SidecarServer { instance_id: InstanceId, queue_id: QueueId, actions: Vec, + connection_metric_registrations: &HashMap, ) { let session = self.get_session(&instance_id.session_id); let trace_config = session.get_trace_config(); @@ -436,6 +443,18 @@ impl SidecarServer { ); let mut telemetry = telemetry_mutex.lock_or_panic(); + // Auto-register any metrics known to this connection but not yet registered + // in this telemetry client (e.g., the client was just created for a new service/env). + for action in &actions { + if let SidecarAction::AddTelemetryMetricPoint((name, _, _)) = action { + if !telemetry.telemetry_metrics.contains_key(name) { + if let Some(metric) = connection_metric_registrations.get(name) { + telemetry.register_metric(metric.clone()); + } + } + } + } + let mut actions_to_process: Vec = vec![]; let mut composer_paths_to_process = vec![]; let mut buffered_info_changed = false; @@ -877,8 +896,9 @@ impl SidecarInterface for ConnectionSidecarHandler { .submitted_payloads .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); + let metrics_registrations = self.metric_registrations.lock_or_panic().clone(); self.server - .enqueue_actions_impl(instance_id, queue_id, actions); + .enqueue_actions_impl(instance_id, queue_id, actions, &metrics_registrations); } async fn clear_queue_id( @@ -899,6 +919,20 @@ impl SidecarInterface for ConnectionSidecarHandler { } } + async fn register_telemetry_metric( + &self, + _peer: PeerCredentials, + metric: MetricContext, + ) { + self.server + .submitted_payloads + .fetch_add(1, Ordering::Relaxed); + self.metric_registrations + .lock_or_panic() + .entry(metric.name.clone()) + .or_insert(metric); + } + async fn set_session_config( &self, peer: 
PeerCredentials, diff --git a/datadog-sidecar/src/service/telemetry.rs b/datadog-sidecar/src/service/telemetry.rs index 6741cdd2f8..e9beefedf9 100644 --- a/datadog-sidecar/src/service/telemetry.rs +++ b/datadog-sidecar/src/service/telemetry.rs @@ -161,7 +161,6 @@ impl TelemetryCachedClient { for action in sidecar_actions { match action { SidecarAction::Telemetry(t) => actions.push(t), - SidecarAction::RegisterTelemetryMetric(metric) => self.register_metric(metric), SidecarAction::AddTelemetryMetricPoint(point) => { let metric_name = point.0.clone(); if let Some(telemetry_action) = self.to_telemetry_point(point) { diff --git a/libdd-telemetry/src/metrics.rs b/libdd-telemetry/src/metrics.rs index 9bb933d817..1899f7aabd 100644 --- a/libdd-telemetry/src/metrics.rs +++ b/libdd-telemetry/src/metrics.rs @@ -159,7 +159,7 @@ impl MetricBuckets { } } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct MetricContext { pub namespace: data::metrics::MetricNamespace, pub name: String, From 5bd465a27d24b1263e3a1efa7e340cc57614d9ca Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 13 Mar 2026 20:38:39 +0100 Subject: [PATCH 13/29] Deduplicate payload counting Signed-off-by: Bob Weinand --- datadog-ipc-macros/src/lib.rs | 7 +- datadog-ipc/src/example_interface.rs | 14 ++-- datadog-sidecar/src/service/sidecar_server.rs | 67 ++----------------- 3 files changed, 17 insertions(+), 71 deletions(-) diff --git a/datadog-ipc-macros/src/lib.rs b/datadog-ipc-macros/src/lib.rs index 48fbe5c7c8..dbfad9a90b 100644 --- a/datadog-ipc-macros/src/lib.rs +++ b/datadog-ipc-macros/src/lib.rs @@ -255,6 +255,10 @@ fn gen_handler_trait( quote! { #vis trait #trait_name: Send + Sync + 'static { + /// Returns the counter incremented on each received IPC message. + /// The serve loop uses this to track received payloads. 
+ fn recv_counter(&self) -> &::std::sync::atomic::AtomicU64; + #(#handler_methods)* } } @@ -315,7 +319,6 @@ fn gen_serve_fn( return; } }; - let mut recv_counter: u64 = 0; let mut buf = vec![0u8; datadog_ipc::max_message_size() + datadog_ipc::HANDLE_SUFFIX_SIZE]; loop { let (n, fds) = match datadog_ipc::recv_raw_async(&async_fd, &mut buf).await { @@ -339,7 +342,7 @@ fn gen_serve_fn( ::tracing::warn!("IPC serve: failed to receive handles"); break; } - recv_counter += 1; + let recv_counter = handler.recv_counter().fetch_add(1, ::std::sync::atomic::Ordering::Relaxed) + 1; ::tracing::trace!(recv_counter, discriminant, pid = peer.pid, "IPC recv"); match req { diff --git a/datadog-ipc/src/example_interface.rs b/datadog-ipc/src/example_interface.rs index 3b9aa85936..81fe45401f 100644 --- a/datadog-ipc/src/example_interface.rs +++ b/datadog-ipc/src/example_interface.rs @@ -3,7 +3,7 @@ use std::{ fs::File, sync::{ - atomic::{AtomicU32, Ordering}, + atomic::{AtomicU64, Ordering}, Arc, Mutex, }, time::{Duration, Instant}, @@ -25,7 +25,7 @@ pub trait ExampleInterface { #[derive(Default, Clone)] pub struct ExampleServer { - req_cnt: Arc, + req_cnt: Arc, stored_files: Arc>>>, } @@ -37,11 +37,14 @@ impl ExampleServer { } impl ExampleInterface for ExampleServer { + fn recv_counter(&self) -> &AtomicU64 { + &self.req_cnt + } + fn notify( &self, _peer: datadog_ipc::PeerCredentials, ) -> impl std::future::Future + Send + '_ { - self.req_cnt.fetch_add(1, Ordering::AcqRel); std::future::ready(()) } @@ -49,7 +52,6 @@ impl ExampleInterface for ExampleServer { &self, _peer: datadog_ipc::PeerCredentials, ) -> impl std::future::Future + Send + '_ { - self.req_cnt.fetch_add(1, Ordering::AcqRel); std::future::ready(()) } @@ -57,7 +59,6 @@ impl ExampleInterface for ExampleServer { &self, _peer: datadog_ipc::PeerCredentials, ) -> impl std::future::Future + Send + '_ { - self.req_cnt.fetch_add(1, Ordering::AcqRel); std::future::ready(Instant::now().elapsed()) } @@ -65,7 +66,7 @@ impl 
ExampleInterface for ExampleServer { &self, _peer: datadog_ipc::PeerCredentials, ) -> impl std::future::Future + Send + '_ { - std::future::ready(self.req_cnt.fetch_add(1, Ordering::AcqRel)) + std::future::ready(self.req_cnt.load(Ordering::Relaxed) as u32) } fn store_file( @@ -73,7 +74,6 @@ impl ExampleInterface for ExampleServer { _peer: datadog_ipc::PeerCredentials, file: PlatformHandle, ) -> impl std::future::Future + Send + '_ { - self.req_cnt.fetch_add(1, Ordering::AcqRel); #[allow(clippy::unwrap_used)] self.stored_files.lock().unwrap().push(file); std::future::ready(()) diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 56e819c977..7c103045b1 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -885,6 +885,10 @@ impl SidecarServer { } impl SidecarInterface for ConnectionSidecarHandler { + fn recv_counter(&self) -> &AtomicU64 { + &self.server.submitted_payloads + } + async fn enqueue_actions( &self, _peer: PeerCredentials, @@ -892,9 +896,6 @@ impl SidecarInterface for ConnectionSidecarHandler { queue_id: QueueId, actions: Vec, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); let metrics_registrations = self.metric_registrations.lock_or_panic().clone(); self.server @@ -907,9 +908,6 @@ impl SidecarInterface for ConnectionSidecarHandler { instance_id: InstanceId, queue_id: QueueId, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); let rt_info = self.server.get_runtime(&instance_id); let mut applications = rt_info.lock_applications(); @@ -924,9 +922,6 @@ impl SidecarInterface for ConnectionSidecarHandler { _peer: PeerCredentials, metric: MetricContext, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.metric_registrations .lock_or_panic() .entry(metric.name.clone()) @@ -941,9 +936,6 @@ impl 
SidecarInterface for ConnectionSidecarHandler { config: SessionConfig, is_fork: bool, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_session(&session_id); self.server .set_session_config_impl( @@ -963,26 +955,17 @@ impl SidecarInterface for ConnectionSidecarHandler { session_id: String, process_tags: String, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_session(&session_id); self.server .set_session_process_tags_impl(session_id, process_tags); } async fn shutdown_runtime(&self, _peer: PeerCredentials, instance_id: InstanceId) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server.shutdown_runtime_impl(instance_id); } async fn shutdown_session(&self, _peer: PeerCredentials, session_id: String) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_session(&session_id); self.server.shutdown_session_impl(session_id); } @@ -995,9 +978,6 @@ impl SidecarInterface for ConnectionSidecarHandler { len: usize, headers: SerializedTracerHeaderTags, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .send_trace_v04_shm_impl(instance_id, handle, len, headers); @@ -1010,9 +990,6 @@ impl SidecarInterface for ConnectionSidecarHandler { data: Vec, headers: SerializedTracerHeaderTags, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .send_trace_v04_bytes_impl(instance_id, data, headers); @@ -1026,9 +1003,6 @@ impl SidecarInterface for ConnectionSidecarHandler { handle: ShmHandle, debugger_type: DebuggerType, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .send_debugger_data_shm_impl(instance_id, queue_id, handle, debugger_type); @@ -1041,9 +1015,6 @@ impl SidecarInterface for 
ConnectionSidecarHandler { queue_id: QueueId, diagnostics_payload: Vec, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .send_debugger_diagnostics_impl(instance_id, queue_id, diagnostics_payload); @@ -1055,9 +1026,6 @@ impl SidecarInterface for ConnectionSidecarHandler { exception_hash: u64, granularity: Duration, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.server .acquire_exception_hash_rate_limiter_impl(exception_hash, granularity); } @@ -1073,9 +1041,6 @@ impl SidecarInterface for ConnectionSidecarHandler { global_tags: Vec, dynamic_instrumentation_state: DynamicInstrumentationConfigState, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server.set_universal_service_tags_impl( instance_id, @@ -1095,9 +1060,6 @@ impl SidecarInterface for ConnectionSidecarHandler { queue_id: QueueId, dynamic_instrumentation_state: DynamicInstrumentationConfigState, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .set_request_config_impl(instance_id, queue_id, dynamic_instrumentation_state); @@ -1109,18 +1071,12 @@ impl SidecarInterface for ConnectionSidecarHandler { instance_id: InstanceId, actions: Vec, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_instance(&instance_id); self.server .send_dogstatsd_actions_impl(instance_id, actions); } async fn flush_traces(&self, _peer: PeerCredentials) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.server.flush_traces_impl().await; } @@ -1130,30 +1086,17 @@ impl SidecarInterface for ConnectionSidecarHandler { session_id: String, token: String, ) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.track_session(&session_id); self.server.set_test_session_token_impl(session_id, token); } - async fn 
ping(&self, _peer: PeerCredentials) { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); - } + async fn ping(&self, _peer: PeerCredentials) {} async fn dump(&self, _peer: PeerCredentials) -> String { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.server.dump_impl().await } async fn stats(&self, _peer: PeerCredentials) -> String { - self.server - .submitted_payloads - .fetch_add(1, Ordering::Relaxed); self.server.stats_impl().await } } From 4da94e8bd435e830098854ff258cbaf26be82be8 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 13 Mar 2026 22:48:16 +0100 Subject: [PATCH 14/29] No session ids Signed-off-by: Bob Weinand --- .../src/platform/unix/sockets/linux.rs | 13 +- datadog-sidecar-ffi/src/lib.rs | 4 - datadog-sidecar/src/service/blocking.rs | 10 +- datadog-sidecar/src/service/sender.rs | 31 +- .../src/service/sidecar_interface.rs | 7 +- datadog-sidecar/src/service/sidecar_server.rs | 413 ++++++------------ 6 files changed, 151 insertions(+), 327 deletions(-) diff --git a/datadog-ipc/src/platform/unix/sockets/linux.rs b/datadog-ipc/src/platform/unix/sockets/linux.rs index 45a80aa245..1c94f3736e 100644 --- a/datadog-ipc/src/platform/unix/sockets/linux.rs +++ b/datadog-ipc/src/platform/unix/sockets/linux.rs @@ -25,19 +25,20 @@ impl SeqpacketListener { /// Removes any stale socket file before binding (standard Unix practice). pub fn bind(path: impl AsRef) -> io::Result { let _ = std::fs::remove_file(path.as_ref()); - let fd = create_seqpacket_socket()?; let addr = UnixAddr::new(path.as_ref()).map_err(io::Error::from)?; - bind(fd.as_raw_fd(), &addr).map_err(io::Error::from)?; - listen(&fd, Backlog::new(128).map_err(io::Error::from)?).map_err(io::Error::from)?; - Ok(Self { inner: fd }) + Self::do_bind(addr) } /// Bind to a Linux abstract socket name and start listening. 
pub fn bind_abstract(name: &[u8]) -> io::Result { - let fd = create_seqpacket_socket()?; let addr = UnixAddr::new_abstract(name).map_err(io::Error::from)?; + Self::do_bind(addr) + } + + fn do_bind(addr: UnixAddr) -> io::Result { + let fd = create_seqpacket_socket()?; bind(fd.as_raw_fd(), &addr).map_err(io::Error::from)?; - listen(&fd, Backlog::new(128).map_err(io::Error::from)?).map_err(io::Error::from)?; + listen(&fd, Backlog::new(128).unwrap_or(Backlog::MAXCONN)).map_err(io::Error::from)?; Ok(Self { inner: fd }) } diff --git a/datadog-sidecar-ffi/src/lib.rs b/datadog-sidecar-ffi/src/lib.rs index 472749c974..b88ebbc036 100644 --- a/datadog-sidecar-ffi/src/lib.rs +++ b/datadog-sidecar-ffi/src/lib.rs @@ -646,12 +646,10 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( #[allow(clippy::missing_safety_doc)] pub unsafe extern "C" fn ddog_sidecar_session_set_process_tags( transport: &mut Box, - session_id: ffi::CharSlice, process_tags: &libdd_common_ffi::Vec, ) -> MaybeError { try_c!(blocking::set_session_process_tags( transport, - session_id.to_utf8_lossy().into(), process_tags.to_vec(), )); @@ -1273,12 +1271,10 @@ pub unsafe extern "C" fn ddog_sidecar_dogstatsd_set( #[allow(clippy::missing_safety_doc)] pub unsafe extern "C" fn ddog_sidecar_set_test_session_token( transport: &mut Box, - session_id: ffi::CharSlice, token: ffi::CharSlice, ) -> MaybeError { try_c!(blocking::set_test_session_token( transport, - session_id.to_utf8_lossy().into_owned(), token.to_utf8_lossy().into_owned(), )); diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index e0ed7b775a..77af5adddb 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ b/datadog-sidecar/src/service/blocking.rs @@ -173,8 +173,8 @@ pub fn shutdown_runtime( } /// Shuts down a session. 
-pub fn shutdown_session(transport: &mut SidecarTransport, session_id: String) -> io::Result<()> { - lock_sender(transport)?.shutdown_session(session_id); +pub fn shutdown_session(transport: &mut SidecarTransport) -> io::Result<()> { + lock_sender(transport)?.shutdown_session(); Ok(()) } @@ -236,10 +236,9 @@ pub fn set_session_config( /// Updates the process tags for an existing session. pub fn set_session_process_tags( transport: &mut SidecarTransport, - session_id: String, process_tags: Vec, ) -> io::Result<()> { - lock_sender(transport)?.set_session_process_tags(session_id, process_tags); + lock_sender(transport)?.set_session_process_tags(process_tags); Ok(()) } @@ -401,10 +400,9 @@ pub fn send_dogstatsd_actions( /// Sets x-datadog-test-session-token on all requests for the given session. pub fn set_test_session_token( transport: &mut SidecarTransport, - session_id: String, token: String, ) -> io::Result<()> { - lock_sender(transport)?.set_test_session_token(session_id, token); + lock_sender(transport)?.set_test_session_token(token); Ok(()) } diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index d0d2384cef..ecce9ade34 100644 --- a/datadog-sidecar/src/service/sender.rs +++ b/datadog-sidecar/src/service/sender.rs @@ -69,17 +69,6 @@ fn cancel_if_instance(slot: &mut Option, instance_id: & } } -fn cancel_if_session(slot: &mut Option, session_id: &str) { - let should_cancel = match slot { - Some(SidecarInterfaceRequest::SetSessionConfig { session_id: id, .. 
}) => { - id.as_str() == session_id - } - _ => false, - }; - if should_cancel { - *slot = None; - } -} fn cancel_if_queue( slot: &mut Option, @@ -109,8 +98,8 @@ fn coalesce(outbox: &mut SidecarOutbox, incoming: SidecarInterfaceRequest) { cancel_if_instance(&mut outbox.set_request_config, instance_id); cancel_if_instance(&mut outbox.set_universal_service_tags, instance_id); } - if let SidecarInterfaceRequest::ShutdownSession { ref session_id } = incoming { - cancel_if_session(&mut outbox.set_session_config, session_id); + if matches!(incoming, SidecarInterfaceRequest::ShutdownSession {}) { + outbox.set_session_config = None; } if let SidecarInterfaceRequest::ClearQueueId { ref instance_id, @@ -231,13 +220,10 @@ impl SidecarSender { self.try_drain_outbox(); } - pub fn set_session_process_tags(&mut self, session_id: String, process_tags: String) { + pub fn set_session_process_tags(&mut self, process_tags: String) { coalesce( &mut self.outbox, - SidecarInterfaceRequest::SetSessionProcessTags { - session_id, - process_tags, - }, + SidecarInterfaceRequest::SetSessionProcessTags { process_tags }, ); self.try_drain_outbox(); } @@ -318,10 +304,10 @@ impl SidecarSender { self.try_drain_outbox(); } - pub fn shutdown_session(&mut self, session_id: String) { + pub fn shutdown_session(&mut self) { coalesce( &mut self.outbox, - SidecarInterfaceRequest::ShutdownSession { session_id }, + SidecarInterfaceRequest::ShutdownSession {}, ); self.try_drain_outbox(); } @@ -428,12 +414,11 @@ impl SidecarSender { .try_send_send_dogstatsd_actions(instance_id, actions); } - pub fn set_test_session_token(&mut self, session_id: String, token: String) { + pub fn set_test_session_token(&mut self, token: String) { if !self.try_drain_outbox() { return; } - self.channel - .try_send_set_test_session_token(session_id, token); + self.channel.try_send_set_test_session_token(token); } pub fn set_read_timeout(&mut self, d: Option) -> io::Result<()> { diff --git 
a/datadog-sidecar/src/service/sidecar_interface.rs b/datadog-sidecar/src/service/sidecar_interface.rs index 9c80ca19d3..00dfa45511 100644 --- a/datadog-sidecar/src/service/sidecar_interface.rs +++ b/datadog-sidecar/src/service/sidecar_interface.rs @@ -59,9 +59,8 @@ pub trait SidecarInterface { /// /// # Arguments /// - /// * `session_id` - The ID of the session. /// * `process_tags` - The process tags. - async fn set_session_process_tags(session_id: String, process_tags: Vec); + async fn set_session_process_tags(process_tags: Vec); /// Removes the application entry for the given queue ID from the instance. /// @@ -95,7 +94,7 @@ pub trait SidecarInterface { /// # Arguments /// /// * `session_id` - The ID of the session. - async fn shutdown_session(session_id: String); + async fn shutdown_session(); /// Sends a trace via shared memory. /// @@ -214,7 +213,7 @@ pub trait SidecarInterface { /// /// * `session_id` - The ID of the session. /// * `token` - The session token. - async fn set_test_session_token(session_id: String, token: String); + async fn set_test_session_token(token: String); /// Sends a ping to the service. #[blocking] diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 7c103045b1..60bcb87dab 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -132,7 +132,7 @@ pub struct SidecarServer { /// Per-connection handler wrapper that tracks sessions/instances for cleanup on disconnect. struct ConnectionSidecarHandler { server: SidecarServer, - sessions: Mutex>, + session_id: std::sync::OnceLock, instances: Mutex>, /// All telemetry metric registrations received on this connection, keyed by metric name. 
/// Used to auto-register metrics in newly-created telemetry clients when a metric point @@ -144,35 +144,20 @@ impl ConnectionSidecarHandler { fn new(server: SidecarServer) -> Self { Self { server, - sessions: Default::default(), + session_id: Default::default(), instances: Default::default(), metric_registrations: Default::default(), } } - fn track_session(&self, session_id: &str) { - if self.sessions.lock_or_panic().insert(session_id.to_owned()) { - let mut counter = self.server.session_counter.lock_or_panic(); - match counter.entry(session_id.to_owned()) { - Entry::Occupied(mut e) => { - e.insert(e.get() + 1); - } - Entry::Vacant(e) => { - e.insert(1); - } - } - } - } - fn track_instance(&self, instance_id: &InstanceId) { self.instances.lock_or_panic().insert(instance_id.clone()); } async fn cleanup(&self) { - let sessions: Vec = self.sessions.lock_or_panic().iter().cloned().collect(); let instances: Vec = self.instances.lock_or_panic().iter().cloned().collect(); - for session_id in &sessions { + if let Some(session_id) = self.session_id.get() { let stop = { let mut counter = self.server.session_counter.lock_or_panic(); if let Entry::Occupied(mut entry) = counter.entry(session_id.clone()) { @@ -190,6 +175,7 @@ impl ConnectionSidecarHandler { self.server.stop_session(session_id).await; } } + for instance_id in instances { let maybe_session = self .server @@ -393,15 +379,23 @@ impl SidecarServer { pub fn shutdown(&self) { self.remote_configs.shutdown(); } +} + +impl SidecarInterface for ConnectionSidecarHandler { + fn recv_counter(&self) -> &AtomicU64 { + &self.server.submitted_payloads + } - fn enqueue_actions_impl( + async fn enqueue_actions( &self, + _peer: PeerCredentials, instance_id: InstanceId, queue_id: QueueId, actions: Vec, - connection_metric_registrations: &HashMap, ) { - let session = self.get_session(&instance_id.session_id); + self.track_instance(&instance_id); + let connection_metric_registrations = 
self.metric_registrations.lock_or_panic().clone(); + let session = self.server.get_session(&instance_id.session_id); let trace_config = session.get_trace_config(); let runtime_metadata = RuntimeMetadata::new( trace_config.language.clone(), @@ -409,7 +403,7 @@ impl SidecarServer { trace_config.tracer_version.clone(), ); - let rt_info = self.get_runtime(&instance_id); + let rt_info = self.server.get_runtime(&instance_id); let mut applications = rt_info.lock_applications(); if let Entry::Occupied(entry) = applications.entry(queue_id) { @@ -423,7 +417,7 @@ impl SidecarServer { let process_tags = session.process_tags.lock_or_panic().clone(); // Lock telemetry client - let telemetry_mutex = self.telemetry_clients.get_or_create( + let telemetry_mutex = self.server.telemetry_clients.get_or_create( service, env, &instance_id, @@ -536,26 +530,57 @@ impl SidecarServer { if remove_client { info!("Removing telemetry client for instance {instance_id:?}"); - self.telemetry_clients.remove_telemetry_client(service, env); + self.server.telemetry_clients.remove_telemetry_client(service, env); } - } else { info!("No application found for instance {instance_id:?} and queue_id {queue_id:?}"); } } - async fn set_session_config_impl( + async fn clear_queue_id( + &self, + _peer: PeerCredentials, + instance_id: InstanceId, + queue_id: QueueId, + ) { + let rt_info = self.server.get_runtime(&instance_id); + let mut applications = rt_info.lock_applications(); + if let Entry::Occupied(entry) = applications.entry(queue_id) { + info!("Removing queue_id {queue_id:?} from instance {instance_id:?}"); + entry.remove(); + } + } + + async fn register_telemetry_metric( + &self, + _peer: PeerCredentials, + metric: MetricContext, + ) { + self.metric_registrations + .lock_or_panic() + .entry(metric.name.clone()) + .or_insert(metric); + } + + async fn set_session_config( &self, + peer: PeerCredentials, session_id: String, - peer_pid: u32, #[cfg(windows)] remote_config_notify_function: 
crate::service::remote_configs::RemoteConfigNotifyFunction, config: SessionConfig, is_fork: bool, ) { + if self.session_id.set(session_id.clone()).is_ok() { + let mut counter = self.server.session_counter.lock_or_panic(); + match counter.entry(session_id.clone()) { + Entry::Occupied(mut e) => { e.insert(e.get() + 1); } + Entry::Vacant(e) => { e.insert(1); } + } + } debug!("Set session config for {session_id} to {config:?}"); - let session = self.get_session(&session_id); - session.pid.store(peer_pid as i32, Ordering::Relaxed); + let session = self.server.get_session(&session_id); + session.pid.store(peer.pid as i32, Ordering::Relaxed); #[cfg(windows)] #[allow(clippy::unwrap_used)] { @@ -564,7 +589,7 @@ impl SidecarServer { winapi::um::processthreadsapi::OpenProcess( winapi::um::winnt::PROCESS_ALL_ACCESS, 0, - peer_pid, + peer.pid, ) }; if !handle.is_null() { @@ -605,7 +630,7 @@ impl SidecarServer { }); if config.endpoint.api_key.is_none() { // no agent info if agentless - let agent_info = self.agent_infos.query_for(config.endpoint.clone()); + let agent_info = self.server.agent_infos.query_for(config.endpoint.clone()); let session_info = session.clone(); run_or_spawn_shared(agent_info.get(), move |info| { if !agent_info_supports_debugger_v2_endpoint(info) { @@ -626,13 +651,16 @@ impl SidecarServer { capabilities: config.remote_config_capabilities, }); *session.remote_config_interval.lock_or_panic() = config.remote_config_poll_interval; - self.trace_flusher + self.server + .trace_flusher .interval_ms .store(config.flush_interval.as_millis() as u64, Ordering::Relaxed); - self.trace_flusher + self.server + .trace_flusher .min_force_flush_size_bytes .store(config.force_flush_size as u32, Ordering::Relaxed); - self.trace_flusher + self.server + .trace_flusher .min_force_drop_size_bytes .store(config.force_drop_size as u32, Ordering::Relaxed); @@ -641,7 +669,7 @@ impl SidecarServer { MULTI_LOG_WRITER.add(config.log_file), )); - if let Some(completer) = 
self.self_telemetry_config.lock_or_panic().take() { + if let Some(completer) = self.server.self_telemetry_config.lock_or_panic().take() { #[allow(clippy::expect_used)] let config = session .session_config @@ -659,35 +687,44 @@ impl SidecarServer { } } - fn set_session_process_tags_impl(&self, session_id: String, process_tags: Vec) { - let session = self.get_session(&session_id); + async fn set_session_process_tags( + &self, + _peer: PeerCredentials, + process_tags: Vec, + ) { + let session_id = self.session_id.get().cloned().unwrap_or_default(); + let session = self.server.get_session(&session_id); *session.process_tags.lock_or_panic() = process_tags; } - fn shutdown_runtime_impl(&self, instance_id: InstanceId) { - let session = self.get_session(&instance_id.session_id); + async fn shutdown_runtime(&self, _peer: PeerCredentials, instance_id: InstanceId) { + let session = self.server.get_session(&instance_id.session_id); tokio::spawn(async move { session.shutdown_runtime(&instance_id.runtime_id).await }); } - fn shutdown_session_impl(&self, session_id: String) { - let server = self.clone(); + async fn shutdown_session(&self, _peer: PeerCredentials) { + let server = self.server.clone(); + let session_id = self.session_id.get().cloned().unwrap_or_default(); tokio::spawn(async move { server.stop_session(&session_id).await }); } - fn send_trace_v04_shm_impl( + async fn send_trace_v04_shm( &self, + _peer: PeerCredentials, instance_id: InstanceId, handle: ShmHandle, _len: usize, headers: SerializedTracerHeaderTags, ) { + self.track_instance(&instance_id); if let Some(endpoint) = self + .server .get_session(&instance_id.session_id) .get_trace_config() .endpoint .clone() { - let server = self.clone(); + let server = self.server.clone(); tokio::spawn(async move { match handle.map() { Ok(mapped) => { @@ -705,19 +742,22 @@ impl SidecarServer { } } - fn send_trace_v04_bytes_impl( + async fn send_trace_v04_bytes( &self, + _peer: PeerCredentials, instance_id: InstanceId, data: 
Vec, headers: SerializedTracerHeaderTags, ) { + self.track_instance(&instance_id); if let Some(endpoint) = self + .server .get_session(&instance_id.session_id) .get_trace_config() .endpoint .clone() { - let server = self.clone(); + let server = self.server.clone(); tokio::spawn(async move { let bytes = tinybytes::Bytes::from(data); server.send_trace_v04(&headers, bytes, &endpoint); @@ -730,14 +770,16 @@ impl SidecarServer { } } - fn send_debugger_data_shm_impl( + async fn send_debugger_data_shm( &self, + _peer: PeerCredentials, instance_id: InstanceId, queue_id: QueueId, handle: ShmHandle, debugger_type: DebuggerType, ) { - let session = self.get_session(&instance_id.session_id); + self.track_instance(&instance_id); + let session = self.server.get_session(&instance_id.session_id); match handle.map() { Ok(mapped) => { session.send_debugger_data( @@ -751,19 +793,21 @@ impl SidecarServer { } } - fn send_debugger_diagnostics_impl( + async fn send_debugger_diagnostics( &self, + _peer: PeerCredentials, instance_id: InstanceId, queue_id: QueueId, diagnostics_payload: Vec, ) { - let session = self.get_session(&instance_id.session_id); + self.track_instance(&instance_id); + let session = self.server.get_session(&instance_id.session_id); #[allow(clippy::unwrap_used)] let payload = serde_json::from_slice(diagnostics_payload.as_slice()).unwrap(); // We segregate RC by endpoint. // So we assume that runtime ids are unique per endpoint and we can safely filter globally. 
#[allow(clippy::unwrap_used)] - if self.debugger_diagnostics_bookkeeper.add_payload(&payload) { + if self.server.debugger_diagnostics_bookkeeper.add_payload(&payload) { session.send_debugger_data( DebuggerType::Diagnostics, &instance_id.runtime_id, @@ -773,15 +817,21 @@ impl SidecarServer { } } - fn acquire_exception_hash_rate_limiter_impl(&self, exception_hash: u64, granularity: Duration) { + async fn acquire_exception_hash_rate_limiter( + &self, + _peer: PeerCredentials, + exception_hash: u64, + granularity: Duration, + ) { EXCEPTION_HASH_LIMITER .lock_or_panic() .add(exception_hash, granularity); } #[allow(clippy::too_many_arguments)] - fn set_universal_service_tags_impl( + async fn set_universal_service_tags( &self, + _peer: PeerCredentials, instance_id: InstanceId, queue_id: QueueId, service_name: String, @@ -790,18 +840,19 @@ impl SidecarServer { global_tags: Vec, dynamic_instrumentation_state: DynamicInstrumentationConfigState, ) { + self.track_instance(&instance_id); debug!("Registered remote config metadata: instance {instance_id:?}, queue_id: {queue_id:?}, service: {service_name}, env: {env_name}, version: {app_version}"); - let session = self.get_session(&instance_id.session_id); + let session = self.server.get_session(&instance_id.session_id); let runtime_info = session.get_runtime(&instance_id.runtime_id); let mut applications = runtime_info.lock_applications(); let app = applications.entry(queue_id).or_default(); app.set_metadata(env_name, app_version, service_name, global_tags); - let Some(notify_target) = self.get_notify_target(&session) else { + let Some(notify_target) = self.server.get_notify_target(&session) else { return; }; app.update_remote_config( - &self.remote_configs, + &self.server.remote_configs, &session, instance_id, notify_target, @@ -809,21 +860,23 @@ impl SidecarServer { ); } - fn set_request_config_impl( + async fn set_request_config( &self, + _peer: PeerCredentials, instance_id: InstanceId, queue_id: QueueId, 
dynamic_instrumentation_state: DynamicInstrumentationConfigState, ) { - let session = self.get_session(&instance_id.session_id); + self.track_instance(&instance_id); + let session = self.server.get_session(&instance_id.session_id); let runtime_info = session.get_runtime(&instance_id.runtime_id); let mut applications = runtime_info.lock_applications(); let app = applications.entry(queue_id).or_default(); - let Some(notify_target) = self.get_notify_target(&session) else { + let Some(notify_target) = self.server.get_notify_target(&session) else { return; }; app.update_remote_config( - &self.remote_configs, + &self.server.remote_configs, &session, instance_id, notify_target, @@ -831,12 +884,14 @@ impl SidecarServer { ); } - fn send_dogstatsd_actions_impl( + async fn send_dogstatsd_actions( &self, + _peer: PeerCredentials, instance_id: InstanceId, actions: Vec, ) { - let server = self.clone(); + self.track_instance(&instance_id); + let server = self.server.clone(); tokio::spawn(async move { server .get_session(&instance_id.session_id) @@ -846,15 +901,20 @@ impl SidecarServer { }); } - async fn flush_traces_impl(&self) { - let flusher = self.trace_flusher.clone(); + async fn flush_traces(&self, _peer: PeerCredentials) { + let flusher = self.server.trace_flusher.clone(); if let Err(e) = tokio::spawn(async move { flusher.flush().await }).await { error!("Failed flushing traces: {e:?}"); } } - fn set_test_session_token_impl(&self, session_id: String, token: String) { - let session = self.get_session(&session_id); + async fn set_test_session_token( + &self, + _peer: PeerCredentials, + token: String, + ) { + let session_id = self.session_id.get().cloned().unwrap_or_default(); + let session = self.server.get_session(&session_id); let token = if token.is_empty() { None } else { @@ -873,231 +933,16 @@ impl SidecarServer { // }); } - async fn dump_impl(&self) -> String { - crate::dump::dump().await - } - - async fn stats_impl(&self) -> String { - let stats = 
self.compute_stats().await; - #[allow(clippy::expect_used)] - simd_json::serde::to_string(&stats).expect("unable to serialize stats to string") - } -} - -impl SidecarInterface for ConnectionSidecarHandler { - fn recv_counter(&self) -> &AtomicU64 { - &self.server.submitted_payloads - } - - async fn enqueue_actions( - &self, - _peer: PeerCredentials, - instance_id: InstanceId, - queue_id: QueueId, - actions: Vec, - ) { - self.track_instance(&instance_id); - let metrics_registrations = self.metric_registrations.lock_or_panic().clone(); - self.server - .enqueue_actions_impl(instance_id, queue_id, actions, &metrics_registrations); - } - - async fn clear_queue_id( - &self, - _peer: PeerCredentials, - instance_id: InstanceId, - queue_id: QueueId, - ) { - self.track_instance(&instance_id); - let rt_info = self.server.get_runtime(&instance_id); - let mut applications = rt_info.lock_applications(); - if let Entry::Occupied(entry) = applications.entry(queue_id) { - info!("Removing queue_id {queue_id:?} from instance {instance_id:?}"); - entry.remove(); - } - } - - async fn register_telemetry_metric( - &self, - _peer: PeerCredentials, - metric: MetricContext, - ) { - self.metric_registrations - .lock_or_panic() - .entry(metric.name.clone()) - .or_insert(metric); - } - - async fn set_session_config( - &self, - peer: PeerCredentials, - session_id: String, - #[cfg(windows)] remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, - config: SessionConfig, - is_fork: bool, - ) { - self.track_session(&session_id); - self.server - .set_session_config_impl( - session_id, - peer.pid, - #[cfg(windows)] - remote_config_notify_function, - config, - is_fork, - ) - .await; - } - - async fn set_session_process_tags( - &self, - _peer: PeerCredentials, - session_id: String, - process_tags: String, - ) { - self.track_session(&session_id); - self.server - .set_session_process_tags_impl(session_id, process_tags); - } - - async fn shutdown_runtime(&self, _peer: 
PeerCredentials, instance_id: InstanceId) { - self.track_instance(&instance_id); - self.server.shutdown_runtime_impl(instance_id); - } - - async fn shutdown_session(&self, _peer: PeerCredentials, session_id: String) { - self.track_session(&session_id); - self.server.shutdown_session_impl(session_id); - } - - async fn send_trace_v04_shm( - &self, - _peer: PeerCredentials, - instance_id: InstanceId, - handle: ShmHandle, - len: usize, - headers: SerializedTracerHeaderTags, - ) { - self.track_instance(&instance_id); - self.server - .send_trace_v04_shm_impl(instance_id, handle, len, headers); - } - - async fn send_trace_v04_bytes( - &self, - _peer: PeerCredentials, - instance_id: InstanceId, - data: Vec, - headers: SerializedTracerHeaderTags, - ) { - self.track_instance(&instance_id); - self.server - .send_trace_v04_bytes_impl(instance_id, data, headers); - } - - async fn send_debugger_data_shm( - &self, - _peer: PeerCredentials, - instance_id: InstanceId, - queue_id: QueueId, - handle: ShmHandle, - debugger_type: DebuggerType, - ) { - self.track_instance(&instance_id); - self.server - .send_debugger_data_shm_impl(instance_id, queue_id, handle, debugger_type); - } - - async fn send_debugger_diagnostics( - &self, - _peer: PeerCredentials, - instance_id: InstanceId, - queue_id: QueueId, - diagnostics_payload: Vec, - ) { - self.track_instance(&instance_id); - self.server - .send_debugger_diagnostics_impl(instance_id, queue_id, diagnostics_payload); - } - - async fn acquire_exception_hash_rate_limiter( - &self, - _peer: PeerCredentials, - exception_hash: u64, - granularity: Duration, - ) { - self.server - .acquire_exception_hash_rate_limiter_impl(exception_hash, granularity); - } - - async fn set_universal_service_tags( - &self, - _peer: PeerCredentials, - instance_id: InstanceId, - queue_id: QueueId, - service_name: String, - env_name: String, - app_version: String, - global_tags: Vec, - dynamic_instrumentation_state: DynamicInstrumentationConfigState, - ) { - 
self.track_instance(&instance_id); - self.server.set_universal_service_tags_impl( - instance_id, - queue_id, - service_name, - env_name, - app_version, - global_tags, - dynamic_instrumentation_state, - ); - } - - async fn set_request_config( - &self, - _peer: PeerCredentials, - instance_id: InstanceId, - queue_id: QueueId, - dynamic_instrumentation_state: DynamicInstrumentationConfigState, - ) { - self.track_instance(&instance_id); - self.server - .set_request_config_impl(instance_id, queue_id, dynamic_instrumentation_state); - } - - async fn send_dogstatsd_actions( - &self, - _peer: PeerCredentials, - instance_id: InstanceId, - actions: Vec, - ) { - self.track_instance(&instance_id); - self.server - .send_dogstatsd_actions_impl(instance_id, actions); - } - - async fn flush_traces(&self, _peer: PeerCredentials) { - self.server.flush_traces_impl().await; - } - - async fn set_test_session_token( - &self, - _peer: PeerCredentials, - session_id: String, - token: String, - ) { - self.track_session(&session_id); - self.server.set_test_session_token_impl(session_id, token); - } - async fn ping(&self, _peer: PeerCredentials) {} async fn dump(&self, _peer: PeerCredentials) -> String { - self.server.dump_impl().await + crate::dump::dump().await } async fn stats(&self, _peer: PeerCredentials) -> String { - self.server.stats_impl().await + let stats = self.server.compute_stats().await; + #[allow(clippy::expect_used)] + simd_json::serde::to_string(&stats).expect("unable to serialize stats to string") } } From 654bda0a48a8b6ecce0c8d3e9ca18631b3134802 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 13 Mar 2026 23:51:27 +0100 Subject: [PATCH 15/29] Cleanup windows pid shm stuff Signed-off-by: Bob Weinand --- datadog-ipc/src/platform/windows/sockets.rs | 8 ++++++- datadog-sidecar/src/setup/windows.rs | 6 ------ datadog-sidecar/src/windows.rs | 24 ++------------------- 3 files changed, 9 insertions(+), 29 deletions(-) diff --git a/datadog-ipc/src/platform/windows/sockets.rs 
b/datadog-ipc/src/platform/windows/sockets.rs index c61fb0a771..52194809b8 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -37,6 +37,7 @@ use tokio::net::windows::named_pipe::{NamedPipeClient, NamedPipeServer}; use winapi::shared::minwindef::ULONG; use winapi::shared::winerror::ERROR_PIPE_CONNECTED; use winapi::um::handleapi::{CloseHandle, DuplicateHandle, INVALID_HANDLE_VALUE}; +use winapi::um::minwinbase::SECURITY_ATTRIBUTES; use winapi::um::processthreadsapi::{GetCurrentProcess, GetCurrentProcessId, OpenProcess}; use winapi::um::winbase::{GetNamedPipeClientProcessId, GetNamedPipeServerProcessId}; use winapi::um::winnt::{DUPLICATE_SAME_ACCESS, HANDLE, PROCESS_DUP_HANDLE}; @@ -241,6 +242,11 @@ fn create_pipe_server(name: &[u8], first_instance: bool) -> io::Result() as u32, + lpSecurityDescriptor: null_mut(), + bInheritHandle: 1, // We want this one to be inherited + }; CreateNamedPipeA( name.as_ptr(), open_mode, @@ -249,7 +255,7 @@ fn create_pipe_server(name: &[u8], first_instance: bool) -> io::Result CString { - #[allow(clippy::unwrap_used)] - CString::new(&pipe_path[PIPE_PATH.len() - 1..]).unwrap() -} #[cfg(test)] mod tests { diff --git a/datadog-sidecar/src/windows.rs b/datadog-sidecar/src/windows.rs index c1ed17aad9..e516e5bf1d 100644 --- a/datadog-sidecar/src/windows.rs +++ b/datadog-sidecar/src/windows.rs @@ -2,10 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::enter_listener_loop; -use crate::setup::pid_shm_path; -use datadog_ipc::platform::{ - named_pipe_name_from_raw_handle, FileBackedHandle, MappedMem, NamedShmHandle, -}; +use datadog_ipc::platform::named_pipe_name_from_raw_handle; use datadog_ipc::{SeqpacketConn, SeqpacketListener}; use futures::FutureExt; @@ -62,19 +59,6 @@ pub extern "C" fn ddog_daemon_entry_point(_trampoline_data: &TrampolineData) { let pid = unsafe { libc::getpid() }; if let Some(handle) = spawn_worker::recv_passed_handle() { - let mut shm = match 
named_pipe_name_from_raw_handle(handle.as_raw_handle()) - .ok_or(io::Error::from(io::ErrorKind::InvalidInput)) - .and_then(|name| NamedShmHandle::create(pid_shm_path(&name), 4)) - .and_then(FileBackedHandle::map) - { - Ok(ok) => ok, - Err(err) => { - error!("Couldn't store pid to shared memory: {err}"); - return; - } - }; - shm.as_slice_mut().copy_from_slice(&pid.to_ne_bytes()); - info!("Starting sidecar, pid: {}", pid); let acquire_listener = move || { @@ -88,10 +72,8 @@ pub extern "C" fn ddog_daemon_entry_point(_trampoline_data: &TrampolineData) { } }; - // We pass the shm to ensure we drop the shm handle with the pid immediately after - // cancellation To avoid actual race conditions Ok(( - |handler| accept_socket_loop(listener, closed_future, handler, shm), + |handler| accept_socket_loop(listener, closed_future, handler), cancel, )) }; @@ -112,7 +94,6 @@ async fn accept_socket_loop( listener: SeqpacketListener, cancellation: ManualFuture<()>, handler: Box, - _: MappedMem, ) -> io::Result<()> { // Wrap the first server instance as a Tokio NamedPipeServer for async connect polling. // After each accepted connection we create a fresh Tokio server for the next client. 
@@ -144,7 +125,6 @@ async fn accept_socket_loop( let conn = SeqpacketConn::from_server_handle(owned, client_pid); handler(conn); } - // drops pipe and shm here Ok(()) } From 194d602b8ee2fc73d42fe1412767f7717f2a2dcb Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Mon, 16 Mar 2026 19:58:11 +0100 Subject: [PATCH 16/29] Fixup windows --- datadog-ipc/src/example_interface.rs | 32 +++- datadog-ipc/src/platform/windows/handles.rs | 15 +- datadog-ipc/src/platform/windows/sockets.rs | 175 +++++++++++--------- datadog-ipc/tests/windows_shm.rs | 75 +++++++++ datadog-sidecar/src/setup/windows.rs | 7 +- datadog-sidecar/src/windows.rs | 39 ++--- 6 files changed, 228 insertions(+), 115 deletions(-) create mode 100644 datadog-ipc/tests/windows_shm.rs diff --git a/datadog-ipc/src/example_interface.rs b/datadog-ipc/src/example_interface.rs index 81fe45401f..670eff4415 100644 --- a/datadog-ipc/src/example_interface.rs +++ b/datadog-ipc/src/example_interface.rs @@ -9,7 +9,7 @@ use std::{ time::{Duration, Instant}, }; -use super::platform::PlatformHandle; +use super::platform::{FileBackedHandle, PlatformHandle, ShmHandle}; extern crate self as datadog_ipc; @@ -21,6 +21,13 @@ pub trait ExampleInterface { async fn time_now() -> Duration; async fn req_cnt() -> u32; async fn store_file(#[SerializedHandle] file: PlatformHandle); + /// Receives a shared memory handle, maps it, and returns the sum of the first `len` bytes. + /// Used to verify cross-process handle transfer (Windows DuplicateHandle / Unix SCM_RIGHTS). + async fn shm_sum(#[SerializedHandle] handle: ShmHandle, len: usize) -> u64; + /// Receives a byte payload and returns its length. + /// Used to verify that messages larger than mio's 4 KB internal read buffer are handled + /// correctly (no ERROR_MORE_DATA panic). 
+ async fn echo_len(payload: Vec) -> u32; } #[derive(Default, Clone)] @@ -29,7 +36,6 @@ pub struct ExampleServer { stored_files: Arc>>>, } -#[cfg(unix)] impl ExampleServer { pub async fn accept_connection(self, conn: crate::SeqpacketConn) { serve_example_interface_connection(conn, Arc::new(self)).await @@ -78,4 +84,26 @@ impl ExampleInterface for ExampleServer { self.stored_files.lock().unwrap().push(file); std::future::ready(()) } + + fn shm_sum( + &self, + _peer: datadog_ipc::PeerCredentials, + handle: ShmHandle, + len: usize, + ) -> impl std::future::Future + Send + '_ { + async move { + match handle.map() { + Ok(mapped) => mapped.as_slice()[..len].iter().map(|&b| b as u64).sum(), + Err(_) => u64::MAX, + } + } + } + + fn echo_len( + &self, + _peer: datadog_ipc::PeerCredentials, + payload: Vec, + ) -> impl std::future::Future + Send + '_ { + std::future::ready(payload.len() as u32) + } } diff --git a/datadog-ipc/src/platform/windows/handles.rs b/datadog-ipc/src/platform/windows/handles.rs index 3bfa17a83c..99adf37667 100644 --- a/datadog-ipc/src/platform/windows/handles.rs +++ b/datadog-ipc/src/platform/windows/handles.rs @@ -6,16 +6,19 @@ use crate::platform::PlatformHandle; use std::collections::VecDeque; use std::os::windows::io::{FromRawHandle, IntoRawHandle, OwnedHandle}; -/// No-op sink — Windows handles are transferred in-band via message suffix, not out-of-band. -pub struct FdSink; +/// Collects Windows handles to be transferred in-band in the message suffix. +/// +/// `copy_handle` records each handle's raw value; `into_fds` returns them so +/// `append_handle_suffix` can duplicate them into the peer process before sending. 
+pub struct FdSink(Vec); impl FdSink { pub fn new() -> Self { - FdSink + FdSink(Vec::new()) } pub fn into_fds(self) -> Vec { - Vec::new() + self.0 } } @@ -28,7 +31,9 @@ impl Default for FdSink { impl HandlesTransport for &mut FdSink { type Error = std::convert::Infallible; - fn copy_handle(self, _handle: PlatformHandle) -> Result<(), Self::Error> { + fn copy_handle(self, handle: PlatformHandle) -> Result<(), Self::Error> { + use std::os::windows::io::AsRawHandle; + self.0.push(handle.as_raw_handle()); Ok(()) } diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 52194809b8..80df221384 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -31,7 +31,6 @@ use std::sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Mutex, }; -use tokio::net::windows::named_pipe::{NamedPipeClient, NamedPipeServer}; // winapi – only used for things not cleanly available in windows-sys use winapi::shared::minwindef::ULONG; @@ -286,8 +285,9 @@ fn make_overlapped(event: SysHANDLE) -> OVERLAPPED { /// A named-pipe server that accepts message-mode IPC connections. /// /// `try_accept` swaps the connected pipe instance for a fresh server instance so the listener -/// remains ready for the next client. Interior mutability (`Mutex`) allows `&self` in -/// `try_accept`. +/// remains ready for the next client. `accept_blocking` does the same but blocks until a client +/// connects (polling `try_accept` with a short sleep). Interior mutability (`Mutex`) allows +/// `&self` in both methods. pub struct SeqpacketListener { inner: Mutex, name: Vec, // NUL-terminated ANSI pipe name, e.g. `\\.\\pipe\\…` @@ -400,15 +400,52 @@ impl SeqpacketListener { // Swap: the connected handle goes to the SeqpacketConn; the fresh server replaces it. let conn_handle = std::mem::replace(&mut *guard, new_server); + // PID handshake: write our PID to the client so it can correctly DuplicateHandle into us. 
+ // + // The named pipe creator is determined by who calls CreateNamedPipeA. When PHP creates the + // listener and passes it to the sidecar, GetNamedPipeServerProcessId on the client side + // returns PHP's own PID — not the sidecar's — causing DuplicateHandle to target the wrong + // process. This one-shot 4-byte message lets the client discover the actual acceptor PID + // before sending any handles. + let my_pid = unsafe { GetCurrentProcessId() }; + let pid_bytes = my_pid.to_le_bytes(); + let mut written: u32 = 0; + unsafe { + WriteFile( + conn_handle.as_raw_handle() as SysHANDLE, + pid_bytes.as_ptr() as _, + 4, + &mut written, + null_mut(), + ) + }; + Ok(SeqpacketConn { - inner: conn_handle, + handle: conn_handle, peer_pid: client_pid, - is_server: true, read_timeout: None, write_timeout: None, }) } + /// Block until a client connects and return the accepted connection. + /// + /// Polls `try_accept` in a loop with a short sleep so that callers running on a + /// `spawn_blocking` thread do not spin. Because this does not go through Tokio's + /// IOCP reactor, the accepted handle is a raw synchronous handle with **no** pending + /// overlapped I/O — safe to use with `block_in_place` reads. + pub fn accept_blocking(&self) -> io::Result { + loop { + match self.try_accept() { + Ok(conn) => return Ok(conn), + Err(e) if e.kind() == io::ErrorKind::WouldBlock => { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + Err(e) => return Err(e), + } + } + } + pub fn as_raw_handle(&self) -> RawHandle { self.inner .lock() @@ -434,11 +471,8 @@ impl IntoRawHandle for SeqpacketListener { /// A connected named pipe providing message-boundary-preserving IPC. pub struct SeqpacketConn { - pub(crate) inner: OwnedHandle, + handle: OwnedHandle, peer_pid: u32, - /// True for server-side handles (opened with `FILE_FLAG_OVERLAPPED` via `CreateNamedPipeA`); - /// these can be converted to a Tokio async pipe via `into_async_conn`. 
- is_server: bool, read_timeout: Option, write_timeout: Option, } @@ -476,15 +510,37 @@ impl SeqpacketConn { let mode = PIPE_READMODE_MESSAGE; unsafe { SetNamedPipeHandleState(h as SysHANDLE, &mode, null(), null()) }; - let inner = unsafe { OwnedHandle::from_raw_handle(h as RawHandle) }; - let mut server_pid: ULONG = 0; - unsafe { - GetNamedPipeServerProcessId(inner.as_raw_handle() as HANDLE, &mut server_pid); - } + // PID handshake: read the 4-byte PID written by try_accept() so that we know the real + // acceptor PID, not the pipe-creator PID returned by GetNamedPipeServerProcessId. + // + // When PHP creates the listener and passes it to the sidecar, GetNamedPipeServerProcessId + // returns PHP's own PID. Using that for DuplicateHandle silently duplicates handles back + // into PHP rather than into the sidecar, causing ERROR_INVALID_HANDLE on the sidecar side. + let mut pid_buf = [0u8; 4]; + let mut read_bytes: u32 = 0; + let pid_ok = unsafe { + ReadFile( + h as SysHANDLE, + pid_buf.as_mut_ptr() as _, + 4, + &mut read_bytes, + null_mut(), + ) + }; + let server_pid: ULONG = if pid_ok != 0 && read_bytes == 4 { + u32::from_le_bytes(pid_buf) + } else { + // Fallback: use GetNamedPipeServerProcessId (returns the creator's PID, which may be + // our own PID if we created the pipe and passed it to the sidecar). + let mut spid: ULONG = 0; + unsafe { GetNamedPipeServerProcessId(h as HANDLE, &mut spid) }; + spid + }; + + let handle = unsafe { OwnedHandle::from_raw_handle(h as RawHandle) }; Ok(Self { - inner, + handle, peer_pid: server_pid, - is_server: false, read_timeout: None, write_timeout: None, }) @@ -518,9 +574,8 @@ impl SeqpacketConn { } let server = Self { - inner: server_handle, + handle: server_handle, peer_pid: pid, - is_server: true, read_timeout: None, write_timeout: None, }; @@ -530,14 +585,17 @@ impl SeqpacketConn { /// Build a `SeqpacketConn` from a server-side pipe handle (after `ConnectNamedPipe`). 
pub fn from_server_handle(handle: OwnedHandle, client_pid: u32) -> Self { Self { - inner: handle, + handle, peer_pid: client_pid, - is_server: true, read_timeout: None, write_timeout: None, } } + fn raw_handle(&self) -> SysHANDLE { + self.handle.as_raw_handle() as SysHANDLE + } + /// Retrieve the peer process's credentials (pid, uid). pub fn peer_credentials(&self) -> io::Result { Ok(PeerCredentials { @@ -557,7 +615,7 @@ impl SeqpacketConn { data.truncate(orig_len); return Err(e); } - let result = pipe_write(self.inner.as_raw_handle() as SysHANDLE, data, false); + let result = pipe_write(self.raw_handle(), data, false); data.truncate(orig_len); result } @@ -569,7 +627,7 @@ impl SeqpacketConn { data.truncate(orig_len); return Err(e); } - let result = pipe_write(self.inner.as_raw_handle() as SysHANDLE, data, true); + let result = pipe_write(self.raw_handle(), data, true); data.truncate(orig_len); result } @@ -578,18 +636,18 @@ impl SeqpacketConn { /// /// `buf` must be at least `payload_max + HANDLE_SUFFIX_SIZE` bytes. pub fn try_recv_raw(&self, buf: &mut [u8]) -> io::Result<(usize, Vec)> { - pipe_read(self.inner.as_raw_handle() as SysHANDLE, buf, false) + pipe_read(self.raw_handle(), buf, false) } /// Blocking receive. /// /// `buf` must be at least `payload_max + HANDLE_SUFFIX_SIZE` bytes. pub fn recv_raw_blocking(&self, buf: &mut [u8]) -> io::Result<(usize, Vec)> { - pipe_read(self.inner.as_raw_handle() as SysHANDLE, buf, true) + pipe_read(self.raw_handle(), buf, true) } pub fn as_raw_handle(&self) -> RawHandle { - self.inner.as_raw_handle() + self.raw_handle() as RawHandle } pub fn set_read_timeout(&mut self, d: Option) -> io::Result<()> { @@ -619,30 +677,19 @@ pub fn is_listening>(path: P) -> io::Result { Ok(SeqpacketConn::connect(path).is_ok()) } -/// Internal: wraps either a `NamedPipeServer` or `NamedPipeClient` for dispatch. -enum AsyncPipe { - Server(NamedPipeServer), - Client(NamedPipeClient), -} - -macro_rules! 
async_pipe { - ($pipe:expr, $method:ident($($args:expr),*)$($trailing:tt)*) => { - match &$pipe { - AsyncPipe::Server(s) => s.$method($($args),*)$($trailing)*, - AsyncPipe::Client(c) => c.$method($($args),*)$($trailing)*, - } - }; -} - /// Async connection type for Windows named-pipe IPC. /// -/// Wraps a Tokio `NamedPipeServer` or `NamedPipeClient` registered with the IOCP reactor, -/// enabling fully async recv/send without blocking any Tokio thread. +/// Wraps a raw synchronous pipe handle; recv/send go through `block_in_place` + +/// `ReadFile`/`WriteFile` with a caller-supplied large buffer, bypassing mio's +/// 4 KB internal IOCP read buffer limit. pub struct AsyncSeqpacketConn { - inner: AsyncPipe, + handle: OwnedHandle, pub(crate) peer_pid: u32, } +// SAFETY: the inner OwnedHandle is not shared. +unsafe impl Send for AsyncSeqpacketConn {} + impl AsyncSeqpacketConn { pub fn peer_credentials(&self) -> io::Result { Ok(PeerCredentials { @@ -656,20 +703,9 @@ pub type AsyncConn = AsyncSeqpacketConn; impl SeqpacketConn { /// Convert to an async connection for use in async server dispatch loops. - /// - /// Requires a running Tokio runtime with IOCP support. - /// Only works for server-side handles (created with `FILE_FLAG_OVERLAPPED` via - /// `CreateNamedPipeA`). Client handles from `connect()` are synchronous and will - /// return an error. pub fn into_async_conn(self) -> io::Result { - let raw = self.inner.into_raw_handle(); - let inner = if self.is_server { - AsyncPipe::Server(unsafe { NamedPipeServer::from_raw_handle(raw)? }) - } else { - AsyncPipe::Client(unsafe { NamedPipeClient::from_raw_handle(raw)? }) - }; Ok(AsyncSeqpacketConn { - inner, + handle: self.handle, peer_pid: self.peer_pid, }) } @@ -677,38 +713,23 @@ impl SeqpacketConn { /// Async receive on a Windows named pipe IPC connection. /// -/// Waits for the pipe to become readable (via IOCP), then reads one complete message. 
-/// The handle-count suffix is stripped and any transferred handles are returned. +/// Calls `block_in_place` with a direct blocking `ReadFile` into the caller-supplied buffer, +/// bypassing mio's 4 KB internal read buffer and correctly handling messages of any size. pub async fn recv_raw_async( conn: &AsyncConn, buf: &mut [u8], ) -> io::Result<(usize, Vec)> { - loop { - async_pipe!(conn.inner, readable().await)?; - match async_pipe!(conn.inner, try_read(buf)) { - Ok(0) => return Err(io::Error::from(io::ErrorKind::BrokenPipe)), - Ok(n) => return parse_message(buf, n), - Err(e) if e.kind() == io::ErrorKind::WouldBlock => continue, - Err(e) => return Err(e), - } - } + let h = conn.handle.as_raw_handle() as SysHANDLE; + tokio::task::block_in_place(|| pipe_read(h, buf, true)) } /// Async send on a Windows named pipe IPC connection. /// /// Server responses never carry handles; a zero-handle-count suffix is appended automatically. -/// Waits for writability (via IOCP) and writes the message atomically. +/// Uses `block_in_place` with a direct `WriteFile`. pub async fn send_raw_async(conn: &AsyncConn, data: &[u8]) -> io::Result<()> { - // Server responses never carry handles; append a 0-handle-count suffix (4 bytes). let mut buf = data.to_vec(); buf.extend_from_slice(&0u32.to_le_bytes()); - loop { - async_pipe!(conn.inner, writable().await)?; - match async_pipe!(conn.inner, try_write(&buf)) { - Ok(n) if n == buf.len() => return Ok(()), - Ok(_) => return Err(io::Error::from(io::ErrorKind::WriteZero)), - Err(e) if e.kind() == io::ErrorKind::WouldBlock => continue, - Err(e) => return Err(e), - } - } + let h = conn.handle.as_raw_handle() as SysHANDLE; + tokio::task::block_in_place(|| pipe_write(h, &buf, true)) } diff --git a/datadog-ipc/tests/windows_shm.rs b/datadog-ipc/tests/windows_shm.rs new file mode 100644 index 0000000000..1c83c119a6 --- /dev/null +++ b/datadog-ipc/tests/windows_shm.rs @@ -0,0 +1,75 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 +#![cfg(windows)] + +use tokio::runtime; + +use datadog_ipc::example_interface::{ExampleInterfaceChannel, ExampleServer}; +use datadog_ipc::platform::{FileBackedHandle, ShmHandle}; +use datadog_ipc::SeqpacketConn; + +/// Verifies that a `ShmHandle` (Windows named file mapping) can be transferred across an IPC +/// connection via `DuplicateHandle`-based in-band handle passing, and that the receiving side +/// can successfully map the memory and read the data written by the sender. +#[test] +fn test_shm_handle_transfer() { + let (conn_server, conn_client) = SeqpacketConn::socketpair().unwrap(); + + let rt = runtime::Builder::new_multi_thread() + .worker_threads(1) + .enable_all() + .build() + .unwrap(); + rt.spawn({ + let server = ExampleServer::default(); + async move { server.accept_connection(conn_server).await } + }); + + let mut channel = ExampleInterfaceChannel::new(conn_client); + + // Allocate shared memory and write a known pattern into it. + let shm = ShmHandle::new(4096).unwrap(); + let mut mapped = shm.clone().map().unwrap(); + let payload: Vec = (0u8..32).collect(); + mapped.as_slice_mut()[..32].copy_from_slice(&payload); + + // Transfer the ShmHandle to the server via IPC and ask it to sum the first 32 bytes. + let expected_sum: u64 = payload.iter().map(|&b| b as u64).sum(); + let received_sum = channel.call_shm_sum(shm, 32).unwrap(); + + assert_ne!(received_sum, u64::MAX, "shm mapping failed on server side"); + assert_eq!(received_sum, expected_sum); +} + +/// Verifies that IPC messages larger than 4 KB are handled without panicking. +/// +/// Before the fix, Tokio's `NamedPipeServer` registered the pipe handle with mio/IOCP, which +/// posted overlapped `ReadFile` calls into a fixed 4 KB internal buffer. 
Messages larger than +/// 4 KB caused `ReadFile` to return `ERROR_MORE_DATA` synchronously; Windows still queued an +/// IOCP completion, but mio had already transitioned `io.read` to `State::Err`. When the +/// completion fired, mio's `read_done` hit `_ => unreachable!()` (named_pipe.rs:871). +/// +/// The fix routes serve-loop I/O through `block_in_place` + direct `ReadFile` into the +/// caller-supplied large buffer, bypassing mio's 4 KB limit entirely. +#[test] +fn test_large_message() { + let (conn_server, conn_client) = SeqpacketConn::socketpair().unwrap(); + + let rt = runtime::Builder::new_multi_thread() + .worker_threads(2) + .enable_all() + .build() + .unwrap(); + rt.spawn({ + let server = ExampleServer::default(); + async move { server.accept_connection(conn_server).await } + }); + + let mut channel = ExampleInterfaceChannel::new(conn_client); + + // Send a 64 KB payload — well above mio's 4 KB internal read-buffer limit. + let payload: Vec = (0u8..=255).cycle().take(64 * 1024).collect(); + let expected_len = payload.len() as u32; + let received_len = channel.call_echo_len(payload).unwrap(); + assert_eq!(received_len, expected_len); +} diff --git a/datadog-sidecar/src/setup/windows.rs b/datadog-sidecar/src/setup/windows.rs index a826423e2c..483e1fbc18 100644 --- a/datadog-sidecar/src/setup/windows.rs +++ b/datadog-sidecar/src/setup/windows.rs @@ -93,16 +93,19 @@ mod tests { // can't listen twice when some listener is active assert!(liaison.attempt_listen().unwrap().is_none()); + // try_accept() must run concurrently with connect_to_server() because connect() + // blocks reading the 4-byte PID handshake that try_accept() writes after accepting. 
+ let srv_thread = std::thread::spawn(move || listener.try_accept().unwrap()); let client: SeqpacketConn = liaison.connect_to_server().unwrap(); - let srv: SeqpacketConn = listener.try_accept().unwrap(); + let srv: SeqpacketConn = srv_thread.join().unwrap(); client.send_raw_blocking(&mut vec![255], &[]).unwrap(); let mut buf = vec![0u8; datadog_ipc::max_message_size() + datadog_ipc::HANDLE_SUFFIX_SIZE]; let (n, _) = srv.recv_raw_blocking(&mut buf).unwrap(); assert_eq!(n, 1); assert_eq!(buf[0], 255); - drop(listener); drop(client); + // listener was moved into srv_thread and is dropped when the thread completes } // we should be able to open a new listener now diff --git a/datadog-sidecar/src/windows.rs b/datadog-sidecar/src/windows.rs index e516e5bf1d..a141030491 100644 --- a/datadog-sidecar/src/windows.rs +++ b/datadog-sidecar/src/windows.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 use crate::enter_listener_loop; -use datadog_ipc::platform::named_pipe_name_from_raw_handle; use datadog_ipc::{SeqpacketConn, SeqpacketListener}; use futures::FutureExt; @@ -14,16 +13,13 @@ use manual_future::ManualFuture; use spawn_worker::{write_crashtracking_trampoline, SpawnWorker, Stdio, TrampolineData}; use std::ffi::CStr; use std::io::{self, Error}; -use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, OwnedHandle}; +use std::os::windows::io::{FromRawHandle, IntoRawHandle, OwnedHandle}; use std::ptr::null_mut; use std::sync::LazyLock; use std::sync::{Arc, Mutex}; use std::time::Instant; -use tokio::net::windows::named_pipe::{NamedPipeServer, ServerOptions}; use tokio::select; use tracing::{error, info}; -use winapi::shared::minwindef::ULONG; -use winapi::um::winbase::GetNamedPipeClientProcessId; use winapi::um::winnt::HANDLE; use winapi::{ shared::{ @@ -95,35 +91,20 @@ async fn accept_socket_loop( cancellation: ManualFuture<()>, handler: Box, ) -> io::Result<()> { - // Wrap the first server instance as a Tokio NamedPipeServer for async connect 
polling. - // After each accepted connection we create a fresh Tokio server for the next client. - let name = named_pipe_name_from_raw_handle(listener.as_raw_handle()) - .ok_or(io::Error::from(io::ErrorKind::InvalidInput))?; - - // Transfer the listener's handle into a Tokio NamedPipeServer. - let mut pipe = unsafe { NamedPipeServer::from_raw_handle(listener.into_raw_handle()) }?; - + // Use spawn_blocking + accept_blocking so the accepted handle has no pending overlapped + // I/O registered with mio/IOCP. recv_raw_async/send_raw_async then use block_in_place + // with a caller-supplied large buffer, bypassing mio's 4 KB internal ReadFile buffer + // (which would cause ERROR_MORE_DATA → unreachable!() panic for larger messages). + let listener = Arc::new(listener); let cancellation = cancellation.shared(); loop { + let listener_clone = Arc::clone(&listener); select! { _ = cancellation.clone() => break, - result = pipe.connect() => result?, + result = tokio::task::spawn_blocking(move || listener_clone.accept_blocking()) => { + handler(result??); + } } - let connected_pipe = pipe; - pipe = ServerOptions::new() - .pipe_mode(tokio::net::windows::named_pipe::PipeMode::Message) - .create(&name)?; - - // Convert the connected NamedPipeServer into a SeqpacketConn. - let raw = connected_pipe.as_raw_handle(); - let mut client_pid: ULONG = 0; - unsafe { GetNamedPipeClientProcessId(raw as HANDLE, &mut client_pid) }; - // Transfer ownership: forget the Tokio wrapper (which doesn't implement IntoRawHandle) - // and take the handle ourselves. 
- std::mem::forget(connected_pipe); - let owned = unsafe { OwnedHandle::from_raw_handle(raw) }; - let conn = SeqpacketConn::from_server_handle(owned, client_pid); - handler(conn); } Ok(()) } From daf0713c1ecac31d183cd21a9e9fd9ee7397d92e Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Mon, 16 Mar 2026 21:58:20 +0100 Subject: [PATCH 17/29] Dynamically allocate receive buffer to avoid unbounded committed memory growth Signed-off-by: Bob Weinand --- datadog-ipc-macros/src/lib.rs | 5 ++--- datadog-ipc/src/platform/unix/sockets/mod.rs | 17 ++++++++++++++--- datadog-ipc/src/platform/windows/sockets.rs | 17 +++++++++++------ 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/datadog-ipc-macros/src/lib.rs b/datadog-ipc-macros/src/lib.rs index dbfad9a90b..b26312b667 100644 --- a/datadog-ipc-macros/src/lib.rs +++ b/datadog-ipc-macros/src/lib.rs @@ -319,9 +319,8 @@ fn gen_serve_fn( return; } }; - let mut buf = vec![0u8; datadog_ipc::max_message_size() + datadog_ipc::HANDLE_SUFFIX_SIZE]; loop { - let (n, fds) = match datadog_ipc::recv_raw_async(&async_fd, &mut buf).await { + let (buf, fds) = match datadog_ipc::recv_raw_async(&async_fd).await { Ok(x) => x, Err(e) => { ::tracing::trace!("IPC serve: recv (connection closed?): {e}"); @@ -329,7 +328,7 @@ fn gen_serve_fn( } }; let Ok((discriminant, mut req)) = - datadog_ipc::codec::decode::<#enum_name>(&buf[..n]) + datadog_ipc::codec::decode::<#enum_name>(&buf) else { ::tracing::warn!("IPC serve: failed to decode request"); break; diff --git a/datadog-ipc/src/platform/unix/sockets/mod.rs b/datadog-ipc/src/platform/unix/sockets/mod.rs index 8062cc3373..eb5b59802b 100644 --- a/datadog-ipc/src/platform/unix/sockets/mod.rs +++ b/datadog-ipc/src/platform/unix/sockets/mod.rs @@ -348,12 +348,23 @@ pub type AsyncConn = AsyncFd; /// Async receive on a Tokio `AsyncFd`-wrapped IPC connection. 
/// +/// Allocates a buffer sized to `max_message_size()` per call and returns only the received +/// bytes (truncated), so no large buffer is held between receives. +/// /// Used by the server dispatch loop (generated by `#[service]` macro). -pub async fn recv_raw_async(fd: &AsyncConn, buf: &mut [u8]) -> io::Result<(usize, Vec)> { +pub async fn recv_raw_async(fd: &AsyncConn) -> io::Result<(Vec, Vec)> { loop { let mut guard = fd.readable().await?; - match guard.try_io(|inner| recvmsg_raw(inner.as_raw_fd(), buf, MsgFlags::empty())) { - Ok(result) => return result, + // SAFETY: recvmsg writes exactly the first n bytes; we truncate to n before returning, + // so no uninitialized bytes are ever exposed to the caller. + let mut buf = Vec::with_capacity(max_message_size()); + unsafe { buf.set_len(max_message_size()) }; + match guard.try_io(|inner| recvmsg_raw(inner.as_raw_fd(), &mut buf, MsgFlags::empty())) { + Ok(Ok((n, fds))) => { + buf.truncate(n); + return Ok((buf, fds)); + } + Ok(Err(e)) => return Err(e), Err(_would_block) => continue, } } diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 80df221384..838b788a15 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -713,14 +713,19 @@ impl SeqpacketConn { /// Async receive on a Windows named pipe IPC connection. /// -/// Calls `block_in_place` with a direct blocking `ReadFile` into the caller-supplied buffer, +/// Calls `block_in_place` with a direct blocking `ReadFile` into a caller-owned buffer, /// bypassing mio's 4 KB internal read buffer and correctly handling messages of any size. 
-pub async fn recv_raw_async( - conn: &AsyncConn, - buf: &mut [u8], -) -> io::Result<(usize, Vec)> { +pub async fn recv_raw_async(conn: &AsyncConn) -> io::Result<(Vec, Vec)> { let h = conn.handle.as_raw_handle() as SysHANDLE; - tokio::task::block_in_place(|| pipe_read(h, buf, true)) + tokio::task::block_in_place(|| { + // SAFETY: ReadFile writes exactly the first n bytes; we truncate to n before returning, + // so no uninitialized bytes are ever exposed to the caller. + let mut buf = Vec::with_capacity(max_message_size() + HANDLE_SUFFIX_SIZE); + unsafe { buf.set_len(max_message_size() + HANDLE_SUFFIX_SIZE) }; + let (n, handles) = pipe_read(h, &mut buf, true)?; + buf.truncate(n); + Ok((buf, handles)) + }) } /// Async send on a Windows named pipe IPC connection. From f7c604e1d15d0f64052015ee8e072282e4564311 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Mon, 16 Mar 2026 22:55:41 +0100 Subject: [PATCH 18/29] Fmt Signed-off-by: Bob Weinand --- LICENSE-3rdparty.yml | 579 +----------------- datadog-ipc-macros/src/lib.rs | 9 +- datadog-ipc/src/example_interface.rs | 12 +- datadog-ipc/src/lib.rs | 2 +- .../src/platform/unix/sockets/linux.rs | 2 +- datadog-ipc/src/platform/unix/sockets/mod.rs | 16 +- datadog-ipc/src/platform/windows/sockets.rs | 16 +- datadog-ipc/tests/windows_shm.rs | 7 +- datadog-sidecar/src/service/blocking.rs | 21 +- datadog-sidecar/src/service/sender.rs | 10 +- .../src/service/sidecar_interface.rs | 6 +- datadog-sidecar/src/service/sidecar_server.rs | 65 +- datadog-sidecar/src/setup/windows.rs | 1 - 13 files changed, 77 insertions(+), 669 deletions(-) diff --git a/LICENSE-3rdparty.yml b/LICENSE-3rdparty.yml index db7415cf26..267cfa91d5 100644 --- a/LICENSE-3rdparty.yml +++ b/LICENSE-3rdparty.yml @@ -1,4 +1,4 @@ -root_name: builder, build_common, tools, libdd-alloc, libdd-crashtracker, libdd-common, libdd-telemetry, libdd-ddsketch, libdd-libunwind-sys, libdd-crashtracker-ffi, libdd-common-ffi, datadog-ffe, datadog-ffe-ffi, datadog-ipc, 
datadog-ipc-macros, libdd-tinybytes, tarpc, tarpc-plugins, spawn_worker, cc_utils, libdd-library-config, libdd-trace-protobuf, libdd-library-config-ffi, datadog-live-debugger, libdd-data-pipeline, libdd-dogstatsd-client, libdd-trace-stats, libdd-trace-utils, libdd-trace-normalization, libdd-log, datadog-live-debugger-ffi, libdd-profiling, libdd-profiling-protobuf, libdd-profiling-ffi, libdd-data-pipeline-ffi, libdd-ddsketch-ffi, libdd-log-ffi, libdd-telemetry-ffi, symbolizer-ffi, datadog-profiling-replayer, datadog-remote-config, datadog-sidecar, datadog-sidecar-macros, datadog-sidecar-ffi, libdd-trace-obfuscation, datadog-tracer-flare, sidecar_mockgen, test_spawn_from_lib +root_name: builder, build_common, tools, libdd-alloc, libdd-crashtracker, libdd-common, libdd-telemetry, libdd-ddsketch, libdd-libunwind-sys, libdd-crashtracker-ffi, libdd-common-ffi, datadog-ffe, datadog-ffe-ffi, datadog-ipc, datadog-ipc-macros, libdd-tinybytes, spawn_worker, cc_utils, libdd-library-config, libdd-trace-protobuf, libdd-library-config-ffi, datadog-live-debugger, libdd-data-pipeline, libdd-dogstatsd-client, libdd-trace-stats, libdd-trace-utils, libdd-trace-normalization, libdd-log, datadog-live-debugger-ffi, libdd-profiling, libdd-profiling-protobuf, libdd-profiling-ffi, libdd-data-pipeline-ffi, libdd-ddsketch-ffi, libdd-log-ffi, libdd-telemetry-ffi, symbolizer-ffi, datadog-profiling-replayer, datadog-remote-config, datadog-sidecar, datadog-sidecar-macros, datadog-sidecar-ffi, libdd-trace-obfuscation, datadog-tracer-flare, sidecar_mockgen, test_spawn_from_lib third_party_libraries: - package_name: addr2line package_version: 0.24.2 @@ -10110,34 +10110,6 @@ third_party_libraries: of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS -- package_name: educe - package_version: 0.4.23 - repository: https://github.com/magiclen/educe - license: MIT - licenses: - - license: MIT - text: | - MIT License - - Copyright (c) 2018 magiclen.org (Ron Li) - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - package_name: either package_version: 1.13.0 repository: https://github.com/rayon-rs/either @@ -10641,34 +10613,6 @@ third_party_libraries: See the License for the specific language governing permissions and limitations under the License. 
-- package_name: enum-ordinalize - package_version: 3.1.15 - repository: https://github.com/magiclen/enum-ordinalize - license: MIT - licenses: - - license: MIT - text: | - MIT License - - Copyright (c) 2018 magiclen.org (Ron Li) - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - package_name: equivalent package_version: 1.0.1 repository: https://github.com/cuviper/equivalent @@ -14135,243 +14079,6 @@ third_party_libraries: LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-- package_name: humantime - package_version: 2.1.0 - repository: https://github.com/tailhook/humantime - license: MIT/Apache-2.0 - licenses: - - license: MIT - text: | - Copyright (c) 2016 The humantime Developers - - Includes parts of http date with the following copyright: - Copyright (c) 2016 Pyfisch - - Includes portions of musl libc with the following copyright: - Copyright © 2005-2013 Rich Felker - - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - - license: Apache-2.0 - text: |2+ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - package_name: hyper package_version: 1.6.0 repository: https://github.com/hyperium/hyper @@ -19885,40 +19592,6 @@ third_party_libraries: licenses: - license: MIT text: "The MIT License (MIT)\r\n\r\nCopyright (c) 2014 Benjamin Sago\r\nCopyright (c) 2021-2022 The Nushell Project Developers\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n" -- package_name: num-bigint - package_version: 0.4.6 - repository: https://github.com/rust-num/num-bigint - license: MIT OR Apache-2.0 - licenses: - - license: MIT - text: | - Copyright (c) 2014 The Rust Project Developers - - Permission is hereby granted, free of charge, to any - person obtaining a copy of this software and associated - documentation files (the "Software"), to deal in the - Software without restriction, including without - limitation the rights to use, copy, modify, merge, - publish, distribute, sublicense, and/or sell copies of - the Software, and to permit persons to whom the Software - is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice - shall be included in all copies or substantial portions - of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF - ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED - TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A - PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT - SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR - IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. - - license: Apache-2.0 - text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of 
the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" - package_name: num-conv package_version: 0.1.0 repository: https://github.com/jhpratt/num-conv @@ -20183,40 +19856,6 @@ third_party_libraries: DEALINGS IN THE SOFTWARE. - license: Apache-2.0 text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of 
the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" -- package_name: num-integer - package_version: 0.1.46 - repository: https://github.com/rust-num/num-integer - license: MIT OR Apache-2.0 - licenses: - - license: MIT - text: | - Copyright (c) 2014 The Rust Project Developers - - Permission is hereby granted, free of charge, to any - person obtaining a copy of this software and associated - documentation files (the "Software"), to deal in the - Software without restriction, including 
without - limitation the rights to use, copy, modify, merge, - publish, distribute, sublicense, and/or sell copies of - the Software, and to permit persons to whom the Software - is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice - shall be included in all copies or substantial portions - of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF - ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED - TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A - PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT - SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR - IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. - - license: Apache-2.0 - text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. 
If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. 
You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. 
Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" - package_name: num-traits package_version: 0.2.19 repository: https://github.com/rust-num/num-traits @@ -32288,9 +31927,9 @@ third_party_libraries: - package_name: stringmetrics package_version: 2.2.2 repository: https://github.com/pluots/stringmetrics - license: License specified in file ($CARGO_HOME/registry/src/index.crates.io-1949cf8c6b5b557f/stringmetrics-2.2.2/LICENSE) + license: License specified in file ($CARGO_HOME/registry/src/github.com-25cdd57fae9f0462/stringmetrics-2.2.2/LICENSE) licenses: - - license: License specified in file ($CARGO_HOME/registry/src/index.crates.io-1949cf8c6b5b557f/stringmetrics-2.2.2/LICENSE) + - license: License specified in file ($CARGO_HOME/registry/src/github.com-25cdd57fae9f0462/stringmetrics-2.2.2/LICENSE) text: | Copyright 2022 Trevor Gross @@ -35608,218 +35247,6 @@ third_party_libraries: DEALINGS IN THE SOFTWARE. - license: Apache-2.0 text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of 
the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright 2017 quininer kel\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" -- package_name: tokio-serde - package_version: 0.8.0 - repository: https://github.com/carllerche/tokio-serde - license: MIT OR Apache-2.0 - licenses: - - license: MIT - text: | - Copyright (c) 2017 Carl Lerche - Copyright (c) 2018 Bastian Köcher - Copyright (c) 2019-2020 Artem Vorotnikov - - Permission is hereby granted, free of charge, to any - person obtaining a copy of this software and associated - documentation files (the "Software"), to deal 
in the - Software without restriction, including without - limitation the rights to use, copy, modify, merge, - publish, distribute, sublicense, and/or sell copies of - the Software, and to permit persons to whom the Software - is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice - shall be included in all copies or substantial portions - of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF - ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED - TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A - PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT - SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR - IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. - - license: Apache-2.0 - text: |2 - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - package_name: tokio-util package_version: 0.7.12 repository: https://github.com/tokio-rs/tokio diff --git a/datadog-ipc-macros/src/lib.rs b/datadog-ipc-macros/src/lib.rs index b26312b667..cc1de63867 100644 --- a/datadog-ipc-macros/src/lib.rs +++ b/datadog-ipc-macros/src/lib.rs @@ -130,7 +130,7 @@ fn gen_request_enum(enum_name: &Ident, methods: &[MethodInfo]) -> proc_macro2::T .collect(); quote! 
{ - #[derive(::serde::Serialize, ::serde::Deserialize)] + #[derive(::serde::Serialize, ::serde::Deserialize, Debug)] pub enum #enum_name { #(#variants),* } @@ -341,12 +341,13 @@ fn gen_serve_fn( ::tracing::warn!("IPC serve: failed to receive handles"); break; } - let recv_counter = handler.recv_counter().fetch_add(1, ::std::sync::atomic::Ordering::Relaxed) + 1; - ::tracing::trace!(recv_counter, discriminant, pid = peer.pid, "IPC recv"); + let recv_counter = handler.recv_counter().load(::std::sync::atomic::Ordering::Relaxed) + 1; + ::tracing::trace!(recv_counter, ?req, pid = peer.pid, "IPC recv"); match req { #(#match_arms)* } + handler.recv_counter().fetch_add(1, ::std::sync::atomic::Ordering::Relaxed); } } } @@ -470,7 +471,7 @@ fn gen_channel( } } -/// `#[service]` replaces `#[tarpc::service]` + `#[impl_transfer_handles]`. +/// `#[service]` macro. /// /// Generates from a `trait` definition: /// - `{Trait}Request` enum (Clone, Serialize, Deserialize, TransferHandles) diff --git a/datadog-ipc/src/example_interface.rs b/datadog-ipc/src/example_interface.rs index 670eff4415..8585a41b08 100644 --- a/datadog-ipc/src/example_interface.rs +++ b/datadog-ipc/src/example_interface.rs @@ -85,17 +85,15 @@ impl ExampleInterface for ExampleServer { std::future::ready(()) } - fn shm_sum( + async fn shm_sum( &self, _peer: datadog_ipc::PeerCredentials, handle: ShmHandle, len: usize, - ) -> impl std::future::Future + Send + '_ { - async move { - match handle.map() { - Ok(mapped) => mapped.as_slice()[..len].iter().map(|&b| b as u64).sum(), - Err(_) => u64::MAX, - } + ) -> u64 { + match handle.map() { + Ok(mapped) => mapped.as_slice()[..len].iter().map(|&b| b as u64).sum(), + Err(_) => u64::MAX, } } diff --git a/datadog-ipc/src/lib.rs b/datadog-ipc/src/lib.rs index 047eb7b383..715120f0eb 100644 --- a/datadog-ipc/src/lib.rs +++ b/datadog-ipc/src/lib.rs @@ -17,7 +17,7 @@ pub mod client; pub mod codec; pub use client::IpcClientConn; -pub use platform::{recv_raw_async, send_raw_async}; 
pub use platform::{ max_message_size, PeerCredentials, SeqpacketConn, SeqpacketListener, HANDLE_SUFFIX_SIZE, }; +pub use platform::{recv_raw_async, send_raw_async}; diff --git a/datadog-ipc/src/platform/unix/sockets/linux.rs b/datadog-ipc/src/platform/unix/sockets/linux.rs index 1c94f3736e..e076a12370 100644 --- a/datadog-ipc/src/platform/unix/sockets/linux.rs +++ b/datadog-ipc/src/platform/unix/sockets/linux.rs @@ -34,7 +34,7 @@ impl SeqpacketListener { let addr = UnixAddr::new_abstract(name).map_err(io::Error::from)?; Self::do_bind(addr) } - + fn do_bind(addr: UnixAddr) -> io::Result { let fd = create_seqpacket_socket()?; bind(fd.as_raw_fd(), &addr).map_err(io::Error::from)?; diff --git a/datadog-ipc/src/platform/unix/sockets/mod.rs b/datadog-ipc/src/platform/unix/sockets/mod.rs index eb5b59802b..bed3b15baf 100644 --- a/datadog-ipc/src/platform/unix/sockets/mod.rs +++ b/datadog-ipc/src/platform/unix/sockets/mod.rs @@ -321,7 +321,11 @@ impl SeqpacketConn { std::mem::size_of::() as libc::socklen_t, ) }; - if ret < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } + if ret < 0 { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } } pub fn set_sndbuf_size(&self, size: usize) -> io::Result<()> { @@ -355,13 +359,13 @@ pub type AsyncConn = AsyncFd; pub async fn recv_raw_async(fd: &AsyncConn) -> io::Result<(Vec, Vec)> { loop { let mut guard = fd.readable().await?; - // SAFETY: recvmsg writes exactly the first n bytes; we truncate to n before returning, - // so no uninitialized bytes are ever exposed to the caller. let mut buf = Vec::with_capacity(max_message_size()); - unsafe { buf.set_len(max_message_size()) }; - match guard.try_io(|inner| recvmsg_raw(inner.as_raw_fd(), &mut buf, MsgFlags::empty())) { + // SAFETY: all bit patterns are valid for u8; recvmsg writes exactly n bytes into + // the spare capacity before set_len(n) is called below. 
+ let slice = unsafe { std::slice::from_raw_parts_mut(buf.as_mut_ptr(), max_message_size()) }; + match guard.try_io(|inner| recvmsg_raw(inner.as_raw_fd(), slice, MsgFlags::empty())) { Ok(Ok((n, fds))) => { - buf.truncate(n); + unsafe { buf.set_len(n) }; return Ok((buf, fds)); } Ok(Err(e)) => return Err(e), diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 838b788a15..784c46ca5f 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -718,12 +718,16 @@ impl SeqpacketConn { pub async fn recv_raw_async(conn: &AsyncConn) -> io::Result<(Vec, Vec)> { let h = conn.handle.as_raw_handle() as SysHANDLE; tokio::task::block_in_place(|| { - // SAFETY: ReadFile writes exactly the first n bytes; we truncate to n before returning, - // so no uninitialized bytes are ever exposed to the caller. - let mut buf = Vec::with_capacity(max_message_size() + HANDLE_SUFFIX_SIZE); - unsafe { buf.set_len(max_message_size() + HANDLE_SUFFIX_SIZE) }; - let (n, handles) = pipe_read(h, &mut buf, true)?; - buf.truncate(n); + let size = max_message_size() + HANDLE_SUFFIX_SIZE; + let mut buf = Vec::with_capacity(size); + // SAFETY: all bit patterns are valid for u8; pipe_read writes exactly n bytes into + // the spare capacity before set_len(n) is called below. + let (n, handles) = pipe_read( + h, + unsafe { std::slice::from_raw_parts_mut(buf.as_mut_ptr(), size) }, + true, + )?; + unsafe { buf.set_len(n) }; Ok((buf, handles)) }) } diff --git a/datadog-ipc/tests/windows_shm.rs b/datadog-ipc/tests/windows_shm.rs index 1c83c119a6..4f74afa867 100644 --- a/datadog-ipc/tests/windows_shm.rs +++ b/datadog-ipc/tests/windows_shm.rs @@ -43,14 +43,15 @@ fn test_shm_handle_transfer() { /// Verifies that IPC messages larger than 4 KB are handled without panicking. 
/// -/// Before the fix, Tokio's `NamedPipeServer` registered the pipe handle with mio/IOCP, which +/// Using Tokio's `NamedPipeServer`, it registered the pipe handle with mio/IOCP, which /// posted overlapped `ReadFile` calls into a fixed 4 KB internal buffer. Messages larger than /// 4 KB caused `ReadFile` to return `ERROR_MORE_DATA` synchronously; Windows still queued an /// IOCP completion, but mio had already transitioned `io.read` to `State::Err`. When the /// completion fired, mio's `read_done` hit `_ => unreachable!()` (named_pipe.rs:871). /// -/// The fix routes serve-loop I/O through `block_in_place` + direct `ReadFile` into the -/// caller-supplied large buffer, bypassing mio's 4 KB limit entirely. +/// Which is why we route serve-loop I/O through `block_in_place` + direct `ReadFile` into the +/// caller-supplied large buffer, bypassing mio's 4 KB limit entirely. This serves as regression +/// test. #[test] fn test_large_message() { let (conn_server, conn_client) = SeqpacketConn::socketpair().unwrap(); diff --git a/datadog-sidecar/src/service/blocking.rs b/datadog-sidecar/src/service/blocking.rs index 77af5adddb..6d013e970e 100644 --- a/datadog-sidecar/src/service/blocking.rs +++ b/datadog-sidecar/src/service/blocking.rs @@ -5,7 +5,6 @@ use super::{ DynamicInstrumentationConfigState, InstanceId, QueueId, SerializedTracerHeaderTags, SessionConfig, SidecarAction, }; -use libdd_telemetry::metrics::MetricContext; use crate::service::sender::SidecarSender; use crate::service::sidecar_interface::SidecarInterfaceChannel; use datadog_ipc::platform::{FileBackedHandle, ShmHandle}; @@ -13,7 +12,9 @@ use datadog_ipc::SeqpacketConn; use datadog_live_debugger::debugger_defs::DebuggerPayload; use datadog_live_debugger::sender::DebuggerType; use libdd_common::tag::Tag; +use libdd_common::MutexExt; use libdd_dogstatsd_client::DogStatsDActionOwned; +use libdd_telemetry::metrics::MetricContext; use serde::Serialize; use std::sync::Mutex; use std::{ @@ -21,7 +22,6 @@ use 
std::{ time::{Duration, Instant}, }; use tracing::warn; -use libdd_common::MutexExt; /// `SidecarTransport` wraps a [`SidecarSender`] with transparent reconnection support. /// @@ -42,11 +42,15 @@ impl SidecarTransport { Self::do_reconnect(&mut self.inner, factory, false); } - pub fn do_reconnect(transport: &mut Mutex, factory: F, force_reconnect: bool) -> bool + pub fn do_reconnect( + transport: &mut Mutex, + factory: F, + force_reconnect: bool, + ) -> bool where F: FnOnce() -> Option>, { - let mut transport = match transport.lock() { + let transport = match transport.get_mut() { Ok(t) => t, Err(_) => return false, }; @@ -120,9 +124,7 @@ impl SidecarTransport { Err(e) => e, } }; - if e.kind() == io::ErrorKind::BrokenPipe - || e.kind() == io::ErrorKind::ConnectionReset - { + if e.kind() == io::ErrorKind::BrokenPipe || e.kind() == io::ErrorKind::ConnectionReset { if let Some(ref reconnect) = self.reconnect_fn { if Self::do_reconnect(&mut self.inner, reconnect, true) { return f(&mut self.inner.lock_or_panic()); @@ -398,10 +400,7 @@ pub fn send_dogstatsd_actions( } /// Sets x-datadog-test-session-token on all requests for the given session. 
-pub fn set_test_session_token( - transport: &mut SidecarTransport, - token: String, -) -> io::Result<()> { +pub fn set_test_session_token(transport: &mut SidecarTransport, token: String) -> io::Result<()> { lock_sender(transport)?.set_test_session_token(token); Ok(()) } diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index ecce9ade34..474d8b9c39 100644 --- a/datadog-sidecar/src/service/sender.rs +++ b/datadog-sidecar/src/service/sender.rs @@ -69,7 +69,6 @@ fn cancel_if_instance(slot: &mut Option, instance_id: & } } - fn cancel_if_queue( slot: &mut Option, instance_id: &InstanceId, @@ -107,7 +106,11 @@ fn coalesce(outbox: &mut SidecarOutbox, incoming: SidecarInterfaceRequest) { } = incoming { cancel_if_queue(&mut outbox.set_request_config, instance_id, queue_id); - cancel_if_queue(&mut outbox.set_universal_service_tags, instance_id, queue_id); + cancel_if_queue( + &mut outbox.set_universal_service_tags, + instance_id, + queue_id, + ); } match incoming { @@ -280,7 +283,8 @@ impl SidecarSender { if self.metric_registrations.contains_key(&metric.name) { return; } - self.metric_registrations.insert(metric.name.clone(), metric.clone()); + self.metric_registrations + .insert(metric.name.clone(), metric.clone()); let req = SidecarInterfaceRequest::RegisterTelemetryMetric { metric }; self.channel.send_request_blocking(&req).ok(); } diff --git a/datadog-sidecar/src/service/sidecar_interface.rs b/datadog-sidecar/src/service/sidecar_interface.rs index 00dfa45511..25424cc71a 100644 --- a/datadog-sidecar/src/service/sidecar_interface.rs +++ b/datadog-sidecar/src/service/sidecar_interface.rs @@ -6,11 +6,11 @@ use crate::service::{ InstanceId, QueueId, SerializedTracerHeaderTags, SessionConfig, SidecarAction, }; -use libdd_telemetry::metrics::MetricContext; use datadog_ipc::platform::ShmHandle; use datadog_live_debugger::sender::DebuggerType; use libdd_common::tag::Tag; use libdd_dogstatsd_client::DogStatsDActionOwned; +use 
libdd_telemetry::metrics::MetricContext; use serde::{Deserialize, Serialize}; use std::time::Duration; @@ -78,9 +78,7 @@ pub trait SidecarInterface { /// # Arguments /// /// * `metric` - The metric context to register on this connection. - async fn register_telemetry_metric( - metric: MetricContext, - ); + async fn register_telemetry_metric(metric: MetricContext); /// Shuts down a runtime. /// diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 60bcb87dab..a21ae59bc3 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -50,36 +50,11 @@ use libdd_trace_utils::tracer_header_tags::TracerHeaderTags; /// Wraps a raw `HANDLE` value (from `OpenProcess`). The handle is intentionally not /// closed on drop — it is valid for the lifetime of the session. #[cfg(windows)] -#[derive(Clone)] +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] pub struct ProcessHandle(pub winapi::um::winnt::HANDLE); -#[cfg(windows)] -impl std::fmt::Debug for ProcessHandle { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "ProcessHandle({:p})", self.0) - } -} - -#[cfg(windows)] -impl PartialEq for ProcessHandle { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } -} - -#[cfg(windows)] -impl Eq for ProcessHandle {} - -#[cfg(windows)] -impl std::hash::Hash for ProcessHandle { - fn hash(&self, state: &mut H) { - (self.0 as usize).hash(state); - } -} - #[cfg(windows)] unsafe impl Send for ProcessHandle {} - #[cfg(windows)] unsafe impl Sync for ProcessHandle {} @@ -293,7 +268,7 @@ impl SidecarServer { if notify_function.0.is_null() { return None; } - let process_handle = session.process_handle.lock_or_panic().clone()?; + let process_handle = (*session.process_handle.lock_or_panic())?; Some(RemoteConfigNotifyTarget { process_handle, notify_function, @@ -530,7 +505,9 @@ impl SidecarInterface for ConnectionSidecarHandler { if remove_client 
{ info!("Removing telemetry client for instance {instance_id:?}"); - self.server.telemetry_clients.remove_telemetry_client(service, env); + self.server + .telemetry_clients + .remove_telemetry_client(service, env); } } else { info!("No application found for instance {instance_id:?} and queue_id {queue_id:?}"); @@ -551,11 +528,7 @@ impl SidecarInterface for ConnectionSidecarHandler { } } - async fn register_telemetry_metric( - &self, - _peer: PeerCredentials, - metric: MetricContext, - ) { + async fn register_telemetry_metric(&self, _peer: PeerCredentials, metric: MetricContext) { self.metric_registrations .lock_or_panic() .entry(metric.name.clone()) @@ -573,8 +546,12 @@ impl SidecarInterface for ConnectionSidecarHandler { if self.session_id.set(session_id.clone()).is_ok() { let mut counter = self.server.session_counter.lock_or_panic(); match counter.entry(session_id.clone()) { - Entry::Occupied(mut e) => { e.insert(e.get() + 1); } - Entry::Vacant(e) => { e.insert(1); } + Entry::Occupied(mut e) => { + e.insert(e.get() + 1); + } + Entry::Vacant(e) => { + e.insert(1); + } } } debug!("Set session config for {session_id} to {config:?}"); @@ -687,11 +664,7 @@ impl SidecarInterface for ConnectionSidecarHandler { } } - async fn set_session_process_tags( - &self, - _peer: PeerCredentials, - process_tags: Vec, - ) { + async fn set_session_process_tags(&self, _peer: PeerCredentials, process_tags: Vec) { let session_id = self.session_id.get().cloned().unwrap_or_default(); let session = self.server.get_session(&session_id); *session.process_tags.lock_or_panic() = process_tags; @@ -807,7 +780,11 @@ impl SidecarInterface for ConnectionSidecarHandler { // We segregate RC by endpoint. // So we assume that runtime ids are unique per endpoint and we can safely filter globally. 
#[allow(clippy::unwrap_used)] - if self.server.debugger_diagnostics_bookkeeper.add_payload(&payload) { + if self + .server + .debugger_diagnostics_bookkeeper + .add_payload(&payload) + { session.send_debugger_data( DebuggerType::Diagnostics, &instance_id.runtime_id, @@ -908,11 +885,7 @@ impl SidecarInterface for ConnectionSidecarHandler { } } - async fn set_test_session_token( - &self, - _peer: PeerCredentials, - token: String, - ) { + async fn set_test_session_token(&self, _peer: PeerCredentials, token: String) { let session_id = self.session_id.get().cloned().unwrap_or_default(); let session = self.server.get_session(&session_id); let token = if token.is_empty() { diff --git a/datadog-sidecar/src/setup/windows.rs b/datadog-sidecar/src/setup/windows.rs index 483e1fbc18..a3c83aa754 100644 --- a/datadog-sidecar/src/setup/windows.rs +++ b/datadog-sidecar/src/setup/windows.rs @@ -68,7 +68,6 @@ impl Default for NamedPipeLiaison { pub type DefaultLiason = NamedPipeLiaison; - #[cfg(test)] mod tests { use super::Liaison; From 02cb449ed416cc06c96f9c500ef2050ffe21586e Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Tue, 17 Mar 2026 14:37:41 +0100 Subject: [PATCH 19/29] More cleanup Signed-off-by: Bob Weinand --- datadog-sidecar-macros/src/lib.rs | 109 ++---------------------------- 1 file changed, 5 insertions(+), 104 deletions(-) diff --git a/datadog-sidecar-macros/src/lib.rs b/datadog-sidecar-macros/src/lib.rs index 80bc00480e..3447b66c57 100644 --- a/datadog-sidecar-macros/src/lib.rs +++ b/datadog-sidecar-macros/src/lib.rs @@ -2,110 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 use proc_macro::TokenStream; -use quote::{format_ident, quote}; -use syn::FnArg::Typed; -use syn::__private::Span; -use syn::parse::{Parse, ParseStream}; -use syn::{parse_macro_input, parse_quote, Arm, Ident, ItemTrait, Pat, TraitItem}; - -fn snake_to_camel(ident_str: &str) -> String { - let mut camel_ty = String::with_capacity(ident_str.len()); - - let mut last_char_was_underscore = true; - 
for c in ident_str.chars() { - match c { - '_' => last_char_was_underscore = true, - c if last_char_was_underscore => { - camel_ty.extend(c.to_uppercase()); - last_char_was_underscore = false; - } - c => camel_ty.extend(c.to_lowercase()), - } - } - - camel_ty.shrink_to_fit(); - camel_ty -} - -#[proc_macro_attribute] -pub fn extract_request_id(_attr: TokenStream, input: TokenStream) -> TokenStream { - let mut item: ItemTrait = syn::parse(input).unwrap(); - let name = &format_ident!("{}Request", item.ident); - let mut arms: Vec = vec![]; - let mut backpressure_variants: Vec = vec![]; - - for inner in item.items.iter_mut() { - if let TraitItem::Fn(func) = inner { - // Strip #[force_backpressure] and record which methods carry it. - let had_force_backpressure = func.attrs.iter().any(|attr| { - attr.meta - .path() - .get_ident() - .is_some_and(|i| i == "force_backpressure") - }); - func.attrs.retain(|attr| { - attr.meta - .path() - .get_ident() - .is_none_or(|i| i != "force_backpressure") - }); - - let method = Ident::new( - &snake_to_camel(&func.sig.ident.to_string()), - Span::mixed_site(), - ); - - if had_force_backpressure { - backpressure_variants.push(method.clone()); - } - - for any_arg in &func.sig.inputs { - if let Typed(arg) = any_arg { - if let Pat::Ident(ident) = &*arg.pat { - let matched_enum_type = match ident.ident.to_string().as_str() { - "session_id" => Some(format_ident!("SessionId")), - "instance_id" => Some(format_ident!("InstanceId")), - _ => None, - }; - if let Some(enum_type) = matched_enum_type { - arms.push(parse_quote! { - #name::#method { #ident, .. } => RequestIdentifier::#enum_type(#ident.clone()) - }); - } - } - } - } - } - } - - let backpressure_body = if backpressure_variants.is_empty() { - quote! { false } - } else { - quote! { matches!(self, #(#name::#backpressure_variants { .. })|*) } - }; - - TokenStream::from(quote! 
{ - #item - - impl RequestIdentification for tarpc::Request<#name> { - fn extract_identifier(&self) -> RequestIdentifier { - match &self.message { - #( - #arms, - )* - _ => RequestIdentifier::None, - } - } - } - - impl #name { - /// Returns true if this request variant was annotated with `#[force_backpressure]`. - pub fn requires_backpressure(&self) -> bool { - #backpressure_body - } - } - }) -} +use quote::quote; +use syn::{ + parse::{Parse, ParseStream}, + parse_macro_input, +}; struct EnvOrDefault { name: syn::LitStr, From 17570455cd3c191bb3cbedcf6ad973e86cd7d229 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Tue, 17 Mar 2026 18:09:55 +0100 Subject: [PATCH 20/29] PR Feedback Signed-off-by: Bob Weinand --- datadog-ipc-macros/src/lib.rs | 56 ++++++----------------------------- datadog-ipc/src/codec.rs | 39 +++++------------------- 2 files changed, 17 insertions(+), 78 deletions(-) diff --git a/datadog-ipc-macros/src/lib.rs b/datadog-ipc-macros/src/lib.rs index cc1de63867..e85e30a883 100644 --- a/datadog-ipc-macros/src/lib.rs +++ b/datadog-ipc-macros/src/lib.rs @@ -24,7 +24,6 @@ type ParamInfo = (Vec, Ident, Box); struct MethodInfo { name: Ident, variant: Ident, - discriminant: u32, is_blocking: bool, return_type: Option>, params: Vec, @@ -33,7 +32,6 @@ struct MethodInfo { fn collect_methods(item: &ItemTrait) -> Vec { let mut methods = Vec::new(); - let mut discriminant: u32 = 0; for trait_item in &item.items { let TraitItem::Fn(func) = trait_item else { @@ -85,13 +83,11 @@ fn collect_methods(item: &ItemTrait) -> Vec { methods.push(MethodInfo { name, variant, - discriminant, is_blocking, return_type, params, handle_param_indices, }); - discriminant += 1; } methods @@ -111,43 +107,11 @@ fn gen_request_enum(enum_name: &Ident, methods: &[MethodInfo]) -> proc_macro2::T }) .collect(); - let disc_arms: Vec<_> = methods - .iter() - .map(|m| { - let variant = &m.variant; - let d = m.discriminant; - quote! { Self::#variant { .. 
} => #d } - }) - .collect(); - - let name_arms: Vec<_> = methods - .iter() - .map(|m| { - let variant = &m.variant; - let name_str = m.name.to_string(); - quote! { Self::#variant { .. } => #name_str } - }) - .collect(); - quote! { #[derive(::serde::Serialize, ::serde::Deserialize, Debug)] pub enum #enum_name { #(#variants),* } - - impl #enum_name { - pub fn discriminant(&self) -> u32 { - match self { - #(#disc_arms),* - } - } - - pub fn variant_name(&self) -> &'static str { - match self { - #(#name_arms),* - } - } - } } } @@ -287,7 +251,7 @@ fn gen_serve_fn( let response_code = if m.return_type.is_some() { quote! { let result = handler.#name(peer, #(#field_names),*).await; - let __resp_data = datadog_ipc::codec::encode_response(&result); + let __resp_data = datadog_ipc::codec::encode(&result); datadog_ipc::send_raw_async(&async_fd, &__resp_data).await.ok(); } } else { @@ -327,7 +291,7 @@ fn gen_serve_fn( break; } }; - let Ok((discriminant, mut req)) = + let Ok(mut req) = datadog_ipc::codec::decode::<#enum_name>(&buf) else { ::tracing::warn!("IPC serve: failed to decode request"); @@ -376,10 +340,8 @@ fn gen_channel( .iter() .map(|(attrs, n, _)| quote! { #(#attrs)* #n }) .collect(); - let d = m.discriminant; let variant = &m.variant; - let name_str = m.name.to_string(); // Build the request and collect fds via TransferHandles. let build_req_and_fds = quote! 
{ let __req = #enum_name::#variant { #(#field_names),* }; @@ -387,12 +349,12 @@ fn gen_channel( datadog_ipc::handles::TransferHandles::copy_handles( &__req, &mut __sink ).ok(); - let mut __data = datadog_ipc::codec::encode(#d, &__req); + let mut __data = datadog_ipc::codec::encode(&__req); let __fds = __sink.into_fds(); { let __max = datadog_ipc::max_message_size(); if __data.len() > __max { - ::tracing::warn!(method = #name_str, len = __data.len(), max = __max, "IPC message too large"); + ::tracing::warn!(?__req, len = __data.len(), max = __max, "IPC message too large"); } } }; @@ -422,7 +384,7 @@ fn gen_channel( #build_req_and_fds let (__resp, _) = self.0.call(&mut __data, &__fds) .map_err(datadog_ipc::codec::DecodeError::Io)?; - datadog_ipc::codec::decode_response::<#ret_ty>(&__resp) + datadog_ipc::codec::decode::<#ret_ty>(&__resp) } } } @@ -443,11 +405,11 @@ fn gen_channel( pub fn try_send_request(&mut self, req: &#enum_name) -> bool { let mut __sink = datadog_ipc::handles::FdSink::new(); datadog_ipc::handles::TransferHandles::copy_handles(req, &mut __sink).ok(); - let mut __data = datadog_ipc::codec::encode(req.discriminant(), req); + let mut __data = datadog_ipc::codec::encode(req); let __fds = __sink.into_fds(); let __max = datadog_ipc::max_message_size(); if __data.len() > __max { - ::tracing::warn!(method = req.variant_name(), len = __data.len(), max = __max, "IPC message too large"); + ::tracing::warn!(?req, len = __data.len(), max = __max, "IPC message too large"); } self.0.try_send(&mut __data, &__fds) } @@ -459,11 +421,11 @@ fn gen_channel( ) -> ::std::io::Result<()> { let mut __sink = datadog_ipc::handles::FdSink::new(); datadog_ipc::handles::TransferHandles::copy_handles(req, &mut __sink).ok(); - let mut __data = datadog_ipc::codec::encode(req.discriminant(), req); + let mut __data = datadog_ipc::codec::encode(req); let __fds = __sink.into_fds(); let __max = datadog_ipc::max_message_size(); if __data.len() > __max { - ::tracing::warn!(method = 
req.variant_name(), len = __data.len(), max = __max, "IPC message too large"); + ::tracing::warn!(?req, len = __data.len(), max = __max, "IPC message too large"); } self.0.send_blocking(&mut __data, &__fds) } diff --git a/datadog-ipc/src/codec.rs b/datadog-ipc/src/codec.rs index f2c5119b71..70ba80dd6c 100644 --- a/datadog-ipc/src/codec.rs +++ b/datadog-ipc/src/codec.rs @@ -3,48 +3,26 @@ //! Codec for IPC messages. //! -//! Request wire format: `[4 bytes: u32 LE discriminant][N bytes: bincode payload]` +//! Request wire format: `[N bytes: bincode payload]` //! Response wire format: `[N bytes: bincode payload]` (no discriminant) -//! Ack wire format: `[0 bytes]` (empty datagram) +//! Ack wire format: `[1 byte: 0x00]` use serde::{de::DeserializeOwned, Serialize}; use std::fmt; -pub const DISCRIMINANT_SIZE: usize = 4; - -/// Encode a request: 4-byte LE discriminant prefix + bincode payload. -pub fn encode(discriminant: u32, value: &T) -> Vec { - let payload = bincode::serialize(value).unwrap_or_default(); - let mut buf = Vec::with_capacity(DISCRIMINANT_SIZE + payload.len()); - buf.extend_from_slice(&discriminant.to_le_bytes()); - buf.extend_from_slice(&payload); - buf -} - -/// Decode a request: returns `(discriminant, value)`. -pub fn decode(buf: &[u8]) -> Result<(u32, T), DecodeError> { - if buf.len() < DISCRIMINANT_SIZE { - return Err(DecodeError::TooShort); - } - let disc_bytes: [u8; 4] = buf[..DISCRIMINANT_SIZE].try_into().unwrap_or([0u8; 4]); - let discriminant = u32::from_le_bytes(disc_bytes); - let value = bincode::deserialize(&buf[DISCRIMINANT_SIZE..]).map_err(DecodeError::Bincode)?; - Ok((discriminant, value)) -} - -/// Encode a response (no discriminant prefix). -pub fn encode_response(value: &T) -> Vec { - bincode::serialize(value).unwrap_or_default() +/// Encode data as a bincode payload. +pub fn encode(value: &T) -> Vec { + #[allow(clippy::expect_used)] + bincode::serialize(value).expect("Encoding the response failed. 
This should never happen") } -/// Decode a response (no discriminant prefix). -pub fn decode_response(buf: &[u8]) -> Result { +/// Decode data from a bincode payload. +pub fn decode(buf: &[u8]) -> Result { bincode::deserialize(buf).map_err(DecodeError::Bincode) } #[derive(Debug)] pub enum DecodeError { - TooShort, Bincode(bincode::Error), Io(std::io::Error), } @@ -52,7 +30,6 @@ pub enum DecodeError { impl fmt::Display for DecodeError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - DecodeError::TooShort => write!(f, "IPC message too short (missing discriminant)"), DecodeError::Bincode(e) => write!(f, "IPC bincode decode error: {e}"), DecodeError::Io(e) => write!(f, "IPC I/O error: {e}"), } From adbae41010c84bfc9624db8b7c1dcc949ffaf670 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Tue, 17 Mar 2026 18:18:48 +0100 Subject: [PATCH 21/29] Avoid unnecessary session_id clones Signed-off-by: Bob Weinand --- datadog-sidecar/src/service/sender.rs | 2 +- datadog-sidecar/src/service/sidecar_server.rs | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/datadog-sidecar/src/service/sender.rs b/datadog-sidecar/src/service/sender.rs index 474d8b9c39..7763165406 100644 --- a/datadog-sidecar/src/service/sender.rs +++ b/datadog-sidecar/src/service/sender.rs @@ -223,7 +223,7 @@ impl SidecarSender { self.try_drain_outbox(); } - pub fn set_session_process_tags(&mut self, process_tags: String) { + pub fn set_session_process_tags(&mut self, process_tags: Vec) { coalesce( &mut self.outbox, SidecarInterfaceRequest::SetSessionProcessTags { process_tags }, diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index a21ae59bc3..567b7d760a 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -190,15 +190,15 @@ impl SidecarServer { self.session_counter.lock_or_panic().len() } - pub(crate) fn get_session(&self, session_id: 
&String) -> SessionInfo { + pub(crate) fn get_session(&self, session_id: &str) -> SessionInfo { let mut sessions = self.sessions.lock_or_panic(); match sessions.get(session_id) { Some(session) => session.clone(), None => { let mut session = SessionInfo::default(); - session.session_id.clone_from(session_id); + session.session_id = session_id.to_string(); info!("Initializing new session: {}", session_id); - sessions.insert(session_id.clone(), session.clone()); + sessions.insert(session_id.to_string(), session.clone()); session } } @@ -209,7 +209,7 @@ impl SidecarServer { session.get_runtime(&instance_id.runtime_id) } - async fn stop_session(&self, session_id: &String) { + async fn stop_session(&self, session_id: &str) { let session = match self.sessions.lock_or_panic().remove(session_id) { Some(session) => session, None => return, @@ -665,8 +665,8 @@ impl SidecarInterface for ConnectionSidecarHandler { } async fn set_session_process_tags(&self, _peer: PeerCredentials, process_tags: Vec) { - let session_id = self.session_id.get().cloned().unwrap_or_default(); - let session = self.server.get_session(&session_id); + let session_id = self.session_id.get().map(|s| s.as_str()).unwrap_or_default(); + let session = self.server.get_session(session_id); *session.process_tags.lock_or_panic() = process_tags; } @@ -886,8 +886,8 @@ impl SidecarInterface for ConnectionSidecarHandler { } async fn set_test_session_token(&self, _peer: PeerCredentials, token: String) { - let session_id = self.session_id.get().cloned().unwrap_or_default(); - let session = self.server.get_session(&session_id); + let session_id = self.session_id.get().map(|s| s.as_str()).unwrap_or_default(); + let session = self.server.get_session(session_id); let token = if token.is_empty() { None } else { From 29333caa49e2393ec1e487bc9065a7761c085154 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Thu, 19 Mar 2026 19:58:48 +0100 Subject: [PATCH 22/29] Don't try reading empty agent info Signed-off-by: Bob Weinand --- 
datadog-sidecar/src/service/agent_info.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/datadog-sidecar/src/service/agent_info.rs b/datadog-sidecar/src/service/agent_info.rs index fc58fd08aa..74778fa905 100644 --- a/datadog-sidecar/src/service/agent_info.rs +++ b/datadog-sidecar/src/service/agent_info.rs @@ -191,11 +191,16 @@ impl AgentInfoReader { } pub fn read(&mut self) -> (bool, &Option) { - let (updated, data) = self.reader.read(); + let (mut updated, data) = self.reader.read(); if updated { - match serde_json::from_slice(data) { - Ok(info) => self.info = Some(info), - Err(e) => error!("Failed deserializing the agent info: {e:?}"), + // This may transiently happen during AgentInfo initialization + if data.is_empty() { + updated = false + } else { + match serde_json::from_slice(data) { + Ok(info) => self.info = Some(info), + Err(e) => error!("Failed deserializing the agent info: {e:?}"), + } } } (updated, &self.info) From e5b61d0279a30ee320f2ae611822b610f80a3f3d Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 20 Mar 2026 15:12:01 +0100 Subject: [PATCH 23/29] Fix windows --- datadog-ipc/src/platform/windows/sockets.rs | 24 +++++++++++++++++-- datadog-sidecar/src/service/sidecar_server.rs | 12 ++++++++-- .../src/setup/thread_listener_windows.rs | 6 ++--- 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 784c46ca5f..cff7cfbcb7 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -565,14 +565,34 @@ impl SeqpacketConn { let srv_raw = server_handle.as_raw_handle() as SysHANDLE; unsafe { ConnectNamedPipe(srv_raw, &mut ov) }; - let client = Self::connect(&name_str)?; + // connect() blocks reading the 4-byte PID handshake that try_accept() writes after + // accepting. 
Run connect() on a thread so we can wait for ConnectNamedPipe and write + // the PID bytes concurrently, matching what try_accept() does. + let client_thread = std::thread::spawn(move || Self::connect(name_str)); - // Wait for the server-side accept to complete. + // Wait for the client to connect (ConnectNamedPipe completes). unsafe { WaitForSingleObject(event, INFINITE); CloseHandle(event as HANDLE); } + // Write PID handshake to unblock the client thread's ReadFile in connect(). + let pid_bytes = pid.to_le_bytes(); + let mut written: u32 = 0; + unsafe { + WriteFile( + srv_raw, + pid_bytes.as_ptr() as _, + 4, + &mut written, + null_mut(), + ) + }; + + let client = client_thread + .join() + .map_err(|_| io::Error::from(io::ErrorKind::Other))??; + let server = Self { handle: server_handle, peer_pid: pid, diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 567b7d760a..225e6a5440 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -665,7 +665,11 @@ impl SidecarInterface for ConnectionSidecarHandler { } async fn set_session_process_tags(&self, _peer: PeerCredentials, process_tags: Vec) { - let session_id = self.session_id.get().map(|s| s.as_str()).unwrap_or_default(); + let session_id = self + .session_id + .get() + .map(|s| s.as_str()) + .unwrap_or_default(); let session = self.server.get_session(session_id); *session.process_tags.lock_or_panic() = process_tags; } @@ -886,7 +890,11 @@ impl SidecarInterface for ConnectionSidecarHandler { } async fn set_test_session_token(&self, _peer: PeerCredentials, token: String) { - let session_id = self.session_id.get().map(|s| s.as_str()).unwrap_or_default(); + let session_id = self + .session_id + .get() + .map(|s| s.as_str()) + .unwrap_or_default(); let session = self.server.get_session(session_id); let token = if token.is_empty() { None diff --git a/datadog-sidecar/src/setup/thread_listener_windows.rs 
b/datadog-sidecar/src/setup/thread_listener_windows.rs index 81d8fb20f4..9ba4d8bcff 100644 --- a/datadog-sidecar/src/setup/thread_listener_windows.rs +++ b/datadog-sidecar/src/setup/thread_listener_windows.rs @@ -188,9 +188,9 @@ pub fn connect_to_master(pid: i32) -> io::Result> { info!("Connecting to master listener via named pipe (PID {})", pid); let liaison = NamedPipeLiaison::new(format!("libdatadog_{}_", pid)); - let conn = liaison.connect_to_server().map_err(|e| { - io::Error::other(format!("Failed to connect to master listener: {}", e)) - })?; + let conn = liaison + .connect_to_server() + .map_err(|e| io::Error::other(format!("Failed to connect to master listener: {}", e)))?; info!("Successfully connected to master listener"); Ok(Box::new(SidecarTransport::from(conn))) From 5689f4ca8ff1f1e71bdc2a0ec12c9894acc2744c Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 20 Mar 2026 17:41:28 +0100 Subject: [PATCH 24/29] Fix accidental blocking --- datadog-ipc/src/platform/windows/sockets.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index cff7cfbcb7..1a039bf883 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -198,7 +198,7 @@ fn pipe_read( fn pipe_write(h: SysHANDLE, data: &[u8], blocking: bool) -> io::Result<()> { if !blocking { - let mode = PIPE_NOWAIT; + let mode = PIPE_NOWAIT | PIPE_READMODE_MESSAGE; unsafe { SetNamedPipeHandleState(h, &mode, null(), null()) }; } @@ -214,7 +214,7 @@ fn pipe_write(h: SysHANDLE, data: &[u8], blocking: bool) -> io::Result<()> { }; if !blocking { - let mode = PIPE_WAIT; + let mode = PIPE_WAIT | PIPE_READMODE_MESSAGE; unsafe { SetNamedPipeHandleState(h, &mode, null(), null()) }; } From c9f50116fd7965aeb3e13aa1bd6dff82488a75d2 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 20 Mar 2026 23:33:13 +0100 Subject: [PATCH 25/29] overlapped accept instead of 
blocking --- datadog-ipc/Cargo.toml | 2 +- datadog-ipc/src/lib.rs | 3 +- datadog-ipc/src/platform/windows/sockets.rs | 245 +++++++++++++----- datadog-ipc/tests/windows_shm.rs | 19 +- datadog-sidecar/src/service/sidecar_server.rs | 2 +- .../src/setup/thread_listener_windows.rs | 49 +--- datadog-sidecar/src/setup/windows.rs | 4 +- datadog-sidecar/src/windows.rs | 14 +- 8 files changed, 209 insertions(+), 129 deletions(-) diff --git a/datadog-ipc/Cargo.toml b/datadog-ipc/Cargo.toml index 0365a36f31..5cb6383c17 100644 --- a/datadog-ipc/Cargo.toml +++ b/datadog-ipc/Cargo.toml @@ -49,7 +49,7 @@ glibc_version = "0.1.2" [target.'cfg(windows)'.dependencies] winapi = { version = "0.3.9", features = ["handleapi", "memoryapi", "winbase", "winnt", "winerror", "processthreadsapi", "fileapi", "minwinbase"] } windows-sys = { version = "0.48.0", features = ["Win32_System", "Win32_System_WindowsProgramming", "Win32_Foundation", "Win32_System_Pipes", "Win32_Storage_FileSystem", "Win32_System_IO", "Win32_System_Threading"] } -tokio = { version = "1.23", features = ["sync", "io-util", "signal", "net"] } +tokio = { version = "1.23", features = ["sync", "io-util", "signal", "net", "rt-multi-thread", "rt"] } [lib] bench = false diff --git a/datadog-ipc/src/lib.rs b/datadog-ipc/src/lib.rs index 715120f0eb..1ea96791da 100644 --- a/datadog-ipc/src/lib.rs +++ b/datadog-ipc/src/lib.rs @@ -18,6 +18,7 @@ pub mod codec; pub use client::IpcClientConn; pub use platform::{ - max_message_size, PeerCredentials, SeqpacketConn, SeqpacketListener, HANDLE_SUFFIX_SIZE, + max_message_size, AsyncConn, PeerCredentials, SeqpacketConn, SeqpacketListener, + HANDLE_SUFFIX_SIZE, }; pub use platform::{recv_raw_async, send_raw_async}; diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 1a039bf883..19251568b6 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -18,19 +18,22 @@ //! ``` //! //! 
Because `PIPE_READMODE_MESSAGE` delivers the entire message in one `ReadFile` call, the -//! receiver can read directly into the caller-provided buffer, then strip the suffix in-place — +//! receiver can read directly into the caller-provided buffer, then strip the suffix in-place - //! no intermediate copy needed. The caller's buffer must have at least `HANDLE_SUFFIX_SIZE` //! bytes beyond the maximum expected payload size. use crate::platform::message::MAX_FDS; +use std::future::Future; use std::io; use std::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, OwnedHandle, RawHandle}; use std::path::Path; +use std::pin::Pin; use std::ptr::{null, null_mut}; use std::sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, - Mutex, + Arc, Mutex, }; +use std::task::{Context, Poll}; // winapi – only used for things not cleanly available in windows-sys use winapi::shared::minwindef::ULONG; @@ -50,8 +53,10 @@ use windows_sys::Win32::System::Pipes::{ ConnectNamedPipe, CreateNamedPipeA, PeekNamedPipe, SetNamedPipeHandleState, PIPE_NOWAIT, PIPE_READMODE_MESSAGE, PIPE_TYPE_MESSAGE, PIPE_UNLIMITED_INSTANCES, PIPE_WAIT, }; -use windows_sys::Win32::System::Threading::{CreateEventA, WaitForSingleObject, INFINITE}; -use windows_sys::Win32::System::IO::{CancelIo, GetOverlappedResult, OVERLAPPED, OVERLAPPED_0}; +use windows_sys::Win32::System::Threading::{ + CreateEventA, SetEvent, WaitForMultipleObjects, WaitForSingleObject, INFINITE, +}; +use windows_sys::Win32::System::IO::{CancelIo, CancelIoEx, GetOverlappedResult, OVERLAPPED, OVERLAPPED_0}; /// Wire-format suffix overhead: 4-byte count + 8 bytes per handle slot. /// @@ -285,8 +290,8 @@ fn make_overlapped(event: SysHANDLE) -> OVERLAPPED { /// A named-pipe server that accepts message-mode IPC connections. /// /// `try_accept` swaps the connected pipe instance for a fresh server instance so the listener -/// remains ready for the next client. 
`accept_blocking` does the same but blocks until a client -/// connects (polling `try_accept` with a short sleep). Interior mutability (`Mutex`) allows +/// remains ready for the next client. `accept_async` does the same but awaits the connection +/// using overlapped I/O with proper cancellation support. Interior mutability (`Mutex`) allows /// `&self` in both methods. pub struct SeqpacketListener { inner: Mutex, @@ -300,7 +305,7 @@ impl SeqpacketListener { /// Bind to a named pipe derived from `path` and prepare to accept connections. /// /// Uses `FILE_FLAG_FIRST_PIPE_INSTANCE` so that a second concurrent `bind` to the same path - /// fails with `ERROR_ACCESS_DENIED` — the signal used by `attempt_listen` to detect that + /// fails with `ERROR_ACCESS_DENIED` - the signal used by `attempt_listen` to detect that /// another process is already serving. pub fn bind(path: impl AsRef) -> io::Result { let name = path_to_null_terminated(path.as_ref()); @@ -404,7 +409,7 @@ impl SeqpacketListener { // // The named pipe creator is determined by who calls CreateNamedPipeA. When PHP creates the // listener and passes it to the sidecar, GetNamedPipeServerProcessId on the client side - // returns PHP's own PID — not the sidecar's — causing DuplicateHandle to target the wrong + // returns PHP's own PID - not the sidecar's - causing DuplicateHandle to target the wrong // process. This one-shot 4-byte message lets the client discover the actual acceptor PID // before sending any handles. let my_pid = unsafe { GetCurrentProcessId() }; @@ -428,24 +433,6 @@ impl SeqpacketListener { }) } - /// Block until a client connects and return the accepted connection. - /// - /// Polls `try_accept` in a loop with a short sleep so that callers running on a - /// `spawn_blocking` thread do not spin. Because this does not go through Tokio's - /// IOCP reactor, the accepted handle is a raw synchronous handle with **no** pending - /// overlapped I/O — safe to use with `block_in_place` reads. 
- pub fn accept_blocking(&self) -> io::Result { - loop { - match self.try_accept() { - Ok(conn) => return Ok(conn), - Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - Err(e) => return Err(e), - } - } - } - pub fn as_raw_handle(&self) -> RawHandle { self.inner .lock() @@ -627,7 +614,7 @@ impl SeqpacketConn { /// Non-blocking send. /// /// Appends the handle suffix to `data` in-place, writes the message, then truncates `data` - /// back to its original length — whether the write succeeded or failed. On `WouldBlock` + /// back to its original length - whether the write succeeded or failed. On `WouldBlock` /// the caller can retry without re-encoding `data`. pub fn try_send_raw(&self, data: &mut Vec, handles: &[RawHandle]) -> io::Result<()> { let orig_len = data.len(); @@ -685,7 +672,7 @@ impl SeqpacketConn { /// Named-pipe buffer sizes are fixed at creation time on Windows, so this does not affect /// the current connection. It updates the global [`PIPE_BUFFER_SIZE`] used by all /// subsequent [`SeqpacketListener::bind`] / [`try_accept`] / [`SeqpacketConn::socketpair`] - /// calls — i.e. it takes effect on the next reconnect. + /// calls - i.e. it takes effect on the next reconnect. pub fn set_sndbuf_size(&self, size: usize) -> io::Result<()> { set_pipe_buffer_size(size); Ok(()) @@ -697,68 +684,192 @@ pub fn is_listening>(path: P) -> io::Result { Ok(SeqpacketConn::connect(path).is_ok()) } -/// Async connection type for Windows named-pipe IPC. +/// On Windows, `AsyncConn` is the same type as `SeqpacketConn` — both hold an +/// `OwnedHandle` and a peer PID. The async serve loop drives I/O via +/// `block_in_place` + raw `ReadFile`/`WriteFile`, bypassing mio entirely. +pub type AsyncConn = SeqpacketConn; + +impl SeqpacketConn { + /// No-op on Windows: the connection is already usable as an `AsyncConn`. 
+ pub fn into_async_conn(self) -> io::Result { + Ok(self) + } +} + +/// ConnectFuture is a cancellable overlapped ConnectNamedPipe /// -/// Wraps a raw synchronous pipe handle; recv/send go through `block_in_place` + -/// `ReadFile`/`WriteFile` with a caller-supplied large buffer, bypassing mio's -/// 4 KB internal IOCP read buffer limit. -pub struct AsyncSeqpacketConn { - handle: OwnedHandle, - pub(crate) peer_pid: u32, +/// A future that resolves when a client connects to the pipe server handle, or +/// returns `Interrupted` if dropped before completion. +/// +/// On drop, `SetEvent(cancel_event)` is called. The dedicated OS thread +/// detects this via `WaitForMultipleObjects`, calls `CancelIoEx` to abort the +/// overlapped `ConnectNamedPipe`, and then exits - no Tokio `spawn_blocking` +/// task is left behind. +struct ConnectFuture { + rx: tokio::sync::oneshot::Receiver>, + /// Windows manual-reset event shared with the worker thread. Signalled + /// here on drop to tell the thread to cancel its pending operation. + cancel_event: Arc, } -// SAFETY: the inner OwnedHandle is not shared. -unsafe impl Send for AsyncSeqpacketConn {} +impl Drop for ConnectFuture { + fn drop(&mut self) { + unsafe { SetEvent(self.cancel_event.as_raw_handle() as SysHANDLE) }; + } +} -impl AsyncSeqpacketConn { - pub fn peer_credentials(&self) -> io::Result { - Ok(PeerCredentials { - pid: self.peer_pid, - uid: 0, +impl Future for ConnectFuture { + type Output = io::Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + Pin::new(&mut self.rx).poll(cx).map(|r| { + r.unwrap_or_else(|_| Err(io::Error::from(io::ErrorKind::BrokenPipe))) }) } } -pub type AsyncConn = AsyncSeqpacketConn; +impl SeqpacketListener { + /// Asynchronously accept one client connection. + /// + /// Installs a fresh server handle *before* any `await` point so the + /// listener remains ready even if this future is dropped mid-accept. 
+ /// + /// A dedicated OS thread (not a Tokio `spawn_blocking` task) manages the + /// overlapped `ConnectNamedPipe` call. When the future is dropped + /// (`select!` shutdown), `SetEvent` signals the thread to call + /// `CancelIoEx` and exit immediately - Tokio's runtime shutdown is never + /// blocked waiting for a lingering thread-pool task. + pub async fn accept_async(&self) -> io::Result { + // Create the replacement server handle before taking the lock. + let new_server = create_pipe_server(&self.name, false)?; + + // Atomically swap: the listener now holds a fresh handle ready for + // the *next* accept; `current` is the handle we will connect. + let current = { + let mut guard = self + .inner + .lock() + .map_err(|_| io::Error::from(io::ErrorKind::Other))?; + std::mem::replace(&mut *guard, new_server) + }; -impl SeqpacketConn { - /// Convert to an async connection for use in async server dispatch loops. - pub fn into_async_conn(self) -> io::Result { - Ok(AsyncSeqpacketConn { - handle: self.handle, - peer_pid: self.peer_pid, - }) + // Cancel event shared between the future's Drop and the worker thread. + let cancel_raw = unsafe { CreateEventA(null_mut(), 1, 0, null_mut()) }; + if cancel_raw == 0 { + return Err(io::Error::last_os_error()); + } + let cancel_arc = + Arc::new(unsafe { OwnedHandle::from_raw_handle(cancel_raw as RawHandle) }); + let cancel_for_thread = Arc::clone(&cancel_arc); + + let (tx, rx) = tokio::sync::oneshot::channel::>(); + + std::thread::spawn(move || { + let raw = current.as_raw_handle() as SysHANDLE; + let cancel_raw = cancel_for_thread.as_raw_handle() as SysHANDLE; + + // Create event for the overlapped ConnectNamedPipe. + let overlapped_event = unsafe { CreateEventA(null_mut(), 1, 0, null_mut()) }; + if overlapped_event == 0 { + let _ = tx.send(Err(io::Error::last_os_error())); + return; + } + + // `ov` is on the thread's stack - stable for the thread's lifetime. 
+ let mut overlapped = make_overlapped(overlapped_event); + + let connect_result = unsafe { ConnectNamedPipe(raw, &mut overlapped) }; + let connect_err = io::Error::last_os_error(); + + // conn_result: io::Result - PID handshake applied below. + let conn_result: io::Result = if connect_result != 0 + || connect_err.raw_os_error() == Some(ERROR_PIPE_CONNECTED as i32) + { + // Already connected (e.g. client arrived before ConnectNamedPipe). + unsafe { CloseHandle(overlapped_event as HANDLE) }; + Ok(current) + } else if connect_err.raw_os_error() + == Some(windows_sys::Win32::Foundation::ERROR_IO_PENDING as i32) + { + // Overlapped pending - wait for connection or cancellation. + let handles = [overlapped_event, cancel_raw]; + let wait = + unsafe { WaitForMultipleObjects(2, handles.as_ptr() as _, 0, INFINITE) }; + + unsafe { CloseHandle(overlapped_event as HANDLE) }; + + if wait == WAIT_OBJECT_0 { + // Connected. + let mut transferred: u32 = 0; + let ok = unsafe { GetOverlappedResult(raw, &overlapped, &mut transferred, 0) }; + if ok != 0 { + Ok(current) + } else { + Err(io::Error::last_os_error()) + } + } else { + // Cancelled (or error) - abort the overlapped op. + unsafe { CancelIoEx(raw, &overlapped) }; + let mut transferred: u32 = 0; + // bWait=1: block until the cancellation IOCP completion arrives. + unsafe { GetOverlappedResult(raw, &overlapped, &mut transferred, 1) }; + Err(io::Error::from(io::ErrorKind::Interrupted)) + } + } else { + unsafe { CloseHandle(overlapped_event as HANDLE) }; + Err(connect_err) + }; + + // Write PID handshake and build AsyncConn on success. 
+ let result = conn_result.map(|conn_handle| { + let conn_raw = conn_handle.as_raw_handle() as SysHANDLE; + let mut client_pid: ULONG = 0; + unsafe { + GetNamedPipeClientProcessId(conn_raw as HANDLE, &mut client_pid); + } + let pid_bytes = unsafe { GetCurrentProcessId() }.to_le_bytes(); + let mut written: u32 = 0; + unsafe { + WriteFile(conn_raw, pid_bytes.as_ptr() as _, 4, &mut written, null_mut()); + } + SeqpacketConn::from_server_handle(conn_handle, client_pid) + }); + + let _ = tx.send(result); + // cancel_for_thread (Arc) is dropped here. + }); + + ConnectFuture { + rx, + cancel_event: cancel_arc, + } + .await } } /// Async receive on a Windows named pipe IPC connection. /// -/// Calls `block_in_place` with a direct blocking `ReadFile` into a caller-owned buffer, -/// bypassing mio's 4 KB internal read buffer and correctly handling messages of any size. +/// Uses `block_in_place` + raw `ReadFile` to avoid mio's 4 KB internal read- +/// buffer limit. For message-mode pipes a single `ReadFile` delivers the +/// entire message. pub async fn recv_raw_async(conn: &AsyncConn) -> io::Result<(Vec, Vec)> { - let h = conn.handle.as_raw_handle() as SysHANDLE; + let raw = conn.as_raw_handle() as SysHANDLE; tokio::task::block_in_place(|| { let size = max_message_size() + HANDLE_SUFFIX_SIZE; - let mut buf = Vec::with_capacity(size); - // SAFETY: all bit patterns are valid for u8; pipe_read writes exactly n bytes into - // the spare capacity before set_len(n) is called below. - let (n, handles) = pipe_read( - h, - unsafe { std::slice::from_raw_parts_mut(buf.as_mut_ptr(), size) }, - true, - )?; - unsafe { buf.set_len(n) }; + let mut buf = vec![0u8; size]; + let (payload_len, handles) = pipe_read(raw, &mut buf, true)?; + buf.truncate(payload_len); Ok((buf, handles)) }) } /// Async send on a Windows named pipe IPC connection. /// -/// Server responses never carry handles; a zero-handle-count suffix is appended automatically. -/// Uses `block_in_place` with a direct `WriteFile`. 
+/// Server responses never carry handles; a zero-handle-count suffix is +/// appended. Uses `block_in_place` + raw `WriteFile` to bypass mio. pub async fn send_raw_async(conn: &AsyncConn, data: &[u8]) -> io::Result<()> { + let raw = conn.as_raw_handle() as SysHANDLE; let mut buf = data.to_vec(); - buf.extend_from_slice(&0u32.to_le_bytes()); - let h = conn.handle.as_raw_handle() as SysHANDLE; - tokio::task::block_in_place(|| pipe_write(h, &buf, true)) + buf.extend_from_slice(&0u32.to_le_bytes()); // zero handle count + tokio::task::block_in_place(move || pipe_write(raw, &buf, true)) } diff --git a/datadog-ipc/tests/windows_shm.rs b/datadog-ipc/tests/windows_shm.rs index 4f74afa867..ebba54121b 100644 --- a/datadog-ipc/tests/windows_shm.rs +++ b/datadog-ipc/tests/windows_shm.rs @@ -22,7 +22,7 @@ fn test_shm_handle_transfer() { .unwrap(); rt.spawn({ let server = ExampleServer::default(); - async move { server.accept_connection(conn_server).await } + async move { server.accept_connection(conn_server.into_async_conn().unwrap()).await } }); let mut channel = ExampleInterfaceChannel::new(conn_client); @@ -43,15 +43,12 @@ fn test_shm_handle_transfer() { /// Verifies that IPC messages larger than 4 KB are handled without panicking. /// -/// Using Tokio's `NamedPipeServer`, it registered the pipe handle with mio/IOCP, which -/// posted overlapped `ReadFile` calls into a fixed 4 KB internal buffer. Messages larger than -/// 4 KB caused `ReadFile` to return `ERROR_MORE_DATA` synchronously; Windows still queued an -/// IOCP completion, but mio had already transitioned `io.read` to `State::Err`. When the -/// completion fired, mio's `read_done` hit `_ => unreachable!()` (named_pipe.rs:871). -/// -/// Which is why we route serve-loop I/O through `block_in_place` + direct `ReadFile` into the -/// caller-supplied large buffer, bypassing mio's 4 KB limit entirely. This serves as regression -/// test. 
+/// The serve loop uses Tokio's IOCP-backed `readable().await` + `try_read()` pattern +/// (no `block_in_place`, no mio). The pipe is created with `FILE_FLAG_OVERLAPPED` and +/// registered via `NamedPipeServer::from_raw_handle`, giving Tokio direct IOCP ownership. +/// `try_read` reads into a caller-supplied buffer large enough for any message, so +/// `ERROR_MORE_DATA` can never occur. This test is a regression guard for the old mio +/// 4 KB panic. #[test] fn test_large_message() { let (conn_server, conn_client) = SeqpacketConn::socketpair().unwrap(); @@ -63,7 +60,7 @@ fn test_large_message() { .unwrap(); rt.spawn({ let server = ExampleServer::default(); - async move { server.accept_connection(conn_server).await } + async move { server.accept_connection(conn_server.into_async_conn().unwrap()).await } }); let mut channel = ExampleInterfaceChannel::new(conn_client); diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 225e6a5440..93f8d6f8e0 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -173,7 +173,7 @@ impl SidecarServer { /// /// # Arguments /// - /// * `conn`: A `SeqpacketConn` that represents the connection to the client. + /// * `conn`: The connection to the client. 
pub async fn accept_connection(self, conn: SeqpacketConn) { let handler = Arc::new(ConnectionSidecarHandler::new(self)); let handler_for_cleanup = handler.clone(); diff --git a/datadog-sidecar/src/setup/thread_listener_windows.rs b/datadog-sidecar/src/setup/thread_listener_windows.rs index 9ba4d8bcff..333164c564 100644 --- a/datadog-sidecar/src/setup/thread_listener_windows.rs +++ b/datadog-sidecar/src/setup/thread_listener_windows.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use std::io; -use std::sync::{Arc, Mutex, OnceLock}; +use std::sync::{Mutex, OnceLock}; use std::thread::{self, JoinHandle}; use tokio::sync::oneshot; use tracing::{error, info}; @@ -12,9 +12,7 @@ use crate::entry::MainLoopConfig; use crate::service::blocking::SidecarTransport; use crate::setup::Liaison; use crate::setup::NamedPipeLiaison; -use datadog_ipc::{SeqpacketConn, SeqpacketListener}; -use futures::FutureExt; -use manual_future::ManualFuture; +use datadog_ipc::{AsyncConn, SeqpacketListener}; static MASTER_LISTENER: OnceLock>> = OnceLock::new(); @@ -107,46 +105,25 @@ impl MasterListener { } } -/// Accept connections in a loop for Windows named pipes. +/// Accept connections in a loop using IOCP-backed async named pipes. +/// +/// `listener.accept_async()` uses Tokio's Windows named-pipe reactor so that +/// `connect().await` is directly `select!`-cancellable — no `spawn_blocking` or +/// polling loop. When the shutdown signal arrives the select arm fires immediately +/// and the accept future is dropped cleanly. 
async fn accept_socket_loop_thread_windows( listener: SeqpacketListener, - handler: Box, - shutdown_rx: oneshot::Receiver<()>, + handler: Box, + mut shutdown_rx: oneshot::Receiver<()>, ) -> io::Result<()> { - let (closed_future, close_completer) = ManualFuture::new(); - let close_completer = Arc::new(Mutex::new(Some(close_completer))); - - tokio::spawn({ - let close_completer = Arc::clone(&close_completer); - async move { - let _ = shutdown_rx.await; - if let Some(completer) = close_completer.lock().ok().and_then(|mut g| g.take()) { - completer.complete(()).await; - } - } - }); - - let listener = Arc::new(listener); - let cancellation = closed_future.shared(); loop { - let listener_clone = Arc::clone(&listener); tokio::select! { - _ = cancellation.clone() => { + _ = &mut shutdown_rx => { info!("Shutdown signal received in Windows pipe listener"); break; } - result = tokio::task::spawn_blocking(move || listener_clone.accept_blocking()) => { - match result { - Ok(Ok(conn)) => handler(conn), - Ok(Err(e)) => { - error!("Failed to accept worker connection: {}", e); - break; - } - Err(e) => { - error!("Listener task panicked: {}", e); - break; - } - } + result = listener.accept_async() => { + handler(result?); } } } diff --git a/datadog-sidecar/src/setup/windows.rs b/datadog-sidecar/src/setup/windows.rs index a3c83aa754..e25c760583 100644 --- a/datadog-sidecar/src/setup/windows.rs +++ b/datadog-sidecar/src/setup/windows.rs @@ -4,11 +4,11 @@ use crate::primary_sidecar_identifier; use crate::setup::Liaison; use datadog_ipc::platform::PIPE_PATH; -use datadog_ipc::{SeqpacketConn, SeqpacketListener}; +use datadog_ipc::{AsyncConn, SeqpacketConn, SeqpacketListener}; use libc::getpid; use std::io; -pub type IpcClient = SeqpacketConn; +pub type IpcClient = AsyncConn; pub type IpcServer = SeqpacketListener; pub struct NamedPipeLiaison { diff --git a/datadog-sidecar/src/windows.rs b/datadog-sidecar/src/windows.rs index a141030491..ac66afa047 100644 --- 
a/datadog-sidecar/src/windows.rs +++ b/datadog-sidecar/src/windows.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::enter_listener_loop; -use datadog_ipc::{SeqpacketConn, SeqpacketListener}; +use datadog_ipc::{AsyncConn, SeqpacketListener}; use futures::FutureExt; use libdd_common::Endpoint; @@ -89,20 +89,14 @@ pub extern "C" fn ddog_daemon_entry_point(_trampoline_data: &TrampolineData) { async fn accept_socket_loop( listener: SeqpacketListener, cancellation: ManualFuture<()>, - handler: Box, + handler: Box, ) -> io::Result<()> { - // Use spawn_blocking + accept_blocking so the accepted handle has no pending overlapped - // I/O registered with mio/IOCP. recv_raw_async/send_raw_async then use block_in_place - // with a caller-supplied large buffer, bypassing mio's 4 KB internal ReadFile buffer - // (which would cause ERROR_MORE_DATA → unreachable!() panic for larger messages). - let listener = Arc::new(listener); let cancellation = cancellation.shared(); loop { - let listener_clone = Arc::clone(&listener); select! 
{ _ = cancellation.clone() => break, - result = tokio::task::spawn_blocking(move || listener_clone.accept_blocking()) => { - handler(result??); + result = listener.accept_async() => { + handler(result?); } } } From 80b054c148daabbe1a7661ba6207b6b9251367e4 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Sat, 21 Mar 2026 00:07:02 +0100 Subject: [PATCH 26/29] fmt --- datadog-ipc/src/platform/windows/sockets.rs | 24 +++++++++++++-------- datadog-ipc/tests/windows_shm.rs | 12 +++++++++-- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/datadog-ipc/src/platform/windows/sockets.rs b/datadog-ipc/src/platform/windows/sockets.rs index 19251568b6..57eb3ddf96 100644 --- a/datadog-ipc/src/platform/windows/sockets.rs +++ b/datadog-ipc/src/platform/windows/sockets.rs @@ -56,7 +56,9 @@ use windows_sys::Win32::System::Pipes::{ use windows_sys::Win32::System::Threading::{ CreateEventA, SetEvent, WaitForMultipleObjects, WaitForSingleObject, INFINITE, }; -use windows_sys::Win32::System::IO::{CancelIo, CancelIoEx, GetOverlappedResult, OVERLAPPED, OVERLAPPED_0}; +use windows_sys::Win32::System::IO::{ + CancelIo, CancelIoEx, GetOverlappedResult, OVERLAPPED, OVERLAPPED_0, +}; /// Wire-format suffix overhead: 4-byte count + 8 bytes per handle slot. 
/// @@ -722,9 +724,9 @@ impl Future for ConnectFuture { type Output = io::Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.rx).poll(cx).map(|r| { - r.unwrap_or_else(|_| Err(io::Error::from(io::ErrorKind::BrokenPipe))) - }) + Pin::new(&mut self.rx) + .poll(cx) + .map(|r| r.unwrap_or_else(|_| Err(io::Error::from(io::ErrorKind::BrokenPipe)))) } } @@ -758,8 +760,7 @@ impl SeqpacketListener { if cancel_raw == 0 { return Err(io::Error::last_os_error()); } - let cancel_arc = - Arc::new(unsafe { OwnedHandle::from_raw_handle(cancel_raw as RawHandle) }); + let cancel_arc = Arc::new(unsafe { OwnedHandle::from_raw_handle(cancel_raw as RawHandle) }); let cancel_for_thread = Arc::clone(&cancel_arc); let (tx, rx) = tokio::sync::oneshot::channel::>(); @@ -793,8 +794,7 @@ impl SeqpacketListener { { // Overlapped pending - wait for connection or cancellation. let handles = [overlapped_event, cancel_raw]; - let wait = - unsafe { WaitForMultipleObjects(2, handles.as_ptr() as _, 0, INFINITE) }; + let wait = unsafe { WaitForMultipleObjects(2, handles.as_ptr() as _, 0, INFINITE) }; unsafe { CloseHandle(overlapped_event as HANDLE) }; @@ -830,7 +830,13 @@ impl SeqpacketListener { let pid_bytes = unsafe { GetCurrentProcessId() }.to_le_bytes(); let mut written: u32 = 0; unsafe { - WriteFile(conn_raw, pid_bytes.as_ptr() as _, 4, &mut written, null_mut()); + WriteFile( + conn_raw, + pid_bytes.as_ptr() as _, + 4, + &mut written, + null_mut(), + ); } SeqpacketConn::from_server_handle(conn_handle, client_pid) }); diff --git a/datadog-ipc/tests/windows_shm.rs b/datadog-ipc/tests/windows_shm.rs index ebba54121b..681f05ac2e 100644 --- a/datadog-ipc/tests/windows_shm.rs +++ b/datadog-ipc/tests/windows_shm.rs @@ -22,7 +22,11 @@ fn test_shm_handle_transfer() { .unwrap(); rt.spawn({ let server = ExampleServer::default(); - async move { server.accept_connection(conn_server.into_async_conn().unwrap()).await } + async move { + server + 
.accept_connection(conn_server.into_async_conn().unwrap()) + .await + } }); let mut channel = ExampleInterfaceChannel::new(conn_client); @@ -60,7 +64,11 @@ fn test_large_message() { .unwrap(); rt.spawn({ let server = ExampleServer::default(); - async move { server.accept_connection(conn_server.into_async_conn().unwrap()).await } + async move { + server + .accept_connection(conn_server.into_async_conn().unwrap()) + .await + } }); let mut channel = ExampleInterfaceChannel::new(conn_client); From a648a7836e152ec3b9935449b55ec8aa809e991f Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Sat, 21 Mar 2026 02:32:42 +0100 Subject: [PATCH 27/29] Fix windows panic at startup time --- datadog-sidecar/src/shm_remote_config.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/datadog-sidecar/src/shm_remote_config.rs b/datadog-sidecar/src/shm_remote_config.rs index 11b9cb9c63..22f11c3f23 100644 --- a/datadog-sidecar/src/shm_remote_config.rs +++ b/datadog-sidecar/src/shm_remote_config.rs @@ -635,9 +635,10 @@ impl RemoteConfigManager { self.check_configs = self.active_configs.keys().cloned().collect(); } + let expiry = Instant::now().checked_sub(Duration::from_secs(3666)); while let Some((_, Reverse(instant))) = self.unexpired_targets.peek() { #[allow(clippy::unwrap_used)] - if *instant < Instant::now() - Duration::from_secs(3666) { + if expiry.map_or(false, |e| *instant < e) { let (target, _) = self.unexpired_targets.pop().unwrap(); self.encountered_targets.remove(&target); } else { From c7448a20b0986b47b0ae022145661a70a73a6cf7 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Sat, 21 Mar 2026 03:25:51 +0100 Subject: [PATCH 28/29] clippy Signed-off-by: Bob Weinand --- datadog-sidecar/src/service/sidecar_server.rs | 7 ++++--- datadog-sidecar/src/shm_remote_config.rs | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index 93f8d6f8e0..e7fc1ba994 100644 --- 
a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -94,8 +94,6 @@ pub struct SidecarServer { /// A `Mutex` guarded optional `ManualFutureCompleter` for telemetry configuration. pub self_telemetry_config: Arc>>>, - /// Keeps track of the number of submitted payloads. - pub(crate) submitted_payloads: Arc, /// All tracked agent infos per endpoint pub agent_infos: AgentInfos, /// All remote config handling @@ -113,6 +111,8 @@ struct ConnectionSidecarHandler { /// Used to auto-register metrics in newly-created telemetry clients when a metric point /// for a previously registered metric arrives for a new (service, env) combination. metric_registrations: Mutex>, + /// Keeps track of the number of submitted payloads. + pub(crate) submitted_payloads: Arc, } impl ConnectionSidecarHandler { @@ -122,6 +122,7 @@ impl ConnectionSidecarHandler { session_id: Default::default(), instances: Default::default(), metric_registrations: Default::default(), + submitted_payloads: Default::default(), } } @@ -358,7 +359,7 @@ impl SidecarServer { impl SidecarInterface for ConnectionSidecarHandler { fn recv_counter(&self) -> &AtomicU64 { - &self.server.submitted_payloads + &self.submitted_payloads } async fn enqueue_actions( diff --git a/datadog-sidecar/src/shm_remote_config.rs b/datadog-sidecar/src/shm_remote_config.rs index 22f11c3f23..9015f83ea9 100644 --- a/datadog-sidecar/src/shm_remote_config.rs +++ b/datadog-sidecar/src/shm_remote_config.rs @@ -638,7 +638,7 @@ impl RemoteConfigManager { let expiry = Instant::now().checked_sub(Duration::from_secs(3666)); while let Some((_, Reverse(instant))) = self.unexpired_targets.peek() { #[allow(clippy::unwrap_used)] - if expiry.map_or(false, |e| *instant < e) { + if expiry.is_some_and(|e| *instant < e) { let (target, _) = self.unexpired_targets.pop().unwrap(); self.encountered_targets.remove(&target); } else { From 35b13a1c05b831d377250c425f79498252060934 Mon Sep 17 00:00:00 2001 From: Bob Weinand 
Date: Sat, 21 Mar 2026 05:13:08 +0100 Subject: [PATCH 29/29] Fix counter Signed-off-by: Bob Weinand --- datadog-sidecar/src/self_telemetry.rs | 20 +++++++++++++++++-- datadog-sidecar/src/service/sidecar_server.rs | 15 ++++++++++---- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/datadog-sidecar/src/self_telemetry.rs b/datadog-sidecar/src/self_telemetry.rs index 4fe33bc84c..14f94ebf7e 100644 --- a/datadog-sidecar/src/self_telemetry.rs +++ b/datadog-sidecar/src/self_telemetry.rs @@ -11,7 +11,7 @@ use libdd_telemetry::worker::{ LifecycleAction, TelemetryActions, TelemetryWorkerBuilder, TelemetryWorkerHandle, }; use manual_future::ManualFuture; -use std::sync::atomic::Ordering; +use std::sync::atomic::{AtomicU64, Ordering}; use std::time::Duration; use tokio::select; use tokio::task::JoinHandle; @@ -21,6 +21,7 @@ struct MetricData<'a> { sidecar_watchdog: &'a WatchdogHandle, server: &'a SidecarServer, submitted_payloads: ContextKey, + last_submitted_payloads: AtomicU64, active_sessions: ContextKey, memory_usage: ContextKey, logs_created: ContextKey, @@ -42,10 +43,24 @@ impl MetricData<'_> { async fn collect_and_send(&self) { let trace_metrics = self.server.trace_flusher.collect_metrics(); + let submitted_payloads_delta = { + let mut counters = self.server.connection_counters.lock_or_panic(); + let mut sum = 0u64; + counters.retain(|weak| { + if let Some(counter) = weak.upgrade() { + sum += counter.load(Ordering::Relaxed); + true + } else { + false + } + }); + sum.saturating_sub(self.last_submitted_payloads.swap(sum, Ordering::Relaxed)) + }; + let mut futures = vec![ self.send( self.submitted_payloads, - self.server.submitted_payloads.swap(0, Ordering::Relaxed) as f64, + submitted_payloads_delta as f64, vec![], ), self.send( @@ -196,6 +211,7 @@ impl SelfTelemetry { worker: &worker, server: &self.server, sidecar_watchdog: &self.watchdog_handle, + last_submitted_payloads: AtomicU64::new(0), submitted_payloads: worker.register_metric_context( 
"server.submitted_payloads".to_string(), vec![], diff --git a/datadog-sidecar/src/service/sidecar_server.rs b/datadog-sidecar/src/service/sidecar_server.rs index e7fc1ba994..a2be2752b6 100644 --- a/datadog-sidecar/src/service/sidecar_server.rs +++ b/datadog-sidecar/src/service/sidecar_server.rs @@ -22,7 +22,7 @@ use std::borrow::Cow; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, Mutex, Weak}; use std::time::{Duration, SystemTime}; use tracing::{debug, error, info, trace, warn}; @@ -94,6 +94,8 @@ pub struct SidecarServer { /// A `Mutex` guarded optional `ManualFutureCompleter` for telemetry configuration. pub self_telemetry_config: Arc>>>, + /// Weak references to per-connection payload counters, for telemetry aggregation. + pub(crate) connection_counters: Arc>>>, /// All tracked agent infos per endpoint pub agent_infos: AgentInfos, /// All remote config handling @@ -105,24 +107,29 @@ pub struct SidecarServer { /// Per-connection handler wrapper that tracks sessions/instances for cleanup on disconnect. struct ConnectionSidecarHandler { server: SidecarServer, + /// Per-connection counter incremented on each received IPC message. + submitted_payloads: Arc, session_id: std::sync::OnceLock, instances: Mutex>, /// All telemetry metric registrations received on this connection, keyed by metric name. /// Used to auto-register metrics in newly-created telemetry clients when a metric point /// for a previously registered metric arrives for a new (service, env) combination. metric_registrations: Mutex>, - /// Keeps track of the number of submitted payloads. 
- pub(crate) submitted_payloads: Arc, } impl ConnectionSidecarHandler { fn new(server: SidecarServer) -> Self { + let submitted_payloads = Arc::new(AtomicU64::new(0)); + server + .connection_counters + .lock_or_panic() + .push(Arc::downgrade(&submitted_payloads)); Self { server, + submitted_payloads, session_id: Default::default(), instances: Default::default(), metric_registrations: Default::default(), - submitted_payloads: Default::default(), } }