-
-
Notifications
You must be signed in to change notification settings - Fork 333
Implement server::Builder::max_send_buffer_size #577
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -51,6 +51,12 @@ pub(super) struct Prioritize { | |
|
|
||
| /// What `DATA` frame is currently being sent in the codec. | ||
| in_flight_data_frame: InFlightData, | ||
|
|
||
| /// The max send buffer size allowed. | ||
| max_send_buffer_size: usize, | ||
|
|
||
| /// The current send buffer size. | ||
| current_send_buffer_size: usize, | ||
| } | ||
|
|
||
| #[derive(Debug, Eq, PartialEq)] | ||
|
|
@@ -93,9 +99,17 @@ impl Prioritize { | |
| flow, | ||
| last_opened_id: StreamId::ZERO, | ||
| in_flight_data_frame: InFlightData::Nothing, | ||
| max_send_buffer_size: usize::MAX, | ||
| current_send_buffer_size: 0, | ||
| } | ||
| } | ||
|
|
||
| pub fn set_max_send_buffer_size(&mut self, max: usize, store: &mut Store, counts: &mut Counts) { | ||
| self.max_send_buffer_size = max; | ||
|
|
||
| self.assign_connection_capacity(0, store, counts); | ||
| } | ||
|
|
||
| /// Queue a frame to be sent to the remote | ||
| pub fn queue_frame<B>( | ||
| &mut self, | ||
|
|
@@ -175,6 +189,8 @@ impl Prioritize { | |
| self.try_assign_capacity(stream); | ||
| } | ||
|
|
||
| self.current_send_buffer_size += sz as usize; | ||
|
|
||
| if frame.is_end_stream() { | ||
| stream.state.send_close(); | ||
| self.reserve_capacity(0, stream, counts); | ||
|
|
@@ -350,7 +366,7 @@ impl Prioritize { | |
| self.flow.assign_capacity(inc); | ||
|
|
||
| // Assign newly acquired capacity to streams pending capacity. | ||
| while self.flow.available() > 0 { | ||
| while self.available() > 0 { | ||
| let stream = match self.pending_capacity.pop(store) { | ||
| Some(stream) => stream, | ||
| None => return, | ||
|
|
@@ -373,6 +389,17 @@ impl Prioritize { | |
| } | ||
| } | ||
|
|
||
| fn available(&self) -> WindowSize { | ||
| cmp::min( | ||
| self.flow.available().as_size() as usize, | ||
| cmp::min( | ||
| self.max_send_buffer_size | ||
| .saturating_sub(self.current_send_buffer_size), | ||
| WindowSize::MAX as usize, | ||
| ), | ||
| ) as WindowSize | ||
| } | ||
|
|
||
| /// Request capacity to send data | ||
| fn try_assign_capacity(&mut self, stream: &mut store::Ptr) { | ||
| let total_requested = stream.requested_send_capacity; | ||
|
|
@@ -395,7 +422,8 @@ impl Prioritize { | |
| additional, | ||
| buffered = stream.buffered_send_data, | ||
| window = stream.send_flow.window_size(), | ||
| conn = %self.flow.available() | ||
| conn_window = %self.flow.available(), | ||
| conn = self.available(), | ||
| ); | ||
|
|
||
| if additional == 0 { | ||
|
|
@@ -413,7 +441,7 @@ impl Prioritize { | |
| ); | ||
|
|
||
| // The amount of currently available capacity on the connection | ||
| let conn_available = self.flow.available().as_size(); | ||
| let conn_available = self.available(); | ||
|
|
||
| // First check if capacity is immediately available | ||
| if conn_available > 0 { | ||
|
|
@@ -509,6 +537,10 @@ impl Prioritize { | |
|
|
||
| // Because, always try to reclaim... | ||
| self.reclaim_frame(buffer, store, dst); | ||
|
|
||
| // Maybe schedule streams if the send buffer is not full | ||
| // anymore. | ||
| self.assign_connection_capacity(0, store, counts); | ||
| } | ||
| None => { | ||
| // Try to flush the codec. | ||
|
|
@@ -630,6 +662,8 @@ impl Prioritize { | |
| tracing::trace!(?frame, "dropping"); | ||
| } | ||
|
|
||
| self.current_send_buffer_size -= stream.buffered_send_data; | ||
|
|
||
| stream.buffered_send_data = 0; | ||
| stream.requested_send_capacity = 0; | ||
| if let InFlightData::DataFrame(key) = self.in_flight_data_frame { | ||
|
|
@@ -736,6 +770,8 @@ impl Prioritize { | |
| tracing::trace_span!("updating stream flow").in_scope(|| { | ||
| stream.send_flow.send_data(len); | ||
|
|
||
| self.current_send_buffer_size -= len as usize; | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think this area needs to potentially wake up any tasks that have been waiting on "capacity", since now there is "more buffer space to use". The reason it needs to be done here, is because with flow control capacity, the connection doesn't actually get more until the peer has sent another WINDOW_UPDATE frame. So
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm not sure I understand. Why do we even call
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I am even more confused that we call
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Doesn't that mean that
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I called |
||
|
|
||
| // Decrement the stream's buffered data counter | ||
| debug_assert!(stream.buffered_send_data >= len as usize); | ||
| stream.buffered_send_data -= len as usize; | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -1561,3 +1561,86 @@ async fn data_padding() { | |||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| join(srv, h2).await; | ||||||||||||||||||||||||
| } | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| #[tokio::test] | ||||||||||||||||||||||||
| async fn notify_on_send_buffer_available() { | ||||||||||||||||||||||||
| // This test ensures that the stream gets notified when there is additional | ||||||||||||||||||||||||
| // send buffer space. | ||||||||||||||||||||||||
| h2_support::trace_init!(); | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| let (io, mut client) = mock::new(); | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| let client = async move { | ||||||||||||||||||||||||
| let settings = client.assert_server_handshake().await; | ||||||||||||||||||||||||
| assert_default_settings!(settings); | ||||||||||||||||||||||||
| client.send_frame( | ||||||||||||||||||||||||
| frames::headers(1) | ||||||||||||||||||||||||
| .request("GET", "https://www.example.com/") | ||||||||||||||||||||||||
| .eos() | ||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||
| .await; | ||||||||||||||||||||||||
| client.send_frame( | ||||||||||||||||||||||||
| frames::headers(3) | ||||||||||||||||||||||||
| .request("GET", "https://www.example.com/") | ||||||||||||||||||||||||
| .eos() | ||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||
| .await; | ||||||||||||||||||||||||
| client.recv_frame(frames::headers(1).response(200)).await; | ||||||||||||||||||||||||
| client.recv_frame(frames::headers(3).response(200)).await; | ||||||||||||||||||||||||
| dbg!(11); | ||||||||||||||||||||||||
| client.recv_frame(frames::data(1, &b"abcde"[..]).eos()).await; | ||||||||||||||||||||||||
| dbg!(31); | ||||||||||||||||||||||||
| client.recv_frame(frames::data(3, &b"abcde"[..])).await; | ||||||||||||||||||||||||
| dbg!(32); | ||||||||||||||||||||||||
| client.recv_frame(frames::data(3, &b"abcde"[..])).await; | ||||||||||||||||||||||||
| dbg!(33); | ||||||||||||||||||||||||
| client.recv_frame(frames::data(3, &b"abcde"[..])).await; | ||||||||||||||||||||||||
| dbg!(34); | ||||||||||||||||||||||||
| client.recv_frame(frames::data(3, &b""[..]).eos()).await; | ||||||||||||||||||||||||
| }; | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| let srv = async move { | ||||||||||||||||||||||||
| let mut srv = server::Builder::new() | ||||||||||||||||||||||||
| .max_send_buffer_size(5) | ||||||||||||||||||||||||
| .handshake::<_, Bytes>(io) | ||||||||||||||||||||||||
| .await | ||||||||||||||||||||||||
| .expect("handshake"); | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| let (_req, mut reply1) = srv.next().await.unwrap().unwrap(); | ||||||||||||||||||||||||
| let (_req, mut reply2) = srv.next().await.unwrap().unwrap(); | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| let mut stream1 = reply1.send_response(http::Response::new(()), false).unwrap(); | ||||||||||||||||||||||||
| let mut stream2 = reply2.send_response(http::Response::new(()), false).unwrap(); | ||||||||||||||||||||||||
| drop((reply1, reply2)); | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| let t0 = tokio::spawn(async move { | ||||||||||||||||||||||||
| assert!(srv.next().await.is_none(), "unexpected request"); | ||||||||||||||||||||||||
| }); | ||||||||||||||||||||||||
| let t1 = tokio::spawn(async move { | ||||||||||||||||||||||||
| eprintln!("[t1] RESERVE 1 cap"); | ||||||||||||||||||||||||
| stream1.reserve_capacity(1); | ||||||||||||||||||||||||
| stream1 = util::wait_for_capacity(stream1, 1).await; | ||||||||||||||||||||||||
| eprintln!("[t1] got 1 cap"); | ||||||||||||||||||||||||
| stream1.send_data("abcde".into(), true).unwrap(); | ||||||||||||||||||||||||
| }); | ||||||||||||||||||||||||
| let t2 = tokio::spawn(async move { | ||||||||||||||||||||||||
| for n in 0..3 { | ||||||||||||||||||||||||
| eprintln!("[t2] RESERVE 1 cap, loop {}", n); | ||||||||||||||||||||||||
| stream2.reserve_capacity(1); | ||||||||||||||||||||||||
| stream2 = util::wait_for_capacity(stream2, 1).await; | ||||||||||||||||||||||||
| eprintln!("[t2] got 1 cap, loop {}", n); | ||||||||||||||||||||||||
| stream2.send_data("abcde".into(), false).unwrap(); | ||||||||||||||||||||||||
| } | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| stream2.send_data("".into(), true).unwrap(); | ||||||||||||||||||||||||
| }); | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| t2.await.expect("srv body spawn"); | ||||||||||||||||||||||||
| t1.await.expect("srv body spawn"); | ||||||||||||||||||||||||
| t0.await.expect("srv end"); | ||||||||||||||||||||||||
|
Comment on lines
+1640
to
+1642
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. AFAIK the test is still wrong. We await for
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Never mind I misread. That being said, the test really doesn't help much finding where the issue with my patch is :/
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. h2/tests/h2-support/src/util.rs Lines 56 to 66 in 2c53d60
There, What do you think is wrong here? That |
||||||||||||||||||||||||
| }; | ||||||||||||||||||||||||
|
|
||||||||||||||||||||||||
| join(srv, client).await; | ||||||||||||||||||||||||
| } | ||||||||||||||||||||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I used to do this prior to calling
try_assign_capacity. AFAIU that was wrong, right?There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
That sounds right. The connection-level and stream-level should be updated together, but since updating the stream-level may make it exceed its requested capacity and thus needs to check for more capacity, that must happen in-between.
A code comment about the ordering being important may be prudent.