Skip to content

Commit fff0469

Browse files
committed
http2: document that RFC 7540 prioritization does not work with small payloads
http2: document that RFC 7540 prioritization does not work with small payloads

This change demonstrates that golang/go#75936 applies to the RFC 7540 write scheduler. A similar test will be added for the RFC 9218 write scheduler after support for it is incorporated within http2/server.go.

For golang/go#75936

Change-Id: I4e05dbeb0aab71942eb699b67383ef5b52c3ef4d
Reviewed-on: https://go-review.googlesource.com/c/net/+/714741
Reviewed-by: Nicholas Husin <[email protected]>
LUCI-TryBot-Result: Go LUCI <[email protected]>
Reviewed-by: Damien Neil <[email protected]>
1 parent f35e3a4 commit fff0469

File tree

1 file changed

+68
-0
lines changed

1 file changed

+68
-0
lines changed

http2/server_test.go

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5124,3 +5124,71 @@ func testServerSendDataAfterRequestBodyClose(t testing.TB) {
51245124
})
51255125
st.wantIdle()
51265126
}
5127+
5128+
// This test documents current behavior, rather than ideal behavior that we
// would necessarily like to see. Refer to go.dev/issues/75936 for details.
//
// TestServerRFC7540PrioritySmallPayload runs the real test body under the
// synctest harness so goroutine scheduling and timing are deterministic.
func TestServerRFC7540PrioritySmallPayload(t *testing.T) {
	synctestTest(t, testServerRFC7540PrioritySmallPayload)
}
5133+
func testServerRFC7540PrioritySmallPayload(t testing.TB) {
5134+
endTest := false
5135+
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
5136+
for !endTest {
5137+
w.Write([]byte("a"))
5138+
if f, ok := w.(http.Flusher); ok {
5139+
f.Flush()
5140+
}
5141+
}
5142+
}, func(s *Server) {
5143+
s.NewWriteScheduler = func() WriteScheduler {
5144+
return NewPriorityWriteScheduler(nil)
5145+
}
5146+
})
5147+
if syncConn, ok := st.cc.(*synctestNetConn); ok {
5148+
syncConn.SetReadBufferSize(1)
5149+
} else {
5150+
t.Fatal("Server connection is not synctestNetConn")
5151+
}
5152+
defer st.Close()
5153+
defer func() { endTest = true }()
5154+
st.greet()
5155+
5156+
// Create 5 streams with weight of 0, and another 5 streams with weight of
5157+
// 255.
5158+
// Since each stream receives an infinite number of bytes, we should expect
5159+
// to see that almost all of the response we get are for the streams with
5160+
// weight of 255.
5161+
for i := 1; i <= 19; i += 2 {
5162+
weight := 1
5163+
if i > 10 {
5164+
weight = 255
5165+
}
5166+
st.writeHeaders(HeadersFrameParam{
5167+
StreamID: uint32(i),
5168+
BlockFragment: st.encodeHeader(),
5169+
EndStream: true,
5170+
EndHeaders: true,
5171+
Priority: PriorityParam{StreamDep: 0, Weight: uint8(weight)},
5172+
})
5173+
synctest.Wait()
5174+
}
5175+
5176+
// In the current implementation however, the response we get are
5177+
// distributed equally amongst all the streams, regardless of weight.
5178+
streamWriteCount := make(map[uint32]int)
5179+
totalWriteCount := 10000
5180+
for range totalWriteCount {
5181+
f := st.readFrame()
5182+
if f == nil {
5183+
break
5184+
}
5185+
streamWriteCount[f.Header().StreamID] += 1
5186+
}
5187+
for streamID, writeCount := range streamWriteCount {
5188+
expectedWriteCount := totalWriteCount / len(streamWriteCount)
5189+
errorMargin := expectedWriteCount / 100
5190+
if writeCount >= expectedWriteCount+errorMargin || writeCount <= expectedWriteCount-errorMargin {
5191+
t.Errorf("Expected stream %v to receive %v±%v writes, got %v", streamID, expectedWriteCount, errorMargin, writeCount)
5192+
}
5193+
}
5194+
}

0 commit comments

Comments
 (0)