Skip to content

Commit 95b847e

Browse files
committed
interop-testing: Use separate event loops in RetryTest
The RetryTest was flaky, and it seems to have been caused by the client and server getting assigned to the same event loop. Separating the two reduces the flake rate from ~3% to less than 0.1% (no flakes in 1,000 runs). While I was here fixing the executors, I reduced the number of threads created and shut down the threads once they are no longer used. This had no impact on the flake rate (no flakes in 1,000 runs).
1 parent 7ba0718 commit 95b847e

File tree

1 file changed

+14
-5
lines changed
  • interop-testing/src/test/java/io/grpc/testing/integration

1 file changed

+14
-5
lines changed

interop-testing/src/test/java/io/grpc/testing/integration/RetryTest.java

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,7 @@
7777
import java.util.concurrent.LinkedBlockingQueue;
7878
import java.util.concurrent.TimeUnit;
7979
import java.util.concurrent.atomic.AtomicBoolean;
80+
import org.junit.After;
8081
import org.junit.Rule;
8182
import org.junit.Test;
8283
import org.junit.runner.RunWith;
@@ -110,7 +111,7 @@ public class RetryTest {
110111
mock(ClientCall.Listener.class, delegatesTo(testCallListener));
111112

112113
private CountDownLatch backoffLatch = new CountDownLatch(1);
113-
private final EventLoopGroup group = new DefaultEventLoopGroup() {
114+
private final EventLoopGroup clientGroup = new DefaultEventLoopGroup(1) {
114115
@SuppressWarnings("FutureReturnValueIgnored")
115116
@Override
116117
public ScheduledFuture<?> schedule(
@@ -122,7 +123,7 @@ public ScheduledFuture<?> schedule(
122123
new Runnable() {
123124
@Override
124125
public void run() {
125-
group.execute(command);
126+
clientGroup.execute(command);
126127
}
127128
},
128129
delay,
@@ -137,6 +138,7 @@ public void run() {} // no-op
137138
TimeUnit.NANOSECONDS);
138139
}
139140
};
141+
private final EventLoopGroup serverGroup = new DefaultEventLoopGroup(1);
140142
private final FakeStatsRecorder clientStatsRecorder = new FakeStatsRecorder();
141143
private final ClientInterceptor statsInterceptor =
142144
InternalCensusStatsAccessor.getClientInterceptor(
@@ -173,11 +175,18 @@ public Listener<String> startCall(ServerCall<String, Integer> call, Metadata hea
173175
private Map<String, Object> retryPolicy = null;
174176
private long bufferLimit = 1L << 20; // 1M
175177

178+
@After
179+
@SuppressWarnings("FutureReturnValueIgnored")
180+
public void tearDown() {
181+
clientGroup.shutdownGracefully();
182+
serverGroup.shutdownGracefully();
183+
}
184+
176185
private void startNewServer() throws Exception {
177186
localServer = cleanupRule.register(NettyServerBuilder.forAddress(localAddress)
178187
.channelType(LocalServerChannel.class)
179-
.bossEventLoopGroup(group)
180-
.workerEventLoopGroup(group)
188+
.bossEventLoopGroup(serverGroup)
189+
.workerEventLoopGroup(serverGroup)
181190
.addService(serviceDefinition)
182191
.build());
183192
localServer.start();
@@ -196,7 +205,7 @@ private void createNewChannel() {
196205
channel = cleanupRule.register(
197206
NettyChannelBuilder.forAddress(localAddress)
198207
.channelType(LocalChannel.class, LocalAddress.class)
199-
.eventLoopGroup(group)
208+
.eventLoopGroup(clientGroup)
200209
.usePlaintext()
201210
.enableRetry()
202211
.perRpcBufferLimit(bufferLimit)

0 commit comments

Comments
 (0)