Dataset schema:

| column | dtype | lengths / classes |
|---|---|---|
| instance_id | stringlengths | 17 - 39 |
| repo | stringclasses | 8 values |
| issue_id | stringlengths | 14 - 34 |
| pr_id | stringlengths | 14 - 34 |
| linking_methods | sequencelengths | 1 - 3 |
| base_commit | stringlengths | 40 - 40 |
| merge_commit | stringlengths | 0 - 40 |
| hints_text | sequencelengths | 0 - 106 |
| resolved_comments | sequencelengths | 0 - 119 |
| created_at | unknown | |
| labeled_as | sequencelengths | 0 - 7 |
| problem_title | stringlengths | 7 - 174 |
| problem_statement | stringlengths | 0 - 55.4k |
| gold_files | sequencelengths | 0 - 10 |
| gold_files_postpatch | sequencelengths | 1 - 10 |
| test_files | sequencelengths | 0 - 60 |
| gold_patch | stringlengths | 220 - 5.83M |
| test_patch | stringlengths | 386 - 194k |
| split_random | stringclasses | 3 values |
| split_time | stringclasses | 3 values |
| issue_start_time | unknown | |
| issue_created_at | unknown | |
| issue_by_user | stringlengths | 3 - 21 |
| split_repo | stringclasses | 3 values |

The records below list their fields in this column order.
instance_id: netty/netty/9995_9999
repo: netty/netty
issue_id: netty/netty/9995
pr_id: netty/netty/9999
linking_methods: [ "keyword_pr_to_issue" ]
base_commit: 56055f4404fbebeb6403165d352d62330a1bf81d
merge_commit: 2a5118f824afefa53f1cd199cf56dee0746b8688
hints_text:
[ "We love love contributions so feel free to submit a PR if you have time. Otherwise it may take a bit before I will have a chance to do it \n\n> Am 05.02.2020 um 04:12 schrieb Ruwei <notifications@github.com>:\n> \n> \n> Expected behavior\n> \n> I'm writing a smtp client based on netty. SmtpCommand lacks the AUTH command. It is used to login to an SMTP server.\n> \n> See it in https://www.ietf.org/rfc/rfc4954.txt\n> \n> Is it possible to add it ?\n> \n> Actual behavior\n> \n> Can not create an AUTH command.\n> \n> Steps to reproduce\n> \n> Minimal yet complete reproducer code (or URL to code)\n> \n> Netty version\n> \n> 4.1.45.Final\n> \n> JVM version (e.g. java -version)\n> \n> 11.0.1\n> \n> OS version (e.g. uname -a)\n> \n> Darwin Mac.local 19.2.0 Darwin Kernel Version 19.2.0: Sat Nov 9 03:47:04 PST 2019; root:xnu-6153.61.1~20/RELEASE_X86_64 x86_64\n> \n> —\n> You are receiving this because you are subscribed to this thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n", "@normanmaurer ok. I will do it :-)", "After send AUTH, the client need to send encoded username and password without any command, but SmtpCommand is a required field for SmtpRequest. So I think it's necessary to add an EMPTY command which means a request with only parameters. WDYT? @normanmaurer " ]
resolved_comments: []
created_at: "2020-02-06T08:37:35Z"
labeled_as: []
problem_title: SmtpCommand lacks AUTH
problem_statement:
### Expected behavior

I'm writing an SMTP client based on Netty. `SmtpCommand` lacks the AUTH command, which is used to log in to an SMTP server. See https://www.ietf.org/rfc/rfc4954.txt

Is it possible to add it?

### Actual behavior

An AUTH command cannot be created.

### Steps to reproduce

### Minimal yet complete reproducer code (or URL to code)

### Netty version

4.1.45.Final

### JVM version (e.g. `java -version`)

11.0.1

### OS version (e.g. `uname -a`)

Darwin Mac.local 19.2.0 Darwin Kernel Version 19.2.0: Sat Nov 9 03:47:04 PST 2019; root:xnu-6153.61.1~20/RELEASE_X86_64 x86_64
gold_files: [ "codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpCommand.java", "codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequestEncoder.java", "codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequests.java" ]
gold_files_postpatch: [ "codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpCommand.java", "codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequestEncoder.java", "codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequests.java" ]
test_files: [ "codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestEncoderTest.java" ]
gold_patch, then test_patch:
diff --git a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpCommand.java b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpCommand.java index 63ba3f5847f..a2c69b866df 100644 --- a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpCommand.java +++ b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpCommand.java @@ -31,6 +31,7 @@ public final class SmtpCommand { public static final SmtpCommand EHLO = new SmtpCommand(AsciiString.cached("EHLO")); public static final SmtpCommand HELO = new SmtpCommand(AsciiString.cached("HELO")); + public static final SmtpCommand AUTH = new SmtpCommand(AsciiString.cached("AUTH")); public static final SmtpCommand MAIL = new SmtpCommand(AsciiString.cached("MAIL")); public static final SmtpCommand RCPT = new SmtpCommand(AsciiString.cached("RCPT")); public static final SmtpCommand DATA = new SmtpCommand(AsciiString.cached("DATA")); @@ -40,11 +41,13 @@ public final class SmtpCommand { public static final SmtpCommand VRFY = new SmtpCommand(AsciiString.cached("VRFY")); public static final SmtpCommand HELP = new SmtpCommand(AsciiString.cached("HELP")); public static final SmtpCommand QUIT = new SmtpCommand(AsciiString.cached("QUIT")); + public static final SmtpCommand EMPTY = new SmtpCommand(AsciiString.cached("")); private static final Map<String, SmtpCommand> COMMANDS = new HashMap<String, SmtpCommand>(); static { COMMANDS.put(EHLO.name().toString(), EHLO); COMMANDS.put(HELO.name().toString(), HELO); + COMMANDS.put(AUTH.name().toString(), AUTH); COMMANDS.put(MAIL.name().toString(), MAIL); COMMANDS.put(RCPT.name().toString(), RCPT); COMMANDS.put(DATA.name().toString(), DATA); @@ -54,6 +57,7 @@ public final class SmtpCommand { COMMANDS.put(VRFY.name().toString(), VRFY); COMMANDS.put(HELP.name().toString(), HELP); COMMANDS.put(QUIT.name().toString(), QUIT); + COMMANDS.put(EMPTY.name().toString(), EMPTY); } /** diff --git a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequestEncoder.java b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequestEncoder.java index 9fb567f9916..0355a296d8b 100644 --- a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequestEncoder.java +++ b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequestEncoder.java @@ -58,7 +58,8 @@ protected void encode(ChannelHandlerContext ctx, Object msg, List<Object> out) t final ByteBuf buffer = ctx.alloc().buffer(); try { req.command().encode(buffer); - writeParameters(req.parameters(), buffer); + boolean notEmpty = req.command() != SmtpCommand.EMPTY; + writeParameters(req.parameters(), buffer, notEmpty); ByteBufUtil.writeShortBE(buffer, CRLF_SHORT); out.add(buffer); release = false; @@ -85,11 +86,13 @@ protected void encode(ChannelHandlerContext ctx, Object msg, List<Object> out) t } } - private static void writeParameters(List<CharSequence> parameters, ByteBuf out) { + private static void writeParameters(List<CharSequence> parameters, ByteBuf out, boolean commandNotEmpty) { if (parameters.isEmpty()) { return; } - out.writeByte(SP); + if (commandNotEmpty) { + out.writeByte(SP); + } if (parameters instanceof RandomAccess) { final int sizeMinusOne = parameters.size() - 1; for (int i = 0; i < sizeMinusOne; i++) { diff --git a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequests.java b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequests.java index 446c6fb9ddf..b9da1a3efe3 100644 --- a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequests.java +++ 
b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpRequests.java @@ -50,6 +50,20 @@ public static SmtpRequest ehlo(CharSequence hostname) { return new DefaultSmtpRequest(SmtpCommand.EHLO, hostname); } + /** + * Creates a {@code EMPTY} request. + */ + public static SmtpRequest empty(CharSequence... parameter) { + return new DefaultSmtpRequest(SmtpCommand.EMPTY, parameter); + } + + /** + * Creates a {@code AUTH} request. + */ + public static SmtpRequest auth(CharSequence... parameter) { + return new DefaultSmtpRequest(SmtpCommand.AUTH, parameter); + } + /** * Creates a {@code NOOP} request. */
diff --git a/codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestEncoderTest.java b/codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestEncoderTest.java index fb145149225..320bae4514d 100644 --- a/codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestEncoderTest.java +++ b/codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestEncoderTest.java @@ -38,6 +38,21 @@ public void testEncodeHelo() { testEncode(SmtpRequests.helo("localhost"), "HELO localhost\r\n"); } + @Test + public void testEncodeAuth() { + testEncode(SmtpRequests.auth("LOGIN"), "AUTH LOGIN\r\n"); + } + + @Test + public void testEncodeAuthWithParameter() { + testEncode(SmtpRequests.auth("PLAIN", "dGVzdAB0ZXN0ADEyMzQ="), "AUTH PLAIN dGVzdAB0ZXN0ADEyMzQ=\r\n"); + } + + @Test + public void testEncodeEmpty() { + testEncode(SmtpRequests.empty("dGVzdAB0ZXN0ADEyMzQ="), "dGVzdAB0ZXN0ADEyMzQ=\r\n"); + } + @Test public void testEncodeMail() { testEncode(SmtpRequests.mail("me@netty.io"), "MAIL FROM:<me@netty.io>\r\n");
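For context, a minimal sketch (not part of the patch) of how the two helpers introduced above, `SmtpRequests.auth(...)` and `SmtpRequests.empty(...)`, could drive an RFC 4954 AUTH LOGIN exchange; the channel setup and the handling of the server's intermediate 334 replies are assumed rather than shown:

```java
import io.netty.channel.Channel;
import io.netty.handler.codec.smtp.SmtpRequests;

import java.nio.charset.StandardCharsets;
import java.util.Base64;

final class AuthLoginExample {
    // Hypothetical helper: "channel" is assumed to already have an
    // SmtpRequestEncoder in its pipeline, and a real client would wait
    // for the server's 334 response between each write.
    static void authLogin(Channel channel, String username, String password) {
        Base64.Encoder b64 = Base64.getEncoder();
        // AUTH announces the mechanism (encoded as "AUTH LOGIN\r\n").
        channel.writeAndFlush(SmtpRequests.auth("LOGIN"));
        // The credentials are then sent as bare base64 lines with no command
        // word, which is exactly what the new SmtpCommand.EMPTY makes possible.
        channel.writeAndFlush(SmtpRequests.empty(
                b64.encodeToString(username.getBytes(StandardCharsets.US_ASCII))));
        channel.writeAndFlush(SmtpRequests.empty(
                b64.encodeToString(password.getBytes(StandardCharsets.US_ASCII))));
    }

    private AuthLoginExample() { }
}
```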
split_random: val
split_time: test
issue_start_time: "2020-02-06T09:02:31"
issue_created_at: "2020-02-05T03:12:36Z"
issue_by_user: yuanrw
split_repo: val
instance_id: netty/netty/10067_10069
repo: netty/netty
issue_id: netty/netty/10067
pr_id: netty/netty/10069
linking_methods: [ "keyword_pr_to_issue" ]
base_commit: 04532220152a69a04064537daffd9e57953b81c0
merge_commit: 7fce06a0d1f76865b1494fde99697c31a8a1f92f
hints_text: []
resolved_comments: [ "this is not correct (and also not the one above) as you will cause a `ClassCastException` later on if the `ChannelHandlerContext` does not wrap an `ChannelOutboundHandler`." ]
created_at: "2020-02-29T06:34:18Z"
labeled_as: []
problem_title: HttpContentCompressor doesn't work when put on an alternate EventExecutor
problem_statement:
### Expected behavior

The following isn't expected to mangle or lose the response content: `pipeline.addLast(eventExecutor, new HttpContentCompressor());`

### Actual behavior

The content may be mangled or completely lost.

### Steps to reproduce

`.addLast(compressorGroup, new HttpContentCompressor())`

### Minimal yet complete reproducer code (or URL to code)

https://github.com/atcurtis/netty/blob/e8dd352c8d17943df310ba1371329c0990e5bbb9/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java#L281

### Netty version

I checked 4.1.42.Final and 4.1.46.Final

### JVM version (e.g. `java -version`)

Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)

### OS version (e.g. `uname -a`)

3.10.0-957.21.3.el7.x86_64
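To make the setup concrete, a minimal sketch (class and group names are mine, not from the report) of a pipeline that puts `HttpContentCompressor` on an alternate executor; any handler added with a separate `EventExecutorGroup` exercises the same handler-lookup path that the fix below changes:

```java
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpContentCompressor;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.util.concurrent.DefaultEventExecutorGroup;

public class CompressorOffloadInitializer extends ChannelInitializer<SocketChannel> {
    // Hypothetical sizing: one thread is enough to move the compressor
    // off the channel's own event loop.
    private static final DefaultEventExecutorGroup COMPRESSOR_GROUP =
            new DefaultEventExecutorGroup(1);

    @Override
    protected void initChannel(SocketChannel ch) {
        ch.pipeline()
          .addLast(new HttpServerCodec())
          // The codec runs on the channel's event loop while the compressor
          // runs on COMPRESSOR_GROUP; before the fix, event propagation could
          // skip across this executor boundary and reorder or drop content.
          .addLast(COMPRESSOR_GROUP, new HttpContentCompressor());
    }
}
```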
gold_files: [ "transport/src/main/java/io/netty/channel/AbstractChannelHandlerContext.java", "transport/src/main/java/io/netty/channel/ChannelHandlerMask.java" ]
gold_files_postpatch: [ "transport/src/main/java/io/netty/channel/AbstractChannelHandlerContext.java", "transport/src/main/java/io/netty/channel/ChannelHandlerMask.java" ]
test_files: [ "codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java" ]
gold_patch, then test_patch:
diff --git a/transport/src/main/java/io/netty/channel/AbstractChannelHandlerContext.java b/transport/src/main/java/io/netty/channel/AbstractChannelHandlerContext.java index ec4c9b45be0..92c912a5000 100644 --- a/transport/src/main/java/io/netty/channel/AbstractChannelHandlerContext.java +++ b/transport/src/main/java/io/netty/channel/AbstractChannelHandlerContext.java @@ -51,6 +51,8 @@ import static io.netty.channel.ChannelHandlerMask.MASK_DISCONNECT; import static io.netty.channel.ChannelHandlerMask.MASK_EXCEPTION_CAUGHT; import static io.netty.channel.ChannelHandlerMask.MASK_FLUSH; +import static io.netty.channel.ChannelHandlerMask.MASK_ONLY_INBOUND; +import static io.netty.channel.ChannelHandlerMask.MASK_ONLY_OUTBOUND; import static io.netty.channel.ChannelHandlerMask.MASK_READ; import static io.netty.channel.ChannelHandlerMask.MASK_USER_EVENT_TRIGGERED; import static io.netty.channel.ChannelHandlerMask.MASK_WRITE; @@ -906,20 +908,33 @@ private boolean isNotValidPromise(ChannelPromise promise, boolean allowVoidPromi private AbstractChannelHandlerContext findContextInbound(int mask) { AbstractChannelHandlerContext ctx = this; + EventExecutor currentExecutor = executor(); do { ctx = ctx.next; - } while ((ctx.executionMask & mask) == 0); + } while (skipContext(ctx, currentExecutor, mask, MASK_ONLY_INBOUND)); return ctx; } private AbstractChannelHandlerContext findContextOutbound(int mask) { AbstractChannelHandlerContext ctx = this; + EventExecutor currentExecutor = executor(); do { ctx = ctx.prev; - } while ((ctx.executionMask & mask) == 0); + } while (skipContext(ctx, currentExecutor, mask, MASK_ONLY_OUTBOUND)); return ctx; } + private static boolean skipContext( + AbstractChannelHandlerContext ctx, EventExecutor currentExecutor, int mask, int onlyMask) { + // Ensure we correctly handle MASK_EXCEPTION_CAUGHT which is not included in the MASK_EXCEPTION_CAUGHT + return (ctx.executionMask & (onlyMask | mask)) == 0 || + // We can only skip if the EventExecutor is the same as otherwise we need to ensure we offload + // everything to preserve ordering. 
+ // + // See https://github.com/netty/netty/issues/10067 + (ctx.executor() == currentExecutor && (ctx.executionMask & mask) == 0); + } + @Override public ChannelPromise voidPromise() { return channel().voidPromise(); diff --git a/transport/src/main/java/io/netty/channel/ChannelHandlerMask.java b/transport/src/main/java/io/netty/channel/ChannelHandlerMask.java index 526006a84e3..dad793cf38c 100644 --- a/transport/src/main/java/io/netty/channel/ChannelHandlerMask.java +++ b/transport/src/main/java/io/netty/channel/ChannelHandlerMask.java @@ -54,11 +54,13 @@ final class ChannelHandlerMask { static final int MASK_WRITE = 1 << 15; static final int MASK_FLUSH = 1 << 16; - private static final int MASK_ALL_INBOUND = MASK_EXCEPTION_CAUGHT | MASK_CHANNEL_REGISTERED | + static final int MASK_ONLY_INBOUND = MASK_CHANNEL_REGISTERED | MASK_CHANNEL_UNREGISTERED | MASK_CHANNEL_ACTIVE | MASK_CHANNEL_INACTIVE | MASK_CHANNEL_READ | MASK_CHANNEL_READ_COMPLETE | MASK_USER_EVENT_TRIGGERED | MASK_CHANNEL_WRITABILITY_CHANGED; - private static final int MASK_ALL_OUTBOUND = MASK_EXCEPTION_CAUGHT | MASK_BIND | MASK_CONNECT | MASK_DISCONNECT | + private static final int MASK_ALL_INBOUND = MASK_EXCEPTION_CAUGHT | MASK_ONLY_INBOUND; + static final int MASK_ONLY_OUTBOUND = MASK_BIND | MASK_CONNECT | MASK_DISCONNECT | MASK_CLOSE | MASK_DEREGISTER | MASK_READ | MASK_WRITE | MASK_FLUSH; + private static final int MASK_ALL_OUTBOUND = MASK_EXCEPTION_CAUGHT | MASK_ONLY_OUTBOUND; private static final FastThreadLocal<Map<Class<? extends ChannelHandler>, Integer>> MASKS = new FastThreadLocal<Map<Class<? extends ChannelHandler>, Integer>>() {
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java index d0676fd907a..bb43b3c9380 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java @@ -15,14 +15,32 @@ */ package io.netty.handler.codec.http; +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.channel.DefaultEventLoopGroup; +import io.netty.channel.EventLoopGroup; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.channel.local.LocalAddress; +import io.netty.channel.local.LocalChannel; +import io.netty.channel.local.LocalServerChannel; import io.netty.handler.codec.DecoderResult; import io.netty.handler.codec.EncoderException; import io.netty.handler.codec.compression.ZlibWrapper; import io.netty.util.CharsetUtil; import io.netty.util.ReferenceCountUtil; +import java.util.UUID; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; import org.junit.Test; import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; @@ -259,6 +277,104 @@ public void testFullContent() throws Exception { assertThat(ch.readOutbound(), is(nullValue())); } + @Test + public void testExecutorPreserveOrdering() throws Exception { + final EventLoopGroup compressorGroup = new DefaultEventLoopGroup(1); + EventLoopGroup localGroup = new DefaultEventLoopGroup(1); + Channel server = null; + Channel client = null; + try { + ServerBootstrap bootstrap = new ServerBootstrap() + .channel(LocalServerChannel.class) + .group(localGroup) + .childHandler(new ChannelInitializer<LocalChannel>() { + @Override + protected void initChannel(LocalChannel ch) throws Exception { + ch.pipeline() + .addLast(new HttpServerCodec()) + .addLast(new HttpObjectAggregator(1024)) + .addLast(compressorGroup, new HttpContentCompressor()) + .addLast(new ChannelOutboundHandlerAdapter() { + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) + throws Exception { + super.write(ctx, msg, promise); + } + }) + .addLast(new ChannelInboundHandlerAdapter() { + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + if (msg instanceof FullHttpRequest) { + FullHttpResponse res = + new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, + Unpooled.copiedBuffer("Hello, World", CharsetUtil.US_ASCII)); + ctx.writeAndFlush(res); + ReferenceCountUtil.release(msg); + return; + } + super.channelRead(ctx, msg); + } + }); + } + }); + + LocalAddress address = new LocalAddress(UUID.randomUUID().toString()); + server = bootstrap.bind(address).sync().channel(); + + final BlockingQueue<HttpObject> responses = new LinkedBlockingQueue<HttpObject>(); + + client = new Bootstrap() + .channel(LocalChannel.class) + .remoteAddress(address) + .group(localGroup) + .handler(new ChannelInitializer<LocalChannel>() { + @Override + protected void 
initChannel(LocalChannel ch) throws Exception { + ch.pipeline().addLast(new HttpClientCodec()).addLast(new ChannelInboundHandlerAdapter() { + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + if (msg instanceof HttpObject) { + responses.put((HttpObject) msg); + return; + } + super.channelRead(ctx, msg); + } + }); + } + }).connect().sync().channel(); + + client.writeAndFlush(newRequest()).sync(); + + assertEncodedResponse((HttpResponse) responses.poll(1, TimeUnit.SECONDS)); + HttpContent c = (HttpContent) responses.poll(1, TimeUnit.SECONDS); + assertNotNull(c); + assertThat(ByteBufUtil.hexDump(c.content()), + is("1f8b0800000000000000f248cdc9c9d75108cf2fca4901000000ffff")); + c.release(); + + c = (HttpContent) responses.poll(1, TimeUnit.SECONDS); + assertNotNull(c); + assertThat(ByteBufUtil.hexDump(c.content()), is("0300c6865b260c000000")); + c.release(); + + LastHttpContent last = (LastHttpContent) responses.poll(1, TimeUnit.SECONDS); + assertNotNull(last); + assertThat(last.content().readableBytes(), is(0)); + last.release(); + + assertNull(responses.poll(1, TimeUnit.SECONDS)); + } finally { + if (client != null) { + client.close().sync(); + } + if (server != null) { + server.close().sync(); + } + compressorGroup.shutdownGracefully(); + localGroup.shutdownGracefully(); + } + } + /** * If the length of the content is unknown, {@link HttpContentEncoder} should not skip encoding the content * even if the actual length is turned out to be 0. @@ -543,7 +659,10 @@ private static void assertEncodedResponse(EmbeddedChannel ch) { Object o = ch.readOutbound(); assertThat(o, is(instanceOf(HttpResponse.class))); - HttpResponse res = (HttpResponse) o; + assertEncodedResponse((HttpResponse) o); + } + + private static void assertEncodedResponse(HttpResponse res) { assertThat(res, is(not(instanceOf(HttpContent.class)))); assertThat(res.headers().get(HttpHeaderNames.TRANSFER_ENCODING), is("chunked")); assertThat(res.headers().get(HttpHeaderNames.CONTENT_LENGTH), is(nullValue()));
split_random: val
split_time: test
issue_start_time: "2020-03-02T08:38:45"
issue_created_at: "2020-02-29T06:30:30Z"
issue_by_user: atcurtis
split_repo: val
instance_id: netty/netty/10072_10075
repo: netty/netty
issue_id: netty/netty/10072
pr_id: netty/netty/10075
linking_methods: [ "keyword_pr_to_issue" ]
base_commit: 1b0e3d95f43b710261695b1731522ed3b6d29e0d
merge_commit: 7b946a781e9ed78d9283f7907aeb4b8471dc9a3a
hints_text: [ "And I'm happy to work on a patch once I get a 👍 that someone else agrees this is a problem.", "@bryce-anderson yes this sounds like a bug.", "so yep please work on a fix @bryce-anderson :)" ]
resolved_comments: [ "This feels like something that almost certainly exists somewhere else.", "This change isn't strictly necessary for this patch but I spent a fair amount of time trying to read through this for the nth time and decided to clean it up so the control flow (when the method returns etc, _**not**_ H2 flow control) is more readable. If desired, we can move it to a different patch.", "These are also not strictly necessary but part of the general cleanup I was attempting to do so this is more readable in the future.", "Just use bytebuf.writeZero(size); ?", "Would you prefer I replace the body of this method with\r\n`UnpooledByteBufAllocator.DEFAULT.buffer().writeZero(size)` or inline it in the best?", "I dont care... I was just mention it as you said there is most likely something like this already :)", "nit: private final static " ]
created_at: "2020-03-02T20:32:27Z"
labeled_as: []
problem_title: AbstractHttp2StreamChannel fails to flush window update under certain read patterns
problem_statement:
### Expected behavior

Window updates get flushed by `AbstractHttp2StreamChannel`.

### Actual behavior

Under certain conditions that doesn't happen.

### Steps to reproduce

### Minimal yet complete reproducer code (or URL to code)

I have a test case that is really ugly and should probably be condensed, but it illustrates the issue. I've annotated it with values observed with a debugger during its execution.

```java
static ByteBuf bb(int size) {
    ByteBuf buf = UnpooledByteBufAllocator.DEFAULT.buffer();
    for (int i = 0; i < size; i++) {
        buf.writeByte('a');
    }
    return buf;
}

class FlushSniffer extends ChannelOutboundHandlerAdapter {

    private boolean didFlush;

    public boolean checkFlush() {
        boolean r = didFlush;
        didFlush = false;
        return r;
    }

    @Override
    public void flush(ChannelHandlerContext ctx) throws Exception {
        didFlush = true;
        super.flush(ctx);
    }
}

@Test
public void windowUpdatesAreFlushed() {
    LastInboundHandler inboundHandler = new LastInboundHandler();
    FlushSniffer flushSniffer = new FlushSniffer();
    parentChannel.pipeline().addFirst(flushSniffer);

    Http2StreamChannel childChannel = newInboundStream(3, false, inboundHandler);
    assertTrue(childChannel.config().isAutoRead());
    childChannel.config().setAutoRead(false);
    assertFalse(childChannel.config().isAutoRead());

    Http2HeadersFrame headersFrame = inboundHandler.readInbound();
    assertNotNull(headersFrame);

    // From the headers.
    assertTrue(flushSniffer.checkFlush());

    // childChannel.readState == IN_PROGRESS due to initial auto-read.
    frameInboundWriter.writeInboundData(childChannel.stream().id(), bb(1), 0, false);
    // readState == IDLE, flowControlledBytes = 1
    frameInboundWriter.writeInboundData(childChannel.stream().id(), bb(1020), 0, false);
    // readState == IDLE, flowControlledBytes = 1, 1 message(1020) buffered.
    assertTrue(flushSniffer.checkFlush());

    // This will trigger a read of the second message (1020 bytes).
    childChannel.read();
    assertTrue(flushSniffer.checkFlush()); // fails. This should have flushed the window update of 1.
    // flowControlledBytes = 1020
    childChannel.read(); // triggered a flow control window update in DefaultHttp2LocalFlowController:465
    // flowControlledBytes = 0 <- we wrote the window update but didn't flush!
    assertTrue(flushSniffer.checkFlush()); // fails: no flush was called.

    Http2DataFrame data = inboundHandler.blockingReadInbound();
    release(data);
}
```

There are a few problems that I see so far:

* One is that on `beginRead()` we will write any flow bytes as an update frame and enter `doBeginRead()`, but if there are no messages to be had, we break out of the loop without flushing [here](https://github.com/netty/netty/blob/netty-4.1.43.Final/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java#L805-L810).
* A second problem is that when we do write the window update, we check if the promise is complete and, if it is, we assume it has been flushed [here](https://github.com/netty/netty/blob/netty-4.1.43.Final/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java#L848-L853). However, the `Http2FrameCodec` intercepts these frames and always just sets the promise to done [here](https://github.com/netty/netty/blob/4.1/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodec.java#L313). This may cause an explicit `flush()` call to be ineffective, as the stream channel attempts to be clever and elide unnecessary flushes [here](https://github.com/netty/netty/blob/netty-4.1.43.Final/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java#L1034).

### Netty version

branch 4.1, SHA e0d73bca4dd36910c7cdbe3d577c51aeb8287cc6 (Fri Feb 28)
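Not from the report, but to make the failure mode concrete: the fix protects the standard pull-based consumer pattern sketched below, which relies on every `read()` eventually flushing its `WINDOW_UPDATE`; if the update is written but never flushed, the peer's flow-control window never refills and the stream stalls.

```java
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http2.Http2DataFrame;

// Assumes the stream channel was configured with setAutoRead(false).
public class PullBasedDataHandler extends SimpleChannelInboundHandler<Http2DataFrame> {
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, Http2DataFrame data) {
        // Consume the payload; SimpleChannelInboundHandler releases it afterwards,
        // which credits bytes back to the local flow-control window.
        process(data);
    }

    @Override
    public void channelReadComplete(ChannelHandlerContext ctx) {
        // Request the next frame only when ready. The bug meant the WINDOW_UPDATE
        // written on this read() path was sometimes never flushed to the peer.
        ctx.read();
    }

    private void process(Http2DataFrame data) {
        // Application-specific handling (placeholder).
    }
}
```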
gold_files: [ "codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java" ]
gold_files_postpatch: [ "codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java" ]
test_files: [ "codec-http2/src/test/java/io/netty/handler/codec/http2/Http2MultiplexTest.java", "codec-http2/src/test/java/io/netty/handler/codec/http2/Http2TestUtil.java" ]
gold_patch, then test_patch:
diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java index 547d724f556..92abf19a246 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java @@ -558,10 +558,7 @@ void fireChildRead(Http2Frame frame) { // read (unknown, reset) and the trade off is less conditionals for the hot path (headers/data) at the // cost of additional readComplete notifications on the rare path. if (allocHandle.continueReading()) { - if (!readCompletePending) { - readCompletePending = true; - addChannelToReadCompletePendingQueue(); - } + maybeAddChannelToReadCompletePendingQueue(); } else { unsafe.notifyReadComplete(allocHandle, true); } @@ -807,6 +804,9 @@ void doBeginRead() { if (readEOS) { unsafe.closeForcibly(); } + // We need to double check that there is nothing left to flush such as a + // window update frame. + flush(); break; } final RecvByteBufAllocator.Handle allocHandle = recvBufAllocHandle(); @@ -822,10 +822,7 @@ void doBeginRead() { // currently reading it is possible that more frames will be delivered to this child channel. In // the case that this child channel still wants to read we delay the channelReadComplete on this // child channel until the parent is done reading. - if (!readCompletePending) { - readCompletePending = true; - addChannelToReadCompletePendingQueue(); - } + maybeAddChannelToReadCompletePendingQueue(); } else { notifyReadComplete(allocHandle, true); } @@ -841,6 +838,10 @@ private void updateLocalWindowIfNeeded() { int bytes = flowControlledBytes; flowControlledBytes = 0; ChannelFuture future = write0(parentContext(), new DefaultHttp2WindowUpdateFrame(bytes).stream(stream)); + // window update frames are commonly swallowed by the Http2FrameCodec and the promise is synchronously + // completed but the flow controller _may_ have generated a wire level WINDOW_UPDATE. Therefore we need, + // to assume there was a write done that needs to be flushed or we risk flow control starvation. + writeDoneAndNoFlush = true; // Add a listener which will notify and teardown the stream // when a window update fails if needed or check the result of the future directly if it was completed // already. @@ -849,7 +850,6 @@ private void updateLocalWindowIfNeeded() { windowUpdateFrameWriteComplete(future, AbstractHttp2StreamChannel.this); } else { future.addListener(windowUpdateFrameWriteListener); - writeDoneAndNoFlush = true; } } } @@ -920,58 +920,57 @@ public void write(Object msg, final ChannelPromise promise) { try { if (msg instanceof Http2StreamFrame) { Http2StreamFrame frame = validateStreamFrame((Http2StreamFrame) msg).stream(stream()); - if (!firstFrameWritten && !isStreamIdValid(stream().id())) { - if (!(frame instanceof Http2HeadersFrame)) { - ReferenceCountUtil.release(frame); - promise.setFailure( - new IllegalArgumentException("The first frame must be a headers frame. 
Was: " - + frame.name())); - return; - } - firstFrameWritten = true; - ChannelFuture f = write0(parentContext(), frame); - if (f.isDone()) { - firstWriteComplete(f, promise); - } else { - final long bytes = FlowControlledFrameSizeEstimator.HANDLE_INSTANCE.size(msg); - incrementPendingOutboundBytes(bytes, false); - f.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) { - firstWriteComplete(future, promise); - decrementPendingOutboundBytes(bytes, false); - } - }); - writeDoneAndNoFlush = true; - } - return; - } + writeHttp2StreamFrame(frame, promise); } else { String msgStr = msg.toString(); ReferenceCountUtil.release(msg); promise.setFailure(new IllegalArgumentException( "Message must be an " + StringUtil.simpleClassName(Http2StreamFrame.class) + ": " + msgStr)); - return; } + } catch (Throwable t) { + promise.tryFailure(t); + } + } - ChannelFuture f = write0(parentContext(), msg); - if (f.isDone()) { - writeComplete(f, promise); + private void writeHttp2StreamFrame(Http2StreamFrame frame, final ChannelPromise promise) { + if (!firstFrameWritten && !isStreamIdValid(stream().id()) && !(frame instanceof Http2HeadersFrame)) { + ReferenceCountUtil.release(frame); + promise.setFailure( + new IllegalArgumentException("The first frame must be a headers frame. Was: " + + frame.name())); + return; + } + + final boolean firstWrite; + if (firstFrameWritten) { + firstWrite = false; + } else { + firstWrite = firstFrameWritten = true; + } + + ChannelFuture f = write0(parentContext(), frame); + if (f.isDone()) { + if (firstWrite) { + firstWriteComplete(f, promise); } else { - final long bytes = FlowControlledFrameSizeEstimator.HANDLE_INSTANCE.size(msg); - incrementPendingOutboundBytes(bytes, false); - f.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) { + writeComplete(f, promise); + } + } else { + final long bytes = FlowControlledFrameSizeEstimator.HANDLE_INSTANCE.size(frame); + incrementPendingOutboundBytes(bytes, false); + f.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) { + if (firstWrite) { + firstWriteComplete(future, promise); + } else { writeComplete(future, promise); - decrementPendingOutboundBytes(bytes, false); } - }); - writeDoneAndNoFlush = true; - } - } catch (Throwable t) { - promise.tryFailure(t); + decrementPendingOutboundBytes(bytes, false); + } + }); + writeDoneAndNoFlush = true; } } @@ -1084,6 +1083,13 @@ public ChannelConfig setRecvByteBufAllocator(RecvByteBufAllocator allocator) { } } + private void maybeAddChannelToReadCompletePendingQueue() { + if (!readCompletePending) { + readCompletePending = true; + addChannelToReadCompletePendingQueue(); + } + } + protected void flush0(ChannelHandlerContext ctx) { ctx.flush(); }
diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2MultiplexTest.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2MultiplexTest.java index 8dcc579797c..c1207f686e0 100644 --- a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2MultiplexTest.java +++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2MultiplexTest.java @@ -22,6 +22,7 @@ import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelOutboundHandlerAdapter; import io.netty.channel.ChannelPromise; import io.netty.channel.WriteBufferWaterMark; import io.netty.channel.embedded.EmbeddedChannel; @@ -63,7 +64,6 @@ import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyShort; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.never; @@ -1152,6 +1152,66 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) { verifyFramesMultiplexedToCorrectChannel(childChannel, inboundHandler, 3); } + private static final class FlushSniffer extends ChannelOutboundHandlerAdapter { + + private boolean didFlush; + + public boolean checkFlush() { + boolean r = didFlush; + didFlush = false; + return r; + } + + @Override + public void flush(ChannelHandlerContext ctx) throws Exception { + didFlush = true; + super.flush(ctx); + } + } + + @Test + public void windowUpdatesAreFlushed() { + LastInboundHandler inboundHandler = new LastInboundHandler(); + FlushSniffer flushSniffer = new FlushSniffer(); + parentChannel.pipeline().addFirst(flushSniffer); + + Http2StreamChannel childChannel = newInboundStream(3, false, inboundHandler); + assertTrue(childChannel.config().isAutoRead()); + childChannel.config().setAutoRead(false); + assertFalse(childChannel.config().isAutoRead()); + + Http2HeadersFrame headersFrame = inboundHandler.readInbound(); + assertNotNull(headersFrame); + + assertTrue(flushSniffer.checkFlush()); + + // Write some bytes to get the channel into the idle state with buffered data and also verify we + // do not dispatch it until we receive a read() call. + frameInboundWriter.writeInboundData(childChannel.stream().id(), bb(16 * 1024), 0, false); + frameInboundWriter.writeInboundData(childChannel.stream().id(), bb(16 * 1024), 0, false); + assertTrue(flushSniffer.checkFlush()); + + verify(frameWriter, never()).writeWindowUpdate(eqCodecCtx(), anyInt(), anyInt(), anyChannelPromise()); + // only the first one was read because it was legacy auto-read behavior. + verifyFramesMultiplexedToCorrectChannel(childChannel, inboundHandler, 1); + assertFalse(flushSniffer.checkFlush()); + + // Trigger a read of the second frame. + childChannel.read(); + verifyFramesMultiplexedToCorrectChannel(childChannel, inboundHandler, 1); + // We expect a flush here because the StreamChannel will flush the smaller increment but the + // connection will collect the bytes and decide not to send a wire level frame until more are consumed. + assertTrue(flushSniffer.checkFlush()); + verify(frameWriter, never()).writeWindowUpdate(eqCodecCtx(), anyInt(), anyInt(), anyChannelPromise()); + + // Call read one more time which should trigger the writing of the flow control update. 
+ childChannel.read(); + verify(frameWriter).writeWindowUpdate(eqCodecCtx(), eq(0), eq(32 * 1024), anyChannelPromise()); + verify(frameWriter).writeWindowUpdate( + eqCodecCtx(), eq(childChannel.stream().id()), eq(32 * 1024), anyChannelPromise()); + assertTrue(flushSniffer.checkFlush()); + } + private static void verifyFramesMultiplexedToCorrectChannel(Http2StreamChannel streamChannel, LastInboundHandler inboundHandler, int numFrames) { diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2TestUtil.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2TestUtil.java index 6fa3449709a..ff420bf6dad 100644 --- a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2TestUtil.java +++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2TestUtil.java @@ -687,6 +687,10 @@ static ByteBuf bb(String s) { return ByteBufUtil.writeUtf8(UnpooledByteBufAllocator.DEFAULT, s); } + static ByteBuf bb(int size) { + return UnpooledByteBufAllocator.DEFAULT.buffer().writeZero(size); + } + static void assertEqualsAndRelease(Http2Frame expected, Http2Frame actual) { try { assertEquals(expected, actual);
split_random: test
split_time: test
issue_start_time: "2020-03-03T14:34:51"
issue_created_at: "2020-03-01T15:53:57Z"
issue_by_user: bryce-anderson
split_repo: val
instance_id: netty/netty/10123_10128
repo: netty/netty
issue_id: netty/netty/10123
pr_id: netty/netty/10128
linking_methods: [ "keyword_pr_to_issue" ]
base_commit: 99aca1879fcd69e4beecd100dcff5bec61eb2b84
merge_commit: 913cae6fef1f708238918cbdf3b25db03c805604
hints_text:
[ "Hi! \r\nI agree that the approach using the instrinsic is to be preferred, but I'm curious about the impact: do we have benchmarks that measure the benefit of replacing it on `PoolThreadCache` and mimic a real case usage?", "Originally I noticed the log2 method in a async profiler cpu framegraph for an application, it is reported as 3% of Context.flush method and 0.4% of total.\r\nPrepared a test with Allocator: https://gist.github.com/netudima/7984e90458c40fde00e0b6fec6d192f8\r\n\r\nLoop based:\r\n```\r\nBenchmark Mode Cnt Score Error Units\r\nNettyAllocatorBenchmark.getAndRelease avgt 15 101.912 ± 0.489 ns/op\r\n```\r\n\r\nnumberOfLeadingZeros based:\r\n```\r\nBenchmark Mode Cnt Score Error Units\r\nNettyAllocatorBenchmark.getAndRelease avgt 15 99.826 ± 1.293 ns/op\r\n```\r\nnumberOfLeadingZeros is a bit faster. \r\nThe difference is not so large because io.netty.buffer.PoolThreadCache#cacheForNormal has \r\n```\r\nnormCapacity >> numShiftsNormalDirect\r\n``` \r\nlogic and due to this shift the input values for the log2 function are usually small (numShiftsNormalDirect by default is 13).\r\nIn case of small values the difference between the log2 options is not so large, for example if value = 2:\r\n```\r\nBenchmark Mode Cnt Score Error Units\r\nNettyLog2Benchmark.bits avgt 15 2.687 ± 0.010 ns/op\r\nNettyLog2Benchmark.cycle avgt 15 2.705 ± 0.024 ns/op\r\nNettyLog2Benchmark.leadingZeroes avgt 15 1.995 ± 0.023 ns/op\r\n```\r\nleadingZeroes is still faster and does not depend on a value, at the same time the loop is not much slower due to the small number of iterations.", "I suggest to minic some real usage of it by changing https://gist.github.com/netudima/7984e90458c40fde00e0b6fec6d192f8#file-nettyallocatorbenchmark-java-L53 into:\r\n```java\r\n\r\n @Param({\"0\", \"5\", \"10\", \"100\"})\r\n public long dutyTokens;\r\n\r\n @Benchmark\r\n public void getAndRelease(MyState state) {\r\n ByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;\r\n ByteBuf buf = alloc.directBuffer(state.value);\r\n final long tokens = dutyTokens;\r\n if (tokens > 0)\r\n BlackHole::consumeCPU(tokens);\r\n buf.release();\r\n }\r\n```\r\nNot super good, would be even better to use a `final static long DUTY_TOKENS` to be set with a sys property, but it's ok anyway. 
Try this with different values of dutyTokens and let me know if it makes a big difference.\r\n\r\nTip: beware considering `async-profiler` output for small inlineable methods as the source of truth (see http://psy-lob-saw.blogspot.com/2018/07/how-inlined-code-confusing-profiles.html) prefer reading `perfasm` output instead to be sure of what's going on (skid permitting)!", "```\r\n final long tokens = dutyTokens;\r\n if (tokens > 0)\r\n BlackHole::consumerCPU(tokens);\r\n buf.release();\r\n```\r\n\r\n@franz1981 it actually could be simplified to:\r\n\r\n`return buf.release();` with replacing the `state.value` to `@param`.\r\nBut anyway, micro optimization seems nice.", "@doom369 yep, my point was more related to validate the improvement against some more realistic behaviour ie with some cpu activity between each alloc-release.\r\nLooping n times summing bytes read from the buffer would have been ok too, but is more prone to cache misses depending on the arena configuration, size of caches etc etc: consumeCPU tends to be more deterministic if tokens are not too few...", "Anyway, feel free to send the PR \"regardless\" the result of the bench (hopefully positive): it's something that makes sense to change :+1: ", "Thank you for the feedback and the tip.\r\nThe results of the bench: https://gist.github.com/netudima/787176639eac62ae8e4ab8430f95e883\r\n\r\nLoop:\r\n```\r\nBenchmark (dutyTokens) Mode Cnt Score Error Units\r\nNettyAllocatorBenchmark.getAndRelease 0 avgt 15 104.455 ± 1.604 ns/op\r\nNettyAllocatorBenchmark.getAndRelease 5 avgt 15 107.287 ± 1.437 ns/op\r\nNettyAllocatorBenchmark.getAndRelease 10 avgt 15 113.684 ± 4.852 ns/op\r\nNettyAllocatorBenchmark.getAndRelease 100 avgt 15 258.181 ± 2.718 ns/op\r\n```\r\n\r\nLeadingZeroes:\r\n```\r\nBenchmark (dutyTokens) Mode Cnt Score Error Units\r\nNettyAllocatorBenchmark.getAndRelease 0 avgt 15 97.994 ± 0.797 ns/op\r\nNettyAllocatorBenchmark.getAndRelease 5 avgt 15 102.724 ± 1.672 ns/op\r\nNettyAllocatorBenchmark.getAndRelease 10 avgt 15 106.537 ± 1.813 ns/op\r\nNettyAllocatorBenchmark.getAndRelease 100 avgt 15 255.368 ± 2.089 ns/op\r\n```\r\nI will send a PR soon.", "PR: https://github.com/netty/netty/pull/10128" ]
resolved_comments: []
created_at: "2020-03-22T21:06:58Z"
labeled_as: []
problem_title: Optimize log2 in PoolThreadCache
problem_statement:
Optimize log2 in PoolThreadCache

Currently `io.netty.buffer.PoolThreadCache` uses a log2 implementation based on a shift loop. This is not the most efficient approach: https://gist.github.com/netudima/e1e8206bf58ff0ab6e2d637a4362bfc2

```
NettyLog2Benchmark.bits          avgt   25   2.612 ± 0.009  ns/op
NettyLog2Benchmark.cycle         avgt   25  10.909 ± 0.157  ns/op
NettyLog2Benchmark.leadingZeroes avgt   25   1.921 ± 0.006  ns/op

# JMH version: 1.21
# VM version: JDK 11.0.5, OpenJDK 64-Bit Server VM, 11.0.5+10-b520.38
# CPU: 2,6 GHz 6-Core Intel Core i7
```

It looks like for current versions of the JDK, using `Integer.numberOfLeadingZeros` (an intrinsic) is the optimal approach.

Note: `PoolChunk` also has a log2, but it is already implemented with `Integer.numberOfLeadingZeros`.

## Netty version

4.1.48, 4.0.56

### JVM version (e.g. `java -version`)

JDK 11.0.5, OpenJDK 64-Bit Server VM, 11.0.5+10-b520.38

### OS version (e.g. `uname -a`)

Darwin MBP 19.3.0 Darwin Kernel Version 19.3.0: Thu Jan 9 20:58:23 PST 2020; root:xnu-6153.81.5~1/RELEASE_X86_64 x86_64
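To see the equivalence the issue relies on, here is a small self-contained check (harness mine, not from the issue) that the shift-loop form and the `Integer.numberOfLeadingZeros` form compute the same values for all inputs greater than zero:

```java
public final class Log2Demo {
    // The shift-loop implementation PoolThreadCache used before this change.
    static int log2Loop(int val) {
        int res = 0;
        while (val > 1) {
            val >>= 1;
            res++;
        }
        return res;
    }

    // The intrinsic-based form from the patch below; valid for val > 0.
    static int log2Intrinsic(int val) {
        return (Integer.SIZE - 1) - Integer.numberOfLeadingZeros(val);
    }

    public static void main(String[] args) {
        for (int val = 1; val <= 1 << 20; val++) {
            if (log2Loop(val) != log2Intrinsic(val)) {
                throw new AssertionError("mismatch at " + val);
            }
        }
        System.out.println("both forms agree for 1..2^20");
    }
}
```

The intrinsic form compiles to a single hardware instruction on common platforms, so its cost is constant, while the loop's cost grows with the magnitude of the input.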
gold_files: [ "buffer/src/main/java/io/netty/buffer/PoolThreadCache.java" ]
gold_files_postpatch: [ "buffer/src/main/java/io/netty/buffer/PoolThreadCache.java" ]
test_files: []
gold_patch:
diff --git a/buffer/src/main/java/io/netty/buffer/PoolThreadCache.java b/buffer/src/main/java/io/netty/buffer/PoolThreadCache.java index c1bf7e6d074..92e46a0f02d 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolThreadCache.java +++ b/buffer/src/main/java/io/netty/buffer/PoolThreadCache.java @@ -42,6 +42,7 @@ final class PoolThreadCache { private static final InternalLogger logger = InternalLoggerFactory.getInstance(PoolThreadCache.class); + private static final int INTEGER_SIZE_MINUS_ONE = Integer.SIZE - 1; final PoolArena<byte[]> heapArena; final PoolArena<ByteBuffer> directArena; @@ -151,13 +152,9 @@ private static <T> MemoryRegionCache<T>[] createNormalCaches( } } + // val > 0 private static int log2(int val) { - int res = 0; - while (val > 1) { - val >>= 1; - res++; - } - return res; + return INTEGER_SIZE_MINUS_ONE - Integer.numberOfLeadingZeros(val); } /** @@ -323,6 +320,7 @@ private MemoryRegionCache<?> cacheForSmall(PoolArena<?> area, int normCapacity) private MemoryRegionCache<?> cacheForNormal(PoolArena<?> area, int normCapacity) { if (area.isDirect()) { + // sizeClass == Normal => normCapacity >= pageSize => the shifted value > 0 int idx = log2(normCapacity >> numShiftsNormalDirect); return cache(normalDirectCaches, idx); }
test_patch: null
split_random: val
split_time: test
issue_start_time: "2020-03-19T12:47:46"
issue_created_at: "2020-03-21T08:48:34Z"
issue_by_user: netudima
split_repo: val
instance_id: netty/netty/10164_10175
repo: netty/netty
issue_id: netty/netty/10164
pr_id: netty/netty/10175
linking_methods: [ "keyword_pr_to_issue" ]
base_commit: b996bf2151fe99b5fc528b98ac5ea65681380e2f
merge_commit: 37948bc9debb3646150365e21da5d38342bbb3c5
hints_text: [ "@jrhee17 sure this sounds like something useful. Please provide a PR and I will review it :)", "@jrhee17 please let me know once you have a PR ...", "Apologies for the delay -- I was drafting a POC on the client-side library to get a better idea on how this encoder will be used. The PR has been opened 🍀 " ]
resolved_comments:
[ "shouldn't `address.length() > 108` be good enough ?", "As this one is `@Sharable` should we add a private constructor, mark it final and expose a static instance that the user can use ?", "why we dont write stuff directly to the `ByteBuf` ? We can use `out.writeCharSequence(...)`", "consider using a `switch` as replacement for all the `if ... else if ... else`.", "Can we make this `216` an static field and add comments on what this is about ? ", "again consider using `out.writeCharSequence(...)`", "same comment as above. ", "please make this `108` an static field and add comments what this is about. ", "closing using index based for loop to eliminate GC pressure ", "please add javadocs ", "add empty line above ", "Just use normal string concats... ", "please add javadocs and consider removing final to be more consistent ", "same comment as above... consider just using normal string concat ", "you should call `msg.retain()` as `SimpleChannelInboundHandler` will release the msg after `channelRead0` returns ", "call `readMessage.release()` in a finally block ", "add `sync()`", "also close the channels and shutdown the group in a finally block ", "call `byteBuf.release();` and also add `assertFalse(ch.finish());`", "call `byteBuf.release();` and also add `assertFalse(ch.finish());`", "call `byteBuf.release()` and also add `assertFalse(ch.finish());`", "call `byteBuf.release()` and also add `assertFalse(ch.finish());`", "call `byteBuf.release()` and also add `assertFalse(ch.finish());`", "call `byteBuf.release()` and also add `assertFalse(ch.finish());`", "call `byteBuf.release()` and also add `assertFalse(ch.finish());`", "call `byteBuf.release()` and also add `assertFalse(ch.finish());`", "I think we have a couple of options:\r\n1) Pad the string to 108 length, and use `writeCharSequence `\r\n -> I'm not sure the extra string allocation will be worth it. I don't think readability will improve either, since we still need to 1) get the size of the address in bytes 2) allocate a new 108 byte length string 3) and write to `ByteBuffer`.\r\n2) Restrict user input to accept only 108 byte addresses\r\n -> usability 😢 \r\n3) Keep AS-IS\r\n\r\nI prefer 3, but might be willing to change to 1) if you wish", "ditto", "I've added an `ObjectUtil.checkNotNull` check to make my intention clear\r\n(unless you meant we should allow null addresses... let me know if this was your intention 🙏 )", "I wonder why even need the `StringBuilder` at all. Cant you just directly write all of this to the `ByteBuf` step-by-step ?... ", "why you can't just use `writeCharSequence(...)` here ? It seems there is no need to convert to `byte[]` first. Same comment is true for other places.", "also wouldn't it be better to use `out.writeZero(...)` ? There is no need for this extra allocation.", "consider using `SimpleChannelInboundHandler` and just consume the message in `channelRead0`. This way it is correctly released without hitting the tail of the pipeline ", "imho it should just be `write(...)....`", "this throw makes no sense as it will be not propagated at all.", "I'm still looking for a more efficient way to do this.\r\nThe reason I can't just switch to `writeCharSequence` because I need to know the number of bytes to pad to length 108.\r\nhttps://github.com/netty/netty/blob/b86a6c2e49c37b4872f075debddc06a9adcc4670/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageEncoder.java#L93\r\n\r\nAny ideas are welcome", "`writeCharSequence` will return the number of bytes written. 
So You can use this, or I am missing something ?", "yea... I don't read javadocs carefully 🤦 ", "you would be better of to write bytes for the know things as this will be cheaper then doing a conversation:\r\n\r\n```\r\nout.writeByte((byte) ' ');\r\n```\r\n\r\nSame comment for others.", "can you add a comment what the `4` is about ?", "nit: not really your PRs fault but either the if statement is incorrect or the exception message in terms of allowed port range.", "nit: you could move the for loop into this block as there is no need to do the loop when its empty anyway ", "nit: consider replacing by a switch statement. ", "nit: remove final as we usually not it for params ", "nit: remove final as we usually not it for params ", "nit you could also simplify this as:\r\n\r\n```\r\npublic void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {\r\n ChannelFuture future = ctx.write(msg, promise);\r\n if (msg instanceof HAProxyMessage) {\r\n future.addListener(...)\r\n }\r\n}\r\n```", "I would remove this... the user will know that it failed because we fail the promise ", "@jrhee17 one question... isn't `TEXT_PREFIX` a better name ?", "huh... binary <-> text. updated.", "Should this catch RuntimeException?", "I think at least we should also catch `IndexOutOfBoundsException`" ]
"2020-04-08T14:08:31Z"
[]
Consider adding support for HAProxyMessageEncoder
**Subject**

Consider adding support for `HAProxyMessageEncoder`

**Motivation**

I am working on a [project](https://github.com/line/armeria) which offers server-side PROXY protocol support (using netty `HaProxyMessageDecoder` 💯). All works fine, but we have some limitations:

1) Sometimes, clients may want to pass an arbitrary source/destination address directly without using a haproxy server: client -> server (no haproxy server in the middle).
2) More critically, users may want to preserve the original source/destination address in a multi-layer proxy architecture: client -> server1 -> server2 -> server3. Currently, our servers have no way of relaying this information to the next layer without an encoder.

I have a PR cooking at the moment, but I would like to confirm that this is something you are willing to review/accept before proceeding further. Any comments/feedback are welcome.

POC: https://github.com/jrhee17/netty/commit/0dfa47b6adb263c2b46fef6ad3ec04ef1878e948
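Based on the API the PR ultimately adds (a now-public `HAProxyMessage` constructor and a shared `HAProxyMessageEncoder.INSTANCE`, both visible in the patch below), a minimal sketch of client-side usage; the addresses, ports, and handler name are placeholder values:

```java
import io.netty.channel.Channel;
import io.netty.handler.codec.haproxy.HAProxyCommand;
import io.netty.handler.codec.haproxy.HAProxyMessage;
import io.netty.handler.codec.haproxy.HAProxyMessageEncoder;
import io.netty.handler.codec.haproxy.HAProxyProtocolVersion;
import io.netty.handler.codec.haproxy.HAProxyProxiedProtocol;

final class ProxyProtocolClientExample {
    // Sends a v1 PROXY header ("PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n")
    // as the first bytes on a freshly connected channel.
    static void sendProxyHeader(Channel ch) {
        ch.pipeline().addFirst("haproxy-encoder", HAProxyMessageEncoder.INSTANCE);
        HAProxyMessage message = new HAProxyMessage(
                HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4,
                "192.168.0.1", "192.168.0.11", 56324, 443);
        ch.writeAndFlush(message);
        // A real client would remove the encoder once this write completes
        // (for example from a ChannelFutureListener), since the PROXY header
        // is only ever sent once per connection.
    }

    private ProxyProtocolClientExample() { }
}
```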
gold_files: [ "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java", "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java", "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java", "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java", "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java", "example/pom.xml" ]
gold_files_postpatch: [ "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java", "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java", "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java", "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageEncoder.java", "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java", "codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java", "example/pom.xml", "example/src/main/java/io/netty/example/haproxy/HAProxyClient.java", "example/src/main/java/io/netty/example/haproxy/HAProxyHandler.java", "example/src/main/java/io/netty/example/haproxy/HAProxyServer.java" ]
test_files: [ "codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyIntegrationTest.java", "codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HaProxyMessageEncoderTest.java" ]
gold_patch:
diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java index c2a4e22c723..4a79b172d39 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java @@ -56,5 +56,31 @@ final class HAProxyConstants { static final byte TPAF_UNIX_STREAM_BYTE = 0x31; static final byte TPAF_UNIX_DGRAM_BYTE = 0x32; + /** + * V2 protocol binary header prefix + */ + static final byte[] BINARY_PREFIX = { + (byte) 0x0D, + (byte) 0x0A, + (byte) 0x0D, + (byte) 0x0A, + (byte) 0x00, + (byte) 0x0D, + (byte) 0x0A, + (byte) 0x51, + (byte) 0x55, + (byte) 0x49, + (byte) 0x54, + (byte) 0x0A + }; + + static final byte[] TEXT_PREFIX = { + (byte) 'P', + (byte) 'R', + (byte) 'O', + (byte) 'X', + (byte) 'Y', + }; + private HAProxyConstants() { } } diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java index 1777e672574..e501a6950c8 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java @@ -25,6 +25,7 @@ import io.netty.util.ResourceLeakDetectorFactory; import io.netty.util.ResourceLeakTracker; import io.netty.util.internal.ObjectUtil; +import io.netty.util.internal.StringUtil; import java.util.ArrayList; import java.util.Collections; @@ -59,9 +60,16 @@ private HAProxyMessage( } /** - * Creates a new instance + * Creates a new instance of HAProxyMessage. + * @param protocolVersion the protocol version. + * @param command the command. + * @param proxiedProtocol the protocol containing the address family and transport protocol. + * @param sourceAddress the source address. + * @param destinationAddress the destination address. + * @param sourcePort the source port. This value must be 0 for unix, unspec addresses. + * @param destinationPort the destination port. This value must be 0 for unix, unspec addresses. */ - private HAProxyMessage( + public HAProxyMessage( HAProxyProtocolVersion protocolVersion, HAProxyCommand command, HAProxyProxiedProtocol proxiedProtocol, String sourceAddress, String destinationAddress, int sourcePort, int destinationPort) { @@ -70,20 +78,30 @@ private HAProxyMessage( } /** - * Creates a new instance + * Creates a new instance of HAProxyMessage. + * @param protocolVersion the protocol version. + * @param command the command. + * @param proxiedProtocol the protocol containing the address family and transport protocol. + * @param sourceAddress the source address. + * @param destinationAddress the destination address. + * @param sourcePort the source port. This value must be 0 for unix, unspec addresses. + * @param destinationPort the destination port. This value must be 0 for unix, unspec addresses. + * @param tlvs the list of tlvs. */ - private HAProxyMessage( + public HAProxyMessage( HAProxyProtocolVersion protocolVersion, HAProxyCommand command, HAProxyProxiedProtocol proxiedProtocol, String sourceAddress, String destinationAddress, int sourcePort, int destinationPort, - List<HAProxyTLV> tlvs) { + List<? 
extends HAProxyTLV> tlvs) { + ObjectUtil.checkNotNull(protocolVersion, "protocolVersion"); ObjectUtil.checkNotNull(proxiedProtocol, "proxiedProtocol"); + ObjectUtil.checkNotNull(tlvs, "tlvs"); AddressFamily addrFamily = proxiedProtocol.addressFamily(); checkAddress(sourceAddress, addrFamily); checkAddress(destinationAddress, addrFamily); - checkPort(sourcePort); - checkPort(destinationPort); + checkPort(sourcePort, addrFamily); + checkPort(destinationPort, addrFamily); this.protocolVersion = protocolVersion; this.command = command; @@ -329,9 +347,13 @@ static HAProxyMessage decodeHeader(String header) { throw new HAProxyProtocolException("invalid TCP4/6 header: " + header + " (expected: 6 parts)"); } - return new HAProxyMessage( - HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, - protAndFam, parts[2], parts[3], parts[4], parts[5]); + try { + return new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, + protAndFam, parts[2], parts[3], parts[4], parts[5]); + } catch (RuntimeException e) { + throw new HAProxyProtocolException("invalid HAProxy message", e); + } } /** @@ -373,18 +395,18 @@ private static String ipBytesToString(ByteBuf header, int addressLen) { * * @param value the port * @return port as an integer - * @throws HAProxyProtocolException if port is not a valid integer + * @throws IllegalArgumentException if port is not a valid integer */ private static int portStringToInt(String value) { int port; try { port = Integer.parseInt(value); } catch (NumberFormatException e) { - throw new HAProxyProtocolException("invalid port: " + value, e); + throw new IllegalArgumentException("invalid port: " + value, e); } if (port <= 0 || port > 65535) { - throw new HAProxyProtocolException("invalid port: " + value + " (expected: 1 ~ 65535)"); + throw new IllegalArgumentException("invalid port: " + value + " (expected: 1 ~ 65535)"); } return port; @@ -395,7 +417,7 @@ private static int portStringToInt(String value) { * * @param address human-readable address * @param addrFamily the {@link AddressFamily} to check the address against - * @throws HAProxyProtocolException if the address is invalid + * @throws IllegalArgumentException if the address is invalid */ private static void checkAddress(String address, AddressFamily addrFamily) { ObjectUtil.checkNotNull(addrFamily, "addrFamily"); @@ -403,10 +425,14 @@ private static void checkAddress(String address, AddressFamily addrFamily) { switch (addrFamily) { case AF_UNSPEC: if (address != null) { - throw new HAProxyProtocolException("unable to validate an AF_UNSPEC address: " + address); + throw new IllegalArgumentException("unable to validate an AF_UNSPEC address: " + address); } return; case AF_UNIX: + ObjectUtil.checkNotNull(address, "address"); + if (address.getBytes(CharsetUtil.US_ASCII).length > 108) { + throw new IllegalArgumentException("invalid AF_UNIX address: " + address); + } return; } @@ -415,28 +441,41 @@ private static void checkAddress(String address, AddressFamily addrFamily) { switch (addrFamily) { case AF_IPv4: if (!NetUtil.isValidIpV4Address(address)) { - throw new HAProxyProtocolException("invalid IPv4 address: " + address); + throw new IllegalArgumentException("invalid IPv4 address: " + address); } break; case AF_IPv6: if (!NetUtil.isValidIpV6Address(address)) { - throw new HAProxyProtocolException("invalid IPv6 address: " + address); + throw new IllegalArgumentException("invalid IPv6 address: " + address); } break; default: - throw new Error(); + throw new IllegalArgumentException("unexpected addrFamily: " + 
addrFamily); } } /** - * Validate a UDP/TCP port + * Validate the port depending on the addrFamily. * * @param port the UDP/TCP port - * @throws HAProxyProtocolException if the port is out of range (0-65535 inclusive) + * @throws IllegalArgumentException if the port is out of range (0-65535 inclusive) */ - private static void checkPort(int port) { - if (port < 0 || port > 65535) { - throw new HAProxyProtocolException("invalid port: " + port + " (expected: 1 ~ 65535)"); + private static void checkPort(int port, AddressFamily addrFamily) { + switch (addrFamily) { + case AF_IPv6: + case AF_IPv4: + if (port < 0 || port > 65535) { + throw new IllegalArgumentException("invalid port: " + port + " (expected: 0 ~ 65535)"); + } + break; + case AF_UNIX: + case AF_UNSPEC: + if (port != 0) { + throw new IllegalArgumentException("port cannot be specified with addrFamily: " + addrFamily); + } + break; + default: + throw new IllegalArgumentException("unexpected addrFamily: " + addrFamily); } } @@ -498,6 +537,14 @@ public List<HAProxyTLV> tlvs() { return tlvs; } + int tlvNumBytes() { + int tlvNumBytes = 0; + for (int i = 0; i < tlvs.size(); i++) { + tlvNumBytes += tlvs.get(i).totalNumBytes(); + } + return tlvNumBytes; + } + @Override public HAProxyMessage touch() { tryRecord(); @@ -556,4 +603,26 @@ protected void deallocate() { } } } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(256) + .append(StringUtil.simpleClassName(this)) + .append("(protocolVersion: ").append(protocolVersion) + .append(", command: ").append(command) + .append(", proxiedProtocol: ").append(proxiedProtocol) + .append(", sourceAddress: ").append(sourceAddress) + .append(", destinationAddress: ").append(destinationAddress) + .append(", sourcePort: ").append(sourcePort) + .append(", destinationPort: ").append(destinationPort) + .append(", tlvs: ["); + if (!tlvs.isEmpty()) { + for (HAProxyTLV tlv: tlvs) { + sb.append(tlv).append(", "); + } + sb.setLength(sb.length() - 2); + } + sb.append("])"); + return sb.toString(); + } } diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java index 87bea65a719..427fa4d3031 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java @@ -23,6 +23,8 @@ import java.util.List; +import static io.netty.handler.codec.haproxy.HAProxyConstants.*; + /** * Decodes an HAProxy proxy protocol header * @@ -49,32 +51,6 @@ public class HAProxyMessageDecoder extends ByteToMessageDecoder { */ private static final int V2_MAX_TLV = 65535 - 216; - /** - * Binary header prefix - */ - private static final byte[] BINARY_PREFIX = { - (byte) 0x0D, - (byte) 0x0A, - (byte) 0x0D, - (byte) 0x0A, - (byte) 0x00, - (byte) 0x0D, - (byte) 0x0A, - (byte) 0x51, - (byte) 0x55, - (byte) 0x49, - (byte) 0x54, - (byte) 0x0A - }; - - private static final byte[] TEXT_PREFIX = { - (byte) 'P', - (byte) 'R', - (byte) 'O', - (byte) 'X', - (byte) 'Y', - }; - /** * Binary header prefix length */ diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageEncoder.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageEncoder.java new file mode 100644 index 00000000000..ef36480073b --- /dev/null +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageEncoder.java @@ -0,0 +1,134 @@ +/* + * 
Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.haproxy; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandler.Sharable; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToByteEncoder; +import io.netty.util.CharsetUtil; +import io.netty.util.NetUtil; + +import java.util.List; + +import static io.netty.handler.codec.haproxy.HAProxyConstants.*; + +/** + * Encodes an HAProxy proxy protocol message + * + * @see <a href="http://www.haproxy.org/download/1.8/doc/proxy-protocol.txt">Proxy Protocol Specification</a> + */ +@Sharable +public final class HAProxyMessageEncoder extends MessageToByteEncoder<HAProxyMessage> { + + private static final int V2_VERSION_BITMASK = 0x02 << 4; + + // Length for source/destination addresses for the UNIX family must be 108 bytes each. + static final int UNIX_ADDRESS_BYTES_LENGTH = 108; + static final int TOTAL_UNIX_ADDRESS_BYTES_LENGTH = UNIX_ADDRESS_BYTES_LENGTH * 2; + + public static final HAProxyMessageEncoder INSTANCE = new HAProxyMessageEncoder(); + + private HAProxyMessageEncoder() { + } + + @Override + protected void encode(ChannelHandlerContext ctx, HAProxyMessage msg, ByteBuf out) throws Exception { + switch (msg.protocolVersion()) { + case V1: + encodeV1(msg, out); + break; + case V2: + encodeV2(msg, out); + break; + default: + throw new HAProxyProtocolException("Unsupported version: " + msg.protocolVersion()); + } + } + + private static void encodeV1(HAProxyMessage msg, ByteBuf out) { + out.writeBytes(TEXT_PREFIX); + out.writeByte((byte) ' '); + out.writeCharSequence(msg.proxiedProtocol().name(), CharsetUtil.US_ASCII); + out.writeByte((byte) ' '); + out.writeCharSequence(msg.sourceAddress(), CharsetUtil.US_ASCII); + out.writeByte((byte) ' '); + out.writeCharSequence(msg.destinationAddress(), CharsetUtil.US_ASCII); + out.writeByte((byte) ' '); + out.writeCharSequence(String.valueOf(msg.sourcePort()), CharsetUtil.US_ASCII); + out.writeByte((byte) ' '); + out.writeCharSequence(String.valueOf(msg.destinationPort()), CharsetUtil.US_ASCII); + out.writeByte((byte) '\r'); + out.writeByte((byte) '\n'); + } + + private static void encodeV2(HAProxyMessage msg, ByteBuf out) { + out.writeBytes(BINARY_PREFIX); + out.writeByte(V2_VERSION_BITMASK | msg.command().byteValue()); + out.writeByte(msg.proxiedProtocol().byteValue()); + + switch (msg.proxiedProtocol().addressFamily()) { + case AF_IPv4: + case AF_IPv6: + byte[] srcAddrBytes = NetUtil.createByteArrayFromIpAddressString(msg.sourceAddress()); + byte[] dstAddrBytes = NetUtil.createByteArrayFromIpAddressString(msg.destinationAddress()); + // srcAddrLen + dstAddrLen + 4 (srcPort + dstPort) + numTlvBytes + out.writeShort(srcAddrBytes.length + dstAddrBytes.length + 4 + msg.tlvNumBytes()); + out.writeBytes(srcAddrBytes); + out.writeBytes(dstAddrBytes); + out.writeShort(msg.sourcePort()); + out.writeShort(msg.destinationPort()); + encodeTlvs(msg.tlvs(), 
out); + break; + case AF_UNIX: + out.writeShort(TOTAL_UNIX_ADDRESS_BYTES_LENGTH + msg.tlvNumBytes()); + int srcAddrBytesWritten = out.writeCharSequence(msg.sourceAddress(), CharsetUtil.US_ASCII); + out.writeZero(UNIX_ADDRESS_BYTES_LENGTH - srcAddrBytesWritten); + int dstAddrBytesWritten = out.writeCharSequence(msg.destinationAddress(), CharsetUtil.US_ASCII); + out.writeZero(UNIX_ADDRESS_BYTES_LENGTH - dstAddrBytesWritten); + encodeTlvs(msg.tlvs(), out); + break; + case AF_UNSPEC: + out.writeShort(0); + break; + default: + throw new HAProxyProtocolException("unexpected addrFamily"); + } + } + + private static void encodeTlv(HAProxyTLV haProxyTLV, ByteBuf out) { + if (haProxyTLV instanceof HAProxySSLTLV) { + HAProxySSLTLV ssltlv = (HAProxySSLTLV) haProxyTLV; + out.writeByte(haProxyTLV.typeByteValue()); + out.writeShort(ssltlv.contentNumBytes()); + out.writeByte(ssltlv.client()); + out.writeInt(ssltlv.verify()); + encodeTlvs(ssltlv.encapsulatedTLVs(), out); + } else { + out.writeByte(haProxyTLV.typeByteValue()); + ByteBuf value = haProxyTLV.content(); + int readableBytes = value.readableBytes(); + out.writeShort(readableBytes); + out.writeBytes(value.readSlice(readableBytes)); + } + } + + private static void encodeTlvs(List<HAProxyTLV> haProxyTLVs, ByteBuf out) { + for (int i = 0; i < haProxyTLVs.size(); i++) { + encodeTlv(haProxyTLVs.get(i), out); + } + } +} diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java index 5d3dc103676..36166b2a198 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java @@ -17,6 +17,8 @@ package io.netty.handler.codec.haproxy; import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.util.internal.StringUtil; import java.util.Collections; import java.util.List; @@ -35,7 +37,19 @@ public final class HAProxySSLTLV extends HAProxyTLV { * Creates a new HAProxySSLTLV * * @param verify the verification result as defined in the specification for the pp2_tlv_ssl struct (see - * http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) + * http://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) + * @param clientBitField the bitfield with client information + * @param tlvs the encapsulated {@link HAProxyTLV}s + */ + public HAProxySSLTLV(final int verify, final byte clientBitField, final List<HAProxyTLV> tlvs) { + this(verify, clientBitField, tlvs, Unpooled.EMPTY_BUFFER); + } + + /** + * Creates a new HAProxySSLTLV + * + * @param verify the verification result as defined in the specification for the pp2_tlv_ssl struct (see + * http://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) * @param clientBitField the bitfield with client information * @param tlvs the encapsulated {@link HAProxyTLV}s * @param rawContent the raw TLV content @@ -69,6 +83,13 @@ public boolean isPP2ClientCertSess() { return (clientBitField & 0x4) != 0; } + /** + * Returns the client bit field + */ + public byte client() { + return clientBitField; + } + /** * Returns the verification result */ @@ -83,4 +104,22 @@ public List<HAProxyTLV> encapsulatedTLVs() { return tlvs; } + @Override + int contentNumBytes() { + int tlvNumBytes = 0; + for (int i = 0; i < tlvs.size(); i++) { + tlvNumBytes += tlvs.get(i).totalNumBytes(); + } + return 5 + tlvNumBytes; // clientBit(1) + verify(4) + tlvs + } + + @Override + public String toString() { + return 
StringUtil.simpleClassName(this) + + "(type: " + type() + + ", typeByteValue: " + typeByteValue() + + ", client: " + client() + + ", verify: " + verify() + + ", numEncapsulatedTlvs: " + tlvs.size() + ')'; + } } diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java index 38d79a0ebfe..a8075be11b2 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java @@ -18,6 +18,7 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.DefaultByteBufHolder; +import io.netty.util.internal.StringUtil; import static io.netty.util.internal.ObjectUtil.*; @@ -32,6 +33,18 @@ public class HAProxyTLV extends DefaultByteBufHolder { private final Type type; private final byte typeByteValue; + /** + * The size of this tlv in bytes. + * @return the number of bytes. + */ + int totalNumBytes() { + return 3 + contentNumBytes(); // type(1) + length(2) + content + } + + int contentNumBytes() { + return content().readableBytes(); + } + /** * The registered types a TLV can have regarding the PROXY protocol 1.5 spec */ @@ -56,7 +69,7 @@ public enum Type { * * @return the {@link Type} of a TLV */ - public static Type typeForByteValue(final byte byteValue) { + public static Type typeForByteValue(byte byteValue) { switch (byteValue) { case 0x01: return PP2_TYPE_ALPN; @@ -74,6 +87,52 @@ public static Type typeForByteValue(final byte byteValue) { return OTHER; } } + + /** + * Returns the byte value for the {@link Type} as defined in the PROXY protocol 1.5 spec. + * + * @param type the {@link Type} + * + * @return the byte value of the {@link Type}. + */ + public static byte byteValueForType(Type type) { + switch (type) { + case PP2_TYPE_ALPN: + return 0x01; + case PP2_TYPE_AUTHORITY: + return 0x02; + case PP2_TYPE_SSL: + return 0x20; + case PP2_TYPE_SSL_VERSION: + return 0x21; + case PP2_TYPE_SSL_CN: + return 0x22; + case PP2_TYPE_NETNS: + return 0x30; + default: + throw new IllegalArgumentException("unknown type: " + type); + } + } + } + + /** + * Creates a new HAProxyTLV + * + * @param typeByteValue the byteValue of the TLV. 
This is especially important if non-standard TLVs are used + * @param content the raw content of the TLV + */ + public HAProxyTLV(byte typeByteValue, ByteBuf content) { + this(Type.typeForByteValue(typeByteValue), typeByteValue, content); + } + + /** + * Creates a new HAProxyTLV + * + * @param type the {@link Type} of the TLV + * @param content the raw content of the TLV + */ + public HAProxyTLV(Type type, ByteBuf content) { + this(type, Type.byteValueForType(type), content); } /** @@ -146,4 +205,12 @@ public HAProxyTLV touch(Object hint) { super.touch(hint); return this; } + + @Override + public String toString() { + return StringUtil.simpleClassName(this) + + "(type: " + type() + + ", typeByteValue: " + typeByteValue() + + ", content: " + contentToString() + ')'; + } } diff --git a/example/pom.xml b/example/pom.xml index bb3bb3a57be..07d7bca4d37 100644 --- a/example/pom.xml +++ b/example/pom.xml @@ -103,6 +103,11 @@ <artifactId>netty-codec-mqtt</artifactId> <version>${project.version}</version> </dependency> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>netty-codec-haproxy</artifactId> + <version>${project.version}</version> + </dependency> <dependency> <groupId>com.google.protobuf</groupId> diff --git a/example/src/main/java/io/netty/example/haproxy/HAProxyClient.java b/example/src/main/java/io/netty/example/haproxy/HAProxyClient.java new file mode 100644 index 00000000000..1da402a5b21 --- /dev/null +++ b/example/src/main/java/io/netty/example/haproxy/HAProxyClient.java @@ -0,0 +1,60 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.example.haproxy; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.haproxy.HAProxyCommand; +import io.netty.handler.codec.haproxy.HAProxyMessage; +import io.netty.handler.codec.haproxy.HAProxyProtocolVersion; +import io.netty.handler.codec.haproxy.HAProxyProxiedProtocol; +import io.netty.util.CharsetUtil; + +import static io.netty.example.haproxy.HAProxyServer.*; + +public final class HAProxyClient { + + private static final String HOST = System.getProperty("host", "127.0.0.1"); + + public static void main(String[] args) throws Exception { + EventLoopGroup group = new NioEventLoopGroup(); + try { + Bootstrap b = new Bootstrap(); + b.group(group) + .channel(NioSocketChannel.class) + .handler(new HAProxyHandler()); + + // Start the connection attempt. 
+ Channel ch = b.connect(HOST, PORT).sync().channel(); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "127.0.0.1", "127.0.0.2", 8000, 9000); + + ch.writeAndFlush(message).sync(); + ch.writeAndFlush(Unpooled.copiedBuffer("Hello World!", CharsetUtil.US_ASCII)).sync(); + ch.writeAndFlush(Unpooled.copiedBuffer("Bye now!", CharsetUtil.US_ASCII)).sync(); + ch.close().sync(); + } finally { + group.shutdownGracefully(); + } + } +} diff --git a/example/src/main/java/io/netty/example/haproxy/HAProxyHandler.java b/example/src/main/java/io/netty/example/haproxy/HAProxyHandler.java new file mode 100644 index 00000000000..fa44334d405 --- /dev/null +++ b/example/src/main/java/io/netty/example/haproxy/HAProxyHandler.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.example.haproxy; + +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.handler.codec.haproxy.HAProxyMessage; +import io.netty.handler.codec.haproxy.HAProxyMessageEncoder; + +public class HAProxyHandler extends ChannelOutboundHandlerAdapter { + + @Override + public void handlerAdded(ChannelHandlerContext ctx) throws Exception { + ctx.pipeline().addBefore(ctx.name(), null, HAProxyMessageEncoder.INSTANCE); + super.handlerAdded(ctx); + } + + @Override + public void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + ChannelFuture future = ctx.write(msg, promise); + if (msg instanceof HAProxyMessage) { + future.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + if (future.isSuccess()) { + ctx.pipeline().remove(HAProxyMessageEncoder.INSTANCE); + ctx.pipeline().remove(HAProxyHandler.this); + } else { + ctx.close(); + } + } + }); + } + } +} diff --git a/example/src/main/java/io/netty/example/haproxy/HAProxyServer.java b/example/src/main/java/io/netty/example/haproxy/HAProxyServer.java new file mode 100644 index 00000000000..ab50288abff --- /dev/null +++ b/example/src/main/java/io/netty/example/haproxy/HAProxyServer.java @@ -0,0 +1,72 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.example.haproxy; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.haproxy.HAProxyMessage; +import io.netty.handler.codec.haproxy.HAProxyMessageDecoder; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; + +public final class HAProxyServer { + + static final int PORT = Integer.parseInt(System.getProperty("port", "8080")); + + public static void main(String[] args) throws Exception { + EventLoopGroup bossGroup = new NioEventLoopGroup(1); + EventLoopGroup workerGroup = new NioEventLoopGroup(); + try { + ServerBootstrap b = new ServerBootstrap(); + b.group(bossGroup, workerGroup) + .channel(NioServerSocketChannel.class) + .handler(new LoggingHandler(LogLevel.INFO)) + .childHandler(new HAProxyServerInitializer()); + b.bind(PORT).sync().channel().closeFuture().sync(); + } finally { + bossGroup.shutdownGracefully(); + workerGroup.shutdownGracefully(); + } + } + + static class HAProxyServerInitializer extends ChannelInitializer<SocketChannel> { + @Override + public void initChannel(SocketChannel ch) throws Exception { + ch.pipeline().addLast( + new LoggingHandler(LogLevel.DEBUG), + new HAProxyMessageDecoder(), + new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception { + if (msg instanceof HAProxyMessage) { + System.out.println("proxy message: " + msg); + } else if (msg instanceof ByteBuf) { + System.out.println("bytebuf message: " + ByteBufUtil.prettyHexDump((ByteBuf) msg)); + } + } + }); + } + } +}
diff --git a/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyIntegrationTest.java b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyIntegrationTest.java new file mode 100644 index 00000000000..8af8f45493e --- /dev/null +++ b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyIntegrationTest.java @@ -0,0 +1,95 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.handler.codec.haproxy; + +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.DefaultEventLoopGroup; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.local.LocalAddress; +import io.netty.channel.local.LocalChannel; +import io.netty.channel.local.LocalServerChannel; +import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.junit.Assert.*; + +public class HAProxyIntegrationTest { + + @Test + public void testBasicCase() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference<HAProxyMessage> msgHolder = new AtomicReference<HAProxyMessage>(); + LocalAddress localAddress = new LocalAddress("HAProxyIntegrationTest"); + + EventLoopGroup group = new DefaultEventLoopGroup(); + ServerBootstrap sb = new ServerBootstrap(); + sb.channel(LocalServerChannel.class) + .group(group) + .childHandler(new ChannelInitializer() { + @Override + protected void initChannel(Channel ch) throws Exception { + ch.pipeline().addLast(new HAProxyMessageDecoder()); + ch.pipeline().addLast(new SimpleChannelInboundHandler<HAProxyMessage>() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, HAProxyMessage msg) throws Exception { + msgHolder.set(msg.retain()); + latch.countDown(); + } + }); + } + }); + Channel serverChannel = sb.bind(localAddress).sync().channel(); + + Bootstrap b = new Bootstrap(); + Channel clientChannel = b.channel(LocalChannel.class) + .handler(HAProxyMessageEncoder.INSTANCE) + .group(group) + .connect(localAddress).sync().channel(); + + try { + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443); + clientChannel.writeAndFlush(message).sync(); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + HAProxyMessage readMessage = msgHolder.get(); + + assertEquals(message.protocolVersion(), readMessage.protocolVersion()); + assertEquals(message.command(), readMessage.command()); + assertEquals(message.proxiedProtocol(), readMessage.proxiedProtocol()); + assertEquals(message.sourceAddress(), readMessage.sourceAddress()); + 
assertEquals(message.destinationAddress(), readMessage.destinationAddress()); + assertEquals(message.sourcePort(), readMessage.sourcePort()); + assertEquals(message.destinationPort(), readMessage.destinationPort()); + + readMessage.release(); + } finally { + clientChannel.close().sync(); + serverChannel.close().sync(); + group.shutdownGracefully().sync(); + } + } +} diff --git a/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HaProxyMessageEncoderTest.java b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HaProxyMessageEncoderTest.java new file mode 100644 index 00000000000..09c89ab0544 --- /dev/null +++ b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HaProxyMessageEncoderTest.java @@ -0,0 +1,404 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.handler.codec.haproxy; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.haproxy.HAProxyTLV.Type; +import io.netty.util.ByteProcessor; +import io.netty.util.CharsetUtil; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static io.netty.handler.codec.haproxy.HAProxyConstants.*; +import static io.netty.handler.codec.haproxy.HAProxyMessageEncoder.*; +import static org.junit.Assert.*; + +public class HaProxyMessageEncoderTest { + + private static final int V2_HEADER_BYTES_LENGTH = 16; + private static final int IPv4_ADDRESS_BYTES_LENGTH = 12; + private static final int IPv6_ADDRESS_BYTES_LENGTH = 36; + + @Test + public void testIPV4EncodeProxyV1() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + assertEquals("PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n", + byteBuf.toString(CharsetUtil.US_ASCII)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testIPV6EncodeProxyV1() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP6, + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "1050:0:0:0:5:600:300c:326b", 56324, 443); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + assertEquals("PROXY TCP6 2001:0db8:85a3:0000:0000:8a2e:0370:7334 1050:0:0:0:5:600:300c:326b 56324 443\r\n", + byteBuf.toString(CharsetUtil.US_ASCII)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testIPv4EncodeProxyV2() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, 
HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // header + byte[] headerBytes = ByteBufUtil.getBytes(byteBuf, 0, 12); + assertArrayEquals(BINARY_PREFIX, headerBytes); + + // command + byte commandByte = byteBuf.getByte(12); + assertEquals(0x02, (commandByte & 0xf0) >> 4); + assertEquals(0x01, commandByte & 0x0f); + + // transport protocol, address family + byte transportByte = byteBuf.getByte(13); + assertEquals(0x01, (transportByte & 0xf0) >> 4); + assertEquals(0x01, transportByte & 0x0f); + + // source address length + int sourceAddrLength = byteBuf.getUnsignedShort(14); + assertEquals(12, sourceAddrLength); + + // source address + byte[] sourceAddr = ByteBufUtil.getBytes(byteBuf, 16, 4); + assertArrayEquals(new byte[] { (byte) 0xc0, (byte) 0xa8, 0x00, 0x01 }, sourceAddr); + + // destination address + byte[] destAddr = ByteBufUtil.getBytes(byteBuf, 20, 4); + assertArrayEquals(new byte[] { (byte) 0xc0, (byte) 0xa8, 0x00, 0x0b }, destAddr); + + // source port + int sourcePort = byteBuf.getUnsignedShort(24); + assertEquals(56324, sourcePort); + + // destination port + int destPort = byteBuf.getUnsignedShort(26); + assertEquals(443, destPort); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testIPv6EncodeProxyV2() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP6, + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "1050:0:0:0:5:600:300c:326b", 56324, 443); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // header + byte[] headerBytes = ByteBufUtil.getBytes(byteBuf, 0, 12); + assertArrayEquals(BINARY_PREFIX, headerBytes); + + // command + byte commandByte = byteBuf.getByte(12); + assertEquals(0x02, (commandByte & 0xf0) >> 4); + assertEquals(0x01, commandByte & 0x0f); + + // transport protocol, address family + byte transportByte = byteBuf.getByte(13); + assertEquals(0x02, (transportByte & 0xf0) >> 4); + assertEquals(0x01, transportByte & 0x0f); + + // source address length + int sourceAddrLength = byteBuf.getUnsignedShort(14); + assertEquals(IPv6_ADDRESS_BYTES_LENGTH, sourceAddrLength); + + // source address + byte[] sourceAddr = ByteBufUtil.getBytes(byteBuf, 16, 16); + assertArrayEquals(new byte[] { + (byte) 0x20, (byte) 0x01, 0x0d, (byte) 0xb8, + (byte) 0x85, (byte) 0xa3, 0x00, 0x00, 0x00, 0x00, (byte) 0x8a, 0x2e, + 0x03, 0x70, 0x73, 0x34 + }, sourceAddr); + + // destination address + byte[] destAddr = ByteBufUtil.getBytes(byteBuf, 32, 16); + assertArrayEquals(new byte[] { + (byte) 0x10, (byte) 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x05, 0x06, 0x00, 0x30, 0x0c, 0x32, 0x6b + }, destAddr); + + // source port + int sourcePort = byteBuf.getUnsignedShort(48); + assertEquals(56324, sourcePort); + + // destination port + int destPort = byteBuf.getUnsignedShort(50); + assertEquals(443, destPort); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testUnixEncodeProxyV2() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + "/var/run/src.sock", "/var/run/dst.sock", 0, 0); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // header + byte[] headerBytes = 
ByteBufUtil.getBytes(byteBuf, 0, 12); + assertArrayEquals(BINARY_PREFIX, headerBytes); + + // command + byte commandByte = byteBuf.getByte(12); + assertEquals(0x02, (commandByte & 0xf0) >> 4); + assertEquals(0x01, commandByte & 0x0f); + + // transport protocol, address family + byte transportByte = byteBuf.getByte(13); + assertEquals(0x03, (transportByte & 0xf0) >> 4); + assertEquals(0x01, transportByte & 0x0f); + + // address length + int addrLength = byteBuf.getUnsignedShort(14); + assertEquals(TOTAL_UNIX_ADDRESS_BYTES_LENGTH, addrLength); + + // source address + int srcAddrEnd = byteBuf.forEachByte(16, 108, ByteProcessor.FIND_NUL); + assertEquals("/var/run/src.sock", + byteBuf.slice(16, srcAddrEnd - 16).toString(CharsetUtil.US_ASCII)); + + // destination address + int dstAddrEnd = byteBuf.forEachByte(124, 108, ByteProcessor.FIND_NUL); + assertEquals("/var/run/dst.sock", + byteBuf.slice(124, dstAddrEnd - 124).toString(CharsetUtil.US_ASCII)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testTLVEncodeProxy() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + List<HAProxyTLV> tlvs = new ArrayList<HAProxyTLV>(); + + ByteBuf helloWorld = Unpooled.copiedBuffer("hello world", CharsetUtil.US_ASCII); + HAProxyTLV alpnTlv = new HAProxyTLV(Type.PP2_TYPE_ALPN, (byte) 0x01, helloWorld.copy()); + tlvs.add(alpnTlv); + + ByteBuf arbitrary = Unpooled.copiedBuffer("an arbitrary string", CharsetUtil.US_ASCII); + HAProxyTLV authorityTlv = new HAProxyTLV(Type.PP2_TYPE_AUTHORITY, (byte) 0x01, arbitrary.copy()); + tlvs.add(authorityTlv); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443, tlvs); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // length + assertEquals(byteBuf.getUnsignedShort(14), byteBuf.readableBytes() - V2_HEADER_BYTES_LENGTH); + + // skip to tlv section + ByteBuf tlv = byteBuf.skipBytes(V2_HEADER_BYTES_LENGTH + IPv4_ADDRESS_BYTES_LENGTH); + + // alpn tlv + assertEquals(alpnTlv.typeByteValue(), tlv.readByte()); + short bufLength = tlv.readShort(); + assertEquals(helloWorld.array().length, bufLength); + assertEquals(helloWorld, tlv.readBytes(bufLength)); + + // authority tlv + assertEquals(authorityTlv.typeByteValue(), tlv.readByte()); + bufLength = tlv.readShort(); + assertEquals(arbitrary.array().length, bufLength); + assertEquals(arbitrary, tlv.readBytes(bufLength)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testSslTLVEncodeProxy() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + List<HAProxyTLV> tlvs = new ArrayList<HAProxyTLV>(); + + ByteBuf helloWorld = Unpooled.copiedBuffer("hello world", CharsetUtil.US_ASCII); + HAProxyTLV alpnTlv = new HAProxyTLV(Type.PP2_TYPE_ALPN, (byte) 0x01, helloWorld.copy()); + tlvs.add(alpnTlv); + + ByteBuf arbitrary = Unpooled.copiedBuffer("an arbitrary string", CharsetUtil.US_ASCII); + HAProxyTLV authorityTlv = new HAProxyTLV(Type.PP2_TYPE_AUTHORITY, (byte) 0x01, arbitrary.copy()); + tlvs.add(authorityTlv); + + ByteBuf sslContent = Unpooled.copiedBuffer("some ssl content", CharsetUtil.US_ASCII); + HAProxySSLTLV haProxySSLTLV = new HAProxySSLTLV(1, (byte) 0x01, tlvs, sslContent.copy()); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443, + Collections.<HAProxyTLV>singletonList(haProxySSLTLV)); 
+ assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + assertEquals(byteBuf.getUnsignedShort(14), byteBuf.readableBytes() - V2_HEADER_BYTES_LENGTH); + ByteBuf tlv = byteBuf.skipBytes(V2_HEADER_BYTES_LENGTH + IPv4_ADDRESS_BYTES_LENGTH); + + // ssl tlv type + assertEquals(haProxySSLTLV.typeByteValue(), tlv.readByte()); + + // length + int bufLength = tlv.readUnsignedShort(); + assertEquals(bufLength, tlv.readableBytes()); + + // client, verify + assertEquals(0x01, byteBuf.readByte()); + assertEquals(1, byteBuf.readInt()); + + // alpn tlv + assertEquals(alpnTlv.typeByteValue(), tlv.readByte()); + bufLength = tlv.readShort(); + assertEquals(helloWorld.array().length, bufLength); + assertEquals(helloWorld, tlv.readBytes(bufLength)); + + // authority tlv + assertEquals(authorityTlv.typeByteValue(), tlv.readByte()); + bufLength = tlv.readShort(); + assertEquals(arbitrary.array().length, bufLength); + assertEquals(arbitrary, tlv.readBytes(bufLength)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testEncodeLocalProxyV2() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.LOCAL, HAProxyProxiedProtocol.UNKNOWN, + null, null, 0, 0); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // header + byte[] headerBytes = new byte[12]; + byteBuf.readBytes(headerBytes); + assertArrayEquals(BINARY_PREFIX, headerBytes); + + // command + byte commandByte = byteBuf.readByte(); + assertEquals(0x02, (commandByte & 0xf0) >> 4); + assertEquals(0x00, commandByte & 0x0f); + + // transport protocol, address family + byte transportByte = byteBuf.readByte(); + assertEquals(0x00, transportByte); + + // source address length + int sourceAddrLength = byteBuf.readUnsignedShort(); + assertEquals(0, sourceAddrLength); + + assertFalse(byteBuf.isReadable()); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidIpV4Address() { + String invalidIpv4Address = "192.168.0.1234"; + new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + invalidIpv4Address, "192.168.0.11", 56324, 443); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidIpV6Address() { + String invalidIpv6Address = "2001:0db8:85a3:0000:0000:8a2e:0370:73345"; + new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP6, + invalidIpv6Address, "1050:0:0:0:5:600:300c:326b", 56324, 443); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidUnixAddress() { + String invalidUnixAddress = new String(new byte[UNIX_ADDRESS_BYTES_LENGTH + 1]); + new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + invalidUnixAddress, "/var/run/dst.sock", 0, 0); + } + + @Test(expected = NullPointerException.class) + public void testNullUnixAddress() { + new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + null, null, 0, 0); + } + + @Test(expected = IllegalArgumentException.class) + public void testLongUnixAddress() { + String longUnixAddress = new String(new char[109]).replace("\0", "a"); + new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + "source", longUnixAddress, 0, 0); + } + + @Test(expected = 
IllegalArgumentException.class) + public void testInvalidUnixPort() { + new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + "/var/run/src.sock", "/var/run/dst.sock", 80, 443); + } +}
train
test
"2020-04-14T14:23:45"
"2020-04-03T18:34:01Z"
jrhee17
val
netty/netty/10181_10182
netty/netty
netty/netty/10181
netty/netty/10182
[ "connected" ]
2b14775446f3f3937ddd6eb6bd7b22686810908f
79ef0c4706b64bd0b6c3ce24516beb587a0c5f4a
[ "I think @ejona86 @nmittler did mention that Conscrypt only works on Java8 in general. This may be special for Android tho.. So yep I would be happy to review and merge a PR which special case Android @gnarea ", "I think we've been able to run Conscrypt with Java 7. I see Conscrypt itself is building with Java 7 bytecode for OpenJDK. It might have been Java 6 that was the problem child, since that was before SSLParameters.setEndpointIdentificationAlgorithm().\r\n\r\nJava version checks on Android tend to be \"wrong,\" since Android is simultaneously Java 6, 7, 8, 9, etc depending on which APIs you care about.", "Conscrypt [claims Java 7 support](https://github.com/google/conscrypt#download). Only recent changes were to drop Java 6. Java 6 support [was added in 2017](https://github.com/google/conscrypt/commit/401f1c4645f5ec715516ee9674c662448d115e66).\r\n\r\nIt looks like [this is the source](https://github.com/netty/netty/commit/c4832cd9d989788f32dc1321d3e4c72bffa3587a) of the java 8 check, which is before the Java 6 support in Conscrypt that I linked to.", "Thanks for looking into this @normanmaurer and @ejona86! @sdsantos has put a PR together in #10182.", "@gnarea Could you please share a code snippet on how you got TLS to work on a gRPC server using Netty? I would very much appreciate as I'm currently struggling to find the correct approach.\r\n\r\nI'm currently trying to use the `JdkSslContext`.", "Hi @jnorkus! Check this out: https://github.com/relaycorp/relaynet-courier-android/blob/d686bb2768470fa139fa64ef3bb9697519160cee/app/src/main/java/tech/relaycorp/cogrpc/server/CogRPCServer.kt#L52", "Could you share the code for your Netty-based server on Android? Thank you!" ]
[]
"2020-04-10T17:09:27Z"
[]
Workaround to use Netty 4.1 with TLS on Android
We need to run a gRPC server on Android, so we're planning to use Netty for that. Everything seems to work, but we had to change the following: https://github.com/netty/netty/blob/2b14775446f3f3937ddd6eb6bd7b22686810908f/handler/src/main/java/io/netty/handler/ssl/Conscrypt.java#L59 because `PlatformDependent.javaVersion() >= 8` won't be true on Android. Like I've said, things seem to work based on the testing we've done so far, but I wanted to check a couple of things: - Why `PlatformDependent.javaVersion() >= 8`? What could break if it's `6`? That way we can check those things on Android. - Would you be open to a PR that replaced that expression with something that works on Android? For example: `CAN_INSTANCE_PROVIDER && IS_CONSCRYPT_SSLENGINE != null && (PlatformDependent.isAndroid() || PlatformDependent.javaVersion() >= 8)`
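To make the use case concrete, here is a minimal sketch of the wiring typically involved — an assumption about common usage, not code from the issue; the class name and certificate paths are placeholders. It registers Conscrypt as the top-priority JCA provider and builds a Netty `SslContext` with the JDK provider, so that the `SSLEngine`s Netty creates come from Conscrypt — the path gated by the `isAvailable()` check discussed above.

```java
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslProvider;
import org.conscrypt.Conscrypt;

import java.io.File;
import java.security.Security;

public final class ConscryptTlsSketch {
    public static SslContext buildServerContext() throws Exception {
        // Make Conscrypt the preferred provider so JDK-path SSLEngines
        // are backed by Conscrypt rather than the platform default.
        Security.insertProviderAt(Conscrypt.newProvider(), 1);
        // Placeholder paths -- substitute your own certificate material.
        return SslContextBuilder
                .forServer(new File("cert-chain.pem"), new File("private-key.pem"))
                .sslProvider(SslProvider.JDK)
                .build();
    }
}
```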
[ "handler/src/main/java/io/netty/handler/ssl/Conscrypt.java" ]
[ "handler/src/main/java/io/netty/handler/ssl/Conscrypt.java" ]
[]
diff --git a/handler/src/main/java/io/netty/handler/ssl/Conscrypt.java b/handler/src/main/java/io/netty/handler/ssl/Conscrypt.java index 9c9e2bba62c..c4c9182c41a 100644 --- a/handler/src/main/java/io/netty/handler/ssl/Conscrypt.java +++ b/handler/src/main/java/io/netty/handler/ssl/Conscrypt.java @@ -56,7 +56,8 @@ private static boolean canInstanceProvider() { * Indicates whether or not conscrypt is available on the current system. */ static boolean isAvailable() { - return CAN_INSTANCE_PROVIDER && IS_CONSCRYPT_SSLENGINE != null && PlatformDependent.javaVersion() >= 8; + return CAN_INSTANCE_PROVIDER && IS_CONSCRYPT_SSLENGINE != null && + (PlatformDependent.javaVersion() >= 8 || PlatformDependent.isAndroid()); } static boolean isEngineSupported(SSLEngine engine) {
null
train
test
"2020-04-08T12:04:48"
"2020-04-10T15:28:01Z"
gnarea
val
netty/netty/10202_10207
netty/netty
netty/netty/10202
netty/netty/10207
[ "keyword_pr_to_issue" ]
5fa5ce34e1580932c54cde56dd51c207288edde3
4f72cdf2333f682410e23e1eb5d77bc66b186cf4
[ "@fabienrenaud we love contributions :) So if you are interested maybe you want to provide a PR", "PR: https://github.com/netty/netty/pull/10207" ]
[ "~let's not change the default values used here.~", "2020", "ah never mind... this is because we use the one configured by the system in `DnsNameResolver` then ", "@fabienrenaud I wonder if we should better do this as part of the builder ?", "Doable. \r\nBut I'd need to keep this `ndots >= 0 ? ndots : DEFAULT_NDOTS` (with DEFAULT_NDOTS loaded from resolv.conf) just in case some users invoke the deprecated constructors with `ndots=-1` and expect the value to be loaded from file automatically.\r\nLet me know which way you prefer.", "why not use `int` ? ", "why not use `int` ? ", "why not use `int` ? ", "why not use `int` ? ", "`null` values are used for error control when parsing from strings failed for example (and not override defaults).\r\nBut I see you don't like it (I'm not fond of it either), so I moved catching `NumberFormatException` from `parseResIntOption` to `parseResOption`.", "never mind... lets keep it as you did.", "nit: make all of the methods package-private as the class is package-private as well. This also ensures we no expose these by mistake at some point. ", "add a comment why its ok to ignore. ", "done", "done" ]
"2020-04-24T05:41:01Z"
[]
DNS Resolver - Honor etc/resolv.conf options timeout, rotate and attempts
The [/etc/resolv.conf](https://linux.die.net/man/5/resolv.conf) `options` directive supports `timeout`, `rotate` and `attempts`, but the [UnixResolverDnsServerAddressStreamProvider](https://github.com/netty/netty/blob/4.1/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java#L292) ignores these options. They should be parsed and honored according to their documented behavior.
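For concreteness, a hypothetical `/etc/resolv.conf` exercising these options (this sample is illustrative, not taken from the issue) could look like:

```
nameserver 127.0.0.2
nameserver 127.0.0.3
options timeout:2 attempts:3 rotate ndots:2
```

Per the man page, `timeout` caps each query's wait in seconds, `attempts` bounds how many times the resolver retries its name servers, and `rotate` round-robins queries across the listed servers instead of always starting from the first.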
[ "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java", "resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java" ]
[ "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java", "resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java", "resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverOptions.java" ]
[ "resolver-dns/src/test/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProviderTest.java" ]
diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java index 67f85dcce6a..ca3668a6b7d 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java @@ -77,9 +77,9 @@ import java.util.Enumeration; import java.util.Iterator; import java.util.List; +import java.util.concurrent.TimeUnit; import static io.netty.resolver.dns.DefaultDnsServerAddressStreamProvider.DNS_PORT; -import static io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider.parseEtcResolverFirstNdots; import static io.netty.util.internal.ObjectUtil.checkNotNull; import static io.netty.util.internal.ObjectUtil.checkPositive; @@ -111,7 +111,7 @@ public class DnsNameResolver extends InetNameResolver { static final ResolvedAddressTypes DEFAULT_RESOLVE_ADDRESS_TYPES; static final String[] DEFAULT_SEARCH_DOMAINS; - private static final int DEFAULT_NDOTS; + private static final UnixResolverOptions DEFAULT_OPTIONS; static { if (NetUtil.isIpV4StackPreferred() || !anyInterfaceSupportsIpV6()) { @@ -141,13 +141,13 @@ public class DnsNameResolver extends InetNameResolver { } DEFAULT_SEARCH_DOMAINS = searchDomains; - int ndots; + UnixResolverOptions options; try { - ndots = parseEtcResolverFirstNdots(); + options = UnixResolverDnsServerAddressStreamProvider.parseEtcResolverOptions(); } catch (Exception ignore) { - ndots = UnixResolverDnsServerAddressStreamProvider.DEFAULT_NDOTS; + options = UnixResolverOptions.newBuilder().build(); } - DEFAULT_NDOTS = ndots; + DEFAULT_OPTIONS = options; } /** @@ -383,10 +383,12 @@ public DnsNameResolver( boolean decodeIdn, boolean completeOncePreferredResolved) { super(eventLoop); - this.queryTimeoutMillis = checkPositive(queryTimeoutMillis, "queryTimeoutMillis"); + this.queryTimeoutMillis = queryTimeoutMillis > 0 + ? queryTimeoutMillis + : TimeUnit.SECONDS.toMillis(DEFAULT_OPTIONS.timeout()); this.resolvedAddressTypes = resolvedAddressTypes != null ? resolvedAddressTypes : DEFAULT_RESOLVE_ADDRESS_TYPES; this.recursionDesired = recursionDesired; - this.maxQueriesPerResolve = checkPositive(maxQueriesPerResolve, "maxQueriesPerResolve"); + this.maxQueriesPerResolve = maxQueriesPerResolve > 0 ? maxQueriesPerResolve : DEFAULT_OPTIONS.attempts(); this.maxPayloadSize = checkPositive(maxPayloadSize, "maxPayloadSize"); this.optResourceEnabled = optResourceEnabled; this.hostsFileEntriesResolver = checkNotNull(hostsFileEntriesResolver, "hostsFileEntriesResolver"); @@ -401,7 +403,7 @@ public DnsNameResolver( dnsQueryLifecycleObserverFactory) : checkNotNull(dnsQueryLifecycleObserverFactory, "dnsQueryLifecycleObserverFactory"); this.searchDomains = searchDomains != null ? searchDomains.clone() : DEFAULT_SEARCH_DOMAINS; - this.ndots = ndots >= 0 ? ndots : DEFAULT_NDOTS; + this.ndots = ndots >= 0 ? 
ndots : DEFAULT_OPTIONS.ndots(); this.decodeIdn = decodeIdn; this.completeOncePreferredResolved = completeOncePreferredResolved; this.socketChannelFactory = socketChannelFactory; diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java index 4d3961221e4..332aaffe97a 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java @@ -29,7 +29,6 @@ import java.util.Arrays; import java.util.List; -import static io.netty.resolver.dns.DnsServerAddressStreamProviders.platformDefault; import static io.netty.util.internal.ObjectUtil.checkNotNull; import static io.netty.util.internal.ObjectUtil.intValue; @@ -46,16 +45,17 @@ public final class DnsNameResolverBuilder { private Integer minTtl; private Integer maxTtl; private Integer negativeTtl; - private long queryTimeoutMillis = 5000; + private long queryTimeoutMillis = -1; private ResolvedAddressTypes resolvedAddressTypes = DnsNameResolver.DEFAULT_RESOLVE_ADDRESS_TYPES; private boolean completeOncePreferredResolved; private boolean recursionDesired = true; - private int maxQueriesPerResolve = 16; + private int maxQueriesPerResolve = -1; private boolean traceEnabled; private int maxPayloadSize = 4096; private boolean optResourceEnabled = true; private HostsFileEntriesResolver hostsFileEntriesResolver = HostsFileEntriesResolver.DEFAULT; - private DnsServerAddressStreamProvider dnsServerAddressStreamProvider = platformDefault(); + private DnsServerAddressStreamProvider dnsServerAddressStreamProvider = + DnsServerAddressStreamProviders.platformDefault(); private DnsQueryLifecycleObserverFactory dnsQueryLifecycleObserverFactory = NoopDnsQueryLifecycleObserverFactory.INSTANCE; private String[] searchDomains; diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java b/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java index feac4b9145f..0bbe9136af0 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java @@ -46,19 +46,22 @@ public final class UnixResolverDnsServerAddressStreamProvider implements DnsServerAddressStreamProvider { private static final InternalLogger logger = InternalLoggerFactory.getInstance(UnixResolverDnsServerAddressStreamProvider.class); + + private static final Pattern WHITESPACE_PATTERN = Pattern.compile("\\s+"); + private static final String RES_OPTIONS = System.getenv("RES_OPTIONS"); + private static final String ETC_RESOLV_CONF_FILE = "/etc/resolv.conf"; private static final String ETC_RESOLVER_DIR = "/etc/resolver"; private static final String NAMESERVER_ROW_LABEL = "nameserver"; private static final String SORTLIST_ROW_LABEL = "sortlist"; - private static final String OPTIONS_ROW_LABEL = "options"; + private static final String OPTIONS_ROW_LABEL = "options "; + private static final String OPTIONS_ROTATE_FLAG = "rotate"; private static final String DOMAIN_ROW_LABEL = "domain"; private static final String SEARCH_ROW_LABEL = "search"; private static final String PORT_ROW_LABEL = "port"; - private static final String NDOTS_LABEL = "ndots:"; - static final int DEFAULT_NDOTS = 1; + private final DnsServerAddresses defaultNameServerAddresses; private final Map<String, 
DnsServerAddresses> domainToNameServerStreamMap; - private static final Pattern SEARCH_DOMAIN_PATTERN = Pattern.compile("\\s+"); /** * Attempt to parse {@code /etc/resolv.conf} and files in the {@code /etc/resolver} directory by default. @@ -154,6 +157,7 @@ private boolean mayOverrideNameServers() { private static Map<String, DnsServerAddresses> parse(File... etcResolverFiles) throws IOException { Map<String, DnsServerAddresses> domainToNameServerStreamMap = new HashMap<String, DnsServerAddresses>(etcResolverFiles.length << 1); + boolean rotateGlobal = RES_OPTIONS != null && RES_OPTIONS.contains(OPTIONS_ROTATE_FLAG); for (File etcResolverFile : etcResolverFiles) { if (!etcResolverFile.isFile()) { continue; @@ -164,6 +168,7 @@ private static Map<String, DnsServerAddresses> parse(File... etcResolverFiles) t br = new BufferedReader(fr); List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>(2); String domainName = etcResolverFile.getName(); + boolean rotate = rotateGlobal; int port = DNS_PORT; String line; while ((line = br.readLine()) != null) { @@ -173,7 +178,9 @@ private static Map<String, DnsServerAddresses> parse(File... etcResolverFiles) t if (line.isEmpty() || (c = line.charAt(0)) == '#' || c == ';') { continue; } - if (line.startsWith(NAMESERVER_ROW_LABEL)) { + if (!rotate && line.startsWith(OPTIONS_ROW_LABEL)) { + rotate = line.contains(OPTIONS_ROTATE_FLAG); + } else if (line.startsWith(NAMESERVER_ROW_LABEL)) { int i = indexOfNonWhiteSpace(line, NAMESERVER_ROW_LABEL.length()); if (i < 0) { throw new IllegalArgumentException("error parsing label " + NAMESERVER_ROW_LABEL + @@ -212,7 +219,7 @@ private static Map<String, DnsServerAddresses> parse(File... etcResolverFiles) t } domainName = line.substring(i); if (!addresses.isEmpty()) { - putIfAbsent(domainToNameServerStreamMap, domainName, addresses); + putIfAbsent(domainToNameServerStreamMap, domainName, addresses, rotate); } addresses = new ArrayList<InetSocketAddress>(2); } else if (line.startsWith(PORT_ROW_LABEL)) { @@ -230,7 +237,7 @@ private static Map<String, DnsServerAddresses> parse(File... etcResolverFiles) t } } if (!addresses.isEmpty()) { - putIfAbsent(domainToNameServerStreamMap, domainName, addresses); + putIfAbsent(domainToNameServerStreamMap, domainName, addresses, rotate); } } finally { if (br == null) { @@ -245,9 +252,13 @@ private static Map<String, DnsServerAddresses> parse(File... etcResolverFiles) t private static void putIfAbsent(Map<String, DnsServerAddresses> domainToNameServerStreamMap, String domainName, - List<InetSocketAddress> addresses) { + List<InetSocketAddress> addresses, + boolean rotate) { // TODO(scott): sortlist is being ignored. - putIfAbsent(domainToNameServerStreamMap, domainName, DnsServerAddresses.sequential(addresses)); + DnsServerAddresses addrs = rotate + ? DnsServerAddresses.rotational(addresses) + : DnsServerAddresses.sequential(addresses); + putIfAbsent(domainToNameServerStreamMap, domainName, addrs); } private static void putIfAbsent(Map<String, DnsServerAddresses> domainToNameServerStreamMap, @@ -264,25 +275,25 @@ private static void putIfAbsent(Map<String, DnsServerAddresses> domainToNameServ } /** - * Parse a file of the format <a href="https://linux.die.net/man/5/resolver">/etc/resolv.conf</a> and return the - * value corresponding to the first ndots in an options configuration. - * @return the value corresponding to the first ndots in an options configuration, or {@link #DEFAULT_NDOTS} if not - * found. 
+ * Parse <a href="https://linux.die.net/man/5/resolver">/etc/resolv.conf</a> and return options of interest, namely: + * timeout, attempts and ndots. + * @return The options values provided by /etc/resolve.conf. * @throws IOException If a failure occurs parsing the file. */ - static int parseEtcResolverFirstNdots() throws IOException { - return parseEtcResolverFirstNdots(new File(ETC_RESOLV_CONF_FILE)); + static UnixResolverOptions parseEtcResolverOptions() throws IOException { + return parseEtcResolverOptions(new File(ETC_RESOLV_CONF_FILE)); } /** - * Parse a file of the format <a href="https://linux.die.net/man/5/resolver">/etc/resolv.conf</a> and return the - * value corresponding to the first ndots in an options configuration. + * Parse a file of the format <a href="https://linux.die.net/man/5/resolver">/etc/resolv.conf</a> and return options + * of interest, namely: timeout, attempts and ndots. * @param etcResolvConf a file of the format <a href="https://linux.die.net/man/5/resolver">/etc/resolv.conf</a>. - * @return the value corresponding to the first ndots in an options configuration, or {@link #DEFAULT_NDOTS} if not - * found. + * @return The options values provided by /etc/resolve.conf. * @throws IOException If a failure occurs parsing the file. */ - static int parseEtcResolverFirstNdots(File etcResolvConf) throws IOException { + static UnixResolverOptions parseEtcResolverOptions(File etcResolvConf) throws IOException { + UnixResolverOptions.Builder optionsBuilder = UnixResolverOptions.newBuilder(); + FileReader fr = new FileReader(etcResolvConf); BufferedReader br = null; try { @@ -290,12 +301,7 @@ static int parseEtcResolverFirstNdots(File etcResolvConf) throws IOException { String line; while ((line = br.readLine()) != null) { if (line.startsWith(OPTIONS_ROW_LABEL)) { - int i = line.indexOf(NDOTS_LABEL); - if (i >= 0) { - i += NDOTS_LABEL.length(); - final int j = line.indexOf(' ', i); - return Integer.parseInt(line.substring(i, j < 0 ? line.length() : j)); - } + parseResOptions(line.substring(OPTIONS_ROW_LABEL.length()), optionsBuilder); break; } } @@ -306,7 +312,35 @@ static int parseEtcResolverFirstNdots(File etcResolvConf) throws IOException { br.close(); } } - return DEFAULT_NDOTS; + + // amend options + if (RES_OPTIONS != null) { + parseResOptions(RES_OPTIONS, optionsBuilder); + } + + return optionsBuilder.build(); + } + + private static void parseResOptions(String line, UnixResolverOptions.Builder builder) { + String[] opts = WHITESPACE_PATTERN.split(line); + for (String opt : opts) { + try { + if (opt.startsWith("ndots:")) { + builder.setNdots(parseResIntOption(opt, "ndots:")); + } else if (opt.startsWith("attempts:")) { + builder.setAttempts(parseResIntOption(opt, "attempts:")); + } else if (opt.startsWith("timeout:")) { + builder.setTimeout(parseResIntOption(opt, "timeout:")); + } + } catch (NumberFormatException ignore) { + // skip bad int values from resolv.conf to keep value already set in UnixResolverOptions + } + } + } + + private static int parseResIntOption(String opt, String fullLabel) { + String optValue = opt.substring(fullLabel.length()); + return Integer.parseInt(optValue); } /** @@ -346,7 +380,7 @@ static List<String> parseEtcResolverSearchDomains(File etcResolvConf) throws IOE if (i >= 0) { // May contain more then one entry, either seperated by whitespace or tab. 
// See https://linux.die.net/man/5/resolver - String[] domains = SEARCH_DOMAIN_PATTERN.split(line.substring(i)); + String[] domains = WHITESPACE_PATTERN.split(line.substring(i)); Collections.addAll(searchDomains, domains); } } @@ -364,4 +398,5 @@ static List<String> parseEtcResolverSearchDomains(File etcResolvConf) throws IOE ? Collections.singletonList(localDomain) : searchDomains; } + } diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverOptions.java b/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverOptions.java new file mode 100644 index 00000000000..dcfedd8baa2 --- /dev/null +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverOptions.java @@ -0,0 +1,86 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.resolver.dns; + +/** + * Represents options defined in a file of the format <a href=https://linux.die.net/man/5/resolver>etc/resolv.conf</a>. + */ +final class UnixResolverOptions { + + private final int ndots; + private final int timeout; + private final int attempts; + + UnixResolverOptions(int ndots, int timeout, int attempts) { + this.ndots = ndots; + this.timeout = timeout; + this.attempts = attempts; + } + + static UnixResolverOptions.Builder newBuilder() { + return new UnixResolverOptions.Builder(); + } + + /** + * The number of dots which must appear in a name before an initial absolute query is made. + * The default value is {@code 1}. + */ + int ndots() { + return ndots; + } + + /** + * The timeout of each DNS query performed by this resolver (in seconds). + * The default value is {@code 5}. + */ + int timeout() { + return timeout; + } + + /** + * The maximum allowed number of DNS queries to send when resolving a host name. + * The default value is {@code 16}. + */ + int attempts() { + return attempts; + } + + static final class Builder { + + private int ndots = 1; + private int timeout = 5; + private int attempts = 16; + + private Builder() { + } + + void setNdots(int ndots) { + this.ndots = ndots; + } + + void setTimeout(int timeout) { + this.timeout = timeout; + } + + void setAttempts(int attempts) { + this.attempts = attempts; + } + + UnixResolverOptions build() { + return new UnixResolverOptions(ndots, timeout, attempts); + } + } +}
diff --git a/resolver-dns/src/test/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProviderTest.java b/resolver-dns/src/test/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProviderTest.java index 6bb133a11aa..97e3d8a75fb 100644 --- a/resolver-dns/src/test/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProviderTest.java +++ b/resolver-dns/src/test/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProviderTest.java @@ -29,8 +29,7 @@ import java.util.Collections; import java.util.List; -import static io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider.DEFAULT_NDOTS; -import static io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider.parseEtcResolverFirstNdots; +import static io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider.parseEtcResolverOptions; import static org.junit.Assert.assertEquals; public class UnixResolverDnsServerAddressStreamProviderTest { @@ -50,6 +49,62 @@ public void defaultLookupShouldReturnResultsIfOnlySingleFileSpecified() throws E assertHostNameEquals("127.0.0.3", stream.next()); } + @Test + public void nameServerAddressStreamShouldBeRotationalWhenRotationOptionsIsPresent() throws Exception { + File f = buildFile("options rotate\n" + + "domain linecorp.local\n" + + "nameserver 127.0.0.2\n" + + "nameserver 127.0.0.3\n" + + "nameserver 127.0.0.4\n"); + UnixResolverDnsServerAddressStreamProvider p = + new UnixResolverDnsServerAddressStreamProvider(f, null); + + DnsServerAddressStream stream = p.nameServerAddressStream(""); + assertHostNameEquals("127.0.0.2", stream.next()); + assertHostNameEquals("127.0.0.3", stream.next()); + assertHostNameEquals("127.0.0.4", stream.next()); + + stream = p.nameServerAddressStream(""); + assertHostNameEquals("127.0.0.3", stream.next()); + assertHostNameEquals("127.0.0.4", stream.next()); + assertHostNameEquals("127.0.0.2", stream.next()); + + stream = p.nameServerAddressStream(""); + assertHostNameEquals("127.0.0.4", stream.next()); + assertHostNameEquals("127.0.0.2", stream.next()); + assertHostNameEquals("127.0.0.3", stream.next()); + + stream = p.nameServerAddressStream(""); + assertHostNameEquals("127.0.0.2", stream.next()); + assertHostNameEquals("127.0.0.3", stream.next()); + assertHostNameEquals("127.0.0.4", stream.next()); + } + + @Test + public void nameServerAddressStreamShouldAlwaysStartFromTheTopWhenRotationOptionsIsAbsent() throws Exception { + File f = buildFile("domain linecorp.local\n" + + "nameserver 127.0.0.2\n" + + "nameserver 127.0.0.3\n" + + "nameserver 127.0.0.4\n"); + UnixResolverDnsServerAddressStreamProvider p = + new UnixResolverDnsServerAddressStreamProvider(f, null); + + DnsServerAddressStream stream = p.nameServerAddressStream(""); + assertHostNameEquals("127.0.0.2", stream.next()); + assertHostNameEquals("127.0.0.3", stream.next()); + assertHostNameEquals("127.0.0.4", stream.next()); + + stream = p.nameServerAddressStream(""); + assertHostNameEquals("127.0.0.2", stream.next()); + assertHostNameEquals("127.0.0.3", stream.next()); + assertHostNameEquals("127.0.0.4", stream.next()); + + stream = p.nameServerAddressStream(""); + assertHostNameEquals("127.0.0.2", stream.next()); + assertHostNameEquals("127.0.0.3", stream.next()); + assertHostNameEquals("127.0.0.4", stream.next()); + } + @Test public void defaultReturnedWhenNoBetterMatch() throws Exception { File f = buildFile("domain linecorp.local\n" + @@ -83,23 +138,63 @@ public void moreRefinedSelectionReturnedWhenMatch() throws Exception { } @Test - public void 
ndotsIsParsedIfPresent() throws IOException { + public void ndotsOptionIsParsedIfPresent() throws IOException { + File f = buildFile("search localdomain\n" + + "nameserver 127.0.0.11\n" + + "options ndots:0\n"); + assertEquals(0, parseEtcResolverOptions(f).ndots()); + + f = buildFile("search localdomain\n" + + "nameserver 127.0.0.11\n" + + "options ndots:123 foo:goo\n"); + assertEquals(123, parseEtcResolverOptions(f).ndots()); + } + + @Test + public void defaultValueReturnedIfNdotsOptionsNotPresent() throws IOException { + File f = buildFile("search localdomain\n" + + "nameserver 127.0.0.11\n"); + assertEquals(1, parseEtcResolverOptions(f).ndots()); + } + + @Test + public void timeoutOptionIsParsedIfPresent() throws IOException { + File f = buildFile("search localdomain\n" + + "nameserver 127.0.0.11\n" + + "options timeout:0\n"); + assertEquals(0, parseEtcResolverOptions(f).timeout()); + + f = buildFile("search localdomain\n" + + "nameserver 127.0.0.11\n" + + "options foo:bar timeout:124\n"); + assertEquals(124, parseEtcResolverOptions(f).timeout()); + } + + @Test + public void defaultValueReturnedIfTimeoutOptionsIsNotPresent() throws IOException { + File f = buildFile("search localdomain\n" + + "nameserver 127.0.0.11\n"); + assertEquals(5, parseEtcResolverOptions(f).timeout()); + } + + @Test + public void attemptsOptionIsParsedIfPresent() throws IOException { File f = buildFile("search localdomain\n" + - "nameserver 127.0.0.11\n" + - "options ndots:0\n"); - assertEquals(0, parseEtcResolverFirstNdots(f)); + "nameserver 127.0.0.11\n" + + "options attempts:0\n"); + assertEquals(0, parseEtcResolverOptions(f).attempts()); f = buildFile("search localdomain\n" + - "nameserver 127.0.0.11\n" + - "options ndots:123 foo:goo\n"); - assertEquals(123, parseEtcResolverFirstNdots(f)); + "nameserver 127.0.0.11\n" + + "options foo:bar attempts:12\n"); + assertEquals(12, parseEtcResolverOptions(f).attempts()); } @Test - public void defaultValueReturnedIfNdotsNotPresent() throws IOException { + public void defaultValueReturnedIfAttemptsOptionsIsNotPresent() throws IOException { File f = buildFile("search localdomain\n" + - "nameserver 127.0.0.11\n"); - assertEquals(DEFAULT_NDOTS, parseEtcResolverFirstNdots(f)); + "nameserver 127.0.0.11\n"); + assertEquals(16, parseEtcResolverOptions(f).attempts()); } @Test
test
test
"2020-04-27T13:59:01"
"2020-04-22T19:46:13Z"
fabienrenaud
val
netty/netty/10200_10209
netty/netty
netty/netty/10200
netty/netty/10209
[ "keyword_pr_to_issue" ]
5fa5ce34e1580932c54cde56dd51c207288edde3
c354fa48e10de847cf17c10083a2cbc2c0a63a36
[]
[ "@fabienrenaud let's use `buf.retainedSlice()`", "right...\r\ndone!", "@fabienrenaud Can you add a comment that its only fine to not copy here as we will never call `undecodedChunk.writeBytes(...)` later on ?", "same comment as above ", "added", "added", "@fabienrenaud why this change ?", "the previous sequence was failing decoding immediately (on the first character). \r\nthe new sequence causes the decoder to create a few new/sliced ByteBufs before failing the decoding of the last entry `==`.\r\nin other words, the check now increases the chance of catching a leak caused by failing to decode the Nth element instead of just the first one.", "got it... thanks " ]
"2020-04-24T08:19:04Z"
[]
HttpPostRequestDecoder: copy-free implementation
`HttpPostRequestDecoder` decoder implementations (`HttpPostStandardRequestDecoder` and `HttpPostMultipartRequestDecoder`) both do a full copy of the `ByteBuf` before decoding. ``` public HttpPostStandardRequestDecoder offer(HttpContent content) { checkDestroyed(); // Maybe we should better not copy here for performance reasons but this will need // more care by the caller to release the content in a correct manner later // So maybe something to optimize on a later stage ByteBuf buf = content.content(); if (undecodedChunk == null) { undecodedChunk = buf.copy(); } else ... ``` https://github.com/netty/netty/blob/dcbfe17eeb588a8ba2acb95800776526d01cc5a2/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java#L283 https://github.com/netty/netty/blob/dcbfe17eeb588a8ba2acb95800776526d01cc5a2/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L324 They also use unpooled buffers very broadly: ``` import static io.netty.buffer.Unpooled.*; ``` These classes should be reimplemented so that they do not require copying the ByteBuf, and should use the default ByteBuf allocator instead of Unpooled.
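For context, the change the linked PR lands on can be condensed into the following sketch of `offer(...)` (names and logic are taken from the gold patch later in this record; the `discardThreshold` bookkeeping of the full method is omitted here). The key observation: when the first chunk is also the last one, a retained slice is safe because `undecodedChunk.writeBytes(...)` will never be called afterwards.
```
public HttpPostStandardRequestDecoder offer(HttpContent content) {
    checkDestroyed();
    if (content instanceof LastHttpContent) {
        isLastChunk = true;
    }
    ByteBuf buf = content.content();
    if (undecodedChunk == null) {
        // A slice (instead of a copy) is only safe because writeBytes(...)
        // is never called on undecodedChunk once the last chunk has arrived.
        undecodedChunk = isLastChunk ? buf.retainedSlice() : buf.copy();
    } else {
        undecodedChunk.writeBytes(buf);
    }
    parseBody();
    return this;
}
```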
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index 4d59e4d47e0..95079f25fd6 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -321,18 +321,23 @@ public InterfaceHttpData getBodyHttpData(String name) { public HttpPostMultipartRequestDecoder offer(HttpContent content) { checkDestroyed(); - // Maybe we should better not copy here for performance reasons but this will need - // more care by the caller to release the content in a correct manner later - // So maybe something to optimize on a later stage + if (content instanceof LastHttpContent) { + isLastChunk = true; + } + ByteBuf buf = content.content(); if (undecodedChunk == null) { - undecodedChunk = buf.copy(); + undecodedChunk = isLastChunk + // Take a slice instead of copying when the first chunk is also the last + // as undecodedChunk.writeBytes will never be called. + ? buf.retainedSlice() + // Maybe we should better not copy here for performance reasons but this will need + // more care by the caller to release the content in a correct manner later + // So maybe something to optimize on a later stage + : buf.copy(); } else { undecodedChunk.writeBytes(buf); } - if (content instanceof LastHttpContent) { - isLastChunk = true; - } parseBody(); if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) { undecodedChunk.discardReadBytes(); @@ -1315,7 +1320,7 @@ private static boolean loadDataMultipartStandard(ByteBuf undecodedChunk, String if (prevByte == HttpConstants.CR) { lastPosition--; } - ByteBuf content = undecodedChunk.copy(startReaderIndex, lastPosition - startReaderIndex); + ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex); try { httpData.addContent(content, delimiterFound); } catch (IOException e) { @@ -1364,7 +1369,7 @@ private static boolean loadDataMultipart(ByteBuf undecodedChunk, String delimite lastRealPos--; } final int lastPosition = sao.getReadPosition(lastRealPos); - final ByteBuf content = undecodedChunk.copy(startReaderIndex, lastPosition - startReaderIndex); + final ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex); try { httpData.addContent(content, delimiterFound); } catch (IOException e) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java index 7b94a7c8a32..deb35a66f39 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java @@ -26,7 +26,10 @@ import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.ErrorDataDecoderException; import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.MultiPartStatus; import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.NotEnoughDataDecoderException; +import io.netty.util.ByteProcessor; +import io.netty.util.CharsetUtil; import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.StringUtil; import java.io.IOException; import java.nio.charset.Charset; @@ -280,18 
+283,23 @@ public InterfaceHttpData getBodyHttpData(String name) { public HttpPostStandardRequestDecoder offer(HttpContent content) { checkDestroyed(); - // Maybe we should better not copy here for performance reasons but this will need - // more care by the caller to release the content in a correct manner later - // So maybe something to optimize on a later stage + if (content instanceof LastHttpContent) { + isLastChunk = true; + } + ByteBuf buf = content.content(); if (undecodedChunk == null) { - undecodedChunk = buf.copy(); + undecodedChunk = isLastChunk + // Take a slice instead of copying when the first chunk is also the last + // as undecodedChunk.writeBytes will never be called. + ? buf.retainedSlice() + // Maybe we should better not copy here for performance reasons but this will need + // more care by the caller to release the content in a correct manner later + // So maybe something to optimize on a later stage + : buf.copy(); } else { undecodedChunk.writeBytes(buf); } - if (content instanceof LastHttpContent) { - isLastChunk = true; - } parseBody(); if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) { undecodedChunk.discardReadBytes(); @@ -429,7 +437,7 @@ private void parseBodyAttributesStandard() { if (read == '&') { currentStatus = MultiPartStatus.DISPOSITION; ampersandpos = currentpos - 1; - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = true; } else if (read == HttpConstants.CR) { @@ -439,7 +447,7 @@ private void parseBodyAttributesStandard() { if (read == HttpConstants.LF) { currentStatus = MultiPartStatus.PREEPILOGUE; ampersandpos = currentpos - 2; - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = false; } else { @@ -452,7 +460,7 @@ private void parseBodyAttributesStandard() { } else if (read == HttpConstants.LF) { currentStatus = MultiPartStatus.PREEPILOGUE; ampersandpos = currentpos - 1; - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = false; } @@ -466,7 +474,7 @@ private void parseBodyAttributesStandard() { // special case ampersandpos = currentpos; if (ampersandpos > firstpos) { - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); } else if (!currentAttribute.isCompleted()) { setFinalBuffer(EMPTY_BUFFER); } @@ -474,7 +482,7 @@ private void parseBodyAttributesStandard() { currentStatus = MultiPartStatus.EPILOGUE; } else if (contRead && currentAttribute != null && currentStatus == MultiPartStatus.FIELD) { // reset index except if to continue in case of FIELD getStatus - currentAttribute.addContent(undecodedChunk.copy(firstpos, currentpos - firstpos), + currentAttribute.addContent(undecodedChunk.retainedSlice(firstpos, currentpos - firstpos), false); firstpos = currentpos; } @@ -546,7 +554,7 @@ private void parseBodyAttributes() { if (read == '&') { currentStatus = MultiPartStatus.DISPOSITION; ampersandpos = currentpos - 1; - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = true; } else 
if (read == HttpConstants.CR) { @@ -557,7 +565,7 @@ private void parseBodyAttributes() { currentStatus = MultiPartStatus.PREEPILOGUE; ampersandpos = currentpos - 2; sao.setReadPosition(0); - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = false; break loop; @@ -575,7 +583,7 @@ private void parseBodyAttributes() { currentStatus = MultiPartStatus.PREEPILOGUE; ampersandpos = currentpos - 1; sao.setReadPosition(0); - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = false; break loop; @@ -592,7 +600,7 @@ private void parseBodyAttributes() { // special case ampersandpos = currentpos; if (ampersandpos > firstpos) { - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); } else if (!currentAttribute.isCompleted()) { setFinalBuffer(EMPTY_BUFFER); } @@ -600,7 +608,7 @@ private void parseBodyAttributes() { currentStatus = MultiPartStatus.EPILOGUE; } else if (contRead && currentAttribute != null && currentStatus == MultiPartStatus.FIELD) { // reset index except if to continue in case of FIELD getStatus - currentAttribute.addContent(undecodedChunk.copy(firstpos, currentpos - firstpos), + currentAttribute.addContent(undecodedChunk.retainedSlice(firstpos, currentpos - firstpos), false); firstpos = currentpos; } @@ -622,8 +630,10 @@ private void parseBodyAttributes() { private void setFinalBuffer(ByteBuf buffer) throws IOException { currentAttribute.addContent(buffer, true); - String value = decodeAttribute(currentAttribute.getByteBuf().toString(charset), charset); - currentAttribute.setValue(value); + ByteBuf decodedBuf = decodeAttribute(currentAttribute.getByteBuf(), charset); + if (decodedBuf != null) { // override content only when ByteBuf needed decoding + currentAttribute.setContent(decodedBuf); + } addHttpData(currentAttribute); currentAttribute = null; } @@ -641,6 +651,28 @@ private static String decodeAttribute(String s, Charset charset) { } } + private static ByteBuf decodeAttribute(ByteBuf b, Charset charset) { + int firstEscaped = b.forEachByte(new ByteProcessor.IndexOfProcessor((byte) '%')); + if (firstEscaped == -1) { + return null; // nothing to decode + } + + ByteBuf buf = b.alloc().buffer(b.readableBytes()); + UrlDecoder urlDecode = new UrlDecoder(buf); + int idx = b.forEachByte(urlDecode); + if (urlDecode.nextEscapedIdx != 0) { // incomplete hex byte + if (idx == -1) { + idx = b.readableBytes() - 1; + } + idx -= urlDecode.nextEscapedIdx - 1; + buf.release(); + throw new ErrorDataDecoderException( + String.format("Invalid hex byte at index '%d' in string: '%s'", idx, b.toString(charset))); + } + + return buf; + } + /** * Destroy the {@link HttpPostStandardRequestDecoder} and release all it resources. After this method * was called it is not possible to operate on it anymore. 
@@ -677,4 +709,39 @@ public void removeHttpDataFromClean(InterfaceHttpData data) { factory.removeHttpDataFromClean(request, data); } + + private static final class UrlDecoder implements ByteProcessor { + + private final ByteBuf output; + private int nextEscapedIdx; + private byte hiByte; + + UrlDecoder(ByteBuf output) { + this.output = output; + } + + @Override + public boolean process(byte value) { + if (nextEscapedIdx != 0) { + if (nextEscapedIdx == 1) { + hiByte = value; + ++nextEscapedIdx; + } else { + int hi = StringUtil.decodeHexNibble((char) hiByte); + int lo = StringUtil.decodeHexNibble((char) value); + if (hi == -1 || lo == -1) { + ++nextEscapedIdx; + return false; + } + output.writeByte((hi << 4) + lo); + nextEscapedIdx = 0; + } + } else if (value == '%') { + nextEscapedIdx = 1; + } else { + output.writeByte(value); + } + return true; + } + } }
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java index 40771e017ad..7efdc55849e 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java @@ -25,7 +25,6 @@ import io.netty.handler.codec.http.DefaultHttpRequest; import io.netty.handler.codec.http.DefaultLastHttpContent; import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.HttpContent; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpMethod; @@ -39,13 +38,11 @@ import java.nio.charset.UnsupportedCharsetException; import java.util.Arrays; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.Assert.*; -/** {@link HttpPostRequestDecoder} test case. */ +/** + * {@link HttpPostRequestDecoder} test case. + */ public class HttpPostRequestDecoderTest { @Test @@ -80,11 +77,11 @@ private static void testBinaryStreamUpload(boolean withSpace) throws Exception { for (String data : Arrays.asList("", "\r", "\r\r", "\r\r\r")) { final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename=\"tmp-0.txt\"\r\n" + - "Content-Type: image/gif\r\n" + - "\r\n" + - data + "\r\n" + - "--" + boundary + "--\r\n"; + "Content-Disposition: form-data; name=\"file\"; filename=\"tmp-0.txt\"\r\n" + + "Content-Type: image/gif\r\n" + + "\r\n" + + data + "\r\n" + + "--" + boundary + "--\r\n"; // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); @@ -100,7 +97,7 @@ private static void testBinaryStreamUpload(boolean withSpace) throws Exception { // Validate data has been parsed correctly as it was passed into request. assertEquals("Invalid decoded data [data=" + data.replaceAll("\r", "\\\\r") + ", upload=" + upload + ']', - data, upload.getString(CharsetUtil.UTF_8)); + data, upload.getString(CharsetUtil.UTF_8)); upload.release(); decoder.destroy(); } @@ -225,28 +222,28 @@ public void testNoZeroOut() throws Exception { final DefaultHttpDataFactory aMemFactory = new DefaultHttpDataFactory(false); DefaultHttpRequest aRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost"); + HttpMethod.POST, + "http://localhost"); aRequest.headers().set(HttpHeaderNames.CONTENT_TYPE, - "multipart/form-data; boundary=" + boundary); + "multipart/form-data; boundary=" + boundary); aRequest.headers().set(HttpHeaderNames.TRANSFER_ENCODING, - HttpHeaderValues.CHUNKED); + HttpHeaderValues.CHUNKED); HttpPostRequestDecoder aDecoder = new HttpPostRequestDecoder(aMemFactory, aRequest); final String aData = "some data would be here. the data should be long enough that it " + - "will be longer than the original buffer length of 256 bytes in " + - "the HttpPostRequestDecoder in order to trigger the issue. Some more " + - "data just to be on the safe side."; + "will be longer than the original buffer length of 256 bytes in " + + "the HttpPostRequestDecoder in order to trigger the issue. 
Some more " + + "data just to be on the safe side."; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"root\"\r\n" + - "Content-Type: text/plain\r\n" + - "\r\n" + - aData + - "\r\n" + - "--" + boundary + "--\r\n"; + "Content-Disposition: form-data; name=\"root\"\r\n" + + "Content-Type: text/plain\r\n" + + "\r\n" + + aData + + "\r\n" + + "--" + boundary + "--\r\n"; byte[] aBytes = body.getBytes(); @@ -279,7 +276,7 @@ public void testNoZeroOut() throws Exception { // See https://github.com/netty/netty/issues/2305 @Test public void testChunkCorrect() throws Exception { - String payload = "town=794649819&town=784444184&town=794649672&town=794657800&town=" + + String payload = "town=794649819&town=784444184&town=794649672&town=794657800&town=" + "794655734&town=794649377&town=794652136&town=789936338&town=789948986&town=" + "789949643&town=786358677&town=794655880&town=786398977&town=789901165&town=" + "789913325&town=789903418&town=789903579&town=794645251&town=794694126&town=" + @@ -308,26 +305,44 @@ public void testChunkCorrect() throws Exception { "789958999&town=789961555&town=794694050&town=794650241&town=794656286&town=" + "794692081&town=794660090&town=794665227&town=794665136&town=794669931"; DefaultHttpRequest defaultHttpRequest = - new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(defaultHttpRequest); int firstChunk = 10; int middleChunk = 1024; - HttpContent part1 = new DefaultHttpContent(Unpooled.wrappedBuffer( - payload.substring(0, firstChunk).getBytes())); - HttpContent part2 = new DefaultHttpContent(Unpooled.wrappedBuffer( - payload.substring(firstChunk, firstChunk + middleChunk).getBytes())); - HttpContent part3 = new DefaultHttpContent(Unpooled.wrappedBuffer( - payload.substring(firstChunk + middleChunk, firstChunk + middleChunk * 2).getBytes())); - HttpContent part4 = new DefaultHttpContent(Unpooled.wrappedBuffer( - payload.substring(firstChunk + middleChunk * 2).getBytes())); + byte[] payload1 = payload.substring(0, firstChunk).getBytes(); + byte[] payload2 = payload.substring(firstChunk, firstChunk + middleChunk).getBytes(); + byte[] payload3 = payload.substring(firstChunk + middleChunk, firstChunk + middleChunk * 2).getBytes(); + byte[] payload4 = payload.substring(firstChunk + middleChunk * 2).getBytes(); + + ByteBuf buf1 = Unpooled.directBuffer(payload1.length); + ByteBuf buf2 = Unpooled.directBuffer(payload2.length); + ByteBuf buf3 = Unpooled.directBuffer(payload3.length); + ByteBuf buf4 = Unpooled.directBuffer(payload4.length); + + buf1.writeBytes(payload1); + buf2.writeBytes(payload2); + buf3.writeBytes(payload3); + buf4.writeBytes(payload4); + + decoder.offer(new DefaultHttpContent(buf1)); + decoder.offer(new DefaultHttpContent(buf2)); + decoder.offer(new DefaultHttpContent(buf3)); + decoder.offer(new DefaultLastHttpContent(buf4)); + + assertFalse(decoder.getBodyHttpDatas().isEmpty()); + assertEquals(139, decoder.getBodyHttpDatas().size()); + + Attribute attr = (Attribute) decoder.getBodyHttpData("town"); + assertEquals("794649819", attr.getValue()); - decoder.offer(part1); - decoder.offer(part2); - decoder.offer(part3); - decoder.offer(part4); + decoder.destroy(); + buf1.release(); + buf2.release(); + buf3.release(); + buf4.release(); } // See https://github.com/netty/netty/issues/3326 @@ -360,7 +375,7 @@ public void testFilenameContainingSemicolon() throws Exception { public void 
testFilenameContainingSemicolon2() throws Exception { final String boundary = "dLV9Wyq26L_-JQxk6ferf-RT153LhOO"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, - "http://localhost"); + "http://localhost"); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); // Force to use memory-based data. final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); @@ -368,11 +383,11 @@ public void testFilenameContainingSemicolon2() throws Exception { final String filename = "tmp;0.txt"; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + - "Content-Type: image/gif\r\n" + - "\r\n" + - data + "\r\n" + - "--" + boundary + "--\r\n"; + "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + + "Content-Type: image/gif\r\n" + + "\r\n" + + data + "\r\n" + + "--" + boundary + "--\r\n"; req.content().writeBytes(body.getBytes(CharsetUtil.UTF_8.name())); // Create decoder instance to test. @@ -421,19 +436,19 @@ public void testDecodeOtherMimeHeaderFields() throws Exception { String filecontent = "123456"; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename=" + "\"" + "attached.txt" + "\"" + - "\r\n" + - "Content-Type: application/octet-stream" + "\r\n" + - "Content-Encoding: gzip" + "\r\n" + - "\r\n" + - filecontent + - "\r\n" + - "--" + boundary + "--"; + "Content-Disposition: form-data; name=\"file\"; filename=" + "\"" + "attached.txt" + "\"" + + "\r\n" + + "Content-Type: application/octet-stream" + "\r\n" + + "Content-Encoding: gzip" + "\r\n" + + "\r\n" + + filecontent + + "\r\n" + + "--" + boundary + "--"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost", - Unpooled.wrappedBuffer(body.getBytes())); + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); req.headers().add(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); @@ -452,19 +467,19 @@ public void testDecodeOtherMimeHeaderFields() throws Exception { public void testMultipartRequestWithFileInvalidCharset() throws Exception { final String boundary = "dLV9Wyq26L_-JQxk6ferf-RT153LhOO"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, - "http://localhost"); + "http://localhost"); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); // Force to use memory-based data. final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); final String data = "asdf"; final String filename = "tmp;0.txt"; final String body = - "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + - "Content-Type: image/gif; charset=ABCD\r\n" + - "\r\n" + - data + "\r\n" + - "--" + boundary + "--\r\n"; + "--" + boundary + "\r\n" + + "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + + "Content-Type: image/gif; charset=ABCD\r\n" + + "\r\n" + + data + "\r\n" + + "--" + boundary + "--\r\n"; req.content().writeBytes(body.getBytes(CharsetUtil.UTF_8)); // Create decoder instance to test. 
@@ -482,22 +497,22 @@ public void testMultipartRequestWithFileInvalidCharset() throws Exception { public void testMultipartRequestWithFieldInvalidCharset() throws Exception { final String boundary = "dLV9Wyq26L_-JQxk6ferf-RT153LhOO"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, - "http://localhost"); + "http://localhost"); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); // Force to use memory-based data. final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); final String aData = "some data would be here. the data should be long enough that it " + - "will be longer than the original buffer length of 256 bytes in " + - "the HttpPostRequestDecoder in order to trigger the issue. Some more " + - "data just to be on the safe side."; + "will be longer than the original buffer length of 256 bytes in " + + "the HttpPostRequestDecoder in order to trigger the issue. Some more " + + "data just to be on the safe side."; final String body = - "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"root\"\r\n" + - "Content-Type: text/plain; charset=ABCD\r\n" + - "\r\n" + - aData + - "\r\n" + - "--" + boundary + "--\r\n"; + "--" + boundary + "\r\n" + + "Content-Disposition: form-data; name=\"root\"\r\n" + + "Content-Type: text/plain; charset=ABCD\r\n" + + "\r\n" + + aData + + "\r\n" + + "--" + boundary + "--\r\n"; req.content().writeBytes(body.getBytes(CharsetUtil.UTF_8)); // Create decoder instance to test. @@ -539,16 +554,16 @@ public void testDecodeContentDispositionFieldParameters() throws Exception { String filenameEncoded = URLEncoder.encode(filename, encoding); final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename*=" + encoding + "''" + filenameEncoded + "\r\n" + - "\r\n" + - "foo\r\n" + - "\r\n" + - "--" + boundary + "--"; + "Content-Disposition: form-data; name=\"file\"; filename*=" + encoding + "''" + filenameEncoded + + "\r\n\r\n" + + "foo\r\n" + + "\r\n" + + "--" + boundary + "--"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost", - Unpooled.wrappedBuffer(body.getBytes())); + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); @@ -574,17 +589,17 @@ public void testDecodeWithLanguageContentDispositionFieldParameters() throws Exc String filenameEncoded = URLEncoder.encode(filename, encoding); final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename*=" + - encoding + "'" + language + "'" + filenameEncoded + "\r\n" + - "\r\n" + - "foo\r\n" + - "\r\n" + - "--" + boundary + "--"; + "Content-Disposition: form-data; name=\"file\"; filename*=" + + encoding + "'" + language + "'" + filenameEncoded + "\r\n" + + "\r\n" + + "foo\r\n" + + "\r\n" + + "--" + boundary + "--"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost", - Unpooled.wrappedBuffer(body.getBytes())); + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); final DefaultHttpDataFactory inMemoryFactory = new 
DefaultHttpDataFactory(false); @@ -605,16 +620,16 @@ public void testDecodeMalformedNotEncodedContentDispositionFieldParameters() thr final String boundary = "74e78d11b0214bdcbc2f86491eeb4902"; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename*=not-encoded\r\n" + - "\r\n" + - "foo\r\n" + - "\r\n" + - "--" + boundary + "--"; + "Content-Disposition: form-data; name=\"file\"; filename*=not-encoded\r\n" + + "\r\n" + + "foo\r\n" + + "\r\n" + + "--" + boundary + "--"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost", - Unpooled.wrappedBuffer(body.getBytes())); + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); @@ -637,16 +652,16 @@ public void testDecodeMalformedBadCharsetContentDispositionFieldParameters() thr final String boundary = "74e78d11b0214bdcbc2f86491eeb4902"; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename*=not-a-charset''filename\r\n" + - "\r\n" + - "foo\r\n" + - "\r\n" + - "--" + boundary + "--"; + "Content-Disposition: form-data; name=\"file\"; filename*=not-a-charset''filename\r\n" + + "\r\n" + + "foo\r\n" + + "\r\n" + + "--" + boundary + "--"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost", - Unpooled.wrappedBuffer(body.getBytes())); + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); @@ -667,7 +682,7 @@ public void testDecodeMalformedBadCharsetContentDispositionFieldParameters() thr public void testDecodeMalformedEmptyContentTypeFieldParameters() throws Exception { final String boundary = "dLV9Wyq26L_-JQxk6ferf-RT153LhOO"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, - "http://localhost"); + "http://localhost"); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); // Force to use memory-based data. final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); @@ -675,11 +690,11 @@ public void testDecodeMalformedEmptyContentTypeFieldParameters() throws Exceptio final String filename = "tmp-0.txt"; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + - "Content-Type: \r\n" + - "\r\n" + - data + "\r\n" + - "--" + boundary + "--\r\n"; + "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + + "Content-Type: \r\n" + + "\r\n" + + data + "\r\n" + + "--" + boundary + "--\r\n"; req.content().writeBytes(body.getBytes(CharsetUtil.UTF_8.name())); // Create decoder instance to test. 
@@ -697,15 +712,17 @@ public void testDecodeMalformedEmptyContentTypeFieldParameters() throws Exceptio public void testMultipartRequest() throws Exception { String BOUNDARY = "01f136d9282f"; - ByteBuf byteBuf = Unpooled.wrappedBuffer(("--" + BOUNDARY + "\n" + - "Content-Disposition: form-data; name=\"msg_id\"\n" + - "\n" + - "15200\n" + - "--" + BOUNDARY + "\n" + - "Content-Disposition: form-data; name=\"msg\"\n" + - "\n" + - "test message\n" + - "--" + BOUNDARY + "--").getBytes()); + byte[] bodyBytes = ("--" + BOUNDARY + "\n" + + "Content-Disposition: form-data; name=\"msg_id\"\n" + + "\n" + + "15200\n" + + "--" + BOUNDARY + "\n" + + "Content-Disposition: form-data; name=\"msg\"\n" + + "\n" + + "test message\n" + + "--" + BOUNDARY + "--").getBytes(); + ByteBuf byteBuf = Unpooled.directBuffer(bodyBytes.length); + byteBuf.writeBytes(bodyBytes); FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.POST, "/up", byteBuf); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + BOUNDARY); @@ -718,11 +735,16 @@ public void testMultipartRequest() throws Exception { assertTrue(decoder.isMultipart()); assertFalse(decoder.getBodyHttpDatas().isEmpty()); assertEquals(2, decoder.getBodyHttpDatas().size()); - assertEquals("test message", ((Attribute) decoder.getBodyHttpData("msg")).getValue()); - assertEquals("15200", ((Attribute) decoder.getBodyHttpData("msg_id")).getValue()); + + Attribute attrMsg = (Attribute) decoder.getBodyHttpData("msg"); + assertTrue(attrMsg.getByteBuf().isDirect()); + assertEquals("test message", attrMsg.getValue()); + Attribute attrMsgId = (Attribute) decoder.getBodyHttpData("msg_id"); + assertTrue(attrMsgId.getByteBuf().isDirect()); + assertEquals("15200", attrMsgId.getValue()); decoder.destroy(); - assertEquals(1, req.refCnt()); + assertTrue(req.release()); } @Test(expected = HttpPostRequestDecoder.ErrorDataDecoderException.class) @@ -747,7 +769,7 @@ public void testNotLeakHeapBufferWhenWrapIllegalArgumentException() { } private static void testNotLeakWhenWrapIllegalArgumentException(ByteBuf buf) { - buf.writeCharSequence("==", CharsetUtil.US_ASCII); + buf.writeCharSequence("a=b&foo=%22bar%22&==", CharsetUtil.US_ASCII); FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", buf); try { new HttpPostStandardRequestDecoder(request); @@ -800,7 +822,109 @@ public void testDecodeWithLanguageContentDispositionFieldParametersForFix() thro assertTrue("the item should be a FileUpload", part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); + decoder.destroy(); req.release(); } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBody() throws Exception { + byte[] bodyBytes = "foo=bar&a=b&empty=&city=%3c%22new%22%20york%20city%3e".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(req); + assertFalse(decoder.getBodyHttpDatas().isEmpty()); + + assertFalse(decoder.getBodyHttpDatas().isEmpty()); + assertEquals(4, decoder.getBodyHttpDatas().size()); + + Attribute attr = (Attribute) decoder.getBodyHttpData("foo"); + assertTrue(attr.getByteBuf().isDirect()); + assertEquals("bar", attr.getValue()); + + attr = (Attribute) decoder.getBodyHttpData("a"); + 
assertTrue(attr.getByteBuf().isDirect()); + assertEquals("b", attr.getValue()); + + attr = (Attribute) decoder.getBodyHttpData("empty"); + assertTrue(attr.getByteBuf().isDirect()); + assertEquals("", attr.getValue()); + + attr = (Attribute) decoder.getBodyHttpData("city"); + assertTrue(attr.getByteBuf().isDirect()); + assertEquals("<\"new\" york city>", attr.getValue()); + + decoder.destroy(); + req.release(); + } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte0() { + byte[] bodyBytes = "foo=bar&a=b&empty=%&city=paris".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + try { + new HttpPostRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + assertEquals("Invalid hex byte at index '0' in string: '%'", e.getMessage()); + } finally { + req.release(); + } + } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte1() { + byte[] bodyBytes = "foo=bar&a=b&empty=%2&city=london".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + try { + new HttpPostRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + assertEquals("Invalid hex byte at index '0' in string: '%2'", e.getMessage()); + } finally { + req.release(); + } + } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleHi() { + byte[] bodyBytes = "foo=bar&a=b&empty=%Zc&city=london".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + try { + new HttpPostRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + assertEquals("Invalid hex byte at index '0' in string: '%Zc'", e.getMessage()); + } finally { + req.release(); + } + } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleLo() { + byte[] bodyBytes = "foo=bar&a=b&empty=%2g&city=london".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + try { + new HttpPostRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + assertEquals("Invalid hex byte at index '0' in string: '%2g'", e.getMessage()); + } finally { + req.release(); + } + } }
train
test
"2020-04-27T13:59:01"
"2020-04-21T16:48:21Z"
fabienrenaud
val
netty/netty/10245_10247
netty/netty
netty/netty/10245
netty/netty/10247
[ "keyword_pr_to_issue" ]
cfcd7a4fdecde6173a1cddea9c1df703e9910895
c3db0391affb7eca0cd95a49d0b1063e3a2c3599
[ "Coworker reports that this NPE does not occur anymore after downgrading Netty to 4.1.34.Final, which is the last version before #8939.", "High throughput in sense of Bandwidth or Connections?", "Bandwidth. AIUI he had a simulator running that only created one connection but was sending messages constantly.", "@rdicroce sorry about this. Your analysis looks correct to me and this appears to be an oversight. WCBB instances need to be unwrapped here. I will provide a fix asap.\r\n\r\nOne way to work-around in the meantime would be to run with `-Dio.netty.leakDetection.level=disabled`", "@rdicroce @normanmaurer please see #10247" ]
[]
"2020-05-05T03:02:37Z"
[ "defect" ]
NPE in CompositeByteBuf when reading from channel
### Expected behavior There should not be a NullPointerException. ### Actual behavior A NullPointerException occurs: ``` io.netty.handler.codec.DecoderException: java.lang.NullPointerException at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:280) ~[netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:377) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:363) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:355) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:377) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:363) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:714) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:650) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:576) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) [netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) [netty-all-4.1.45.Final.jar:4.1.45.Final] at java.lang.Thread.run(Thread.java:834) [?:?] Caused by: java.lang.NullPointerException at io.netty.buffer.CompositeByteBuf.toComponentIndex0(CompositeByteBuf.java:893) ~[netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.buffer.CompositeByteBuf.addFlattenedComponents(CompositeByteBuf.java:464) ~[netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.handler.codec.ByteToMessageDecoder$2.cumulate(ByteToMessageDecoder.java:132) ~[netty-all-4.1.45.Final.jar:4.1.45.Final] at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:274) ~[netty-all-4.1.45.Final.jar:4.1.45.Final] ... 15 more ``` ### Steps to reproduce Not entirely clear. A coworker discovered that this happens sometimes with very high throughput. After scrutinizing the code a bit, I suspect this is a regression introduced by #8939. The only way an NPE could occur at line 893 is if the components array is null. It looks like that can only happen if the buffer passed to addFlattenedComponents() is a WrappedCompositeByteBuf. WCBB uses the special constructor in CBB that sets components to null, and does not override toComponentIndex0(). ### Netty version 4.1.45.Final ### JVM version (e.g. `java -version`) 11 (not sure exactly which update) ### OS version (e.g. `uname -a`) Windows 10
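A condensed sketch of the fix this analysis points at, taken from the gold patch later in this record: `addFlattenedComponents(...)` must unwrap a `WrappedCompositeByteBuf` before casting, so the components array of the underlying composite is used (`ridx`/`widx` are the reader/writer indices computed earlier in that method).
```
final CompositeByteBuf from;
if (buffer instanceof WrappedCompositeByteBuf) {
    // The wrapper's own components array stays null (special CBB constructor),
    // so operate on the wrapped CompositeByteBuf instead.
    from = (CompositeByteBuf) buffer.unwrap();
} else {
    from = (CompositeByteBuf) buffer;
}
from.checkIndex(ridx, widx - ridx);
```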
[ "buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java" ]
[ "buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java" ]
[ "buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java", "buffer/src/test/java/io/netty/buffer/WrappedCompositeByteBufTest.java" ]
diff --git a/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java b/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java index 48a2006a9f5..f949a2e78c0 100644 --- a/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java @@ -473,7 +473,12 @@ public CompositeByteBuf addFlattenedComponents(boolean increaseWriterIndex, Byte consolidateIfNeeded(); return this; } - final CompositeByteBuf from = (CompositeByteBuf) buffer; + final CompositeByteBuf from; + if (buffer instanceof WrappedCompositeByteBuf) { + from = (CompositeByteBuf) buffer.unwrap(); + } else { + from = (CompositeByteBuf) buffer; + } from.checkIndex(ridx, widx - ridx); final Component[] fromComponents = from.components; final int compCountBefore = componentCount;
diff --git a/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java index d48b266a0ed..1c36be3b4ad 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java @@ -24,7 +24,6 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.ConcurrentModificationException; import java.util.Iterator; @@ -89,7 +88,20 @@ protected ByteBuf newBuffer(int length, int maxCapacity) { buffers.add(EMPTY_BUFFER); } - ByteBuf buffer = wrappedBuffer(Integer.MAX_VALUE, buffers.toArray(new ByteBuf[0])).order(order); + ByteBuf buffer; + // Ensure that we are really testing a CompositeByteBuf + switch (buffers.size()) { + case 0: + buffer = compositeBuffer(Integer.MAX_VALUE); + break; + case 1: + buffer = compositeBuffer(Integer.MAX_VALUE).addComponent(buffers.get(0)); + break; + default: + buffer = wrappedBuffer(Integer.MAX_VALUE, buffers.toArray(new ByteBuf[0])); + break; + } + buffer = buffer.order(order); // Truncate to the requested capacity. buffer.capacity(length); @@ -101,6 +113,10 @@ protected ByteBuf newBuffer(int length, int maxCapacity) { return buffer; } + protected CompositeByteBuf newCompositeBuffer() { + return compositeBuffer(); + } + // Composite buffer does not waste bandwidth on discardReadBytes, but // the test will fail in strict mode. @Override @@ -1124,8 +1140,17 @@ public void testInsertEmptyBufferInMiddle() { @Test public void testAddFlattenedComponents() { + testAddFlattenedComponents(false); + } + + @Test + public void testAddFlattenedComponentsWithWrappedComposite() { + testAddFlattenedComponents(true); + } + + private void testAddFlattenedComponents(boolean addWrapped) { ByteBuf b1 = Unpooled.wrappedBuffer(new byte[] { 1, 2, 3 }); - CompositeByteBuf newComposite = Unpooled.compositeBuffer() + CompositeByteBuf newComposite = newCompositeBuffer() .addComponent(true, b1) .addFlattenedComponents(true, b1.retain()) .addFlattenedComponents(true, Unpooled.EMPTY_BUFFER); @@ -1148,7 +1173,7 @@ public void testAddFlattenedComponents() { ByteBuf s4 = s2.retainedSlice(0, 2); buffer.release(); - ByteBuf compositeToAdd = Unpooled.compositeBuffer() + CompositeByteBuf compositeToAdd = compositeBuffer() .addComponent(s1) .addComponent(Unpooled.EMPTY_BUFFER) .addComponents(s2, s3, s4); @@ -1161,6 +1186,9 @@ public void testAddFlattenedComponents() { ByteBuf compositeCopy = compositeToAdd.copy(); + if (addWrapped) { + compositeToAdd = new WrappedCompositeByteBuf(compositeToAdd); + } newComposite.addFlattenedComponents(true, compositeToAdd); // verify that added range matches @@ -1189,7 +1217,7 @@ public void testAddFlattenedComponents() { @Test public void testIterator() { - CompositeByteBuf cbuf = compositeBuffer(); + CompositeByteBuf cbuf = newCompositeBuffer(); cbuf.addComponent(EMPTY_BUFFER); cbuf.addComponent(EMPTY_BUFFER); @@ -1211,7 +1239,7 @@ public void testIterator() { @Test public void testEmptyIterator() { - CompositeByteBuf cbuf = compositeBuffer(); + CompositeByteBuf cbuf = newCompositeBuffer(); Iterator<ByteBuf> it = cbuf.iterator(); assertFalse(it.hasNext()); @@ -1227,7 +1255,7 @@ public void testEmptyIterator() { @Test(expected = ConcurrentModificationException.class) public void testIteratorConcurrentModificationAdd() { - CompositeByteBuf cbuf = compositeBuffer(); + CompositeByteBuf cbuf = 
newCompositeBuffer(); cbuf.addComponent(EMPTY_BUFFER); Iterator<ByteBuf> it = cbuf.iterator(); @@ -1243,7 +1271,7 @@ public void testIteratorConcurrentModificationAdd() { @Test(expected = ConcurrentModificationException.class) public void testIteratorConcurrentModificationRemove() { - CompositeByteBuf cbuf = compositeBuffer(); + CompositeByteBuf cbuf = newCompositeBuffer(); cbuf.addComponent(EMPTY_BUFFER); Iterator<ByteBuf> it = cbuf.iterator(); @@ -1302,7 +1330,7 @@ public void testReleasesItsComponents2() { ByteBuf s3 = s2.readRetainedSlice(2); // 4 ByteBuf s4 = s3.readRetainedSlice(2); // 5 - ByteBuf composite = Unpooled.compositeBuffer() + ByteBuf composite = newCompositeBuffer() .addComponent(s1) .addComponents(s2, s3, s4) .order(ByteOrder.LITTLE_ENDIAN); @@ -1327,7 +1355,7 @@ public void testReleasesOnShrink() { ByteBuf b2 = Unpooled.buffer(2).writeShort(2); // composite takes ownership of s1 and s2 - ByteBuf composite = Unpooled.compositeBuffer() + ByteBuf composite = newCompositeBuffer() .addComponents(b1, b2); assertEquals(4, composite.capacity()); @@ -1357,7 +1385,7 @@ public void testReleasesOnShrink2() { ByteBuf b2 = b1.retainedSlice(b1.readerIndex(), 2); // composite takes ownership of b1 and b2 - ByteBuf composite = Unpooled.compositeBuffer() + ByteBuf composite = newCompositeBuffer() .addComponents(b1, b2); assertEquals(4, composite.capacity()); @@ -1413,12 +1441,12 @@ public void testDecomposeNone() { testDecompose(310, 0, 0); } - private static void testDecompose(int offset, int length, int expectedListSize) { + private void testDecompose(int offset, int length, int expectedListSize) { byte[] bytes = new byte[1024]; PlatformDependent.threadLocalRandom().nextBytes(bytes); ByteBuf buf = wrappedBuffer(bytes); - CompositeByteBuf composite = compositeBuffer(); + CompositeByteBuf composite = newCompositeBuffer(); composite.addComponents(true, buf.retainedSlice(100, 200), buf.retainedSlice(300, 400), @@ -1479,8 +1507,8 @@ public void testDiscardReadBytesCorrectlyUpdatesLastAccessed() { testDiscardCorrectlyUpdatesLastAccessed(false); } - private static void testDiscardCorrectlyUpdatesLastAccessed(boolean discardSome) { - CompositeByteBuf cbuf = compositeBuffer(); + private void testDiscardCorrectlyUpdatesLastAccessed(boolean discardSome) { + CompositeByteBuf cbuf = newCompositeBuffer(); List<ByteBuf> buffers = new ArrayList<ByteBuf>(4); for (int i = 0; i < 4; i++) { ByteBuf buf = buffer().writeInt(i); diff --git a/buffer/src/test/java/io/netty/buffer/WrappedCompositeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/WrappedCompositeByteBufTest.java index 7b37177b435..20d82506139 100644 --- a/buffer/src/test/java/io/netty/buffer/WrappedCompositeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/WrappedCompositeByteBufTest.java @@ -25,4 +25,9 @@ protected final ByteBuf newBuffer(int length, int maxCapacity) { protected WrappedCompositeByteBuf wrap(CompositeByteBuf buffer) { return new WrappedCompositeByteBuf(buffer); } + + @Override + protected CompositeByteBuf newCompositeBuffer() { + return wrap(super.newCompositeBuffer()); + } }
val
test
"2020-04-29T17:52:43"
"2020-05-04T18:39:13Z"
rdicroce
val
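The test patch above routes composite-buffer construction in `AbstractCompositeByteBufTest` through an overridable `newCompositeBuffer()` hook so that `WrappedCompositeByteBufTest` can decorate the result and re-run the whole suite against the wrapper. A minimal sketch of that factory-override pattern; the class and method names below are hypothetical, only the Netty `Unpooled`/`CompositeByteBuf` API is real:

```java
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;

// The base test exposes a hook; a subclass overrides it to decorate the buffer
// under test, so every check written against the hook also covers the wrapper.
abstract class CompositeBufferTestSketch {
    protected CompositeByteBuf newCompositeBuffer() {
        return Unpooled.compositeBuffer();
    }

    final void smokeTest() {
        CompositeByteBuf cbuf = newCompositeBuffer();
        // true = advance the writer index so the component becomes readable.
        cbuf.addComponent(true, Unpooled.wrappedBuffer(new byte[] { 1, 2, 3 }));
        if (cbuf.readableBytes() != 3) {
            throw new AssertionError("expected 3 readable bytes");
        }
        cbuf.release();
    }
}
```

A wrapper-focused subclass would override `newCompositeBuffer()` to return the decorated composite, exactly as `WrappedCompositeByteBufTest` does in the patch.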
netty/netty/10261_10274
netty/netty
netty/netty/10261
netty/netty/10274
[ "keyword_pr_to_issue" ]
1c21733fb902ae40be29c11184cc712ddcc0a259
714dd00aabb414e6cfa931888328ad41d8f323e9
[ "Hi, I take a look " ]
[ "@amizurov unfortunately this is a a breaking change as it changes the method signature of some methods ", "Yes but this is only example, maybe you can advice another way ?", "ah sorry missed that... Changing an example is of course fine.", "hmm... is this really correct for the case where `msg` isn't an instance of `StompFrame ` ? Seems like you may loose items here then.", "We use this encoder inside `StompWebSocketProtocolCodec` so we accept only `StompSubframe` and don't put it to pipeline, just delegate.", "still this seems very error-prone. At least we should add an assert imho", "Sorry could you please explain what kind of assertion you mean ? ", "never mind " ]
"2020-05-12T09:52:29Z"
[]
Why stomp.js cannot get the message body
### Expected behavior The stomp.js client should be able to use the Netty STOMP example to send a message to a topic, and other stomp.js clients should receive that message. ### Actual behavior When I use the Netty STOMP example to send a message to a topic and another stomp.js client receives it, the message body is empty. In the browser network console, choose 'ws' and you will see that the message is split into two WebSocket messages. ### Steps to reproduce Use the code in package io.netty.example.stomp and start the server, then use stomp.js to send a message to a topic while another stomp.js client receives it. ### Minimal yet complete reproducer code (or URL to code) ### Netty version 4.1.49.Final ### JVM version (e.g. `java -version`) java version "1.8.0_231" ### OS version (e.g. `uname -a`) windows 10
[ "example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketProtocolCodec.java" ]
[ "example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketFrameEncoder.java", "example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketProtocolCodec.java" ]
[]
diff --git a/example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketFrameEncoder.java b/example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketFrameEncoder.java new file mode 100644 index 00000000000..68b92e6992f --- /dev/null +++ b/example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketFrameEncoder.java @@ -0,0 +1,68 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.example.stomp.websocket; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.http.websocketx.ContinuationWebSocketFrame; +import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; +import io.netty.handler.codec.http.websocketx.WebSocketFrame; +import io.netty.handler.codec.stomp.LastStompContentSubframe; +import io.netty.handler.codec.stomp.StompFrame; +import io.netty.handler.codec.stomp.StompHeadersSubframe; +import io.netty.handler.codec.stomp.StompSubframe; +import io.netty.handler.codec.stomp.StompSubframeEncoder; + +import java.util.List; + +public class StompWebSocketFrameEncoder extends StompSubframeEncoder { + + @Override + public void encode(ChannelHandlerContext ctx, StompSubframe msg, List<Object> out) throws Exception { + super.encode(ctx, msg, out); + + if (out.isEmpty()) { + return; + } + + final WebSocketFrame webSocketFrame; + if (msg instanceof StompFrame) { + if (out.size() == 1) { + webSocketFrame = new TextWebSocketFrame(getFirst(out)); + } else { + CompositeByteBuf content = ctx.alloc().compositeBuffer(out.size()); + for (Object byteBuf : out) { + content.addComponent(true, (ByteBuf) byteBuf); + } + webSocketFrame = new TextWebSocketFrame(content); + } + } else if (msg instanceof StompHeadersSubframe) { + webSocketFrame = new TextWebSocketFrame(false, 0, getFirst(out)); + } else if (msg instanceof LastStompContentSubframe) { + webSocketFrame = new ContinuationWebSocketFrame(true, 0, getFirst(out)); + } else { + webSocketFrame = new ContinuationWebSocketFrame(false, 0, getFirst(out)); + } + + out.clear(); + out.add(webSocketFrame); + } + + private static ByteBuf getFirst(List<Object> container) { + return (ByteBuf) container.get(0); + } +} diff --git a/example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketProtocolCodec.java b/example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketProtocolCodec.java index 97870787748..8d8ed4b7f98 100644 --- a/example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketProtocolCodec.java +++ b/example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketProtocolCodec.java @@ -15,24 +15,25 @@ */ package io.netty.example.stomp.websocket; -import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandler.Sharable; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.MessageToMessageCodec; import 
io.netty.handler.codec.http.websocketx.TextWebSocketFrame; import io.netty.handler.codec.http.websocketx.WebSocketFrame; +import io.netty.handler.codec.http.websocketx.WebSocketFrameAggregator; import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler; import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler.HandshakeComplete; +import io.netty.handler.codec.stomp.StompSubframe; import io.netty.handler.codec.stomp.StompSubframeAggregator; import io.netty.handler.codec.stomp.StompSubframeDecoder; -import io.netty.handler.codec.stomp.StompSubframeEncoder; import java.util.List; @Sharable -public class StompWebSocketProtocolCodec extends MessageToMessageCodec<WebSocketFrame, ByteBuf> { +public class StompWebSocketProtocolCodec extends MessageToMessageCodec<WebSocketFrame, StompSubframe> { private final StompChatHandler stompChatHandler = new StompChatHandler(); + private final StompWebSocketFrameEncoder stompWebSocketFrameEncoder = new StompWebSocketFrameEncoder(); @Override public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { @@ -40,8 +41,8 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc StompVersion stompVersion = StompVersion.findBySubProtocol(((HandshakeComplete) evt).selectedSubprotocol()); ctx.channel().attr(StompVersion.CHANNEL_ATTRIBUTE_KEY).set(stompVersion); ctx.pipeline() + .addLast(new WebSocketFrameAggregator(65536)) .addLast(new StompSubframeDecoder()) - .addLast(new StompSubframeEncoder()) .addLast(new StompSubframeAggregator(65536)) .addLast(stompChatHandler) .remove(StompWebSocketClientPageHandler.INSTANCE); @@ -51,8 +52,8 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc } @Override - protected void encode(ChannelHandlerContext ctx, ByteBuf stompFrame, List<Object> out) { - out.add(new TextWebSocketFrame(stompFrame.retain())); + protected void encode(ChannelHandlerContext ctx, StompSubframe stompFrame, List<Object> out) throws Exception { + stompWebSocketFrameEncoder.encode(ctx, stompFrame, out); } @Override
null
train
test
"2020-05-12T08:48:54"
"2020-05-08T08:06:48Z"
xiaozaiyuji
val
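For orientation, a sketch of the pipeline order the fix above establishes: WebSocket frames are aggregated before the STOMP decoder so it always sees complete frames, and outbound STOMP subframes are turned back into WebSocket frames by the new `StompWebSocketFrameEncoder` rather than a bare `StompSubframeEncoder`. The wrapper class below is hypothetical; the handlers and the 65536 limit come from the example code in the patch:

```java
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.http.websocketx.WebSocketFrameAggregator;
import io.netty.handler.codec.stomp.StompSubframeAggregator;
import io.netty.handler.codec.stomp.StompSubframeDecoder;

final class StompPipelineSketch {
    static void configure(ChannelPipeline pipeline) {
        pipeline.addLast(new WebSocketFrameAggregator(65536))  // complete WS frames first
                .addLast(new StompSubframeDecoder())           // then decode STOMP
                .addLast(new StompSubframeAggregator(65536));  // then merge subframes
        // The application handler (e.g. the chat handler) follows; outbound
        // encoding is delegated to the StompWebSocketFrameEncoder from the patch.
    }
}
```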
netty/netty/10284_10285
netty/netty
netty/netty/10284
netty/netty/10285
[ "keyword_pr_to_issue" ]
caf51b72840e12142b3a87355da63e33230b24b4
d5087deec63e0152b76d1564d3fa8ae31d84c6af
[]
[]
"2020-05-13T16:42:26Z"
[]
HttpPostStandardRequestDecoder not decoding `+` to spaces
The new `UrlDecoder` in `HttpPostStandardRequestDecoder` does not convert `+` to spaces, which `QueryStringDecoder.decodeComponent` used to do. https://github.com/netty/netty/pull/10209/files#diff-99f86aa293d9977ca3a310e1488d31c1R713
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java index 4cb82e678da..a71e2a14a2e 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java @@ -656,7 +656,7 @@ private static String decodeAttribute(String s, Charset charset) { } private static ByteBuf decodeAttribute(ByteBuf b, Charset charset) { - int firstEscaped = b.forEachByte(new ByteProcessor.IndexOfProcessor((byte) '%')); + int firstEscaped = b.forEachByte(new UrlEncodedDetector()); if (firstEscaped == -1) { return null; // nothing to decode } @@ -714,6 +714,13 @@ public void removeHttpDataFromClean(InterfaceHttpData data) { factory.removeHttpDataFromClean(request, data); } + private static final class UrlEncodedDetector implements ByteProcessor { + @Override + public boolean process(byte value) throws Exception { + return value != '%' && value != '+'; + } + } + private static final class UrlDecoder implements ByteProcessor { private final ByteBuf output; @@ -742,6 +749,8 @@ public boolean process(byte value) { } } else if (value == '%') { nextEscapedIdx = 1; + } else if (value == '+') { + output.writeByte(' '); } else { output.writeByte(value); }
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java index 7efdc55849e..02958d46aa1 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java @@ -829,7 +829,7 @@ public void testDecodeWithLanguageContentDispositionFieldParametersForFix() thro @Test public void testDecodeFullHttpRequestWithUrlEncodedBody() throws Exception { - byte[] bodyBytes = "foo=bar&a=b&empty=&city=%3c%22new%22%20york%20city%3e".getBytes(); + byte[] bodyBytes = "foo=bar&a=b&empty=&city=%3c%22new%22%20york%20city%3e&other_city=los+angeles".getBytes(); ByteBuf content = Unpooled.directBuffer(bodyBytes.length); content.writeBytes(bodyBytes); @@ -838,7 +838,7 @@ public void testDecodeFullHttpRequestWithUrlEncodedBody() throws Exception { assertFalse(decoder.getBodyHttpDatas().isEmpty()); assertFalse(decoder.getBodyHttpDatas().isEmpty()); - assertEquals(4, decoder.getBodyHttpDatas().size()); + assertEquals(5, decoder.getBodyHttpDatas().size()); Attribute attr = (Attribute) decoder.getBodyHttpData("foo"); assertTrue(attr.getByteBuf().isDirect()); @@ -856,6 +856,10 @@ public void testDecodeFullHttpRequestWithUrlEncodedBody() throws Exception { assertTrue(attr.getByteBuf().isDirect()); assertEquals("<\"new\" york city>", attr.getValue()); + attr = (Attribute) decoder.getBodyHttpData("other_city"); + assertTrue(attr.getByteBuf().isDirect()); + assertEquals("los angeles", attr.getValue()); + decoder.destroy(); req.release(); }
test
test
"2020-05-13T08:00:23"
"2020-05-13T16:35:52Z"
fabienrenaud
val
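As an illustration of the rule the fix restores for `application/x-www-form-urlencoded` bodies, here is a simplified, standalone String decoder: `+` stands for a space and `%xx` is a hex escape. This is not Netty's actual `UrlDecoder`, which works on a `ByteBuf` through a `ByteProcessor` and handles charsets and malformed escapes; the sketch ignores those details and its class name is hypothetical:

```java
final class FormUrlDecodeSketch {
    static String decode(String s) {
        StringBuilder out = new StringBuilder(s.length());
        for (int i = 0; i < s.length(); i++) {
            char c = s.charAt(i);
            if (c == '+') {
                out.append(' '); // the behavior this fix adds back
            } else if (c == '%' && i + 2 < s.length()) {
                // Decode a %xx hex escape (no validation in this sketch).
                out.append((char) Integer.parseInt(s.substring(i + 1, i + 3), 16));
                i += 2;
            } else {
                out.append(c);
            }
        }
        return out.toString();
    }
}
```

With this rule, `los+angeles` decodes to `los angeles`, matching the expectation added in the test patch above.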
netty/netty/10288_10289
netty/netty
netty/netty/10288
netty/netty/10289
[ "keyword_pr_to_issue" ]
caf51b72840e12142b3a87355da63e33230b24b4
20b8685c8b9a35b33f27c8b26900cf65e1575417
[ "cc @ejona86 \r\n\r\nI see 2 options:\r\n1) provide an explicit setter in `SslContextBuilder` and the value is then plumbed down to where it is used in `ReferenceCountedOpenSslContext`. \r\n2) `SslContextBuilder` will have some logic to automatically set this value (say if a custom `TrustManagerFactory` was used) and it will be plumbed down similar to (1) above.\r\n\r\nFor backward compatibility the system property `\"io.netty.handler.ssl.openssl.useTasks\"` could still be honored.", "For some gRPC work, we need to implement a TrustManager that will do blocking RPCs as part of the verification. We believe that means we need `useTasks=true`. But as a library, we aren't in a position to change a global system property. We'd like a way to enable useTasks=true for us, without changing the behavior of other Netty users in the same process.", "@ejona86 please note that this will only work for netty-tcnative-boringssl-static for this use-case as only boringssl provides the needed hooks. \r\n\r\nWould it be enough to expose a setter in `ReferenceCountedOpenSslContext` ?", "> @ejona86 please note that this will only work for netty-tcnative-boringssl-static for this use-case as only boringssl provides the needed hooks.\r\n> \r\n> Would it be enough to expose a setter in `ReferenceCountedOpenSslContext` ?\r\n\r\nit works at least for me: if the returned value from `SslContextBuilder.build` is a `ReferenceCountedOpenSslContext` then we call this setter for our case.\r\n", "`ReferenceCountedOpenSslContext` looks like it is returned by the `SslContextBuilder`. Yeah, that would work. A bit strange to mutate the SslContext, but I can see how that would be easy and work. That sounds quite fair. To configure would be an instanceof+cast+set.\r\n\r\nSince gRPC allows passing an SslContext during configuration, I think that would work for our internal-to-gRPC use case (which @sanjaypujare is interested in) and another \"our users may synchronously reload certificates in the TrustManager implementation\" use case.\r\n\r\nThe boringssl restriction is good to know. What behavior will we see with openssl-backed tcnative? Will it just \"not call our TrustManager\", or will it be called on an event loop?\r\n\r\nAlso, @sanjaypujare is willing to send a PR for the change. But I suggested it would be best to discuss the approach first.", "> `ReferenceCountedOpenSslContext` looks like it is returned by the `SslContextBuilder`. Yeah, that would work. A bit strange to mutate the SslContext, but I can see how that would be easy and work. That sounds quite fair. To configure would be an instanceof+cast+set.\r\n> \r\n> Since gRPC allows passing an SslContext during configuration, I think that would work for our internal-to-gRPC use case (which @sanjaypujare is interested in) and another \"our users may synchronously reload certificates in the TrustManager implementation\" use case.\r\n> \r\n> The boringssl restriction is good to know. What behavior will we see with openssl-backed tcnative? Will it just \"not call our TrustManager\", or will it be called on an event loop?\r\n\r\nIt will call it on the Eventloop\r\n> \r\n> Also, @sanjaypujare is willing to send a PR for the change. But I suggested it would be best to discuss the approach first.\r\n\r\nYep a PR would be awesome. Let’s do a setter for now. That said we may want to switch the default to true soon\r\n\r\n", "The current `SSLContext.setUseTasks(ctx, USE_TASKS);` call is in the constructor. 
With the setter approach I will call it again (with possibly a different value) which I assume is okay? Also the setter will allow the caller to set it to false even if \"io.netty.handler.ssl.openssl.useTasks\" system property is set to true. ", "Yep that’s fine\n\n> Am 13.05.2020 um 21:15 schrieb sanjaypujare <notifications@github.com>:\n> \n> \n> The current SSLContext.setUseTasks(ctx, USE_TASKS); call is in the constructor. With the setter approach I will call it again (with possibly a different value) which I assume is okay? Also the setter will allow the caller to set it to false even if \"io.netty.handler.ssl.openssl.useTasks\" system property is set to true.\n> \n> —\n> You are receiving this because you commented.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n" ]
[]
"2020-05-13T21:13:44Z"
[]
useTasks should be set or settable per ReferenceCountedOpenSslContext
### Expected behavior Each `ReferenceCountedOpenSslContext` instance have its own setting for UseTasks ### Actual behavior Currently `USE_TASKS` is a global set from the system property `"io.netty.handler.ssl.openssl.useTasks"` so all `ReferenceCountedOpenSslContext` instances are forced to use that one value. ### Steps to reproduce No steps needed. Just from reading the code. ### Minimal yet complete reproducer code (or URL to code) This is where it is set: https://github.com/netty/netty/blob/4.1/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslContext.java#L86 and this is where it is used: https://github.com/netty/netty/blob/4.1/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslContext.java#L339 ### Netty version Latest (4.1?) ### JVM version (e.g. `java -version`) any ### OS version (e.g. `uname -a`) all those with tcnative/OpenSsl support.
[ "handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslContext.java" ]
[ "handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslContext.java" ]
[]
diff --git a/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslContext.java b/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslContext.java index bc72db4c218..27eb43574c0 100644 --- a/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslContext.java +++ b/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslContext.java @@ -529,8 +529,7 @@ public final void setPrivateKeyMethod(OpenSslPrivateKeyMethod method) { } } - // Exposed for testing only - final void setUseTasks(boolean useTasks) { + public final void setUseTasks(boolean useTasks) { Lock writerLock = ctxLock.writeLock(); writerLock.lock(); try {
null
test
test
"2020-05-13T08:00:23"
"2020-05-13T17:34:24Z"
sanjaypujare
val
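A sketch of the instanceof-cast-set usage agreed on in the discussion above, assuming the patch (which makes `setUseTasks` public) is applied and netty-tcnative is available; per the discussion, the blocking-TrustManager use case needs BoringSSL, and with other OpenSSL builds the callback runs on the event loop:

```java
import io.netty.handler.ssl.ReferenceCountedOpenSslContext;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslProvider;

final class UseTasksSketch {
    static SslContext buildClientContext() throws Exception {
        SslContext ctx = SslContextBuilder.forClient()
                .sslProvider(SslProvider.OPENSSL)
                .build();
        if (ctx instanceof ReferenceCountedOpenSslContext) {
            // Run certificate verification on task threads instead of inside
            // the engine call, without touching the global
            // "io.netty.handler.ssl.openssl.useTasks" system property.
            ((ReferenceCountedOpenSslContext) ctx).setUseTasks(true);
        }
        return ctx;
    }
}
```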
netty/netty/10320_10321
netty/netty
netty/netty/10320
netty/netty/10321
[ "keyword_pr_to_issue" ]
0375e6e01b0b6412c2369a8d3e2a990f41a9edfa
de134da720d23909151d20e6af6eb2a96181bf03
[]
[ "you need to call `c.release();`", "nit: move the `;` one line up", "Done by `channel.releaseInbound()`", "Done", "Consider writing the full HTTP request (including one or two chunks + last (zero) chunk + some trailers) to make sure that decoder's state machine transitions to the correct state and handles the entire request correctly.", "Done by move the tests to `HttpContentDecoderTest` to use the pre-defined `gzipped hello world` for testing.", "This can be changed to the following code to reduce the `trim()` operations and also reduce allocations:\r\n\r\n```java\r\nint idx = transferEncoding.indexOf(\",\");\r\nif (idx != -1) {\r\n contentEncoding = transferEncoding.subString(0, idx).trim();\r\n} else {\r\n contentEncoding = transferEncoding.trim();\r\n}\r\n```", "Consider using `HttpHeaderNames.CONTENT_LENGTH` constant.", "Looks like too many `\\r\\n`. Should we have only 2 pairs instead of 3?", "Consider using constants from `HttpHeaderNames` and `HttpHeaderValues`.", "Done", "Thanks for the advice, Done", "Done", "Yeah, that is right. thanks for pointing out. done" ]
"2020-05-27T01:19:13Z"
[]
Values other than chunked in the Transfer-Encoding header lead to decode failure
According to https://tools.ietf.org/html/rfc7230#page-29, people can specify multiple values for `Transfer-Encoding` HTTP header, like: `Transfer-Encoding: gzip, chunked`, but Netty does not recognize it as a chunked request in this case. ### Expected behavior There is no exception in server, and client should get the server response. ### Actual behavior Exception found in server: ``` java.lang.IllegalArgumentException: text is empty (possibly HTTP/0.9) at io.netty.handler.codec.http.HttpVersion.valueOf(HttpVersion.java:64) at io.netty.handler.codec.http.HttpRequestDecoder.createMessage(HttpRequestDecoder.java:87) at io.netty.handler.codec.http.HttpObjectDecoder.decode(HttpObjectDecoder.java:206) at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:501) at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:440) at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163) at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:714) at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:650) at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:576) at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(Thread.java:748) ``` And there is no response back in that case. ### Steps to reproduce Please refer to the reproducer in the link below. ### Minimal yet complete reproducer code (or URL to code) Reproducer link: https://github.com/gaol/tests/tree/master/netty/transfer-encoding-gzip-chunked ### Netty version * 4.1.49.Final * latest `4.1` branch(commit: 0375e6e01b0b6412c2369a8d3e2a990f41a9edfa) ### JVM version (e.g. `java -version`) ``` [lgao@lgao netty]$ java -version openjdk version "1.8.0_222" OpenJDK Runtime Environment (AdoptOpenJDK)(build 1.8.0_222-b10) OpenJDK 64-Bit Server VM (AdoptOpenJDK)(build 25.222-b10, mixed mode) ``` ### OS version (e.g. `uname -a`) ``` [lgao@lgao netty]$ uname -a Linux lgao.corp.redhat.com 4.13.16-100.fc25.x86_64 #1 SMP Mon Nov 27 19:52:46 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux ``` ### Linked Issue https://github.com/eclipse-vertx/vert.x/issues/3353
[ "codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java index d2513e4a5a8..686d03257e8 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java @@ -85,7 +85,17 @@ protected void decode(ChannelHandlerContext ctx, HttpObject msg, List<Object> ou if (contentEncoding != null) { contentEncoding = contentEncoding.trim(); } else { - contentEncoding = IDENTITY; + String transferEncoding = headers.get(HttpHeaderNames.TRANSFER_ENCODING); + if (transferEncoding != null) { + int idx = transferEncoding.indexOf(","); + if (idx != -1) { + contentEncoding = transferEncoding.substring(0, idx).trim(); + } else { + contentEncoding = transferEncoding.trim(); + } + } else { + contentEncoding = IDENTITY; + } } decoder = newContentDecoder(contentEncoding); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java index 31fd14d2e5d..bb4d57cb54d 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java @@ -300,7 +300,7 @@ public static void set100ContinueExpected(HttpMessage message, boolean expected) * @return True if transfer encoding is chunked, otherwise false */ public static boolean isTransferEncodingChunked(HttpMessage message) { - return message.headers().contains(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED, true); + return message.headers().containsValue(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED, true); } /**
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecoderTest.java index 24833a7f8d4..20f41eab49b 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecoderTest.java @@ -563,6 +563,78 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { assertEquals(0, content.refCnt()); } + @Test + public void testTransferCodingGZIP() { + String requestStr = "POST / HTTP/1.1\r\n" + + "Content-Length: " + GZ_HELLO_WORLD.length + "\r\n" + + "Transfer-Encoding: gzip\r\n" + + "\r\n"; + HttpRequestDecoder decoder = new HttpRequestDecoder(); + HttpContentDecoder decompressor = new HttpContentDecompressor(); + EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor); + + channel.writeInbound(Unpooled.copiedBuffer(requestStr.getBytes())); + channel.writeInbound(Unpooled.copiedBuffer(GZ_HELLO_WORLD)); + + HttpRequest request = channel.readInbound(); + assertTrue(request.decoderResult().isSuccess()); + assertFalse(request.headers().contains(HttpHeaderNames.CONTENT_LENGTH)); + + HttpContent content = channel.readInbound(); + assertTrue(content.decoderResult().isSuccess()); + assertEquals(HELLO_WORLD, content.content().toString(CharsetUtil.US_ASCII)); + content.release(); + + LastHttpContent lastHttpContent = channel.readInbound(); + assertTrue(lastHttpContent.decoderResult().isSuccess()); + lastHttpContent.release(); + + assertHasInboundMessages(channel, false); + assertHasOutboundMessages(channel, false); + assertFalse(channel.finish()); + channel.releaseInbound(); + } + + @Test + public void testTransferCodingGZIPAndChunked() { + String requestStr = "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "Content-Type: application/x-www-form-urlencoded\r\n" + + "Trailer: My-Trailer\r\n" + + "Transfer-Encoding: gzip, chunked\r\n" + + "\r\n"; + HttpRequestDecoder decoder = new HttpRequestDecoder(); + HttpContentDecoder decompressor = new HttpContentDecompressor(); + EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor); + + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); + + String chunkLength = Integer.toHexString(GZ_HELLO_WORLD.length); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(chunkLength + "\r\n", CharsetUtil.US_ASCII))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(GZ_HELLO_WORLD))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("\r\n".getBytes(CharsetUtil.US_ASCII)))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("0\r\n", CharsetUtil.US_ASCII))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("My-Trailer: 42\r\n\r\n", CharsetUtil.US_ASCII))); + + HttpRequest request = channel.readInbound(); + assertTrue(request.decoderResult().isSuccess()); + assertTrue(request.headers().containsValue(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED, true)); + assertFalse(request.headers().contains(HttpHeaderNames.CONTENT_LENGTH)); + + HttpContent chunk1 = channel.readInbound(); + assertTrue(chunk1.decoderResult().isSuccess()); + assertEquals(HELLO_WORLD, chunk1.content().toString(CharsetUtil.US_ASCII)); + chunk1.release(); + + LastHttpContent chunk2 = channel.readInbound(); + assertTrue(chunk2.decoderResult().isSuccess()); + assertEquals("42", chunk2.trailingHeaders().get("My-Trailer")); + chunk2.release(); + + assertFalse(channel.finish()); + 
channel.releaseInbound(); + } + private static byte[] gzDecompress(byte[] input) { ZlibDecoder decoder = ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP); EmbeddedChannel channel = new EmbeddedChannel(decoder);
train
test
"2020-05-25T07:39:52"
"2020-05-27T01:06:20Z"
gaol
val
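The essence of the decoder change above, extracted into a standalone sketch: when `Content-Encoding` is absent, the first comma-separated token of `Transfer-Encoding` (e.g. `gzip` from `gzip, chunked`) is used as the content coding. The class and method names are hypothetical, and `"identity"` stands in for the decoder's `IDENTITY` constant:

```java
final class TransferEncodingSketch {
    static String contentCodingOf(String contentEncoding, String transferEncoding) {
        if (contentEncoding != null) {
            return contentEncoding.trim();
        }
        if (transferEncoding != null) {
            // Take only the first coding; "chunked" must come last per RFC 7230.
            int idx = transferEncoding.indexOf(',');
            return (idx != -1 ? transferEncoding.substring(0, idx) : transferEncoding).trim();
        }
        return "identity";
    }
}
```

Here `contentCodingOf(null, "gzip, chunked")` returns `gzip`, which is what lets the decompressor kick in for the reproducer request.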
netty/netty/10384_10387
netty/netty
netty/netty/10384
netty/netty/10387
[ "keyword_pr_to_issue" ]
523dc5c269fef07d3e9ceb1e7944118645b0717c
9893ac726fef12157c996bc827e2cdc840f7ef35
[ "/cc @bsideup ", "@normanmaurer well, it does read the file internally :) Consider either pre-loading it or adding `io.netty.handler.ssl.ReferenceCountedOpenSslClientContext$ExtendedTrustManagerVerifyCallback.verify` to the list of allowed methods", "@bsideup adding this extra method sounds like good enough... @bsideup @violetagg want to do a PR ?", "> @bsideup adding this extra method sounds like good enough... @bsideup @violetagg want to do a PR ?\r\n\r\nYep sure" ]
[]
"2020-07-02T10:08:15Z"
[]
Blocking call in ReferenceCountedOpenSslClientContext$ExtendedTrustManagerVerifyCallback
### Expected behavior No blocking calls reported by BlockHound ### Actual behavior The exception below appears when BlockHound is installed ``` Caused by: reactor.blockhound.BlockingOperationError: Blocking call! java.io.FileInputStream#readBytes at java.base/java.io.FileInputStream.readBytes(FileInputStream.java) at java.base/java.io.FileInputStream.read(FileInputStream.java:257) at java.base/java.util.Properties$LineReader.readLine(Properties.java:500) at java.base/java.util.Properties.load0(Properties.java:416) at java.base/java.util.Properties.load(Properties.java:405) at java.base/sun.security.util.UntrustedCertificates$1.run(UntrustedCertificates.java:60) at java.base/sun.security.util.UntrustedCertificates$1.run(UntrustedCertificates.java:54) at java.base/java.security.AccessController.doPrivileged(Native Method) at java.base/sun.security.util.UntrustedCertificates.<clinit>(UntrustedCertificates.java:54) at java.base/sun.security.provider.certpath.UntrustedChecker.check(UntrustedChecker.java:78) at java.base/java.security.cert.PKIXCertPathChecker.check(PKIXCertPathChecker.java:176) at java.base/sun.security.provider.certpath.PKIXCertPathValidator.validate(PKIXCertPathValidator.java:171) at java.base/sun.security.provider.certpath.PKIXCertPathValidator.validate(PKIXCertPathValidator.java:145) at java.base/sun.security.provider.certpath.PKIXCertPathValidator.engineValidate(PKIXCertPathValidator.java:84) at java.base/java.security.cert.CertPathValidator.validate(CertPathValidator.java:309) at java.base/sun.security.validator.PKIXValidator.doValidate(PKIXValidator.java:364) at java.base/sun.security.validator.PKIXValidator.engineValidate(PKIXValidator.java:275) at java.base/sun.security.validator.Validator.validate(Validator.java:264) at java.base/sun.security.ssl.X509TrustManagerImpl.validate(X509TrustManagerImpl.java:313) at java.base/sun.security.ssl.X509TrustManagerImpl.checkTrusted(X509TrustManagerImpl.java:276) at java.base/sun.security.ssl.X509TrustManagerImpl.checkServerTrusted(X509TrustManagerImpl.java:141) at io.netty.handler.ssl.ReferenceCountedOpenSslClientContext$ExtendedTrustManagerVerifyCallback.verify(ReferenceCountedOpenSslClientContext.java:261) at io.netty.handler.ssl.ReferenceCountedOpenSslContext$AbstractCertificateVerifier.verify(ReferenceCountedOpenSslContext.java:700) at io.netty.internal.tcnative.SSL.readFromSSL(Native Method) at io.netty.handler.ssl.ReferenceCountedOpenSslEngine.readPlaintextData(ReferenceCountedOpenSslEngine.java:597) at io.netty.handler.ssl.ReferenceCountedOpenSslEngine.unwrap(ReferenceCountedOpenSslEngine.java:1199) at io.netty.handler.ssl.ReferenceCountedOpenSslEngine.unwrap(ReferenceCountedOpenSslEngine.java:1321) at io.netty.handler.ssl.ReferenceCountedOpenSslEngine.unwrap(ReferenceCountedOpenSslEngine.java:1365) at io.netty.handler.ssl.SslHandler$SslEngineType$1.unwrap(SslHandler.java:206) at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1380) ``` The issue is related to https://github.com/reactor/reactor-netty/issues/1148 ### Minimal yet complete reproducer code (or URL to code) Run the test from the Netty fork below with profiles `java11` and `boringssl` `mvn -Dtest=io.netty.util.internal.NettyBlockHoundIntegrationTest#testTrustManagerVerify test -Pjava11 -Pboringssl` https://github.com/violetagg/netty/commit/b1e51e5f27df8745002cb8a0922e07757cf045bf ### Netty version Current snapshot ### JVM version (e.g. `java -version`) java 11 ### OS version (e.g. `uname -a`) Mac OS
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "transport-blockhound-tests/pom.xml" ]
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "transport-blockhound-tests/pom.xml" ]
[ "transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java", "transport-blockhound-tests/src/test/resources/io/netty/util/internal/localhost_server.key", "transport-blockhound-tests/src/test/resources/io/netty/util/internal/localhost_server.pem", "transport-blockhound-tests/src/test/resources/io/netty/util/internal/mutual_auth_ca.pem" ]
diff --git a/common/src/main/java/io/netty/util/internal/Hidden.java b/common/src/main/java/io/netty/util/internal/Hidden.java index 9c1d46cb380..7fd6d083967 100644 --- a/common/src/main/java/io/netty/util/internal/Hidden.java +++ b/common/src/main/java/io/netty/util/internal/Hidden.java @@ -87,6 +87,10 @@ public void applyTo(BlockHound.Builder builder) { "io.netty.util.concurrent.SingleThreadEventExecutor", "takeTask"); + builder.allowBlockingCallsInside( + "io.netty.handler.ssl.ReferenceCountedOpenSslClientContext$ExtendedTrustManagerVerifyCallback", + "verify"); + builder.nonBlockingThreadPredicate(new Function<Predicate<Thread>, Predicate<Thread>>() { @Override public Predicate<Thread> apply(final Predicate<Thread> p) { diff --git a/transport-blockhound-tests/pom.xml b/transport-blockhound-tests/pom.xml index 28dcfba4767..a257a3c4ab0 100644 --- a/transport-blockhound-tests/pom.xml +++ b/transport-blockhound-tests/pom.xml @@ -71,6 +71,12 @@ <artifactId>netty-handler</artifactId> <version>${project.version}</version> </dependency> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>${tcnative.artifactId}</artifactId> + <classifier>${tcnative.classifier}</classifier> + <optional>true</optional> + </dependency> <dependency> <groupId>org.bouncycastle</groupId>
diff --git a/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java b/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java index b34b6b2f856..58e9284370c 100644 --- a/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java +++ b/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java @@ -153,6 +153,25 @@ public void testHandshakeWithExecutor() throws Exception { } } + @Test + public void testTrustManagerVerify() throws Exception { + final SslContext sslClientCtx = + SslContextBuilder.forClient() + .trustManager(ResourcesUtil.getFile(getClass(), "mutual_auth_ca.pem")) + .build(); + + final SslContext sslServerCtx = + SslContextBuilder.forServer(ResourcesUtil.getFile(getClass(), "localhost_server.pem"), + ResourcesUtil.getFile(getClass(), "localhost_server.key"), + null) + .build(); + + final SslHandler clientSslHandler = sslClientCtx.newHandler(UnpooledByteBufAllocator.DEFAULT); + final SslHandler serverSslHandler = sslServerCtx.newHandler(UnpooledByteBufAllocator.DEFAULT); + + testHandshake(sslClientCtx, clientSslHandler, serverSslHandler); + } + private static void testHandshakeWithExecutor(Executor executor) throws Exception { String tlsVersion = "TLSv1.2"; final SslContext sslClientCtx = SslContextBuilder.forClient() @@ -163,12 +182,17 @@ private static void testHandshakeWithExecutor(Executor executor) throws Exceptio final SslContext sslServerCtx = SslContextBuilder.forServer(cert.key(), cert.cert()) .sslProvider(SslProvider.JDK).protocols(tlsVersion).build(); - EventLoopGroup group = new NioEventLoopGroup(); - Channel sc = null; - Channel cc = null; final SslHandler clientSslHandler = sslClientCtx.newHandler(UnpooledByteBufAllocator.DEFAULT, executor); final SslHandler serverSslHandler = sslServerCtx.newHandler(UnpooledByteBufAllocator.DEFAULT, executor); + testHandshake(sslClientCtx, clientSslHandler, serverSslHandler); + } + + private static void testHandshake(SslContext sslClientCtx, SslHandler clientSslHandler, + SslHandler serverSslHandler) throws Exception { + EventLoopGroup group = new NioEventLoopGroup(); + Channel sc = null; + Channel cc = null; try { sc = new ServerBootstrap() .group(group) diff --git a/transport-blockhound-tests/src/test/resources/io/netty/util/internal/localhost_server.key b/transport-blockhound-tests/src/test/resources/io/netty/util/internal/localhost_server.key new file mode 100644 index 00000000000..9aa6611400a --- /dev/null +++ b/transport-blockhound-tests/src/test/resources/io/netty/util/internal/localhost_server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDYrLtMlZzoe2BP +iCURF3So5XNLfsOLcAVERXXjnxqX6Mex55WdJiy6uWTFKbRHWJdbWELdZxVl5+GX +pMv3OdkKZt+19ZdSfByv6bB5RNdZOEGnKOHSY2XdnzYnF5JBaWEx0fvtvIPZOUlW +DWgsQzJk1UQhu+XnBc7P1hHYNvwsVNOR+HD9LGebDy+UcfiL34XwAyBdHUsbcIr8 +hltABcj6vNbqOLndpU86DxU9z9b1PDmkFVfisElhpDEhpxmTCwI22Us1GC8D81LM +ZzMlbWSzTfNPEuqNzJYGiFt/XPwPkPPyVvti0XWPBQpwzJFFUX5xKsOGERolELRT +0yNQYznFAgMBAAECggEAOFR/xSNITbB1k3ejm1PrwlUUqlXkZIXU+LDOO0UL1t5v +vDKm1Not2sWECzYSZlID132UtJauG3YzUgdH95gUcv3XvyiAFLOriZhJht181vcn +KlwYiWfJ/dn8bCFWpqbM2/TpeB8AcCLSjAqkQI2ftlMziUmeNXdvEt1mej2hRay1 +ULfoxlC0mftNRQptD5gBFzrc47O4mVpVEQt4yS3Qyzp2/9ds9UkhaCIFpXPVCalZ +ds7R+bDDP+wiYTkUcd8fvelaMkD3Wcy8DedGRShhILZvBYTDdWcpJ7+e5EkNlEq4 ++Ys4Y/u6aFDJD53g3zCaJhatmdAZcct2MMmWH1vewQKBgQD3Y2S245cad1D9AqYD 
+ChZGp95EfRo3EzXk4VkE50bjZXjHq9fD8T0CWEZGWQZrXJCR+vBpEURy0mrPD8se +QQ0Q5+I27RadtfPnMd6ry9nDGMPxyd/10vzU6LazzLNE+uf9ljF1RHZu1iDAvInR +r1cQGbn/wKBF6BurPPIXABZEuQKBgQDgN6JHbIfDzHKhwEoUTvRrYJsTXqplD+h0 +Whg+kSQyhtKdlpINFOoEj8FUNJvTjG8les1aoajyWIqikVdvHto/mrxrSIeRkEmt +X+KG+5ld2n466tzv1DmVcIGXSrBrH3lA0i6R8Ly26FLSqw0Z12fx5GUUa1qaVRqo +rwcrIZovbQKBgHa2mojs9AC+Sv3uvG1u9LuZKJ7jDaZqMI2R2d7xgOH0Op5Ohy6+ +39D1PVvasqroc3Op4J36rEcRVDHi2Uy+WJ/JNpO2+AhcXRuPodP88ZWel8C6aB+V +zL/6oFntnAU5BgR5g2hLny2W0YbLsrMNmhDe15O0AvUo6cYla+K/pu/5AoGACr/g +EdiMMcDthf+4DX0zjqpVBPq25J18oYdoPierOpjoJBIB8oqcJZfWxvi2t8+1zHA0 +xDGX7fZ8vwqEzJkIEaCTg/k4NqxaO+uq6pnJYoyFHMIB0aW1FQsNy3kTOC+MGqV5 +Ahoukf5VajA1MpX3L8upZO84qsmFu6yYhWLZB4kCgYBlgSD5G4q6rX4ELa3XG61h +fDtu75IYEsjWm4vgJzHjeYT2xPIm9OFFYXjPghto0f1oH37ODD3DoXmsnmddgpmn +tH7aRWWHsSpB5zVgftV4urNCIsm87LWw8mvUGgCwYV1CtCX8warKokfeoA2ltz4u +oeuUzo98hN+aKRU5RO6Bmg== +-----END PRIVATE KEY----- diff --git a/transport-blockhound-tests/src/test/resources/io/netty/util/internal/localhost_server.pem b/transport-blockhound-tests/src/test/resources/io/netty/util/internal/localhost_server.pem new file mode 100644 index 00000000000..70759b29e52 --- /dev/null +++ b/transport-blockhound-tests/src/test/resources/io/netty/util/internal/localhost_server.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICozCCAYsCAnS/MA0GCSqGSIb3DQEBDQUAMBgxFjAUBgNVBAMTDU5ldHR5VGVz +dFJvb3QwIBcNMTcwMjE3MDMzMzQ0WhgPMjExNzAxMjQwMzMzNDRaMBQxEjAQBgNV +BAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANis +u0yVnOh7YE+IJREXdKjlc0t+w4twBURFdeOfGpfox7HnlZ0mLLq5ZMUptEdYl1tY +Qt1nFWXn4Zeky/c52Qpm37X1l1J8HK/psHlE11k4Qaco4dJjZd2fNicXkkFpYTHR +++28g9k5SVYNaCxDMmTVRCG75ecFzs/WEdg2/CxU05H4cP0sZ5sPL5Rx+IvfhfAD +IF0dSxtwivyGW0AFyPq81uo4ud2lTzoPFT3P1vU8OaQVV+KwSWGkMSGnGZMLAjbZ +SzUYLwPzUsxnMyVtZLNN808S6o3MlgaIW39c/A+Q8/JW+2LRdY8FCnDMkUVRfnEq +w4YRGiUQtFPTI1BjOcUCAwEAATANBgkqhkiG9w0BAQ0FAAOCAQEAQNXnwE2MJFy5 +ti07xyi8h/mY0Kl1dwZUqx4F9D9eoxLCq2/p3h/Z18AlOmjdW06pvC2sGtQtyEqL +YjuQFbMjXRo9c+6+d+xwdDKTu7+XOTHvznJ8xJpKnFOlohGq/n3efBIJSsaeasTU +slFzmdKYABDZzbsQ4X6YCIOF4XVdEQqmXpS+uEbn5C2sVtG+LXI8srmkVGpCcRew +SuTGanwxLparhBBeN1ARjKzNxXUWuK2UKZ9p8c7n7TXGhd12ZNTcLhk4rCnOFq1J +ySFvP5YL2q29fpEt+Tq0zm3V7An2qtaNDp26cEdevtKPjRyOLkCJx8OlZxc9DZvJ +HjalFDoRUw== +-----END CERTIFICATE----- diff --git a/transport-blockhound-tests/src/test/resources/io/netty/util/internal/mutual_auth_ca.pem b/transport-blockhound-tests/src/test/resources/io/netty/util/internal/mutual_auth_ca.pem new file mode 100644 index 00000000000..9c9241bc653 --- /dev/null +++ b/transport-blockhound-tests/src/test/resources/io/netty/util/internal/mutual_auth_ca.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDLDCCAhSgAwIBAgIJAO1m5pioZhLLMA0GCSqGSIb3DQEBDQUAMBgxFjAUBgNV +BAMTDU5ldHR5VGVzdFJvb3QwHhcNMTcwMjE3MDMzMzQ0WhcNMTcwMzE5MDMzMzQ0 +WjAYMRYwFAYDVQQDEw1OZXR0eVRlc3RSb290MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAnC7Y/p/TSWI1KxBKETfFKaRWCPEkoYn5G973WbCF0VDT90PX +xK6yHvhqNdDQZPmddgfDAQfjekHeeIFkjCKlvQu0js0G4Bubz4NffNumd/Mgsix8 +SWJ13lPk+Ly4PDv0bK1zB6BxP1qQm1qxVwsPy9zNP8ylJrM0Div4TXHmnWOfc0JD +4/XPpfeUHH1tt/GMtsS2Gx6EpTVPD2w7LDKUza1/rQ7d9sqmFpgsNcI9Db/sAtFP +lK2iJku5WIXQkmHimn4bqZ9wkiXJ85pm5ggGQqGMPSbe+2Lh24AvZMIBiwPbkjEU +EDFXEJfKOC3Dl71JgWOthtHZ9vcCRDQ3Sky6AQIDAQABo3kwdzAdBgNVHQ4EFgQU +qT+cH8qrebiVPpKCBQDB6At2iOAwSAYDVR0jBEEwP4AUqT+cH8qrebiVPpKCBQDB +6At2iOChHKQaMBgxFjAUBgNVBAMTDU5ldHR5VGVzdFJvb3SCCQDtZuaYqGYSyzAM +BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBDQUAA4IBAQCEemXTIew4pR2cHEFpVsW2 +bLHXLAnC23wBMT46D3tqyxscukMYjFuWosCdEsgRW8d50BXy9o4dHWeg94+aDo3A 
+DX4OTRN/veQGIG7dgM6poDzFuVJlSN0ubKKg6gpDD60IhopZpMviFAOsmzr7OXwS +9hjbTqUWujMIEHQ95sPlQFdSaavYSFfqhSltWmVCPSbArxrw0lZ2QcnUqGN47EFp +whc5wFB+rSw/ojU1jBLMvgvgzf/8V8zr1IBTDSiHNlknGqGpOOaookzUh95YRiAT +hH82y9bBeflqroOeztqMpONpWoZjlz0sWbJNvXztXINL7LaNmVYOcoUrCcxPS54T +-----END CERTIFICATE-----
train
test
"2020-07-01T10:24:41"
"2020-06-30T18:49:31Z"
violetagg
val
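For context on how the allow-list entry above takes effect: Netty ships its BlockHound customizations as a `BlockHoundIntegration` service that BlockHound discovers through `ServiceLoader` at install time. A minimal setup sketch, assuming `reactor-blockhound` is on the classpath (the holder class is hypothetical):

```java
import reactor.blockhound.BlockHound;

final class BlockHoundSetupSketch {
    static void install() {
        // Loads all BlockHoundIntegration services on the classpath, including
        // Netty's, so TrustManager verification inside the OpenSSL verify
        // callback is no longer flagged as a blocking call on an event loop.
        BlockHound.install();
    }
}
```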
netty/netty/10051_10428
netty/netty
netty/netty/10051
netty/netty/10428
[ "keyword_pr_to_issue" ]
28e1fbd366602d8f4b50921afb2c9d5e41b94a2d
0601389766e2feec82c1d6d9e1834a3930c82caa
[ "please ensure you use the latest netty release and re-open if you still see issues. ", "Encountered the same error, also trying to use `grpc-netty-shaded`.", "As a work-around: `-Dio.netty.noUnsafe=true` added at run time (that is, if your built application is called `app`, it is `./app -Dio.netty.noUnsafe=true`) seems to solve the problem.\r\n\r\nPlease note that it compleletely disables the usage of `Unsafe` by Netty. I'm not sure what other consequences it has (probably, some performance penalty).\r\n\r\nKudos to @johnou " ]
[ "@rpuch can we merge this directly to `PlatformDependent0` ? Seems overkill to add this class just for exposing one method etc ", "Use `SystemPropertyUtils.`", "I've moved the code to `PlatformDependent0` and removed `GraalDetector` class. Please let me know if it is ok now.", "Done" ]
"2020-07-26T15:04:12Z"
[]
Failure to run a service that uses Netty after building it with GraalVM native-image
### Expected behavior Ability to build and run a service using Netty with GraalVM [native-image](https://www.graalvm.org/docs/reference-manual/native-image/). ### Actual behavior It seems like Netty already has some support for GraalVM, but there are still some issues. I can't find any reference to this error below, so maybe I'm doing something wrong! Any ideas would be greatly appreciated. Failure during runtime: ``` com.oracle.svm.core.jdk.UnsupportedFeatureError: Unsupported method of Unsafe at com.oracle.svm.core.util.VMError.unsupportedFeature(VMError.java:101) at jdk.internal.misc.Unsafe.staticFieldOffset(Unsafe.java:230) at sun.misc.Unsafe.staticFieldOffset(Unsafe.java:662) at io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$5.run(PlatformDependent0.java:281) at java.security.AccessController.doPrivileged(AccessController.java:81) at io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0.<clinit>(PlatformDependent0.java:266) at com.oracle.svm.core.hub.ClassInitializationInfo.invokeClassInitializer(ClassInitializationInfo.java:350) at com.oracle.svm.core.hub.ClassInitializationInfo.initialize(ClassInitializationInfo.java:270) at java.lang.Class.ensureInitialized(DynamicHub.java:496) at io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent.isAndroid(PlatformDependent.java:272) at io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent.<clinit>(PlatformDependent.java:92) at com.oracle.svm.core.hub.ClassInitializationInfo.invokeClassInitializer(ClassInitializationInfo.java:350) at com.oracle.svm.core.hub.ClassInitializationInfo.initialize(ClassInitializationInfo.java:270) at java.lang.Class.ensureInitialized(DynamicHub.java:496) at io.grpc.netty.shaded.io.netty.util.AsciiString.<init>(AsciiString.java:222) at io.grpc.netty.shaded.io.netty.util.AsciiString.<init>(AsciiString.java:209) at io.grpc.netty.shaded.io.netty.util.AsciiString.cached(AsciiString.java:1406) at io.grpc.netty.shaded.io.netty.util.AsciiString.<clinit>(AsciiString.java:47) at com.oracle.svm.core.hub.ClassInitializationInfo.invokeClassInitializer(ClassInitializationInfo.java:350) at com.oracle.svm.core.hub.ClassInitializationInfo.initialize(ClassInitializationInfo.java:270) at java.lang.Class.ensureInitialized(DynamicHub.java:496) at io.grpc.netty.shaded.io.grpc.netty.Utils.<clinit>(Utils.java:72) at com.oracle.svm.core.hub.ClassInitializationInfo.invokeClassInitializer(ClassInitializationInfo.java:350) at com.oracle.svm.core.hub.ClassInitializationInfo.initialize(ClassInitializationInfo.java:270) at java.lang.Class.ensureInitialized(DynamicHub.java:496) at io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder.<clinit>(NettyChannelBuilder.java:74) at com.oracle.svm.core.hub.ClassInitializationInfo.invokeClassInitializer(ClassInitializationInfo.java:350) at com.oracle.svm.core.hub.ClassInitializationInfo.initialize(ClassInitializationInfo.java:270) at java.lang.Class.ensureInitialized(DynamicHub.java:496) at io.grpc.netty.shaded.io.grpc.netty.NettyChannelProvider.builderForAddress(NettyChannelProvider.java:37) at io.grpc.netty.shaded.io.grpc.netty.NettyChannelProvider.builderForAddress(NettyChannelProvider.java:23) at io.grpc.ManagedChannelBuilder.forAddress(ManagedChannelBuilder.java:39) at com.google.api.gax.grpc.InstantiatingGrpcChannelProvider.createSingleChannel(InstantiatingGrpcChannelProvider.java:269) at com.google.api.gax.grpc.InstantiatingGrpcChannelProvider.access$1500(InstantiatingGrpcChannelProvider.java:71) at
com.google.api.gax.grpc.InstantiatingGrpcChannelProvider$1.createSingleChannel(InstantiatingGrpcChannelProvider.java:202) at com.google.api.gax.grpc.ChannelPool.create(ChannelPool.java:72) at com.google.api.gax.grpc.InstantiatingGrpcChannelProvider.createChannel(InstantiatingGrpcChannelProvider.java:209) at com.google.api.gax.grpc.InstantiatingGrpcChannelProvider.getTransportChannel(InstantiatingGrpcChannelProvider.java:192) at com.google.api.gax.rpc.ClientContext.create(ClientContext.java:155) at com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub.create(EnhancedBigtableStub.java:163) at com.google.cloud.bigtable.data.v2.BigtableDataClient.create(BigtableDataClient.java:152) ``` ### Steps to reproduce Try building a service that uses a library that uses Netty under the hood (in my particular example, `google-cloud-bigtable`). ### Minimal yet complete reproducer code (or URL to code) ### Netty version The library uses a shaded version of Netty, `grpc-netty-shaded`, at the latest version `1.27.1`, which I'm pretty sure is Netty 4.1.44+ (but I don't know how to verify). ### JVM version (e.g. `java -version`) N/A ### OS version (e.g. `uname -a`) macOS Catalina
[ "common/src/main/java/io/netty/util/internal/PlatformDependent0.java" ]
[ "common/src/main/java/io/netty/util/internal/PlatformDependent0.java" ]
[]
diff --git a/common/src/main/java/io/netty/util/internal/PlatformDependent0.java b/common/src/main/java/io/netty/util/internal/PlatformDependent0.java index 44976f18cb8..d95aaa8cdb8 100644 --- a/common/src/main/java/io/netty/util/internal/PlatformDependent0.java +++ b/common/src/main/java/io/netty/util/internal/PlatformDependent0.java @@ -53,6 +53,11 @@ final class PlatformDependent0 { private static final Object INTERNAL_UNSAFE; private static final boolean IS_EXPLICIT_TRY_REFLECTION_SET_ACCESSIBLE = explicitTryReflectionSetAccessible0(); + // See https://github.com/oracle/graal/blob/master/sdk/src/org.graalvm.nativeimage/src/org/graalvm/nativeimage/ + // ImageInfo.java + private static final boolean RUNNING_IN_NATIVE_IMAGE = SystemPropertyUtil.contains( + "org.graalvm.nativeimage.imagecode"); + static final Unsafe UNSAFE; // constants borrowed from murmur3 @@ -283,7 +288,7 @@ public Object run() { Class<?> bitsClass = Class.forName("java.nio.Bits", false, getSystemClassLoader()); int version = javaVersion(); - if (version >= 9) { + if (unsafeStaticFieldOffsetSupported() && version >= 9) { // Java9/10 use all lowercase and later versions all uppercase. String fieldName = version >= 11 ? "UNALIGNED" : "unaligned"; // On Java9 and later we try to directly access the field as we can do this without @@ -399,6 +404,10 @@ public Object run() { DIRECT_BUFFER_CONSTRUCTOR != null ? "available" : "unavailable"); } + private static boolean unsafeStaticFieldOffsetSupported() { + return !RUNNING_IN_NATIVE_IMAGE; + } + static boolean isExplicitNoUnsafe() { return EXPLICIT_NO_UNSAFE_CAUSE != null; }
null
train
test
"2020-07-17T07:12:11"
"2020-02-21T11:33:21Z"
CremboC
val
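A standalone sketch of the detection the patch introduces: GraalVM defines the system property `org.graalvm.nativeimage.imagecode` both at image build time and at image run time, so its presence signals that `Unsafe.staticFieldOffset` must be avoided. `System.getProperty(...) != null` approximates the `SystemPropertyUtil.contains(...)` call used in the patch, and the class name is hypothetical:

```java
final class NativeImageDetectionSketch {
    // See org.graalvm.nativeimage.ImageInfo, referenced in the patch comment.
    static final boolean RUNNING_IN_NATIVE_IMAGE =
            System.getProperty("org.graalvm.nativeimage.imagecode") != null;

    static boolean unsafeStaticFieldOffsetSupported() {
        return !RUNNING_IN_NATIVE_IMAGE;
    }
}
```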
netty/netty/10434_10457
netty/netty
netty/netty/10434
netty/netty/10457
[ "keyword_pr_to_issue" ]
5aea78950f6ee0b02169c9fec15901e8bc0c3888
54bfd21e525f5d197bbaff583a832ec0f3cb8735
[ "I don't think netty was ever intended to properly recover from OutOfMemoryError...it would be a nice addition but hard to recover from errors in every random place.", "> I don't think netty was ever intended to properly recover from OutOfMemoryError...it would be a nice addition but hard to recover from errors in every random place.\r\n\r\nyeah, recovery from Outofmemoryerror is difficult.\r\n\r\nI've only found out that `AbstractNioByteChannel.java#read` OutOfMemoryError causes the pipeline read event to be blocked. If many requests are in the request, many scokets will be occupied(socket). \r\n```shell\r\n[root@app ~]# ss -s\r\nTotal: 61569 (kernel 61731)\r\nTCP: 62328 (estab 49757, closed 9067, orphaned 3, synrecv 0, timewait 235/0), ports 411\r\n\r\nTransport Total IP IPv6\r\n*\t 61731 - - \r\nRAW\t 0 0 0 \r\nUDP\t 1 1 0 \r\nTCP\t 53261 6 53255 \r\nINET\t 53262 7 53255 \r\nFRAG\t 0 0 0 \r\n```\r\n\r\nIn each channel read implementation, netty creates a new receive buffer to read all inbound data when it enters the read event, in this case if OutOfMemoryError is supposed to release the pipeline read event. Otherwise, it will be blocked for a certain period of time, causing the server to monitor the alarm. \r\n\r\nIf we find out where to use `RecvByteBufAllocator.java#Handle#allocate`, and then catch in the catch to add out of the OutOfMemoryError judgment, and then call the release pipeline read event, will it solve the problem?\r\n\r\nI should probably test the 'EpollStreamChannel' KQueueStreamChannel environment to see if it will be the same, but I haven't tried to do it. " ]
[]
"2020-08-07T06:58:16Z"
[]
OutOfDirectMemoryError causes 100% CPU and file descriptor exhaustion
### Expected behavior The connection should be released. ### Actual behavior The connection stays occupied. ### Steps to reproduce See [#10424](https://github.com/netty/netty/issues/10424). That OOM exception was fixed in Netty 4.1.51.Final, but an OOM can still cause this problem, so it needs to be fixed as well, because the next OOM will trigger it again. ### Minimal yet complete reproducer code (or URL to code) Some thoughts from reading the code. In the read event: ```java @Override public final void read() { final ChannelConfig config = config(); if (shouldBreakReadReady(config)) { clearReadPending(); return; } final ChannelPipeline pipeline = pipeline(); final ByteBufAllocator allocator = config.getAllocator(); final RecvByteBufAllocator.Handle allocHandle = recvBufAllocHandle(); allocHandle.reset(config); ByteBuf byteBuf = null; boolean close = false; try { do { // If an OOM occurs, it happens here, at the allocation below byteBuf = allocHandle.allocate(allocator); allocHandle.lastBytesRead(doReadBytes(byteBuf)); if (allocHandle.lastBytesRead() <= 0) { // nothing was read. release the buffer. byteBuf.release(); byteBuf = null; close = allocHandle.lastBytesRead() < 0; if (close) { // There is nothing left to read as we received an EOF. readPending = false; } break; } allocHandle.incMessagesRead(1); readPending = false; pipeline.fireChannelRead(byteBuf); byteBuf = null; } while (allocHandle.continueReading()); allocHandle.readComplete(); pipeline.fireChannelReadComplete(); if (close) { closeOnRead(pipeline); } } catch (Throwable t) { // the OOM is caught here and handled handleReadException(pipeline, byteBuf, t, close, allocHandle); } finally { ..... } } } ``` handleReadException method: ```java private void handleReadException(ChannelPipeline pipeline, ByteBuf byteBuf, Throwable cause, boolean close, RecvByteBufAllocator.Handle allocHandle) { if (byteBuf != null) { if (byteBuf.isReadable()) { readPending = false; pipeline.fireChannelRead(byteBuf); } else { byteBuf.release(); } } allocHandle.readComplete(); pipeline.fireChannelReadComplete(); pipeline.fireExceptionCaught(cause); // If it is an OOM, the close flag stays false, // and cause is not an instance of IOException because it is an OutOfMemoryError, // so the close path is never taken and readPending stays set; for an HTTP server the request blocks forever. // Under high concurrency this also explains why sockets fill up and the CPU maxes out. // If an OOM occurred, maybe set close = true, or add the condition `cause instanceof OutOfMemoryError` if (close || cause instanceof IOException) { closeOnRead(pipeline); } } ``` ```shell [root@app ~]# ss -s Total: 61569 (kernel 61731) TCP: 62328 (estab 49757, closed 9067, orphaned 3, synrecv 0, timewait 235/0), ports 411 Transport Total IP IPv6 * 61731 - - RAW 0 0 0 UDP 1 1 0 TCP 53261 6 53255 INET 53262 7 53255 FRAG 0 0 0 ``` ### Netty version 4.1.50.Final ### JVM version (e.g. `java -version`) java version "1.8.0_221" Java(TM) SE Runtime Environment (build 1.8.0_221-b11) Java HotSpot(TM) 64-Bit Server VM (build 25.221-b11, mixed mode) ### OS version (e.g. `uname -a`) Windows Linux
[ "transport-native-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java", "transport-native-kqueue/src/main/java/io/netty/channel/kqueue/AbstractKQueueStreamChannel.java", "transport/src/main/java/io/netty/channel/nio/AbstractNioByteChannel.java", "transport/src/main/java/io/netty/channel/oio/AbstractOioByteChannel.java" ]
[ "transport-native-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java", "transport-native-kqueue/src/main/java/io/netty/channel/kqueue/AbstractKQueueStreamChannel.java", "transport/src/main/java/io/netty/channel/nio/AbstractNioByteChannel.java", "transport/src/main/java/io/netty/channel/oio/AbstractOioByteChannel.java" ]
[]
diff --git a/transport-native-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java b/transport-native-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java index 95e31b380f7..8a5b79d4938 100644 --- a/transport-native-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java +++ b/transport-native-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java @@ -726,7 +726,10 @@ private void handleReadException(ChannelPipeline pipeline, ByteBuf byteBuf, Thro allocHandle.readComplete(); pipeline.fireChannelReadComplete(); pipeline.fireExceptionCaught(cause); - if (close || cause instanceof IOException) { + + // If oom will close the read event, release connection. + // See https://github.com/netty/netty/issues/10434 + if (close || cause instanceof OutOfMemoryError || cause instanceof IOException) { shutdownInput(false); } } diff --git a/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/AbstractKQueueStreamChannel.java b/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/AbstractKQueueStreamChannel.java index 4b7f4e06e5b..a412d6ac82b 100644 --- a/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/AbstractKQueueStreamChannel.java +++ b/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/AbstractKQueueStreamChannel.java @@ -587,7 +587,10 @@ private void handleReadException(ChannelPipeline pipeline, ByteBuf byteBuf, Thro allocHandle.readComplete(); pipeline.fireChannelReadComplete(); pipeline.fireExceptionCaught(cause); - if (close || cause instanceof IOException) { + + // If oom will close the read event, release connection. + // See https://github.com/netty/netty/issues/10434 + if (close || cause instanceof OutOfMemoryError || cause instanceof IOException) { shutdownInput(false); } } diff --git a/transport/src/main/java/io/netty/channel/nio/AbstractNioByteChannel.java b/transport/src/main/java/io/netty/channel/nio/AbstractNioByteChannel.java index db87b100b0c..d0b6acd880b 100644 --- a/transport/src/main/java/io/netty/channel/nio/AbstractNioByteChannel.java +++ b/transport/src/main/java/io/netty/channel/nio/AbstractNioByteChannel.java @@ -123,7 +123,10 @@ private void handleReadException(ChannelPipeline pipeline, ByteBuf byteBuf, Thro allocHandle.readComplete(); pipeline.fireChannelReadComplete(); pipeline.fireExceptionCaught(cause); - if (close || cause instanceof IOException) { + + // If oom will close the read event, release connection. + // See https://github.com/netty/netty/issues/10434 + if (close || cause instanceof OutOfMemoryError || cause instanceof IOException) { closeOnRead(pipeline); } } diff --git a/transport/src/main/java/io/netty/channel/oio/AbstractOioByteChannel.java b/transport/src/main/java/io/netty/channel/oio/AbstractOioByteChannel.java index 54ea0deadd8..87c24b7dce8 100644 --- a/transport/src/main/java/io/netty/channel/oio/AbstractOioByteChannel.java +++ b/transport/src/main/java/io/netty/channel/oio/AbstractOioByteChannel.java @@ -93,7 +93,10 @@ private void handleReadException(ChannelPipeline pipeline, ByteBuf byteBuf, Thro allocHandle.readComplete(); pipeline.fireChannelReadComplete(); pipeline.fireExceptionCaught(cause); - if (close || cause instanceof IOException) { + + // If oom will close the read event, release connection. + // See https://github.com/netty/netty/issues/10434 + if (close || cause instanceof OutOfMemoryError || cause instanceof IOException) { closeOnRead(pipeline); } }
null
train
test
"2020-08-06T13:54:46"
"2020-07-30T06:37:01Z"
wuxiansen
val
netty/netty/10416_10473
netty/netty
netty/netty/10416
netty/netty/10473
[ "keyword_pr_to_issue" ]
f58223982ce8d9ed056c3d127972222966d0ebd1
61b0fa97d7e5fee763e331b536345844640c092e
[ "@ejona86 can you have a look ?", "I'll send out a PR.\r\n\r\nThe `verifyStreamId()` check for `streamDependency` should be a `verifyStreamOrConnectionId()`:\r\nhttps://github.com/netty/netty/blob/f58223982ce8d9ed056c3d127972222966d0ebd1/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java#L273-L277\r\n\r\nThe mislabled error seems to be a bug in the verify function, in that it ignores `argumentName`:\r\nhttps://github.com/netty/netty/blob/f58223982ce8d9ed056c3d127972222966d0ebd1/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java#L604-L610\r\n\r\nIt looks like the reading direction has the correct verification:\r\nhttps://github.com/netty/netty/blob/f58223982ce8d9ed056c3d127972222966d0ebd1/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameReader.java#L501-L511\r\n\r\nIt later re-verifies with (although note these are asserts, so they are internal checks and not for verifying remotes):\r\nhttps://github.com/netty/netty/blob/f58223982ce8d9ed056c3d127972222966d0ebd1/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2RemoteFlowController.java#L184-L186" ]
[]
"2020-08-11T15:32:36Z"
[]
Http2: writing valid PRIORITY frame fails with misleading error message
### Expected behavior * Writing valid PRIORITY frame with `streamDependency=0` succeeds * PRIORITY frame validation message contains actual cause of error ### Actual behavior Writing PRIORITY frame with `streamDependency=0` ``` http2Handler.encoder() .writePriority(ctx, streamId, /*dependency*/ 0, weight, /*exclusive*/ false, ctx.newPromise()); ``` fails with misleading error message ``` java.lang.IllegalArgumentException: streamId: 0 (expected: > 0) at io.netty.util.internal.ObjectUtil.checkPositive(ObjectUtil.java:44) at io.netty.handler.codec.http2.DefaultHttp2FrameWriter.verifyStreamId(DefaultHttp2FrameWriter.java:605) at io.netty.handler.codec.http2.DefaultHttp2FrameWriter.writePriority(DefaultHttp2FrameWriter.java:277) at io.netty.handler.codec.http2.DefaultHttp2ConnectionEncoder.writePriority(DefaultHttp2ConnectionEncoder.java:278) at io.netty.handler.codec.http2.DecoratingHttp2FrameWriter.writePriority(DecoratingHttp2FrameWriter.java:59) ``` caused by [DefaultHttp2FrameWriter](https://github.com/netty/netty/blob/28e1fbd366602d8f4b50921afb2c9d5e41b94a2d/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java#L277) that expects `streamDependency > 0`, but fails with message `java.lang.IllegalArgumentException: streamId: 0 (expected: > 0)`. ### Steps to reproduce ### Minimal yet complete reproducer code (or URL to code) ### Netty version 4.1.51.Final ### JVM version (e.g. `java -version`) ### OS version (e.g. `uname -a`)
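For reference, the PRIORITY payload the writer produces is only five bytes (RFC 7540 §6.3): a 31-bit stream dependency with the exclusive flag in the high bit, followed by one byte carrying the weight minus one. A sketch of that encoding, consistent with the expected byte arrays in the tests added by the fix (this is an illustration, not Netty's internal writer):

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

final class PriorityPayloadSketch {
    // weight is the API-level value in [1, 256]; the wire carries weight - 1.
    static ByteBuf encodePayload(int streamDependency, short weight, boolean exclusive) {
        ByteBuf buf = Unpooled.buffer(5);
        int dependencyWord = exclusive ? (int) (0x80000000L | streamDependency) : streamDependency;
        buf.writeInt(dependencyWord);
        buf.writeByte(weight - 1);
        return buf;
    }
}
```

With streamDependency = 0, weight = 16 and exclusive = false this yields `00 00 00 00 0F`, matching the `writePriorityDefaults` expectation in the test patch below.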
[ "codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java" ]
[ "codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java" ]
[ "codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriterTest.java" ]
diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java index 71a6653437c..00a4fe42255 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java @@ -274,7 +274,7 @@ public ChannelFuture writePriority(ChannelHandlerContext ctx, int streamId, int streamDependency, short weight, boolean exclusive, ChannelPromise promise) { try { verifyStreamId(streamId, STREAM_ID); - verifyStreamId(streamDependency, STREAM_DEPENDENCY); + verifyStreamOrConnectionId(streamDependency, STREAM_DEPENDENCY); verifyWeight(weight); ByteBuf buf = ctx.alloc().buffer(PRIORITY_FRAME_LENGTH); @@ -602,11 +602,11 @@ private static void writePaddingLength(ByteBuf buf, int padding) { } private static void verifyStreamId(int streamId, String argumentName) { - checkPositive(streamId, "streamId"); + checkPositive(streamId, argumentName); } private static void verifyStreamOrConnectionId(int streamId, String argumentName) { - checkPositiveOrZero(streamId, "streamId"); + checkPositiveOrZero(streamId, argumentName); } private static void verifyWeight(short weight) {
diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriterTest.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriterTest.java index 6d53c4087ea..7edb1eed516 100644 --- a/codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriterTest.java +++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriterTest.java @@ -280,6 +280,38 @@ public void writeFrameHasPayload() throws Exception { assertEquals(expectedOutbound, outbound); } + @Test + public void writePriority() { + frameWriter.writePriority( + ctx, /* streamId= */ 1, /* dependencyId= */ 2, /* weight= */ (short) 256, /* exclusive= */ true, promise); + + expectedOutbound = Unpooled.copiedBuffer(new byte[] { + (byte) 0x00, (byte) 0x00, (byte) 0x05, // payload length = 5 + (byte) 0x02, // payload type = 2 + (byte) 0x00, // flags = 0x00 + (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x01, // stream id = 1 + (byte) 0x80, (byte) 0x00, (byte) 0x00, (byte) 0x02, // dependency id = 2 | exclusive = 1 << 63 + (byte) 0xFF, // weight = 255 (implicit +1) + }); + assertEquals(expectedOutbound, outbound); + } + + @Test + public void writePriorityDefaults() { + frameWriter.writePriority( + ctx, /* streamId= */ 1, /* dependencyId= */ 0, /* weight= */ (short) 16, /* exclusive= */ false, promise); + + expectedOutbound = Unpooled.copiedBuffer(new byte[] { + (byte) 0x00, (byte) 0x00, (byte) 0x05, // payload length = 5 + (byte) 0x02, // payload type = 2 + (byte) 0x00, // flags = 0x00 + (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x01, // stream id = 1 + (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, // dependency id = 0 | exclusive = 0 << 63 + (byte) 0x0F, // weight = 15 (implicit +1) + }); + assertEquals(expectedOutbound, outbound); + } + private byte[] headerPayload(int streamId, Http2Headers headers, byte padding) throws Http2Exception, IOException { if (padding == 0) { return headerPayload(streamId, headers);
train
test
"2020-08-11T14:00:08"
"2020-07-17T09:00:11Z"
mostroverkhov
val
netty/netty/10485_10488
netty/netty
netty/netty/10485
netty/netty/10488
[ "keyword_pr_to_issue", "connected" ]
514d349e1fa5a057e815a5f3ac6a7e3f3aa19784
6150a0e3c5af100e35e5fda3a1d3e3b647bce6a4
[ "It's not clear to me why you want to change the log level, and if you want to change it to TRACE or INFO, or some other level? Log levels are normally hard-coded on the usage site, and the current code follows that convention.\r\n\r\nThis setting is about whether or not logging is enabled. Detail configuration of how logging works is something we'd prefer to leave to the logging framework.", "@chrisvest The use case is the following: Reactor Netty wants to expose this as a configuration so that the user can decide which logger and what level to use. With the current approach the user sees that the tracing is enabled but needs to know the exact logger and level that are needed (which are not from Reactor Netty but from a library that Reactor Netty uses)", "But that's the case with all logging in Netty, isn't it? Well, almost I suppose. The logging implementation that is used everywhere is discovered at runtime, and the level is always hard coded, with the exception of `LoggingHandler` and `Http2FrameLogger`. With that in mind I think the second approach makes the most sense: expose some clean-up version of `TraceDnsQueryLifeCycleObserverFactory`, and remove `traceEnabled`.", "> But that's the case with all logging in Netty, isn't it? Well, almost I suppose. The logging implementation that is used everywhere is discovered at runtime, and the level is always hard coded, with the exception of `LoggingHandler` and `Http2FrameLogger`. With that in mind I think the second approach makes the most sense: expose some clean-up version of `TraceDnsQueryLifeCycleObserverFactory`, and remove `traceEnabled`.\r\n\r\nYes I agree, from my point of view it is like `LoggingHandler`, I can add this handler to the pipeline. The same is with `TraceDnsQueryLifeCycleObserverFactory` I can add it to the configuration." ]
[ "Should we use a `static` logger and so use `InternalLoggerFactory.getInstance(LoggingDnsQueryLifeCycleObserverFactory.class)` ?", "final ", "2020", "private ?", "![img](http://pa1.narvii.com/6187/d349302822f47c8ebb904fdcf971cfcaaefdb0c2_hq.gif)", "May be add `requireNonNull(classContext)`", "May be add `requireNonNull(nameContext)`", "nit: I would just call this `name`" ]
"2020-08-19T11:21:37Z"
[]
Ability to change the logger and level used by DnsNameResolverBuilder#traceEnabled
### Expected behavior Ability to change the logger and level used by `DnsNameResolverBuilder#traceEnabled` ### Actual behavior When `DnsNameResolverBuilder#traceEnabled` is enabled, it uses always the logger and level below https://github.com/netty/netty/blob/46cb4015ff25ff2dd3d0dede5c3185ac498e56b0/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java#L26-L28 It would be nice if those can be configured. Possible solutions: - `DnsNameResolverBuilder#traceEnabled` where one can specify logger and level Or - `TraceDnsQueryLifeCycleObserverFactory` can be made public and also it can expose construction similar to what `LoggingHandler` provides i.e. ``` public TraceDnsQueryLifeCycleObserverFactory(Class<?> clazz, LogLevel level) { ObjectUtil.checkNotNull(clazz, "clazz"); ObjectUtil.checkNotNull(level, "level"); this.logger = InternalLoggerFactory.getInstance(clazz); this.level = level.toInternalLevel(); } ``` With this approach one will be able to add such observer with `DnsNameResolverBuilder#dnsQueryLifecycleObserverFactory` this second approach will require a dependency to `netty-handler` module Wdyt? ### Netty version current snapshot ### JVM version (e.g. `java -version`) jdk 8 ### OS version (e.g. `uname -a`) Mac OS
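Grounded in the fix that was eventually merged (the `LoggingDnsQueryLifeCycleObserverFactory` in the patch below), usage would look roughly like this; the `EventLoop` is caller-supplied and the logger context class is arbitrary:

```java
import io.netty.channel.EventLoop;
import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.handler.logging.LogLevel;
import io.netty.resolver.dns.DnsNameResolver;
import io.netty.resolver.dns.DnsNameResolverBuilder;
import io.netty.resolver.dns.LoggingDnsQueryLifeCycleObserverFactory;

final class DnsTracingExample {
    static DnsNameResolver build(EventLoop eventLoop) {
        return new DnsNameResolverBuilder(eventLoop)
                .channelType(NioDatagramChannel.class)
                // The caller now chooses both the logger context and the level.
                .dnsQueryLifecycleObserverFactory(
                        new LoggingDnsQueryLifeCycleObserverFactory(DnsTracingExample.class, LogLevel.INFO))
                .build();
    }
}
```

This gives the user full control over both the logger and the level, which is the Reactor Netty requirement described in the discussion.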
[ "resolver-dns/pom.xml", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java", "resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifecycleObserver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java" ]
[ "resolver-dns/pom.xml", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java", "resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifeCycleObserverFactory.java", "resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifecycleObserver.java" ]
[ "resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java" ]
diff --git a/resolver-dns/pom.xml b/resolver-dns/pom.xml index 8a918c4b97b..18cb12778ae 100644 --- a/resolver-dns/pom.xml +++ b/resolver-dns/pom.xml @@ -63,6 +63,11 @@ <artifactId>netty-codec-dns</artifactId> <version>${project.version}</version> </dependency> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>netty-handler</artifactId> + <version>${project.version}</version> + </dependency> <dependency> <groupId>org.apache.directory.server</groupId> <artifactId>apacheds-protocol-dns</artifactId> diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java index cf485903d1a..44008fde58b 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java @@ -272,7 +272,6 @@ protected DnsServerAddressStream initialValue() { * @param resolvedAddressTypes the preferred address types * @param recursionDesired if recursion desired flag must be set * @param maxQueriesPerResolve the maximum allowed number of DNS queries for a given name resolution - * @param traceEnabled if trace is enabled * @param maxPayloadSize the capacity of the datagram packet buffer * @param optResourceEnabled if automatic inclusion of a optional records is enabled * @param hostsFileEntriesResolver the {@link HostsFileEntriesResolver} used to check for local aliases @@ -296,7 +295,6 @@ public DnsNameResolver( ResolvedAddressTypes resolvedAddressTypes, boolean recursionDesired, int maxQueriesPerResolve, - boolean traceEnabled, int maxPayloadSize, boolean optResourceEnabled, HostsFileEntriesResolver hostsFileEntriesResolver, @@ -306,7 +304,7 @@ public DnsNameResolver( boolean decodeIdn) { this(eventLoop, channelFactory, resolveCache, new AuthoritativeDnsServerCacheAdapter(authoritativeDnsServerCache), dnsQueryLifecycleObserverFactory, - queryTimeoutMillis, resolvedAddressTypes, recursionDesired, maxQueriesPerResolve, traceEnabled, + queryTimeoutMillis, resolvedAddressTypes, recursionDesired, maxQueriesPerResolve, maxPayloadSize, optResourceEnabled, hostsFileEntriesResolver, dnsServerAddressStreamProvider, searchDomains, ndots, decodeIdn); } @@ -324,7 +322,6 @@ public DnsNameResolver( * @param resolvedAddressTypes the preferred address types * @param recursionDesired if recursion desired flag must be set * @param maxQueriesPerResolve the maximum allowed number of DNS queries for a given name resolution - * @param traceEnabled if trace is enabled * @param maxPayloadSize the capacity of the datagram packet buffer * @param optResourceEnabled if automatic inclusion of a optional records is enabled * @param hostsFileEntriesResolver the {@link HostsFileEntriesResolver} used to check for local aliases @@ -348,7 +345,6 @@ public DnsNameResolver( ResolvedAddressTypes resolvedAddressTypes, boolean recursionDesired, int maxQueriesPerResolve, - boolean traceEnabled, int maxPayloadSize, boolean optResourceEnabled, HostsFileEntriesResolver hostsFileEntriesResolver, @@ -358,7 +354,7 @@ public DnsNameResolver( boolean decodeIdn) { this(eventLoop, channelFactory, null, resolveCache, NoopDnsCnameCache.INSTANCE, authoritativeDnsServerCache, dnsQueryLifecycleObserverFactory, queryTimeoutMillis, resolvedAddressTypes, recursionDesired, - maxQueriesPerResolve, traceEnabled, maxPayloadSize, optResourceEnabled, hostsFileEntriesResolver, + maxQueriesPerResolve, maxPayloadSize, optResourceEnabled, hostsFileEntriesResolver, dnsServerAddressStreamProvider, 
searchDomains, ndots, decodeIdn, false); } @@ -374,7 +370,6 @@ public DnsNameResolver( ResolvedAddressTypes resolvedAddressTypes, boolean recursionDesired, int maxQueriesPerResolve, - boolean traceEnabled, int maxPayloadSize, boolean optResourceEnabled, HostsFileEntriesResolver hostsFileEntriesResolver, @@ -397,11 +392,7 @@ public DnsNameResolver( requireNonNull(dnsServerAddressStreamProvider, "dnsServerAddressStreamProvider"); this.resolveCache = requireNonNull(resolveCache, "resolveCache"); this.cnameCache = requireNonNull(cnameCache, "cnameCache"); - this.dnsQueryLifecycleObserverFactory = traceEnabled ? - dnsQueryLifecycleObserverFactory instanceof NoopDnsQueryLifecycleObserverFactory ? - new TraceDnsQueryLifeCycleObserverFactory() : - new BiDnsQueryLifecycleObserverFactory(new TraceDnsQueryLifeCycleObserverFactory(), - dnsQueryLifecycleObserverFactory) : + this.dnsQueryLifecycleObserverFactory = requireNonNull(dnsQueryLifecycleObserverFactory, "dnsQueryLifecycleObserverFactory"); this.searchDomains = searchDomains != null ? searchDomains.clone() : DEFAULT_SEARCH_DOMAINS; this.ndots = ndots >= 0 ? ndots : DEFAULT_OPTIONS.ndots(); diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java index a03978eb91b..d107aa1a9a2 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java @@ -318,18 +318,6 @@ public DnsNameResolverBuilder maxQueriesPerResolve(int maxQueriesPerResolve) { return this; } - /** - * Sets if this resolver should generate the detailed trace information in an exception message so that - * it is easier to understand the cause of resolution failure. - * - * @param traceEnabled true if trace is enabled - * @return {@code this} - */ - public DnsNameResolverBuilder traceEnabled(boolean traceEnabled) { - this.traceEnabled = traceEnabled; - return this; - } - /** * Sets the capacity of the datagram packet buffer (in bytes). The default value is {@code 4096} bytes. * @@ -482,7 +470,6 @@ public DnsNameResolver build() { resolvedAddressTypes, recursionDesired, maxQueriesPerResolve, - traceEnabled, maxPayloadSize, optResourceEnabled, hostsFileEntriesResolver, @@ -540,7 +527,6 @@ public DnsNameResolverBuilder copy() { copiedBuilder.resolvedAddressTypes(resolvedAddressTypes); copiedBuilder.recursionDesired(recursionDesired); copiedBuilder.maxQueriesPerResolve(maxQueriesPerResolve); - copiedBuilder.traceEnabled(traceEnabled); copiedBuilder.maxPayloadSize(maxPayloadSize); copiedBuilder.optResourceEnabled(optResourceEnabled); copiedBuilder.hostsFileEntriesResolver(hostsFileEntriesResolver); diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifeCycleObserverFactory.java b/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifeCycleObserverFactory.java new file mode 100644 index 00000000000..f24222b3537 --- /dev/null +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifeCycleObserverFactory.java @@ -0,0 +1,85 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.resolver.dns; + +import io.netty.handler.codec.dns.DnsQuestion; +import io.netty.handler.logging.LogLevel; +import io.netty.util.internal.logging.InternalLogLevel; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import static java.util.Objects.requireNonNull; + +/** + * A {@link DnsQueryLifecycleObserverFactory} that enables detailed logging in the {@link DnsNameResolver}. + * <p> + * When {@linkplain DnsNameResolverBuilder#dnsQueryLifecycleObserverFactory(DnsQueryLifecycleObserverFactory) + * configured on the resolver}, detailed trace information will be generated so that it is easier to understand the + * cause of resolution failure. + */ +public final class LoggingDnsQueryLifeCycleObserverFactory implements DnsQueryLifecycleObserverFactory { + private static final InternalLogger DEFAULT_LOGGER = + InternalLoggerFactory.getInstance(LoggingDnsQueryLifeCycleObserverFactory.class); + private final InternalLogger logger; + private final InternalLogLevel level; + + /** + * Create {@link DnsQueryLifecycleObserver} instances that log events at the default {@link LogLevel#DEBUG} level. + */ + public LoggingDnsQueryLifeCycleObserverFactory() { + this(LogLevel.DEBUG); + } + + /** + * Create {@link DnsQueryLifecycleObserver} instances that log events at the given log level. + * @param level The log level to use for logging resolver events. + */ + public LoggingDnsQueryLifeCycleObserverFactory(LogLevel level) { + this.level = checkAndConvertLevel(level); + logger = DEFAULT_LOGGER; + } + + /** + * Create {@link DnsQueryLifecycleObserver} instances that log events to a logger with the given class context, + * at the given log level. + * @param classContext The class context for the logger to use. + * @param level The log level to use for logging resolver events. + */ + public LoggingDnsQueryLifeCycleObserverFactory(Class<?> classContext, LogLevel level) { + this.level = checkAndConvertLevel(level); + logger = InternalLoggerFactory.getInstance(requireNonNull(classContext, "classContext")); + } + + /** + * Create {@link DnsQueryLifecycleObserver} instances that log events to a logger with the given name context, + * at the given log level. + * @param name The name for the logger to use. + * @param level The log level to use for logging resolver events. 
+ */ + public LoggingDnsQueryLifeCycleObserverFactory(String name, LogLevel level) { + this.level = checkAndConvertLevel(level); + logger = InternalLoggerFactory.getInstance(requireNonNull(name, "name")); + } + + private static InternalLogLevel checkAndConvertLevel(LogLevel level) { + return requireNonNull(level, "level").toInternalLevel(); + } + + @Override + public DnsQueryLifecycleObserver newDnsQueryLifecycleObserver(DnsQuestion question) { + return new LoggingDnsQueryLifecycleObserver(question, logger, level); + } +} diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifecycleObserver.java b/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifecycleObserver.java similarity index 93% rename from resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifecycleObserver.java rename to resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifecycleObserver.java index 2a71aed5624..3f90957d6d8 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifecycleObserver.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifecycleObserver.java @@ -26,13 +26,13 @@ import static java.util.Objects.requireNonNull; -final class TraceDnsQueryLifecycleObserver implements DnsQueryLifecycleObserver { +final class LoggingDnsQueryLifecycleObserver implements DnsQueryLifecycleObserver { private final InternalLogger logger; private final InternalLogLevel level; private final DnsQuestion question; private InetSocketAddress dnsServerAddress; - TraceDnsQueryLifecycleObserver(DnsQuestion question, InternalLogger logger, InternalLogLevel level) { + LoggingDnsQueryLifecycleObserver(DnsQuestion question, InternalLogger logger, InternalLogLevel level) { this.question = requireNonNull(question, "question"); this.logger = requireNonNull(logger, "logger"); this.level = requireNonNull(level, "level"); diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java b/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java deleted file mode 100644 index b0ad8cc785e..00000000000 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2017 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.resolver.dns; - -import io.netty.handler.codec.dns.DnsQuestion; -import io.netty.util.internal.logging.InternalLogLevel; -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; - -import static java.util.Objects.requireNonNull; - -final class TraceDnsQueryLifeCycleObserverFactory implements DnsQueryLifecycleObserverFactory { - private static final InternalLogger DEFAULT_LOGGER = - InternalLoggerFactory.getInstance(TraceDnsQueryLifeCycleObserverFactory.class); - private static final InternalLogLevel DEFAULT_LEVEL = InternalLogLevel.DEBUG; - private final InternalLogger logger; - private final InternalLogLevel level; - - TraceDnsQueryLifeCycleObserverFactory() { - this(DEFAULT_LOGGER, DEFAULT_LEVEL); - } - - TraceDnsQueryLifeCycleObserverFactory(InternalLogger logger, InternalLogLevel level) { - this.logger = requireNonNull(logger, "logger"); - this.level = requireNonNull(level, "level"); - } - - @Override - public DnsQueryLifecycleObserver newDnsQueryLifecycleObserver(DnsQuestion question) { - return new TraceDnsQueryLifecycleObserver(question, logger, level); - } -}
diff --git a/resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java b/resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java index f15df51e74f..3154f1808f0 100644 --- a/resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java +++ b/resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java @@ -922,7 +922,7 @@ private static void testCNAMERecursiveResolveMultipleNameServers(boolean ipv4Pre group.next(), new ReflectiveChannelFactory<DatagramChannel>(NioDatagramChannel.class), NoopDnsCache.INSTANCE, nsCache, NoopDnsQueryLifecycleObserverFactory.INSTANCE, 3000, ipv4Preferred ? ResolvedAddressTypes.IPV4_ONLY : ResolvedAddressTypes.IPV6_ONLY, true, - 10, true, 4096, false, HostsFileEntriesResolver.DEFAULT, + 10, 4096, false, HostsFileEntriesResolver.DEFAULT, new SequentialDnsServerAddressStreamProvider(dnsServer2.localAddress(), dnsServer3.localAddress()), DnsNameResolver.DEFAULT_SEARCH_DOMAINS, 0, true) { @Override @@ -1268,7 +1268,7 @@ private static void testRecursiveResolveCache(boolean cache) final DnsNameResolver resolver = new DnsNameResolver( group.next(), new ReflectiveChannelFactory<DatagramChannel>(NioDatagramChannel.class), NoopDnsCache.INSTANCE, nsCache, lifecycleObserverFactory, 3000, ResolvedAddressTypes.IPV4_ONLY, true, - 10, true, 4096, false, HostsFileEntriesResolver.DEFAULT, + 10, 4096, false, HostsFileEntriesResolver.DEFAULT, new SingletonDnsServerAddressStreamProvider(dnsServer.localAddress()), DnsNameResolver.DEFAULT_SEARCH_DOMAINS, 0, true) { @Override @@ -1425,7 +1425,7 @@ protected DnsMessage filterMessage(DnsMessage message) { final DnsNameResolver resolver = new DnsNameResolver( group.next(), new ReflectiveChannelFactory<DatagramChannel>(NioDatagramChannel.class), cache, authoritativeDnsServerCache, NoopDnsQueryLifecycleObserverFactory.INSTANCE, 2000, - ResolvedAddressTypes.IPV4_ONLY, true, 10, true, 4096, + ResolvedAddressTypes.IPV4_ONLY, true, 10, 4096, false, HostsFileEntriesResolver.DEFAULT, new SingletonDnsServerAddressStreamProvider(redirectServer.localAddress()), DnsNameResolver.DEFAULT_SEARCH_DOMAINS, 0, true) { @@ -1587,7 +1587,7 @@ public boolean clear(String hostname) { group.next(), new ReflectiveChannelFactory<DatagramChannel>(NioDatagramChannel.class), NoopDnsCache.INSTANCE, authoritativeDnsServerCache, NoopDnsQueryLifecycleObserverFactory.INSTANCE, 2000, ResolvedAddressTypes.IPV4_ONLY, - true, 10, true, 4096, + true, 10, 4096, false, HostsFileEntriesResolver.DEFAULT, new SingletonDnsServerAddressStreamProvider(redirectServer.localAddress()), DnsNameResolver.DEFAULT_SEARCH_DOMAINS, 0, true) { @@ -1724,7 +1724,7 @@ public boolean clear(String hostname) { loop, new ReflectiveChannelFactory<DatagramChannel>(NioDatagramChannel.class), cache, authoritativeDnsServerCache, NoopDnsQueryLifecycleObserverFactory.INSTANCE, 2000, ResolvedAddressTypes.IPV4_ONLY, - true, 10, true, 4096, + true, 10, 4096, false, HostsFileEntriesResolver.DEFAULT, new SingletonDnsServerAddressStreamProvider(redirectServer.localAddress()), DnsNameResolver.DEFAULT_SEARCH_DOMAINS, 0, true) { @@ -2532,7 +2532,6 @@ public void testInstanceWithNullPreferredAddressType() { null, // resolvedAddressTypes, see https://github.com/netty/netty/pull/8445 true, // recursionDesired 1, // maxQueriesPerResolve - false, // traceEnabled 4096, // maxPayloadSize true, // optResourceEnabled HostsFileEntriesResolver.DEFAULT, // hostsFileEntriesResolver
val
test
"2020-08-18T19:01:09"
"2020-08-18T12:21:09Z"
violetagg
val
netty/netty/10485_10490
netty/netty
netty/netty/10485
netty/netty/10490
[ "keyword_pr_to_issue" ]
32178fac7f0d46d9ed7059eeec4708ed3d70c6eb
0bbe4ce9fd2c531a777bdfa3e0d8bfcd1e93768a
[ "It's not clear to me why you want to change the log level, and if you want to change it to TRACE or INFO, or some other level? Log levels are normally hard-coded on the usage site, and the current code follows that convention.\r\n\r\nThis setting is about whether or not logging is enabled. Detail configuration of how logging works is something we'd prefer to leave to the logging framework.", "@chrisvest The use case is the following: Reactor Netty wants to expose this as a configuration so that the user can decide which logger and what level to use. With the current approach the user sees that the tracing is enabled but needs to know the exact logger and level that are needed (which are not from Reactor Netty but from a library that Reactor Netty uses)", "But that's the case with all logging in Netty, isn't it? Well, almost I suppose. The logging implementation that is used everywhere is discovered at runtime, and the level is always hard coded, with the exception of `LoggingHandler` and `Http2FrameLogger`. With that in mind I think the second approach makes the most sense: expose some clean-up version of `TraceDnsQueryLifeCycleObserverFactory`, and remove `traceEnabled`.", "> But that's the case with all logging in Netty, isn't it? Well, almost I suppose. The logging implementation that is used everywhere is discovered at runtime, and the level is always hard coded, with the exception of `LoggingHandler` and `Http2FrameLogger`. With that in mind I think the second approach makes the most sense: expose some clean-up version of `TraceDnsQueryLifeCycleObserverFactory`, and remove `traceEnabled`.\r\n\r\nYes I agree, from my point of view it is like `LoggingHandler`, I can add this handler to the pipeline. The same is with `TraceDnsQueryLifeCycleObserverFactory` I can add it to the configuration." ]
[ "Is there any particular concern to use `checkNotNull ` instead of `requireNonNull`. The latter is used in the version for Netty 5.", "Because it not depends on java8!", "ops ... true ", "Netty 4.1 needs to support Java 6 at runtime where `requireNonNull` is not available." ]
"2020-08-19T13:37:34Z"
[]
Ability to change the logger and level used by DnsNameResolverBuilder#traceEnabled
### Expected behavior Ability to change the logger and level used by `DnsNameResolverBuilder#traceEnabled` ### Actual behavior When `DnsNameResolverBuilder#traceEnabled` is enabled, it uses always the logger and level below https://github.com/netty/netty/blob/46cb4015ff25ff2dd3d0dede5c3185ac498e56b0/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java#L26-L28 It would be nice if those can be configured. Possible solutions: - `DnsNameResolverBuilder#traceEnabled` where one can specify logger and level Or - `TraceDnsQueryLifeCycleObserverFactory` can be made public and also it can expose construction similar to what `LoggingHandler` provides i.e. ``` public TraceDnsQueryLifeCycleObserverFactory(Class<?> clazz, LogLevel level) { ObjectUtil.checkNotNull(clazz, "clazz"); ObjectUtil.checkNotNull(level, "level"); this.logger = InternalLoggerFactory.getInstance(clazz); this.level = level.toInternalLevel(); } ``` With this approach one will be able to add such observer with `DnsNameResolverBuilder#dnsQueryLifecycleObserverFactory` this second approach will require a dependency to `netty-handler` module Wdyt? ### Netty version current snapshot ### JVM version (e.g. `java -version`) jdk 8 ### OS version (e.g. `uname -a`) Mac OS
[ "resolver-dns/pom.xml", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java", "resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifecycleObserver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java" ]
[ "resolver-dns/pom.xml", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java", "resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifeCycleObserverFactory.java", "resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifecycleObserver.java" ]
[]
diff --git a/resolver-dns/pom.xml b/resolver-dns/pom.xml index 0925c9f47de..84e1733a79b 100644 --- a/resolver-dns/pom.xml +++ b/resolver-dns/pom.xml @@ -63,6 +63,11 @@ <artifactId>netty-codec-dns</artifactId> <version>${project.version}</version> </dependency> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>netty-handler</artifactId> + <version>${project.version}</version> + </dependency> <dependency> <groupId>org.apache.directory.server</groupId> <artifactId>apacheds-protocol-dns</artifactId> diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java index 344be375c6d..eb3b8046c6d 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java @@ -399,8 +399,8 @@ public DnsNameResolver( this.cnameCache = checkNotNull(cnameCache, "cnameCache"); this.dnsQueryLifecycleObserverFactory = traceEnabled ? dnsQueryLifecycleObserverFactory instanceof NoopDnsQueryLifecycleObserverFactory ? - new TraceDnsQueryLifeCycleObserverFactory() : - new BiDnsQueryLifecycleObserverFactory(new TraceDnsQueryLifeCycleObserverFactory(), + new LoggingDnsQueryLifeCycleObserverFactory() : + new BiDnsQueryLifecycleObserverFactory(new LoggingDnsQueryLifeCycleObserverFactory(), dnsQueryLifecycleObserverFactory) : checkNotNull(dnsQueryLifecycleObserverFactory, "dnsQueryLifecycleObserverFactory"); this.searchDomains = searchDomains != null ? searchDomains.clone() : DEFAULT_SEARCH_DOMAINS; diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java index 2afaf1c68ff..3ec59ff2907 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java @@ -324,7 +324,10 @@ public DnsNameResolverBuilder maxQueriesPerResolve(int maxQueriesPerResolve) { * * @param traceEnabled true if trace is enabled * @return {@code this} + * @deprecated Prefer to {@linkplain #dnsQueryLifecycleObserverFactory(DnsQueryLifecycleObserverFactory) configure} + * a {@link LoggingDnsQueryLifeCycleObserverFactory} instead. */ + @Deprecated public DnsNameResolverBuilder traceEnabled(boolean traceEnabled) { this.traceEnabled = traceEnabled; return this; diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifeCycleObserverFactory.java b/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifeCycleObserverFactory.java new file mode 100644 index 00000000000..d33a47e9b7b --- /dev/null +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifeCycleObserverFactory.java @@ -0,0 +1,85 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.resolver.dns; + +import io.netty.handler.codec.dns.DnsQuestion; +import io.netty.handler.logging.LogLevel; +import io.netty.util.internal.logging.InternalLogLevel; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import static io.netty.util.internal.ObjectUtil.checkNotNull; + +/** + * A {@link DnsQueryLifecycleObserverFactory} that enables detailed logging in the {@link DnsNameResolver}. + * <p> + * When {@linkplain DnsNameResolverBuilder#dnsQueryLifecycleObserverFactory(DnsQueryLifecycleObserverFactory) + * configured on the resolver}, detailed trace information will be generated so that it is easier to understand the + * cause of resolution failure. + */ +public final class LoggingDnsQueryLifeCycleObserverFactory implements DnsQueryLifecycleObserverFactory { + private static final InternalLogger DEFAULT_LOGGER = + InternalLoggerFactory.getInstance(LoggingDnsQueryLifeCycleObserverFactory.class); + private final InternalLogger logger; + private final InternalLogLevel level; + + /** + * Create {@link DnsQueryLifecycleObserver} instances that log events at the default {@link LogLevel#DEBUG} level. + */ + public LoggingDnsQueryLifeCycleObserverFactory() { + this(LogLevel.DEBUG); + } + + /** + * Create {@link DnsQueryLifecycleObserver} instances that log events at the given log level. + * @param level The log level to use for logging resolver events. + */ + public LoggingDnsQueryLifeCycleObserverFactory(LogLevel level) { + this.level = checkAndConvertLevel(level); + logger = DEFAULT_LOGGER; + } + + /** + * Create {@link DnsQueryLifecycleObserver} instances that log events to a logger with the given class context, + * at the given log level. + * @param classContext The class context for the logger to use. + * @param level The log level to use for logging resolver events. + */ + public LoggingDnsQueryLifeCycleObserverFactory(Class<?> classContext, LogLevel level) { + this.level = checkAndConvertLevel(level); + logger = InternalLoggerFactory.getInstance(checkNotNull(classContext, "classContext")); + } + + /** + * Create {@link DnsQueryLifecycleObserver} instances that log events to a logger with the given name context, + * at the given log level. + * @param name The name for the logger to use. + * @param level The log level to use for logging resolver events. 
+ */ + public LoggingDnsQueryLifeCycleObserverFactory(String name, LogLevel level) { + this.level = checkAndConvertLevel(level); + logger = InternalLoggerFactory.getInstance(checkNotNull(name, "name")); + } + + private static InternalLogLevel checkAndConvertLevel(LogLevel level) { + return checkNotNull(level, "level").toInternalLevel(); + } + + @Override + public DnsQueryLifecycleObserver newDnsQueryLifecycleObserver(DnsQuestion question) { + return new LoggingDnsQueryLifecycleObserver(question, logger, level); + } +} diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifecycleObserver.java b/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifecycleObserver.java similarity index 93% rename from resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifecycleObserver.java rename to resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifecycleObserver.java index 8166ff2c301..6f1aa629680 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifecycleObserver.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/LoggingDnsQueryLifecycleObserver.java @@ -26,13 +26,13 @@ import static io.netty.util.internal.ObjectUtil.checkNotNull; -final class TraceDnsQueryLifecycleObserver implements DnsQueryLifecycleObserver { +final class LoggingDnsQueryLifecycleObserver implements DnsQueryLifecycleObserver { private final InternalLogger logger; private final InternalLogLevel level; private final DnsQuestion question; private InetSocketAddress dnsServerAddress; - TraceDnsQueryLifecycleObserver(DnsQuestion question, InternalLogger logger, InternalLogLevel level) { + LoggingDnsQueryLifecycleObserver(DnsQuestion question, InternalLogger logger, InternalLogLevel level) { this.question = checkNotNull(question, "question"); this.logger = checkNotNull(logger, "logger"); this.level = checkNotNull(level, "level"); diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java b/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java deleted file mode 100644 index dc7572bca17..00000000000 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/TraceDnsQueryLifeCycleObserverFactory.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2017 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.resolver.dns; - -import io.netty.handler.codec.dns.DnsQuestion; -import io.netty.util.internal.logging.InternalLogLevel; -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; - -import static io.netty.util.internal.ObjectUtil.checkNotNull; - -final class TraceDnsQueryLifeCycleObserverFactory implements DnsQueryLifecycleObserverFactory { - private static final InternalLogger DEFAULT_LOGGER = - InternalLoggerFactory.getInstance(TraceDnsQueryLifeCycleObserverFactory.class); - private static final InternalLogLevel DEFAULT_LEVEL = InternalLogLevel.DEBUG; - private final InternalLogger logger; - private final InternalLogLevel level; - - TraceDnsQueryLifeCycleObserverFactory() { - this(DEFAULT_LOGGER, DEFAULT_LEVEL); - } - - TraceDnsQueryLifeCycleObserverFactory(InternalLogger logger, InternalLogLevel level) { - this.logger = checkNotNull(logger, "logger"); - this.level = checkNotNull(level, "level"); - } - - @Override - public DnsQueryLifecycleObserver newDnsQueryLifecycleObserver(DnsQuestion question) { - return new TraceDnsQueryLifecycleObserver(question, logger, level); - } -}
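Since this variant of the change deprecates `traceEnabled` rather than removing it, migration is a one-line swap; a sketch, where the builder is an existing `DnsNameResolverBuilder` (DEBUG matches the default level the removed `TraceDnsQueryLifeCycleObserverFactory` hard-coded):

```java
import io.netty.handler.logging.LogLevel;
import io.netty.resolver.dns.DnsNameResolverBuilder;
import io.netty.resolver.dns.LoggingDnsQueryLifeCycleObserverFactory;

final class TraceEnabledMigration {
    static DnsNameResolverBuilder migrate(DnsNameResolverBuilder builder) {
        // Before (now deprecated): builder.traceEnabled(true);
        return builder.dnsQueryLifecycleObserverFactory(
                new LoggingDnsQueryLifeCycleObserverFactory(LogLevel.DEBUG));
    }
}
```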
null
test
test
"2020-08-18T19:00:08"
"2020-08-18T12:21:09Z"
violetagg
val
netty/netty/10582_10583
netty/netty
netty/netty/10582
netty/netty/10583
[ "keyword_pr_to_issue" ]
79a7c157a3e122872321844b73c4881e412dba77
d01471917b94d15fbe8b3b2e0f0ed8f24ee2954a
[ "@Doyuni please open a PR with the fix and a testcase- ", "> @Doyuni please open a PR with the fix and a testcase-\r\n\r\nOkay I got it." ]
[ "`req.uri()` could be moved to a variable.", "All this should be done only in case of `serverConfig.checkStartsWith()`. In all other cases, we don't need to perform those checks.", "Okay, thank you for reviewing.", "So..what should I do? That logic could be moved to new method?", "Probably own method makes sense as it became big enough.\r\n\r\nAlso, I think it would make sense to rename **isNotWebSocketPath** to **isWebSocketPath**. It's a generic Java way. When **NO** is required you can use **!isWebSocketPath()**.\r\n", "Thank you for your kind explanation.\r\n\r\nI'll change it based on your reference." ]
"2020-09-16T10:36:44Z"
[]
Add validation check about websocket path
### Expected behavior If I add WebSocketServerProtocolHandler as below, ```java private final String WEBSOCKET_PATH = "/websocket"; // ~~ final ChannelPipeline p = ch.pipeline(); p.addLast(new WebSocketServerProtocolHandler(WEBSOCKET_PATH, true)); ``` I expected: `/websocket/a` : return true `/websocketabc` : return false `/websocket?name=doyuni` : return true ### Actual behavior But it returns true in all of the above cases. I don't want this handler to pass `/websocketabc` through. ### Steps to reproduce Just modify one method as in the code below. ### Minimal yet complete reproducer code (or URL to code) - WebSocketServerProtocolHandshakeHandler ```java private boolean isNotWebSocketPath(FullHttpRequest req) { String websocketPath = serverConfig.websocketPath(); char nextStartUri = '/'; if (req.uri().length() > websocketPath.length()) { nextStartUri = req.uri().charAt(websocketPath.length()); } return serverConfig.checkStartsWith() ? !(req.uri().startsWith(websocketPath) && (nextStartUri == '/' || nextStartUri == '?')) : !req.uri().equals(websocketPath); } ``` ### Netty version - 4.1.52.Final ### JVM version (e.g. `java -version`) ``` java version "11.0.5" 2019-10-15 LTS Java(TM) SE Runtime Environment 18.9 (build 11.0.5+10-LTS) Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.5+10-LTS, mixed mode) ``` ### OS version (e.g. `uname -a`) - Mac OS Catalina v. 10.15.6
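The merged fix (gold patch below) replaces `isNotWebSocketPath` with an `isWebSocketPath` that also inspects the character following the prefix. A standalone sketch of that rule, runnable against the three URIs from the report:

```java
// Mirrors the checkStartsWith branch of the fixed WebSocketServerProtocolHandshakeHandler.
final class WebSocketPathMatchSketch {
    static boolean isWebSocketPath(String uri, String websocketPath) {
        if (!uri.startsWith(websocketPath)) {
            return false;
        }
        if (uri.length() == websocketPath.length()) {
            return true; // exact match
        }
        char next = uri.charAt(websocketPath.length());
        return next == '/' || next == '?'; // sub-path or query string
    }

    public static void main(String[] args) {
        System.out.println(isWebSocketPath("/websocket/a", "/websocket"));            // true
        System.out.println(isWebSocketPath("/websocket?name=doyuni", "/websocket"));  // true
        System.out.println(isWebSocketPath("/websocketabc", "/websocket"));           // false
    }
}
```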
[ "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java index ec9b4ff1049..f3f53327563 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java @@ -61,7 +61,7 @@ public void handlerAdded(ChannelHandlerContext ctx) { @Override public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exception { final FullHttpRequest req = (FullHttpRequest) msg; - if (isNotWebSocketPath(req)) { + if (!isWebSocketPath(req)) { ctx.fireChannelRead(msg); return; } @@ -113,9 +113,21 @@ public void operationComplete(ChannelFuture future) { } } - private boolean isNotWebSocketPath(FullHttpRequest req) { + private boolean isWebSocketPath(FullHttpRequest req) { String websocketPath = serverConfig.websocketPath(); - return serverConfig.checkStartsWith() ? !req.uri().startsWith(websocketPath) : !req.uri().equals(websocketPath); + String uri = req.uri(); + boolean checkStartUri = uri.startsWith(websocketPath); + boolean checkNextUri = checkNextUri(uri, websocketPath); + return serverConfig.checkStartsWith() ? (checkStartUri && checkNextUri) : uri.equals(websocketPath); + } + + private boolean checkNextUri(String uri, String websocketPath) { + int len = websocketPath.length(); + if (uri.length() > len) { + char nextUri = uri.charAt(len); + return nextUri == '/' || nextUri == '?'; + } + return true; } private static void sendHttpResponse(ChannelHandlerContext ctx, HttpRequest req, HttpResponse res) {
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java index 69cda51f0b4..1ca29b0a5e6 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java @@ -22,17 +22,18 @@ import io.netty.channel.ChannelOutboundHandlerAdapter; import io.netty.channel.ChannelPromise; import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; + import io.netty.handler.codec.http.HttpClientCodec; import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpObjectAggregator; -import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpServerCodec; +import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.util.CharsetUtil; import io.netty.util.ReferenceCountUtil; import org.junit.Before; @@ -194,6 +195,61 @@ public void testHandleTextFrame() { assertFalse(ch.finish()); } + @Test + public void testCheckValidWebSocketPath() { + HttpRequest httpRequest = new WebSocketRequestBuilder().httpVersion(HTTP_1_1) + .method(HttpMethod.GET) + .uri("/test") + .key(HttpHeaderNames.SEC_WEBSOCKET_KEY) + .connection("Upgrade") + .upgrade(HttpHeaderValues.WEBSOCKET) + .version13() + .build(); + + WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder() + .websocketPath("/test") + .checkStartsWith(true) + .build(); + + EmbeddedChannel ch = new EmbeddedChannel( + new WebSocketServerProtocolHandler(config), + new HttpRequestDecoder(), + new HttpResponseEncoder(), + new MockOutboundHandler()); + ch.writeInbound(httpRequest); + + FullHttpResponse response = responses.remove(); + assertEquals(SWITCHING_PROTOCOLS, response.status()); + response.release(); + } + + @Test + public void testCheckInvalidWebSocketPath() { + HttpRequest httpRequest = new WebSocketRequestBuilder().httpVersion(HTTP_1_1) + .method(HttpMethod.GET) + .uri("/testabc") + .key(HttpHeaderNames.SEC_WEBSOCKET_KEY) + .connection("Upgrade") + .upgrade(HttpHeaderValues.WEBSOCKET) + .version13() + .build(); + + WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder() + .websocketPath("/test") + .checkStartsWith(true) + .build(); + + EmbeddedChannel ch = new EmbeddedChannel( + new WebSocketServerProtocolHandler(config), + new HttpRequestDecoder(), + new HttpResponseEncoder(), + new MockOutboundHandler()); + ch.writeInbound(httpRequest); + + ChannelHandlerContext handshakerCtx = ch.pipeline().context(WebSocketServerProtocolHandshakeHandler.class); + assertNull(WebSocketServerProtocolHandler.getHandshaker(handshakerCtx.channel())); + } + @Test public void testExplicitCloseFrameSentWhenServerChannelClosed() throws Exception { WebSocketCloseStatus closeStatus = 
WebSocketCloseStatus.ENDPOINT_UNAVAILABLE;
train
test
"2020-09-16T09:40:42"
"2020-09-16T09:56:02Z"
Doyuni
val
netty/netty/10508_10623
netty/netty
netty/netty/10508
netty/netty/10623
[ "keyword_pr_to_issue" ]
8b2ed77042b5a3023974d7a6d2f22fc0d3edef34
1c230405fd4f7c445773b662beeccebc18f85f98
[ "My 2 cents:\r\nFirst change:\r\n- Before, it was a `copy` which limits the max, \r\n- now is a `buf.alloc.buffer(...).writeByte(buf)` => Pooled buffer using the original ByteBufAllocator from the `buf`\r\n\r\nSecond change, which is probably the one you point out:\r\n- Before (2 places): `Unpooled.buffer(64)` (Unpooled implicit)\r\n- After (same): `undecodedChunk.alloc().heapBuffer(64)`\r\n\r\nThis should use the ByteBufAllocator used for undecoded, which in turns should be (if I understand) the same allocator than original `buf` (so the one from IO) since either it is the same, or a new allocated one using the same allocator.\r\n\r\nI could suggest to replace `undecodedChunk.alloc().heapBuffer(64)` by `undecodedChunk.alloc().buffer(64, 64)` (here there should not be any more than 64), but as you noticed that putting a `PooledByteBufAllocator` to direct is not correct, but heap seems ok, then I finally could suggest: `undecodedChunk.alloc().heapBuffer(64, 64)`, which is almost what we have there...\r\n\r\n@normanmaurer Any suggestion since I do not have a clear mind on this... ?\r\n", "@zentol I tried to reproduce using only Netty, but I can't get the numbers you've got.\r\n\r\nCould you have a reproducer code with Netty only (almost a Junit or simple example)?\r\nWhat kind of data are you sending (which forms and numbers)?\r\nAnd to be sure, your tests shows that Direct buffers are not ok, while Heap buffers are ?\r\nThanks", "@zentol Note that I used 4.1.51 and I can't reproduce the issue you raised... ", "@zentol I tried another time, but the numbers I've got are almost the same whatever the allocation:\r\n- PooledHeap, PooledDirect, PooledBuffer (from undecodedChunked\r\n- UnpooledHeap, UnpooledDirect, UnpooledBuffer\r\n\r\nI've tried both 1000 chunks for big data (64KB each) so 1000 decoded vars or 1000 chunks for small data (4KB each but 16 times so still chunks of 64KB each) so 16000 decoded vars, and the results are the same : \r\n- 4KB data size: roughly 230ms each test (16000 data) (highest - lowest = 5ms)\r\n- 64KB data size: roughly 2900ms each test (1000 data), the difference being the data written on disk due to large size (> 16KB) (highest - lowest = 310ms, but not so relevant since disk access is most of the issue as without disk access, there is no issue)\r\n- 64KB data size: roughly 190ms each test (1000 data), the difference being the data not written on disk (memory only) (highest - lowest = 8ms)\r\n\r\nSo my conclusion until further information from you is that there is no issue there... Either I'm missing something, either it was fixed in 4.1.51...", "@fredericBregier I did not test it with 4.1.51 due to #10425. I will check whether the issue persists in 4.1.52, and if so try to create a reproducer.", "Ok, I think I got closer to the problem.\r\n\r\nI wrote a reproducing test, which ran fast in 4.1.49 and slow in 4.1.50, but when I moved it into a separate project it suddenly ran fast either way.\r\nI then noticed that the test we used so far also uses a `ResourceLeakDetector` set to `PARANOID`. 
If I set the level to `ADVANCED` it runs quickly with both versions, but `PARANOID` appears to slow things down way more in 4.1.50+.\r\n\r\nHere's the test in question:\r\n\r\n```\r\npublic class RegressionTest {\r\n\r\n\t@Rule\r\n\tpublic final TemporaryFolder temporaryFolder = new TemporaryFolder();\r\n\r\n\tstatic {\r\n\t\tResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);\r\n\t}\r\n\r\n\t@Test\r\n\tpublic void testRegression() throws Exception {\r\n\t\tFile file = temporaryFolder.newFile();\r\n\t\ttry (RandomAccessFile rw = new RandomAccessFile(file, \"rw\")) {\r\n\t\t\trw.setLength(1024 * 1024 * 5);\r\n\t\t}\r\n\t\tDefaultHttpDataFactory httpDataFactory = new DefaultHttpDataFactory(true);\r\n\r\n\t\tHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, \"\");\r\n\t\tHttpPostRequestEncoder bodyRequestEncoder = new HttpPostRequestEncoder(httpDataFactory, httpRequest, true);\r\n\t\tbodyRequestEncoder.addBodyFileUpload(\"file\", file, \"application/octet-stream\", false);\r\n\r\n\t\tHttpRequest firstHttpRequest = bodyRequestEncoder.finalizeRequest();\r\n\t\tHttpPostRequestDecoder httpPostRequestDecoder = new HttpPostRequestDecoder(httpDataFactory, firstHttpRequest);\r\n\r\n\t\tByteBufAllocator allocator = PooledByteBufAllocator.DEFAULT;\r\n\t\tfor (HttpContent httpContent = bodyRequestEncoder.readChunk(allocator); httpContent != null; httpContent = bodyRequestEncoder.readChunk(allocator)) {\r\n\t\t\thttpPostRequestDecoder.offer(httpContent);\r\n\t\t}\r\n\t}\r\n}\r\n```", "@zentol Effctively, with PARANOID level set, chunks are quite high consuming time, while other levels are quite quickier. On my tests (same than before, based on 4.1.52): whatever the pooled/unpooled heap/direct buffers\r\n- No detector: 225 to 230ms\r\n- Simple: 229 to 233ms\r\n- Advanced; 228 to 245ms\r\n- Paranoid: 147.000ms\r\nSo yes, there is a hug gap using Paranoid (almost x600). Maybe you can stay in at most Advanced mode ?", "@normanmaurer However I have a question for you, do you think this kind of gap is normal ?\r\nI do know that Paranoid is not for Production, but maybe it is so huge that it could not be usable. Note however that I used Paranoid level for a long time is all my tests and did not found any issue about on my side.", "Using Advanced would be workaround we would go for, but this runtime increase on Paranoid does not seem normal. It was perfectly fine to use Paranoid in earlier versions, and it would be unfortunate to weaken our tests now with never versions. :/", "sorry for the late reply... The problem with \"paranoid\" is that it basically creates one stack trace per `ByteBuf` access which can be very expensive. 
So these kinds of performance drops can happen.", "@normanmaurer Thanks!\r\nFor me, it's fine as is.\r\nIf I'm correct, changing the number of items tracked through `io.netty.leakDetection.targetRecords` will not change whether every access is checked, but will just limit the number of retained items?\r\n\r\nI believe that one could set `io.netty.leakDetection.samplingInterval` (default being 128) to a small value (still a power of 2, such as 16) to reduce the PARANOID effect to a smaller part. (If I'm correct, the main difference between ADVANCED and PARANOID is that this samplingInterval is ignored within PARANOID?)\r\n\r\nI checked using level = `ADVANCED` and `io.netty.leakDetection.samplingInterval` set to the following (using the pooled one):\r\n- compared to `PARANOID`: 140.000ms whatever Direct or Heap\r\n- 8 : 140.000ms Direct vs 4.000ms Heap, so almost the same as PARANOID, except using Heap\r\n- 16 : 210ms whatever\r\n- 128 (default) : 200ms whatever Direct or Heap\r\n- 12 (I know that the code says it should be a power of 2): 590ms Direct, 420ms Heap\r\nSo maybe 12 could be a good trade-off? (more traces, but not as many as in PARANOID).\r\n\r\nAgain, on my side, I use PARANOID everywhere in my tests and I'm fine with it. It is more for the question of @zentol.\r\n\r\nNote: I used this in my test bench code\r\n\r\n    @BeforeClass\r\n    public static void setDefaultAdvancedLevel() {\r\n        System.setProperty(\"io.netty.leakDetection.samplingInterval\", \"12\");\r\n        ResourceLeakDetector.setLevel(Level.ADVANCED);\r\n    }\r\n", "Our problem isn't that PARANOID is slower than other modes, that's fine and expected, but that due to _some change that impacts leak detection_ our test is 100x slower in 4.1.50 than before.\r\n\r\nI would just like to understand what has changed; if the leak detection was made slower in general, then that's fine, but I'm worried that something else has changed that indirectly causes leak detection to be slower in our specific case.", "After merging #11001 I'm getting ~360-400ms for ADVANCED and ~460-770ms for PARANOID when running the test in https://github.com/netty/netty/issues/10508#issuecomment-692037524\r\n\r\nSo I think we can close this issue?", "I hope so ;-)" ]
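For reference, a minimal sketch of the two leak-detection configuration paths discussed above (the `ResourceLeakDetector` API and the `io.netty.leakDetection.*` system properties); the concrete level and interval values here are illustrative choices, not recommendations from the thread:

```java
import io.netty.util.ResourceLeakDetector;

public final class LeakDetectionConfig {
    public static void main(String[] args) {
        // System properties are read when ResourceLeakDetector initializes,
        // so they must be set before any Netty buffer class is loaded.
        System.setProperty("io.netty.leakDetection.level", "advanced");
        System.setProperty("io.netty.leakDetection.samplingInterval", "16"); // power of 2

        // The programmatic route can be changed at runtime, e.g. per test run.
        ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.ADVANCED);

        System.out.println("Leak detection level: " + ResourceLeakDetector.getLevel());
    }
}
```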
[ "@fredericBregier can you explain why setting an upper limit improves things here ?", "@normanmaurer \r\nI don't have any real clue, but it seems it does. That's why I'm asking you to look at this why. \r\n\r\nThe change is huge on performances in PARANOID mode (using the example test given by the final user), about 10 times better... I test several times, and without it, the times are about 1000ms, while with it, it gives about 100ms.\r\n\r\nUsing my old test (a bit different, where a lot of items, about 6000, are passed and not only one file), the improvement is there also on PARANOID mode, but not that much (not 10 times better, comparing 146.000 old code vs 105.000 new code). \r\nThere, the issue (and change) is on the `undecodedChunk.write(buf)`.\r\nBut if I change it to something like `wrappedBuffers(undecodedChunk, buf)`, the time is close to 1000ms again in PARANOID mode, but increases sensibly in other modes (500ms against 200ms without any change).\r\n\r\n\r\nIt was strange for me to see that comparing:\r\n- `undecodedChunk = isLast? ... : buf.alloc.buffer(buf.readableSize(), buf.readableSize()).write(buf)` and `undecodedChunk = Unpooled.wrapped(undecodedChunk, buf.alloc.buffer(buf.readableSize(), buf.readableSize()).write(buf))`\r\n- vs original ones (still in place at lines 338 and 340\r\n\r\ngives impact performances (better for PARANOID, about 1000 times, but worst in other modes, about 2 times). I feel like it was the same in both cases (allocation and writing), but it seems not. Note that in current code, `undecodedChunk` in line 338 is still allocated using no upper bound.\r\n\r\nThe main \"same root cause\" seems to be \"no upper bound for buffer\" when using PARANOID mode, but in different ways.\r\n\r\n\r\nI feel like this proposal is safe as it enhances a bit the general performances and takes down the PARANOID issue, at least for the user's example.\r\nBut I feel also that there is something else, since in my test (high number of items) this is really not enough.\r\n\r\nSo the reason I try to test it and produce at least this in order to get help.", "@normanmaurer \r\n\r\nI added a fake commit to let you run the different cases at once:\r\n\r\nThis commit is only intend to show the result on tests using 3 cases:\r\n- HttpPostMultipartRequestDecoder.TEST_TEMP_ITEM = 1: original case\r\n- HttpPostMultipartRequestDecoder.TEST_TEMP_ITEM = 2: where only upper bound is modified\r\n- HttpPostMultipartRequestDecoder.TEST_TEMP_ITEM = 3: where write is replaced by wrapped and the last \"no upper bounded\" buffer is replaced too\r\n\r\nTest to run is `testRegressionMultipleLevelLeakDetector`.\r\n\r\nOn my side results are:\r\n\r\nTimer: DISABLED NotUsingDisk1 => 220.995585\r\n**Timer: SIMPLE NotUsingDisk1 => 222.134248**\r\nTimer: ADVANCED NotUsingDisk1 => 208.68989\r\n**_Timer: PARANOID NotUsingDisk1 => 100044.293876_**\r\nTimer: DISABLED UsingDisk1 => 707.480873\r\nTimer: DISABLED UsingDisk1 => 518.695771\r\nTimer: SIMPLE UsingDisk1 => 522.790849\r\nTimer: ADVANCED UsingDisk1 => 585.759164\r\nTimer: PARANOID UsingDisk1 => 105090.135276\r\n\r\nTimer: DISABLED NotUsingDisk2 => 136.461289\r\n**Timer: SIMPLE NotUsingDisk2 => 126.793194**\r\nTimer: ADVANCED NotUsingDisk2 => 129.771282\r\n**_Timer: PARANOID NotUsingDisk2 => 973.38734_**\r\nTimer: DISABLED UsingDisk2 => 74.279745\r\nTimer: DISABLED UsingDisk2 => 71.332363\r\nTimer: SIMPLE UsingDisk2 => 114.811827\r\nTimer: ADVANCED UsingDisk2 => 89.290387\r\nTimer: PARANOID UsingDisk2 => 1155.278418\r\n\r\nTimer: DISABLED 
NotUsingDisk3 => 1829.364849\r\n**Timer: SIMPLE NotUsingDisk3 => 1949.512746**\r\nTimer: ADVANCED NotUsingDisk3 => 2120.573799\r\n**_Timer: PARANOID NotUsingDisk3 => 2073.506771_**\r\nTimer: DISABLED UsingDisk3 => 1998.837595\r\nTimer: DISABLED UsingDisk3 => 1952.342478\r\nTimer: SIMPLE UsingDisk3 => 2099.951834\r\nTimer: ADVANCED UsingDisk3 => 2058.028121\r\nTimer: PARANOID UsingDisk3 => 2295.847557\r\n", "Last fake commit:\r\n\r\nThis commit is only intended to show the result on tests using 3 cases:\r\n - HttpPostMultipartRequestDecoder.TEST_TEMP_ITEM = 1: original case\r\n - HttpPostMultipartRequestDecoder.TEST_TEMP_ITEM = 2: where only the upper bound is modified\r\n - HttpPostMultipartRequestDecoder.TEST_TEMP_ITEM = 3: where an attempt to reuse the existing ByteBuf more is made\r\n \r\nTest to run is `testRegressionMultipleLevelLeakDetectorNoDisk` or `testHighNumberCheckLeakDetectorVersions`\r\n \r\nCurrent results (stability is correct but numbers depend on the host)\r\n\r\nI've done various tests:\r\n- using `wrapped` (or through `buf.alloc().compositeByteBuf()`): worst timings\r\n- using fixed allocation (but high enough to fit the necessary elements, just for the test): not good timings (probably because the size is too large for a lot of cases, and anyway it is not acceptable to have a fixed size; it was just for testing purposes)\r\n- using various ways to discard read bytes: it gives a bit of improvement (TEST_TEMP_ITEM=3): almost the best results, but not by much\r\n\r\nIn particular, in the \"big\" test (testHighNumberCheckLeakDetectorVersions, where more than 16000 items are sent, while the other test is only one item from a big file), I am not able to reduce the timer in PARANOID mode, while in the \"unique\" item test (testRegressionMultipleLevelLeakDetectorNoDisk), the PARANOID time decreases a lot.\r\n\r\nEven if I cannot understand why setting the upper bound brings such an improvement, I feel like this is at least a good change.\r\nHowever, for the other attempt using discardReadBytes (or the tries using wrapped buffers), it does not seem to improve the timings.\r\n\r\n@normanmaurer If you have any clue to continue, I will. 
I would propose to at least have the minimal change (upper bounds) in order to fix this issue a bit in PARANOID mode (and it does not change things much, for better or worse, at other levels).\r\n\r\n_Here are my results (I tried to use profiling to understand the reasons, but the profiling just kills all results, therefore I was unable to dig into the buffer implementations to see where the main difference is)._\r\n\r\n\r\ntestHighNumberCheckLeakDetectorVersions\r\n=======================================\r\n\r\nHighItemNumberDISABLED1=506.43838600000004, \r\nHighItemNumberDISABLED2=496.729877, \r\nHighItemNumberDISABLED3=503.72193200000004, \r\n\r\n**_HighItemNumberSIMPLE1=460.46265600000004, \r\nHighItemNumberSIMPLE2=475.950721, \r\nHighItemNumberSIMPLE3=467.69606699999997,_** \r\n\r\nHighItemNumberADVANCED1=465.508472, \r\nHighItemNumberADVANCED2=611.797092, \r\nHighItemNumberADVANCED3=467.812221, \r\n\r\n**HighItemNumberPARANOID1=304098.68770899996, \r\nHighItemNumberPARANOID2=304140.256149, \r\nHighItemNumberPARANOID3=303301.14581200003,** \r\n\r\nBigItemDISABLED1=426.205971, \r\nBigItemDISABLED2=441.343974, \r\nBigItemDISABLED3=420.528978, \r\n\r\n**_BigItemSIMPLE1=420.825118, \r\nBigItemSIMPLE2=432.968082, \r\nBigItemSIMPLE3=418.955781,_** \r\n\r\nBigItemADVANCED1=420.391646, \r\nBigItemADVANCED2=445.089119, \r\nBigItemADVANCED3=428.97732599999995, \r\n\r\n**BigItemPARANOID1=318339.78715600003\r\nBigItemPARANOID2=319121.713933\r\nBigItemPARANOID3=316866.262051**\r\n\r\ntestRegressionMultipleLevelLeakDetectorNoDisk\r\n=============================================\r\n\r\nTimer: DISABLED NotUsingDisk1 => 195.185296\r\nTimer: DISABLED NotUsingDisk2 => 206.28149\r\nTimer: DISABLED NotUsingDisk3 => 142.354934\r\n\r\n**_Timer: SIMPLE NotUsingDisk1 => 185.459894\r\nTimer: SIMPLE NotUsingDisk2 => 82.243331\r\nTimer: SIMPLE NotUsingDisk3 => 83.65489_**\r\n\r\nTimer: ADVANCED NotUsingDisk1 => 175.245971\r\nTimer: ADVANCED NotUsingDisk2 => 214.114222\r\nTimer: ADVANCED NotUsingDisk3 => 89.005829\r\n\r\n**Timer: PARANOID NotUsingDisk1 => 119286.301819\r\nTimer: PARANOID NotUsingDisk2 => 1604.135822\r\nTimer: PARANOID NotUsingDisk3 => 2489.555437**\r\n", "Use final here, it would change a lot how the JIT would optimize it.\r\nUse a sys property to set this and just use different runs with different JVMs (probably using a good profiler + a JMH bench would be ideal to be sure of the impact/meaning of changes)", "@franz1981 Thank you!\r\nI agree that making it final shall be done for the final checks. This variable was only intended to allow 3 kinds of tests, not to be kept in the final code, of course (and if final, I couldn't change the value, of course).\r\n\r\nNow, using a final here, 3 different runs (changing the value of course) and a good profiler could be far better and ideal, I agree. \r\n\r\nHowever, as I said, when using a profiler (the one I have is from Oracle - VisualVM - and I believe it is not the best around, but the only one I've got), the performance drops severely (even in DISABLED mode), therefore there is nothing to look at correctly. For my other tasks, this one is great, giving the information I need with not that much impact. 
But there, the performance is almost all at the same level as PARANOID, even in DISABLED or SIMPLE mode.\r\n\r\nCan you suggest some tools (free, sadly) that could help in this research?\r\n", "Sure!\r\n\r\nhttps://github.com/jvm-profiling-tools/async-profiler is probably one of the most complete, accurate and cheap (;)) ones I know.\r\nHighly suggested to add -XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints", "@njhill now you read my mind bud?:)", "2020", "It's suspicious to see a while-loop for releasing a reference counted object. Increments and decrements of the ref count are supposed to be pair-wise, following the structure of the code.", "Odd formatting that the constants ended up on their own line.", "OK", "Indeed, and I did not find why in some cases I have a huge number of refCnt(), hence this.\r\nHowever, I will remove it, going back to the previous situation (probably bad programming on my side).", "Right, wrong indentation through IJ", "```suggestion\r\n * https://www.apache.org/licenses/LICENSE-2.0\r\n```", "Already done ;-)", "nit: we use 4 spaces ... Please change everywhere to be consistent with our code styling ", "hmm... retainedSlice(...) should be fine. This will do an extra memory copy which I think is not needed.", "hmm... retainedSlice(...) should be fine. This will do an extra memory copy which I think is not needed.", "hmm... retainedSlice(...) should be fine. This will do an extra memory copy which I think is not needed.", "hmm... retainedSlice(...) should be fine. This will do an extra memory copy which I think is not needed.", "hmm... retainedSlice(...) should be fine. This will do an extra memory copy which I think is not needed.", "hmm... retainedSlice(...) should be fine. This will do an extra memory copy which I think is not needed.", "hmm... retainedSlice(...) should be fine. This will do an extra memory copy which I think is not needed.", "hmm... retainedSlice(...) should be fine. This will do an extra memory copy which I think is not needed.", "hmm... retainedSlice(...) should be fine. This will do an extra memory copy which I think is not needed.", "I think the `b.readable()` for the second arg is not needed. ", "OK, I will double check.", "As I wrote, there is an \"old\" bug, rarely caught, when the buffer `undecodedChunk` exceeds the memory limit. When it reaches this limit, there is a call to `undecodedChunk.discardReadBytes();`. I found this bug accidentally, through my other new calls to the very same methods and errors in unit testing.\r\n\r\nWhen this call occurs, the underlying buffers are changed, changing all HttpData values (of course, incorrectly).\r\nTo prevent this, I propose to copy it. 
I know this means more memory copying, but the final values stay correct, unlike with the current status.\r\nAlso, I changed this \"high limit only\" call to \"each time it is relevant\" (when new data can be added while there is enough space within the current buffer to fill it with this new data).\r\n\r\nAnother way would be to ignore this high limit entirely and therefore never call `undecodedChunk.discardReadBytes();` anywhere, in order to keep final consistency.\r\n\r\nWDYT?", "Not mandatory, but there, it is valid (there is no need to allocate more than this in this specific case).\r\nIf you prefer, I could remove it (while valid).", "Of course, the second option (no more calls to `discardReadBytes()`) would lead to higher memory usage since the underlying buffer will be kept until the end.\r\nUsing \"to disk\" (or mixed) management of HttpData can decrease this memory pressure, especially for file upload (but not limited to it), but since, in the mixed way, some HttpData could keep using the original undecodedChunk, this could lead to wrong data at the end, hence my original proposition.\r\n\r\nAny direction or idea ?", "I think we should only call `undecoded.discardReadBytes()` if `refCnt() == 1`.", "It could be a good idea !\r\nI know this will almost never be true (due to `retainedSlice()` that will increment `refCnt()`), but at least it will give it a chance (as with the `SeekAhead` algorithm that is almost never usable) and obviously limits the copy as much as possible.\r\n\r\nI will give it a try. Thanks @normanmaurer for this idea !", "Done", "Done:\r\n\r\nNew benchmark results:\r\nBenchmark Mode Cnt Score Error Units\r\nHttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 2,248 ± 0,198 ops/ms\r\nHttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 2,067 ± 1,219 ops/ms\r\nHttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 1,109 ± 0,038 ops/ms\r\nHttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 2,326 ± 0,314 ops/ms\r\nHttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 1,444 ± 0,226 ops/ms\r\nHttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 1,462 ± 0,642 ops/ms\r\nHttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,159 ± 0,003 ops/ms\r\nHttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 1,522 ± 0,049 ops/ms\r\n\r\nAlmost the same as with the previous code.", "seems like a leak in the test that was not related to your change... correct ?", "seems like a leak in the test that was not related to your change... correct ?", "seems like a leak in the test that was not related to your change... correct ?", "seems like a leak in the test that was not related to your change... correct ?", "seems like a leak in the test that was not related to your change... correct ?", "seems like a leak in the test that was not related to your change... correct ?", "seems like a leak in the test that was not related to your change... correct ?", "Yes, not related. I was just getting an error in PARANOID mode from time to time (before and after the changes), so I tried to fix the tests as much as possible to be more coherent. 
\r\nThe reason is: since the test does not go through the network but calls things directly, there is no handler taking care of incoming messages, so there is no cleanup of the request after it is received by the main handler, as there should be.", "Same answer: not related, just fixed the test", "Same answer: not related, just fixed the test", "Same answer: not related, just fixed the test", "Same answer: not related, just fixed the test", "Same answer: not related, just fixed the test", "Same answer: not related, just fixed the test" ]
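The review exchange above converges on guarding `discardReadBytes()` with `refCnt() == 1`. As an illustration of why that guard matters (a standalone sketch, not code from the pull request): a retained slice shares the parent's memory, so compacting the parent underneath it would silently change the slice's contents.

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public final class DiscardReadBytesGuard {
    public static void main(String[] args) {
        ByteBuf chunk = Unpooled.buffer(16);
        chunk.writeBytes("abcdef".getBytes(CharsetUtil.US_ASCII));
        chunk.skipBytes(3); // "abc" has been consumed

        // A retained slice over the unread bytes, like the slices the decoder
        // hands out as HttpData content; it bumps the parent's refCnt to 2.
        ByteBuf slice = chunk.retainedSlice(chunk.readerIndex(), 3); // "def"

        if (chunk.refCnt() == 1) {
            chunk.discardReadBytes(); // safe: no one else sees this memory
        }
        // Here refCnt() == 2, so the compaction is skipped: discardReadBytes()
        // would move "def" to index 0 while the slice still reads indices 3..5.

        System.out.println(slice.toString(CharsetUtil.US_ASCII)); // prints "def"
        slice.release();
        chunk.release();
    }
}
```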
"2020-09-30T09:16:13Z"
[]
HttpPostMultipartRequestDecoder performance regression
### Expected behavior #10226 improves performance. ### Actual behavior The decoder is now at least 100x slower than before iff it is offered a direct buffer. If the decoder is offered a direct buffer it now creates a bunch of 64b direct buffers during parsing, where before it always used heap buffers. Using a `PooledByteBufAllocator` that prefers direct buffers did not solve the issue. Using a `(Un)PooledByteBufAllocator` that prefers heap buffers normalizes performance again. Given that this issue was not listed in the announcement under important, I would think that this was not intentional. ### Steps to reproduce Create an HttpPostMultipartRequestDecoder and offer around 1000 chunks (64k); in 4.1.49 this runs in seconds, whereas in 4.1.50 it takes minutes. ### Minimal yet complete reproducer code (or URL to code) We have [this](https://github.com/apache/flink/blob/e991de19337591cde444fbede010cb8bdc7f118f/flink-runtime/src/test/java/org/apache/flink/runtime/rest/FileUploadHandlerTest.java) test in Apache Flink that exhibits the problem. I can try to write a minimal reproducer if requested; it may take some time to figure out how to correctly create the requests :/ ### Netty version 4.1.50 ### JVM version (e.g. `java -version`) 8 ### OS version (e.g. `uname -a`) Windows 10, but we also saw it on Ubuntu Linux
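A compact sketch of the reproduction recipe described above: hand-built multipart chunks in direct buffers offered straight to the decoder. The boundary string, header layout and sizes are arbitrary choices for illustration, not taken from the Flink test.

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.handler.codec.http.DefaultHttpContent;
import io.netty.handler.codec.http.DefaultHttpRequest;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
import io.netty.util.CharsetUtil;

public final class DirectBufferDecoderRepro {
    public static void main(String[] args) {
        String boundary = "reproBoundary01"; // arbitrary
        HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload");
        req.headers().set(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary);

        HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(req);
        ByteBufAllocator alloc = ByteBufAllocator.DEFAULT;

        String head = "--" + boundary + "\r\n"
                + "Content-Disposition: form-data; name=\"file\"; filename=\"f.bin\"\r\n"
                + "Content-Type: application/octet-stream\r\n\r\n";
        offerDirect(decoder, alloc, head.getBytes(CharsetUtil.US_ASCII));

        byte[] payload = new byte[64 * 1024];
        for (int i = 0; i < 1000; i++) { // ~1000 chunks of 64k, as in the report
            offerDirect(decoder, alloc, payload);
        }
        offerDirect(decoder, alloc, ("\r\n--" + boundary + "--\r\n").getBytes(CharsetUtil.US_ASCII));
        decoder.offer(LastHttpContent.EMPTY_LAST_CONTENT);
        decoder.destroy();
    }

    // The key detail: the chunks are DIRECT buffers, which is what triggered
    // the 4.1.50 slowdown according to the report.
    private static void offerDirect(HttpPostRequestDecoder decoder, ByteBufAllocator alloc, byte[] data) {
        ByteBuf buf = alloc.directBuffer(data.length).writeBytes(data);
        HttpContent content = new DefaultHttpContent(buf);
        decoder.offer(content);
        content.release();
    }
}
```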
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java", "microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java", "microbench/src/main/java/io/netty/handler/codec/http/multipart/package-info.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index a366bd31606..b125dfae0e7 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -123,6 +123,11 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest */ private Attribute currentAttribute; + /** + * The current Data position before finding delimiter + */ + private int lastDataPosition; + private boolean destroyed; private int discardThreshold = HttpPostRequestDecoder.DEFAULT_DISCARD_THRESHOLD; @@ -337,12 +342,15 @@ public HttpPostMultipartRequestDecoder offer(HttpContent content) { // which is not really usable for us as we may exceed it once we add more bytes. buf.alloc().buffer(buf.readableBytes()).writeBytes(buf); } else { + int readPos = undecodedChunk.readerIndex(); + int writable = undecodedChunk.writableBytes(); + int toWrite = buf.readableBytes(); + if (undecodedChunk.refCnt() == 1 && writable < toWrite && readPos + writable >= toWrite) { + undecodedChunk.discardReadBytes(); + } undecodedChunk.writeBytes(buf); } parseBody(); - if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) { - undecodedChunk.discardReadBytes(); - } return this; } @@ -980,47 +988,6 @@ private void cleanMixedAttributes() { currentFieldAttributes.remove(HttpHeaderValues.FILENAME); } - /** - * Read one line up to the CRLF or LF - * - * @return the String from one line - * @throws NotEnoughDataDecoderException - * Need more chunks and reset the {@code readerIndex} to the previous - * value - */ - private static String readLineStandard(ByteBuf undecodedChunk, Charset charset) { - int readerIndex = undecodedChunk.readerIndex(); - ByteBuf line = undecodedChunk.alloc().heapBuffer(64); - try { - while (undecodedChunk.isReadable()) { - byte nextByte = undecodedChunk.readByte(); - if (nextByte == HttpConstants.CR) { - // check but do not changed readerIndex - nextByte = undecodedChunk.getByte(undecodedChunk.readerIndex()); - if (nextByte == HttpConstants.LF) { - // force read - undecodedChunk.readByte(); - return line.toString(charset); - } else { - // Write CR (not followed by LF) - line.writeByte(HttpConstants.CR); - } - } else if (nextByte == HttpConstants.LF) { - return line.toString(charset); - } else { - line.writeByte(nextByte); - } - } - } catch (IndexOutOfBoundsException e) { - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(e); - } finally { - line.release(); - } - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - /** * Read one line up to the CRLF or LF * @@ -1030,44 +997,23 @@ private static String readLineStandard(ByteBuf undecodedChunk, Charset charset) * value */ private static String readLine(ByteBuf undecodedChunk, Charset charset) { - if (!undecodedChunk.hasArray()) { - return readLineStandard(undecodedChunk, charset); + final int readerIndex = undecodedChunk.readerIndex(); + int posLf = undecodedChunk.bytesBefore(HttpConstants.LF); + if (posLf == -1) { + throw new NotEnoughDataDecoderException(); } - SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk); - int readerIndex = undecodedChunk.readerIndex(); - ByteBuf line = undecodedChunk.alloc().heapBuffer(64); - try { - while (sao.pos < 
sao.limit) { - byte nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.CR) { - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return line.toString(charset); - } else { - // Write CR (not followed by LF) - sao.pos--; - line.writeByte(HttpConstants.CR); - } - } else { - line.writeByte(nextByte); - } - } else if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return line.toString(charset); - } else { - line.writeByte(nextByte); - } - } - } catch (IndexOutOfBoundsException e) { - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(e); - } finally { - line.release(); + boolean crFound = + undecodedChunk.getByte(readerIndex + posLf - 1) == HttpConstants.CR; + if (crFound) { + posLf--; } - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); + CharSequence line = undecodedChunk.readCharSequence(posLf, charset); + if (crFound) { + undecodedChunk.skipBytes(2); + } else { + undecodedChunk.skipBytes(1); + } + return line.toString(); } /** @@ -1085,201 +1031,73 @@ private static String readLine(ByteBuf undecodedChunk, Charset charset) { * Need more chunks and reset the {@code readerIndex} to the previous * value */ - private static String readDelimiterStandard(ByteBuf undecodedChunk, String delimiter) { - int readerIndex = undecodedChunk.readerIndex(); + private String readDelimiter(ByteBuf undecodedChunk, String delimiter) { + final int readerIndex = undecodedChunk.readerIndex(); try { - StringBuilder sb = new StringBuilder(64); - int delimiterPos = 0; - int len = delimiter.length(); - while (undecodedChunk.isReadable() && delimiterPos < len) { - byte nextByte = undecodedChunk.readByte(); - if (nextByte == delimiter.charAt(delimiterPos)) { - delimiterPos++; - sb.append((char) nextByte); + final int len = delimiter.length(); + if (len + 2 > undecodedChunk.readableBytes()) { + // Not able to check if "--" is present + throw new NotEnoughDataDecoderException(); + } + int newPositionDelimiter = findDelimiter(undecodedChunk, delimiter, 0); + if (newPositionDelimiter != 0) { + // Delimiter not fully found + throw new NotEnoughDataDecoderException(); + } + byte nextByte = undecodedChunk.getByte(readerIndex + len); + // first check for opening delimiter + if (nextByte == HttpConstants.CR) { + nextByte = undecodedChunk.getByte(readerIndex + len + 1); + if (nextByte == HttpConstants.LF) { + CharSequence line = undecodedChunk.readCharSequence(len, charset); + undecodedChunk.skipBytes(2); + return line.toString(); } else { + // error since CR must be followed by LF // delimiter not found so break here ! undecodedChunk.readerIndex(readerIndex); throw new NotEnoughDataDecoderException(); } - } - // Now check if either opening delimiter or closing delimiter - if (undecodedChunk.isReadable()) { - byte nextByte = undecodedChunk.readByte(); - // first check for opening delimiter - if (nextByte == HttpConstants.CR) { - nextByte = undecodedChunk.readByte(); - if (nextByte == HttpConstants.LF) { - return sb.toString(); - } else { - // error since CR must be followed by LF - // delimiter not found so break here ! 
- undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else if (nextByte == HttpConstants.LF) { - return sb.toString(); - } else if (nextByte == '-') { - sb.append('-'); - // second check for closing delimiter - nextByte = undecodedChunk.readByte(); - if (nextByte == '-') { - sb.append('-'); - // now try to find if CRLF or LF there - if (undecodedChunk.isReadable()) { + } else if (nextByte == HttpConstants.LF) { + CharSequence line = undecodedChunk.readCharSequence(len, charset); + undecodedChunk.skipBytes(1); + return line.toString(); + } else if (nextByte == '-') { + // second check for closing delimiter + nextByte = undecodedChunk.getByte(readerIndex + len + 1); + if (nextByte == '-') { + CharSequence line = undecodedChunk.readCharSequence(len + 2, charset); + // now try to find if CRLF or LF there + if (undecodedChunk.isReadable()) { + nextByte = undecodedChunk.readByte(); + if (nextByte == HttpConstants.CR) { nextByte = undecodedChunk.readByte(); - if (nextByte == HttpConstants.CR) { - nextByte = undecodedChunk.readByte(); - if (nextByte == HttpConstants.LF) { - return sb.toString(); - } else { - // error CR without LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else if (nextByte == HttpConstants.LF) { - return sb.toString(); + if (nextByte == HttpConstants.LF) { + return line.toString(); } else { - // No CRLF but ok however (Adobe Flash uploader) - // minus 1 since we read one char ahead but - // should not - undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1); - return sb.toString(); + // error CR without LF + // delimiter not found so break here ! + undecodedChunk.readerIndex(readerIndex); + throw new NotEnoughDataDecoderException(); } - } - // FIXME what do we do here? - // either considering it is fine, either waiting for - // more data to come? - // lets try considering it is fine... - return sb.toString(); - } - // only one '-' => not enough - // whatever now => error since incomplete - } - } - } catch (IndexOutOfBoundsException e) { - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(e); - } - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - - /** - * Read one line up to --delimiter or --delimiter-- and if existing the CRLF - * or LF. Note that CRLF or LF are mandatory for opening delimiter - * (--delimiter) but not for closing delimiter (--delimiter--) since some - * clients does not include CRLF in this case. 
- * - * @param delimiter - * of the form --string, such that '--' is already included - * @return the String from one line as the delimiter searched (opening or - * closing) - * @throws NotEnoughDataDecoderException - * Need more chunks and reset the readerInder to the previous - * value - */ - private static String readDelimiter(ByteBuf undecodedChunk, String delimiter) { - if (!undecodedChunk.hasArray()) { - return readDelimiterStandard(undecodedChunk, delimiter); - } - SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk); - int readerIndex = undecodedChunk.readerIndex(); - int delimiterPos = 0; - int len = delimiter.length(); - try { - StringBuilder sb = new StringBuilder(64); - // check conformity with delimiter - while (sao.pos < sao.limit && delimiterPos < len) { - byte nextByte = sao.bytes[sao.pos++]; - if (nextByte == delimiter.charAt(delimiterPos)) { - delimiterPos++; - sb.append((char) nextByte); - } else { - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } - // Now check if either opening delimiter or closing delimiter - if (sao.pos < sao.limit) { - byte nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.CR) { - // first check for opening delimiter - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return sb.toString(); + } else if (nextByte == HttpConstants.LF) { + return line.toString(); } else { - // error CR without LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); + // No CRLF but ok however (Adobe Flash uploader) + // minus 1 since we read one char ahead but + // should not + undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1); + return line.toString(); } - } else { - // error since CR must be followed by LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else if (nextByte == HttpConstants.LF) { - // same first check for opening delimiter where LF used with - // no CR - sao.setReadPosition(0); - return sb.toString(); - } else if (nextByte == '-') { - sb.append('-'); - // second check for closing delimiter - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == '-') { - sb.append('-'); - // now try to find if CRLF or LF there - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.CR) { - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return sb.toString(); - } else { - // error CR without LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else { - // error CR without LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return sb.toString(); - } else { - // No CRLF but ok however (Adobe Flash - // uploader) - // minus 1 since we read one char ahead but - // should not - sao.setReadPosition(1); - return sb.toString(); - } - } - // FIXME what do we do here? - // either considering it is fine, either waiting for - // more data to come? - // lets try considering it is fine... 
- sao.setReadPosition(0); - return sb.toString(); - } - // whatever now => error since incomplete - // only one '-' => not enough or whatever not enough - // element } + // FIXME what do we do here? + // either considering it is fine, either waiting for + // more data to come? + // lets try considering it is fine... + return line.toString(); } + // only one '-' => not enough + // whatever now => error since incomplete } } catch (IndexOutOfBoundsException e) { undecodedChunk.readerIndex(readerIndex); @@ -1290,96 +1108,93 @@ private static String readDelimiter(ByteBuf undecodedChunk, String delimiter) { } /** - * Load the field value or file data from a Multipart request + * @param undecodedChunk the source where the delimiter is to be found + * @param delimiter the string to find out + * @param offset the offset from readerIndex within the undecodedChunk to + * start from to find out the delimiter * - * @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks - * @throws ErrorDataDecoderException + * @return a number >= 0 if found, else new offset with negative value + * (to inverse), both from readerIndex + * @throws NotEnoughDataDecoderException + * Need more chunks while relative position with readerIndex is 0 */ - private static boolean loadDataMultipartStandard(ByteBuf undecodedChunk, String delimiter, HttpData httpData) { + private static int findDelimiter(ByteBuf undecodedChunk, String delimiter, int offset) { final int startReaderIndex = undecodedChunk.readerIndex(); final int delimeterLength = delimiter.length(); - int index = 0; - int lastPosition = startReaderIndex; - byte prevByte = HttpConstants.LF; - boolean delimiterFound = false; - while (undecodedChunk.isReadable()) { - final byte nextByte = undecodedChunk.readByte(); - // Check the delimiter - if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) { - index++; - if (delimeterLength == index) { - delimiterFound = true; + final int toRead = undecodedChunk.readableBytes(); + int newOffset = offset; + boolean delimiterNotFound = true; + while (delimiterNotFound && newOffset + delimeterLength <= toRead) { + int posFirstChar = undecodedChunk + .bytesBefore(startReaderIndex + newOffset, toRead - newOffset, + (byte) delimiter.codePointAt(0)); + if (posFirstChar == -1) { + newOffset = toRead; + return -newOffset; + } + newOffset = posFirstChar + offset; + if (newOffset + delimeterLength > toRead) { + return -newOffset; + } + // assume will found it + delimiterNotFound = false; + for (int index = 1; index < delimeterLength; index++) { + if (undecodedChunk.getByte(startReaderIndex + newOffset + index) != delimiter.codePointAt(index)) { + // ignore first found offset and redo search from next char + newOffset++; + delimiterNotFound = true; break; } - continue; } - lastPosition = undecodedChunk.readerIndex(); - if (nextByte == HttpConstants.LF) { - index = 0; - lastPosition -= (prevByte == HttpConstants.CR)? 
2 : 1; - } - prevByte = nextByte; } - if (prevByte == HttpConstants.CR) { - lastPosition--; - } - ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex); - try { - httpData.addContent(content, delimiterFound); - } catch (IOException e) { - throw new ErrorDataDecoderException(e); + if (delimiterNotFound || newOffset + delimeterLength > toRead) { + if (newOffset == 0) { + throw new NotEnoughDataDecoderException(); + } + return -newOffset; } - undecodedChunk.readerIndex(lastPosition); - return delimiterFound; + return newOffset; } /** * Load the field value from a Multipart request * * @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks + * * @throws ErrorDataDecoderException */ - private static boolean loadDataMultipart(ByteBuf undecodedChunk, String delimiter, HttpData httpData) { - if (!undecodedChunk.hasArray()) { - return loadDataMultipartStandard(undecodedChunk, delimiter, httpData); - } - final SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk); + private boolean loadDataMultipart(ByteBuf undecodedChunk, String delimiter, + HttpData httpData) { final int startReaderIndex = undecodedChunk.readerIndex(); - final int delimeterLength = delimiter.length(); - int index = 0; - int lastRealPos = sao.pos; - byte prevByte = HttpConstants.LF; - boolean delimiterFound = false; - while (sao.pos < sao.limit) { - final byte nextByte = sao.bytes[sao.pos++]; - // Check the delimiter - if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) { - index++; - if (delimeterLength == index) { - delimiterFound = true; - break; - } - continue; - } - lastRealPos = sao.pos; - if (nextByte == HttpConstants.LF) { - index = 0; - lastRealPos -= (prevByte == HttpConstants.CR)? 
2 : 1; + int newOffset; + try { + newOffset = findDelimiter(undecodedChunk, delimiter, lastDataPosition); + if (newOffset < 0) { + // delimiter not found + lastDataPosition = -newOffset; + return false; } - prevByte = nextByte; + } catch (NotEnoughDataDecoderException e) { + // Not enough data and no change to lastDataPosition + return false; } - if (prevByte == HttpConstants.CR) { - lastRealPos--; + // found delimiter but still need to check if CRLF before + int startDelimiter = newOffset; + if (undecodedChunk.getByte(startReaderIndex + startDelimiter - 1) == HttpConstants.LF) { + startDelimiter--; + if (undecodedChunk.getByte(startReaderIndex + startDelimiter - 1) == HttpConstants.CR) { + startDelimiter--; + } } - final int lastPosition = sao.getReadPosition(lastRealPos); - final ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex); + ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, startDelimiter); try { - httpData.addContent(content, delimiterFound); + httpData.addContent(content, true); } catch (IOException e) { throw new ErrorDataDecoderException(e); } - undecodedChunk.readerIndex(lastPosition); - return delimiterFound; + lastDataPosition = 0; + undecodedChunk.readerIndex(startReaderIndex + startDelimiter); + return true; } /** diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java index 37e41fa9176..89733794a96 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java @@ -299,12 +299,15 @@ public HttpPostStandardRequestDecoder offer(HttpContent content) { // which is not really usable for us as we may exceed it once we add more bytes. buf.alloc().buffer(buf.readableBytes()).writeBytes(buf); } else { + int readPos = undecodedChunk.readerIndex(); + int writable = undecodedChunk.writableBytes(); + int toWrite = buf.readableBytes(); + if (undecodedChunk.refCnt() == 1 && writable < toWrite && readPos + writable >= toWrite) { + undecodedChunk.discardReadBytes(); + } undecodedChunk.writeBytes(buf); } parseBody(); - if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) { - undecodedChunk.discardReadBytes(); - } return this; } diff --git a/microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java b/microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java new file mode 100644 index 00000000000..c570cd67455 --- /dev/null +++ b/microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java @@ -0,0 +1,202 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http.multipart; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpContent; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.DefaultLastHttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.microbench.util.AbstractMicrobenchmark; +import io.netty.util.ResourceLeakDetector; +import io.netty.util.ResourceLeakDetector.Level; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; + +import java.util.concurrent.TimeUnit; + + +@Threads(1) +@Warmup(iterations = 2) +@Measurement(iterations = 3) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +public class HttpPostMultipartRequestDecoderBenchmark + extends AbstractMicrobenchmark { + + public double testHighNumberChunks(boolean big, boolean noDisk) { + String BOUNDARY = "01f136d9282f"; + int size = 8 * 1024; + int chunkNumber = 64; + StringBuilder stringBuilder = new StringBuilder(size); + stringBuilder.setLength(size); + String data = stringBuilder.toString(); + + byte[] bodyStartBytes = ("--" + BOUNDARY + "\n" + + "Content-Disposition: form-data; name=\"msg_id\"\n\n15200\n--" + + BOUNDARY + + "\nContent-Disposition: form-data; name=\"msg1\"; filename=\"file1.txt\"\n\n" + + data).getBytes(); + byte[] bodyPartBigBytes = data.getBytes(); + byte[] intermediaryBytes = ("\n--" + BOUNDARY + + "\nContent-Disposition: form-data; name=\"msg2\"; filename=\"file2.txt\"\n\n" + + data).getBytes(); + byte[] finalBigBytes = ("\n" + "--" + BOUNDARY + "--\n").getBytes(); + ByteBuf firstBuf = Unpooled.wrappedBuffer(bodyStartBytes); + ByteBuf finalBuf = Unpooled.wrappedBuffer(finalBigBytes); + ByteBuf nextBuf; + if (big) { + nextBuf = Unpooled.wrappedBuffer(bodyPartBigBytes); + } else { + nextBuf = Unpooled.wrappedBuffer(intermediaryBytes); + } + DefaultHttpRequest req = + new DefaultHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.POST, "/up"); + req.headers().add(HttpHeaderNames.CONTENT_TYPE, + "multipart/form-data; boundary=" + BOUNDARY); + + long start = System.nanoTime(); + + DefaultHttpDataFactory defaultHttpDataFactory = + new DefaultHttpDataFactory(noDisk? 
1024 * 1024 : 16 * 1024); + HttpPostRequestDecoder decoder = + new HttpPostRequestDecoder(defaultHttpDataFactory, req); + firstBuf.retain(); + decoder.offer(new DefaultHttpContent(firstBuf)); + firstBuf.release(); + for (int i = 1; i < chunkNumber; i++) { + nextBuf.retain(); + decoder.offer(new DefaultHttpContent(nextBuf)); + nextBuf.release(); + nextBuf.readerIndex(0); + } + finalBuf.retain(); + decoder.offer(new DefaultLastHttpContent(finalBuf)); + finalBuf.release(); + while (decoder.hasNext()) { + InterfaceHttpData httpData = decoder.next(); + } + while (finalBuf.refCnt() > 0) { + finalBuf.release(); + } + while (nextBuf.refCnt() > 0) { + nextBuf.release(); + } + while (finalBuf.refCnt() > 0) { + finalBuf.release(); + } + long stop = System.nanoTime(); + double time = (stop - start) / 1000000.0; + defaultHttpDataFactory.cleanAllHttpData(); + defaultHttpDataFactory.cleanRequestHttpData(req); + decoder.destroy(); + return time; + } + + @Benchmark + public double multipartRequestDecoderHighDisabledLevel() { + final Level level = ResourceLeakDetector.getLevel(); + try { + ResourceLeakDetector.setLevel(Level.DISABLED); + return testHighNumberChunks(false, true); + } finally { + ResourceLeakDetector.setLevel(level); + } + } + + @Benchmark + public double multipartRequestDecoderBigDisabledLevel() { + final Level level = ResourceLeakDetector.getLevel(); + try { + ResourceLeakDetector.setLevel(Level.DISABLED); + return testHighNumberChunks(true, true); + } finally { + ResourceLeakDetector.setLevel(level); + } + } + + @Benchmark + public double multipartRequestDecoderHighSimpleLevel() { + final Level level = ResourceLeakDetector.getLevel(); + try { + ResourceLeakDetector.setLevel(Level.SIMPLE); + return testHighNumberChunks(false, true); + } finally { + ResourceLeakDetector.setLevel(level); + } + } + + @Benchmark + public double multipartRequestDecoderBigSimpleLevel() { + final Level level = ResourceLeakDetector.getLevel(); + try { + ResourceLeakDetector.setLevel(Level.SIMPLE); + return testHighNumberChunks(true, true); + } finally { + ResourceLeakDetector.setLevel(level); + } + } + + @Benchmark + public double multipartRequestDecoderHighAdvancedLevel() { + final Level level = ResourceLeakDetector.getLevel(); + try { + ResourceLeakDetector.setLevel(Level.ADVANCED); + return testHighNumberChunks(false, true); + } finally { + ResourceLeakDetector.setLevel(level); + } + } + + @Benchmark + public double multipartRequestDecoderBigAdvancedLevel() { + final Level level = ResourceLeakDetector.getLevel(); + try { + ResourceLeakDetector.setLevel(Level.ADVANCED); + return testHighNumberChunks(true, true); + } finally { + ResourceLeakDetector.setLevel(level); + } + } + + @Benchmark + public double multipartRequestDecoderHighParanoidLevel() { + final Level level = ResourceLeakDetector.getLevel(); + try { + ResourceLeakDetector.setLevel(Level.PARANOID); + return testHighNumberChunks(false, true); + } finally { + ResourceLeakDetector.setLevel(level); + } + } + + @Benchmark + public double multipartRequestDecoderBigParanoidLevel() { + final Level level = ResourceLeakDetector.getLevel(); + try { + ResourceLeakDetector.setLevel(Level.PARANOID); + return testHighNumberChunks(true, true); + } finally { + ResourceLeakDetector.setLevel(level); + } + } + +} diff --git a/microbench/src/main/java/io/netty/handler/codec/http/multipart/package-info.java b/microbench/src/main/java/io/netty/handler/codec/http/multipart/package-info.java new file mode 100644 index 00000000000..1f4cc119dcb --- /dev/null +++ 
b/microbench/src/main/java/io/netty/handler/codec/http/multipart/package-info.java @@ -0,0 +1,19 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +/** + * Benchmarks for {@link io.netty.handler.codec.http.multipart}. + */ +package io.netty.handler.codec.http.multipart;
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java index ba01e4d240b..45b8461ce72 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java @@ -132,6 +132,7 @@ public void testFullHttpRequestUpload() throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); + req.release(); decoder.destroy(); } @@ -178,6 +179,7 @@ public void testMultipartCodecWithCRasEndOfAttribute() throws Exception { assertNotNull(datar); assertEquals(datas[i].getBytes(CharsetUtil.UTF_8).length, datar.length); + req.release(); decoder.destroy(); } } @@ -211,6 +213,7 @@ public void testQuotedBoundary() throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); + req.release(); decoder.destroy(); } @@ -368,6 +371,7 @@ public void testFilenameContainingSemicolon() throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); + req.release(); decoder.destroy(); } @@ -397,6 +401,7 @@ public void testFilenameContainingSemicolon2() throws Exception { assertTrue(part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("tmp 0.txt", fileUpload.getFilename()); + req.release(); decoder.destroy(); } @@ -427,6 +432,7 @@ public void testMultipartRequestWithoutContentTypeBody() { // Create decoder instance to test without any exception. 
final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); + req.release(); decoder.destroy(); } @@ -459,8 +465,8 @@ public void testDecodeOtherMimeHeaderFields() throws Exception { FileUpload fileUpload = (FileUpload) part1; byte[] fileBytes = fileUpload.get(); assertTrue("the filecontent should not be decoded", filecontent.equals(new String(fileBytes))); - decoder.destroy(); req.release(); + decoder.destroy(); } @Test @@ -538,8 +544,8 @@ public void testFormEncodeIncorrect() throws Exception { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); } finally { - decoder.destroy(); content.release(); + decoder.destroy(); } } @@ -573,8 +579,8 @@ public void testDecodeContentDispositionFieldParameters() throws Exception { assertTrue("the item should be a FileUpload", part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); - decoder.destroy(); req.release(); + decoder.destroy(); } // https://github.com/netty/netty/pull/7265 @@ -609,8 +615,8 @@ public void testDecodeWithLanguageContentDispositionFieldParameters() throws Exc assertTrue("the item should be a FileUpload", part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); - decoder.destroy(); req.release(); + decoder.destroy(); } // https://github.com/netty/netty/pull/7265 @@ -704,6 +710,7 @@ public void testDecodeMalformedEmptyContentTypeFieldParameters() throws Exceptio assertTrue(part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("tmp-0.txt", fileUpload.getFilename()); + req.release(); decoder.destroy(); } @@ -752,7 +759,7 @@ public void testNotLeak() { FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", Unpooled.copiedBuffer("a=1&&b=2", CharsetUtil.US_ASCII)); try { - new HttpPostStandardRequestDecoder(request); + new HttpPostStandardRequestDecoder(request).destroy(); } finally { assertTrue(request.release()); } @@ -772,7 +779,7 @@ private static void testNotLeakWhenWrapIllegalArgumentException(ByteBuf buf) { buf.writeCharSequence("a=b&foo=%22bar%22&==", CharsetUtil.US_ASCII); FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", buf); try { - new HttpPostStandardRequestDecoder(request); + new HttpPostStandardRequestDecoder(request).destroy(); } finally { assertTrue(request.release()); } @@ -823,8 +830,8 @@ public void testDecodeWithLanguageContentDispositionFieldParametersForFix() thro FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); - decoder.destroy(); req.release(); + decoder.destroy(); } @Test @@ -860,8 +867,8 @@ public void testDecodeFullHttpRequestWithUrlEncodedBody() throws Exception { assertTrue(attr.getByteBuf().isDirect()); assertEquals("los angeles", attr.getValue()); - decoder.destroy(); req.release(); + decoder.destroy(); } @Test @@ -877,7 +884,7 @@ public void testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte0() { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertEquals("Invalid hex byte at index '0' in string: '%'", e.getMessage()); } finally { - req.release(); + assertTrue(req.release()); } } @@ -894,7 +901,7 @@ public void 
testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte1() { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertEquals("Invalid hex byte at index '0' in string: '%2'", e.getMessage()); } finally { - req.release(); + assertTrue(req.release()); } } @@ -911,7 +918,7 @@ public void testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleHi() } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertEquals("Invalid hex byte at index '0' in string: '%Zc'", e.getMessage()); } finally { - req.release(); + assertTrue(req.release()); } } @@ -928,7 +935,7 @@ public void testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleLo() } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertEquals("Invalid hex byte at index '0' in string: '%2g'", e.getMessage()); } finally { - req.release(); + assertTrue(req.release()); } } }
val
test
"2020-11-16T09:03:37"
"2020-08-27T08:53:35Z"
zentol
val
netty/netty/10614_10659
netty/netty
netty/netty/10614
netty/netty/10659
[ "keyword_pr_to_issue" ]
9d457c3f0f883312c155a495b566f6c576d89a52
fd8c1874b4e24a18c562c7013efabcb155395459
[ "hi~ I not sure UnorderedThreadPoolEventExecutor#scheduleAtFixedRate should work as you expected ,but as you expected ,you can change the code as I already commit. " ]
[ "After this unblocks we should cancel the scheduled task via `future.cancel()`. Also we should shutdown the `executor`.", "nit: shutdownGracefully() ?", "Alright, done." ]
"2020-10-08T14:11:08Z"
[]
UnorderedThreadPoolEventExecutor#scheduleAtFixedRate not working as expected
### Expected behavior The runnable passed to scheduleAtFixedRate() should run multiple times, not just once. ### Actual behavior Using UnorderedThreadPoolEventExecutor, the runnable is executed just once; using the ScheduledThreadPoolExecutor directly or DefaultEventExecutorGroup, the task is executed at a fixed rate as expected. ### Minimal yet complete reproducer code ```java public static void main(final String[] args) throws Exception { final UnorderedThreadPoolEventExecutor executor = new UnorderedThreadPoolEventExecutor(2); //final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(2); // THIS WORKS //final DefaultEventExecutorGroup executor = new DefaultEventExecutorGroup(2); // THIS WORKS executor.scheduleAtFixedRate(() -> System.out.println("XXX"), 1, 1, TimeUnit.SECONDS); executor.awaitTermination(10, TimeUnit.SECONDS); // let it run... } ``` ### Netty version 4.1.52.Final ### JVM version latest OpenJDK 11 and 15
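For background on why a decorating executor can lose periodicity (a reading of the JDK contract, not text from the issue): `ScheduledThreadPoolExecutor` re-queues a fixed-rate task from inside the `RunnableScheduledFuture` it creates, so any `decorateTask` override has to delegate `run()` to that future rather than to the raw `Runnable`. A minimal sketch of a periodicity-preserving decorator:

```java
import java.util.concurrent.Delayed;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RunnableScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class DelegatingScheduledExecutor extends ScheduledThreadPoolExecutor {
    public DelegatingScheduledExecutor(int corePoolSize) {
        super(corePoolSize);
    }

    @Override
    protected <V> RunnableScheduledFuture<V> decorateTask(
            Runnable runnable, RunnableScheduledFuture<V> task) {
        // Delegate to 'task': its run() performs "execute once, then re-enqueue"
        // for periodic tasks. Wrapping 'runnable' directly would run the body
        // a single time and never re-schedule it.
        return new ForwardingFuture<V>(task);
    }

    private static final class ForwardingFuture<V> implements RunnableScheduledFuture<V> {
        private final RunnableScheduledFuture<V> delegate;

        ForwardingFuture(RunnableScheduledFuture<V> delegate) {
            this.delegate = delegate;
        }

        @Override public void run() { delegate.run(); } // preserves periodicity
        @Override public boolean isPeriodic() { return delegate.isPeriodic(); }
        @Override public long getDelay(TimeUnit unit) { return delegate.getDelay(unit); }
        @Override public int compareTo(Delayed o) { return delegate.compareTo(o); }
        @Override public boolean cancel(boolean mayInterrupt) { return delegate.cancel(mayInterrupt); }
        @Override public boolean isCancelled() { return delegate.isCancelled(); }
        @Override public boolean isDone() { return delegate.isDone(); }
        @Override public V get() throws InterruptedException, ExecutionException {
            return delegate.get();
        }
        @Override public V get(long timeout, TimeUnit unit)
                throws InterruptedException, ExecutionException, TimeoutException {
            return delegate.get(timeout, unit);
        }
    }
}
```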
[ "common/src/main/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutor.java" ]
[ "common/src/main/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutor.java" ]
[ "common/src/test/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutorTest.java", "transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java" ]
diff --git a/common/src/main/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutor.java b/common/src/main/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutor.java index 277c90322ad..dbf3f25fda6 100644 --- a/common/src/main/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutor.java +++ b/common/src/main/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutor.java @@ -161,12 +161,12 @@ public Iterator<EventExecutor> iterator() { @Override protected <V> RunnableScheduledFuture<V> decorateTask(Runnable runnable, RunnableScheduledFuture<V> task) { return runnable instanceof NonNotifyRunnable ? - task : new RunnableScheduledFutureTask<V>(this, runnable, task); + task : new RunnableScheduledFutureTask<V>(this, task); } @Override protected <V> RunnableScheduledFuture<V> decorateTask(Callable<V> callable, RunnableScheduledFuture<V> task) { - return new RunnableScheduledFutureTask<V>(this, callable, task); + return new RunnableScheduledFutureTask<V>(this, task); } @Override @@ -213,15 +213,8 @@ private static final class RunnableScheduledFutureTask<V> extends PromiseTask<V> implements RunnableScheduledFuture<V>, ScheduledFuture<V> { private final RunnableScheduledFuture<V> future; - RunnableScheduledFutureTask(EventExecutor executor, Runnable runnable, - RunnableScheduledFuture<V> future) { - super(executor, runnable); - this.future = future; - } - - RunnableScheduledFutureTask(EventExecutor executor, Callable<V> callable, - RunnableScheduledFuture<V> future) { - super(executor, callable); + RunnableScheduledFutureTask(EventExecutor executor, RunnableScheduledFuture<V> future) { + super(executor, future); this.future = future; }
diff --git a/common/src/test/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutorTest.java b/common/src/test/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutorTest.java index d96db3fcb07..2f4e02dc0ba 100644 --- a/common/src/test/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutorTest.java +++ b/common/src/test/java/io/netty/util/concurrent/UnorderedThreadPoolEventExecutorTest.java @@ -19,6 +19,7 @@ import org.junit.Test; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; public class UnorderedThreadPoolEventExecutorTest { @@ -54,4 +55,22 @@ public void operationComplete(Future<Object> future) throws Exception { executor.shutdownGracefully(); } } + + @Test(timeout = 10000) + public void scheduledAtFixedRateMustRunTaskRepeatedly() throws InterruptedException { + UnorderedThreadPoolEventExecutor executor = new UnorderedThreadPoolEventExecutor(1); + final CountDownLatch latch = new CountDownLatch(3); + Future<?> future = executor.scheduleAtFixedRate(new Runnable() { + @Override + public void run() { + latch.countDown(); + } + }, 1, 1, TimeUnit.MILLISECONDS); + try { + latch.await(); + } finally { + future.cancel(true); + executor.shutdownGracefully(); + } + } } diff --git a/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java b/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java index 58cf5af646c..c2f200a771a 100644 --- a/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java +++ b/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java @@ -152,7 +152,12 @@ public void testGetOptionWhenClosed() { ch.config().getSoLinger(); fail(); } catch (ChannelException e) { - assertTrue(e.getCause() instanceof ClosedChannelException); + if (!(e.getCause() instanceof ClosedChannelException)) { + AssertionError error = new AssertionError( + "Expected the suppressed exception to be an instance of ClosedChannelException."); + error.addSuppressed(e.getCause()); + throw error; + } } }
train
test
"2020-10-12T18:25:58"
"2020-09-26T12:18:54Z"
matteobertozzi
val
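A short aside on why the one-line constructor change in the patch above fixes the bug: `ScheduledThreadPoolExecutor` reschedules a periodic task by calling `run()` on the `RunnableScheduledFuture` it created, so a decorated task must delegate to that future; wrapping the bare `Runnable` (as the old constructors did) executes the body once and never re-queues it. The sketch below is a hedged, JDK-only illustration of that contract; the class name `DecorateTaskDemo` is made up and this is not Netty code.

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.RunnableScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DecorateTaskDemo {
    public static void main(String[] args) throws InterruptedException {
        ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1) {
            @Override
            protected <V> RunnableScheduledFuture<V> decorateTask(
                    Runnable runnable, RunnableScheduledFuture<V> task) {
                // Delegating to 'task' keeps periodic rescheduling intact; a wrapper
                // around the bare 'runnable' would run the body once and stop.
                return task;
            }
        };
        final CountDownLatch latch = new CountDownLatch(3);
        Future<?> future = executor.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                latch.countDown();
            }
        }, 1, 1, TimeUnit.MILLISECONDS);
        latch.await(); // reaches zero only if the task really repeats
        future.cancel(true);
        executor.shutdown();
    }
}
```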
netty/netty/10649_10683
netty/netty
netty/netty/10649
netty/netty/10683
[ "keyword_pr_to_issue" ]
1ca7d5db8167f904395d879b8851eb35d6f596f3
065c39611e0f65a3c6d4650638b395258e1fd8f2
[ "SHA-512 is an overkill and not useful for this purpose. SHA-256 or SHA-384 is the way to go.", "@trustin @normanmaurer What do you think?", "As far as I understand how `FingerprintTrustManagerFactory` is meant to be used, I think it's better to deprecate it, and add a new implementation that uses a stronger hash function, or allow the hash function to be configured. Maybe the configurable implementation can be extracted into a new super-class of `FingerprintTrustManagerFactory`.", "I agree with @chrisvest here... ", "I don't see a reason to introduce a new `TrustManagerFactory` implementation class. We could deprecate the public constructors in favor of builders and static factory methods. e.g.\r\n\r\n```java\r\nFingerprintTrustManagerFactory f =\r\n FingerprintTrustManagerFactory\r\n .builder(\"SHA256\") // Always require the algorithm name\r\n .fingerprint(\"deadbeef...\")\r\n .fingerprint(\"cafecafe...\")\r\n .build();\r\n```\r\n\r\nThe public constructors could call the internal constructor that passes the message digest function, e.g.\r\n\r\n```java\r\n@Deprecated\r\npublic FingerprintTrustManagerFactory(String... fingerprints) {\r\n this(FingerprintTrustManagerFactory::sha1Fingerprint, fingerprints);\r\n}\r\n\r\n// Called by the builder or the legacy public constructors.\r\n// The example uses Java 8 Function but we can define a dedicated interface if necessary.\r\nFingerprintTrustManagerFactory(\r\n Function<? super X509Certificate, byte[]> fingerprintFunc,\r\n String... fingerprints) {\r\n ...\r\n}\r\n```\r\n\r\nWe could also consider implementing more versatile `TrustManagerFactory` by extracting the verification logic out of the `TrustManagerFactory`, e.g.\r\n\r\n```java\r\nSomeTrustManagerFactory\r\n .builder()\r\n .add(CertificateMatchers.fingerprint(\"SHA256\", \"deadbeef...\"))\r\n .add(\"bar.foo.com\", CertificateMatchers.strict())\r\n .add(\"*.foo.com\", CertificateMatchers.any());\r\n .build();\r\n```", "I like the idea with a builder and deprecating only the public constructor suggested by @trustin \r\n\r\n```java\r\nFingerprintTrustManagerFactory f =\r\n FingerprintTrustManagerFactory\r\n .builder(\"SHA256\") // Always require the algorithm name\r\n .fingerprint(\"deadbeef...\")\r\n .fingerprint(\"cafecafe...\")\r\n .build();\r\n```\r\n\r\nIf no objections, I'll implement this logic then." ]
[ "Add method to add a List of `fingerprints`.", "Also add `@deprecated` javadoc tag", "also add `@deprecated` javadoc tag ", "should we call `MessageDigest.getInstance(algorithm)` once in the constructor now to ensure it will not fail later on ? ", "see above... I think it is very unlikely that the \"constructing\" thread will use the factory itself. so I think it would be better to not waste resources in the `FastThreadLocal` and construct it directly.", "`EmptyArrays.BYTES`", "honestly this feels a bit like overkill.. Let's just keep it or use `String...`", "Calling `public Builder fingerprint(String fingerprint)` every time is a little painful that's why we need something to add fingerprints in bulk. Both `List` and `String...` will do the job.", "I disagree... what is the point of a builder if you will never call its methods multiple times ? ", "That's a thing too. ", "I think both points are valid. The builder may be used in multiple ways. For example, someone may want to hardcode fingerprints like the following:\r\n\r\n```java\r\nFingerprintTrustManagerFactory f =\r\n FingerprintTrustManagerFactory\r\n .builder(\"SHA256\")\r\n .fingerprint(\"deadbeef...\")\r\n .fingerprint(\"cafecafe...\")\r\n .build();\r\n```\r\n\r\nBut someone may want to load fingerprints from a file. In this case, `fingerprint()` would need to be called in a loop:\r\n\r\n```java\r\nFingerprintTrustManagerFactory.Builder b = FingerprintTrustManagerFactory.builder(\"SHA256\");\r\nfor (String s : loadFingerprintsFrom(file)) {\r\n b.fingerprint(s);\r\n}\r\n```\r\n\r\nHere `fingerprint(String...)` might make it look a bit nicer:\r\n\r\n```java\r\nFingerprintTrustManagerFactory f =\r\n FingerprintTrustManagerFactory\r\n .builder(\"SHA256\")\r\n .fingerprint(loadFingerprintsFrom(file))\r\n .build();\r\n```\r\n\r\nAnyway, I am fine to keep it as-is, or updating `fingerprint()` to accept `String...`.", "I am ok with `String...` but then call it fingerprints(...)", "If I understand you correctly, you're suggesting getting rid of `FastThreadLocal`. I was not sure about it since I am new to the code. Okay, I can remove `FastThreadLocal` and create a `MessageDigest` directly in the constructor if it's fine.", "If I am not missing something, that could not be used here because `EmptyArrays.EMPTY_BYTES` is `byte[]`.\r\n\r\nhttps://github.com/netty/netty/blob/00afb19d7a37de21b35ce4f6cb3fa7f74809f2ab/common/src/main/java/io/netty/util/internal/EmptyArrays.java#L28", "ah sorry I missed that it is `[][]`", "no ... What I was saying that you should keep the `FastThreadLocal` but not access it in the constructor but better just create a new instance in the constructor and use it here. ", "Okay, sure I can create a `MessageDigest` in the constructor as an early-check that the algorithm exists. The initialization of `FastThreadLocal` would still stay in the constructor since it now requires an algorithm identifier.", "nit: include `algorithm` in the message as well. ", "nit: include the actual length and the expected length in the message as well ", "The wrapped exception usually mention the algorithm but nevertheless let's include it to the top message as well.", "How about moving this method before the member fields in this class? i.e.\r\n\r\n```\r\nclass MyClass {\r\n\r\n private static final ...;\r\n\r\n public static Builder builder() { ... }\r\n\r\n private final ...;\r\n\r\n public MyClass() { ... 
}\r\n}\r\n```", "I'd prefer having this in a top-level class as `FingerprintTrustManagerFactoryBuilder`.", "- How about accepting `CharSequence...` and then converting it to a `String`?\r\n- Could we also add the version that accepts `Iterable<? extends CharSequence>`?", "What would be the expected behavior when `fingerprints` is empty? Throwing an `IllegalStateException` in `build()`?", "The way this works means the `FingerprintTrustManagerFactory` cannot be shared between multiple threads, because only the constructing thread has initialised their thread-local, and there's not enough information in the factory instance for other threads to initialise their thread-locals as well.", "@chrisvest huh... can you explain to me why is this ? ", "@chrisvest was right here... you need to override `initialValue()` here ", "nevermind... https://github.com/netty/netty/pull/10683#discussion_r506221085", "Since `FingerprintTrustManagerFactory` is now configured with a hash algorithm, `tlmd` needs to be initialized in the constructor. It used to be before. Then, `initialValue()` was removed because we wanted to create an instance of `MessageDigest` earlier in the constructor to check if the algorithm exists. If I understand correctly, we just want to revert the logic to the previous version. I am not sure if we need to create another `MessageDigest` in the constructor. Please let me know if so.", "@chrisvest Thanks for pointing it out!", "> * How about accepting `CharSequence...` and then converting it to a `String`?\r\n\r\nLet me try.\r\n\r\n> * Could we also add the version that accepts `Iterable<? extends CharSequence>`?\r\n\r\nWe've discussed adding `fingerprings(List)` but decided not to do that\r\n\r\nhttps://github.com/netty/netty/pull/10683#discussion_r504560104\r\n\r\nI am fine with adding `fingerprints(Iterable<? extends CharSequence>)` as long as other are fine.", "> I am not sure if we need to create another MessageDigest in the constructor. Please let me know if so.\r\n\r\nWell, looks like there is no harm if we do that. So, I'll add such a check if no objections.", "Since `tlmd` is an instance field, you can create the `FastThreadLocal` with an `initialValue` method that has all the logic necessary to create `MessageDigest` instances. I'd still check that the algorithm exists before creating the FTL, though, because each FTL instance will claim some static resources that we'd prefer not to leak.", "Currently, no fingerprints are allowed. That would mean that no certificate is going to be trusted. In theory, it's okay but it looks unlikely that an application may want to use such a paranoid `TrustManager`. Therefore, I think it would be fine to throw an exception if no fingerprints are provided. I'd put this check in to a constructor since all the checks are currently done there, and `FingerprintTrustManagerFactory` may be still created by calling the deprecated constructors.", "I think it was more about `fingerprint(String)` and `fingerprints(String...)`. We do need `fingerprints(Iterable<? extends CharSequence>)` for the users who want to feed the fingerprints from a collection.", "Sure, no problem. I've added `fingerprints(Iterable<? extends CharSequence>)`.", "How about moving this validation to `build()` and raising `IllegalStateException`? Otherwise, a user will get `IllegalArgumentException` on `build()`, which does not make sense. 
We could add an assertion here instead.", "If we would like to prohibit creating a `TrustManager` with no fingerprints, then I think this check should be in the constructor because `FingerprintTrustManagerFactory` can still be created by calling the deprecated constructors.\r\n\r\nIf I understand you correctly, you mean that `IllegalArgumentException` may confuse users whey they call `build()` that has no arguments itself. We can add an extra check in `build()` that throws `IllegalStateException` if the list of fingerprints is empty. I'll update the code then. Please let me know if I missed something.", "```suggestion\r\n * https://www.apache.org/licenses/LICENSE-2.0\r\n```", "```suggestion\r\n * https://www.apache.org/licenses/LICENSE-2.0\r\n```", "Thanks for fixing this @normanmaurer !" ]
"2020-10-14T08:26:24Z"
[]
Better hash algorithm in FingerprintTrustManagerFactory
`FingerprintTrustManagerFactory` checks whether a certificate is trusted by comparing its SHA-1 hash to a list of pre-configured ones: https://github.com/netty/netty/blob/0cde4d9cb4d19ddc0ecafc5be7c5f7c781a1f6e9/handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactory.java#L85

Unfortunately, SHA-1 is nowadays considered insecure. The issue was reported by [LGTM](https://lgtm.com/projects/g/netty/netty/snapshot/090bf3d107cb3099317e1cd9e0d661ac0797c126/files/handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactory.java?sort=name&dir=ASC&mode=heatmap#xedd0183a9626d84c:1).

I see the following ways to fix it:

1. Update `FingerprintTrustManagerFactory` to use a stronger algorithm such as SHA-512. Unfortunately, this will most likely break applications that pass a SHA-1 hash to the class.
1. Deprecate `FingerprintTrustManagerFactory` and add a new implementation that uses a stronger algorithm.
1. Update `FingerprintTrustManagerFactory` to determine the hash algorithm from the length of the hash passed to the constructor. If a caller passes a SHA-1 hash, the class can also print a warning.

Please let me know if one of the options above is fine (or if you see a better option), and I'll open a pull request.
[ "handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactory.java" ]
[ "handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactory.java", "handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactoryBuilder.java" ]
[ "handler/src/test/java/io/netty/handler/ssl/Java8SslTestUtils.java", "handler/src/test/java/io/netty/handler/ssl/SslContextTrustManagerTest.java", "handler/src/test/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactoryTest.java" ]
diff --git a/handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactory.java b/handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactory.java index 8a023d476e4..ff6772a695a 100644 --- a/handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactory.java +++ b/handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactory.java @@ -39,7 +39,7 @@ import java.util.regex.Pattern; /** - * An {@link TrustManagerFactory} that trusts an X.509 certificate whose SHA1 checksum matches. + * An {@link TrustManagerFactory} that trusts an X.509 certificate whose hash matches. * <p> * <strong>NOTE:</strong> It is recommended to verify certificates and their chain to prevent * <a href="https://en.wikipedia.org/wiki/Man-in-the-middle_attack">Man-in-the-middle attacks</a>. @@ -51,22 +51,30 @@ * actually perform Man-in-the-middle attacks and thus present a different certificate fingerprint. * </p> * <p> - * The SHA1 checksum of an X.509 certificate is calculated from its DER encoded format. You can get the fingerprint of + * The hash of an X.509 certificate is calculated from its DER encoded format. You can get the fingerprint of * an X.509 certificate using the {@code openssl} command. For example: * * <pre> - * $ openssl x509 -fingerprint -sha1 -in my_certificate.crt - * SHA1 Fingerprint=4E:85:10:55:BC:7B:12:08:D1:EA:0A:12:C9:72:EE:F3:AA:B2:C7:CB + * $ openssl x509 -fingerprint -sha256 -in my_certificate.crt + * SHA256 Fingerprint=1C:53:0E:6B:FF:93:F0:DE:C2:E6:E7:9D:10:53:58:FF:DD:8E:68:CD:82:D9:C9:36:9B:43:EE:B3:DC:13:68:FB * -----BEGIN CERTIFICATE----- - * MIIBqjCCAROgAwIBAgIJALiT3Nvp0kvmMA0GCSqGSIb3DQEBBQUAMBYxFDASBgNV - * BAMTC2V4YW1wbGUuY29tMCAXDTcwMDEwMTAwMDAwMFoYDzk5OTkxMjMxMjM1OTU5 - * WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAw - * gYkCgYEAnadvODG0QCiHhaFZlLHtr5gLIkDQS8ErZ//KfqeCHTC/KJsl3xYFk0zG - * aCv2FcmkOlokm77qV8qOW2DZdND7WuYzX6nLVuLb+GYxZ7b45iMAbAajvGh8jc9U - * o07fUIahGqTDAIAGCWsoLUOQ9nMzO/8GRHcXJAeQ2MGY2VpCcv0CAwEAATANBgkq - * hkiG9w0BAQUFAAOBgQBpRCnmjmNM0D7yrpkUJpBTNiqinhKLbeOvPWm+YmdInUUs - * LoMu0mZ1IANemLwqbwJJ76fknngeB+YuVAj46SurvVCV6ekwHcbgpW1u063IRwKk - * tQhOBO0HQxldUS4+4MYv/kuvnKkbjfgh5qfWw89Kx4kD+cycpP4yPtgDGk8ZMA== + * MIIC/jCCAeagAwIBAgIIIMONxElm0AIwDQYJKoZIhvcNAQELBQAwPjE8MDoGA1UE + * AwwzZThhYzAyZmEwZDY1YTg0MjE5MDE2MDQ1ZGI4YjA1YzQ4NWI0ZWNkZi5uZXR0 + * eS50ZXN0MCAXDTEzMDgwMjA3NTEzNloYDzk5OTkxMjMxMjM1OTU5WjA+MTwwOgYD + * VQQDDDNlOGFjMDJmYTBkNjVhODQyMTkwMTYwNDVkYjhiMDVjNDg1YjRlY2RmLm5l + * dHR5LnRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb+HBO3C0U + * RBKvDUgJHbhIlBye8X/cbNH3lDq3XOOFBz7L4XZKLDIXS+FeQqSAUMo2otmU+Vkj + * 0KorshMjbUXfE1KkTijTMJlaga2M2xVVt21fRIkJNWbIL0dWFLWyRq7OXdygyFkI + * iW9b2/LYaePBgET22kbtHSCAEj+BlSf265+1rNxyAXBGGGccCKzEbcqASBKHOgVp + * 6pLqlQAfuSy6g/OzGzces3zXRrGu1N3pBIzAIwCW429n52ZlYfYR0nr+REKDnRrP + * IIDsWASmEHhBezTD+v0qCJRyLz2usFgWY+7agUJE2yHHI2mTu2RAFngBilJXlMCt + * VwT0xGuQxkbHAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAEv8N7Xm8qaY2FgrOc6P + * a1GTgA+AOb3aU33TGwAR86f+nLf6BSPaohcQfOeJid7FkFuYInuXl+oqs+RqM/j8 + * R0E5BuGYY2wOKpL/PbFi1yf/Kyvft7KVh8e1IUUec/i1DdYTDB0lNWvXXxjfMKGL + * ct3GMbEHKvLfHx42Iwz/+fva6LUrO4u2TDfv0ycHuR7UZEuC1DJ4xtFhbpq/QRAj + * CyfNx3cDc7L2EtJWnCmivTFA9l8MF1ZPMDSVd4ecQ7B0xZIFQ5cSSFt7WGaJCsGM + * zYkU4Fp4IykQcWxdlNX7wJZRwQ2TZJFFglpTiFZdeq6I6Ad9An1Encpz5W8UJ4tv + * hmw= * -----END CERTIFICATE----- * </pre> * </p> @@ -75,20 +83,18 @@ public final class FingerprintTrustManagerFactory extends SimpleTrustManagerFact private static final Pattern 
FINGERPRINT_PATTERN = Pattern.compile("^[0-9a-fA-F:]+$"); private static final Pattern FINGERPRINT_STRIP_PATTERN = Pattern.compile(":"); - private static final int SHA1_BYTE_LEN = 20; - private static final int SHA1_HEX_LEN = SHA1_BYTE_LEN * 2; - private static final FastThreadLocal<MessageDigest> tlmd = new FastThreadLocal<MessageDigest>() { - @Override - protected MessageDigest initialValue() { - try { - return MessageDigest.getInstance("SHA1"); - } catch (NoSuchAlgorithmException e) { - // All Java implementation must have SHA1 digest algorithm. - throw new Error(e); - } - } - }; + /** + * Creates a builder for {@link FingerprintTrustManagerFactory}. + * + * @param algorithm a hash algorithm + * @return a builder + */ + public static FingerprintTrustManagerFactoryBuilder builder(String algorithm) { + return new FingerprintTrustManagerFactoryBuilder(algorithm); + } + + private final FastThreadLocal<MessageDigest> tlmd; private final TrustManager tm = new X509TrustManager() { @@ -136,45 +142,99 @@ public X509Certificate[] getAcceptedIssuers() { /** * Creates a new instance. * + * @deprecated This deprecated constructor uses SHA-1 that is considered insecure. + * It is recommended to specify a stronger hash algorithm, such as SHA-256, + * by calling {@link FingerprintTrustManagerFactory#builder(String)} method. + * * @param fingerprints a list of SHA1 fingerprints in hexadecimal form */ + @Deprecated public FingerprintTrustManagerFactory(Iterable<String> fingerprints) { - this(toFingerprintArray(fingerprints)); + this("SHA1", toFingerprintArray(fingerprints)); } /** * Creates a new instance. * + * @deprecated This deprecated constructor uses SHA-1 that is considered insecure. + * It is recommended to specify a stronger hash algorithm, such as SHA-256, + * by calling {@link FingerprintTrustManagerFactory#builder(String)} method. + * * @param fingerprints a list of SHA1 fingerprints in hexadecimal form */ + @Deprecated public FingerprintTrustManagerFactory(String... fingerprints) { - this(toFingerprintArray(Arrays.asList(fingerprints))); + this("SHA1", toFingerprintArray(Arrays.asList(fingerprints))); } /** * Creates a new instance. * + * @deprecated This deprecated constructor uses SHA-1 that is considered insecure. + * It is recommended to specify a stronger hash algorithm, such as SHA-256, + * by calling {@link FingerprintTrustManagerFactory#builder(String)} method. + * * @param fingerprints a list of SHA1 fingerprints */ + @Deprecated public FingerprintTrustManagerFactory(byte[]... fingerprints) { + this("SHA1", fingerprints); + } + + /** + * Creates a new instance. 
+ * + * @param algorithm a hash algorithm + * @param fingerprints a list of fingerprints + */ + FingerprintTrustManagerFactory(final String algorithm, byte[][] fingerprints) { + ObjectUtil.checkNotNull(algorithm, "algorithm"); ObjectUtil.checkNotNull(fingerprints, "fingerprints"); + if (fingerprints.length == 0) { + throw new IllegalArgumentException("No fingerprints provided"); + } + + // check early if the hash algorithm is available + final MessageDigest md; + try { + md = MessageDigest.getInstance(algorithm); + } catch (NoSuchAlgorithmException e) { + throw new IllegalArgumentException( + String.format("Unsupported hash algorithm: %s", algorithm), e); + } + + int hashLength = md.getDigestLength(); List<byte[]> list = new ArrayList<byte[]>(fingerprints.length); for (byte[] f: fingerprints) { if (f == null) { break; } - if (f.length != SHA1_BYTE_LEN) { - throw new IllegalArgumentException("malformed fingerprint: " + - ByteBufUtil.hexDump(Unpooled.wrappedBuffer(f)) + " (expected: SHA1)"); + if (f.length != hashLength) { + throw new IllegalArgumentException( + String.format("malformed fingerprint (length is %d but expected %d): %s", + f.length, hashLength, ByteBufUtil.hexDump(Unpooled.wrappedBuffer(f)))); } list.add(f.clone()); } + this.tlmd = new FastThreadLocal<MessageDigest>() { + + @Override + protected MessageDigest initialValue() { + try { + return MessageDigest.getInstance(algorithm); + } catch (NoSuchAlgorithmException e) { + throw new IllegalArgumentException( + String.format("Unsupported hash algorithm: %s", algorithm), e); + } + } + }; + this.fingerprints = list.toArray(new byte[0][]); } - private static byte[][] toFingerprintArray(Iterable<String> fingerprints) { + static byte[][] toFingerprintArray(Iterable<String> fingerprints) { ObjectUtil.checkNotNull(fingerprints, "fingerprints"); List<byte[]> list = new ArrayList<byte[]>(); @@ -187,9 +247,6 @@ private static byte[][] toFingerprintArray(Iterable<String> fingerprints) { throw new IllegalArgumentException("malformed fingerprint: " + f); } f = FINGERPRINT_STRIP_PATTERN.matcher(f).replaceAll(""); - if (f.length() != SHA1_HEX_LEN) { - throw new IllegalArgumentException("malformed fingerprint: " + f + " (expected: SHA1)"); - } list.add(StringUtil.decodeHexDump(f)); } diff --git a/handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactoryBuilder.java b/handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactoryBuilder.java new file mode 100644 index 00000000000..38182661d8a --- /dev/null +++ b/handler/src/main/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactoryBuilder.java @@ -0,0 +1,89 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.handler.ssl.util; + +import io.netty.util.internal.ObjectUtil; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * A builder for creating {@link FingerprintTrustManagerFactory}. 
+ */ +public final class FingerprintTrustManagerFactoryBuilder { + + /** + * A hash algorithm for fingerprints. + */ + private final String algorithm; + + /** + * A list of fingerprints. + */ + private final List<String> fingerprints = new ArrayList<String>(); + + /** + * Creates a builder. + * + * @param algorithm a hash algorithm + */ + FingerprintTrustManagerFactoryBuilder(String algorithm) { + this.algorithm = ObjectUtil.checkNotNull(algorithm, "algorithm"); + } + + /** + * Adds fingerprints. + * + * @param fingerprints a number of fingerprints + * @return the same builder + */ + public FingerprintTrustManagerFactoryBuilder fingerprints(CharSequence... fingerprints) { + ObjectUtil.checkNotNull(fingerprints, "fingerprints"); + return fingerprints(Arrays.asList(fingerprints)); + } + + /** + * Adds fingerprints. + * + * @param fingerprints a number of fingerprints + * @return the same builder + */ + public FingerprintTrustManagerFactoryBuilder fingerprints(Iterable<? extends CharSequence> fingerprints) { + ObjectUtil.checkNotNull(fingerprints, "fingerprints"); + for (CharSequence fingerprint : fingerprints) { + if (fingerprint == null) { + throw new IllegalArgumentException("One of the fingerprints is null"); + } + this.fingerprints.add(fingerprint.toString()); + } + return this; + } + + /** + * Creates a {@link FingerprintTrustManagerFactory}. + * + * @return a new {@link FingerprintTrustManagerFactory} + */ + public FingerprintTrustManagerFactory build() { + if (fingerprints.isEmpty()) { + throw new IllegalStateException("No fingerprints provided"); + } + return new FingerprintTrustManagerFactory(this.algorithm, + FingerprintTrustManagerFactory.toFingerprintArray(this.fingerprints)); + } +}
diff --git a/handler/src/test/java/io/netty/handler/ssl/Java8SslTestUtils.java b/handler/src/test/java/io/netty/handler/ssl/Java8SslTestUtils.java index 50fb935d9dc..dc3a2774bfe 100644 --- a/handler/src/test/java/io/netty/handler/ssl/Java8SslTestUtils.java +++ b/handler/src/test/java/io/netty/handler/ssl/Java8SslTestUtils.java @@ -22,11 +22,16 @@ import javax.net.ssl.SNIServerName; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLParameters; +import java.io.InputStream; import java.security.Provider; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; import java.util.Arrays; import java.util.Collections; -final class Java8SslTestUtils { +import static org.junit.Assert.assertNotNull; + +public final class Java8SslTestUtils { private Java8SslTestUtils() { } @@ -53,4 +58,27 @@ static SSLEngine wrapSSLEngineForTesting(SSLEngine engine) { } return engine; } + + public static X509Certificate[] loadCertCollection(String... resourceNames) + throws Exception { + CertificateFactory certFactory = CertificateFactory + .getInstance("X.509"); + + X509Certificate[] certCollection = new X509Certificate[resourceNames.length]; + for (int i = 0; i < resourceNames.length; i++) { + String resourceName = resourceNames[i]; + InputStream is = null; + try { + is = SslContextTest.class.getResourceAsStream(resourceName); + assertNotNull("Cannot find " + resourceName, is); + certCollection[i] = (X509Certificate) certFactory + .generateCertificate(is); + } finally { + if (is != null) { + is.close(); + } + } + } + return certCollection; + } } diff --git a/handler/src/test/java/io/netty/handler/ssl/SslContextTrustManagerTest.java b/handler/src/test/java/io/netty/handler/ssl/SslContextTrustManagerTest.java index 04b6a8e08b4..6f209e0757b 100644 --- a/handler/src/test/java/io/netty/handler/ssl/SslContextTrustManagerTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/SslContextTrustManagerTest.java @@ -15,20 +15,18 @@ */ package io.netty.handler.ssl; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertNotNull; - -import java.io.InputStream; -import java.security.cert.CertificateException; -import java.security.cert.CertificateFactory; -import java.security.cert.X509Certificate; -import java.util.Arrays; +import org.junit.Test; import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509TrustManager; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Arrays; -import org.junit.Test; +import static io.netty.handler.ssl.Java8SslTestUtils.loadCertCollection; +import static org.junit.Assert.fail; +import static org.junit.Assert.assertNotNull; public class SslContextTrustManagerTest { @Test @@ -121,27 +119,4 @@ private static X509TrustManager getTrustManager(String[] resourceNames) throw new Exception( "Unable to find any X509TrustManager from this factory."); } - - private static X509Certificate[] loadCertCollection(String[] resourceNames) - throws Exception { - CertificateFactory certFactory = CertificateFactory - .getInstance("X.509"); - - X509Certificate[] certCollection = new X509Certificate[resourceNames.length]; - for (int i = 0; i < resourceNames.length; i++) { - String resourceName = resourceNames[i]; - InputStream is = null; - try { - is = SslContextTest.class.getResourceAsStream(resourceName); - assertNotNull("Cannot find " + resourceName, is); - certCollection[i] = (X509Certificate) certFactory - .generateCertificate(is); - } finally { - if 
(is != null) { - is.close(); - } - } - } - return certCollection; - } } diff --git a/handler/src/test/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactoryTest.java b/handler/src/test/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactoryTest.java new file mode 100644 index 00000000000..12c0d894deb --- /dev/null +++ b/handler/src/test/java/io/netty/handler/ssl/util/FingerprintTrustManagerFactoryTest.java @@ -0,0 +1,112 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.handler.ssl.util; + +import org.junit.Test; + +import javax.net.ssl.X509TrustManager; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; + +import static io.netty.handler.ssl.Java8SslTestUtils.loadCertCollection; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class FingerprintTrustManagerFactoryTest { + + private static final String FIRST_CERT_SHA1_FINGERPRINT + = "18:C7:C2:76:1F:DF:72:3B:2A:A7:BB:2C:B0:30:D4:C0:C0:72:AD:84"; + + private static final String FIRST_CERT_SHA256_FINGERPRINT + = "1C:53:0E:6B:FF:93:F0:DE:C2:E6:E7:9D:10:53:58:FF:" + + "DD:8E:68:CD:82:D9:C9:36:9B:43:EE:B3:DC:13:68:FB"; + + private static final X509Certificate[] FIRST_CHAIN; + + private static final X509Certificate[] SECOND_CHAIN; + + static { + try { + FIRST_CHAIN = loadCertCollection("test.crt"); + SECOND_CHAIN = loadCertCollection("test2.crt"); + } catch (Exception e) { + throw new Error(e); + } + } + + @Test(expected = IllegalArgumentException.class) + public void testFingerprintWithInvalidLength() { + FingerprintTrustManagerFactory.builder("SHA-256").fingerprints("00:00:00").build(); + } + + @Test(expected = IllegalArgumentException.class) + public void testFingerprintWithUnexpectedCharacters() { + FingerprintTrustManagerFactory.builder("SHA-256").fingerprints("00:00:00\n").build(); + } + + @Test(expected = IllegalStateException.class) + public void testWithNoFingerprints() { + FingerprintTrustManagerFactory.builder("SHA-256").build(); + } + + @Test(expected = IllegalArgumentException.class) + public void testWithNullFingerprint() { + FingerprintTrustManagerFactory + .builder("SHA-256") + .fingerprints(FIRST_CERT_SHA256_FINGERPRINT, null) + .build(); + } + + @Test + public void testValidSHA1Fingerprint() throws Exception { + FingerprintTrustManagerFactory factory = new FingerprintTrustManagerFactory(FIRST_CERT_SHA1_FINGERPRINT); + + assertTrue(factory.engineGetTrustManagers().length > 0); + assertTrue(factory.engineGetTrustManagers()[0] instanceof X509TrustManager); + X509TrustManager tm = (X509TrustManager) factory.engineGetTrustManagers()[0]; + tm.checkClientTrusted(FIRST_CHAIN, "test"); + } + + @Test + public void testTrustedCertificateWithSHA256Fingerprint() throws Exception { + FingerprintTrustManagerFactory factory = FingerprintTrustManagerFactory + .builder("SHA-256") + .fingerprints(FIRST_CERT_SHA256_FINGERPRINT) + 
.build(); + + X509Certificate[] keyCertChain = loadCertCollection("test.crt"); + assertNotNull(keyCertChain); + assertTrue(factory.engineGetTrustManagers().length > 0); + assertTrue(factory.engineGetTrustManagers()[0] instanceof X509TrustManager); + X509TrustManager tm = (X509TrustManager) factory.engineGetTrustManagers()[0]; + tm.checkClientTrusted(keyCertChain, "test"); + } + + @Test(expected = CertificateException.class) + public void testUntrustedCertificateWithSHA256Fingerprint() throws Exception { + FingerprintTrustManagerFactory factory = FingerprintTrustManagerFactory + .builder("SHA-256") + .fingerprints(FIRST_CERT_SHA256_FINGERPRINT) + .build(); + + assertTrue(factory.engineGetTrustManagers().length > 0); + assertTrue(factory.engineGetTrustManagers()[0] instanceof X509TrustManager); + X509TrustManager tm = (X509TrustManager) factory.engineGetTrustManagers()[0]; + tm.checkClientTrusted(SECOND_CHAIN, "test"); + } + +}
val
test
"2020-10-17T09:49:44"
"2020-10-07T09:54:31Z"
artem-smotrakov
val
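For reference, client code configures the factory from the patch above like this; a minimal sketch that reuses the SHA-256 fingerprint of the test certificate from this record (the wrapper class and method name are illustrative):

```java
import io.netty.handler.ssl.util.FingerprintTrustManagerFactory;

import javax.net.ssl.TrustManagerFactory;

public final class FingerprintFactoryExample {
    public static TrustManagerFactory sha256Factory() {
        // build() throws IllegalStateException when no fingerprints were added, and
        // the constructor rejects unknown algorithms and hashes of the wrong length,
        // so a misconfiguration fails fast instead of silently trusting nothing.
        return FingerprintTrustManagerFactory
                .builder("SHA-256")
                .fingerprints("1C:53:0E:6B:FF:93:F0:DE:C2:E6:E7:9D:10:53:58:FF:"
                        + "DD:8E:68:CD:82:D9:C9:36:9B:43:EE:B3:DC:13:68:FB")
                .build();
    }
}
```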
netty/netty/10731_10737
netty/netty
netty/netty/10731
netty/netty/10737
[ "keyword_pr_to_issue" ]
9da336f2fc4d5e7aa761191c3eb5edff334c03c7
9a02832fdb10afbc09f144a462d604176fb11049
[ "@normanmaurer While having fun implementing this I've found something odd by adding this test to `AbstractByteBufTest`:\r\n```java\r\n @Test\r\n public void testEndiannessIndexOf() {\r\n buffer.clear();\r\n final int v = 1\r\n | (2 << 8)\r\n | (3 << 16)\r\n | (2 << 24);\r\n buffer.writeIntLE(v);\r\n buffer.writeByte((byte) 1);\r\n\r\n assertEquals(-1, buffer.indexOf(1, 4, (byte) 1));\r\n assertEquals(-1, buffer.indexOf(4, 1, (byte) 1));\r\n assertEquals(1, buffer.indexOf(1, 4, (byte) 2));\r\n assertEquals(3, buffer.indexOf(4, 1, (byte) 2));\r\n }\r\n```\r\nSee the test results:\r\n\r\n![image](https://user-images.githubusercontent.com/13125299/97137221-99819f00-1755-11eb-89e7-4755c1c8feef.png)\r\n\r\nI don't think is expected...or I'm not correctly using the `writeXXXLE` methods?\r\n\r\nI can open a separate issue for this if needed\r\n\r\nNOTE: this test is failing with the current `indexOf` implementation, not the new one!", "Possible to do Draft PR?", "@hyperxpro \r\nAlready done on a private branch: need first to understand what's the problem with LE/BE order first!", "@franz1981 I don't get any test failures when I run all the `AbstractByteBufTest`. Both `master` and `4.1` branches pass. Maybe you have some local changes somewhere.", "@chrisvest you should add the test I've linked on https://github.com/netty/netty/issues/10731#issuecomment-716315268", "Nice article. Maybe @richardstartin would be interested, since he mentioned Netty specifically." ]
[ "you can do this as long pattern = (byteToFind & 0xFFL) * 0x101010101010101L", "Since you're doing a little endian check elsewhere, I would consider making little and big endian variants of this method, just because it makes it harder to mix and match", "This is 2KB, but if you change this to the multiplication mentioned earlier, I don't see benefit from caching", "Currently this one isn't used yet, but `firstAnyPattern` instead, but I got the good idea, I will do it for both, :+1: ", "That's better, it's now very cheap, great", "Will do it right after breakfast :P \r\n", "This method isn't used.", "Did you also try a small counted loop?", "I wonder if this class would make sense as a top-level ByteSearch class, and adopt more methods.", "Why would `fromIndex` ever be negative?", "I suppose the first `linearFirstIndexOf` call for platforms that don't support unaligned access is because we have no idea what the underlying alignment of the buffer is. But do we need this unrolled call on platforms that do support unaligned access?", "Don't we need to handle a tail? Or is that what the initial unrolled call is for?", "Exactly!", "Yep, it won't improve things for unpredictable inputs (that makes sense) but seems to help a lot the other cases", "Copied from the original implementation, suppose can be removed now", "Yes yes and yes :)", "Yess with byte indexes too", "Yep can be removed given that it won't improve things as expected" ]
"2020-10-28T08:09:27Z"
[ "improvement", "discussion" ]
Implement SWAR indexOf byte search
Currently Netty uses an expensive (GC-wise too) `ByteBufUtil::indexOf` implementation based on `ByteProcessor.IndexOfProcessor` to avoid per-byte bound checks; this could be changed into what @richardstartin has implemented in https://richardstartin.github.io/posts/finding-bytes.

The same could be applied to `ByteBuf::indexOf`, which is *not* using `ByteBufUtil::indexOf` (!!!). @normanmaurer Any idea why?
[ "buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java", "buffer/src/main/java/io/netty/buffer/ByteBufUtil.java" ]
[ "buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java", "buffer/src/main/java/io/netty/buffer/ByteBufUtil.java", "microbench/src/main/java/io/netty/microbench/buffer/ByteBufIndexOfBenchmark.java" ]
[ "buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java" ]
diff --git a/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java b/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java index e7b514b7dd7..c13efdd0046 100644 --- a/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java @@ -1251,43 +1251,9 @@ public String toString(int index, int length, Charset charset) { @Override public int indexOf(int fromIndex, int toIndex, byte value) { if (fromIndex <= toIndex) { - return firstIndexOf(fromIndex, toIndex, value); - } else { - return lastIndexOf(fromIndex, toIndex, value); - } - } - - private int firstIndexOf(int fromIndex, int toIndex, byte value) { - fromIndex = Math.max(fromIndex, 0); - if (fromIndex >= toIndex || capacity() == 0) { - return -1; - } - checkIndex(fromIndex, toIndex - fromIndex); - - for (int i = fromIndex; i < toIndex; i ++) { - if (_getByte(i) == value) { - return i; - } - } - - return -1; - } - - private int lastIndexOf(int fromIndex, int toIndex, byte value) { - fromIndex = Math.min(fromIndex, capacity()); - if (fromIndex < 0 || capacity() == 0) { - return -1; - } - - checkIndex(toIndex, fromIndex - toIndex); - - for (int i = fromIndex - 1; i >= toIndex; i --) { - if (_getByte(i) == value) { - return i; - } + return ByteBufUtil.firstIndexOf(this, fromIndex, toIndex, value); } - - return -1; + return ByteBufUtil.lastIndexOf(this, fromIndex, toIndex, value); } @Override diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java b/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java index f8ca525b761..118f3a009a9 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java @@ -389,16 +389,124 @@ private static long compareUintBigEndianB( return 0; } + private static final class SWARByteSearch { + + private static long compilePattern(byte byteToFind) { + return (byteToFind & 0xFFL) * 0x101010101010101L; + } + + private static int firstAnyPattern(long word, long pattern, boolean leading) { + long input = word ^ pattern; + long tmp = (input & 0x7F7F7F7F7F7F7F7FL) + 0x7F7F7F7F7F7F7F7FL; + tmp = ~(tmp | input | 0x7F7F7F7F7F7F7F7FL); + final int binaryPosition = leading? Long.numberOfLeadingZeros(tmp) : Long.numberOfTrailingZeros(tmp); + return binaryPosition >>> 3; + } + } + + private static int unrolledFirstIndexOf(AbstractByteBuf buffer, int fromIndex, int byteCount, byte value) { + assert byteCount > 0 && byteCount < 8; + if (buffer._getByte(fromIndex) == value) { + return fromIndex; + } + if (byteCount == 1) { + return -1; + } + if (buffer._getByte(fromIndex + 1) == value) { + return fromIndex + 1; + } + if (byteCount == 2) { + return -1; + } + if (buffer._getByte(fromIndex + 2) == value) { + return fromIndex + 2; + } + if (byteCount == 3) { + return -1; + } + if (buffer._getByte(fromIndex + 3) == value) { + return fromIndex + 3; + } + if (byteCount == 4) { + return -1; + } + if (buffer._getByte(fromIndex + 4) == value) { + return fromIndex + 4; + } + if (byteCount == 5) { + return -1; + } + if (buffer._getByte(fromIndex + 5) == value) { + return fromIndex + 5; + } + if (byteCount == 6) { + return -1; + } + if (buffer._getByte(fromIndex + 6) == value) { + return fromIndex + 6; + } + return -1; + } + + /** + * This is using a SWAR (SIMD Within A Register) batch read technique to minimize bound-checks and improve memory + * usage while searching for {@code value}. 
+ */ + static int firstIndexOf(AbstractByteBuf buffer, int fromIndex, int toIndex, byte value) { + fromIndex = Math.max(fromIndex, 0); + if (fromIndex >= toIndex || buffer.capacity() == 0) { + return -1; + } + final int length = toIndex - fromIndex; + buffer.checkIndex(fromIndex, length); + if (!PlatformDependent.isUnaligned()) { + return linearFirstIndexOf(buffer, fromIndex, toIndex, value); + } + assert PlatformDependent.isUnaligned(); + int offset = fromIndex; + final int byteCount = length & 7; + if (byteCount > 0) { + final int index = unrolledFirstIndexOf(buffer, fromIndex, byteCount, value); + if (index != -1) { + return index; + } + offset += byteCount; + if (offset == toIndex) { + return -1; + } + } + final int longCount = length >>> 3; + final ByteOrder nativeOrder = ByteOrder.nativeOrder(); + final boolean isNative = nativeOrder == buffer.order(); + final boolean useLE = nativeOrder == ByteOrder.LITTLE_ENDIAN; + final long pattern = SWARByteSearch.compilePattern(value); + for (int i = 0; i < longCount; i++) { + // use the faster available getLong + final long word = useLE? buffer._getLongLE(offset) : buffer._getLong(offset); + int index = SWARByteSearch.firstAnyPattern(word, pattern, isNative); + if (index < Long.BYTES) { + return offset + index; + } + offset += Long.BYTES; + } + return -1; + } + + private static int linearFirstIndexOf(AbstractByteBuf buffer, int fromIndex, int toIndex, byte value) { + for (int i = fromIndex; i < toIndex; i++) { + if (buffer._getByte(i) == value) { + return i; + } + } + return -1; + } + /** * The default implementation of {@link ByteBuf#indexOf(int, int, byte)}. * This method is useful when implementing a new buffer type. */ public static int indexOf(ByteBuf buffer, int fromIndex, int toIndex, byte value) { - if (fromIndex <= toIndex) { - return firstIndexOf(buffer, fromIndex, toIndex, value); - } else { - return lastIndexOf(buffer, fromIndex, toIndex, value); - } + return buffer.indexOf(fromIndex, toIndex, value); } /** @@ -477,23 +585,21 @@ public static ByteBuf readBytes(ByteBufAllocator alloc, ByteBuf buffer, int leng } } - private static int firstIndexOf(ByteBuf buffer, int fromIndex, int toIndex, byte value) { - fromIndex = Math.max(fromIndex, 0); - if (fromIndex >= toIndex || buffer.capacity() == 0) { - return -1; - } - - return buffer.forEachByte(fromIndex, toIndex - fromIndex, new ByteProcessor.IndexOfProcessor(value)); - } - - private static int lastIndexOf(ByteBuf buffer, int fromIndex, int toIndex, byte value) { - int capacity = buffer.capacity(); + static int lastIndexOf(AbstractByteBuf buffer, int fromIndex, int toIndex, byte value) { + assert fromIndex > toIndex; + final int capacity = buffer.capacity(); fromIndex = Math.min(fromIndex, capacity); if (fromIndex < 0 || capacity == 0) { return -1; } + buffer.checkIndex(toIndex, fromIndex - toIndex); + for (int i = fromIndex - 1; i >= toIndex; i--) { + if (buffer._getByte(i) == value) { + return i; + } + } - return buffer.forEachByteDesc(toIndex, fromIndex - toIndex, new ByteProcessor.IndexOfProcessor(value)); + return -1; } private static CharSequence checkCharSequenceBounds(CharSequence seq, int start, int end) { diff --git a/microbench/src/main/java/io/netty/microbench/buffer/ByteBufIndexOfBenchmark.java b/microbench/src/main/java/io/netty/microbench/buffer/ByteBufIndexOfBenchmark.java new file mode 100644 index 00000000000..dcb6362f693 --- /dev/null +++ b/microbench/src/main/java/io/netty/microbench/buffer/ByteBufIndexOfBenchmark.java @@ -0,0 +1,114 @@ +/* + * Copyright 2020 The 
Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.microbench.buffer; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.microbench.util.AbstractMicrobenchmark; +import io.netty.util.internal.SuppressJava6Requirement; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Warmup; + +import java.util.SplittableRandom; +import java.util.concurrent.TimeUnit; + +@State(Scope.Benchmark) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@Fork(2) +@Warmup(iterations = 5, time = 1) +@Measurement(iterations = 8, time = 1) +public class ByteBufIndexOfBenchmark extends AbstractMicrobenchmark { + + @Param({ "7", "16", "23", "32" }) + int size; + + @Param({ "4", "11" }) + int logPermutations; + + @Param({ "1" }) + int seed; + + int permutations; + + ByteBuf[] data; + private int i; + + @Param({ "0" }) + private byte needleByte; + + @Param({ "true", "false" }) + private boolean direct; + @Param({ "false", "true" }) + private boolean noUnsafe; + + @Param({ "false", "true" }) + private boolean pooled; + + @Setup(Level.Trial) + @SuppressJava6Requirement(reason = "using SplittableRandom to reliably produce data") + public void init() { + System.setProperty("io.netty.noUnsafe", Boolean.valueOf(noUnsafe).toString()); + SplittableRandom random = new SplittableRandom(seed); + permutations = 1 << logPermutations; + this.data = new ByteBuf[permutations]; + final ByteBufAllocator allocator = pooled? PooledByteBufAllocator.DEFAULT : UnpooledByteBufAllocator.DEFAULT; + for (int i = 0; i < permutations; ++i) { + data[i] = direct? allocator.directBuffer(size, size) : allocator.heapBuffer(size, size); + for (int j = 0; j < size; j++) { + int value = random.nextInt(Byte.MIN_VALUE, Byte.MAX_VALUE + 1); + // turn any found value into something different + if (value == needleByte) { + if (needleByte != 1) { + value = 1; + } else { + value = 0; + } + } + data[i].setByte(j, value); + } + final int foundIndex = random.nextInt(Math.max(0, size - 8), size); + data[i].setByte(foundIndex, needleByte); + } + } + + private ByteBuf getData() { + return data[i++ & (permutations - 1)]; + } + + @Benchmark + public int indexOf() { + return getData().indexOf(0, size, needleByte); + } + + @TearDown + public void releaseBuffers() { + for (ByteBuf buffer : data) { + buffer.release(); + } + } + +}
diff --git a/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java index e68bac828c3..0c1bc7fa3c3 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java @@ -2122,6 +2122,36 @@ public void run() { } } + @Test + public void testSWARIndexOf() { + ByteBuf buffer = newBuffer(16); + buffer.clear(); + // Ensure the buffer is completely zero'ed. + buffer.setZero(0, buffer.capacity()); + buffer.writeByte((byte) 0); // 0 + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); // 7 + + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 1); // 11 + buffer.writeByte((byte) 2); + buffer.writeByte((byte) 3); + buffer.writeByte((byte) 4); + buffer.writeByte((byte) 1); + assertEquals(11, buffer.indexOf(0, 12, (byte) 1)); + assertEquals(12, buffer.indexOf(0, 16, (byte) 2)); + assertEquals(-1, buffer.indexOf(0, 11, (byte) 1)); + assertEquals(11, buffer.indexOf(0, 16, (byte) 1)); + buffer.release(); + } + @Test public void testIndexOf() { buffer.clear();
train
test
"2020-12-23T09:32:04"
"2020-10-26T05:34:23Z"
franz1981
val
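The arithmetic at the core of the patch above deserves a standalone restatement: broadcast the needle byte into every lane of a 64-bit word, XOR so that a matching lane becomes a zero byte, then use the classic carry trick to flag zero lanes. Below is a self-contained sketch of the same steps, assuming little-endian lane order (i.e. the word was read with a little-endian `getLong`, so lane 0 is the lowest address):

```java
// Returns the index (0-7) of the first lane of 'word' equal to 'value',
// or 8 when no lane matches. Lane 0 is the least significant byte.
static int firstLaneOf(long word, byte value) {
    long pattern = (value & 0xFFL) * 0x101010101010101L; // broadcast into all 8 lanes
    long input = word ^ pattern;                         // a matching lane becomes 0x00
    long tmp = (input & 0x7F7F7F7F7F7F7F7FL) + 0x7F7F7F7F7F7F7F7FL;
    tmp = ~(tmp | input | 0x7F7F7F7F7F7F7F7FL);          // high bit set only in zero lanes
    return Long.numberOfTrailingZeros(tmp) >>> 3;        // 64 trailing zeros maps to 8
}
```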
netty/netty/10748_10765
netty/netty
netty/netty/10748
netty/netty/10765
[ "keyword_pr_to_issue" ]
9da336f2fc4d5e7aa761191c3eb5edff334c03c7
08681a817d66e5a1a443ccb2b2b0e3d4a18d474b
[ "I think we need to add a custom frame to handle Push Promise Read.\r\n\r\n@normanmaurer WDYT?", "Maybe something like this.\r\n\r\n```java\r\npublic class PushPromiseRead implements Http2StreamFrame {\r\n\r\n private Http2FrameStream stream;\r\n private final int promisedStreamId;\r\n private final Http2Headers headers;\r\n private final int padding;\r\n\r\n public PushPromiseRead(Http2FrameStream stream, int promisedStreamId, Http2Headers headers, int padding) {\r\n this.stream = stream;\r\n this.promisedStreamId = promisedStreamId;\r\n this.headers = headers;\r\n this.padding = padding;\r\n }\r\n\r\n @Override\r\n public Http2StreamFrame stream(Http2FrameStream stream) {\r\n this.stream = stream;\r\n return this;\r\n }\r\n\r\n @Override\r\n public Http2FrameStream stream() {\r\n return stream;\r\n }\r\n\r\n public int getPromisedStreamId() {\r\n return promisedStreamId;\r\n }\r\n\r\n public Http2Headers getHeaders() {\r\n return headers;\r\n }\r\n\r\n public int getPadding() {\r\n return padding;\r\n }\r\n\r\n @Override\r\n public String name() {\r\n return \"PUSH_PROMISE_READ\";\r\n }\r\n\r\n @Override\r\n public String toString() {\r\n return \"PushPromiseRead{\" +\r\n \"streamId=\" + stream.id() +\r\n \", promisedStreamId=\" + promisedStreamId +\r\n \", headers=\" + headers +\r\n \", padding=\" + padding +\r\n '}';\r\n }\r\n}\r\n\r\n```", "Can I help on this?", "Yes, but first we need to figure out the best way to notify Handler about Push Promise Read." ]
[ "Why `2`?", "It looks like we return the newly created stream id in two places: as the return value and as the newly populated `http2FrameStream.id` field. What do you think about making the return value of this method `void` and using the latter when it's necessary? To me, that avoids the ambiguity up higher as to whether we should use `streamId` returned by this method or `pushPromiseFrame.pushStream().id()` (or the parallel in the `writeHeaders` method).", "ffti: it's easier to read these branches if you put the simple case first and you get an added bonus of being able to drop the boolean not operator from the predicate.", "Would it be easier to just add the tests to `Http2FrameCodecTest.java`? It already has a well developed testing scaffold.", "In general, it's not a great idea to hard code ports for tests since you can't be sure they're available to you in all environments.", "Just wanted to give Server and Client dedicated threads.", "I tried but `Http2FrameCodecTest.java` was already big and complex and adding this made it 2x complex. So decided to move it into a separate test class.", "Alright, will fix this.", "Will fix this.", "@hyperxpro why is this public ?", "missing return. ", "Also this should return `boolean` so when using this you can detect when it failed and when not to early return.", "like said below this should return a boolean and you should return early when the method failed.", "like said below this should return a boolean and you should return early when the method failed.", "Some use case needs Stream ID to be generated early before sending HeadersFrame. That's why I made it public. If this approach is not good then I will change it back to package-private.\r\n\r\nMy use case:\r\n```java\r\nHttp2FrameCodec.DefaultHttp2FrameStream http2FrameStream = (Http2FrameCodec.DefaultHttp2FrameStream) newStream();\r\nframeCodec.initializeNewStream(ctx, http2FrameStream, promise);\r\nlong id = httpFrame.id();\r\n\r\n// Put the stream ID and Outbound Property into the map.\r\naddStream(new OutboundProperty(id, http2FrameStream, httpFrame.protocol()));\r\n```", "Sorry, I didn't get it completely. PTAL at latest commit and suggest.", "just merge these two lines ", "just merge the two lines ", "So I strongly believe this should not be public... If the user needs this method we need to find a better way of exposing this ", "Seeing a sleep here seems like there needs to be something else that needs to be fixed ", "This should have no `get` prefix... Also it missed javadocs. Beside this I wonder why this is not part of the interface ?", "never call `sync()` in the eventloop. Remove this call and also remove the `throws...`", "nit: you could even merge this with the else and make it `if else`", "nit: you could even merge this with the else and make it `if else`", "Then I get NPE if I don't block it with something. That's why I had `Thread#sleep`.", "I think we need to solve this in a better way... Maybe we should at least expose this via the `Http2ChannelDuplexHandler`", "show me the stack trace ", "Tried adding a listener. PTAL.", "This method should not block... You need to not call `sync()` in it when you call it in a handler ", "Done", "final...\r\n", "Add `@UnstableApi`", "final\r\n\r\nAdd `@UnstableApi`", "@hyperxpro ^^", "Add `@UnstableApi`", "Add `@UnstableApi`", "Remove `throws InterruptedException` and also remove the `catch` for it above", "consider removing all these release and just add a big `try {... 
} finally { ReferenceCountUtil.release(msg); }`", "This interface should add an override like:\r\n\r\n```\r\n@Override\r\nHttp2PriorityFrame stream(Http2FrameStream stream) {\r\n```", "This interface should add an override like:\r\n\r\n```\r\n@Override\r\nHttp2PushPromiseFrame stream(Http2FrameStream stream) {\r\n```", "Done, PTAL.", "Is this override needed ? Seems like it returns the same type as the parent method ", "Is this override needed ? Seems like it returns the same type as the parent method", "Never call sync in the eventloop", "s/Http2StreamFrame/Http2PushPromiseFrame/", "s/Http2StreamFrame/Http2PriorityFrame/", "Sorry, I didn't get it. Do you want me to rename `Http2StreamFrame stream(Http2FrameStream stream);` to ` Http2PriorityFrame stream(Http2PriorityFrame stream);`?", "```suggestion\r\n Http2PushPromiseFrame stream(Http2FrameStream stream);\r\n```", "```suggestion\r\n Http2PriorityFrame stream(Http2FrameStream stream);\r\n```", "remove 4 spaces in all lines in the if block... you use 8 spaces while we use 4 everywhere", "remove 4 spaces in all lines in the if block... you use 8 spaces while we use 4 everywhere", "Honestly I think this should be done in a `ChannelFutureListener` so you are sure the write was actually done and so the id was set... Basically move this line and everything below in a a `ChannelFutureListener` that is added to the headers write." ]
"2020-11-02T18:58:17Z"
[]
Http2FrameCodec#onPushPromiseRead Support
### Expected behavior

`Http2FrameCodec#onPushPromiseRead` should be supported.

### Actual behavior

`Http2FrameCodec#onPushPromiseRead` is not yet supported. We cannot handle Push Promise Read. The stream is completely ignored.

https://github.com/netty/netty/blob/e9b28e76a366dadaf6da4375c503f670ec3e3ae4/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodec.java#L626

### Steps to reproduce

Read a push promise from an HTTP/2 server.

### Netty version

4.1.53.Final

### JVM version (e.g. `java -version`)

Java 11

### OS version (e.g. `uname -a`)

Windows 10 Pro x64
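Once the codec emits the frame type this issue asks for (added by the linked PR, see the patch below), consuming it is an ordinary inbound read. A hedged sketch; the handler name and the println body are purely illustrative:

```java
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http2.Http2PushPromiseFrame;

// stream() is the request stream the promise arrived on; pushStream() is the
// server-reserved stream that will carry the pushed response.
public class PushPromiseReadHandler extends SimpleChannelInboundHandler<Http2PushPromiseFrame> {
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, Http2PushPromiseFrame frame) {
        System.out.println("Promise for stream " + frame.pushStream().id()
                + " on stream " + frame.stream().id()
                + " with headers " + frame.http2Headers());
    }
}
```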
[ "codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodec.java" ]
[ "codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PriorityFrame.java", "codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrame.java", "codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodec.java", "codec-http2/src/main/java/io/netty/handler/codec/http2/Http2PriorityFrame.java", "codec-http2/src/main/java/io/netty/handler/codec/http2/Http2PushPromiseFrame.java" ]
[ "codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrameTest.java" ]
diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PriorityFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PriorityFrame.java new file mode 100644 index 00000000000..e131936e242 --- /dev/null +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PriorityFrame.java @@ -0,0 +1,77 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http2; + +import io.netty.util.internal.UnstableApi; + +/** + * Default implementation of {@linkplain Http2PriorityFrame} + */ +@UnstableApi +public final class DefaultHttp2PriorityFrame implements Http2PriorityFrame { + + private final int streamDependency; + private final short weight; + private final boolean exclusive; + private Http2FrameStream http2FrameStream; + + public DefaultHttp2PriorityFrame(int streamDependency, short weight, boolean exclusive) { + this.streamDependency = streamDependency; + this.weight = weight; + this.exclusive = exclusive; + } + + @Override + public int streamDependency() { + return streamDependency; + } + + @Override + public short weight() { + return weight; + } + + @Override + public boolean exclusive() { + return exclusive; + } + + @Override + public Http2PriorityFrame stream(Http2FrameStream stream) { + http2FrameStream = stream; + return this; + } + + @Override + public Http2FrameStream stream() { + return http2FrameStream; + } + + @Override + public String name() { + return "PRIORITY_FRAME"; + } + + @Override + public String toString() { + return "DefaultHttp2PriorityFrame(" + + "stream=" + http2FrameStream + + ", streamDependency=" + streamDependency + + ", weight=" + weight + + ", exclusive=" + exclusive + + ')'; + } +} diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrame.java new file mode 100644 index 00000000000..f9fd9871093 --- /dev/null +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrame.java @@ -0,0 +1,101 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http2; + +import io.netty.util.internal.UnstableApi; + +/** + * Default implementation of {@link Http2PushPromiseFrame} + */ +@UnstableApi +public final class DefaultHttp2PushPromiseFrame implements Http2PushPromiseFrame { + + private Http2FrameStream pushStreamFrame; + private final Http2Headers http2Headers; + private Http2FrameStream streamFrame; + private final int padding; + private final int promisedStreamId; + + public DefaultHttp2PushPromiseFrame(Http2Headers http2Headers) { + this(http2Headers, 0); + } + + public DefaultHttp2PushPromiseFrame(Http2Headers http2Headers, int padding) { + this(http2Headers, padding, -1); + } + + DefaultHttp2PushPromiseFrame(Http2Headers http2Headers, int padding, int promisedStreamId) { + this.http2Headers = http2Headers; + this.padding = padding; + this.promisedStreamId = promisedStreamId; + } + + @Override + public Http2StreamFrame pushStream(Http2FrameStream stream) { + pushStreamFrame = stream; + return this; + } + + @Override + public Http2FrameStream pushStream() { + return pushStreamFrame; + } + + @Override + public Http2Headers http2Headers() { + return http2Headers; + } + + @Override + public int padding() { + return padding; + } + + @Override + public int promisedStreamId() { + if (pushStreamFrame != null) { + return pushStreamFrame.id(); + } else { + return promisedStreamId; + } + } + + @Override + public Http2PushPromiseFrame stream(Http2FrameStream stream) { + streamFrame = stream; + return this; + } + + @Override + public Http2FrameStream stream() { + return streamFrame; + } + + @Override + public String name() { + return "PUSH_PROMISE_FRAME"; + } + + @Override + public String toString() { + return "DefaultHttp2PushPromiseFrame{" + + "pushStreamFrame=" + pushStreamFrame + + ", http2Headers=" + http2Headers + + ", streamFrame=" + streamFrame + + ", padding=" + padding + + '}'; + } +} diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodec.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodec.java index 7265a4efcc5..1664b58f481 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodec.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodec.java @@ -55,7 +55,7 @@ * creating outbound streams. * * <h3>Stream Lifecycle</h3> - * + * <p> * The frame codec delivers and writes frames for active streams. An active stream is closed when either side sends a * {@code RST_STREAM} frame or both sides send a frame with the {@code END_STREAM} flag set. Each * {@link Http2StreamFrame} has a {@link Http2FrameStream} object attached that uniquely identifies a particular stream. @@ -65,7 +65,7 @@ * {@link Http2StreamFrame#stream(Http2FrameStream)}. * * <h3>Flow control</h3> - * + * <p> * The frame codec automatically increments stream and connection flow control windows. * * <p>Incoming flow controlled frames need to be consumed by writing a {@link Http2WindowUpdateFrame} with the consumed @@ -79,12 +79,12 @@ * connection-level flow control window is the same as initial stream-level flow control window. * * <h3>New inbound Streams</h3> - * + * <p> * The first frame of an HTTP/2 stream must be an {@link Http2HeadersFrame}, which will have an {@link Http2FrameStream} * object attached. 
* * <h3>New outbound Streams</h3> - * + * <p> * A outbound HTTP/2 stream can be created by first instantiating a new {@link Http2FrameStream} object via * {@link Http2ChannelDuplexHandler#newStream()}, and then writing a {@link Http2HeadersFrame} object with the stream * attached. @@ -126,13 +126,13 @@ * the last stream identifier of the GO_AWAY frame will fail with a {@link Http2GoAwayException}. * * <h3>Error Handling</h3> - * + * <p> * Exceptions and errors are propagated via {@link ChannelInboundHandler#exceptionCaught}. Exceptions that apply to * a specific HTTP/2 stream are wrapped in a {@link Http2FrameStreamException} and have the corresponding * {@link Http2FrameStream} object attached. * * <h3>Reference Counting</h3> - * + * <p> * Some {@link Http2StreamFrame}s implement the {@link ReferenceCounted} interface, as they carry * reference counted objects (e.g. {@link ByteBuf}s). The frame codec will call {@link ReferenceCounted#retain()} before * propagating a reference counted object through the pipeline, and thus an application handler needs to release such @@ -140,7 +140,7 @@ * https://netty.io/wiki/reference-counted-objects.html * * <h3>HTTP Upgrade</h3> - * + * <p> * Server-side HTTP to HTTP/2 upgrade is supported in conjunction with {@link Http2ServerUpgradeCodec}; the necessary * HTTP-to-HTTP/2 conversion is performed automatically. */ @@ -156,7 +156,9 @@ public class Http2FrameCodec extends Http2ConnectionHandler { ChannelHandlerContext ctx; - /** Number of buffered streams if the {@link StreamBufferingEncoder} is used. **/ + /** + * Number of buffered streams if the {@link StreamBufferingEncoder} is used. + **/ private int numBufferedStreams; private final IntObjectMap<DefaultHttp2FrameStream> frameStreamToInitializeMap = new IntObjectHashMap<DefaultHttp2FrameStream>(8); @@ -204,7 +206,7 @@ public boolean visit(Http2Stream stream) { /** * Retrieve the number of streams currently in the process of being initialized. - * + * <p> * This is package-private for testing only. 
*/ int numInitializingStreams() { @@ -337,6 +339,13 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) encoder().writeSettingsAck(ctx, promise); } else if (msg instanceof Http2GoAwayFrame) { writeGoAwayFrame(ctx, (Http2GoAwayFrame) msg, promise); + } else if (msg instanceof Http2PushPromiseFrame) { + Http2PushPromiseFrame pushPromiseFrame = (Http2PushPromiseFrame) msg; + writePushPromise(ctx, pushPromiseFrame, promise); + } else if (msg instanceof Http2PriorityFrame) { + Http2PriorityFrame priorityFrame = (Http2PriorityFrame) msg; + encoder().writePriority(ctx, priorityFrame.stream().id(), priorityFrame.streamDependency(), + priorityFrame.weight(), priorityFrame.exclusive(), promise); } else if (msg instanceof Http2UnknownFrame) { Http2UnknownFrame unknownFrame = (Http2UnknownFrame) msg; encoder().writeFrame(ctx, unknownFrame.frameType(), unknownFrame.stream().id(), @@ -383,37 +392,14 @@ private void writeGoAwayFrame(ChannelHandlerContext ctx, Http2GoAwayFrame frame, goAway(ctx, (int) lastStreamId, frame.errorCode(), frame.content(), promise); } - private void writeHeadersFrame( - final ChannelHandlerContext ctx, Http2HeadersFrame headersFrame, final ChannelPromise promise) { + private void writeHeadersFrame(final ChannelHandlerContext ctx, Http2HeadersFrame headersFrame, + final ChannelPromise promise) { if (isStreamIdValid(headersFrame.stream().id())) { encoder().writeHeaders(ctx, headersFrame.stream().id(), headersFrame.headers(), headersFrame.padding(), headersFrame.isEndStream(), promise); - } else { - final DefaultHttp2FrameStream stream = (DefaultHttp2FrameStream) headersFrame.stream(); - final Http2Connection connection = connection(); - final int streamId = connection.local().incrementAndGetNextStreamId(); - if (streamId < 0) { - promise.setFailure(new Http2NoMoreStreamIdsException()); - - // Simulate a GOAWAY being received due to stream exhaustion on this connection. We use the maximum - // valid stream ID for the current peer. - onHttp2Frame(ctx, new DefaultHttp2GoAwayFrame(connection.isServer() ? Integer.MAX_VALUE : - Integer.MAX_VALUE - 1, NO_ERROR.code(), - writeAscii(ctx.alloc(), "Stream IDs exhausted on local stream creation"))); - return; - } - stream.id = streamId; - - // Use a Map to store all pending streams as we may have multiple. This is needed as if we would store the - // stream in a field directly we may override the stored field before onStreamAdded(...) was called - // and so not correctly set the property for the buffered stream. - // - // See https://github.com/netty/netty/issues/8692 - Object old = frameStreamToInitializeMap.put(streamId, stream); - - // We should not re-use ids. 
- assert old == null; + } else if (initializeNewStream(ctx, (DefaultHttp2FrameStream) headersFrame.stream(), promise)) { + final int streamId = headersFrame.stream().id(); encoder().writeHeaders(ctx, streamId, headersFrame.headers(), headersFrame.padding(), headersFrame.isEndStream(), promise); @@ -426,7 +412,6 @@ private void writeHeadersFrame( @Override public void operationComplete(ChannelFuture channelFuture) { numBufferedStreams--; - handleHeaderFuture(channelFuture, streamId); } }); @@ -436,6 +421,62 @@ public void operationComplete(ChannelFuture channelFuture) { } } + private void writePushPromise(final ChannelHandlerContext ctx, Http2PushPromiseFrame pushPromiseFrame, + final ChannelPromise promise) { + if (isStreamIdValid(pushPromiseFrame.pushStream().id())) { + encoder().writePushPromise(ctx, pushPromiseFrame.stream().id(), pushPromiseFrame.pushStream().id(), + pushPromiseFrame.http2Headers(), pushPromiseFrame.padding(), promise); + } else if (initializeNewStream(ctx, (DefaultHttp2FrameStream) pushPromiseFrame.pushStream(), promise)) { + final int streamId = pushPromiseFrame.stream().id(); + encoder().writePushPromise(ctx, streamId, pushPromiseFrame.pushStream().id(), + pushPromiseFrame.http2Headers(), pushPromiseFrame.padding(), promise); + + if (promise.isDone()) { + handleHeaderFuture(promise, streamId); + } else { + numBufferedStreams++; + // Clean up the stream being initialized if writing the headers fails and also + // decrement the number of buffered streams. + promise.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture channelFuture) { + numBufferedStreams--; + handleHeaderFuture(channelFuture, streamId); + } + }); + } + } + } + + private boolean initializeNewStream(ChannelHandlerContext ctx, DefaultHttp2FrameStream http2FrameStream, + ChannelPromise promise) { + final Http2Connection connection = connection(); + final int streamId = connection.local().incrementAndGetNextStreamId(); + if (streamId < 0) { + promise.setFailure(new Http2NoMoreStreamIdsException()); + + // Simulate a GOAWAY being received due to stream exhaustion on this connection. We use the maximum + // valid stream ID for the current peer. + onHttp2Frame(ctx, new DefaultHttp2GoAwayFrame(connection.isServer() ? Integer.MAX_VALUE : + Integer.MAX_VALUE - 1, NO_ERROR.code(), + writeAscii(ctx.alloc(), "Stream IDs exhausted on local stream creation"))); + + return false; + } + http2FrameStream.id = streamId; + + // Use a Map to store all pending streams as we may have multiple. This is needed as if we would store the + // stream in a field directly we may override the stored field before onStreamAdded(...) was called + // and so not correctly set the property for the buffered stream. + // + // See https://github.com/netty/netty/issues/8692 + Object old = frameStreamToInitializeMap.put(streamId, http2FrameStream); + + // We should not re-use ids. 
+ assert old == null; + return true; + } + private void handleHeaderFuture(ChannelFuture channelFuture, int streamId) { if (!channelFuture.isSuccess()) { frameStreamToInitializeMap.remove(streamId); @@ -504,7 +545,7 @@ protected void onConnectionError( */ @Override protected final void onStreamError(ChannelHandlerContext ctx, boolean outbound, Throwable cause, - Http2Exception.StreamException streamException) { + Http2Exception.StreamException streamException) { int streamId = streamException.streamId(); Http2Stream connectionStream = connection().stream(streamId); if (connectionStream == null) { @@ -529,12 +570,12 @@ protected final void onStreamError(ChannelHandlerContext ctx, boolean outbound, } private void onHttp2UnknownStreamError(@SuppressWarnings("unused") ChannelHandlerContext ctx, Throwable cause, - Http2Exception.StreamException streamException) { + Http2Exception.StreamException streamException) { // It is normal to hit a race condition where we still receive frames for a stream that this // peer has deemed closed, such as if this peer sends a RST(CANCEL) to discard the request. // Since this is likely to be normal we log at DEBUG level. InternalLogLevel level = - streamException.error() == Http2Error.STREAM_CLOSED ? InternalLogLevel.DEBUG : InternalLogLevel.WARN; + streamException.error() == Http2Error.STREAM_CLOSED ? InternalLogLevel.DEBUG : InternalLogLevel.WARN; LOG.log(level, "Stream exception thrown for unknown stream {}.", streamException.streamId(), cause); } @@ -592,14 +633,14 @@ public void onHeadersRead(ChannelHandlerContext ctx, int streamId, public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, boolean endOfStream) { onHttp2Frame(ctx, new DefaultHttp2HeadersFrame(headers, endOfStream, padding) - .stream(requireStream(streamId))); + .stream(requireStream(streamId))); } @Override public int onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding, boolean endOfStream) { onHttp2Frame(ctx, new DefaultHttp2DataFrame(data, endOfStream, padding) - .stream(requireStream(streamId)).retain()); + .stream(requireStream(streamId)).retain()); // We return the bytes in consumeBytes() once the stream channel consumed the bytes. 
return 0; } @@ -610,9 +651,10 @@ public void onGoAwayRead(ChannelHandlerContext ctx, int lastStreamId, long error } @Override - public void onPriorityRead( - ChannelHandlerContext ctx, int streamId, int streamDependency, short weight, boolean exclusive) { - // TODO: Maybe handle me + public void onPriorityRead(ChannelHandlerContext ctx, int streamId, int streamDependency, + short weight, boolean exclusive) { + onHttp2Frame(ctx, new DefaultHttp2PriorityFrame(streamDependency, weight, exclusive) + .stream(requireStream(streamId))); } @Override @@ -621,9 +663,12 @@ public void onSettingsAckRead(ChannelHandlerContext ctx) { } @Override - public void onPushPromiseRead( - ChannelHandlerContext ctx, int streamId, int promisedStreamId, Http2Headers headers, int padding) { - // TODO: Maybe handle me + public void onPushPromiseRead(ChannelHandlerContext ctx, int streamId, int promisedStreamId, + Http2Headers headers, int padding) { + onHttp2Frame(ctx, new DefaultHttp2PushPromiseFrame(headers, padding, promisedStreamId) + .pushStream(new DefaultHttp2FrameStream() + .setStreamAndProperty(streamKey, connection().stream(promisedStreamId))) + .stream(requireStream(streamId))); } private Http2FrameStream requireStream(int streamId) { @@ -640,7 +685,7 @@ private void onUpgradeEvent(ChannelHandlerContext ctx, UpgradeEvent evt) { } private void onHttp2StreamWritabilityChanged(ChannelHandlerContext ctx, DefaultHttp2FrameStream stream, - @SuppressWarnings("unused") boolean writable) { + @SuppressWarnings("unused") boolean writable) { ctx.fireUserEventTriggered(stream.writabilityChanged); } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2PriorityFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2PriorityFrame.java new file mode 100644 index 00000000000..403028fa39e --- /dev/null +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2PriorityFrame.java @@ -0,0 +1,44 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http2; + +import io.netty.util.internal.UnstableApi; + +/** + * HTTP/2 Priority Frame + */ +@UnstableApi +public interface Http2PriorityFrame extends Http2StreamFrame { + + /** + * Parent Stream Id of this Priority request + */ + int streamDependency(); + + /** + * Stream weight + */ + short weight(); + + /** + * Set to {@code true} if this stream is exclusive else set to {@code false} + */ + boolean exclusive(); + + @Override + Http2PriorityFrame stream(Http2FrameStream stream); + +} diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2PushPromiseFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2PushPromiseFrame.java new file mode 100644 index 00000000000..dc5d7cb42e6 --- /dev/null +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2PushPromiseFrame.java @@ -0,0 +1,55 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http2; + +import io.netty.util.internal.UnstableApi; + +/** + * HTTP/2 Push Promise Frame + */ +@UnstableApi +public interface Http2PushPromiseFrame extends Http2StreamFrame { + + /** + * Set the Promise {@link Http2FrameStream} object for this frame. + */ + Http2StreamFrame pushStream(Http2FrameStream stream); + + /** + * Returns the Promise {@link Http2FrameStream} object for this frame, or {@code null} if the + * frame has yet to be associated with a stream. + */ + Http2FrameStream pushStream(); + + /** + * {@link Http2Headers} sent in Push Promise + */ + Http2Headers http2Headers(); + + /** + * Frame padding to use. Will be non-negative and less than 256. + */ + int padding(); + + /** + * Promised Stream ID + */ + int promisedStreamId(); + + @Override + Http2PushPromiseFrame stream(Http2FrameStream stream); + +}
diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrameTest.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrameTest.java new file mode 100644 index 00000000000..7bc598d5570 --- /dev/null +++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrameTest.java @@ -0,0 +1,242 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http2; + +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.util.CharsetUtil; +import io.netty.util.ReferenceCountUtil; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import static org.junit.Assert.assertEquals; + +public class DefaultHttp2PushPromiseFrameTest { + + private final EventLoopGroup eventLoopGroup = new NioEventLoopGroup(2); + private final ClientHandler clientHandler = new ClientHandler(); + private final Map<Integer, String> contentMap = new ConcurrentHashMap<Integer, String>(); + + private ChannelFuture connectionFuture; + + @Before + public void setup() throws InterruptedException { + ServerBootstrap serverBootstrap = new ServerBootstrap() + .group(eventLoopGroup) + .channel(NioServerSocketChannel.class) + .childHandler(new ChannelInitializer<SocketChannel>() { + @Override + protected void initChannel(SocketChannel ch) { + ChannelPipeline pipeline = ch.pipeline(); + + Http2FrameCodec frameCodec = Http2FrameCodecBuilder.forServer() + .autoAckSettingsFrame(true) + .autoAckPingFrame(true) + .build(); + + pipeline.addLast(frameCodec); + pipeline.addLast(new ServerHandler()); + } + }); + + ChannelFuture channelFuture = serverBootstrap.bind(0).sync(); + + final Bootstrap bootstrap = new Bootstrap() + .group(eventLoopGroup) + .channel(NioSocketChannel.class) + .handler(new ChannelInitializer<SocketChannel>() { + @Override + protected void initChannel(SocketChannel ch) { + ChannelPipeline pipeline = ch.pipeline(); + + Http2FrameCodec frameCodec = Http2FrameCodecBuilder.forClient() + .autoAckSettingsFrame(true) + .autoAckPingFrame(true) + .initialSettings(Http2Settings.defaultSettings().pushEnabled(true)) + .build(); + + pipeline.addLast(frameCodec); + pipeline.addLast(clientHandler); + } + }); + + connectionFuture = bootstrap.connect(channelFuture.channel().localAddress()); + } + + @Test + 
public void send() { + connectionFuture.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) { + clientHandler.write(); + } + }); + } + + @After + public void shutdown() { + eventLoopGroup.shutdownGracefully(); + } + + private final class ServerHandler extends Http2ChannelDuplexHandler { + + @Override + public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exception { + + if (msg instanceof Http2HeadersFrame) { + final Http2HeadersFrame receivedFrame = (Http2HeadersFrame) msg; + + Http2Headers pushRequestHeaders = new DefaultHttp2Headers(); + pushRequestHeaders.path("/meow") + .method("GET") + .scheme("https") + .authority("localhost:5555"); + + // Write PUSH_PROMISE request headers + final Http2FrameStream newPushFrameStream = newStream(); + Http2PushPromiseFrame pushPromiseFrame = new DefaultHttp2PushPromiseFrame(pushRequestHeaders); + pushPromiseFrame.stream(receivedFrame.stream()); + pushPromiseFrame.pushStream(newPushFrameStream); + ctx.writeAndFlush(pushPromiseFrame).addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) { + contentMap.put(newPushFrameStream.id(), "Meow, I am Pushed via HTTP/2"); + + // Write headers for actual request + Http2Headers http2Headers = new DefaultHttp2Headers(); + http2Headers.status("200"); + http2Headers.add("push", "false"); + Http2HeadersFrame headersFrame = new DefaultHttp2HeadersFrame(http2Headers, false); + headersFrame.stream(receivedFrame.stream()); + ChannelFuture channelFuture = ctx.writeAndFlush(headersFrame); + + // Write Data of actual request + channelFuture.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + Http2DataFrame dataFrame = new DefaultHttp2DataFrame( + Unpooled.wrappedBuffer("Meow".getBytes()), true); + dataFrame.stream(receivedFrame.stream()); + ctx.writeAndFlush(dataFrame); + } + }); + } + }); + } else if (msg instanceof Http2PriorityFrame) { + Http2PriorityFrame priorityFrame = (Http2PriorityFrame) msg; + String content = contentMap.get(priorityFrame.stream().id()); + if (content == null) { + ctx.writeAndFlush(new DefaultHttp2GoAwayFrame(Http2Error.REFUSED_STREAM)); + return; + } + + // Write headers for Priority request + Http2Headers http2Headers = new DefaultHttp2Headers(); + http2Headers.status("200"); + http2Headers.add("push", "true"); + Http2HeadersFrame headersFrame = new DefaultHttp2HeadersFrame(http2Headers, false); + headersFrame.stream(priorityFrame.stream()); + ctx.writeAndFlush(headersFrame); + + // Write Data of Priority request + Http2DataFrame dataFrame = new DefaultHttp2DataFrame(Unpooled.wrappedBuffer(content.getBytes()), true); + dataFrame.stream(priorityFrame.stream()); + ctx.writeAndFlush(dataFrame); + } + } + } + + private static final class ClientHandler extends Http2ChannelDuplexHandler { + + private ChannelHandlerContext ctx; + + @Override + public void channelActive(ChannelHandlerContext ctx) throws InterruptedException { + this.ctx = ctx; + } + + void write() { + Http2Headers http2Headers = new DefaultHttp2Headers(); + http2Headers.path("/") + .authority("localhost") + .method("GET") + .scheme("https"); + + Http2HeadersFrame headersFrame = new DefaultHttp2HeadersFrame(http2Headers, true); + headersFrame.stream(newStream()); + ctx.writeAndFlush(headersFrame); + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + + if (msg instanceof Http2PushPromiseFrame) { + 
Http2PushPromiseFrame pushPromiseFrame = (Http2PushPromiseFrame) msg; + + assertEquals("/meow", pushPromiseFrame.http2Headers().path().toString()); + assertEquals("GET", pushPromiseFrame.http2Headers().method().toString()); + assertEquals("https", pushPromiseFrame.http2Headers().scheme().toString()); + assertEquals("localhost:5555", pushPromiseFrame.http2Headers().authority().toString()); + + Http2PriorityFrame priorityFrame = new DefaultHttp2PriorityFrame(pushPromiseFrame.stream().id(), + Http2CodecUtil.DEFAULT_PRIORITY_WEIGHT, true); + priorityFrame.stream(pushPromiseFrame.pushStream()); + ctx.writeAndFlush(priorityFrame); + } else if (msg instanceof Http2HeadersFrame) { + Http2HeadersFrame headersFrame = (Http2HeadersFrame) msg; + + if (headersFrame.stream().id() == 3) { + assertEquals("200", headersFrame.headers().status().toString()); + assertEquals("false", headersFrame.headers().get("push").toString()); + } else if (headersFrame.stream().id() == 2) { + assertEquals("200", headersFrame.headers().status().toString()); + assertEquals("true", headersFrame.headers().get("push").toString()); + } else { + ctx.writeAndFlush(new DefaultHttp2GoAwayFrame(Http2Error.REFUSED_STREAM)); + } + } else if (msg instanceof Http2DataFrame) { + Http2DataFrame dataFrame = (Http2DataFrame) msg; + + try { + if (dataFrame.stream().id() == 3) { + assertEquals("Meow", dataFrame.content().toString(CharsetUtil.UTF_8)); + } else if (dataFrame.stream().id() == 2) { + assertEquals("Meow, I am Pushed via HTTP/2", dataFrame.content().toString(CharsetUtil.UTF_8)); + } else { + ctx.writeAndFlush(new DefaultHttp2GoAwayFrame(Http2Error.REFUSED_STREAM)); + } + } finally { + ReferenceCountUtil.release(dataFrame); + } + } + } + } +}
train
test
"2020-12-23T09:32:04"
"2020-10-29T14:09:34Z"
hyperxpro
val
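A short illustrative sketch of how a server-side handler is meant to use the `Http2PushPromiseFrame` API introduced by the patch in the record above. The handler name, authority, and path are hypothetical (not taken from the record); the frame/stream calls mirror the `ServerHandler` in `DefaultHttp2PushPromiseFrameTest`.

```java
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http2.DefaultHttp2Headers;
import io.netty.handler.codec.http2.DefaultHttp2PushPromiseFrame;
import io.netty.handler.codec.http2.Http2ChannelDuplexHandler;
import io.netty.handler.codec.http2.Http2FrameStream;
import io.netty.handler.codec.http2.Http2Headers;
import io.netty.handler.codec.http2.Http2HeadersFrame;
import io.netty.handler.codec.http2.Http2PushPromiseFrame;

// Illustrative only: a minimal server handler that answers a request stream
// with a PUSH_PROMISE, assuming Http2FrameCodec is earlier in the pipeline.
public final class PushPromiseExampleHandler extends Http2ChannelDuplexHandler {
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        if (msg instanceof Http2HeadersFrame) {
            Http2HeadersFrame request = (Http2HeadersFrame) msg;

            // Headers describing the request the server promises to answer.
            Http2Headers promised = new DefaultHttp2Headers()
                    .method("GET").scheme("https")
                    .authority("example.com").path("/style.css"); // hypothetical resource

            // Reserve a local stream for the pushed response; Http2FrameCodec assigns
            // the actual stream id when the PUSH_PROMISE frame is written.
            Http2FrameStream pushStream = newStream();
            Http2PushPromiseFrame pushPromise = new DefaultHttp2PushPromiseFrame(promised);
            pushPromise.stream(request.stream()); // stream the promise is sent on
            pushPromise.pushStream(pushStream);   // reserved stream for the pushed response
            ctx.writeAndFlush(pushPromise);
        } else {
            super.channelRead(ctx, msg);
        }
    }
}
```

Once the PUSH_PROMISE write completes, the pushed response itself is written as ordinary headers/data frames on `pushStream`, as the test's `ServerHandler` does in its write listener.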
netty/netty/10772_10773
netty/netty
netty/netty/10772
netty/netty/10773
[ "keyword_issue_to_pr", "keyword_pr_to_issue" ]
c6e2934357ca7284c474f9da2823b54936d3addc
23864d25b993e9f75cc910d6318d831a01730a83
[ "Thanks for reporting. #10773 PR will fix this.", "@hyandell doh! Thanks for catching this." ]
[]
"2020-11-04T07:17:15Z"
[]
Incorrect license id in NOTICE.txt
The NOTICE.txt file ends with: ``` This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. This private header is also used by Apple's open source mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). * LICENSE: * license/LICENSE.dnsinfo.txt (Apache License 2.0) * HOMEPAGE: * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h ``` This license is the Apple Public Source License 2.0, not the Apache License 2.0. ---
[ "NOTICE.txt" ]
[ "NOTICE.txt" ]
[]
diff --git a/NOTICE.txt b/NOTICE.txt index b702bb72403..cd84a8afe20 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -243,6 +243,6 @@ This private header is also used by Apple's open source mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). * LICENSE: - * license/LICENSE.dnsinfo.txt (Apache License 2.0) + * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) * HOMEPAGE: - * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h \ No newline at end of file + * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h
null
train
test
"2020-11-03T21:12:29"
"2020-11-03T23:09:45Z"
hyandell
val
netty/netty/10670_10775
netty/netty
netty/netty/10670
netty/netty/10775
[ "keyword_pr_to_issue" ]
b63e2dfb1baa7dbe3da381458f6d9ab004777090
027a68604273032f4cb6b181b40c47ced2108239
[ "@ejona86 this is the best I can think of as well... I guess this is \"good enough\" ? ", "I think it will be good enough. I'll work on a fix after I'm back in the office on the 26th.", "ok cool... ping me once I should review some code ", "I don't see a good way to update `incrementExpectedStreamId()` from `DefaultHttp2FramReader`. It can't call the listener to do it and when an exception is thrown the caller doesn't know which frame it was processing nor the streamid for it.\r\n\r\nAgain, \"I don't see a good way.\" But I see several variations via exceptions:\r\n1. Http2ConnectionHandler is pessimistic and treats all connection errors with ShutdownHint.HARD_SHUTDOWN as a possible header parsing failure. Since it doesn't know the proper stream id it uses MAX_INT for the GOAWAY. This is safe, but obviously can cause unnecessary errors for streams that were racing on the wire\r\n2. Introduce a new ShutdownHint.ALL_BETS_OFF_SHUTDOWN. Http2ConnectionHandler processes this hint as in (1), using MAX_INT for the GOAWAY. Http2FrameReader can catch any connection error and wrap it with a new exception with ALL_BETS_OFF_SHUTDOWN. Compared to (1), this reduces the number of failures that will use the MAX_INT GOAWAY behavior. A variation on this approach would be to create a new exception type or introduce a new field\r\n3. Introduce a \"processingStreamId\" field to Http2Exception. It would be able to be set by anyone after the exception is created. DefaultHttp2FrameReader would set the value when the exception is propagating. Http2ConnectionHandler would take the max of the processingStreamId in the exception and the current `remote().lastStreamCreated()` when sending the GOAWAY. This won't work for \"static\" exceptions\r\n\r\nWe could change some interfaces:\r\n\r\n4. We could add a method to both Http2ConnectionDecoder and Http2FrameReader to return the largest stream id seen. Http2ConnectionHandler would take the max of this method and the current `remote().lastStreamCreated()` when sending the GOAWAY. This breaks some interfaces. We could introduce new interfaces and have instanceof checks in Http2ConnectionHandler and Http2ConnectionDecoder; if those checks fail we could use MAX_INT\r\n5. We could add a method to both Http2ConnectionDecoder and Http2FrameReader to set the Http2Connection. Http2FrameReader could then create a stream manually. This is a clear layering violation. Maybe we could just do this hack on DefaultHttp2FrameReader so no interfaces would change, but it'd still be a layering violation\r\n6. Add a method to the listener, to allow creating the stream without any headers. DefaultHttp2FrameReader would guarantee to either call the normal onHeadersRead or this onUnprocessableHeadersRead. This breaks an interface; I don't see a way around that with instanceof checks\r\n\r\nOr we can investigate \"overly creative ideas:\"\r\n\r\n7. If a connection error is triggered during Http2FrameReader.readFrame(), throw a stream error. The next call to Http2FrameReader.readFrame() throw the connection error, and just let the GOAWAY work like it does today. It's unclear how much confusion this would cause on-the-wire, but no specific problems come to mind\r\n\r\nI'm not wild about any of them, but my current preference is toward (1), as it is simple (like 3 lines of code), and these HARD_SHUTDOWN cases should only happen when there are other horrible errors going on. 
At the very least it is very quick to implement and would \"stop the bleeding\" and give us time to fix things up \"properly\"... assuming we had an idea of what \"properly\" is, which I've got no clue.\r\n\r\n(2) is probably my second pick, but it does further complicate Http2Exception.", "I think I would go for 1) to keep it simple... 2) doesn't sound too bad as well. " ]
[]
"2020-11-04T17:00:40Z"
[]
http2: wrong Last-Stream-Id for connection error during header parsing
### Expected behavior A channel error occurring during HEADERS decoding triggers a GOAWAY that includes the stream id including the stream for the header being processed. ### Actual behavior Streams are allocated within `DefaultHttp2ConnectionDecoder.onHeadersRead()`. If a channel error occurs during header parsing within `DefaultHttp2FrameReader`, `DefaultHttp2ConnectionDecoder.onHeadersRead()` is not called. When the exception propagates to [`Http2ConnectionHandler.onConnectionError()` which triggers goAway](https://github.com/netty/netty/blob/d01471917b94d15fbe8b3b2e0f0ed8f24ee2954a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java#L672), it uses `connection().remote().lastStreamCreated()` for the Last-Stream-Id which does not include the stream being processed. This exposes the netty server to clients processing the stream closure similar to REFUSED_STREAM and transparently replaying the request on a new connection. ### Steps to reproduce Send large enough HEADERS (+CONTINUATIONS) such that the encoded form exceeds the MAX_HEADER_LIST_SIZE. [In my test](https://github.com/grpc/grpc-java/blob/f9b428ab40cd6eec2f45dce15a16ff0c9390fbb2/netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java#L538), I used a max size of 1 byte. The server-side exception triggered, for reference: ``` io.netty.handler.codec.http2.Http2Exception: Header size exceeded max allowed size (1) at io.netty.handler.codec.http2.Http2Exception.connectionError(Http2Exception.java:103) at io.netty.handler.codec.http2.Http2CodecUtil.headerListSizeExceeded(Http2CodecUtil.java:245) at io.netty.handler.codec.http2.DefaultHttp2FrameReader$HeadersBlockBuilder.headerSizeExceeded(DefaultHttp2FrameReader.java:694) at io.netty.handler.codec.http2.DefaultHttp2FrameReader$HeadersBlockBuilder.addFragment(DefaultHttp2FrameReader.java:710) at io.netty.handler.codec.http2.DefaultHttp2FrameReader$1.processFragment(DefaultHttp2FrameReader.java:455) at io.netty.handler.codec.http2.DefaultHttp2FrameReader.readHeadersFrame(DefaultHttp2FrameReader.java:464) at io.netty.handler.codec.http2.DefaultHttp2FrameReader.processPayloadState(DefaultHttp2FrameReader.java:254) at io.netty.handler.codec.http2.DefaultHttp2FrameReader.readFrame(DefaultHttp2FrameReader.java:160) at io.netty.handler.codec.http2.Http2InboundFrameLogger.readFrame(Http2InboundFrameLogger.java:41) at io.netty.handler.codec.http2.DefaultHttp2ConnectionDecoder.decodeFrame(DefaultHttp2ConnectionDecoder.java:174) at io.netty.handler.codec.http2.Http2ConnectionHandler$FrameDecoder.decode(Http2ConnectionHandler.java:378) at io.netty.handler.codec.http2.Http2ConnectionHandler.decode(Http2ConnectionHandler.java:438) at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:501) at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:440) at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1526) at io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1275) at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1322) at 
io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:501) at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:440) at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163) at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:714) at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:650) at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:576) at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(Thread.java:748) ``` ### Netty version 4.1.51.Final ------ I'm open for ideas on how to resolve. We could have a catch in `DefaultHttp2FrameReader.readHeadersFrame()` that calls `DefaultHttp2Connection.remote().incrementExpectedStreamId()` before rethrowing. That seems sort of wrong, but we expect the connection to be destroyed so maybe it makes sense.
[ "codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java" ]
[ "codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java" ]
[ "codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java", "codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ControlFrameLimitEncoderTest.java" ]
diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java index 4b7ad517644..cbb923f169d 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java @@ -876,7 +876,16 @@ private void checkCloseConnection(ChannelFuture future) { */ private ChannelFuture goAway(ChannelHandlerContext ctx, Http2Exception cause, ChannelPromise promise) { long errorCode = cause != null ? cause.error().code() : NO_ERROR.code(); - int lastKnownStream = connection().remote().lastStreamCreated(); + int lastKnownStream; + if (cause != null && cause.shutdownHint() == Http2Exception.ShutdownHint.HARD_SHUTDOWN) { + // The hard shutdown could have been triggered during header processing, before updating + // lastStreamCreated(). Specifically, any connection errors encountered by Http2FrameReader or HPACK + // decoding will fail to update the last known stream. So we must be pessimistic. + // https://github.com/netty/netty/issues/10670 + lastKnownStream = Integer.MAX_VALUE; + } else { + lastKnownStream = connection().remote().lastStreamCreated(); + } return goAway(ctx, lastKnownStream, errorCode, Http2CodecUtil.toByteBuf(ctx, cause), promise); }
diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java index 4f5af5411e4..da9b1221b3b 100644 --- a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java +++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java @@ -309,7 +309,7 @@ public void serverReceivingInvalidClientPrefaceStringShouldHandleException() thr handler = newHandler(); handler.channelRead(ctx, copiedBuffer("BAD_PREFACE", UTF_8)); ArgumentCaptor<ByteBuf> captor = ArgumentCaptor.forClass(ByteBuf.class); - verify(frameWriter).writeGoAway(eq(ctx), eq(0), eq(PROTOCOL_ERROR.code()), + verify(frameWriter).writeGoAway(eq(ctx), eq(Integer.MAX_VALUE), eq(PROTOCOL_ERROR.code()), captor.capture(), eq(promise)); assertEquals(0, captor.getValue().refCnt()); } @@ -320,7 +320,7 @@ public void serverReceivingHttp1ClientPrefaceStringShouldIncludePreface() throws handler = newHandler(); handler.channelRead(ctx, copiedBuffer("GET /path HTTP/1.1", US_ASCII)); ArgumentCaptor<ByteBuf> captor = ArgumentCaptor.forClass(ByteBuf.class); - verify(frameWriter).writeGoAway(eq(ctx), eq(0), eq(PROTOCOL_ERROR.code()), + verify(frameWriter).writeGoAway(eq(ctx), eq(Integer.MAX_VALUE), eq(PROTOCOL_ERROR.code()), captor.capture(), eq(promise)); assertEquals(0, captor.getValue().refCnt()); assertTrue(goAwayDebugCap.contains("/path")); @@ -336,7 +336,7 @@ public void serverReceivingClientPrefaceStringFollowedByNonSettingsShouldHandleE ByteBuf buf = Unpooled.buffer().writeBytes(connectionPrefaceBuf()).writeZero(10); handler.channelRead(ctx, buf); ArgumentCaptor<ByteBuf> captor = ArgumentCaptor.forClass(ByteBuf.class); - verify(frameWriter, atLeastOnce()).writeGoAway(eq(ctx), eq(0), eq(PROTOCOL_ERROR.code()), + verify(frameWriter, atLeastOnce()).writeGoAway(eq(ctx), eq(Integer.MAX_VALUE), eq(PROTOCOL_ERROR.code()), captor.capture(), eq(promise)); assertEquals(0, captor.getValue().refCnt()); } @@ -384,10 +384,13 @@ public void channelInactiveShouldCloseStreams() throws Exception { public void connectionErrorShouldStartShutdown() throws Exception { handler = newHandler(); Http2Exception e = new Http2Exception(PROTOCOL_ERROR); + // There's no guarantee that lastStreamCreated is correct, as the error could have occurred during header + // processing before it was updated. Thus, it should _not_ be used for the GOAWAY. 
+ // https://github.com/netty/netty/issues/10670 when(remote.lastStreamCreated()).thenReturn(STREAM_ID); handler.exceptionCaught(ctx, e); ArgumentCaptor<ByteBuf> captor = ArgumentCaptor.forClass(ByteBuf.class); - verify(frameWriter).writeGoAway(eq(ctx), eq(STREAM_ID), eq(PROTOCOL_ERROR.code()), + verify(frameWriter).writeGoAway(eq(ctx), eq(Integer.MAX_VALUE), eq(PROTOCOL_ERROR.code()), captor.capture(), eq(promise)); captor.getValue().release(); } diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ControlFrameLimitEncoderTest.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ControlFrameLimitEncoderTest.java index d80ae0bd054..6f0b34fd32c 100644 --- a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ControlFrameLimitEncoderTest.java +++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ControlFrameLimitEncoderTest.java @@ -266,7 +266,7 @@ private void verifyFlushAndClose(int invocations, boolean failed) { verify(ctx, atLeast(invocations)).flush(); verify(ctx, times(invocations)).close(); if (failed) { - verify(writer, times(1)).writeGoAway(eq(ctx), eq(0), eq(ENHANCE_YOUR_CALM.code()), + verify(writer, times(1)).writeGoAway(eq(ctx), eq(Integer.MAX_VALUE), eq(ENHANCE_YOUR_CALM.code()), any(ByteBuf.class), any(ChannelPromise.class)); } }
train
test
"2020-11-04T14:01:08"
"2020-10-09T19:00:52Z"
ejona86
val
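The discussion in the record above settles on option (1): when the decoder hits a connection error before `onHeadersRead(...)` could run, `lastStreamCreated()` may lag behind what is already on the wire, so the handler must advertise the most pessimistic last-stream-id. The sketch below is a condensed paraphrase of the merged selection logic in `Http2ConnectionHandler.goAway(...)` (the class and method names here are illustrative, not the netty source verbatim):

```java
import io.netty.handler.codec.http2.Http2Connection;
import io.netty.handler.codec.http2.Http2Exception;

final class GoAwayLastStreamId {
    private GoAwayLastStreamId() { }

    // On HARD_SHUTDOWN the failing HEADERS may never have been recorded as a
    // created stream, so Integer.MAX_VALUE is the only value guaranteed not to
    // invite a transparent client-side replay of an already-started request.
    static int lastKnownStream(Http2Connection connection, Http2Exception cause) {
        if (cause != null && cause.shutdownHint() == Http2Exception.ShutdownHint.HARD_SHUTDOWN) {
            return Integer.MAX_VALUE;
        }
        return connection.remote().lastStreamCreated();
    }
}
```

The trade-off named in the hints applies: MAX_INT is safe but conservative, since streams that were legitimately racing on the wire will also see themselves as unprocessed.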
netty/netty/10797_10799
netty/netty
netty/netty/10797
netty/netty/10799
[ "keyword_issue_to_pr", "keyword_pr_to_issue" ]
944a0205862e32f1020647bbf4ef4eca5587446e
80c28a3c332ad3c557f5853987b3f3d58c80de69
[ "Hi @ArjanSchouten.\r\n\r\nI tried to build your project and reproduced the problem. (Stangely enough, the build downloads GraalVM 20.2.0 on each run, aren't you aware of a way to make it cache the downloaded file? The download is pretty slow).\r\n\r\nTo be honest, I'm not an expert in GraalVM (nor its Native Builder feature). Anyway, it seems strange that any field store could be possible because `NetUtil.LOCALHOST4` field is final. Also, the build of `netty-testsuite-native-image-client` does not trigger this problem, even if the field is referenced directly from the main class.\r\n\r\nWhen I checked out the current 4.1 branch of Netty (the substitutions are already there), added the 3 dummy setters, built the patched Netty version and changed `netty-common` version to the newly-built version, the error you demonstrate has gone, but the build still failed with the following error:\r\n\r\n```\r\n [creator] [/layers/paketo-buildpacks_spring-boot-native-image/native-image/com.example.springnative.Application:153] analysis: 39,857.64 ms, 3.71 GB\r\n [creator] Error: Classes that should be initialized at run time got initialized during image building:\r\n [creator] io.netty.channel.socket.InternetProtocolFamily the class was requested to be initialized at run time (from feature org.springframework.graalvm.support.InitializationHandler.lambda$registerInitializationDescriptor$4). To see why io.netty.channel.socket.InternetProtocolFamily got initialized use -H:+TraceClassInitialization\r\n [creator] \r\n [creator] Error: Use -H:+ReportExceptionStackTraces to print stacktrace of underlying exception\r\n [creator] Error: Image build request failed with exit status 1\r\n [creator] unable to invoke layer creator\r\n [creator] unable to contribute native-image layer\r\n [creator] error running build\r\n [creator] exit status 1\r\n [creator] ERROR: failed to build: exit status 1\r\n\r\n> Task :bootBuildImage FAILED\r\n```\r\n\r\nAlso, when I downgrade `netty-common` version to the previous release `4.1.53` (which does not contain the substitutions) with\r\n\r\n```\r\n implementation 'io.netty:netty-common:4.1.53.Final'\r\n```\r\n\r\nthen the build fails with the same message as it does with a patched `4.1.54`.\r\n\r\nIt is probable that the suggested addition of setters could help fix the version with substitutions, but it would be great to get rid of all other build errors (like the highlighted problem with `InternetProtocolFamily`) because for now such a fix looks like a blind shot. I still don't understand why a setter should be needed for a final field.", "#10630 was actually authored to make the changes described in the stackoverflow post obsolete. The solutions differ n details, because access to Netty code allowed to avoid using set-and-store strategy and just use setter. So, ideally, having #10630 included in Netty, one doesn't need any modifications suggested in the stackoverflow post.", "BTW, why do you believe that https://github.com/spring-projects-experimental/spring-graalvm-native/issues/184 has any relation to this one?", "Ok, I think I see what causes the problem.\r\n\r\nThe 3 fields (LOCALHOST4, LOCALHOST6, LOCALHOST) are only written in `NetUtil` static initialization block.\r\n\r\nWhen `NetUtil` gets initialized at build time, the initialization block does not get initialized at runtime, so the initialization block does not run (at runtime), and the field is only accessed with reads. 
Getters on substitutions are enough.\r\n\r\nBut when `NetUtil` gets initialized at run-time, the initialization block gets executed and tries to write to the field. There is a substitution, so a setter is looked up, and there it does not exist. BOOM.\r\n\r\nThis is embarrassing, but with #10630 I broke Netty for the cases when `NetUtil` is initialized at run-time.\r\n\r\nI'm preparing a fix, thank you for letting me know @ArjanSchouten ", "@ArjanSchouten could you please check Netty built from #10799 to see whether it fixes the problem for you?", "> This is embarrassing, but with #10630 I broke Netty for the cases when NetUtil is initialized at run-time.\r\n\r\nFYI this change broke native Netty support in Spring Boot 2.4.0. I will adapt our support accordingly.\r\n\r\n> @ArjanSchouten could you please check Netty built from #10799 to see whether it fixes the problem for you?\r\n\r\n@rpuch Thanks for working on a fix, but looking at the changes in #10630, I understand the problem you are trying to solve but maybe we should take a step back on why those changes are needed.\r\n\r\n@normanmaurer Just to give more context, as you know GraalVM native started its life with build time initialization by default, but has since clearly been moving towards runtime initialization by default to favor compatibility, to avoid having 2 distinct behaviors of Java to support in the ecosystem, and because build time init is really required in just a few places where it is critical for performance or footprint:\r\n - [GraalVM native 19.0.0](https://www.graalvm.org/docs/release-notes/19_0/#1900) switched to runtime init by default\r\n - [GraalVM native 20.2.0](https://www.graalvm.org/release-notes/20_2/) moved most of the JDK to runtime init\r\n - I think GraalVM and Project Leyden teams are trying to limit the differences of behavior in order to avoid having 2 different behaviors of Java and a maintainability nightmare well illustrated by those changes\r\n - I think @vjovanov from GraalVM team is about to provide a PR to restore runtime init by default in Netty.\r\n\r\nIn a nutshell, my proposal is to:\r\n - Roll back #10630 changes\r\n - Merge @vjovanov's upcoming PR in order to restore defaults consistent with GraalVM's, with more consistency between JVM and native\r\n - Evolve Netty native support with a goal of limiting substitutions, favoring consistency between JVM and native flavors, making Netty consistent with other libraries that are by default initialized at runtime in GraalVM native\r\n - Use build time init on the few classes where it is needed to perform build time code removal or where it is critical for performance\r\n - Document with @rpuch and others interested how to use Netty with build time init and sync with them in order to allow them to adapt to those changes", "The GraalVM Native Image team is preparing a PR that will revert build-time initialization for Netty. By aggregating all the issues and requests that we get due to this feature, it is clear to us that the default setting should be run time (the same as GraalVM): the maintenance overhead far outweighs the startup performance benefits. \r\n\r\nProjects that want to keep build-time initialization for `netty` can always take the existing config and use it in their projects, or make a separate repo. Of course, all issues related to build-time initialization from GraalVM shall be redirected to those projects or that repo. ", "Thanks all. 
It's working fine since I use the new version of spring-graalvm-native.", "I believe spring graalvm native implemented some workarounds. Something still needs to be fixed, I guess.\r\nThings are either tracked by @vjovanov or @sdeleuze and https://github.com/netty/netty/pull/10799 is also still open. If you feel that this is the issue that keeps track of the things that still need to be done to get rid of the workarounds I can reopen the issue (or someone else can)." ]
[]
"2020-11-15T13:33:28Z"
[]
Netty on GraalVM with substitutions not fully working as expected
### Expected behavior The `NetUtilSubstitutions` implement a set InetAddress, Inet4Address and Inet6Address dummy implementation. ### Actual behavior See logging with exceptions below. I expect no exceptions related to netty. ### Steps to reproduce Have a running docker engine since it uses the buildpacks from spring. ``` git clone git@github.com:ArjanSchouten/spring-native-netty-reproduce.git cd spring-native-netty-reproduce ./gradlew bootBuildImage ``` ### Minimal yet complete reproducer code (or URL to code) See steps to reproduce. ### Netty version 4.1.54.Final ### JVM version (e.g. `java -version`) 11.0 ### OS version (e.g. `uname -a`) Darwin computername 19.6.0 Darwin Kernel Version 19.6.0: Mon Aug 31 22:12:52 PDT 2020; root:xnu-6153.141.2~1/RELEASE_X86_64 x86_64 In PR https://github.com/netty/netty/pull/10630 the NetUtilSubstitutions where added. I try to use Netty with https://github.com/spring-projects-experimental/spring-graalvm-native and it fails with: ``` [creator] 22:25:44.836 [ForkJoinPool-2-worker-15] DEBUG io.netty.buffer.AdvancedLeakAwareByteBuf - -Dio.netty.leakDetection.acquireAndReleaseOnly: false [creator] Warning: class initialization of class io.netty.util.internal.logging.Log4JLogger failed with exception java.lang.NoClassDefFoundError: org/apache/log4j/Priority. This class will be initialized at run time because option --allow-incomplete-classpath is used for image building. Use the option --initialize-at-run-time=io.netty.util.internal.logging.Log4JLogger to explicitly request delayed initialization of this class. [creator] 22:25:49.486 [ForkJoinPool-2-worker-3] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false [creator] 22:25:49.486 [ForkJoinPool-2-worker-3] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 [creator] 22:25:49.605 [ForkJoinPool-2-worker-7] DEBUG io.netty.util.Recycler - -Dio.netty.recycler.maxCapacityPerThread: 4096 [creator] 22:25:49.606 [ForkJoinPool-2-worker-7] DEBUG io.netty.util.Recycler - -Dio.netty.recycler.maxSharedCapacityFactor: 2 [creator] 22:25:49.606 [ForkJoinPool-2-worker-7] DEBUG io.netty.util.Recycler - -Dio.netty.recycler.linkCapacity: 16 [creator] 22:25:49.606 [ForkJoinPool-2-worker-7] DEBUG io.netty.util.Recycler - -Dio.netty.recycler.ratio: 8 [creator] 22:25:49.607 [ForkJoinPool-2-worker-7] DEBUG io.netty.util.Recycler - -Dio.netty.recycler.delayedQueue.ratio: 8 [creator] 22:25:54.431 [ForkJoinPool-2-worker-11] DEBUG io.netty.handler.codec.compression.ZlibCodecFactory - -Dio.netty.noJdkZlibDecoder: false [creator] 22:25:54.432 [ForkJoinPool-2-worker-11] DEBUG io.netty.handler.codec.compression.ZlibCodecFactory - -Dio.netty.noJdkZlibEncoder: false [creator] 22:25:55.835 [ForkJoinPool-2-worker-9] DEBUG io.netty.handler.ssl.OpenSsl - netty-tcnative not in the classpath; OpenSslEngine will be unavailable. 
[creator] 22:25:57.117 [ForkJoinPool-2-worker-7] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 16 [creator] 22:25:58.173 [ForkJoinPool-2-worker-15] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available [creator] [/layers/paketo-buildpacks_spring-boot-native-image/native-image/com.example.springnative.Application:169] analysis: 42,689.86 ms, 2.63 GB [creator] Fatal error:com.oracle.graal.pointsto.util.AnalysisError$ParsingError: Error encountered while parsing io.netty.util.NetUtil.<clinit>() [creator] Parsing context: <no parsing context available> [creator] [creator] at com.oracle.graal.pointsto.util.AnalysisError.parsingError(AnalysisError.java:138) [creator] at com.oracle.graal.pointsto.flow.MethodTypeFlow.doParse(MethodTypeFlow.java:327) [creator] at com.oracle.graal.pointsto.flow.MethodTypeFlow.ensureParsed(MethodTypeFlow.java:302) [creator] at com.oracle.graal.pointsto.flow.MethodTypeFlow.addContext(MethodTypeFlow.java:103) [creator] at com.oracle.graal.pointsto.BigBang$1.run(BigBang.java:428) [creator] at com.oracle.graal.pointsto.util.CompletionExecutor.lambda$execute$0(CompletionExecutor.java:173) [creator] at java.base/java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1426) [creator] at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290) [creator] at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020) [creator] at java.base/java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656) [creator] at java.base/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594) [creator] at java.base/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:183) [creator] Caused by: org.graalvm.compiler.java.BytecodeParser$BytecodeParserError: com.oracle.svm.core.util.VMError$HostedError: Error in @InjectAccessors handling of field io.netty.util.NetUtil.LOCALHOST4, accessors class io.netty.util.NetUtilSubstitutions$NetUtilLocalhost4Accessor: found no method named set or setLOCALHOST4 [creator] at parsing io.netty.util.NetUtil.<clinit>(NetUtil.java:139) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.BytecodeParser.throwParserError(BytecodeParser.java:2580) [creator] at com.oracle.svm.hosted.phases.SharedGraphBuilderPhase$SharedBytecodeParser.throwParserError(SharedGraphBuilderPhase.java:100) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.BytecodeParser.iterateBytecodesForBlock(BytecodeParser.java:3418) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.BytecodeParser.processBlock(BytecodeParser.java:3220) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.BytecodeParser.build(BytecodeParser.java:1090) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.BytecodeParser.buildRootMethod(BytecodeParser.java:984) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.GraphBuilderPhase$Instance.run(GraphBuilderPhase.java:84) [creator] at com.oracle.svm.hosted.phases.SharedGraphBuilderPhase.run(SharedGraphBuilderPhase.java:74) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.phases.Phase.run(Phase.java:49) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.phases.BasePhase.apply(BasePhase.java:214) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.phases.Phase.apply(Phase.java:42) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.phases.Phase.apply(Phase.java:38) [creator] at 
com.oracle.graal.pointsto.flow.MethodTypeFlowBuilder.parse(MethodTypeFlowBuilder.java:223) [creator] at com.oracle.graal.pointsto.flow.MethodTypeFlowBuilder.apply(MethodTypeFlowBuilder.java:357) [creator] at com.oracle.graal.pointsto.flow.MethodTypeFlow.doParse(MethodTypeFlow.java:313) [creator] ... 10 more [creator] Caused by: com.oracle.svm.core.util.VMError$HostedError: Error in @InjectAccessors handling of field io.netty.util.NetUtil.LOCALHOST4, accessors class io.netty.util.NetUtilSubstitutions$NetUtilLocalhost4Accessor: found no method named set or setLOCALHOST4 [creator] at com.oracle.svm.core.util.VMError.shouldNotReachHere(VMError.java:68) [creator] at com.oracle.svm.hosted.phases.InjectedAccessorsPlugin.error(InjectedAccessorsPlugin.java:152) [creator] at com.oracle.svm.hosted.phases.InjectedAccessorsPlugin.handleField(InjectedAccessorsPlugin.java:89) [creator] at com.oracle.svm.hosted.phases.InjectedAccessorsPlugin.handleStoreStaticField(InjectedAccessorsPlugin.java:62) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.BytecodeParser.genPutStatic(BytecodeParser.java:4970) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.BytecodeParser.genPutStatic(BytecodeParser.java:4941) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.BytecodeParser.processBytecode(BytecodeParser.java:5335) [creator] at jdk.internal.vm.compiler/org.graalvm.compiler.java.BytecodeParser.iterateBytecodesForBlock(BytecodeParser.java:3413) [creator] ... 22 more [creator] Error: Image build request failed with exit status 1 ``` I tried to add a substitute myself but it looks like there can only be one substitute. It looks like the author of the stackoverflow answer posted the PR: https://stackoverflow.com/questions/63328298/how-do-you-debug-a-no-instances-of-are-allowed-in-the-image-heap-when-buil (which is great!!) I think that a set implementation like the stackoverflow answer from @rpuch like in part 4 of the answer will solve my issue?! @rpuch can you validate that my assumption is correct? _edit: according to the documentation: https://javadoc.io/doc/org.graalvm.nativeimage/svm/latest/com/oracle/svm/core/annotate/InjectAccessors.html_ > Inject accessors methods for the field denoted using a Alias annotation. All loads and stores to the original field are redirected to accessor methods located in the class provided in the InjectAccessors.value() property. The class must implement the marker interface InjectAccessors. The accessor methods are static methods in that class, named either get / set or getFoo / setFoo for a field name foo. Depending on the kind of accessor (get / set for a static / non-static field), the accessor must have 0, 1, or 2 parameters. If the field is non-static, the first method parameter is the accessed object. The type of the parameter must be the class that declared the field. The null check on the object is performed before the accessor is called, in the same way as the null check for a regular field access. For get-accessors, the return type of the method must be the type of the field. For set-accessors, the last method parameter must be the type of the field and denotes the value stored to the field. **If no set-accessor is provided, stores to the field lead to a fatal error during image generation**. If no get-accessor is provided, loads of the field lead to a fatal error during image generation. The injected accessors must not access the original field. 
Since all field accesses use the accessors, that would lead to a recursive call of the accessors. Instead, data must be stored in either a new static field, or an injected instance field. According to the documentation: is `setLOCALHOST4` being accessed illegally, or do we have to implement the static `set` method as an empty method? (I believe this will be a blocker for https://github.com/spring-projects-experimental/spring-graalvm-native/issues/184 as well)
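To make the accessor contract quoted above concrete, the following is a minimal, hedged sketch of the pattern in question: an aliased field whose loads and stores are redirected to static get/set accessors. The substitution, accessor, and holder class names are illustrative rather than Netty's actual code; only the `com.oracle.svm.core.annotate` API and the `java.net` calls are real.

```java
import com.oracle.svm.core.annotate.Alias;
import com.oracle.svm.core.annotate.InjectAccessors;
import com.oracle.svm.core.annotate.TargetClass;

import java.net.InetAddress;

@TargetClass(className = "io.netty.util.NetUtil")
final class Target_NetUtil_Sketch {
    @Alias
    @InjectAccessors(LocalhostAccessor.class)
    public static InetAddress LOCALHOST;
}

final class LocalhostAccessor {
    // All loads of NetUtil.LOCALHOST are redirected here.
    static InetAddress get() {
        return LocalhostHolder.LOCALHOST;
    }

    // Without this set-accessor, the store performed by NetUtil's static
    // initializer becomes a fatal error at image build time whenever NetUtil
    // is initialized at run time, which is exactly the failure reported above.
    static void set(InetAddress ignored) {
        // no-op: the value is always computed lazily by the holder below
    }
}

final class LocalhostHolder {
    static final InetAddress LOCALHOST = InetAddress.getLoopbackAddress();
}
```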
[ "common/src/main/java/io/netty/util/NetUtilSubstitutions.java", "pom.xml" ]
[ "common/src/main/java/io/netty/util/NetUtilSubstitutions.java", "pom.xml" ]
[ "testsuite-native-image-client-runtime-init/pom.xml", "testsuite-native-image-client-runtime-init/src/main/java/io/netty/testsuite/svm/client/NativeClientWithNettyInitAtRuntime.java", "testsuite-native-image-client-runtime-init/src/main/java/io/netty/testsuite/svm/client/package-info.java", "testsuite-native-image-client/src/main/java/io/netty/testsuite/svm/client/package-info.java" ]
diff --git a/common/src/main/java/io/netty/util/NetUtilSubstitutions.java b/common/src/main/java/io/netty/util/NetUtilSubstitutions.java index 9d9740392ca..7894037e4e9 100644 --- a/common/src/main/java/io/netty/util/NetUtilSubstitutions.java +++ b/common/src/main/java/io/netty/util/NetUtilSubstitutions.java @@ -45,6 +45,10 @@ static Inet4Address get() { // using https://en.wikipedia.org/wiki/Initialization-on-demand_holder_idiom return NetUtilLocalhost4LazyHolder.LOCALHOST4; } + + static void set(Inet4Address ignored) { + // a no-op setter to avoid exceptions when NetUtil is initialized at run-time + } } private static final class NetUtilLocalhost4LazyHolder { @@ -56,6 +60,10 @@ static Inet6Address get() { // using https://en.wikipedia.org/wiki/Initialization-on-demand_holder_idiom return NetUtilLocalhost6LazyHolder.LOCALHOST6; } + + static void set(Inet6Address ignored) { + // a no-op setter to avoid exceptions when NetUtil is initialized at run-time + } } private static final class NetUtilLocalhost6LazyHolder { @@ -67,6 +75,10 @@ static InetAddress get() { // using https://en.wikipedia.org/wiki/Initialization-on-demand_holder_idiom return NetUtilLocalhostLazyHolder.LOCALHOST; } + + static void set(InetAddress ignored) { + // a no-op setter to avoid exceptions when NetUtil is initialized at run-time + } } private static final class NetUtilLocalhostLazyHolder { diff --git a/pom.xml b/pom.xml index 55154158cb5..feb2d7a8bde 100644 --- a/pom.xml +++ b/pom.xml @@ -444,6 +444,7 @@ <module>testsuite-shading</module> <module>testsuite-native-image</module> <module>testsuite-native-image-client</module> + <module>testsuite-native-image-client-runtime-init</module> <module>transport-blockhound-tests</module> <module>microbench</module> <module>bom</module>
diff --git a/testsuite-native-image-client-runtime-init/pom.xml b/testsuite-native-image-client-runtime-init/pom.xml new file mode 100644 index 00000000000..eb22b7aa33d --- /dev/null +++ b/testsuite-native-image-client-runtime-init/pom.xml @@ -0,0 +1,100 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + ~ Copyright 2020 The Netty Project + ~ + ~ The Netty Project licenses this file to you under the Apache License, + ~ version 2.0 (the "License"); you may not use this file except in compliance + ~ with the License. You may obtain a copy of the License at: + ~ + ~ https://www.apache.org/licenses/LICENSE-2.0 + ~ + ~ Unless required by applicable law or agreed to in writing, software + ~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + ~ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + ~ License for the specific language governing permissions and limitations + ~ under the License. + --> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/maven-v4_0_0.xsd"> + + <modelVersion>4.0.0</modelVersion> + <parent> + <groupId>io.netty</groupId> + <artifactId>netty-parent</artifactId> + <version>4.1.55.Final-SNAPSHOT</version> + </parent> + + <artifactId>netty-testsuite-native-image-client-runtime-init</artifactId> + <packaging>jar</packaging> + + <name>Netty/Testsuite/NativeImage/ClientRuntimeInit</name> + + <properties> + <skipJapicmp>true</skipJapicmp> + </properties> + + <dependencies> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>netty-common</artifactId> + <version>${project.version}</version> + </dependency> + </dependencies> + + <profiles> + <profile> + <id>skipTests</id> + <activation> + <property> + <name>skipTests</name> + </property> + </activation> + <properties> + <skipNativeImageTestsuite>true</skipNativeImageTestsuite> + </properties> + </profile> + </profiles> + + <build> + <plugins> + <plugin> + <groupId>com.oracle.substratevm</groupId> + <artifactId>native-image-maven-plugin</artifactId> + <version>${graalvm.version}</version> + <executions> + <execution> + <goals> + <goal>native-image</goal> + </goals> + <phase>package</phase> + </execution> + </executions> + <configuration> + <skip>${skipNativeImageTestsuite}</skip> + <imageName>${project.artifactId}</imageName> + <mainClass>io.netty.testsuite.svm.client.NativeClientWithNettyInitAtRuntime</mainClass> + <buildArgs>--report-unsupported-elements-at-runtime --allow-incomplete-classpath --no-fallback --initialize-at-run-time=io.netty.util.NetUtil</buildArgs> + </configuration> + </plugin> + <plugin> + <groupId>org.codehaus.mojo</groupId> + <artifactId>exec-maven-plugin</artifactId> + <version>1.6.0</version> + <executions> + <!-- This will do a whitesmoke test: if the substitutions are missing the binary will fail to run --> + <!-- If the metadata is missing the build above will fail --> + <execution> + <id>verify-native-image</id> + <phase>verify</phase> + <goals> + <goal>exec</goal> + </goals> + </execution> + </executions> + <configuration> + <skip>${skipNativeImageTestsuite}</skip> + <executable>${project.build.directory}/${project.artifactId}</executable> + </configuration> + </plugin> + </plugins> + </build> +</project> diff --git a/testsuite-native-image-client-runtime-init/src/main/java/io/netty/testsuite/svm/client/NativeClientWithNettyInitAtRuntime.java 
b/testsuite-native-image-client-runtime-init/src/main/java/io/netty/testsuite/svm/client/NativeClientWithNettyInitAtRuntime.java new file mode 100644 index 00000000000..6c4aa22ba80 --- /dev/null +++ b/testsuite-native-image-client-runtime-init/src/main/java/io/netty/testsuite/svm/client/NativeClientWithNettyInitAtRuntime.java @@ -0,0 +1,36 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.testsuite.svm.client; + +import io.netty.util.NetUtil; + +/** + * A client that triggers runtime initialization of NetUtil when + * built to a native image. + */ +public final class NativeClientWithNettyInitAtRuntime { + /** + * Main entry point (not instantiable) + */ + private NativeClientWithNettyInitAtRuntime() { + } + + public static void main(String[] args) { + System.out.println(NetUtil.LOCALHOST4); + System.out.println(NetUtil.LOCALHOST6); + System.out.println(NetUtil.LOCALHOST); + } +} diff --git a/testsuite-native-image-client-runtime-init/src/main/java/io/netty/testsuite/svm/client/package-info.java b/testsuite-native-image-client-runtime-init/src/main/java/io/netty/testsuite/svm/client/package-info.java new file mode 100644 index 00000000000..c5d8a74b31e --- /dev/null +++ b/testsuite-native-image-client-runtime-init/src/main/java/io/netty/testsuite/svm/client/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * A client that triggers runtime initialization of NetUtil when + * built to a native image. + */ +package io.netty.testsuite.svm.client; diff --git a/testsuite-native-image-client/src/main/java/io/netty/testsuite/svm/client/package-info.java b/testsuite-native-image-client/src/main/java/io/netty/testsuite/svm/client/package-info.java index 0426d41985c..7b575e68359 100644 --- a/testsuite-native-image-client/src/main/java/io/netty/testsuite/svm/client/package-info.java +++ b/testsuite-native-image-client/src/main/java/io/netty/testsuite/svm/client/package-info.java @@ -15,6 +15,6 @@ */ /** - * A hello world server that should be compiled to native. + * A client that uses netty-dns and gets compiled to a native image. */ package io.netty.testsuite.svm.client;
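For readers unfamiliar with the idiom the patch comments link to, this is a self-contained sketch of an initialization-on-demand holder; the class names and the computed value are illustrative, not Netty's.

```java
import java.net.InetAddress;

final class LazyLocalhost {
    private LazyLocalhost() {
    }

    private static final class Holder {
        // The JVM runs this initializer only when Holder is first referenced,
        // i.e. on the first call to get(), and class initialization is
        // guaranteed by the JLS to be thread-safe.
        static final InetAddress VALUE = InetAddress.getLoopbackAddress();
    }

    static InetAddress get() {
        return Holder.VALUE;
    }
}
```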
train
test
"2020-11-11T06:47:51"
"2020-11-13T22:48:56Z"
ArjanSchouten
val
netty/netty/10777_10807
netty/netty
netty/netty/10777
netty/netty/10807
[ "keyword_pr_to_issue" ]
8b2ed77042b5a3023974d7a6d2f22fc0d3edef34
3354c7b0bf977cabfd0097f80973a9175788a8a6
[ "Enable `DEBUG` log level.", "if you mean:\r\n\r\np.addLast(new LoggingHandler(LogLevel.DEBUG));\r\n\r\ndo not help. No information is printed. **(using non-serialized object - that dont work)**\r\n\r\n--------------------------------------------------------------------------------------------\r\n--------------------------------------------------------------------------------------------\r\n**BELOW IS WORKING CASE(when i use serialized object), BUT PROBABLY FOUND SOME new BUG**:\r\n\r\nAnyway im not sure if i found some BUG or what.\r\nim having Client+Server in same application(just in 2 threads),\r\n\r\nby default without logging i have:\r\n\r\n> lis 07, 2020 1:08:56 PM io.netty.handler.logging.LoggingHandler channelRegistered\r\n> INFO: [id: 0x70dbfa79] REGISTERED\r\n> lis 07, 2020 1:08:56 PM io.netty.handler.logging.LoggingHandler bind\r\n> INFO: [id: 0x70dbfa79] BIND: 0.0.0.0/0.0.0.0:6143\r\n> lis 07, 2020 1:08:56 PM io.netty.handler.logging.LoggingHandler channelActive\r\n> INFO: [id: 0x70dbfa79, L:/0:0:0:0:0:0:0:0:6143] ACTIVE\r\n> lis 07, 2020 1:08:56 PM io.netty.handler.logging.LoggingHandler channelRead\r\n> INFO: [id: 0x70dbfa79, L:/0:0:0:0:0:0:0:0:6143] READ: [id: 0x1ff7f916, L:/127.0.0.1:6143 - R:/127.0.0.1:59428]\r\n> lis 07, 2020 1:08:56 PM io.netty.handler.logging.LoggingHandler channelReadComplete\r\n> INFO: [id: 0x70dbfa79, L:/0:0:0:0:0:0:0:0:6143] READ COMPLETE\r\n> Server Channel Active\r\n> ByteBuf out size: 256\r\n\r\nwhen i add for example \r\n\r\np.addLast(new LoggingHandler(LogLevel.WARN));\r\n\r\ni have:\r\n\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler channelRegistered\r\n> INFO: [id: 0x47da6a0b] REGISTERED\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler channelRegistered\r\n> WARNING: [id: 0x107f76a8] REGISTERED\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler connect\r\n> WARNING: [id: 0x107f76a8] CONNECT: localhost/127.0.0.1:6143\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler bind\r\n> INFO: [id: 0x47da6a0b] BIND: 0.0.0.0/0.0.0.0:6143\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler channelActive\r\n> INFO: [id: 0x47da6a0b, L:/0:0:0:0:0:0:0:0:6143] ACTIVE\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler channelActive\r\n> WARNING: [id: 0x107f76a8, L:/127.0.0.1:59414 - R:localhost/127.0.0.1:6143] ACTIVE\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler channelRead\r\n> INFO: [id: 0x47da6a0b, L:/0:0:0:0:0:0:0:0:6143] READ: [id: 0x1c26ccdb, L:/127.0.0.1:6143 - R:/127.0.0.1:59414]\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler channelReadComplete\r\n> INFO: [id: 0x47da6a0b, L:/0:0:0:0:0:0:0:0:6143] READ COMPLETE\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler channelRegistered\r\n> WARNING: [id: 0x1c26ccdb, L:/127.0.0.1:6143 - R:/127.0.0.1:59414] REGISTERED\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler channelActive\r\n> WARNING: [id: 0x1c26ccdb, L:/127.0.0.1:6143 - R:/127.0.0.1:59414] ACTIVE\r\n> Server Channel Active\r\n> ByteBuf out size: 256\r\n> lis 07, 2020 1:05:02 PM io.netty.handler.logging.LoggingHandler write\r\n> WARNING: [id: 0x1c26ccdb, L:/127.0.0.1:6143 - R:/127.0.0.1:59414] WRITE: 98B\r\n> +-------------------------------------------------+\r\n> | 0 1 2 3 4 5 6 7 8 9 a b c d e f |\r\n> +--------+-------------------------------------------------+----------------+\r\n> \r\n\r\nand when i set \r\n\r\np.addLast(new 
LoggingHandler(LogLevel.ERROR));\r\n\r\ni see:\r\n\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler channelRegistered\r\n> INFO: [id: 0xc67da6c8] REGISTERED\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler channelRegistered\r\n> SEVERE: [id: 0x80004f41] REGISTERED\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler connect\r\n> SEVERE: [id: 0x80004f41] CONNECT: localhost/127.0.0.1:6143\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler bind\r\n> INFO: [id: 0xc67da6c8] BIND: 0.0.0.0/0.0.0.0:6143\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler channelActive\r\n> INFO: [id: 0xc67da6c8, L:/0:0:0:0:0:0:0:0:6143] ACTIVE\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler channelActive\r\n> SEVERE: [id: 0x80004f41, L:/127.0.0.1:59420 - R:localhost/127.0.0.1:6143] ACTIVE\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler channelRead\r\n> INFO: [id: 0xc67da6c8, L:/0:0:0:0:0:0:0:0:6143] READ: [id: 0x223834f5, L:/127.0.0.1:6143 - R:/127.0.0.1:59420]\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler channelReadComplete\r\n> INFO: [id: 0xc67da6c8, L:/0:0:0:0:0:0:0:0:6143] READ COMPLETE\r\n> Server Channel Active\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler channelRegistered\r\n> SEVERE: [id: 0x223834f5, L:/127.0.0.1:6143 - R:/127.0.0.1:59420] REGISTERED\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler channelActive\r\n> SEVERE: [id: 0x223834f5, L:/127.0.0.1:6143 - R:/127.0.0.1:59420] ACTIVE\r\n> ByteBuf out size: 256\r\n> lis 07, 2020 1:06:43 PM io.netty.handler.logging.LoggingHandler write\r\n> SEVERE: [id: 0x223834f5, L:/127.0.0.1:6143 - R:/127.0.0.1:59420] WRITE: 98B\r\n> +-------------------------------------------------+\r\n> | 0 1 2 3 4 5 6 7 8 9 a b c d e f |\r\n> +--------+-------------------------------------------------+----------------+\r\n\r\nWhile EVERYTHING WORK FINE (since i use serialialized object in this example)\r\n\r\nhow can i know if this is SEVERE or WARN or INFO, while it just depends what level i set, it anyway print same info that is harmless...\r\n\r\n**or myabe i just dont understand what \"LoggingHandler\" is for**\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "This should help: https://github.com/netty/netty/pull/10807" ]
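On the reporter's last question: the `LogLevel` passed to `LoggingHandler` is only the level the handler logs at, not a severity judgement about the events themselves, so the same harmless REGISTERED/WRITE events appear as WARNING or SEVERE depending purely on the configured level. A minimal sketch, with an illustrative initializer class name:

```java
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;

public class LoggingInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        // Every pipeline event (REGISTERED, CONNECT, WRITE, ...) is logged
        // at DEBUG here, regardless of whether it represents a problem.
        ch.pipeline().addLast(new LoggingHandler(LogLevel.DEBUG));
    }
}
```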
[ "@chrisvest this doesn't work as this will block the eventloop. Use a `ChannelFutureListener` ", "please revert this change ^^" ]
"2020-11-18T13:00:07Z"
[]
suggestion: netty/example/objectecho should inform about a missing "implements Serializable" on the client side
Hello, as a new user I wanted to try the ObjectEcho "Example", and was confused when I sent a **Serializable object** that **contained** a **non-serializable object** in some nested field (which I didn't notice earlier): it was not sent at all, and I received no information at all. It took me time to debug and notice that one of its fields was an object that didn't implement Serializable. That's why I'd like to suggest adding some new-user-friendly code to the "ObjectEcho Example" to report this, for example: **1)** some "ObjectEcho Example" class extending ObjectEncoder: @Override public boolean acceptOutboundMessage(Object msg) throws Exception { if (!super.acceptOutboundMessage(msg)) { // some information } return true; } **OR** (I didn't try enabling asserts, so if you use some, this option might be better) **2)** **Just add a comment about it**, to let new users know they will receive NO information when trying to send complex objects that contain a non-serializable object at some depth. **Please note this refers only to the "ObjectEcho Example" and the Encoder (so the client side).** Just thought it could be helpful for new users having the same problem. ### Expected behavior - some warning/error/information in the "ObjectEcho Example" if Serializable is missing ### Actual behavior - the demo gives NO information if the sent object does not match; client and server print **no information/warning/error at all.** ### Steps to reproduce - in the "ObjectEcho Example", try to send a Serializable object with a non-serializable object in it; it will not be sent and no information will be printed anywhere. Also, the comments do not mention anything about it. ### Netty version 4.1.43 And of course, hello to Netty users/devs ;) Please close this if the "ObjectEcho Example" is not meant for new users and is for testing purposes, or if you think it's not needed, since it's just a suggestion.
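The silent failure described above comes from the example discarding the future returned by `writeAndFlush()`; the serialization error only surfaces through that future. A minimal, hedged sketch of one way to report it, with a hypothetical handler name:

```java
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

import java.io.Serializable;

public class VerboseEchoClientHandler extends ChannelInboundHandlerAdapter {
    private final Serializable firstMessage;

    public VerboseEchoClientHandler(Serializable firstMessage) {
        this.firstMessage = firstMessage;
    }

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        ctx.writeAndFlush(firstMessage).addListener((ChannelFutureListener) future -> {
            if (!future.isSuccess()) {
                // A nested non-Serializable field typically fails here as an
                // EncoderException caused by java.io.NotSerializableException.
                future.cause().printStackTrace();
            }
        });
    }
}
```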
[ "example/src/main/java/io/netty/example/objectecho/ObjectEchoClientHandler.java" ]
[ "example/src/main/java/io/netty/example/objectecho/ObjectEchoClientHandler.java" ]
[]
diff --git a/example/src/main/java/io/netty/example/objectecho/ObjectEchoClientHandler.java b/example/src/main/java/io/netty/example/objectecho/ObjectEchoClientHandler.java index 07f2f6bcf01..314747180fb 100644 --- a/example/src/main/java/io/netty/example/objectecho/ObjectEchoClientHandler.java +++ b/example/src/main/java/io/netty/example/objectecho/ObjectEchoClientHandler.java @@ -15,12 +15,16 @@ */ package io.netty.example.objectecho; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import java.util.ArrayList; import java.util.List; +import static io.netty.channel.ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE; + /** * Handler implementation for the object echo client. It initiates the * ping-pong traffic between the object echo client and server by sending the @@ -43,7 +47,8 @@ public ObjectEchoClientHandler() { @Override public void channelActive(ChannelHandlerContext ctx) { // Send the first message if this handler is a client-side handler. - ctx.writeAndFlush(firstMessage); + ChannelFuture future = ctx.writeAndFlush(firstMessage); + future.addListener(FIRE_EXCEPTION_ON_FAILURE); // Let object serialisation exceptions propagate. } @Override
null
test
test
"2020-11-16T09:03:37"
"2020-11-06T06:53:27Z"
oxplay2
val
netty/netty/10816_10819
netty/netty
netty/netty/10816
netty/netty/10819
[ "keyword_pr_to_issue" ]
02cd85181aef2502c2f90b978453d210d08781ef
b27f0fccced002e700d8f3bb7a88c1349d83d8ee
[ "@normanmaurer I wrapped the `addListener` call in a try-catch to extract the stacktrace:\r\n\r\n```\r\n2020-11-30 12:54:06,441 ERROR [epollEventLoopGroup-3-5] io.netty.util.internal.logging.AbstractInternalLogger | Unexpected exception: \r\njava.lang.IllegalStateException: void future\r\n\tat io.netty.channel.VoidChannelPromise.fail(VoidChannelPromise.java:198)\r\n\tat io.netty.channel.VoidChannelPromise.addListener(VoidChannelPromise.java:58)\r\n\tat io.netty.channel.VoidChannelPromise.addListener(VoidChannelPromise.java:26)\r\n\tat io.netty.handler.codec.http2.Http2ConnectionHandler.closeStream(Http2ConnectionHandler.java:623)\r\n\tat io.netty.handler.codec.http2.Http2ConnectionHandler.closeStreamLocal(Http2ConnectionHandler.java:587)\r\n\tat io.netty.handler.codec.http2.DefaultHttp2ConnectionEncoder$FlowControlledBase.writeComplete(DefaultHttp2ConnectionEncoder.java:622)\r\n\tat io.netty.handler.codec.http2.DefaultHttp2RemoteFlowController$FlowState.writeAllocatedBytes(DefaultHttp2RemoteFlowController.java:368)\r\n\tat io.netty.handler.codec.http2.DefaultHttp2RemoteFlowController$WritabilityMonitor.write(DefaultHttp2RemoteFlowController.java:547)\r\n\tat io.netty.handler.codec.http2.WeightedFairQueueByteDistributor$State.write(WeightedFairQueueByteDistributor.java:604)\r\n\tat io.netty.handler.codec.http2.WeightedFairQueueByteDistributor.distribute(WeightedFairQueueByteDistributor.java:292)\r\n\tat io.netty.handler.codec.http2.WeightedFairQueueByteDistributor.distributeToChildren(WeightedFairQueueByteDistributor.java:325)\r\n\tat io.netty.handler.codec.http2.WeightedFairQueueByteDistributor.distribute(WeightedFairQueueByteDistributor.java:273)\r\n\tat io.netty.handler.codec.http2.DefaultHttp2RemoteFlowController$WritabilityMonitor.writePendingBytes(DefaultHttp2RemoteFlowController.java:627)\r\n\tat io.netty.handler.codec.http2.DefaultHttp2RemoteFlowController.writePendingBytes(DefaultHttp2RemoteFlowController.java:267)\r\n\tat io.netty.handler.codec.http2.Http2ConnectionHandler.flush(Http2ConnectionHandler.java:189)\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:750)\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:765)\r\n\tat io.netty.channel.AbstractChannelHandlerContext.write(AbstractChannelHandlerContext.java:790)\r\n\tat io.netty.channel.AbstractChannelHandlerContext.writeAndFlush(AbstractChannelHandlerContext.java:758)\r\n at (my handler) ...\r\n```" ]
[ "Based on the other tests I wanted to do something like this:\r\n```java\r\n verify(voidPromise, never()).addListener(any(GenericFutureListener.class));\r\n verify(ctx.channel(), times(1)).newPromise();\r\n verify(promise, times(1)).addListener(any(GenericFutureListener.class));\r\n```\r\nI'm not used to this testing suite, so please advise how can I get these checks to work... :pray: " ]
"2020-11-24T11:03:07Z"
[]
Cannot write/flush HTTP/2 with voidPromise
### Expected behavior Can `write` / `writeAndFlush` frames that may close the stream from an HTTP/2 context ### Actual behavior `Http2ConnectionHandler#closeStream` tries to `addListener` on the future even if it's void; if it is, that call fails. ### Steps to reproduce `writeAndFlush` an Http2DataFrame to the context with Http2FrameCodec present ### Minimal yet complete reproducer code (or URL to code) Ran into this using netty [from Clojure](https://github.com/valerauko/iny/blob/7e71ddf649d5b64626139beb784eabf9b5ad88a5/src/iny/http2/handler.clj#L86-L98). ### Netty version 4.1.54 ### JVM version (e.g. `java -version`) ### OS version (e.g. `uname -a`)
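A hedged sketch of the reported call shape and the straightforward workaround; the enclosing class and the `data` variable are illustrative, and on a shared connection channel the frame would additionally need its `Http2FrameStream` set:

```java
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http2.DefaultHttp2DataFrame;

final class Http2WriteSketch {
    static void writeBody(ChannelHandlerContext ctx, ByteBuf data) {
        // Fails before the fix: once endOfStream is written, closeStream()
        // calls addListener() on the void promise and throws.
        // ctx.writeAndFlush(new DefaultHttp2DataFrame(data, true), ctx.voidPromise());

        // Workaround: pass a real promise so listeners can be attached.
        ctx.writeAndFlush(new DefaultHttp2DataFrame(data, true), ctx.newPromise());
    }
}
```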
[ "codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java" ]
[ "codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java" ]
[ "codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java" ]
diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java index 97a7239fb86..d8611c71b8e 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java @@ -120,6 +120,7 @@ public void remoteSettings(Http2Settings settings) throws Http2Exception { @Override public ChannelFuture writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data, int padding, final boolean endOfStream, ChannelPromise promise) { + promise = promise.unvoid(); final Http2Stream stream; try { stream = requireStream(streamId);
diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java index da9b1221b3b..0eb34f47200 100644 --- a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java +++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java @@ -691,6 +691,14 @@ public ChannelFuture answer(InvocationOnMock invocation) throws Throwable { verify(pipeline).fireExceptionCaught(cause); } + @Test + public void canCloseStreamWithVoidPromise() throws Exception { + handler = newHandler(); + handler.closeStream(stream, ctx.voidPromise()); + verify(stream, times(1)).close(); + verifyNoMoreInteractions(stream); + } + @Test public void channelReadCompleteTriggersFlush() throws Exception { handler = newHandler();
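The one-line fix above works because `ChannelPromise.unvoid()` returns a promise that accepts listeners even when the caller passed `ctx.voidPromise()`, while still propagating failures to the pipeline. A minimal sketch of that behavior, with an illustrative listener body:

```java
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;

final class UnvoidSketch {
    static ChannelPromise listenable(ChannelHandlerContext ctx) {
        ChannelPromise promise = ctx.voidPromise().unvoid();
        // Safe: unvoid() returned a real promise, yet failures still reach
        // the pipeline's exceptionCaught(), matching void-promise semantics.
        promise.addListener((ChannelFutureListener) f -> {
            // react to completion here
        });
        return promise;
    }
}
```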
train
test
"2020-11-23T19:20:18"
"2020-11-24T10:00:23Z"
valerauko
val
netty/netty/10837_10844
netty/netty
netty/netty/10837
netty/netty/10844
[ "keyword_pr_to_issue" ]
44f85bba5f47df885dbbe5243d008220bfbab5ca
8a7c580bf679628617fde26c3913fa62f0895e18
[ "We tell block hound to permit blocking calls in `SSLEngine.wrap`: https://github.com/netty/netty/blob/4.1/common/src/main/java/io/netty/util/internal/Hidden.java#L108-L112\r\n\r\nDoes block hound also bark when you use a different `SSLEngine`, like `ReferenceCountedOpenSslEngine`?", "@chrisvest Isn't it related to `unwrap` and not `wrap`\r\n\r\n```\r\n builder.allowBlockingCallsInside(\r\n \"sun.security.ssl.SSLEngineImpl\",\r\n \"unwrap\");\r\n```", "@violetagg Oh, right. I misread it.", "@violetagg are you interested in providing a PR ?", "> @violetagg are you interested in providing a PR ?\r\n\r\nok" ]
[]
"2020-12-06T11:59:35Z"
[]
Blocking call found with Spring Webflux and r2dbc-mysql driver.
### Overview Hi, I am using Spring Webflux 5.3.1 (which internally uses Netty) and I am integrating it with a MySQL R2DBC driver (r2dbc-mysql) to make a reactive application. It works fine and everything looks good. But when I used BlockHound (a tool created by the Spring team to detect blocking calls) to make sure that my API is non-blocking end-to-end, it reported a blocking call. ### Expected behavior It should have run smoothly with/without BlockHound. ### Actual behavior BlockHound says that there is a blocking call. Please find below the full stack trace, and beneath it the part of the stack trace where BlockHound reports the blocking call. ``` 2020-12-03 15:24:23,442 INFO [main] o.s.b.w.e.n.NettyWebServer:109:[, ] - Netty started on port(s): 8080 2020-12-03 15:24:23,461 INFO [main] c.m.d.DdReactiveApplication:61:[, ] - Started DdReactiveApplication in 3.946 seconds (JVM running for 4.569) 2020-12-03 15:24:35,151 ERROR [reactor-tcp-epoll-2] o.s.b.a.w.r.e.AbstractErrorWebExceptionHandler:122:[, ] - [f05df953-2] 500 Server Error for HTTP GET "/getAllExpFromDB" org.springframework.dao.DataAccessResourceFailureException: executeMany; SQL [SELECT dd_reactive.* FROM dd_reactive]; null; nested exception is dev.miku.r2dbc.mysql.client.MySqlConnectionException at org.springframework.r2dbc.connection.ConnectionFactoryUtils.convertR2dbcException(ConnectionFactoryUtils.java:226) Suppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException: Error has been observed at the following site(s): |_ checkpoint ⇢ Handler com.mmt.ddreactive.controller.DDController#getAllExperimentsFromDB(String) [DispatcherHandler] |_ checkpoint ⇢ HTTP GET "/getAllExpFromDB" [ExceptionHandlingWebHandler] Stack trace: at org.springframework.r2dbc.connection.ConnectionFactoryUtils.convertR2dbcException(ConnectionFactoryUtils.java:226) at org.springframework.r2dbc.core.DefaultDatabaseClient.lambda$inConnectionMany$8(DefaultDatabaseClient.java:147) at reactor.core.publisher.Flux.lambda$onErrorMap$28(Flux.java:6579) at reactor.core.publisher.Flux.lambda$onErrorResume$29(Flux.java:6632) at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onError(FluxOnErrorResume.java:94) at reactor.core.publisher.FluxUsingWhen$UsingWhenSubscriber.deferredError(FluxUsingWhen.java:411) at reactor.core.publisher.FluxUsingWhen$RollbackInner.onComplete(FluxUsingWhen.java:488) at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onComplete(Operators.java:2018) at reactor.core.publisher.FluxPeek$PeekSubscriber.onComplete(FluxPeek.java:259) at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onComplete(Operators.java:2018) at reactor.core.publisher.Operators$MonoSubscriber.onComplete(Operators.java:1826) at reactor.core.publisher.MonoIgnoreThen$ThenAcceptInner.onComplete(MonoIgnoreThen.java:323) at reactor.core.publisher.Operators$MonoSubscriber.onComplete(Operators.java:1826) at reactor.core.publisher.MonoIgnoreThen$ThenAcceptInner.onComplete(MonoIgnoreThen.java:323) at reactor.pool.SimpleDequePool$QueuePoolRecyclerInner.onComplete(SimpleDequePool.java:624) at reactor.core.publisher.Operators.complete(Operators.java:135) at reactor.core.publisher.MonoEmpty.subscribe(MonoEmpty.java:45) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at reactor.pool.SimpleDequePool$QueuePoolRecyclerMono.subscribe(SimpleDequePool.java:736) at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52) at reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.drain(MonoIgnoreThen.java:154) 
at reactor.core.publisher.MonoIgnoreThen.subscribe(MonoIgnoreThen.java:56) at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52) at reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.drain(MonoIgnoreThen.java:154) at reactor.core.publisher.MonoIgnoreThen.subscribe(MonoIgnoreThen.java:56) at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64) at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onError(FluxOnErrorResume.java:103) at reactor.core.publisher.MonoIgnoreElements$IgnoreElementsSubscriber.onError(MonoIgnoreElements.java:83) at reactor.core.publisher.FluxMap$MapSubscriber.onError(FluxMap.java:132) at reactor.core.publisher.FluxFilter$FilterSubscriber.onError(FluxFilter.java:157) at reactor.core.publisher.FluxFilter$FilterConditionalSubscriber.onError(FluxFilter.java:291) at reactor.core.publisher.FluxMap$MapConditionalSubscriber.onError(FluxMap.java:259) at reactor.core.publisher.Operators.error(Operators.java:196) at reactor.core.publisher.MonoError.subscribe(MonoError.java:52) at reactor.core.publisher.MonoDeferContextual.subscribe(MonoDeferContextual.java:55) at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64) at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at reactor.core.publisher.FluxUsingWhen$UsingWhenSubscriber.onError(FluxUsingWhen.java:377) at reactor.core.publisher.FluxFlatMap$FlatMapMain.checkTerminated(FluxFlatMap.java:841) at reactor.core.publisher.FluxFlatMap$FlatMapMain.drainLoop(FluxFlatMap.java:607) at reactor.core.publisher.FluxFlatMap$FlatMapMain.drain(FluxFlatMap.java:587) at reactor.core.publisher.FluxFlatMap$FlatMapMain.onError(FluxFlatMap.java:450) at reactor.core.publisher.FluxMap$MapSubscriber.onError(FluxMap.java:132) at reactor.core.publisher.FluxMap$MapSubscriber.onError(FluxMap.java:132) at reactor.core.publisher.FluxWindowPredicate$WindowPredicateMain.signalAsyncError(FluxWindowPredicate.java:341) at reactor.core.publisher.FluxWindowPredicate$WindowPredicateMain.checkTerminated(FluxWindowPredicate.java:523) at reactor.core.publisher.FluxWindowPredicate$WindowPredicateMain.drainLoop(FluxWindowPredicate.java:475) at reactor.core.publisher.FluxWindowPredicate$WindowPredicateMain.drain(FluxWindowPredicate.java:419) at reactor.core.publisher.FluxWindowPredicate$WindowPredicateMain.onError(FluxWindowPredicate.java:278) at reactor.core.publisher.FluxHandleFuseable$HandleFuseableSubscriber.onError(FluxHandleFuseable.java:219) at reactor.core.publisher.FluxContextWrite$ContextWriteSubscriber.onError(FluxContextWrite.java:121) at dev.miku.r2dbc.mysql.util.DiscardOnCancelSubscriber.onError(DiscardOnCancelSubscriber.java:87) at reactor.core.publisher.FluxPeekFuseable$PeekConditionalSubscriber.onError(FluxPeekFuseable.java:903) at reactor.core.publisher.MonoFlatMapMany$FlatMapManyInner.onError(MonoFlatMapMany.java:255) at reactor.core.publisher.FluxPeek$PeekSubscriber.onError(FluxPeek.java:221) at reactor.core.publisher.FluxHandle$HandleSubscriber.onError(FluxHandle.java:202) at reactor.core.publisher.FluxPeekFuseable$PeekConditionalSubscriber.onError(FluxPeekFuseable.java:903) at reactor.core.publisher.EmitterProcessor.checkTerminated(EmitterProcessor.java:535) at reactor.core.publisher.EmitterProcessor.drain(EmitterProcessor.java:402) at 
reactor.core.publisher.EmitterProcessor.tryEmitError(EmitterProcessor.java:238) at reactor.core.publisher.InternalManySink.emitError(InternalManySink.java:98) at reactor.core.publisher.EmitterProcessor.onError(EmitterProcessor.java:227) at dev.miku.r2dbc.mysql.client.ReactorNettyClient.drainError(ReactorNettyClient.java:254) at dev.miku.r2dbc.mysql.client.ReactorNettyClient.resumeError(ReactorNettyClient.java:214) at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onError(FluxOnErrorResume.java:94) at reactor.core.publisher.FluxPeek$PeekSubscriber.onError(FluxPeek.java:221) at reactor.netty.channel.FluxReceive.terminateReceiver(FluxReceive.java:447) at reactor.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:256) at reactor.netty.channel.FluxReceive.onInboundError(FluxReceive.java:435) at reactor.netty.channel.ChannelOperations.onInboundError(ChannelOperations.java:467) at reactor.netty.channel.ChannelOperationsHandler.exceptionCaught(ChannelOperationsHandler.java:127) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:302) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:281) at io.netty.channel.AbstractChannelHandlerContext.fireExceptionCaught(AbstractChannelHandlerContext.java:273) at io.netty.handler.ssl.SslHandler.exceptionCaught(SslHandler.java:1151) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:302) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:752) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:742) at io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:728) at dev.miku.r2dbc.mysql.client.WriteSubscriber.onComplete(WriteSubscriber.java:72) at reactor.core.publisher.FluxCreate$BaseSink.complete(FluxCreate.java:439) at reactor.core.publisher.FluxCreate$BufferAsyncSink.drain(FluxCreate.java:784) at reactor.core.publisher.FluxCreate$BufferAsyncSink.complete(FluxCreate.java:732) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drainLoop(FluxCreate.java:240) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drain(FluxCreate.java:206) at reactor.core.publisher.FluxCreate$SerializedFluxSink.complete(FluxCreate.java:197) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onComplete(LargeMessageSlicer.java:99) at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2348) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onSubscribe(LargeMessageSlicer.java:48) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:54) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at dev.miku.r2dbc.mysql.message.client.LargeClientMessage.lambda$encode$0(LargeClientMessage.java:52) at reactor.core.publisher.FluxCreate.subscribe(FluxCreate.java:94) at reactor.core.publisher.Flux.subscribe(Flux.java:8095) at dev.miku.r2dbc.mysql.client.MessageDuplexCodec.write(MessageDuplexCodec.java:111) at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:717) at io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:764) at io.netty.channel.AbstractChannelHandlerContext$WriteTask.run(AbstractChannelHandlerContext.java:1071) at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164) at 
io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472) at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:384) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(Thread.java:748) Caused by: dev.miku.r2dbc.mysql.client.MySqlConnectionException: null at dev.miku.r2dbc.mysql.client.ClientExceptions.wrap(ClientExceptions.java:47) Suppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException: Error has been observed at the following site(s): |_ checkpoint ⇢ SQL "SELECT dd_reactive.* FROM dd_reactive" [DatabaseClient] Stack trace: at dev.miku.r2dbc.mysql.client.ClientExceptions.wrap(ClientExceptions.java:47) at dev.miku.r2dbc.mysql.client.ReactorNettyClient.resumeError(ReactorNettyClient.java:214) at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onError(FluxOnErrorResume.java:94) at reactor.core.publisher.FluxPeek$PeekSubscriber.onError(FluxPeek.java:221) at reactor.netty.channel.FluxReceive.terminateReceiver(FluxReceive.java:447) at reactor.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:256) at reactor.netty.channel.FluxReceive.onInboundError(FluxReceive.java:435) at reactor.netty.channel.ChannelOperations.onInboundError(ChannelOperations.java:467) at reactor.netty.channel.ChannelOperationsHandler.exceptionCaught(ChannelOperationsHandler.java:127) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:302) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:281) at io.netty.channel.AbstractChannelHandlerContext.fireExceptionCaught(AbstractChannelHandlerContext.java:273) at io.netty.handler.ssl.SslHandler.exceptionCaught(SslHandler.java:1151) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:302) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:752) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:742) at io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:728) at dev.miku.r2dbc.mysql.client.WriteSubscriber.onComplete(WriteSubscriber.java:72) at reactor.core.publisher.FluxCreate$BaseSink.complete(FluxCreate.java:439) at reactor.core.publisher.FluxCreate$BufferAsyncSink.drain(FluxCreate.java:784) at reactor.core.publisher.FluxCreate$BufferAsyncSink.complete(FluxCreate.java:732) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drainLoop(FluxCreate.java:240) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drain(FluxCreate.java:206) at reactor.core.publisher.FluxCreate$SerializedFluxSink.complete(FluxCreate.java:197) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onComplete(LargeMessageSlicer.java:99) at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2348) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onSubscribe(LargeMessageSlicer.java:48) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:54) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at dev.miku.r2dbc.mysql.message.client.LargeClientMessage.lambda$encode$0(LargeClientMessage.java:52) at reactor.core.publisher.FluxCreate.subscribe(FluxCreate.java:94) at 
reactor.core.publisher.Flux.subscribe(Flux.java:8095) at dev.miku.r2dbc.mysql.client.MessageDuplexCodec.write(MessageDuplexCodec.java:111) at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:717) at io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:764) at io.netty.channel.AbstractChannelHandlerContext$WriteTask.run(AbstractChannelHandlerContext.java:1071) at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164) at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472) at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:384) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(Thread.java:748) Caused by: reactor.blockhound.BlockingOperationError: Blocking call! java.io.FileInputStream#readBytes at java.io.FileInputStream.readBytes(FileInputStream.java) at java.io.FileInputStream.read(FileInputStream.java:255) at sun.security.provider.NativePRNG$RandomIO.readFully(NativePRNG.java:424) at sun.security.provider.NativePRNG$RandomIO.ensureBufferValid(NativePRNG.java:526) at sun.security.provider.NativePRNG$RandomIO.implNextBytes(NativePRNG.java:545) at sun.security.provider.NativePRNG$RandomIO.access$400(NativePRNG.java:331) at sun.security.provider.NativePRNG.engineNextBytes(NativePRNG.java:220) at java.security.SecureRandom.nextBytes(SecureRandom.java:468) at sun.security.ssl.CipherBox.createExplicitNonce(CipherBox.java:1025) at sun.security.ssl.EngineOutputRecord.write(EngineOutputRecord.java:287) at sun.security.ssl.EngineOutputRecord.write(EngineOutputRecord.java:225) at sun.security.ssl.EngineWriter.writeRecord(EngineWriter.java:186) at sun.security.ssl.SSLEngineImpl.writeRecord(SSLEngineImpl.java:1281) at sun.security.ssl.SSLEngineImpl.writeAppRecord(SSLEngineImpl.java:1252) at sun.security.ssl.SSLEngineImpl.wrap(SSLEngineImpl.java:1165) at javax.net.ssl.SSLEngine.wrap(SSLEngine.java:509) at io.netty.handler.ssl.SslHandler.wrap(SslHandler.java:1086) at io.netty.handler.ssl.SslHandler.wrap(SslHandler.java:843) at io.netty.handler.ssl.SslHandler.wrapAndFlush(SslHandler.java:811) at io.netty.handler.ssl.SslHandler.flush(SslHandler.java:792) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:750) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:742) at io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:728) at dev.miku.r2dbc.mysql.client.WriteSubscriber.onComplete(WriteSubscriber.java:72) at reactor.core.publisher.FluxCreate$BaseSink.complete(FluxCreate.java:439) at reactor.core.publisher.FluxCreate$BufferAsyncSink.drain(FluxCreate.java:784) at reactor.core.publisher.FluxCreate$BufferAsyncSink.complete(FluxCreate.java:732) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drainLoop(FluxCreate.java:240) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drain(FluxCreate.java:206) at reactor.core.publisher.FluxCreate$SerializedFluxSink.complete(FluxCreate.java:197) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onComplete(LargeMessageSlicer.java:99) at 
reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2348) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onSubscribe(LargeMessageSlicer.java:48) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:54) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at dev.miku.r2dbc.mysql.message.client.LargeClientMessage.lambda$encode$0(LargeClientMessage.java:52) at reactor.core.publisher.FluxCreate.subscribe(FluxCreate.java:94) at reactor.core.publisher.Flux.subscribe(Flux.java:8095) at dev.miku.r2dbc.mysql.client.MessageDuplexCodec.write(MessageDuplexCodec.java:111) at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:717) at io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:764) at io.netty.channel.AbstractChannelHandlerContext$WriteTask.run(AbstractChannelHandlerContext.java:1071) at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164) at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472) at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:384) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(Thread.java:748) 2020-12-03 15:24:35,195 WARN [reactor-tcp-epoll-2] d.m.r.m.c.ReactorNettyClient:259:[, ] - Connection has been closed by peer 2020-12-03 15:24:35,196 ERROR [reactor-tcp-epoll-2] r.c.p.Operators:319:[, ] - Operator called default onErrorDropped dev.miku.r2dbc.mysql.client.MySqlConnectionClosedException: Connection unexpectedly closed at dev.miku.r2dbc.mysql.client.ClientExceptions.unexpectedClosed(ClientExceptions.java:32) at dev.miku.r2dbc.mysql.client.ReactorNettyClient.handleClose(ReactorNettyClient.java:260) at reactor.core.publisher.FluxPeek$PeekSubscriber.onComplete(FluxPeek.java:264) at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onComplete(Operators.java:2018) at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:366) at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onComplete(FluxConcatMap.java:275) at reactor.core.publisher.EmitterProcessor.checkTerminated(EmitterProcessor.java:541) at reactor.core.publisher.EmitterProcessor.drain(EmitterProcessor.java:402) at reactor.core.publisher.EmitterProcessor.tryEmitComplete(EmitterProcessor.java:221) at reactor.core.publisher.EmitterProcessor.onComplete(EmitterProcessor.java:212) at dev.miku.r2dbc.mysql.client.ReactorNettyClient.resumeError(ReactorNettyClient.java:215) at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onError(FluxOnErrorResume.java:94) at reactor.core.publisher.FluxPeek$PeekSubscriber.onError(FluxPeek.java:221) at reactor.netty.channel.FluxReceive.terminateReceiver(FluxReceive.java:447) at reactor.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:256) at reactor.netty.channel.FluxReceive.onInboundError(FluxReceive.java:435) at reactor.netty.channel.ChannelOperations.onInboundError(ChannelOperations.java:467) at reactor.netty.channel.ChannelOperationsHandler.exceptionCaught(ChannelOperationsHandler.java:127) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:302) at 
io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:281) at io.netty.channel.AbstractChannelHandlerContext.fireExceptionCaught(AbstractChannelHandlerContext.java:273) at io.netty.handler.ssl.SslHandler.exceptionCaught(SslHandler.java:1151) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:302) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:752) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:742) at io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:728) at dev.miku.r2dbc.mysql.client.WriteSubscriber.onComplete(WriteSubscriber.java:72) at reactor.core.publisher.FluxCreate$BaseSink.complete(FluxCreate.java:439) at reactor.core.publisher.FluxCreate$BufferAsyncSink.drain(FluxCreate.java:784) at reactor.core.publisher.FluxCreate$BufferAsyncSink.complete(FluxCreate.java:732) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drainLoop(FluxCreate.java:240) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drain(FluxCreate.java:206) at reactor.core.publisher.FluxCreate$SerializedFluxSink.complete(FluxCreate.java:197) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onComplete(LargeMessageSlicer.java:99) at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2348) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onSubscribe(LargeMessageSlicer.java:48) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:54) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at dev.miku.r2dbc.mysql.message.client.LargeClientMessage.lambda$encode$0(LargeClientMessage.java:52) at reactor.core.publisher.FluxCreate.subscribe(FluxCreate.java:94) at reactor.core.publisher.Flux.subscribe(Flux.java:8095) at dev.miku.r2dbc.mysql.client.MessageDuplexCodec.write(MessageDuplexCodec.java:111) at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:717) at io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:764) at io.netty.channel.AbstractChannelHandlerContext$WriteTask.run(AbstractChannelHandlerContext.java:1071) at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164) at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472) at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:384) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(Thread.java:748) 2020-12-03 15:24:35,197 ERROR [reactor-tcp-epoll-2] d.m.r.m.c.ReactorNettyClient:217:[, ] - Error: Blocking call! java.io.FileInputStream#readBytes reactor.blockhound.BlockingOperationError: Blocking call! 
java.io.FileInputStream#readBytes at java.io.FileInputStream.readBytes(FileInputStream.java) at java.io.FileInputStream.read(FileInputStream.java:255) at sun.security.provider.NativePRNG$RandomIO.readFully(NativePRNG.java:424) at sun.security.provider.NativePRNG$RandomIO.ensureBufferValid(NativePRNG.java:526) at sun.security.provider.NativePRNG$RandomIO.implNextBytes(NativePRNG.java:545) at sun.security.provider.NativePRNG$RandomIO.access$400(NativePRNG.java:331) at sun.security.provider.NativePRNG.engineNextBytes(NativePRNG.java:220) at java.security.SecureRandom.nextBytes(SecureRandom.java:468) at sun.security.ssl.CipherBox.createExplicitNonce(CipherBox.java:1025) at sun.security.ssl.EngineOutputRecord.write(EngineOutputRecord.java:287) at sun.security.ssl.EngineOutputRecord.write(EngineOutputRecord.java:225) at sun.security.ssl.EngineWriter.writeRecord(EngineWriter.java:186) at sun.security.ssl.SSLEngineImpl.writeRecord(SSLEngineImpl.java:1281) at sun.security.ssl.SSLEngineImpl.writeAppRecord(SSLEngineImpl.java:1252) at sun.security.ssl.SSLEngineImpl.wrap(SSLEngineImpl.java:1165) at javax.net.ssl.SSLEngine.wrap(SSLEngine.java:509) at io.netty.handler.ssl.SslHandler.wrap(SslHandler.java:1086) at io.netty.handler.ssl.SslHandler.wrap(SslHandler.java:843) at io.netty.handler.ssl.SslHandler.wrapAndFlush(SslHandler.java:811) at io.netty.handler.ssl.SslHandler.flush(SslHandler.java:792) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:750) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:742) at io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:728) at dev.miku.r2dbc.mysql.client.WriteSubscriber.onComplete(WriteSubscriber.java:72) at reactor.core.publisher.FluxCreate$BaseSink.complete(FluxCreate.java:439) at reactor.core.publisher.FluxCreate$BufferAsyncSink.drain(FluxCreate.java:784) at reactor.core.publisher.FluxCreate$BufferAsyncSink.complete(FluxCreate.java:732) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drainLoop(FluxCreate.java:240) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drain(FluxCreate.java:206) at reactor.core.publisher.FluxCreate$SerializedFluxSink.complete(FluxCreate.java:197) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onComplete(LargeMessageSlicer.java:99) at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2348) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onSubscribe(LargeMessageSlicer.java:48) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:54) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at dev.miku.r2dbc.mysql.message.client.LargeClientMessage.lambda$encode$0(LargeClientMessage.java:52) at reactor.core.publisher.FluxCreate.subscribe(FluxCreate.java:94) at reactor.core.publisher.Flux.subscribe(Flux.java:8095) at dev.miku.r2dbc.mysql.client.MessageDuplexCodec.write(MessageDuplexCodec.java:111) at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:717) at io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:764) at io.netty.channel.AbstractChannelHandlerContext$WriteTask.run(AbstractChannelHandlerContext.java:1071) at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164) at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472) at 
io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:384) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(Thread.java:748) 2020-12-03 15:24:35,207 ERROR [reactor-tcp-epoll-2] r.c.p.Operators:319:[, ] - Operator called default onErrorDropped dev.miku.r2dbc.mysql.client.MySqlConnectionClosedException: Connection closed at dev.miku.r2dbc.mysql.client.ClientExceptions.expectedClosed(ClientExceptions.java:36) at dev.miku.r2dbc.mysql.client.ReactorNettyClient.handleClose(ReactorNettyClient.java:262) at dev.miku.r2dbc.mysql.client.ReactorNettyClient.access$400(ReactorNettyClient.java:53) at dev.miku.r2dbc.mysql.client.ReactorNettyClient$ResponseSubscriber.onComplete(ReactorNettyClient.java:306) at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onComplete(Operators.java:2018) at reactor.core.publisher.Operators$MonoSubscriber.onComplete(Operators.java:1826) at reactor.core.publisher.MonoIgnoreThen$ThenAcceptInner.onComplete(MonoIgnoreThen.java:323) at reactor.core.publisher.Operators.complete(Operators.java:135) at reactor.netty.FutureMono$DeferredFutureMono.subscribe(FutureMono.java:131) at reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.drain(MonoIgnoreThen.java:154) at reactor.core.publisher.MonoIgnoreThen.subscribe(MonoIgnoreThen.java:56) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onError(FluxOnErrorResume.java:103) at reactor.core.publisher.FluxPeek$PeekSubscriber.onError(FluxPeek.java:221) at reactor.netty.channel.FluxReceive.terminateReceiver(FluxReceive.java:447) at reactor.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:256) at reactor.netty.channel.FluxReceive.onInboundError(FluxReceive.java:435) at reactor.netty.channel.ChannelOperations.onInboundError(ChannelOperations.java:467) at reactor.netty.channel.ChannelOperationsHandler.exceptionCaught(ChannelOperationsHandler.java:127) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:302) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:281) at io.netty.channel.AbstractChannelHandlerContext.fireExceptionCaught(AbstractChannelHandlerContext.java:273) at io.netty.handler.ssl.SslHandler.exceptionCaught(SslHandler.java:1151) at io.netty.channel.AbstractChannelHandlerContext.invokeExceptionCaught(AbstractChannelHandlerContext.java:302) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:752) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:742) at io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:728) at dev.miku.r2dbc.mysql.client.WriteSubscriber.onComplete(WriteSubscriber.java:72) at reactor.core.publisher.FluxCreate$BaseSink.complete(FluxCreate.java:439) at reactor.core.publisher.FluxCreate$BufferAsyncSink.drain(FluxCreate.java:784) at reactor.core.publisher.FluxCreate$BufferAsyncSink.complete(FluxCreate.java:732) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drainLoop(FluxCreate.java:240) at reactor.core.publisher.FluxCreate$SerializedFluxSink.drain(FluxCreate.java:206) at 
reactor.core.publisher.FluxCreate$SerializedFluxSink.complete(FluxCreate.java:197) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onComplete(LargeMessageSlicer.java:99) at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2348) at dev.miku.r2dbc.mysql.message.client.LargeMessageSlicer.onSubscribe(LargeMessageSlicer.java:48) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:54) at reactor.core.publisher.Mono.subscribe(Mono.java:3987) at dev.miku.r2dbc.mysql.message.client.LargeClientMessage.lambda$encode$0(LargeClientMessage.java:52) at reactor.core.publisher.FluxCreate.subscribe(FluxCreate.java:94) at reactor.core.publisher.Flux.subscribe(Flux.java:8095) at dev.miku.r2dbc.mysql.client.MessageDuplexCodec.write(MessageDuplexCodec.java:111) at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:717) at io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:764) at io.netty.channel.AbstractChannelHandlerContext$WriteTask.run(AbstractChannelHandlerContext.java:1071) at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164) at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472) at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:384) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(Thread.java:748) ``` Part of stack trace where blockhound found the blocking call: ``` 2020-12-03 15:24:35,197 ERROR [reactor-tcp-epoll-2] d.m.r.m.c.ReactorNettyClient:217:[, ] - Error: Blocking call! java.io.FileInputStream#readBytes reactor.blockhound.BlockingOperationError: Blocking call! 
java.io.FileInputStream#readBytes at java.io.FileInputStream.readBytes(FileInputStream.java) at java.io.FileInputStream.read(FileInputStream.java:255) at sun.security.provider.NativePRNG$RandomIO.readFully(NativePRNG.java:424) at sun.security.provider.NativePRNG$RandomIO.ensureBufferValid(NativePRNG.java:526) at sun.security.provider.NativePRNG$RandomIO.implNextBytes(NativePRNG.java:545) at sun.security.provider.NativePRNG$RandomIO.access$400(NativePRNG.java:331) at sun.security.provider.NativePRNG.engineNextBytes(NativePRNG.java:220) at java.security.SecureRandom.nextBytes(SecureRandom.java:468) at sun.security.ssl.CipherBox.createExplicitNonce(CipherBox.java:1025) at sun.security.ssl.EngineOutputRecord.write(EngineOutputRecord.java:287) at sun.security.ssl.EngineOutputRecord.write(EngineOutputRecord.java:225) at sun.security.ssl.EngineWriter.writeRecord(EngineWriter.java:186) at sun.security.ssl.SSLEngineImpl.writeRecord(SSLEngineImpl.java:1281) at sun.security.ssl.SSLEngineImpl.writeAppRecord(SSLEngineImpl.java:1252) at sun.security.ssl.SSLEngineImpl.wrap(SSLEngineImpl.java:1165) at javax.net.ssl.SSLEngine.wrap(SSLEngine.java:509) at io.netty.handler.ssl.SslHandler.wrap(SslHandler.java:1086) at io.netty.handler.ssl.SslHandler.wrap(SslHandler.java:843) at io.netty.handler.ssl.SslHandler.wrapAndFlush(SslHandler.java:811) at io.netty.handler.ssl.SslHandler.flush(SslHandler.java:792) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:750) at io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:742) at io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:728) at dev.miku.r2dbc.mysql.client.WriteSubscriber.onComplete(WriteSubscriber.java:72) ``` ### Steps to reproduce I made a few DB calls here and there and was able to reproduce the BlockHound exception. An overview of what I did: first I hit an API which fetched a few rows from a DB table using the @Query("select * from dd_reactive where id in (:listOfIds)") (without BlockHound), and this worked fine; then I hit another API which made another call to the DB to fetch all the rows of the table (with BlockHound), and it broke with the stack trace pasted above. ### Minimal yet complete reproducer code (or URL to code) If needed I can provide the code later on. ### Netty version 4.1.54.Final ### JVM version (e.g. `java -version`) Oracle JDK java version "1.8.0_231" Java(TM) SE Runtime Environment (build 1.8.0_231-b11) Java HotSpot(TM) 64-Bit Server VM (build 25.231-b11, mixed mode) ### OS version (e.g. `uname -a`) Linux mmt-ubt-8341 5.4.0-53-generic #59~18.04.1-Ubuntu SMP Wed Oct 21 12:14:56 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux **P.S.: At first I thought it was due to the R2DBC MySQL driver (r2dbc-mysql) I was using and I raised it in their GitHub issues, but they suggested that it isn't on their side but on Netty's side. Issue Link: https://github.com/mirromutth/r2dbc-mysql/issues/153**
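For context on how such a false positive is silenced: the gold patch for this record (shown below) teaches Netty's built-in BlockHound integration that `SSLEngineImpl#wrap` may block, mirroring the allowance that already existed for `unwrap`. A rough standalone sketch of the same mechanism, assuming only BlockHound's public `BlockHoundIntegration` SPI (the class name here is made up for illustration):

```java
import reactor.blockhound.BlockHound;
import reactor.blockhound.integration.BlockHoundIntegration;

// Sketch: whitelist the JDK SSLEngine's record wrapping/unwrapping, which can
// legitimately block (e.g. NativePRNG reading seed bytes via FileInputStream)
// even though it runs on non-blocking event-loop threads.
public class AllowJdkSslEngineCalls implements BlockHoundIntegration {
    @Override
    public void applyTo(BlockHound.Builder builder) {
        builder.allowBlockingCallsInside("sun.security.ssl.SSLEngineImpl", "wrap");
        builder.allowBlockingCallsInside("sun.security.ssl.SSLEngineImpl", "unwrap");
    }
}
```

Such an integration can be passed to `BlockHound.install(...)` directly or discovered via `META-INF/services`; once the gold patch landed, Netty ships the equivalent allowance itself, so applications on fixed versions should not need this workaround.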
[ "common/src/main/java/io/netty/util/internal/Hidden.java" ]
[ "common/src/main/java/io/netty/util/internal/Hidden.java" ]
[ "transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java" ]
diff --git a/common/src/main/java/io/netty/util/internal/Hidden.java b/common/src/main/java/io/netty/util/internal/Hidden.java index 5ce04918656..ac97323573e 100644 --- a/common/src/main/java/io/netty/util/internal/Hidden.java +++ b/common/src/main/java/io/netty/util/internal/Hidden.java @@ -111,6 +111,10 @@ public void applyTo(BlockHound.Builder builder) { "sun.security.ssl.SSLEngineImpl", "unwrap"); + builder.allowBlockingCallsInside( + "sun.security.ssl.SSLEngineImpl", + "wrap"); + builder.nonBlockingThreadPredicate(new Function<Predicate<Thread>, Predicate<Thread>>() { @Override public Predicate<Thread> apply(final Predicate<Thread> p) {
diff --git a/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java b/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java index 68858b127dd..ade2def4703 100644 --- a/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java +++ b/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java @@ -20,6 +20,7 @@ import io.netty.buffer.UnpooledByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; @@ -63,9 +64,13 @@ import java.util.concurrent.FutureTask; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantLock; +import static io.netty.buffer.Unpooled.wrappedBuffer; import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.junit.Assume.assumeTrue; @@ -238,6 +243,77 @@ public void testTrustManagerVerifyTLSv13() throws Exception { testTrustManagerVerify("TLSv1.3"); } + @Test + public void testSslHandlerWrapAllowsBlockingCalls() throws Exception { + final SslContext sslClientCtx = + SslContextBuilder.forClient() + .trustManager(InsecureTrustManagerFactory.INSTANCE) + .sslProvider(SslProvider.JDK) + .build(); + final SslHandler sslHandler = sslClientCtx.newHandler(UnpooledByteBufAllocator.DEFAULT); + final EventLoopGroup group = new NioEventLoopGroup(); + final CountDownLatch activeLatch = new CountDownLatch(1); + final AtomicReference<Throwable> error = new AtomicReference<>(); + + Channel sc = null; + Channel cc = null; + try { + sc = new ServerBootstrap() + .group(group) + .channel(NioServerSocketChannel.class) + .childHandler(new ChannelInboundHandlerAdapter()) + .bind(new InetSocketAddress(0)) + .syncUninterruptibly() + .channel(); + + cc = new Bootstrap() + .group(group) + .channel(NioSocketChannel.class) + .handler(new ChannelInitializer<Channel>() { + + @Override + protected void initChannel(Channel ch) { + ch.pipeline().addLast(sslHandler); + ch.pipeline().addLast(new ChannelInboundHandlerAdapter() { + + @Override + public void channelActive(ChannelHandlerContext ctx) { + activeLatch.countDown(); + } + + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { + if (evt instanceof SslHandshakeCompletionEvent && + ((SslHandshakeCompletionEvent) evt).cause() != null) { + Throwable cause = ((SslHandshakeCompletionEvent) evt).cause(); + cause.printStackTrace(); + error.set(cause); + } + ctx.fireUserEventTriggered(evt); + } + }); + } + }) + .connect(sc.localAddress()) + .addListener((ChannelFutureListener) future -> + future.channel().writeAndFlush(wrappedBuffer(new byte [] { 1, 2, 3, 4 }))) + .syncUninterruptibly() + .channel(); + + assertTrue(activeLatch.await(5, TimeUnit.SECONDS)); + assertNull(error.get()); + } finally { + if (cc != null) { + cc.close().syncUninterruptibly(); + } + if (sc != null) { + sc.close().syncUninterruptibly(); + } + group.shutdownGracefully(); + ReferenceCountUtil.release(sslClientCtx); + } + } + private static void testTrustManagerVerify(String tlsVersion) throws Exception { final 
SslContext sslClientCtx = SslContextBuilder.forClient()
test
test
"2020-12-05T07:01:03"
"2020-12-03T14:49:02Z"
theakshaypilania
val
netty/netty/10838_10846
netty/netty
netty/netty/10838
netty/netty/10846
[ "keyword_pr_to_issue" ]
abd5a7d922dadc24bcfc6f3f34802d7de115445d
05093de0d6fe5787447439836e2ca159e79c88bf
[ "Also for `CloseWebSocketFrame` constructors where you can provide directly `int` for the status code, I see that the status code is checked when reading the frame, but isn't it better to check it also when constructing `CloseWebSocketFrame`? (My Netty server was able to create and send such `CloseWebSocketFrame` and only my Netty client complained about the status code)\r\n\r\n`new CloseWebSocketFrame(1006, \"Abnormal closure\")`\r\n\r\n```\r\nio.netty.handler.codec.http.websocketx.CorruptedWebSocketFrameException: Invalid close frame getStatus code: 1006\r\n\tat io.netty.handler.codec.http.websocketx.WebSocket08FrameDecoder.protocolViolation(WebSocket08FrameDecoder.java:426)\r\n\tat io.netty.handler.codec.http.websocketx.WebSocket08FrameDecoder.protocolViolation(WebSocket08FrameDecoder.java:422)\r\n\tat io.netty.handler.codec.http.websocketx.WebSocket08FrameDecoder.checkCloseFrameBody(WebSocket08FrameDecoder.java:479)\r\n\tat io.netty.handler.codec.http.websocketx.WebSocket08FrameDecoder.decode(WebSocket08FrameDecoder.java:339)\r\n\tat io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:501)\r\n```" ]
[]
"2020-12-07T10:45:45Z"
[]
Check for valid status codes should be done in CloseWebSocketFrame instead of WebSocketCloseStatus
According to the specification https://tools.ietf.org/html/rfc6455#section-7.4 ``` 1006 is a reserved value and MUST NOT be set as a status code in a Close control frame by an endpoint. It is designated for use in applications expecting a status code to indicate that the connection was closed abnormally, e.g., without sending or receiving a Close control frame. ``` I want to use `WebSocketCloseStatus` in applications to indicate that the connection was closed abnormally. I do not intend to send a `CloseWebSocketFrame` with such a `WebSocketCloseStatus` (with 1006 status code). However, the current implementation checks the validity of the status codes in `WebSocketCloseStatus` instead of `CloseWebSocketFrame`. Isn't it more correct if the check for a valid status code is in `CloseWebSocketFrame`? ### Expected behavior To be able to create `WebSocketCloseStatus` with status code 1006 and `CloseWebSocketFrame` to fail when constructed with a `WebSocketCloseStatus` with status code 1006 ### Actual behavior `WebSocketCloseStatus` fails when created with status code 1006 ### Proposed solution I can create a PR with the patch below ``` diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java index 7231bde8f4..834d0e6eee 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java @@ -40,7 +40,7 @@ public class CloseWebSocketFrame extends WebSocketFrame { * example, <tt>1000</tt> indicates normal closure. */ public CloseWebSocketFrame(WebSocketCloseStatus status) { - this(status.code(), status.reasonText()); + this(requireValidStatusCode(status), status.reasonText()); } /** @@ -53,7 +53,7 @@ public class CloseWebSocketFrame extends WebSocketFrame { * Reason text. Set to null if no text.
*/ public CloseWebSocketFrame(WebSocketCloseStatus status, String reasonText) { - this(status.code(), reasonText); + this(requireValidStatusCode(status), reasonText); } /** @@ -201,4 +201,13 @@ public class CloseWebSocketFrame extends WebSocketFrame { super.touch(hint); return this; } + + static int requireValidStatusCode(WebSocketCloseStatus status) { + if (WebSocketCloseStatus.isValidStatusCode(status.code())) { + return status.code(); + } + else { + throw new IllegalArgumentException("WebSocket close status code does NOT comply with RFC-6455: " + status.code()); + } + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java index 92b1dbaef5..f634240ef5 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java @@ -208,16 +208,26 @@ public final class WebSocketCloseStatus implements Comparable<WebSocketCloseStat // 1004, 1005, 1006, 1015 are reserved and should never be used by user //public static final WebSocketCloseStatus SPECIFIC_MEANING = register(1004, "..."); - //public static final WebSocketCloseStatus EMPTY = register(1005, "Empty"); - //public static final WebSocketCloseStatus ABNORMAL_CLOSURE = register(1006, "Abnormal closure"); - //public static final WebSocketCloseStatus TLS_HANDSHAKE_FAILED(1015, "TLS handshake failed"); + + public static final WebSocketCloseStatus EMPTY = + new WebSocketCloseStatus(1005, "Empty"); + + public static final WebSocketCloseStatus ABNORMAL_CLOSURE = + new WebSocketCloseStatus(1006, "Abnormal closure"); + + public static final WebSocketCloseStatus TLS_HANDSHAKE_FAILED = + new WebSocketCloseStatus(1015, "TLS handshake failed"); private final int statusCode; private final String reasonText; private String text; public WebSocketCloseStatus(int statusCode, String reasonText) { - if (!isValidStatusCode(statusCode)) { + this(statusCode, reasonText, true); + } + + public WebSocketCloseStatus(int statusCode, String reasonText, boolean validate) { + if (validate && !isValidStatusCode(statusCode)) { throw new IllegalArgumentException( "WebSocket close status code does NOT comply with RFC-6455: " + statusCode); } ``` ### Netty version 4.1.55.Final-SNAPSHOT
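To make the requested contract concrete: below is a sketch of the post-fix behavior, matching the assertions in the PR's `CloseWebSocketFrameTest` further down this record (the `ABNORMAL_CLOSURE` constant and the validating frame constructor are the ones the merged patch introduces; this is illustrative, not the pre-fix 4.1.54 API):

```java
import io.netty.handler.codec.http.websocketx.CloseWebSocketFrame;
import io.netty.handler.codec.http.websocketx.WebSocketCloseStatus;

public class ReservedCloseStatusExample {
    public static void main(String[] args) {
        // The reserved code 1006 may now exist as an application-level marker...
        WebSocketCloseStatus abnormal = WebSocketCloseStatus.ABNORMAL_CLOSURE;
        System.out.println(abnormal.code() + " " + abnormal.reasonText());

        // ...but it must never go on the wire, so building a close frame fails.
        try {
            new CloseWebSocketFrame(abnormal);
        } catch (IllegalArgumentException expected) {
            // "WebSocket close status code does NOT comply with RFC-6455: 1006"
            System.out.println(expected.getMessage());
        }
    }
}
```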
[ "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrameTest.java", "codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatusTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java index 7231bde8f4a..1703203902e 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java @@ -40,7 +40,7 @@ public CloseWebSocketFrame() { * example, <tt>1000</tt> indicates normal closure. */ public CloseWebSocketFrame(WebSocketCloseStatus status) { - this(status.code(), status.reasonText()); + this(requireValidStatusCode(status.code()), status.reasonText()); } /** @@ -53,7 +53,7 @@ public CloseWebSocketFrame(WebSocketCloseStatus status) { * Reason text. Set to null if no text. */ public CloseWebSocketFrame(WebSocketCloseStatus status, String reasonText) { - this(status.code(), reasonText); + this(requireValidStatusCode(status.code()), reasonText); } /** @@ -66,7 +66,7 @@ public CloseWebSocketFrame(WebSocketCloseStatus status, String reasonText) { * Reason text. Set to null if no text. */ public CloseWebSocketFrame(int statusCode, String reasonText) { - this(true, 0, statusCode, reasonText); + this(true, 0, requireValidStatusCode(statusCode), reasonText); } /** @@ -95,7 +95,7 @@ public CloseWebSocketFrame(boolean finalFragment, int rsv) { * Reason text. Set to null if no text. */ public CloseWebSocketFrame(boolean finalFragment, int rsv, int statusCode, String reasonText) { - super(finalFragment, rsv, newBinaryData(statusCode, reasonText)); + super(finalFragment, rsv, newBinaryData(requireValidStatusCode(statusCode), reasonText)); } private static ByteBuf newBinaryData(int statusCode, String reasonText) { @@ -201,4 +201,13 @@ public CloseWebSocketFrame touch(Object hint) { super.touch(hint); return this; } + + static int requireValidStatusCode(int statusCode) { + if (WebSocketCloseStatus.isValidStatusCode(statusCode)) { + return statusCode; + } else { + throw new IllegalArgumentException("WebSocket close status code does NOT comply with RFC-6455: " + + statusCode); + } + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java index 92b1dbaef56..2a0b7edb6da 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java @@ -208,16 +208,26 @@ public final class WebSocketCloseStatus implements Comparable<WebSocketCloseStat // 1004, 1005, 1006, 1015 are reserved and should never be used by user //public static final WebSocketCloseStatus SPECIFIC_MEANING = register(1004, "..."); - //public static final WebSocketCloseStatus EMPTY = register(1005, "Empty"); - //public static final WebSocketCloseStatus ABNORMAL_CLOSURE = register(1006, "Abnormal closure"); - //public static final WebSocketCloseStatus TLS_HANDSHAKE_FAILED(1015, "TLS handshake failed"); + + public static final WebSocketCloseStatus EMPTY = + new WebSocketCloseStatus(1005, "Empty", false); + + public static final WebSocketCloseStatus ABNORMAL_CLOSURE = + new WebSocketCloseStatus(1006, "Abnormal closure", false); + + public static final WebSocketCloseStatus TLS_HANDSHAKE_FAILED = + new WebSocketCloseStatus(1015, "TLS handshake failed", false); private final int statusCode; private final String reasonText; private String text; public 
WebSocketCloseStatus(int statusCode, String reasonText) { - if (!isValidStatusCode(statusCode)) { + this(statusCode, reasonText, true); + } + + public WebSocketCloseStatus(int statusCode, String reasonText, boolean validate) { + if (validate && !isValidStatusCode(statusCode)) { throw new IllegalArgumentException( "WebSocket close status code does NOT comply with RFC-6455: " + statusCode); } @@ -290,6 +300,10 @@ public static WebSocketCloseStatus valueOf(int code) { return PROTOCOL_ERROR; case 1003: return INVALID_MESSAGE_TYPE; + case 1005: + return EMPTY; + case 1006: + return ABNORMAL_CLOSURE; case 1007: return INVALID_PAYLOAD_DATA; case 1008: @@ -306,6 +320,8 @@ public static WebSocketCloseStatus valueOf(int code) { return TRY_AGAIN_LATER; case 1014: return BAD_GATEWAY; + case 1015: + return TLS_HANDSHAKE_FAILED; default: return new WebSocketCloseStatus(code, "Close status #" + code); }
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrameTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrameTest.java new file mode 100644 index 00000000000..f1bcc3fc3d8 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrameTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and limitations under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import org.assertj.core.api.ThrowableAssert; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; + +class CloseWebSocketFrameTest { + + @Test + void testInvalidCode() { + doTestInvalidCode(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new CloseWebSocketFrame(WebSocketCloseStatus.ABNORMAL_CLOSURE); + } + }); + + doTestInvalidCode(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new CloseWebSocketFrame(WebSocketCloseStatus.ABNORMAL_CLOSURE, "invalid code"); + } + }); + + doTestInvalidCode(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new CloseWebSocketFrame(1006, "invalid code"); + } + }); + + doTestInvalidCode(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new CloseWebSocketFrame(true, 0, 1006, "invalid code"); + } + }); + } + + @Test + void testValidCode() { + doTestValidCode(new CloseWebSocketFrame(WebSocketCloseStatus.NORMAL_CLOSURE), + WebSocketCloseStatus.NORMAL_CLOSURE.code(), WebSocketCloseStatus.NORMAL_CLOSURE.reasonText()); + + doTestValidCode(new CloseWebSocketFrame(WebSocketCloseStatus.NORMAL_CLOSURE, "valid code"), + WebSocketCloseStatus.NORMAL_CLOSURE.code(), "valid code"); + + doTestValidCode(new CloseWebSocketFrame(1000, "valid code"), 1000, "valid code"); + + doTestValidCode(new CloseWebSocketFrame(true, 0, 1000, "valid code"), 1000, "valid code"); + } + + private static void doTestInvalidCode(ThrowableAssert.ThrowingCallable callable) { + assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(callable); + } + + private static void doTestValidCode(CloseWebSocketFrame frame, int expectedCode, String expectedReason) { + assertThat(frame.statusCode()).isEqualTo(expectedCode); + assertThat(frame.reasonText()).isEqualTo(expectedReason); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatusTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatusTest.java index d8ab7f9bc98..223c12ce3d0 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatusTest.java +++ 
b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatusTest.java @@ -18,9 +18,11 @@ import java.util.SortedSet; import java.util.TreeSet; +import org.assertj.core.api.ThrowableAssert; import org.hamcrest.Matchers; import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotSame; @@ -56,6 +58,8 @@ public void testKnownStatuses() { assertSame(ENDPOINT_UNAVAILABLE, valueOf(1001)); assertSame(PROTOCOL_ERROR, valueOf(1002)); assertSame(INVALID_MESSAGE_TYPE, valueOf(1003)); + assertSame(EMPTY, valueOf(1005)); + assertSame(ABNORMAL_CLOSURE, valueOf(1006)); assertSame(INVALID_PAYLOAD_DATA, valueOf(1007)); assertSame(POLICY_VIOLATION, valueOf(1008)); assertSame(MESSAGE_TOO_BIG, valueOf(1009)); @@ -64,6 +68,7 @@ public void testKnownStatuses() { assertSame(SERVICE_RESTART, valueOf(1012)); assertSame(TRY_AGAIN_LATER, valueOf(1013)); assertSame(BAD_GATEWAY, valueOf(1014)); + assertSame(TLS_HANDSHAKE_FAILED, valueOf(1015)); } @Test @@ -127,4 +132,23 @@ public void testRfc6455CodeValidation() { invalidCodes.retainAll(knownCodes); assertEquals(invalidCodes, Collections.emptySet()); } + + @Test + public void testValidationEnabled() { + assertThatExceptionOfType(IllegalArgumentException.class) + .isThrownBy(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new WebSocketCloseStatus(1006, "validation disabled"); + } + }); + } + + @Test + public void testValidationDisabled() { + WebSocketCloseStatus status = new WebSocketCloseStatus(1006, "validation disabled", false); + assertEquals(1006, status.code()); + assertEquals("validation disabled", status.reasonText()); + } }
train
test
"2020-12-07T10:49:09"
"2020-12-03T17:15:08Z"
violetagg
val
netty/netty/10851_10855
netty/netty
netty/netty/10851
netty/netty/10855
[ "keyword_pr_to_issue" ]
b4479353e2485aa2f0293bc748920560c0ddeaf2
c0674cff29ae017e9fd144b6728ac0521874e883
[ "Can you share the code to reproduce?", "Experiencing this as well\r\n\r\nIn this code:\r\n\r\n```\r\n private static int findDelimiter(ByteBuf undecodedChunk, String delimiter, int offset) {\r\n ....\r\n while (delimiterNotFound && newOffset + delimeterLength <= toRead) {\r\n```\r\n\r\n`newOffset` bounces between 9 and 4\r\n\r\nThe unencoded chunk is:\r\n\r\n```\r\nbar-stream\r\n--c2b0cb9ac1cf7cc7\r\ncontent-disposition: form-data; name=\"data\"; filename=\"data.json\"\r\ncontent-length: 16\r\ncontent-type: application/json; charset=UTF-8\r\n\r\n{\"title\":\"Test\"}\r\n--c2b0cb9ac1cf7cc7--\r\n```\r\n\r\nDelimiter is `--c2b0cb9ac1cf7cc7`\r\n\r\nOffset is `0`\r\n\r\nI think the issue is `newOffset = posFirstChar + offset;` should be `newOffset = posFirstChar + newOffset;`", "@jameskleeh You might be right ! Sorry for that...\r\nCould you check the Junit test with this change?", "Same here! 100% CPU on 4.1.55, but same code works fine on 4.1.54! 🤷🏻‍♂️" ]
[]
"2020-12-09T20:16:13Z"
[ "regression" ]
Netty Threads consume 100% of the CPU after update to the latest version
Hello, after updating netty 4.1.54.Final -> 4.1.55.Final and boringssl 2.0.34.Final -> 2.0.35.Final, during one of the test cases with file upload we found that Netty epoll threads started consuming 100% of the CPU. Stack trace: ``` "epollEventLoopGroup-3-1" #20 prio=10 os_prio=0 cpu=597400.03ms elapsed=15396.13s tid=0x00007f53c800a000 nid=0x664a0 runnable [0x00007f53c33fc000] java.lang.Thread.State: RUNNABLE at io.netty.buffer.AbstractReferenceCountedByteBuf$1.unsafeOffset(AbstractReferenceCountedByteBuf.java:40) at io.netty.util.internal.ReferenceCountUpdater.isLiveNonVolatile(ReferenceCountUpdater.java:88) at io.netty.buffer.AbstractReferenceCountedByteBuf.isAccessible(AbstractReferenceCountedByteBuf.java:56) at io.netty.buffer.AbstractByteBuf.ensureAccessible(AbstractByteBuf.java:1489) at io.netty.buffer.AbstractByteBuf.checkIndex(AbstractByteBuf.java:1419) at io.netty.buffer.AbstractByteBuf.checkIndex(AbstractByteBuf.java:1415) at io.netty.buffer.AbstractByteBuf.getByte(AbstractByteBuf.java:357) at io.netty.buffer.CompositeByteBuf._getByte(CompositeByteBuf.java:950) at io.netty.buffer.UnpooledSlicedByteBuf._getByte(UnpooledSlicedByteBuf.java:39) at io.netty.buffer.AbstractByteBuf.firstIndexOf(AbstractByteBuf.java:1268) at io.netty.buffer.AbstractByteBuf.indexOf(AbstractByteBuf.java:1254) at io.netty.buffer.AbstractByteBuf.bytesBefore(AbstractByteBuf.java:1306) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.findDelimiter(HttpPostMultipartRequestDecoder.java:1129) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.loadDataMultipart(HttpPostMultipartRequestDecoder.java:1171) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.decodeMultipart(HttpPostMultipartRequestDecoder.java:552) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.findMultipartDisposition(HttpPostMultipartRequestDecoder.java:795) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.decodeMultipart(HttpPostMultipartRequestDecoder.java:504) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.findMultipartDelimiter(HttpPostMultipartRequestDecoder.java:656) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.decodeMultipart(HttpPostMultipartRequestDecoder.java:491) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.parseBodyMultipart(HttpPostMultipartRequestDecoder.java:456) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.parseBody(HttpPostMultipartRequestDecoder.java:425) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.offer(HttpPostMultipartRequestDecoder.java:353) "epollEventLoopGroup-3-2" #21 prio=10 os_prio=0 cpu=434998.54ms elapsed=15396.10s tid=0x00007f53c800c800 nid=0x664a1 runnable [0x00007f53c32fb000] java.lang.Thread.State: RUNNABLE at io.netty.buffer.AbstractReferenceCountedByteBuf$1.unsafeOffset(AbstractReferenceCountedByteBuf.java:40) at io.netty.util.internal.ReferenceCountUpdater.isLiveNonVolatile(ReferenceCountUpdater.java:88) at io.netty.buffer.AbstractReferenceCountedByteBuf.isAccessible(AbstractReferenceCountedByteBuf.java:56) at io.netty.buffer.AbstractByteBuf.ensureAccessible(AbstractByteBuf.java:1489) at io.netty.buffer.AbstractByteBuf.checkIndex(AbstractByteBuf.java:1419) at io.netty.buffer.AbstractByteBuf.checkIndex(AbstractByteBuf.java:1415) at io.netty.buffer.AbstractByteBuf.getByte(AbstractByteBuf.java:357) at io.netty.buffer.CompositeByteBuf._getByte(CompositeByteBuf.java:950) at
io.netty.buffer.UnpooledSlicedByteBuf._getByte(UnpooledSlicedByteBuf.java:39) at io.netty.buffer.AbstractByteBuf.firstIndexOf(AbstractByteBuf.java:1268) at io.netty.buffer.AbstractByteBuf.indexOf(AbstractByteBuf.java:1254) at io.netty.buffer.AbstractByteBuf.bytesBefore(AbstractByteBuf.java:1306) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.findDelimiter(HttpPostMultipartRequestDecoder.java:1129) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.loadDataMultipart(HttpPostMultipartRequestDecoder.java:1171) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.decodeMultipart(HttpPostMultipartRequestDecoder.java:552) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.findMultipartDisposition(HttpPostMultipartRequestDecoder.java:795) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.decodeMultipart(HttpPostMultipartRequestDecoder.java:504) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.findMultipartDelimiter(HttpPostMultipartRequestDecoder.java:656) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.decodeMultipart(HttpPostMultipartRequestDecoder.java:491) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.parseBodyMultipart(HttpPostMultipartRequestDecoder.java:456) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.parseBody(HttpPostMultipartRequestDecoder.java:425) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.offer(HttpPostMultipartRequestDecoder.java:353) "epollEventLoopGroup-2-1" #19 prio=10 os_prio=0 cpu=461.08ms elapsed=15396.27s tid=0x00007f53fcd00800 nid=0x6649f runnable [0x00007f53e0902000] java.lang.Thread.State: RUNNABLE at io.netty.channel.epoll.Native.epollWait(Native Method) at io.netty.channel.epoll.Native.epollWait(Native.java:148) at io.netty.channel.epoll.Native.epollWait(Native.java:141) at io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:290) at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:347) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(java.base@11.0.9.1/Thread.java:834) "Common-Cleaner" #9 daemon prio=8 os_prio=0 cpu=16.86ms elapsed=15451.77s tid=0x00007f53fc120800 nid=0x66489 in Object.wait() [0x00007f53e3323000] java.lang.Thread.State: TIMED_WAITING (on object monitor) at java.lang.Object.wait(java.base@11.0.9.1/Native Method) - waiting on <no object reference available> at java.lang.ref.ReferenceQueue.remove(java.base@11.0.9.1/ReferenceQueue.java:155) - waiting to re-lock in wait() <0x00000000eb5d8040> (a java.lang.ref.ReferenceQueue$Lock) at jdk.internal.ref.CleanerImpl.run(java.base@11.0.9.1/CleanerImpl.java:148) at java.lang.Thread.run(java.base@11.0.9.1/Thread.java:834) at jdk.internal.misc.InnocuousThread.run(java.base@11.0.9.1/InnocuousThread.java:134) "Finalizer" #3 daemon prio=8 os_prio=0 cpu=4.08ms elapsed=15451.86s tid=0x00007f53fc0c2000 nid=0x66482 in Object.wait() [0x00007f53e3d11000] java.lang.Thread.State: WAITING (on object monitor) at java.lang.Object.wait(java.base@11.0.9.1/Native Method) - waiting on <no object reference available> at java.lang.ref.ReferenceQueue.remove(java.base@11.0.9.1/ReferenceQueue.java:155) - waiting to re-lock 
in wait() <0x00000000eb5d7850> (a java.lang.ref.ReferenceQueue$Lock) at java.lang.ref.ReferenceQueue.remove(java.base@11.0.9.1/ReferenceQueue.java:176) at java.lang.ref.Finalizer$FinalizerThread.run(java.base@11.0.9.1/Finalizer.java:170) "Reference Handler" #2 daemon prio=10 os_prio=0 cpu=5.52ms elapsed=15451.86s tid=0x00007f53fc0c0000 nid=0x66481 waiting on condition [0x00007f53e3e12000] java.lang.Thread.State: RUNNABLE at java.lang.ref.Reference.waitForReferencePendingList(java.base@11.0.9.1/Native Method) at java.lang.ref.Reference.processPendingReferences(java.base@11.0.9.1/Reference.java:241) at java.lang.ref.Reference$ReferenceHandler.run(java.base@11.0.9.1/Reference.java:213) ``` I'll continue to investigate; in the meantime, maybe you have some clues. openjdk version "11.0.9.1" 2020-11-04
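A standalone reproducer can be distilled from the regression test added in the fix (the multipart body, boundary, and decoder setup below are lifted from that test, which appears later in this record; the spin itself is inferred from the thread dumps above rather than re-verified here):

```java
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;

public class MultipartSpinRepro {
    public static void main(String[] args) {
        byte[] body = ("--be38b42a9ad2713f\n"
                + "content-disposition: form-data; name=\"title\"\n"
                + "content-length: 10\n"
                + "content-type: text/plain; charset=UTF-8\n"
                + "\n"
                + "bar-stream\n"
                + "--be38b42a9ad2713f\n"
                + "content-disposition: form-data; name=\"data\"; filename=\"data.json\"\n"
                + "content-length: 16\n"
                + "content-type: application/json; charset=UTF-8\n"
                + "\n"
                + "{\"title\":\"Test\"}\n"
                + "--be38b42a9ad2713f--").getBytes();
        FullHttpRequest req = new DefaultFullHttpRequest(
                HttpVersion.HTTP_1_1, HttpMethod.POST, "/", Unpooled.wrappedBuffer(body));
        req.headers().add("Content-Type", "multipart/form-data;boundary=be38b42a9ad2713f");
        // On 4.1.55 this constructor is expected to busy-loop inside findDelimiter(),
        // matching the RUNNABLE frames in the dumps above; on 4.1.54 and on builds
        // containing the fix it decodes both parts and returns.
        HttpPostRequestDecoder decoder =
                new HttpPostRequestDecoder(new DefaultHttpDataFactory(false), req);
        decoder.destroy();
        req.release();
    }
}
```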
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index b125dfae0e7..9e82ad7caf0 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -1132,7 +1132,7 @@ private static int findDelimiter(ByteBuf undecodedChunk, String delimiter, int o newOffset = toRead; return -newOffset; } - newOffset = posFirstChar + offset; + newOffset = posFirstChar + newOffset; if (newOffset + delimeterLength > toRead) { return -newOffset; }
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java index 45b8461ce72..acbcf1219d3 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java @@ -938,4 +938,42 @@ public void testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleLo() assertTrue(req.release()); } } + + @Test + public void testDecodeMultipartRequest() { + byte[] bodyBytes = ("--be38b42a9ad2713f\n" + + "content-disposition: form-data; name=\"title\"\n" + + "content-length: 10\n" + + "content-type: text/plain; charset=UTF-8\n" + + "\n" + + "bar-stream\n" + + "--be38b42a9ad2713f\n" + + "content-disposition: form-data; name=\"data\"; filename=\"data.json\"\n" + + "content-length: 16\n" + + "content-type: application/json; charset=UTF-8\n" + + "\n" + + "{\"title\":\"Test\"}\n" + + "--be38b42a9ad2713f--").getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + req.headers().add("Content-Type", "multipart/form-data;boundary=be38b42a9ad2713f"); + + try { + HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(new DefaultHttpDataFactory(false), req); + InterfaceHttpData data = decoder.getBodyHttpData("title"); + assertTrue(data instanceof MemoryAttribute); + assertEquals("bar-stream", ((MemoryAttribute) data).getString()); + assertTrue(data.release()); + data = decoder.getBodyHttpData("data"); + assertTrue(data instanceof MemoryFileUpload); + assertEquals("{\"title\":\"Test\"}", ((MemoryFileUpload) data).getString()); + assertTrue(data.release()); + decoder.destroy(); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + fail("Was not expecting an exception"); + } finally { + assertTrue(req.release()); + } + } }
train
test
"2020-12-10T14:19:39"
"2020-12-09T14:42:38Z"
doom369
val
netty/netty/10887_10893
netty/netty
netty/netty/10887
netty/netty/10893
[ "keyword_issue_to_pr" ]
4f6e62d50c434509a7c42e1080ad0c6d6795c63d
42145902a72da415c8505042285802c592ceb874
[ "@violetagg I wonder how we can archive this as we need to build on different platforms. Any idea ? ", "@normanmaurer I didn't have such issue till now, but is it possible to build first the artifacts and then to upload them all?\r\n\r\nOn the link below it is described how to deploy many files with different classifiers. Will this help?\r\nhttp://maven.apache.org/plugins/maven-deploy-plugin/examples/deploying-with-classifiers.html\r\n", "@violetagg thats because we didn't deploy these artefacts as snapshot until now... I will investigate. Thanks!", "@violetagg This should be fixed now... https://github.com/netty/netty/pull/10893", "@normanmaurer Thanks a lot. My build is green again :)\r\nHappy holidays!", "> @violetagg这应该现在修复... #10893\r\nCould not find netty-transport-native-epoll-4.1.57.Final-SNAPSHOT-linux-aarch_64.jar (io.netty:netty-transport-native-epoll:4.1.57.Final) \r\nhow to fix it?\r\ni work at gradle and idea when i flush denpendencis \r\nversion: io.netty:netty-all:4.1.77.Final phenomenon:normal before 4.1.68" ]
[]
"2020-12-24T08:09:50Z"
[]
Could not find netty-transport-native-epoll-4.1.57.Final-SNAPSHOT-linux-x86_64.jar
With the recent changes for Netty SNAPSHOTS deployment, the message below is thrown when one tries to build using Netty SNAPSHOTS ``` Could not find netty-transport-native-epoll-4.1.57.Final-SNAPSHOT-linux-x86_64.jar (io.netty:netty-transport-native-epoll:4.1.57.Final-SNAPSHOT:20201223.100457-23). Searched in the following locations: https://oss.sonatype.org/content/repositories/snapshots/io/netty/netty-transport-native-epoll/4.1.57.Final-SNAPSHOT/netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-linux-x86_64.jar ``` The `oss.sonatype.org` currently contains these artifacts. The SNAPSHOTS upload contains either `linux-x86_64` or `linux-aarch_64`: ``` netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-linux-x86_64.jar Wed Dec 23 09:59:13 UTC 2020 152114 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-linux-x86_64.jar.md5 Wed Dec 23 09:59:13 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-linux-x86_64.jar.sha1 Wed Dec 23 09:59:13 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-sources.jar Wed Dec 23 09:59:09 UTC 2020 85770 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-sources.jar.md5 Wed Dec 23 09:59:09 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-sources.jar.sha1 Wed Dec 23 09:59:09 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-test-sources.jar Wed Dec 23 09:59:10 UTC 2020 225891 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-test-sources.jar.md5 Wed Dec 23 09:59:11 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-test-sources.jar.sha1 Wed Dec 23 09:59:10 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-tests.jar Wed Dec 23 09:59:11 UTC 2020 275714 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-tests.jar.md5 Wed Dec 23 09:59:12 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22-tests.jar.sha1 Wed Dec 23 09:59:12 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22.jar Wed Dec 23 09:59:06 UTC 2020 122951 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22.jar.md5 Wed Dec 23 09:59:07 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22.jar.sha1 Wed Dec 23 09:59:07 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22.pom Wed Dec 23 09:59:07 UTC 2020 4955 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22.pom.md5 Wed Dec 23 09:59:07 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.095906-22.pom.sha1 Wed Dec 23 09:59:07 UTC 2020 40 ``` ``` netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-linux-aarch_64.jar Wed Dec 23 10:05:02 UTC 2020 148585 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-linux-aarch_64.jar.md5 Wed Dec 23 10:05:02 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-linux-aarch_64.jar.sha1 Wed Dec 23 10:05:02 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-sources.jar Wed Dec 23 10:04:59 UTC 2020 85767 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-sources.jar.md5 Wed Dec 23 10:05:00 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-sources.jar.sha1 Wed Dec 23 10:04:59 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-test-sources.jar Wed Dec 23 10:05:00 UTC 2020 222361 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-test-sources.jar.md5 Wed Dec 23 10:05:01 UTC 2020 32 
netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-test-sources.jar.sha1 Wed Dec 23 10:05:00 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-tests.jar Wed Dec 23 10:05:01 UTC 2020 272184 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-tests.jar.md5 Wed Dec 23 10:05:01 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23-tests.jar.sha1 Wed Dec 23 10:05:01 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23.jar Wed Dec 23 10:04:57 UTC 2020 122946 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23.jar.md5 Wed Dec 23 10:04:57 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23.jar.sha1 Wed Dec 23 10:04:57 UTC 2020 40 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23.pom Wed Dec 23 10:04:58 UTC 2020 4959 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23.pom.md5 Wed Dec 23 10:04:58 UTC 2020 32 netty-transport-native-epoll-4.1.57.Final-20201223.100457-23.pom.sha1 Wed Dec 23 10:04:58 UTC 2020 40 ``` ### Expected behavior To be able to build using Netty SNAPSHOTS ### Actual behavior Could not find netty-transport-native-epoll-4.1.57.Final-SNAPSHOT-linux-x86_64.jar ### Possible solution Deploy the artifacts with different classifiers in a single SNAPSHOT deployment ### Netty version 4.1.57.Final-SNAPSHOT
[ ".github/workflows/ci-deploy.yml", "docker/docker-compose.centos-6.18.yaml", "docker/docker-compose.centos-7.yaml", "docker/docker-compose.yaml" ]
[ ".github/workflows/ci-deploy.yml", "docker/docker-compose.centos-6.18.yaml", "docker/docker-compose.centos-7.yaml", "docker/docker-compose.yaml" ]
[]
diff --git a/.github/workflows/ci-deploy.yml b/.github/workflows/ci-deploy.yml index 7208df32f5c..513dee25a2d 100644 --- a/.github/workflows/ci-deploy.yml +++ b/.github/workflows/ci-deploy.yml @@ -11,69 +11,75 @@ on: workflow_dispatch: jobs: - deploy-linux-x86_64: + stage-snapshot: runs-on: ubuntu-latest - steps: - - uses: s4u/maven-settings-action@v2.2.0 - with: - servers: | - [{ - "id": "sonatype-nexus-snapshots", - "username": "${{ secrets.SONATYPE_USERNAME }}", - "password": "${{ secrets.SONATYPE_PASSWORD }}" - }] + strategy: + matrix: + include: + - setup: linux-x86_64-java8 + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.18.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.18.yaml run stage-snapshot" + - setup: linux-aarch64 + docker-compose-build: "-f docker/docker-compose.centos-7.yaml build" + docker-compose-run: "-f docker/docker-compose.centos-7.yaml run cross-compile-aarch64-stage-snapshot" + name: stage-snapshot-${{ matrix.setup }} + steps: - uses: actions/checkout@v2 # Cache .m2/repository - uses: actions/cache@v2 env: - cache-name: deploy-linux-x86_64-cache-m2-repository + cache-name: staging-${{ matrix.setup }}-cache-m2-repository with: path: ~/.m2/repository - key: ${{ runner.os }}-deploy-${{ env.cache-name }}-${{ hashFiles('**/pom.xml') }} + key: ${{ runner.os }}-staging-${{ env.cache-name }}-${{ hashFiles('**/pom.xml') }} restore-keys: | - ${{ runner.os }}-deploy-${{ env.cache-name }}- - ${{ runner.os }}-deploy- + ${{ runner.os }}-staging-${{ env.cache-name }}- + ${{ runner.os }}-staging- # Enable caching of Docker layers - uses: satackey/action-docker-layer-caching@v0.0.8 env: - docker-cache-name: deploy-linux-x86_64-cache-docker + docker-cache-name: staging-${{ matrix.setup }}-cache-docker continue-on-error: true with: - key: ${{ runner.os }}-deploy-${{ env.docker-cache-name }}-{hash} + key: ${{ runner.os }}-staging-${{ env.docker-cache-name }}-{hash} restore-keys: | - ${{ runner.os }}-deploy-${{ env.docker-cache-name }}- + ${{ runner.os }}-staging-${{ env.docker-cache-name }}- + + - name: Create local staging directory + run: mkdir -p ~/local-staging - name: Build docker image - run: docker-compose -f docker/docker-compose.yaml -f docker/docker-compose.centos-6.18.yaml build + run: docker-compose ${{ matrix.docker-compose-build }} - - name: Deploy snapshots - run: docker-compose -f docker/docker-compose.yaml -f docker/docker-compose.centos-6.18.yaml run deploy + - name: Stage snapshots to local staging directory + run: docker-compose ${{ matrix.docker-compose-run }} - deploy-linux-aarch64: - runs-on: ubuntu-latest - # We depend on the deploy of linux-x86_64 so we can download the dependencies - needs: deploy-linux-x86_64 - # Skip for now until we figured out how to deploy SNAPSHOTS with the the same timestamps - if: ${{ false }} - steps: - - uses: s4u/maven-settings-action@v2.2.0 + - name: Upload local staging directory + uses: actions/upload-artifact@v2 with: - servers: | - [{ - "id": "sonatype-nexus-snapshots", - "username": "${{ secrets.SONATYPE_USERNAME }}", - "password": "${{ secrets.SONATYPE_PASSWORD }}" - }] + name: ${{ matrix.setup }}-local-staging + path: ~/local-staging + if-no-files-found: error + deploy-staged-snapshots: + runs-on: ubuntu-18.04 + # Wait until we have staged everything + needs: stage-snapshot + steps: - uses: actions/checkout@v2 + - name: Set up JDK 8 + uses: actions/setup-java@v1 + with: + java-version: 8 + # Cache .m2/repository - uses: 
actions/cache@v2 env: - cache-name: deploy-linux-aarch64-cache-m2-repository + cache-name: deploy-staging-cache-m2-repository with: path: ~/.m2/repository key: ${{ runner.os }}-deploy-${{ env.cache-name }}-${{ hashFiles('**/pom.xml') }} @@ -81,18 +87,41 @@ jobs: ${{ runner.os }}-deploy-${{ env.cache-name }}- ${{ runner.os }}-deploy- - # Enable caching of Docker layers - - uses: satackey/action-docker-layer-caching@v0.0.8 - env: - docker-cache-name: deploy-linux-aarch64-cache-docker - continue-on-error: true + # Setup some env to re-use later. + - name: Prepare enviroment variables + run: | + echo "LOCAL_STAGING_DIR=$HOME/local-staging" >> $GITHUB_ENV + + # Hardcode the staging artifacts that need to be downloaded. + # These must match the matrix setups. There is currently no way to pull this out of the config. + - name: Download linux-aarch64 staging directory + uses: actions/download-artifact@v2 with: - key: ${{ runner.os }}-deploy-${{ env.docker-cache-name }}-{hash} - restore-keys: | - ${{ runner.os }}-deploy-${{ env.docker-cache-name }}- + name: linux-aarch64-local-staging + path: ~/linux-aarch64-local-staging - - name: Build docker image - run: docker-compose -f docker/docker-compose.centos-7.yaml build + - name: Download linux-x86_64-java8 staging directory + uses: actions/download-artifact@v2 + with: + name: linux-x86_64-java8-local-staging + path: ~/linux-x86_64-java8-local-staging + + - name: Merge staging repositories + run: | + mkdir -p ~/local-staging/deferred + cat ~/linux-aarch64-local-staging/deferred/.index >> ~/local-staging/deferred/.index + cp -r ~/linux-aarch64-local-staging/deferred/* ~/local-staging/deferred/ + cat ~/linux-x86_64-java8-local-staging/deferred/.index >> ~/local-staging/deferred/.index + cp -r ~/linux-x86_64-java8-local-staging/deferred/* ~/local-staging/deferred/ + + - uses: s4u/maven-settings-action@v2.2.0 + with: + servers: | + [{ + "id": "sonatype-nexus-snapshots", + "username": "${{ secrets.SONATYPE_USERNAME }}", + "password": "${{ secrets.SONATYPE_PASSWORD }}" + }] - - name: Deploy snapshots - run: docker-compose -f docker/docker-compose.centos-7.yaml run cross-compile-aarch64-deploy + - name: Deploy local staged artifacts + run: mvn -B --file pom.xml org.sonatype.plugins:nexus-staging-maven-plugin:deploy-staged -DaltStagingDirectory=$LOCAL_STAGING_DIR \ No newline at end of file diff --git a/docker/docker-compose.centos-6.18.yaml b/docker/docker-compose.centos-6.18.yaml index ff1aa5154c9..e522f6348b4 100644 --- a/docker/docker-compose.centos-6.18.yaml +++ b/docker/docker-compose.centos-6.18.yaml @@ -20,6 +20,9 @@ services: build-leak-boringssl-static: image: netty:centos-6-1.8 + stage-snapshot: + image: netty:centos-6-1.8 + deploy: image: netty:centos-6-1.8 diff --git a/docker/docker-compose.centos-7.yaml b/docker/docker-compose.centos-7.yaml index f4d99054937..5b2d324ea4e 100644 --- a/docker/docker-compose.centos-7.yaml +++ b/docker/docker-compose.centos-7.yaml @@ -24,7 +24,17 @@ services: cross-compile-aarch64-deploy: <<: *cross-compile-aarch64-common - command: /bin/bash -cl "./mvnw -pl transport-native-unix-common,transport-native-epoll clean deploy -Plinux-aarch64 -DskipTests=true" + command: /bin/bash -cl "./mvnw -Plinux-aarch64 -pl transport-native-unix-common,transport-native-epoll clean deploy -DskipTests=true" + + cross-compile-aarch64-stage-snapshot: + <<: *cross-compile-aarch64-common + volumes: + - ~/.ssh:/root/.ssh + - ~/.gnupg:/root/.gnupg + - ~/.m2:/root/.m2 + - ~/local-staging:/root/local-staging + - ..:/code + command: /bin/bash 
-cl "./mvnw -Plinux-aarch64 -pl transport-native-unix-common,transport-native-epoll clean package org.sonatype.plugins:nexus-staging-maven-plugin:deploy -DaltStagingDirectory=/root/local-staging -DskipRemoteStaging=true -DskipTests=true" cross-compile-aarch64-shell: <<: *cross-compile-aarch64-common diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index e2dc92fc105..e516a26bc44 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -30,6 +30,16 @@ services: <<: *common command: /bin/bash -cl "./mvnw clean deploy -DskipTests=true" + stage-snapshot: + <<: *common + volumes: + - ~/.ssh:/root/.ssh + - ~/.gnupg:/root/.gnupg + - ~/.m2:/root/.m2 + - ~/local-staging:/root/local-staging + - ..:/code + command: /bin/bash -cl "./mvnw clean package org.sonatype.plugins:nexus-staging-maven-plugin:deploy -DaltStagingDirectory=/root/local-staging -DskipRemoteStaging=true -DskipTests=true" + build-boringssl-static: <<: *common command: /bin/bash -cl "./mvnw -P boringssl clean install -Dio.netty.testsuite.badHost=netty.io -Dxml.skip=true"
null
test
test
"2020-12-24T11:37:42"
"2020-12-23T13:22:52Z"
violetagg
val
netty/netty/10929_10930
netty/netty
netty/netty/10929
netty/netty/10930
[ "keyword_pr_to_issue" ]
342f52e8130085648b676bfb86973310ebe6d03d
f54bf7aeb06c6a4cf9350482f7814c3fb9c4fd82
[]
[ "I think we need to keep the old as well as otherwise it will be a breaking change. ", "I think we need to keep the old method as well as otherwise this would be a breaking change", "I think we need to keep the old method as well as otherwise this would be a breaking change" ]
"2021-01-13T15:56:04Z"
[]
mqtt-codec: negative packet-ids
### Behavior It is not possible to create an MQTT message with a packetId above 32767 through the provided _builder classes_ in `io.netty.handler.codec.mqtt.MqttMessageBuilders`. This happens because the internal representation of the packetId (in `PubAckBuilder`, `SubAckBuilder` and `UnsubAckBuilder`) is a `short`, not an `int`. Any value > 32767 is therefore narrowed to a negative value when entering the builder classes, and remains a negative `int` in the underlying `MqttMessageIdVariableHeader` of the message. ### Steps to reproduce Try to create an MQTT SubAck message using `io.netty.handler.codec.mqtt.SubAckBuilder` with `.packetId(36000)`. It will throw an exception saying the packetId needs to be between 1 and 65535. ### Minimal yet complete reproducer code (or URL to code) https://github.com/netty/netty/pull/10930 ### Netty version 4.1.58.Final ### JVM version (e.g. `java -version`) ``` openjdk version "1.8.0_275" OpenJDK Runtime Environment (build 1.8.0_275-8u275-b01-0ubuntu1~20.04-b01) OpenJDK 64-Bit Server VM (build 25.275-b01, mixed mode) ``` ### OS version (e.g. `uname -a`) ``` Linux 5.8.0-36-generic #40~20.04.1-Ubuntu SMP x86_64 x86_64 x86_64 GNU/Linux ```
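A minimal sketch of the narrowing behaviour described above, using plain Java arithmetic (the class name is illustrative); the `& 0xFFFF` mask mirrors the widening applied in the fix:

```java
public class PacketIdNarrowing {
    public static void main(String[] args) {
        int packetId = 36000;              // a valid MQTT packet id (1..65535)
        short narrowed = (short) packetId; // storing it in a short yields -29536
        System.out.println(narrowed);      // -29536, which fails the 1..65535 range check
        int widened = narrowed & 0xFFFF;   // masking restores the unsigned value
        System.out.println(widened);       // 36000
    }
}
```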
[ "codec-mqtt/src/main/java/io/netty/handler/codec/mqtt/MqttMessageBuilders.java" ]
[ "codec-mqtt/src/main/java/io/netty/handler/codec/mqtt/MqttMessageBuilders.java" ]
[ "codec-mqtt/src/test/java/io/netty/handler/codec/mqtt/MqttMessageBuildersPacketIdTest.java" ]
diff --git a/codec-mqtt/src/main/java/io/netty/handler/codec/mqtt/MqttMessageBuilders.java b/codec-mqtt/src/main/java/io/netty/handler/codec/mqtt/MqttMessageBuilders.java index 8c854f0bf07..a2eea437740 100644 --- a/codec-mqtt/src/main/java/io/netty/handler/codec/mqtt/MqttMessageBuilders.java +++ b/codec-mqtt/src/main/java/io/netty/handler/codec/mqtt/MqttMessageBuilders.java @@ -524,7 +524,7 @@ public ConnAckPropertiesBuilder authenticationData(byte[] rawData) { public static final class PubAckBuilder { - private short packetId; + private int packetId; private byte reasonCode; private MqttProperties properties; @@ -536,11 +536,19 @@ public PubAckBuilder reasonCode(byte reasonCode) { return this; } - public PubAckBuilder packetId(short packetId) { + public PubAckBuilder packetId(int packetId) { this.packetId = packetId; return this; } + /** + * @deprecated use {@link PubAckBuilder#packetId(int)} instead + */ + @Deprecated + public PubAckBuilder packetId(short packetId) { + return packetId(packetId & 0xFFFF); + } + public PubAckBuilder properties(MqttProperties properties) { this.properties = properties; return this; @@ -557,18 +565,26 @@ public MqttMessage build() { public static final class SubAckBuilder { - private short packetId; + private int packetId; private MqttProperties properties; private final List<MqttQoS> grantedQoses = new ArrayList<MqttQoS>(); SubAckBuilder() { } - public SubAckBuilder packetId(short packetId) { + public SubAckBuilder packetId(int packetId) { this.packetId = packetId; return this; } + /** + * @deprecated use {@link SubAckBuilder#packetId(int)} instead + */ + @Deprecated + public SubAckBuilder packetId(short packetId) { + return packetId(packetId & 0xFFFF); + } + public SubAckBuilder properties(MqttProperties properties) { this.properties = properties; return this; @@ -604,18 +620,26 @@ public MqttSubAckMessage build() { public static final class UnsubAckBuilder { - private short packetId; + private int packetId; private MqttProperties properties; private final List<Short> reasonCodes = new ArrayList<Short>(); UnsubAckBuilder() { } - public UnsubAckBuilder packetId(short packetId) { + public UnsubAckBuilder packetId(int packetId) { this.packetId = packetId; return this; } + /** + * @deprecated use {@link UnsubAckBuilder#packetId(int)} instead + */ + @Deprecated + public UnsubAckBuilder packetId(short packetId) { + return packetId(packetId & 0xFFFF); + } + public UnsubAckBuilder properties(MqttProperties properties) { this.properties = properties; return this;
diff --git a/codec-mqtt/src/test/java/io/netty/handler/codec/mqtt/MqttMessageBuildersPacketIdTest.java b/codec-mqtt/src/test/java/io/netty/handler/codec/mqtt/MqttMessageBuildersPacketIdTest.java new file mode 100644 index 00000000000..c63e771f000 --- /dev/null +++ b/codec-mqtt/src/test/java/io/netty/handler/codec/mqtt/MqttMessageBuildersPacketIdTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.handler.codec.mqtt; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.Arrays; + +import static org.junit.Assert.*; + +@RunWith(value = Parameterized.class) +public class MqttMessageBuildersPacketIdTest { + @Parameterized.Parameter + public Integer id; + + @Parameterized.Parameters(name = "{index}: {0}") + public static Iterable<Integer> data() { + // we take a subset of valid packetIds + return Arrays.asList( + 0x0001, + 0x000F, + 0x00FF, + 0x0FFF, + 0xFFFF + ); + } + + @Test + public void testUnsubAckMessageIdAsShort() { + final MqttUnsubAckMessage msg = MqttMessageBuilders.unsubAck() + .packetId(id.shortValue()) + .build(); + + assertEquals( + id.intValue(), + msg.variableHeader().messageId() + ); + } + + @Test + public void testSubAckMessageIdAsShort() { + final MqttSubAckMessage msg = MqttMessageBuilders.subAck() + .packetId(id.shortValue()) + .build(); + + assertEquals( + id.intValue(), + msg.variableHeader().messageId() + ); + } + + @Test + public void testPubAckMessageIdAsShort() { + final MqttMessage msg = MqttMessageBuilders.pubAck() + .packetId(id.shortValue()) + .build(); + + assertEquals( + id.intValue(), + ((MqttMessageIdVariableHeader) msg.variableHeader()).messageId() + ); + } + + @Test + public void testUnsubAckMessageIdAsInt() { + final MqttUnsubAckMessage msg = MqttMessageBuilders.unsubAck() + .packetId(id) + .build(); + + assertEquals( + id.intValue(), + msg.variableHeader().messageId() + ); + } + + @Test + public void testSubAckMessageIdAsInt() { + final MqttSubAckMessage msg = MqttMessageBuilders.subAck() + .packetId(id) + .build(); + + assertEquals( + id.intValue(), + msg.variableHeader().messageId() + ); + } + + @Test + public void testPubAckMessageIdAsInt() { + final MqttMessage msg = MqttMessageBuilders.pubAck() + .packetId(id) + .build(); + + assertEquals( + id.intValue(), + ((MqttMessageIdVariableHeader) msg.variableHeader()).messageId() + ); + } +}
train
test
"2021-01-15T16:47:49"
"2021-01-13T15:14:06Z"
duke-bartholomew
val
netty/netty/10925_10935
netty/netty
netty/netty/10925
netty/netty/10935
[ "keyword_pr_to_issue" ]
a137ce204264e0b7411c5aa853e4459291e75b36
5b699b722c25a3c34656fc8bbb8e36bee27a5244
[ "@violetagg can you provide a PR ?", "@normanmaurer I was thinking for allowing blocking calls when loading `etcResolverFiles`, is that what you are thinking also?", "This sounds good " ]
[ "you need to also shutdown the executor ", "oops ... done" ]
"2021-01-14T10:07:09Z"
[]
Blocking call in UnixResolverDnsServerAddressStreamProvider.parse
### Expected behavior No blocking calls reported by BlockHound ### Actual behavior The exception below appears when BlockHound is installed ``` reactor.blockhound.BlockingOperationError: Blocking call! java.io.FileInputStream#readBytes at java.io.FileInputStream.readBytes(FileInputStream.java) at java.io.FileInputStream.read(FileInputStream.java:255) at sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:284) at sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:326) at sun.nio.cs.StreamDecoder.read(StreamDecoder.java:178) at java.io.InputStreamReader.read(InputStreamReader.java:184) at java.io.BufferedReader.fill(BufferedReader.java:161) at java.io.BufferedReader.readLine(BufferedReader.java:324) at java.io.BufferedReader.readLine(BufferedReader.java:389) at io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider.parse(UnixResolverDnsServerAddressStreamProvider.java:175) at io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider.<init>(UnixResolverDnsServerAddressStreamProvider.java:98) at io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider.<init>(UnixResolverDnsServerAddressStreamProvider.java:133) at io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider.parseSilently(UnixResolverDnsServerAddressStreamProvider.java:72) at io.netty.resolver.dns.DnsServerAddressStreamProviders$DefaultProviderHolder$1.provider(DnsServerAddressStreamProviders.java:139) at io.netty.resolver.dns.DnsServerAddressStreamProviders$DefaultProviderHolder$1.nameServerAddressStream(DnsServerAddressStreamProviders.java:129) at io.netty.resolver.dns.DnsNameResolver.doResolveAllUncached0(DnsNameResolver.java:1070) at io.netty.resolver.dns.DnsNameResolver.access$600(DnsNameResolver.java:90) at io.netty.resolver.dns.DnsNameResolver$6.run(DnsNameResolver.java:1053) at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164) at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472) at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:500) at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.lang.Thread.run(Thread.java:748) ``` ### Netty version 4.1.57.Final ### JVM version (e.g. `java -version`) JDK 8 ### OS version (e.g. `uname -a`) MacOS
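A hedged sketch of how such a report can be silenced with BlockHound's builder API, mirroring the allowance the fix registers for `UnixResolverDnsServerAddressStreamProvider.parse` (the class and method names come from the patch; the surrounding setup is illustrative):

```java
import reactor.blockhound.BlockHound;

public class BlockHoundSetup {
    public static void main(String[] args) {
        BlockHound.builder()
                // Permit the known, one-off blocking read of the resolver config files
                .allowBlockingCallsInside(
                        "io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider",
                        "parse")
                .install();
        // ... start the application; other blocking calls on non-blocking threads still fail fast
    }
}
```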
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "transport-blockhound-tests/pom.xml" ]
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "transport-blockhound-tests/pom.xml" ]
[ "transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java" ]
diff --git a/common/src/main/java/io/netty/util/internal/Hidden.java b/common/src/main/java/io/netty/util/internal/Hidden.java index ac97323573e..18a03c68ead 100644 --- a/common/src/main/java/io/netty/util/internal/Hidden.java +++ b/common/src/main/java/io/netty/util/internal/Hidden.java @@ -115,6 +115,10 @@ public void applyTo(BlockHound.Builder builder) { "sun.security.ssl.SSLEngineImpl", "wrap"); + builder.allowBlockingCallsInside( + "io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider", + "parse"); + builder.nonBlockingThreadPredicate(new Function<Predicate<Thread>, Predicate<Thread>>() { @Override public Predicate<Thread> apply(final Predicate<Thread> p) { diff --git a/transport-blockhound-tests/pom.xml b/transport-blockhound-tests/pom.xml index 81240d329d9..77c36f40e90 100644 --- a/transport-blockhound-tests/pom.xml +++ b/transport-blockhound-tests/pom.xml @@ -89,6 +89,11 @@ <artifactId>netty-handler</artifactId> <version>${project.version}</version> </dependency> + <dependency> + <groupId>${project.groupId}</groupId> + <artifactId>netty-resolver-dns</artifactId> + <version>${project.version}</version> + </dependency> <dependency> <groupId>${project.groupId}</groupId> <artifactId>${tcnative.artifactId}</artifactId>
diff --git a/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java b/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java index ade2def4703..2d5121a0781 100644 --- a/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java +++ b/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java @@ -35,6 +35,8 @@ import io.netty.handler.ssl.SslProvider; import io.netty.handler.ssl.util.InsecureTrustManagerFactory; import io.netty.handler.ssl.util.SelfSignedCertificate; +import io.netty.resolver.dns.DnsServerAddressStreamProvider; +import io.netty.resolver.dns.DnsServerAddressStreamProviders; import io.netty.util.HashedWheelTimer; import io.netty.util.ReferenceCountUtil; import io.netty.util.concurrent.DefaultThreadFactory; @@ -53,6 +55,8 @@ import reactor.blockhound.integration.BlockHoundIntegration; import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; import java.util.Queue; import java.util.ServiceLoader; import java.util.concurrent.CountDownLatch; @@ -69,6 +73,7 @@ import static io.netty.buffer.Unpooled.wrappedBuffer; import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -314,6 +319,40 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { } } + @Test(timeout = 5000L) + public void testParseEtcResolverFilesAllowsBlockingCalls() throws InterruptedException { + SingleThreadEventExecutor executor = + new SingleThreadEventExecutor(null, new DefaultThreadFactory("test"), true) { + @Override + protected void run() { + while (!confirmShutdown()) { + Runnable task = takeTask(); + if (task != null) { + task.run(); + } + } + } + }; + try { + CountDownLatch latch = new CountDownLatch(1); + List<DnsServerAddressStreamProvider> result = new ArrayList<>(); + List<Throwable> error = new ArrayList<>(); + executor.execute(() -> { + try { + result.add(DnsServerAddressStreamProviders.unixDefault()); + } catch (Throwable t) { + error.add(t); + } + latch.countDown(); + }); + latch.await(); + assertEquals(0, error.size()); + assertEquals(1, result.size()); + } finally { + executor.shutdownGracefully(); + } + } + private static void testTrustManagerVerify(String tlsVersion) throws Exception { final SslContext sslClientCtx = SslContextBuilder.forClient()
train
test
"2021-01-13T11:28:54"
"2021-01-12T17:48:36Z"
violetagg
val
netty/netty/10914_10942
netty/netty
netty/netty/10914
netty/netty/10942
[ "keyword_pr_to_issue" ]
5b699b722c25a3c34656fc8bbb8e36bee27a5244
bba0017d6e9f0fcf87284fd5618c153f45d8a9da
[ "What's wrong about it?", "` /**\r\n * Sets the value to uninitialized; a proceeding call to get() will trigger a call to initialValue().\r\n */\r\n public final void remove() {\r\n remove(InternalThreadLocalMap.getIfSet());\r\n }`\r\n\r\n@chrisvest,\r\nThe remove() method call to getIfSet() not call to get().\r\n", "What it's meant to say is that after you've called `remove()`, if you then call `get()`, you'll get a new `initialValue()` out.", "@chrisvest,\r\nI got it. I think to more meant should change the word `proceeding` by other words.\r\n" ]
[]
"2021-01-15T10:17:58Z"
[]
Incorrect comment on the remove method of the FastThreadLocal class.
https://github.com/netty/netty/blob/0dc246eb129796313b58c1dbdd674aa289f72cad/common/src/main/java/io/netty/util/concurrent/FastThreadLocal.java#L234
[ "common/src/main/java/io/netty/util/concurrent/FastThreadLocal.java" ]
[ "common/src/main/java/io/netty/util/concurrent/FastThreadLocal.java" ]
[]
diff --git a/common/src/main/java/io/netty/util/concurrent/FastThreadLocal.java b/common/src/main/java/io/netty/util/concurrent/FastThreadLocal.java index 9b31dd4504e..750b12d8b49 100644 --- a/common/src/main/java/io/netty/util/concurrent/FastThreadLocal.java +++ b/common/src/main/java/io/netty/util/concurrent/FastThreadLocal.java @@ -208,7 +208,7 @@ public final void set(InternalThreadLocalMap threadLocalMap, V value) { } /** - * @return see {@link InternalThreadLocalMap#setIndexedVariable(int, Object)}. + * @see InternalThreadLocalMap#setIndexedVariable(int, Object). */ private void setKnownNotUnset(InternalThreadLocalMap threadLocalMap, V value) { if (threadLocalMap.setIndexedVariable(index, value)) { @@ -231,15 +231,16 @@ public final boolean isSet(InternalThreadLocalMap threadLocalMap) { return threadLocalMap != null && threadLocalMap.isIndexedVariableSet(index); } /** - * Sets the value to uninitialized; a proceeding call to get() will trigger a call to initialValue(). + * Sets the value to uninitialized for the specified thread local map. + * After this, any subsequent call to get() will trigger a new call to initialValue(). */ public final void remove() { remove(InternalThreadLocalMap.getIfSet()); } /** - * Sets the value to uninitialized for the specified thread local map; - * a proceeding call to get() will trigger a call to initialValue(). + * Sets the value to uninitialized for the specified thread local map. + * After this, any subsequent call to get() will trigger a new call to initialValue(). * The specified thread local map must be for the current thread. */ @SuppressWarnings("unchecked")
null
test
test
"2021-01-14T18:27:12"
"2021-01-08T06:30:26Z"
andrewvo148
val
netty/netty/10792_10951
netty/netty
netty/netty/10792
netty/netty/10951
[ "keyword_pr_to_issue" ]
41e79835f20719ca368b23b7ebbbbc96e6b6b666
4fbbcf8702a98f4477cb0c733eab736eca988979
[ "Will provide a PR, probably next week.", "@slandelle any update ?", "@normanmaurer PTAL #10951" ]
[ "Does this need to be a `LinkedHashSet` ?", "is the default size of 64 good ?", "Parameters order is not supposed to be significant. Moreover, `extractExtensions` (in the same class) uses `HashMap` for the parsing side, so using one here too is consistent.", "The size greatly varies depending on the parameters. The real world example (client side deflate) is have in the tests is 149 chars with 4 extensions ranging from 15 to 70 chars.\r\nDo you have any suggestion? Hardcode 150?", "yep +1 ", "done" ]
"2021-01-19T16:59:27Z"
[]
WebSocketClientExtensionHandler doesn't check if extensions are already present in sec-websocket-extensions header
### Expected behavior Netty verifies that extensions are not already present in sec-websocket-extensions header before appending them. ### Actual behavior Netty doesn't check, possibly resulting in duplicated values. ### Steps to reproduce Just read existing code: https://github.com/netty/netty/blob/netty-4.1.54.Final/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java#L65-L73 ### Netty version 4.1.54 ### JVM version (e.g. `java -version`) Irrelevant ### OS version (e.g. `uname -a`) Irrelevant
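A rough sketch of the kind of dedup check being asked for, using plain collections rather than Netty's parsing helpers (the method and class names here are hypothetical; the actual fix merges `WebSocketExtensionData` entries and gives user-defined parameters precedence):

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class ExtensionMergeSketch {
    /** Appends extensionName only if the header value does not already mention it. */
    static String appendIfAbsent(String headerValue, String extensionName) {
        Map<String, String> byName = new LinkedHashMap<>();
        if (headerValue != null) {
            for (String token : headerValue.split(",")) {
                String name = token.trim().split(";")[0].trim();
                byName.put(name, token.trim());
            }
        }
        byName.putIfAbsent(extensionName, extensionName);
        return String.join(",", byName.values());
    }

    public static void main(String[] args) {
        // The extension is already present, so nothing is appended:
        System.out.println(appendIfAbsent("permessage-deflate; client_max_window_bits", "permessage-deflate"));
    }
}
```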
[ "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java index 165c1cdfc88..47073a7b234 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java @@ -63,14 +63,15 @@ public void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise pr if (msg instanceof HttpRequest && WebSocketExtensionUtil.isWebsocketUpgrade(((HttpRequest) msg).headers())) { HttpRequest request = (HttpRequest) msg; String headerValue = request.headers().getAsString(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS); - + List<WebSocketExtensionData> extraExtensions = + new ArrayList<WebSocketExtensionData>(extensionHandshakers.size()); for (WebSocketClientExtensionHandshaker extensionHandshaker : extensionHandshakers) { - WebSocketExtensionData extensionData = extensionHandshaker.newRequestData(); - headerValue = WebSocketExtensionUtil.appendExtension(headerValue, - extensionData.name(), extensionData.parameters()); + extraExtensions.add(extensionHandshaker.newRequestData()); } + String newHeaderValue = WebSocketExtensionUtil + .computeMergeExtensionsHeaderValue(headerValue, extraExtensions); - request.headers().set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, headerValue); + request.headers().set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, newHeaderValue); } super.write(ctx, msg, promise); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java index 567e49e6bf3..01f1c0036c1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java @@ -72,25 +72,53 @@ public static List<WebSocketExtensionData> extractExtensions(String extensionHea } } - static String appendExtension(String currentHeaderValue, String extensionName, - Map<String, String> extensionParameters) { + static String computeMergeExtensionsHeaderValue(String userDefinedHeaderValue, + List<WebSocketExtensionData> extraExtensions) { + List<WebSocketExtensionData> userDefinedExtensions = + userDefinedHeaderValue != null ? + extractExtensions(userDefinedHeaderValue) : + Collections.<WebSocketExtensionData>emptyList(); - StringBuilder newHeaderValue = new StringBuilder( - currentHeaderValue != null ? 
currentHeaderValue.length() : extensionName.length() + 1); - if (currentHeaderValue != null && !currentHeaderValue.trim().isEmpty()) { - newHeaderValue.append(currentHeaderValue); - newHeaderValue.append(EXTENSION_SEPARATOR); + for (WebSocketExtensionData userDefined: userDefinedExtensions) { + WebSocketExtensionData matchingExtra = null; + int i; + for (i = 0; i < extraExtensions.size(); i ++) { + WebSocketExtensionData extra = extraExtensions.get(i); + if (extra.name().equals(userDefined.name())) { + matchingExtra = extra; + break; + } + } + if (matchingExtra == null) { + extraExtensions.add(userDefined); + } else { + // merge with higher precedence to user defined parameters + Map<String, String> mergedParameters = new HashMap<String, String>(matchingExtra.parameters()); + mergedParameters.putAll(userDefined.parameters()); + extraExtensions.set(i, new WebSocketExtensionData(matchingExtra.name(), mergedParameters)); + } } - newHeaderValue.append(extensionName); - for (Entry<String, String> extensionParameter : extensionParameters.entrySet()) { - newHeaderValue.append(PARAMETER_SEPARATOR); - newHeaderValue.append(extensionParameter.getKey()); - if (extensionParameter.getValue() != null) { - newHeaderValue.append(PARAMETER_EQUAL); - newHeaderValue.append(extensionParameter.getValue()); + + StringBuilder sb = new StringBuilder(150); + + for (WebSocketExtensionData data: extraExtensions) { + sb.append(data.name()); + for (Entry<String, String> parameter : data.parameters().entrySet()) { + sb.append(PARAMETER_SEPARATOR); + sb.append(parameter.getKey()); + if (parameter.getValue() != null) { + sb.append(PARAMETER_EQUAL); + sb.append(parameter.getValue()); + } } + sb.append(EXTENSION_SEPARATOR); + } + + if (!extraExtensions.isEmpty()) { + sb.setLength(sb.length() - EXTENSION_SEPARATOR.length()); } - return newHeaderValue.toString(); + + return sb.toString(); } private WebSocketExtensionUtil() { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java index 9e45bcfa630..e00090bb79d 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java @@ -124,13 +124,13 @@ private void handlePotentialUpgrade(final ChannelHandlerContext ctx, if (validExtensions != null) { String headerValue = headers.getAsString(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS); - + List<WebSocketExtensionData> extraExtensions = + new ArrayList<WebSocketExtensionData>(extensionHandshakers.size()); for (WebSocketServerExtension extension : validExtensions) { - WebSocketExtensionData extensionData = extension.newReponseData(); - headerValue = WebSocketExtensionUtil.appendExtension(headerValue, - extensionData.name(), - extensionData.parameters()); + extraExtensions.add(extension.newReponseData()); } + String newHeaderValue = WebSocketExtensionUtil + .computeMergeExtensionsHeaderValue(headerValue, extraExtensions); promise.addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) { @@ -148,7 +148,7 @@ public void operationComplete(ChannelFuture future) { }); if (headerValue != null) { - headers.set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, headerValue); + headers.set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, newHeaderValue); } }
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java index 12aa06ed660..eab3b3ba46c 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java @@ -21,19 +21,63 @@ import io.netty.handler.codec.http.HttpHeaders; import org.junit.Test; +import java.util.List; + import static org.junit.Assert.*; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionUtil.*; public class WebSocketExtensionUtilTest { @Test public void testIsWebsocketUpgrade() { HttpHeaders headers = new DefaultHttpHeaders(); - assertFalse(WebSocketExtensionUtil.isWebsocketUpgrade(headers)); + assertFalse(isWebsocketUpgrade(headers)); headers.add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET); - assertFalse(WebSocketExtensionUtil.isWebsocketUpgrade(headers)); + assertFalse(isWebsocketUpgrade(headers)); headers.add(HttpHeaderNames.CONNECTION, "Keep-Alive, Upgrade"); - assertTrue(WebSocketExtensionUtil.isWebsocketUpgrade(headers)); + assertTrue(isWebsocketUpgrade(headers)); + } + + @Test + public void computeMergeExtensionsHeaderValueWhenNoUserDefinedHeader() { + List<WebSocketExtensionData> extras = extractExtensions("permessage-deflate; client_max_window_bits," + + "permessage-deflate; client_no_context_takeover; client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame"); + String newHeaderValue = computeMergeExtensionsHeaderValue(null, extras); + assertEquals("permessage-deflate;client_max_window_bits," + + "permessage-deflate;client_no_context_takeover;client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame", newHeaderValue); + } + + @Test + public void computeMergeExtensionsHeaderValueWhenNoConflictingUserDefinedHeader() { + List<WebSocketExtensionData> extras = extractExtensions("permessage-deflate; client_max_window_bits," + + "permessage-deflate; client_no_context_takeover; client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame"); + String newHeaderValue = computeMergeExtensionsHeaderValue("foo, bar", extras); + assertEquals("permessage-deflate;client_max_window_bits," + + "permessage-deflate;client_no_context_takeover;client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame," + + "foo," + + "bar", newHeaderValue); + } + + @Test + public void computeMergeExtensionsHeaderValueWhenConflictingUserDefinedHeader() { + List<WebSocketExtensionData> extras = extractExtensions("permessage-deflate; client_max_window_bits," + + "permessage-deflate; client_no_context_takeover; client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame"); + String newHeaderValue = computeMergeExtensionsHeaderValue("permessage-deflate; client_max_window_bits", extras); + assertEquals("permessage-deflate;client_max_window_bits," + + "permessage-deflate;client_no_context_takeover;client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame", newHeaderValue); } }
test
test
"2021-01-21T11:12:54"
"2020-11-11T13:25:27Z"
slandelle
val
netty/netty/10792_10956
netty/netty
netty/netty/10792
netty/netty/10956
[ "keyword_pr_to_issue" ]
f54bf7aeb06c6a4cf9350482f7814c3fb9c4fd82
ccd01934f5a30c62f352fe888d45dbf7ad2352c8
[ "Will provide a PR, probably next week.", "@slandelle any update ?", "@normanmaurer PTAL #10951" ]
[ "@normanmaurer This was missing in the previous PR." ]
"2021-01-21T13:45:52Z"
[]
WebSocketClientExtensionHandler doesn't check if extensions are already present in sec-websocket-extensions header
### Expected behavior Netty verifies that extensions are not already present in sec-websocket-extensions header before appending them. ### Actual behavior Netty doesn't check, possibly resulting in duplicated values. ### Steps to reproduce Just read existing code: https://github.com/netty/netty/blob/netty-4.1.54.Final/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java#L65-L73 ### Netty version 4.1.54 ### JVM version (e.g. `java -version`) Irrelevant ### OS version (e.g. `uname -a`) Irrelevant
[ "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java", "codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java index 165c1cdfc88..47073a7b234 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java @@ -63,14 +63,15 @@ public void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise pr if (msg instanceof HttpRequest && WebSocketExtensionUtil.isWebsocketUpgrade(((HttpRequest) msg).headers())) { HttpRequest request = (HttpRequest) msg; String headerValue = request.headers().getAsString(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS); - + List<WebSocketExtensionData> extraExtensions = + new ArrayList<WebSocketExtensionData>(extensionHandshakers.size()); for (WebSocketClientExtensionHandshaker extensionHandshaker : extensionHandshakers) { - WebSocketExtensionData extensionData = extensionHandshaker.newRequestData(); - headerValue = WebSocketExtensionUtil.appendExtension(headerValue, - extensionData.name(), extensionData.parameters()); + extraExtensions.add(extensionHandshaker.newRequestData()); } + String newHeaderValue = WebSocketExtensionUtil + .computeMergeExtensionsHeaderValue(headerValue, extraExtensions); - request.headers().set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, headerValue); + request.headers().set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, newHeaderValue); } super.write(ctx, msg, promise); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java index 567e49e6bf3..01f1c0036c1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java @@ -72,25 +72,53 @@ public static List<WebSocketExtensionData> extractExtensions(String extensionHea } } - static String appendExtension(String currentHeaderValue, String extensionName, - Map<String, String> extensionParameters) { + static String computeMergeExtensionsHeaderValue(String userDefinedHeaderValue, + List<WebSocketExtensionData> extraExtensions) { + List<WebSocketExtensionData> userDefinedExtensions = + userDefinedHeaderValue != null ? + extractExtensions(userDefinedHeaderValue) : + Collections.<WebSocketExtensionData>emptyList(); - StringBuilder newHeaderValue = new StringBuilder( - currentHeaderValue != null ? 
currentHeaderValue.length() : extensionName.length() + 1); - if (currentHeaderValue != null && !currentHeaderValue.trim().isEmpty()) { - newHeaderValue.append(currentHeaderValue); - newHeaderValue.append(EXTENSION_SEPARATOR); + for (WebSocketExtensionData userDefined: userDefinedExtensions) { + WebSocketExtensionData matchingExtra = null; + int i; + for (i = 0; i < extraExtensions.size(); i ++) { + WebSocketExtensionData extra = extraExtensions.get(i); + if (extra.name().equals(userDefined.name())) { + matchingExtra = extra; + break; + } + } + if (matchingExtra == null) { + extraExtensions.add(userDefined); + } else { + // merge with higher precedence to user defined parameters + Map<String, String> mergedParameters = new HashMap<String, String>(matchingExtra.parameters()); + mergedParameters.putAll(userDefined.parameters()); + extraExtensions.set(i, new WebSocketExtensionData(matchingExtra.name(), mergedParameters)); + } } - newHeaderValue.append(extensionName); - for (Entry<String, String> extensionParameter : extensionParameters.entrySet()) { - newHeaderValue.append(PARAMETER_SEPARATOR); - newHeaderValue.append(extensionParameter.getKey()); - if (extensionParameter.getValue() != null) { - newHeaderValue.append(PARAMETER_EQUAL); - newHeaderValue.append(extensionParameter.getValue()); + + StringBuilder sb = new StringBuilder(150); + + for (WebSocketExtensionData data: extraExtensions) { + sb.append(data.name()); + for (Entry<String, String> parameter : data.parameters().entrySet()) { + sb.append(PARAMETER_SEPARATOR); + sb.append(parameter.getKey()); + if (parameter.getValue() != null) { + sb.append(PARAMETER_EQUAL); + sb.append(parameter.getValue()); + } } + sb.append(EXTENSION_SEPARATOR); + } + + if (!extraExtensions.isEmpty()) { + sb.setLength(sb.length() - EXTENSION_SEPARATOR.length()); } - return newHeaderValue.toString(); + + return sb.toString(); } private WebSocketExtensionUtil() { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java index 9e45bcfa630..2013cafd698 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java @@ -124,13 +124,13 @@ private void handlePotentialUpgrade(final ChannelHandlerContext ctx, if (validExtensions != null) { String headerValue = headers.getAsString(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS); - + List<WebSocketExtensionData> extraExtensions = + new ArrayList<WebSocketExtensionData>(extensionHandshakers.size()); for (WebSocketServerExtension extension : validExtensions) { - WebSocketExtensionData extensionData = extension.newReponseData(); - headerValue = WebSocketExtensionUtil.appendExtension(headerValue, - extensionData.name(), - extensionData.parameters()); + extraExtensions.add(extension.newReponseData()); } + String newHeaderValue = WebSocketExtensionUtil + .computeMergeExtensionsHeaderValue(headerValue, extraExtensions); promise.addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) { @@ -147,8 +147,8 @@ public void operationComplete(ChannelFuture future) { } }); - if (headerValue != null) { - headers.set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, headerValue); + if (newHeaderValue != null) { + 
headers.set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, newHeaderValue); } }
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java index 12aa06ed660..eab3b3ba46c 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java @@ -21,19 +21,63 @@ import io.netty.handler.codec.http.HttpHeaders; import org.junit.Test; +import java.util.List; + import static org.junit.Assert.*; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionUtil.*; public class WebSocketExtensionUtilTest { @Test public void testIsWebsocketUpgrade() { HttpHeaders headers = new DefaultHttpHeaders(); - assertFalse(WebSocketExtensionUtil.isWebsocketUpgrade(headers)); + assertFalse(isWebsocketUpgrade(headers)); headers.add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET); - assertFalse(WebSocketExtensionUtil.isWebsocketUpgrade(headers)); + assertFalse(isWebsocketUpgrade(headers)); headers.add(HttpHeaderNames.CONNECTION, "Keep-Alive, Upgrade"); - assertTrue(WebSocketExtensionUtil.isWebsocketUpgrade(headers)); + assertTrue(isWebsocketUpgrade(headers)); + } + + @Test + public void computeMergeExtensionsHeaderValueWhenNoUserDefinedHeader() { + List<WebSocketExtensionData> extras = extractExtensions("permessage-deflate; client_max_window_bits," + + "permessage-deflate; client_no_context_takeover; client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame"); + String newHeaderValue = computeMergeExtensionsHeaderValue(null, extras); + assertEquals("permessage-deflate;client_max_window_bits," + + "permessage-deflate;client_no_context_takeover;client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame", newHeaderValue); + } + + @Test + public void computeMergeExtensionsHeaderValueWhenNoConflictingUserDefinedHeader() { + List<WebSocketExtensionData> extras = extractExtensions("permessage-deflate; client_max_window_bits," + + "permessage-deflate; client_no_context_takeover; client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame"); + String newHeaderValue = computeMergeExtensionsHeaderValue("foo, bar", extras); + assertEquals("permessage-deflate;client_max_window_bits," + + "permessage-deflate;client_no_context_takeover;client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame," + + "foo," + + "bar", newHeaderValue); + } + + @Test + public void computeMergeExtensionsHeaderValueWhenConflictingUserDefinedHeader() { + List<WebSocketExtensionData> extras = extractExtensions("permessage-deflate; client_max_window_bits," + + "permessage-deflate; client_no_context_takeover; client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame"); + String newHeaderValue = computeMergeExtensionsHeaderValue("permessage-deflate; client_max_window_bits", extras); + assertEquals("permessage-deflate;client_max_window_bits," + + "permessage-deflate;client_no_context_takeover;client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame", newHeaderValue); } }
val
test
"2021-01-21T14:40:45"
"2020-11-11T13:25:27Z"
slandelle
val
netty/netty/10973_11001
netty/netty
netty/netty/10973
netty/netty/11001
[ "keyword_issue_to_pr" ]
a98293f08403961db8bf3eb8ba66a21cb86ecf08
1529ef1794e0a6654fa4334fd979b769d6940e61
[ "@fredericBregier It could be related to https://github.com/netty/netty/pull/10623 given that right now file isn't written until a delimiter is found?", "I think so too... I think we should never the commit in question for now as while it fixes some \"perf issues\" when running in with paranoid leak detection I don't think it has any other benefits in real world use-cases. @fredericBregier WDYT ?", "Hi, the issue is partially related but yet an issue. \r\n- the file is totally in the buffer while reading: previously it could be in memory but by chunk\r\n- once the file is over, if the \"disk\" based HttpData is used, the buffer is written to a temprary file, therefore leaving the memory free\r\n- Note that another bug was fixed in the same time since before buffers were free (`discardReadBytes`) wrongly\r\n\r\nI agree that this should be changed to adapt the solution with the new way to find the delimiter. Perhaps this?\r\nCurrently:\r\n- The content is added to the HttpData along slices (see https://github.com/netty/netty/blob/5c522916f796aef7a9bf7a7b234bfb81232b52d0/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L1191 ).\r\n- But only once all slices are found (https://github.com/netty/netty/blob/5c522916f796aef7a9bf7a7b234bfb81232b52d0/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L1171)\r\n\r\nIf it is written (in Disk mode or Mixed mode and if size is greater than limit), the buffer might be cleared (free), so one could try to change this such as:\r\n- each time a slice is found (even if the delimiter is not found), one could add the content to the HttpData, but carefully take into consideration that perhaps not all bufer shall be taken (the end of the buffer could be in the middle of the delimiter, to not take into account)\r\n- therefore, try to release the memory (careful, since if Memory based, must not be released at all or bad content will occur) as in https://github.com/netty/netty/blob/5c522916f796aef7a9bf7a7b234bfb81232b52d0/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L348 https://github.com/netty/netty/blob/5c522916f796aef7a9bf7a7b234bfb81232b52d0/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L349) (we cannot simply reset the writerIndex and readerIndex to the beginning for this HttpData since the HttpData could be in Memory only)\r\n\r\nNot sure it will work or easy to implement, but I think the idea is there...\r\n", "Hi all, I propose a fix for this (adding a test inspired from the one given, testing both in Memory and on Disk behaviors)", "Thanks to both of you. The new version is working well now.\r\n\r\nThe performance degradation with paranoid detection is very noticeable (e.g. a test with a lot of data moving was taking 1 second in `4.1.58` takes 30 seconds in `4.1.59`). For a while I thought there was a performance issue in the new version before I remembered what Frederic said about leak detection performance.\r\n\r\nSo if anyone else is experiencing this and wondering what is going on, try disabling PARANOID for the slow tests.", "@danielflower or maybe wait for #11001 which fixes also this PARANOID issue, and improves without this level also performances (about 4 times)... ;-)", "The #11001 PR is merged now." ]
[ "Calling this method `findLineBreak` would read better with the capitalisation.\r\n\r\nI think calling it \"line break\" also makes sense for the parameters and variables in `findDelimiter`.", "```suggestion\r\n * Try to find the delimiter, with LF or CRLF in front of it (added as delimiters) if needed\r\n```", "As for how this algorithm is implemented… check out the tools in `AbstractSearchProcessorFactory`. You could use them to search for the delimiter directly, then check if they are preceded by a line break or not. That should be less code to write, and probably be faster too.", "Should we check if we really get an LF here?", "Do we need the `while (undecodedChunk.isReadable())`, or can it be an `if`? Looks like we always either throw or return, if we have undecoded data.", "We should give `getBytes` a `Charset` to work with, otherwise it might pick some random default platform encoding.", "I wouldn't mind if this explanation was elaborated a little bit.", "Shouldn't the \"delimiter found\" aka. \"last\" be `true` here?", "The test data is 100 MB. Can't we get meaningful coverage with something smaller?", "I think it should be possible to extract more common code. And perhaps even make each test case its own test method.", "Same here with duplication and splitting into separate test cases.", "Agree", "Agree", "I will check, but not so sure: I need to check that this is not doing getByte method call or equivalent.", "You're probably right. Let me check", "Since this is LF or CRLF, we need to check if CR at first position, which means LF is following.\r\nAnd we need to get after line break, whatever the form.", "Originally, there were none. I was surprised, but it was.\r\nOf course I may use Charset, but hoping it will not break anything.", "I agree. What about:\r\n\r\nRewite buffer in order to skip lengthToSkip bytes from current readerIndex, such that any readable bytes available after readerIndex + lengthToSkip (so before writerIndex) are moved at readerIndex position, therefore decreasing writerIndex of lengthToSkip at the end of the process.", "The delimiter is not found yet. We are in the middle:\r\n- one CRLF is found but not delimiter\r\n- therefore we can at least add the content up to CRLF (not included) but we can't ensure the delimiter is not at all there (could be in the next chunk, using previous CRLF still available)\r\nSo it has to be false", "It was the original code provided by issue submitter.\r\nBut I agree, we can limit the size for something like 10 MB (using 100KB chunk size)", "Not really, since the ending asserts are differents. Only the releasing are the same (due to my fix).\r\n\r\nI can split into several tests of course, one per Factory type.", "Again asserts are differents.\r\nBut agree to split into different tests using each a factory type.", "@chrisvest I test your proposal. 
The results are not that good using the benchmark (almost 3 times slower).\r\n\r\nFirst, in order to see if what I am doing is correct, here is a copy of the method:\r\n\r\n    static int findDelimiter(ByteBuf buffer, int index, SearchProcessorFactory factory, int delimiterLength,\r\n boolean precededByLineBreak) {\r\n SearchProcessor processor = factory.newSearchProcessor();\r\n final int readerIndex = buffer.readerIndex();\r\n final int writerIndex = buffer.writerIndex();\r\n int length = writerIndex - index;\r\n while (delimiterLength <= length) {\r\n int startDelimiter = buffer.forEachByte(index, length, processor);\r\n if (startDelimiter < 0) {\r\n // No delimiter found\r\n return -1;\r\n }\r\n startDelimiter -= delimiterLength - 1;\r\n if (precededByLineBreak) {\r\n if (startDelimiter > readerIndex && buffer.getByte(startDelimiter - 1) == HttpConstants.LF) {\r\n startDelimiter--;\r\n if (startDelimiter > readerIndex && buffer.getByte(startDelimiter - 1) == HttpConstants.CR) {\r\n startDelimiter--;\r\n }\r\n } else {\r\n // A delimiter could be found further, so iterate\r\n index = startDelimiter + 1;\r\n length = writerIndex - index;\r\n continue;\r\n }\r\n }\r\n return startDelimiter - readerIndex;\r\n }\r\n return -1;\r\n }\r\n\r\nI set the searchProcessorFactory once for the request and reuse it, as the doc recommends.\r\n\r\nI tried both KMP and newBitap. Here are the results compared to the previous implementation. \r\n\r\nNote that your suggestion made me change the `bytesBefore` algorithm a bit too, by first searching for the delimiter and then checking whether a line break precedes it, if asked for. \r\nThe results are almost the same (a very small improvement), so I will nevertheless update to this new algorithm, which is globally the same (the searches are just reversed: first the delimiter, then the line break if needed).\r\n\r\nHere are the results:\r\n\r\nOriginal algorithm using `bytesBefore`\r\n\r\n    Benchmark Mode Cnt Score Error Units\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 4,253 ± 0,333 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 4,422 ± 0,250 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,877 ± 0,014 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 4,151 ± 0,481 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 2,167 ± 0,098 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 2,520 ± 0,043 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,177 ± 0,003 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 2,419 ± 0,061 ops/ms\r\n\r\nSearchProcessor version reuse KMP\r\n\r\n    Benchmark Mode Cnt Score Error Units\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 1,309 ± 0,015 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 1,740 ± 0,029 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,710 ± 0,008 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 1,233 ± 0,703 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 1,045 ± 0,008 ops/ms\r\n 
HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 1,544 ± 0,604 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,309 ± 0,012 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 1,104 ± 0,097 ops/ms\r\n\r\nSearchProcessor version reuse newBitap\r\n\r\n    Benchmark Mode Cnt Score Error Units\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 1,029 ± 0,002 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 1,828 ± 0,052 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,703 ± 0,084 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 1,033 ± 0,003 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 0,872 ± 0,016 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 1,417 ± 0,027 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,300 ± 0,009 ops/ms\r\n    HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 0,873 ± 0,011 ops/ms\r\n", "I split it into several tests, refactoring the code.", "You were right!", "Done and all tests passed", "Done: split and refactored the tests", "Done, tests are ok", "@chrisvest I looked into the `forEachByte` method implementation and I understand the reason:\r\n- there is a loop using `_getByte()` for each byte\r\n- while `bytesBefore` might use the same `_getByte()` for each byte, it might also use optimized access with a long pattern (up to 8 times fewer read operations)\r\n\r\nThat's the only clue I can think of to explain the difference between the 2 implementations.", "bytesBefore, if it is going forward, should use the SWAR indexOf I have referenced in another comment, AFAIK: https://github.com/netty/netty/commit/9a02832fdb10afbc09f144a462d604176fb11049\r\nIt's branchless and performs batch reads, so it should be pretty fast.\r\nHappy to see that it is working as designed :)\r\nAnd, as said, I am sure STOMP text can benefit a lot from this ", "@franz1981 So I think I've got the current fastest implementation using bytesBefore and the underlying SWAR indexOf, thanks to you! ;-)\r\n", "I see… we only get here because `findLineBreak` told us there was a line break at this position, so if we read CR then the next byte will necessarily be an LF.", "Looking at this one… both `DiskAttribute` and `MemoryAttribute` already do this check themselves, so I wonder if we can just remove the check here and let the delegate take care of it. Then we won't compute the byte array twice.", "Will have a look at it to check", "You're right! I'll change it", "Done" ]
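For reference, a minimal illustration of the two `ByteBuf` search APIs compared in the benchmarks above (the buffer content is made up; both calls scan the same readable range):

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.ByteProcessor;

public class SearchApis {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.copiedBuffer(new byte[] { 'a', 'b', '\r', '\n', 'c' });

        // bytesBefore returns the distance from the start index to the first match, or -1
        int distance = buf.bytesBefore(buf.readerIndex(), buf.readableBytes(), (byte) '\n');
        System.out.println(distance); // 3

        // forEachByte returns the absolute index where the processor stopped, or -1
        int index = buf.forEachByte(buf.readerIndex(), buf.readableBytes(),
                new ByteProcessor.IndexOfProcessor((byte) '\n'));
        System.out.println(index); // 3 (readerIndex is 0 here)

        buf.release();
    }
}
```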
"2021-02-07T15:39:43Z"
[]
OutOfDirectMemoryError for large uploads using HttpPostMultipartRequestDecoder
### Expected behavior With a `HttpDataFactory` that has `useDisk=true`, I thought files of any size could potentially be uploaded. ### Actual behavior Example error from below unit test: ``` io.netty.util.internal.OutOfDirectMemoryError: failed to allocate 4194304 byte(s) of direct memory (used: 62914560, max: 64487424) at io.netty.util.internal.PlatformDependent.incrementMemoryCounter(PlatformDependent.java:775) at io.netty.util.internal.PlatformDependent.reallocateDirectNoCleaner(PlatformDependent.java:748) at io.netty.buffer.UnpooledUnsafeNoCleanerDirectByteBuf.reallocateDirect(UnpooledUnsafeNoCleanerDirectByteBuf.java:34) at io.netty.buffer.UnpooledByteBufAllocator$InstrumentedUnpooledUnsafeNoCleanerDirectByteBuf.reallocateDirect(UnpooledByteBufAllocator.java:194) at io.netty.buffer.UnpooledUnsafeNoCleanerDirectByteBuf.capacity(UnpooledUnsafeNoCleanerDirectByteBuf.java:52) at io.netty.buffer.AbstractByteBuf.ensureWritable0(AbstractByteBuf.java:307) at io.netty.buffer.AbstractByteBuf.ensureWritable(AbstractByteBuf.java:282) at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1105) at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1098) at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1089) at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.offer(HttpPostMultipartRequestDecoder.java:351) at NettyUploadTest.itCanProcessLargeFiles(NettyUploadTest.java:46) // snip ``` ### Steps to reproduce Set `-Xmx64m` and run below unit test. ### Minimal yet complete reproducer code (or URL to code) ```java import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.handler.codec.http.*; import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory; import io.netty.handler.codec.http.multipart.FileUpload; import io.netty.handler.codec.http.multipart.HttpDataFactory; import io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder; import org.junit.Test; import java.nio.charset.StandardCharsets; import java.util.Arrays; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; public class NettyUploadTest { @Test public void itCanProcessLargeFiles() throws Exception { int fileSize = 100_000_000; // set Xmx to a number lower than this and it crashes int bytesPerChunk = 1_000_000; String prefix = "--861fbeab-cd20-470c-9609-d40a0f704466\n" + "Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n" + "Content-Type: image/jpeg\n" + "Content-Length: " + fileSize + "\n" + "\n"; String suffix = "\n" + "--861fbeab-cd20-470c-9609-d40a0f704466--\n"; HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload"); request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); request.headers().set("content-length", prefix.length() + fileSize + suffix.length()); HttpDataFactory factory = new DefaultHttpDataFactory(true); HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix.getBytes(StandardCharsets.UTF_8)))); byte[] body = new byte[bytesPerChunk]; Arrays.fill(body, (byte)1); for (int i = 0; i < fileSize / bytesPerChunk; i++) { ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerChunk); decoder.offer(new DefaultHttpContent(content)); // **OutOfMemory here** content.release(); } decoder.offer(new 
DefaultHttpContent(Unpooled.wrappedBuffer(suffix.getBytes(StandardCharsets.UTF_8)))); decoder.offer(new DefaultLastHttpContent()); FileUpload data = (FileUpload) decoder.getBodyHttpDatas().get(0); assertThat((int)data.length(), is(fileSize)); assertThat(data.get().length, is(fileSize)); factory.cleanAllHttpData(); } } ``` ### Netty version Tested on `4.1.56` and `4.1.58`. ### JVM version (e.g. `java -version`) jdk1.8.0_162 and 12 ### OS version (e.g. `uname -a`) Windows 10
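As an aside on the record above: `HttpPostMultipartRequestDecoder` is designed to be fed `HttpContent` chunks as they arrive rather than a fully buffered body. Below is a minimal sketch of that streaming usage, assuming the LF-only line endings used in the reporter's reproducer; the class name and payload are invented for illustration, and this is not code from the issue or the patch.

```java
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.DefaultHttpContent;
import io.netty.handler.codec.http.DefaultHttpRequest;
import io.netty.handler.codec.http.DefaultLastHttpContent;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
import io.netty.handler.codec.http.multipart.FileUpload;
import io.netty.handler.codec.http.multipart.HttpDataFactory;
import io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder;
import java.nio.charset.StandardCharsets;

public class StreamingMultipartSketch {
    public static void main(String[] args) throws Exception {
        String boundary = "861fbeab-cd20-470c-9609-d40a0f704466";
        // LF-only line endings, as in the reporter's reproducer.
        String body = "--" + boundary + "\n"
                + "Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n"
                + "Content-Type: image/jpeg\n"
                + "\n"
                + "tiny payload standing in for a large file\n"
                + "--" + boundary + "--\n";
        byte[] bytes = body.getBytes(StandardCharsets.UTF_8);

        HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload");
        request.headers().set("content-type", "multipart/form-data; boundary=" + boundary);
        request.headers().set("content-length", bytes.length);

        HttpDataFactory factory = new DefaultHttpDataFactory(true); // useDisk=true
        HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request);
        // Compact the internal buffer of already-consumed bytes once it passes 256 KiB.
        decoder.setDiscardThreshold(256 * 1024);

        // A real server offers each arriving chunk separately; one chunk here for brevity.
        DefaultHttpContent chunk = new DefaultHttpContent(Unpooled.wrappedBuffer(bytes));
        decoder.offer(chunk);
        chunk.release();
        decoder.offer(new DefaultLastHttpContent());

        FileUpload upload = (FileUpload) decoder.getBodyHttpDatas().get(0);
        System.out.println("decoded " + upload.length() + " bytes to "
                + (upload.isInMemory() ? "memory" : "disk"));

        decoder.destroy();
        factory.cleanAllHttpData();
    }
}
```

Before the patch in this record, even this streaming pattern with `useDisk=true` let the decoder's internal `undecodedChunk` buffer grow with the upload; the patch's optimized delimiter search plus in-place buffer rewriting is what bounds it, as the new `getCurrentAllocatedCapacity()` assertions in the tests verify.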
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java", "microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java", "microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java index 5cc6ec834ef..14071dad606 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java @@ -106,6 +106,9 @@ public void addContent(ByteBuf buffer, boolean last) size += localsize; if (byteBuf == null) { byteBuf = buffer; + } else if (localsize == 0) { + // Nothing to add and byteBuf already exists + buffer.release(); } else if (byteBuf instanceof CompositeByteBuf) { CompositeByteBuf cbb = (CompositeByteBuf) byteBuf; cbb.addComponent(true, buffer); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java index 18a1686635e..13f75f55bf2 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java @@ -16,6 +16,7 @@ package io.netty.handler.codec.http.multipart; import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.http.HttpConstants; /** * Shared Static object between HttpMessageDecoder, HttpPostRequestDecoder and HttpPostRequestEncoder @@ -150,4 +151,85 @@ static int findEndOfString(String sb) { return result; } + /** + * Try to find LF or CRLF as Line Breaking + * + * @param buffer the buffer to search in + * @param index the index to start from in the buffer + * @return a relative position from index > 0 if LF or CRLF is found + * or < 0 if not found + */ + static int findLineBreak(ByteBuf buffer, int index) { + int toRead = buffer.readableBytes() - (index - buffer.readerIndex()); + int posFirstChar = buffer.bytesBefore(index, toRead, HttpConstants.LF); + if (posFirstChar == -1) { + // No LF, so neither CRLF + return -1; + } + if (posFirstChar > 0 && buffer.getByte(index + posFirstChar - 1) == HttpConstants.CR) { + posFirstChar--; + } + return posFirstChar; + } + + /** + * Try to find the delimiter, with LF or CRLF in front of it (added as delimiters) if needed + * + * @param buffer the buffer to search in + * @param index the index to start from in the buffer + * @param delimiter the delimiter as byte array + * @param precededByLineBreak true if it must be preceded by LF or CRLF, else false + * @return a relative position from index > 0 if delimiter found designing the start of it + * (including LF or CRLF is asked) + * or a number < 0 if delimiter is not found + * @throws IndexOutOfBoundsException + * if {@code offset + delimiter.length} is greater than {@code buffer.capacity} + */ + static int findDelimiter(ByteBuf buffer, int index, byte[] delimiter, boolean precededByLineBreak) { + final int delimiterLength = delimiter.length; + final int readerIndex = buffer.readerIndex(); + final int writerIndex = buffer.writerIndex(); + int toRead = writerIndex - index; + int newOffset = index; + boolean delimiterNotFound = true; + while (delimiterNotFound && delimiterLength <= toRead) { + // Find first position: delimiter + int posDelimiter = buffer.bytesBefore(newOffset, toRead, delimiter[0]); + if (posDelimiter < 0) { + return -1; + } + newOffset += posDelimiter; + toRead -= posDelimiter; + // Now check for delimiter + delimiterNotFound = false; + for (int i = 0; i < delimiterLength; i++) { + if 
(buffer.getByte(newOffset + i) != delimiter[i]) { + newOffset++; + toRead--; + delimiterNotFound = true; + break; + } + } + if (!delimiterNotFound) { + // Delimiter found, find if necessary: LF or CRLF + if (precededByLineBreak && newOffset > readerIndex) { + if (buffer.getByte(newOffset - 1) == HttpConstants.LF) { + newOffset--; + // Check if CR before: not mandatory to be there + if (newOffset > readerIndex && buffer.getByte(newOffset - 1) == HttpConstants.CR) { + newOffset--; + } + } else { + // Delimiter with Line Break could be further: iterate after first char of delimiter + newOffset++; + toRead--; + delimiterNotFound = true; + continue; + } + } + return newOffset - readerIndex; + } + } + return -1; + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index e67c0103d8e..ebf88f23360 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -551,7 +551,7 @@ private InterfaceHttpData decodeMultipart(MultiPartStatus state) { } } // load data - if (!loadDataMultipart(undecodedChunk, multipartDataBoundary, currentAttribute)) { + if (!loadDataMultipartOptimized(undecodedChunk, multipartDataBoundary, currentAttribute)) { // Delimiter is not found. Need more chunks. return null; } @@ -648,7 +648,7 @@ private InterfaceHttpData findMultipartDelimiter(String delimiter, MultiPartStat skipOneLine(); String newline; try { - newline = readDelimiter(undecodedChunk, delimiter); + newline = readDelimiterOptimized(undecodedChunk, delimiter, charset); } catch (NotEnoughDataDecoderException ignored) { undecodedChunk.readerIndex(readerIndex); return null; @@ -688,7 +688,7 @@ private InterfaceHttpData findMultipartDisposition() { String newline; try { skipControlCharacters(undecodedChunk); - newline = readLine(undecodedChunk, charset); + newline = readLineOptimized(undecodedChunk, charset); } catch (NotEnoughDataDecoderException ignored) { undecodedChunk.readerIndex(readerIndex); return null; @@ -918,7 +918,7 @@ protected InterfaceHttpData getFileUpload(String delimiter) { } } // load data as much as possible - if (!loadDataMultipart(undecodedChunk, delimiter, currentFileUpload)) { + if (!loadDataMultipartOptimized(undecodedChunk, delimiter, currentFileUpload)) { // Delimiter is not found. Need more chunks. 
return null; } @@ -998,83 +998,32 @@ private void cleanMixedAttributes() { * Need more chunks and reset the {@code readerIndex} to the previous * value */ - private static String readLineStandard(ByteBuf undecodedChunk, Charset charset) { + private static String readLineOptimized(ByteBuf undecodedChunk, Charset charset) { int readerIndex = undecodedChunk.readerIndex(); - ByteBuf line = undecodedChunk.alloc().heapBuffer(64); + ByteBuf line = null; try { - while (undecodedChunk.isReadable()) { - byte nextByte = undecodedChunk.readByte(); - if (nextByte == HttpConstants.CR) { - // check but do not changed readerIndex - nextByte = undecodedChunk.getByte(undecodedChunk.readerIndex()); - if (nextByte == HttpConstants.LF) { - // force read - undecodedChunk.readByte(); - return line.toString(charset); - } else { - // Write CR (not followed by LF) - line.writeByte(HttpConstants.CR); - } - } else if (nextByte == HttpConstants.LF) { - return line.toString(charset); - } else { - line.writeByte(nextByte); + if (undecodedChunk.isReadable()) { + int posLfOrCrLf = HttpPostBodyUtil.findLineBreak(undecodedChunk, undecodedChunk.readerIndex()); + if (posLfOrCrLf <= 0) { + throw new NotEnoughDataDecoderException(); } - } - } catch (IndexOutOfBoundsException e) { - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(e); - } finally { - line.release(); - } - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } + try { + line = undecodedChunk.alloc().heapBuffer(posLfOrCrLf); + line.writeBytes(undecodedChunk, posLfOrCrLf); - /** - * Read one line up to the CRLF or LF - * - * @return the String from one line - * @throws NotEnoughDataDecoderException - * Need more chunks and reset the {@code readerIndex} to the previous - * value - */ - private static String readLine(ByteBuf undecodedChunk, Charset charset) { - if (!undecodedChunk.hasArray()) { - return readLineStandard(undecodedChunk, charset); - } - SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk); - int readerIndex = undecodedChunk.readerIndex(); - ByteBuf line = undecodedChunk.alloc().heapBuffer(64); - try { - while (sao.pos < sao.limit) { - byte nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.CR) { - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return line.toString(charset); - } else { - // Write CR (not followed by LF) - sao.pos--; - line.writeByte(HttpConstants.CR); - } - } else { - line.writeByte(nextByte); + byte nextByte = undecodedChunk.readByte(); + if (nextByte == HttpConstants.CR) { + // force read next byte since LF is the following one + undecodedChunk.readByte(); } - } else if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); return line.toString(charset); - } else { - line.writeByte(nextByte); + } finally { + line.release(); } } } catch (IndexOutOfBoundsException e) { undecodedChunk.readerIndex(readerIndex); throw new NotEnoughDataDecoderException(e); - } finally { - line.release(); } undecodedChunk.readerIndex(readerIndex); throw new NotEnoughDataDecoderException(); @@ -1095,23 +1044,19 @@ private static String readLine(ByteBuf undecodedChunk, Charset charset) { * Need more chunks and reset the {@code readerIndex} to the previous * value */ - private static String readDelimiterStandard(ByteBuf undecodedChunk, String delimiter) { - int readerIndex = undecodedChunk.readerIndex(); + private static String readDelimiterOptimized(ByteBuf undecodedChunk, String 
delimiter, Charset charset) { + final int readerIndex = undecodedChunk.readerIndex(); + final byte[] bdelimiter = delimiter.getBytes(charset); + final int delimiterLength = bdelimiter.length; try { - StringBuilder sb = new StringBuilder(64); - int delimiterPos = 0; - int len = delimiter.length(); - while (undecodedChunk.isReadable() && delimiterPos < len) { - byte nextByte = undecodedChunk.readByte(); - if (nextByte == delimiter.charAt(delimiterPos)) { - delimiterPos++; - sb.append((char) nextByte); - } else { - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } + int delimiterPos = HttpPostBodyUtil.findDelimiter(undecodedChunk, readerIndex, bdelimiter, false); + if (delimiterPos < 0) { + // delimiter not found so break here ! + undecodedChunk.readerIndex(readerIndex); + throw new NotEnoughDataDecoderException(); } + StringBuilder sb = new StringBuilder(delimiter); + undecodedChunk.readerIndex(readerIndex + delimiterPos + delimiterLength); // Now check if either opening delimiter or closing delimiter if (undecodedChunk.isReadable()) { byte nextByte = undecodedChunk.readByte(); @@ -1176,127 +1121,28 @@ private static String readDelimiterStandard(ByteBuf undecodedChunk, String delim } /** - * Read one line up to --delimiter or --delimiter-- and if existing the CRLF - * or LF. Note that CRLF or LF are mandatory for opening delimiter - * (--delimiter) but not for closing delimiter (--delimiter--) since some - * clients does not include CRLF in this case. + * Rewrite buffer in order to skip lengthToSkip bytes from current readerIndex, + * such that any readable bytes available after readerIndex + lengthToSkip (so before writerIndex) + * are moved at readerIndex position, + * therefore decreasing writerIndex of lengthToSkip at the end of the process. * - * @param delimiter - * of the form --string, such that '--' is already included - * @return the String from one line as the delimiter searched (opening or - * closing) - * @throws NotEnoughDataDecoderException - * Need more chunks and reset the readerInder to the previous - * value + * @param buffer the buffer to rewrite from current readerIndex + * @param lengthToSkip the size to skip from readerIndex */ - private static String readDelimiter(ByteBuf undecodedChunk, String delimiter) { - if (!undecodedChunk.hasArray()) { - return readDelimiterStandard(undecodedChunk, delimiter); + private static void rewriteCurrentBuffer(ByteBuf buffer, int lengthToSkip) { + if (lengthToSkip == 0) { + return; } - SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk); - int readerIndex = undecodedChunk.readerIndex(); - int delimiterPos = 0; - int len = delimiter.length(); - try { - StringBuilder sb = new StringBuilder(64); - // check conformity with delimiter - while (sao.pos < sao.limit && delimiterPos < len) { - byte nextByte = sao.bytes[sao.pos++]; - if (nextByte == delimiter.charAt(delimiterPos)) { - delimiterPos++; - sb.append((char) nextByte); - } else { - // delimiter not found so break here ! 
- undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } - // Now check if either opening delimiter or closing delimiter - if (sao.pos < sao.limit) { - byte nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.CR) { - // first check for opening delimiter - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return sb.toString(); - } else { - // error CR without LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else { - // error since CR must be followed by LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else if (nextByte == HttpConstants.LF) { - // same first check for opening delimiter where LF used with - // no CR - sao.setReadPosition(0); - return sb.toString(); - } else if (nextByte == '-') { - sb.append('-'); - // second check for closing delimiter - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == '-') { - sb.append('-'); - // now try to find if CRLF or LF there - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.CR) { - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return sb.toString(); - } else { - // error CR without LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else { - // error CR without LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return sb.toString(); - } else { - // No CRLF but ok however (Adobe Flash - // uploader) - // minus 1 since we read one char ahead but - // should not - sao.setReadPosition(1); - return sb.toString(); - } - } - // FIXME what do we do here? - // either considering it is fine, either waiting for - // more data to come? - // lets try considering it is fine... 
- sao.setReadPosition(0); - return sb.toString(); - } - // whatever now => error since incomplete - // only one '-' => not enough or whatever not enough - // element - } - } - } - } catch (IndexOutOfBoundsException e) { - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(e); + final int readerIndex = buffer.readerIndex(); + final int readableBytes = buffer.readableBytes(); + if (readableBytes == lengthToSkip) { + buffer.readerIndex(readerIndex); + buffer.writerIndex(readerIndex); + return; } - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); + buffer.setBytes(readerIndex, buffer, readerIndex + lengthToSkip, readableBytes - lengthToSkip); + buffer.readerIndex(readerIndex); + buffer.writerIndex(readerIndex + readableBytes - lengthToSkip); } /** @@ -1305,91 +1151,50 @@ private static String readDelimiter(ByteBuf undecodedChunk, String delimiter) { * @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks * @throws ErrorDataDecoderException */ - private static boolean loadDataMultipartStandard(ByteBuf undecodedChunk, String delimiter, HttpData httpData) { - final int startReaderIndex = undecodedChunk.readerIndex(); - final int delimeterLength = delimiter.length(); - int index = 0; - int lastPosition = startReaderIndex; - byte prevByte = HttpConstants.LF; - boolean delimiterFound = false; - while (undecodedChunk.isReadable()) { - final byte nextByte = undecodedChunk.readByte(); - // Check the delimiter - if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) { - index++; - if (delimeterLength == index) { - delimiterFound = true; - break; - } - continue; - } - lastPosition = undecodedChunk.readerIndex(); - if (nextByte == HttpConstants.LF) { - index = 0; - lastPosition -= (prevByte == HttpConstants.CR)? 
2 : 1; - } - prevByte = nextByte; - } - if (prevByte == HttpConstants.CR) { - lastPosition--; - } - ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex); - try { - httpData.addContent(content, delimiterFound); - } catch (IOException e) { - throw new ErrorDataDecoderException(e); - } - undecodedChunk.readerIndex(lastPosition); - return delimiterFound; - } - - /** - * Load the field value from a Multipart request - * - * @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks - * @throws ErrorDataDecoderException - */ - private static boolean loadDataMultipart(ByteBuf undecodedChunk, String delimiter, HttpData httpData) { - if (!undecodedChunk.hasArray()) { - return loadDataMultipartStandard(undecodedChunk, delimiter, httpData); + private static boolean loadDataMultipartOptimized(ByteBuf undecodedChunk, String delimiter, HttpData httpData) { + if (!undecodedChunk.isReadable()) { + return false; } - final SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk); final int startReaderIndex = undecodedChunk.readerIndex(); - final int delimeterLength = delimiter.length(); - int index = 0; - int lastRealPos = sao.pos; - byte prevByte = HttpConstants.LF; - boolean delimiterFound = false; - while (sao.pos < sao.limit) { - final byte nextByte = sao.bytes[sao.pos++]; - // Check the delimiter - if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) { - index++; - if (delimeterLength == index) { - delimiterFound = true; - break; + final byte[] bdelimiter = delimiter.getBytes(httpData.getCharset()); + int posDelimiter = HttpPostBodyUtil.findDelimiter(undecodedChunk, startReaderIndex, bdelimiter, true); + if (posDelimiter < 0) { + // Not found but however perhaps because incomplete so search LF or CRLF + posDelimiter = HttpPostBodyUtil.findLineBreak(undecodedChunk, startReaderIndex); + if (posDelimiter < 0) { + // not found so this chunk can be fully added + ByteBuf content = undecodedChunk.copy(); + try { + httpData.addContent(content, false); + } catch (IOException e) { + throw new ErrorDataDecoderException(e); } - continue; - } - lastRealPos = sao.pos; - if (nextByte == HttpConstants.LF) { - index = 0; - lastRealPos -= (prevByte == HttpConstants.CR)? 
2 : 1; + undecodedChunk.readerIndex(startReaderIndex); + undecodedChunk.writerIndex(startReaderIndex); + return false; + } else if (posDelimiter > 0) { + // Not fully but still some bytes to provide: httpData is not yet finished since delimiter not found + ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter); + try { + httpData.addContent(content, false); + } catch (IOException e) { + throw new ErrorDataDecoderException(e); + } + rewriteCurrentBuffer(undecodedChunk, posDelimiter); + return false; } - prevByte = nextByte; - } - if (prevByte == HttpConstants.CR) { - lastRealPos--; + // Empty chunk or so + return false; } - final int lastPosition = sao.getReadPosition(lastRealPos); - final ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex); + // Delimiter found at posDelimiter, including LF or CRLF, so httpData has its last chunk + ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter); try { - httpData.addContent(content, delimiterFound); + httpData.addContent(content, true); } catch (IOException e) { throw new ErrorDataDecoderException(e); } - undecodedChunk.readerIndex(lastPosition); - return delimiterFound; + rewriteCurrentBuffer(undecodedChunk, posDelimiter); + return true; } /** @@ -1529,4 +1334,15 @@ private static String[] splitMultipartHeaderValues(String svalue) { values.add(svalue.substring(start)); return values.toArray(new String[0]); } + + /** + * This method is package private intentionally in order to allow during tests + * to access to the amount of memory allocated (capacity) within the private + * ByteBuf undecodedChunk + * + * @return the number of bytes the internal buffer can contain + */ + int getCurrentAllocatedCapacity() { + return undecodedChunk.capacity(); + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java index 18d0a969b5e..8921fca24f6 100755 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java @@ -943,12 +943,12 @@ private HttpContent encodeNextChunkUrlEncoded(int sizeleft) throws ErrorDataEnco // Set name= if (isKey) { String key = currentData.getName(); - buffer = wrappedBuffer(key.getBytes()); + buffer = wrappedBuffer(key.getBytes(charset)); isKey = false; if (currentBuffer == null) { - currentBuffer = wrappedBuffer(buffer, wrappedBuffer("=".getBytes())); + currentBuffer = wrappedBuffer(buffer, wrappedBuffer("=".getBytes(charset))); } else { - currentBuffer = wrappedBuffer(currentBuffer, buffer, wrappedBuffer("=".getBytes())); + currentBuffer = wrappedBuffer(currentBuffer, buffer, wrappedBuffer("=".getBytes(charset))); } // continue size -= buffer.readableBytes() + 1; @@ -969,7 +969,7 @@ private HttpContent encodeNextChunkUrlEncoded(int sizeleft) throws ErrorDataEnco ByteBuf delimiter = null; if (buffer.readableBytes() < size) { isKey = true; - delimiter = iterator.hasNext() ? wrappedBuffer("&".getBytes()) : null; + delimiter = iterator.hasNext() ? 
wrappedBuffer("&".getBytes(charset)) : null; } // End for current InterfaceHttpData, need potentially more data diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java index 37e41fa9176..d2ef677dbf0 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java @@ -303,7 +303,17 @@ public HttpPostStandardRequestDecoder offer(HttpContent content) { } parseBody(); if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) { - undecodedChunk.discardReadBytes(); + if (undecodedChunk.refCnt() == 1) { + // It's safe to call discardBytes() as we are the only owner of the buffer. + undecodedChunk.discardReadBytes(); + } else { + // There seems to be multiple references of the buffer. Let's copy the data and release the buffer to + // ensure we can give back memory to the system. + ByteBuf buffer = undecodedChunk.alloc().buffer(undecodedChunk.readableBytes()); + buffer.writeBytes(undecodedChunk); + undecodedChunk.release(); + undecodedChunk = buffer; + } } return this; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java index ed1e37434a5..9b89af43fa7 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java @@ -270,9 +270,6 @@ public String getValue() throws IOException { @Override public void setValue(String value) throws IOException { - if (value != null) { - checkSize(value.getBytes().length); - } attribute.setValue(value); } diff --git a/microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java b/microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java index c570cd67455..29d1013923b 100644 --- a/microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java +++ b/microbench/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoderBenchmark.java @@ -24,6 +24,7 @@ import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpVersion; import io.netty.microbench.util.AbstractMicrobenchmark; +import io.netty.util.CharsetUtil; import io.netty.util.ResourceLeakDetector; import io.netty.util.ResourceLeakDetector.Level; import org.openjdk.jmh.annotations.Benchmark; @@ -54,12 +55,12 @@ public double testHighNumberChunks(boolean big, boolean noDisk) { "Content-Disposition: form-data; name=\"msg_id\"\n\n15200\n--" + BOUNDARY + "\nContent-Disposition: form-data; name=\"msg1\"; filename=\"file1.txt\"\n\n" + - data).getBytes(); - byte[] bodyPartBigBytes = data.getBytes(); + data).getBytes(CharsetUtil.UTF_8); + byte[] bodyPartBigBytes = data.getBytes(CharsetUtil.UTF_8); byte[] intermediaryBytes = ("\n--" + BOUNDARY + "\nContent-Disposition: form-data; name=\"msg2\"; filename=\"file2.txt\"\n\n" + - data).getBytes(); - byte[] finalBigBytes = ("\n" + "--" + BOUNDARY + "--\n").getBytes(); + data).getBytes(CharsetUtil.UTF_8); + byte[] finalBigBytes = ("\n" + "--" + BOUNDARY + "--\n").getBytes(CharsetUtil.UTF_8); ByteBuf firstBuf = 
Unpooled.wrappedBuffer(bodyStartBytes); ByteBuf finalBuf = Unpooled.wrappedBuffer(finalBigBytes); ByteBuf nextBuf;
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java index acbcf1219d3..93991c375a5 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java @@ -34,6 +34,7 @@ import io.netty.util.CharsetUtil; import org.junit.Test; +import java.io.IOException; import java.net.URLEncoder; import java.nio.charset.UnsupportedCharsetException; import java.util.Arrays; @@ -961,6 +962,7 @@ public void testDecodeMultipartRequest() { try { HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(new DefaultHttpDataFactory(false), req); + assertEquals(2, decoder.getBodyHttpDatas().size()); InterfaceHttpData data = decoder.getBodyHttpData("title"); assertTrue(data instanceof MemoryAttribute); assertEquals("bar-stream", ((MemoryAttribute) data).getString()); @@ -976,4 +978,177 @@ public void testDecodeMultipartRequest() { assertTrue(req.release()); } } + + private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, boolean inMemory) + throws IOException { + int nbChunks = 100; + int bytesPerChunk = 100000; + int bytesLastChunk = 10000; + int fileSize = bytesPerChunk * nbChunks + bytesLastChunk; // set Xmx to a number lower than this and it crashes + + String prefix = "--861fbeab-cd20-470c-9609-d40a0f704466\n" + + "Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n" + + "Content-Type: image/jpeg\n" + + "Content-Length: " + fileSize + "\n" + + "\n"; + + String suffix1 = "\n" + + "--861fbeab-"; + String suffix2 = "cd20-470c-9609-d40a0f704466--\n"; + String suffix = suffix1 + suffix2; + + HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload"); + request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); + request.headers().set("content-length", prefix.length() + fileSize + suffix.length()); + + HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); + decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8)))); + + byte[] body = new byte[bytesPerChunk]; + Arrays.fill(body, (byte) 1); + for (int i = 0; i < nbChunks; i++) { + ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerChunk); + decoder.offer(new DefaultHttpContent(content)); // **OutOfMemory here** + content.release(); + } + + byte[] bsuffix1 = suffix1.getBytes(CharsetUtil.UTF_8); + byte[] lastbody = new byte[bytesLastChunk + bsuffix1.length]; + Arrays.fill(body, (byte) 1); + for (int i = 0; i < bsuffix1.length; i++) { + lastbody[bytesLastChunk + i] = bsuffix1[i]; + } + + ByteBuf content2 = Unpooled.wrappedBuffer(lastbody, 0, lastbody.length); + decoder.offer(new DefaultHttpContent(content2)); + content2.release(); + content2 = Unpooled.wrappedBuffer(suffix2.getBytes(CharsetUtil.UTF_8)); + decoder.offer(new DefaultHttpContent(content2)); + content2.release(); + decoder.offer(new DefaultLastHttpContent()); + + FileUpload data = (FileUpload) decoder.getBodyHttpDatas().get(0); + assertEquals(data.length(), fileSize); + assertEquals(inMemory, data.isInMemory()); + if (data.isInMemory()) { + // To be done only if not inMemory: assertEquals(data.get().length, fileSize); + assertFalse("Capacity should be higher than 1M", data.getByteBuf().capacity() + < 
1024 * 1024); + } + assertTrue("Capacity should be less than 1M", decoder.getCurrentAllocatedCapacity() + < 1024 * 1024); + for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) { + httpData.release(); + factory.removeHttpDataFromClean(request, httpData); + } + factory.cleanAllHttpData(); + decoder.destroy(); + } + + @Test + public void testBIgFileUploadDelimiterInMiddleChunkDecoderDiskFactory() throws IOException { + // Factory using Disk mode + HttpDataFactory factory = new DefaultHttpDataFactory(true); + + commonTestBigFileDelimiterInMiddleChunk(factory, false); + } + + @Test + public void testBIgFileUploadDelimiterInMiddleChunkDecoderMemoryFactory() throws IOException { + // Factory using Memory mode + HttpDataFactory factory = new DefaultHttpDataFactory(false); + + commonTestBigFileDelimiterInMiddleChunk(factory, true); + } + + @Test + public void testBIgFileUploadDelimiterInMiddleChunkDecoderMixedFactory() throws IOException { + // Factory using Mixed mode, where file shall be on Disk + HttpDataFactory factory = new DefaultHttpDataFactory(10000); + + commonTestBigFileDelimiterInMiddleChunk(factory, false); + } + + private void commonNotBadReleaseBuffersDuringDecoding(HttpDataFactory factory, boolean inMemory) + throws IOException { + int nbItems = 20; + int bytesPerItem = 1000; + int maxMemory = 500; + + String prefix1 = "\n--861fbeab-cd20-470c-9609-d40a0f704466\n" + + "Content-Disposition: form-data; name=\"image"; + String prefix2 = + "\"; filename=\"guangzhou.jpeg\"\n" + + "Content-Type: image/jpeg\n" + + "Content-Length: " + bytesPerItem + "\n" + "\n"; + + String suffix = "\n--861fbeab-cd20-470c-9609-d40a0f704466--\n"; + + HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload"); + request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); + request.headers().set("content-length", nbItems * (prefix1.length() + prefix2.length() + 2 + bytesPerItem) + + suffix.length()); + HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); + decoder.setDiscardThreshold(maxMemory); + for (int rank = 0; rank < nbItems; rank++) { + byte[] bp1 = prefix1.getBytes(CharsetUtil.UTF_8); + byte[] bp2 = prefix2.getBytes(CharsetUtil.UTF_8); + byte[] prefix = new byte[bp1.length + 2 + bp2.length]; + for (int i = 0; i < bp1.length; i++) { + prefix[i] = bp1[i]; + } + byte[] brank = Integer.toString(10 + rank).getBytes(CharsetUtil.UTF_8); + prefix[bp1.length] = brank[0]; + prefix[bp1.length + 1] = brank[1]; + for (int i = 0; i < bp2.length; i++) { + prefix[bp1.length + 2 + i] = bp2[i]; + } + decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix))); + byte[] body = new byte[bytesPerItem]; + Arrays.fill(body, (byte) rank); + ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerItem); + decoder.offer(new DefaultHttpContent(content)); + content.release(); + } + byte[] lastbody = suffix.getBytes(CharsetUtil.UTF_8); + ByteBuf content2 = Unpooled.wrappedBuffer(lastbody, 0, lastbody.length); + decoder.offer(new DefaultHttpContent(content2)); + content2.release(); + decoder.offer(new DefaultLastHttpContent()); + + for (int rank = 0; rank < nbItems; rank++) { + FileUpload data = (FileUpload) decoder.getBodyHttpData("image" + (10 + rank)); + assertEquals(data.length(), bytesPerItem); + assertEquals(inMemory, data.isInMemory()); + byte[] body = new byte[bytesPerItem]; + Arrays.fill(body, (byte) rank); + assertTrue(Arrays.equals(body, data.get())); + } + // To not be done 
since will load full file on memory: assertEquals(data.get().length, fileSize); + // Not mandatory since implicitely called during destroy of decoder + for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) { + httpData.release(); + factory.removeHttpDataFromClean(request, httpData); + } + factory.cleanAllHttpData(); + decoder.destroy(); + } + @Test + public void testNotBadReleaseBuffersDuringDecodingDiskFactory() throws IOException { + // Using Disk Factory + HttpDataFactory factory = new DefaultHttpDataFactory(true); + commonNotBadReleaseBuffersDuringDecoding(factory, false); + } + @Test + public void testNotBadReleaseBuffersDuringDecodingMemoryFactory() throws IOException { + // Using Memory Factory + HttpDataFactory factory = new DefaultHttpDataFactory(false); + commonNotBadReleaseBuffersDuringDecoding(factory, true); + } + @Test + public void testNotBadReleaseBuffersDuringDecodingMixedFactory() throws IOException { + // Using Mixed Factory + HttpDataFactory factory = new DefaultHttpDataFactory(100); + commonNotBadReleaseBuffersDuringDecoding(factory, false); + } }
train
test
"2021-02-11T21:35:36"
"2021-01-28T14:18:39Z"
danielflower
val
netty/netty/11004_11009
netty/netty
netty/netty/11004
netty/netty/11009
[ "keyword_pr_to_issue" ]
ecd7dc3516b5a9fc612330f2dd617576569a256b
6808d7582a06dfb5038ac1dd8fa58190ab359d19
[ "Thanks @violetagg . [MyBlockHoundIntegration](https://github.com/aztosca/web-client-test/blob/master/src/main/java/org/app/webclienttest/MyBlockHoundIntegration.java) had another entry that allows reading the truststore. Was there any reason not to include this (or similar) in the PR? I understand some things shouldn't be allowed by default and maybe folks need to make up their on mind on this one.\r\n\r\nI did some testing with WebClient and can see that the blocking call only happens once on the first http**s** endpoint WebClient calls so it seems like I can safely allow it.\r\n\r\nFor quick reference of the BlockHound setting and blocking call it allows...\r\n\r\n```\r\n builder.allowBlockingCallsInside(\r\n \t\t\"io.netty.handler.ssl.SslContext\",\r\n \t\t\"newClientContextInternal\");\r\n```\r\n\r\n```\r\nCaused by: reactor.blockhound.BlockingOperationError: Blocking call! java.io.FileInputStream#readBytes\r\n\tat java.base/java.io.FileInputStream.readBytes(FileInputStream.java)\r\n\tat java.base/java.io.FileInputStream.read(FileInputStream.java:279)\r\n\tat java.base/java.io.BufferedInputStream.fill(BufferedInputStream.java:252)\r\n\tat java.base/java.io.BufferedInputStream.read(BufferedInputStream.java:271)\r\n\tat java.base/sun.security.util.DerValue.init(DerValue.java:388)\r\n\tat java.base/sun.security.util.DerValue.<init>(DerValue.java:331)\r\n\tat java.base/sun.security.util.DerValue.<init>(DerValue.java:344)\r\n\tat java.base/sun.security.pkcs12.PKCS12KeyStore.engineLoad(PKCS12KeyStore.java:1993)\r\n\tat java.base/sun.security.util.KeyStoreDelegator.engineLoad(KeyStoreDelegator.java:222)\r\n\tat java.base/java.security.KeyStore.load(KeyStore.java:1479)\r\n\tat java.base/sun.security.ssl.TrustStoreManager$TrustAnchorManager.loadKeyStore(TrustStoreManager.java:365)\r\n\tat java.base/sun.security.ssl.TrustStoreManager$TrustAnchorManager.getTrustedCerts(TrustStoreManager.java:313)\r\n\tat java.base/sun.security.ssl.TrustStoreManager.getTrustedCerts(TrustStoreManager.java:55)\r\n\tat java.base/sun.security.ssl.TrustManagerFactoryImpl.engineInit(TrustManagerFactoryImpl.java:49)\r\n\tat java.base/javax.net.ssl.TrustManagerFactory.init(TrustManagerFactory.java:278)\r\n\tat java.base/sun.security.ssl.SSLContextImpl.engineInit(SSLContextImpl.java:88)\r\n\tat java.base/javax.net.ssl.SSLContext.init(SSLContext.java:297)\r\n\tat io.netty.handler.ssl.JdkSslContext.<clinit>(JdkSslContext.java:75)\r\n\tat io.netty.handler.ssl.SslContext.newClientContextInternal(SslContext.java:821)\r\n\tat io.netty.handler.ssl.SslContextBuilder.build(SslContextBuilder.java:577)\r\n ......\r\n```", "@aztosca In the example, I do not see the SslContext customisation. https://github.com/aztosca/web-client-test/blob/master/src/main/java/org/app/webclienttest/WebClientTestApplication.java\r\n\r\nDo you use the default security in WebClient?", "Yeah, default. The injected WebClient.Builder I'm using is spring's pre-configured one, and I'm not using any other ssl related command line settings when running the app. " ]
[]
"2021-02-09T08:58:21Z"
[]
Unexpected WebClient blocks reported by BlockHound
I'm new to webflux so I posted a [question on stackoverflow](https://stackoverflow.com/questions/66091050/why-is-webclient-blocking-in-this-web-flux-app) thinking I was doing something wrong in my code, but after seeing [this issue](https://github.com/netty/netty/issues/10925) and related [pull request](https://github.com/netty/netty/pull/10935), I'm thinking I just need to tell BlockHound to ignore some calls. I created [my own BlockHoundIntegration](https://github.com/aztosca/web-client-test/blob/master/src/main/java/org/app/webclienttest/MyBlockHoundIntegration.java) that suppresses the BlockHound exceptions when added to the BlockHound.install() call, but wanted to raise an issue here to see if maybe this is something that should be addressed in NettyBlockHoundIntegration. ### Expected behavior No blocking calls reported by BlockHound ### Actual behavior Blocking calls reported by BlockHound ### Steps to reproduce 1. Run the simple boot app linked below in the _reproducer code_ section. 2. target http://localhost:8080/test-web-client ### Minimal yet complete reproducer code (or URL to code) https://github.com/aztosca/web-client-test https://github.com/aztosca/web-client-test/blob/master/src/main/java/org/app/webclienttest/WebClientTestApplication.java ### Netty version 4.1.58 ### JVM version (e.g. `java -version`) openjdk version "11.0.2" ### OS version (e.g. `uname -a`) MacOS 11.2
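On Netty versions without this fix, the same reports can be suppressed from user code with a custom `BlockHoundIntegration`, as the reporter did in the linked project. The sketch below mirrors the whitelist entries the patch adds to Netty's built-in integration; the class name is invented for this illustration.

```java
import reactor.blockhound.BlockHound;
import reactor.blockhound.integration.BlockHoundIntegration;

// Allows the one-time blocking file reads Netty performs while parsing
// /etc/resolv.conf search domains and options, and the hosts file.
public class ResolverBlockHoundIntegration implements BlockHoundIntegration {
    @Override
    public void applyTo(BlockHound.Builder builder) {
        builder.allowBlockingCallsInside(
                "io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider",
                "parseEtcResolverSearchDomains");
        builder.allowBlockingCallsInside(
                "io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider",
                "parseEtcResolverOptions");
        builder.allowBlockingCallsInside(
                "io.netty.resolver.HostsFileParser", "parse");
    }

    @Override
    public int compareTo(BlockHoundIntegration o) {
        return 0; // ordering among integrations is irrelevant here
    }
}
```

It can be registered explicitly via `BlockHound.install(new ResolverBlockHoundIntegration())`, or picked up automatically when listed in `META-INF/services/reactor.blockhound.integration.BlockHoundIntegration`.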
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java" ]
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java" ]
[ "transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java" ]
diff --git a/common/src/main/java/io/netty/util/internal/Hidden.java b/common/src/main/java/io/netty/util/internal/Hidden.java index 18a03c68ead..fab714d8ad6 100644 --- a/common/src/main/java/io/netty/util/internal/Hidden.java +++ b/common/src/main/java/io/netty/util/internal/Hidden.java @@ -119,6 +119,18 @@ public void applyTo(BlockHound.Builder builder) { "io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider", "parse"); + builder.allowBlockingCallsInside( + "io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider", + "parseEtcResolverSearchDomains"); + + builder.allowBlockingCallsInside( + "io.netty.resolver.dns.UnixResolverDnsServerAddressStreamProvider", + "parseEtcResolverOptions"); + + builder.allowBlockingCallsInside( + "io.netty.resolver.HostsFileParser", + "parse"); + builder.nonBlockingThreadPredicate(new Function<Predicate<Thread>, Predicate<Thread>>() { @Override public Predicate<Thread> apply(final Predicate<Thread> p) { diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java b/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java index 84412c58ec3..d6b3875c713 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/UnixResolverDnsServerAddressStreamProvider.java @@ -86,7 +86,7 @@ static DnsServerAddressStreamProvider parseSilently() { * the default DNS server to use, and also overrides for individual domains. Also parse list of files of the format * <a href=" * https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man5/resolver.5.html"> - * /etc/resolver</a> which may contain multiple files to override the name servers used for multimple domains. + * /etc/resolver</a> which may contain multiple files to override the name servers used for multiple domains. * @param etcResolvConf <a href="https://linux.die.net/man/5/resolver">/etc/resolv.conf</a>. * @param etcResolverFiles List of files of the format defined in * <a href=" @@ -121,7 +121,7 @@ public UnixResolverDnsServerAddressStreamProvider(File etcResolvConf, File... et * the default DNS server to use, and also overrides for individual domains. Also parse a directory of the format * <a href=" * https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man5/resolver.5.html"> - * /etc/resolver</a> which may contain multiple files to override the name servers used for multimple domains. + * /etc/resolver</a> which may contain multiple files to override the name servers used for multiple domains. * @param etcResolvConf <a href="https://linux.die.net/man/5/resolver">/etc/resolv.conf</a>. * @param etcResolverDir Directory containing files of the format defined in * <a href=" @@ -379,7 +379,7 @@ static List<String> parseEtcResolverSearchDomains(File etcResolvConf) throws IOE } else if (line.startsWith(SEARCH_ROW_LABEL)) { int i = indexOfNonWhiteSpace(line, SEARCH_ROW_LABEL.length()); if (i >= 0) { - // May contain more then one entry, either seperated by whitespace or tab. + // May contain more then one entry, either separated by whitespace or tab. // See https://linux.die.net/man/5/resolver String[] domains = WHITESPACE_PATTERN.split(line.substring(i)); Collections.addAll(searchDomains, domains);
diff --git a/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java b/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java index 2d5121a0781..1b454a1802e 100644 --- a/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java +++ b/transport-blockhound-tests/src/test/java/io/netty/util/internal/NettyBlockHoundIntegrationTest.java @@ -26,6 +26,7 @@ import io.netty.channel.ChannelInitializer; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioDatagramChannel; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.ssl.SslContext; @@ -35,7 +36,7 @@ import io.netty.handler.ssl.SslProvider; import io.netty.handler.ssl.util.InsecureTrustManagerFactory; import io.netty.handler.ssl.util.SelfSignedCertificate; -import io.netty.resolver.dns.DnsServerAddressStreamProvider; +import io.netty.resolver.dns.DnsNameResolverBuilder; import io.netty.resolver.dns.DnsServerAddressStreamProviders; import io.netty.util.HashedWheelTimer; import io.netty.util.ReferenceCountUtil; @@ -59,6 +60,7 @@ import java.util.List; import java.util.Queue; import java.util.ServiceLoader; +import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; @@ -320,7 +322,30 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { } @Test(timeout = 5000L) - public void testParseEtcResolverFilesAllowsBlockingCalls() throws InterruptedException { + public void testUnixResolverDnsServerAddressStreamProvider_Parse() throws InterruptedException { + doTestParseResolverFilesAllowsBlockingCalls(DnsServerAddressStreamProviders::unixDefault); + } + + @Test(timeout = 5000L) + public void testHostsFileParser_Parse() throws InterruptedException { + doTestParseResolverFilesAllowsBlockingCalls(DnsNameResolverBuilder::new); + } + + @Test(timeout = 5000L) + public void testUnixResolverDnsServerAddressStreamProvider_ParseEtcResolverSearchDomainsAndOptions() + throws InterruptedException { + NioEventLoopGroup group = new NioEventLoopGroup(); + try { + DnsNameResolverBuilder builder = new DnsNameResolverBuilder(group.next()) + .channelFactory(NioDatagramChannel::new); + doTestParseResolverFilesAllowsBlockingCalls(builder::build); + } finally { + group.shutdownGracefully(); + } + } + + private static void doTestParseResolverFilesAllowsBlockingCalls(Callable<Object> callable) + throws InterruptedException { SingleThreadEventExecutor executor = new SingleThreadEventExecutor(null, new DefaultThreadFactory("test"), true) { @Override @@ -335,11 +360,11 @@ protected void run() { }; try { CountDownLatch latch = new CountDownLatch(1); - List<DnsServerAddressStreamProvider> result = new ArrayList<>(); + List<Object> result = new ArrayList<>(); List<Throwable> error = new ArrayList<>(); executor.execute(() -> { try { - result.add(DnsServerAddressStreamProviders.unixDefault()); + result.add(callable.call()); } catch (Throwable t) { error.add(t); }
train
test
"2021-02-08T20:55:02"
"2021-02-08T03:06:03Z"
aztosca
val
netty/netty/11021_11022
netty/netty
netty/netty/11021
netty/netty/11022
[ "keyword_pr_to_issue" ]
a98293f08403961db8bf3eb8ba66a21cb86ecf08
9cac18687d15be113cf869426f4bd1aca804b6b0
[]
[]
"2021-02-12T10:59:17Z"
[]
Doc incorrect at io.netty.util.concurrent.AbstractEventExecutor#lazyExecute
### Actual behavior ``` /** * Like {@link #execute(Runnable)} but does not guarantee the task will be run until either * a non-lazy task is executed or the executor is shut down. * * This is equivalent to submitting a {@link EventExecutor.LazyRunnable} to * {@link #execute(Runnable)} but for an arbitrary {@link Runnable}. * * The default implementation just delegates to {@link #execute(Runnable)}. */ @UnstableApi public void lazyExecute(Runnable task) { execute(task); } ``` `LazyRunnable` no longer belongs to `EventExecutor`; it now lives in `AbstractEventExecutor`, so the `{@link EventExecutor.LazyRunnable}` reference is stale. ### Netty version branch 4.1
[ "common/src/main/java/io/netty/util/concurrent/AbstractEventExecutor.java" ]
[ "common/src/main/java/io/netty/util/concurrent/AbstractEventExecutor.java" ]
[]
diff --git a/common/src/main/java/io/netty/util/concurrent/AbstractEventExecutor.java b/common/src/main/java/io/netty/util/concurrent/AbstractEventExecutor.java index b1df38a81aa..e4ca3440960 100644 --- a/common/src/main/java/io/netty/util/concurrent/AbstractEventExecutor.java +++ b/common/src/main/java/io/netty/util/concurrent/AbstractEventExecutor.java @@ -171,7 +171,7 @@ protected static void safeExecute(Runnable task) { * Like {@link #execute(Runnable)} but does not guarantee the task will be run until either * a non-lazy task is executed or the executor is shut down. * - * This is equivalent to submitting a {@link EventExecutor.LazyRunnable} to + * This is equivalent to submitting a {@link AbstractEventExecutor.LazyRunnable} to * {@link #execute(Runnable)} but for an arbitrary {@link Runnable}. * * The default implementation just delegates to {@link #execute(Runnable)}.
null
train
test
"2021-02-11T21:35:36"
"2021-02-12T10:58:50Z"
horizonzy
val
netty/netty/11032_11033
netty/netty
netty/netty/11032
netty/netty/11033
[ "keyword_pr_to_issue" ]
28d4154fffc9c51f73b5969c64d74ca17ca4baba
a60825c3b425892af9be3e9284677aa8a58faa6b
[]
[]
"2021-02-21T09:03:29Z"
[]
Code clean at io.netty.buffer.AbstractByteBuf#adjustMarkers
https://github.com/netty/netty/blob/28d4154fffc9c51f73b5969c64d74ca17ca4baba/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java#L257-L271 It's unnecessary to create the local stack variables here; the fields can be updated in place with compound assignment.
[ "buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java" ]
[ "buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java" ]
[]
diff --git a/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java b/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java index c13efdd0046..fb152502719 100644 --- a/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java @@ -255,17 +255,15 @@ public ByteBuf discardSomeReadBytes() { } protected final void adjustMarkers(int decrement) { - int markedReaderIndex = this.markedReaderIndex; if (markedReaderIndex <= decrement) { - this.markedReaderIndex = 0; - int markedWriterIndex = this.markedWriterIndex; + markedReaderIndex = 0; if (markedWriterIndex <= decrement) { - this.markedWriterIndex = 0; + markedWriterIndex = 0; } else { - this.markedWriterIndex = markedWriterIndex - decrement; + markedWriterIndex -= decrement; } } else { - this.markedReaderIndex = markedReaderIndex - decrement; + markedReaderIndex -= decrement; markedWriterIndex -= decrement; } }
null
train
test
"2021-02-19T13:29:47"
"2021-02-21T08:49:21Z"
horizonzy
val
netty/netty/10986_11037
netty/netty
netty/netty/10986
netty/netty/11037
[ "keyword_pr_to_issue" ]
a60825c3b425892af9be3e9284677aa8a58faa6b
dde82f62f07de5e794dda1c3324d8331ca522114
[ "Hmm... I can't see this. Can you make a screenshot ?", "like this.\r\n\r\n<img width=\"669\" alt=\"stack\" src=\"https://user-images.githubusercontent.com/12539730/107210938-d1300080-6a3f-11eb-8760-2e754935ea67.png\">\r\n\r\n\r\n\r\n", "@chrisvest can you have a look ?", "@normanmaurer Yeah, I'll try to spend some time on this." ]
[ "You can just extends `AtomicInteger` that would give you back `Unsafe` perf for free :)\r\n\r\nYeah I know that's bad design so you can ignore me ;)", "I considered it, but then I loose the field name which is quite awkward. Also, I think the JIT is able to trust the finals inside A\\*FUs these days, so they're as fast as VarHandles or Atomic\\*s these days." ]
"2021-02-25T14:11:30Z"
[]
recycler has problems recycling objects in a multi-threaded environment
### Expected behavior An object recycled from multiple threads ends up in only one queue. ### Actual behavior The same object is collected into multiple queues in a multi-threaded environment. ### Steps to reproduce ``` public void testMultipleRecycleAtDifferentThread() throws InterruptedException { Recycler<HandledObject> recycler = newRecycler(1024); final HandledObject object = recycler.get(); final AtomicReference<IllegalStateException> exceptionStore = new AtomicReference<IllegalStateException>(); final CountDownLatch countDownLatch = new CountDownLatch(2); final Thread thread1 = new Thread(new Runnable() { @Override public void run() { try { object.recycle(); } finally { countDownLatch.countDown(); } } }); thread1.start(); //thread1.join(); final Thread thread2 = new Thread(new Runnable() { @Override public void run() { try { object.recycle(); } catch (IllegalStateException e) { exceptionStore.set(e); } finally { countDownLatch.countDown(); } } }); thread2.start(); //thread2.join(); countDownLatch.await(); recycler.get(); IllegalStateException exception = exceptionStore.get(); if (exception != null) { throw exception; } } ``` Breakpoint-debug the recycler.get() call and you will find two queues on the stack holding the same object. ### Minimal yet complete reproducer code (or URL to code) ### Netty version 4.1.59.Final-SNAPSHOT ### JVM version (e.g. `java -version`) 1.8 ### OS version (e.g. `uname -a`) macOS
[ "common/src/main/java/io/netty/util/Recycler.java" ]
[ "common/src/main/java/io/netty/util/Recycler.java" ]
[ "common/src/test/java/io/netty/util/RecyclerTest.java" ]
diff --git a/common/src/main/java/io/netty/util/Recycler.java b/common/src/main/java/io/netty/util/Recycler.java index 9e81aa527d0..d64889488ee 100644 --- a/common/src/main/java/io/netty/util/Recycler.java +++ b/common/src/main/java/io/netty/util/Recycler.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.WeakHashMap; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import static io.netty.util.internal.MathUtil.safeFindNextPositivePowerOfTwo; import static java.lang.Math.max; @@ -209,8 +210,16 @@ final int threadLocalSize() { public interface Handle<T> extends ObjectPool.Handle<T> { } + @SuppressWarnings("unchecked") private static final class DefaultHandle<T> implements Handle<T> { - int lastRecycledId; + private static final AtomicIntegerFieldUpdater<DefaultHandle<?>> LAST_RECYCLED_ID_UPDATER; + static { + AtomicIntegerFieldUpdater<?> updater = AtomicIntegerFieldUpdater.newUpdater( + DefaultHandle.class, "lastRecycledId"); + LAST_RECYCLED_ID_UPDATER = (AtomicIntegerFieldUpdater<DefaultHandle<?>>) updater; + } + + volatile int lastRecycledId; int recycleId; boolean hasBeenRecycled; @@ -235,6 +244,12 @@ public void recycle(Object object) { stack.push(this); } + + public boolean compareAndSetLastRecycledId(int expectLastRecycledId, int updateLastRecycledId) { + // Use "weak…" because we do not need synchronize-with ordering, only atomicity. + // Also, spurious failures are fine, since no code should rely on recycling for correctness. + return LAST_RECYCLED_ID_UPDATER.weakCompareAndSet(this, expectLastRecycledId, updateLastRecycledId); + } } private static final FastThreadLocal<Map<Stack<?>, WeakOrderQueue>> DELAYED_RECYCLED = @@ -371,11 +386,15 @@ void setNext(WeakOrderQueue next) { void reclaimAllSpaceAndUnlink() { head.reclaimAllSpaceAndUnlink(); - this.next = null; + next = null; } void add(DefaultHandle<?> handle) { - handle.lastRecycledId = id; + if (!handle.compareAndSetLastRecycledId(0, id)) { + // Separate threads could be racing to add the handle to each their own WeakOrderQueue. + // We only add the handle to the queue if we win the race and observe that lastRecycledId is zero. + return; + } // While we also enforce the recycling ratio when we transfer objects from the WeakOrderQueue to the Stack // we better should enforce it as well early. Missing to do so may let the WeakOrderQueue grow very fast @@ -649,10 +668,10 @@ void push(DefaultHandle<?> item) { } private void pushNow(DefaultHandle<?> item) { - if ((item.recycleId | item.lastRecycledId) != 0) { + if (item.recycleId != 0 || !item.compareAndSetLastRecycledId(0, OWN_THREAD_ID)) { throw new IllegalStateException("recycled already"); } - item.recycleId = item.lastRecycledId = OWN_THREAD_ID; + item.recycleId = OWN_THREAD_ID; int size = this.size; if (size >= maxCapacity || dropHandle(item)) {
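The essence of the patch above is that racing recyclers now compete on a single CAS of `lastRecycledId` from 0, and only the winner may enqueue the handle, so one object can no longer land in two queues. Below is a stripped-down sketch of that idiom, with simplified names invented for this illustration; it is not Netty code.

```java
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

public class ClaimByCas {
    private static final AtomicIntegerFieldUpdater<ClaimByCas> LAST_RECYCLED_ID =
            AtomicIntegerFieldUpdater.newUpdater(ClaimByCas.class, "lastRecycledId");

    private volatile int lastRecycledId; // 0 means "not yet recycled"

    boolean tryClaim(int recyclerId) {
        // The weak variant suffices: only atomicity is needed, and a spurious
        // failure merely drops the object instead of pooling it; the object is
        // never reused twice.
        return LAST_RECYCLED_ID.weakCompareAndSet(this, 0, recyclerId);
    }

    public static void main(String[] args) {
        ClaimByCas handle = new ClaimByCas();
        // On typical JVMs the first claim wins and the second loses; a weak CAS
        // may spuriously fail, which would also just print false.
        System.out.println(handle.tryClaim(1)); // expected: true
        System.out.println(handle.tryClaim(2)); // expected: false
    }
}
```

Losing the race and dropping an object is harmless because recycling is only an optimization; correctness never depends on an object actually being pooled.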
diff --git a/common/src/test/java/io/netty/util/RecyclerTest.java b/common/src/test/java/io/netty/util/RecyclerTest.java index 1d711f5e4a4..0c2d0705606 100644 --- a/common/src/test/java/io/netty/util/RecyclerTest.java +++ b/common/src/test/java/io/netty/util/RecyclerTest.java @@ -18,6 +18,7 @@ import org.junit.Test; import java.util.Random; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -114,12 +115,119 @@ public void run() { }); thread2.start(); thread2.join(); + HandledObject a = recycler.get(); + HandledObject b = recycler.get(); + assertNotSame(a, b); IllegalStateException exception = exceptionStore.get(); if (exception != null) { throw exception; } } + @Test + public void testMultipleRecycleAtDifferentThreadRacing() throws InterruptedException { + Recycler<HandledObject> recycler = newRecycler(1024); + final HandledObject object = recycler.get(); + final AtomicReference<IllegalStateException> exceptionStore = new AtomicReference<IllegalStateException>(); + + final CountDownLatch countDownLatch = new CountDownLatch(2); + final Thread thread1 = new Thread(new Runnable() { + @Override + public void run() { + try { + object.recycle(); + } catch (IllegalStateException e) { + Exception x = exceptionStore.getAndSet(e); + if (x != null) { + e.addSuppressed(x); + } + } finally { + countDownLatch.countDown(); + } + } + }); + thread1.start(); + + final Thread thread2 = new Thread(new Runnable() { + @Override + public void run() { + try { + object.recycle(); + } catch (IllegalStateException e) { + Exception x = exceptionStore.getAndSet(e); + if (x != null) { + e.addSuppressed(x); + } + } finally { + countDownLatch.countDown(); + } + } + }); + thread2.start(); + + try { + countDownLatch.await(); + HandledObject a = recycler.get(); + HandledObject b = recycler.get(); + assertNotSame(a, b); + IllegalStateException exception = exceptionStore.get(); + if (exception != null) { + assertEquals("recycled already", exception.getMessage()); + assertEquals(0, exception.getSuppressed().length); + } + } finally { + thread1.join(1000); + thread2.join(1000); + } + } + + @Test + public void testMultipleRecycleRacing() throws InterruptedException { + Recycler<HandledObject> recycler = newRecycler(1024); + final HandledObject object = recycler.get(); + final AtomicReference<IllegalStateException> exceptionStore = new AtomicReference<IllegalStateException>(); + + final CountDownLatch countDownLatch = new CountDownLatch(1); + final Thread thread1 = new Thread(new Runnable() { + @Override + public void run() { + try { + object.recycle(); + } catch (IllegalStateException e) { + Exception x = exceptionStore.getAndSet(e); + if (x != null) { + e.addSuppressed(x); + } + } finally { + countDownLatch.countDown(); + } + } + }); + thread1.start(); + + try { + object.recycle(); + } catch (IllegalStateException e) { + Exception x = exceptionStore.getAndSet(e); + if (x != null) { + e.addSuppressed(x); + } + } + + try { + countDownLatch.await(); + HandledObject a = recycler.get(); + HandledObject b = recycler.get(); + assertNotSame(a, b); + IllegalStateException exception = exceptionStore.get(); + if (exception != null) { + throw exception; + } + } finally { + thread1.join(1000); + } + } + @Test public void testRecycle() { Recycler<HandledObject> recycler = newRecycler(1024);
val
test
"2021-02-23T20:47:26"
"2021-02-02T13:47:59Z"
xiaoheng1
val
netty/netty/11054_11061
netty/netty
netty/netty/11054
netty/netty/11061
[ "connected" ]
7d4aaa268b8a536f61fbc7711365147c58238745
0f12472b580bcbb34cbe84810077aa33c1091d14
[ "@Shoothzj sure... Maybe provide a PR that allows to configure the `SocketAddress` via the `DnsResolverBuilder` ?", "@normanmaurer I am new to Netty code contributing. How about java property `io.netty.dnsResolverListenAddress`", "I would prefer to make it configurable via the builder.. " ]
[ "This should be an `InetSocketAddress`. Also we will need to preserve the old constructor to not break things, which means you should add a new one (package-private)", "See below... pass in `null` for using the default. ", "See below... should be `SocketAddress`", "call `register` if the `bindAddress` is `null`, otherwise call `bind()`", "please call this `localAddress` to be consistent with `Bootstrap` and make it take a `SocketAddress`.", "One question, Why `InetSocketAddress` is better than `String`, It's the reason the `InetSocketAddress` are more reliable?", "And the InetSocketAddress contains the port config?", "Rename the name to `localAddress`, And change the type to `SocketAddress `", "@Shoothzj Yes, `InetSocketAddress` contains IP Address and Port.", "Change `{@code this` to something else.", "```suggestion\r\n * Configure the address that will be used to bind too. If `null` the default will be used.\r\n```", "Should we make this `SocketAddress` just for the sake of be consistent with `Bootstrap` ?", "```suggestion\r\n SocketAddress localAddress,\r\n```", "```suggestion\r\nimport java.net.SocketAddress;\r\n```", "```suggestion\r\n private SocketAddress localAddress;\r\n```", "```suggestion\r\n public DnsNameResolverBuilder localAddress(SocketAddress localAddress) {\r\n```", "![image](https://user-images.githubusercontent.com/12933197/110261777-76f96f80-7fec-11eb-85f1-fad453cf9bcb.png)\r\nI saw the code below write this.\r\nThe \"Dns name resolver Builder\" will be better?", "Sorry, I don't know I can commit the suggestion, Fixed", "Sorry, I don't know I can commit the suggestion, I Fixed manually", "Sorry, I don't know I can commit the suggestion, I Fixed manually", "Sorry, I don't know I can commit the suggestion, I Fixed manually", "Sorry, I don't know I can commit the suggestion, I Fixed manually", "Sorry, I don't know I can commit the suggestion, I Fixed manually" ]
"2021-03-06T08:47:35Z"
[]
Allow configuring the listen address on DnsResolver
By default, the DnsResolver listens on "0.0.0.0", which may pose security risks. Can we make the listen address on DnsResolver configurable?
[ "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java" ]
[ "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java" ]
[]
diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java index 3781bd4015b..bf594db0627 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java @@ -383,6 +383,35 @@ public DnsNameResolver( int ndots, boolean decodeIdn, boolean completeOncePreferredResolved) { + this(eventLoop, channelFactory, socketChannelFactory, resolveCache, cnameCache, authoritativeDnsServerCache, + null, dnsQueryLifecycleObserverFactory, queryTimeoutMillis, resolvedAddressTypes, + recursionDesired, maxQueriesPerResolve, traceEnabled, maxPayloadSize, optResourceEnabled, + hostsFileEntriesResolver, dnsServerAddressStreamProvider, searchDomains, ndots, decodeIdn, + completeOncePreferredResolved); + } + + DnsNameResolver( + EventLoop eventLoop, + ChannelFactory<? extends DatagramChannel> channelFactory, + ChannelFactory<? extends SocketChannel> socketChannelFactory, + final DnsCache resolveCache, + final DnsCnameCache cnameCache, + final AuthoritativeDnsServerCache authoritativeDnsServerCache, + SocketAddress localAddress, + DnsQueryLifecycleObserverFactory dnsQueryLifecycleObserverFactory, + long queryTimeoutMillis, + ResolvedAddressTypes resolvedAddressTypes, + boolean recursionDesired, + int maxQueriesPerResolve, + boolean traceEnabled, + int maxPayloadSize, + boolean optResourceEnabled, + HostsFileEntriesResolver hostsFileEntriesResolver, + DnsServerAddressStreamProvider dnsServerAddressStreamProvider, + String[] searchDomains, + int ndots, + boolean decodeIdn, + boolean completeOncePreferredResolved) { super(eventLoop); this.queryTimeoutMillis = queryTimeoutMillis > 0 ? queryTimeoutMillis @@ -453,7 +482,12 @@ protected void initChannel(DatagramChannel ch) { }); channelFuture = responseHandler.channelActivePromise; - ChannelFuture future = b.register(); + final ChannelFuture future; + if (localAddress == null) { + future = b.register(); + } else { + future = b.bind(localAddress); + } Throwable cause = future.cause(); if (cause != null) { if (cause instanceof RuntimeException) { diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java index 5a0b5b14e84..197aa0aa831 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolverBuilder.java @@ -25,6 +25,7 @@ import io.netty.resolver.ResolvedAddressTypes; import io.netty.util.concurrent.Future; +import java.net.SocketAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -42,6 +43,7 @@ public final class DnsNameResolverBuilder { private DnsCache resolveCache; private DnsCnameCache cnameCache; private AuthoritativeDnsServerCache authoritativeDnsServerCache; + private SocketAddress localAddress; private Integer minTtl; private Integer maxTtl; private Integer negativeTtl; @@ -201,6 +203,16 @@ public DnsNameResolverBuilder authoritativeDnsServerCache(AuthoritativeDnsServer return this; } + /** + * Configure the address that will be used to bind too. If `null` the default will be used. 
+ * @param localAddress the bind address + * @return {@code this} + */ + public DnsNameResolverBuilder localAddress(SocketAddress localAddress) { + this.localAddress = localAddress; + return this; + } + /** * Sets the minimum and maximum TTL of the cached DNS resource records (in seconds). If the TTL of the DNS * resource record returned by the DNS server is less than the minimum TTL or greater than the maximum TTL, @@ -480,6 +492,7 @@ public DnsNameResolver build() { resolveCache, cnameCache, authoritativeDnsServerCache, + localAddress, dnsQueryLifecycleObserverFactory, queryTimeoutMillis, resolvedAddressTypes,
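For reference, a short usage sketch of the `localAddress(...)` option the patch above introduces. The event-loop setup and the loopback bind address here are illustrative assumptions; in practice you would bind to a local interface address that can actually reach your DNS servers.

```java
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.resolver.dns.DnsNameResolver;
import io.netty.resolver.dns.DnsNameResolverBuilder;

import java.net.InetSocketAddress;

public class BoundDnsResolverExample {
    public static void main(String[] args) {
        EventLoopGroup group = new NioEventLoopGroup(1);
        try {
            DnsNameResolver resolver = new DnsNameResolverBuilder(group.next())
                    .channelType(NioDatagramChannel.class)
                    // Bind the resolver's UDP socket to an explicit local
                    // address (ephemeral port) instead of the 0.0.0.0 default.
                    .localAddress(new InetSocketAddress("127.0.0.1", 0))
                    .build();
            resolver.close();
        } finally {
            group.shutdownGracefully();
        }
    }
}
```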
null
train
test
"2021-03-07T19:15:39"
"2021-03-03T08:12:48Z"
shoothzj
val
netty/netty/11092_11093
netty/netty
netty/netty/11092
netty/netty/11093
[ "keyword_pr_to_issue" ]
654a54bbadad69031cb34cfdf6e5f6a6f6bbbfb1
56703b93ba2ab29b5ba076dc838c3b73a9389bda
[]
[]
"2021-03-17T05:17:41Z"
[]
IllegalReferenceCountException using compression if HttpResponse implements HttpContent and not LastHttpContent
This was originally reported in Quarkus as https://github.com/quarkusio/quarkus/issues/14695. Vert.x uses https://github.com/eclipse-vertx/vert.x/blob/master/src/main/java/io/vertx/core/http/impl/AssembledHttpResponse.java, which implements both HttpResponse and HttpContent, but not LastHttpContent. This results in an IllegalReferenceCountException in some situations when compression is enabled. I am pretty sure the root cause is here: https://github.com/netty/netty/blob/4.1/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java#L196, as the buffer is not retained.
[ "codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java index a0d951a16c5..9f94e025873 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java @@ -194,7 +194,7 @@ protected void encode(ChannelHandlerContext ctx, HttpObject msg, List<Object> ou res.headers().remove(HttpHeaderNames.CONTENT_LENGTH); res.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); - out.add(res); + out.add(ReferenceCountUtil.retain(res)); state = State.AWAIT_CONTENT; if (!(msg instanceof HttpContent)) { // only break out the switch statement if we have not content to process
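The one-line fix works because the assembled response is both handed to the outbound list and kept by the encoder for further content processing, so each consumer needs its own reference. Here is a small self-contained sketch of that reference-counting rule, using a plain `ByteBuf` as a stand-in for the assembled response:

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;
import io.netty.util.ReferenceCountUtil;

public class RetainPerConsumer {
    public static void main(String[] args) {
        ByteBuf msg = Unpooled.copiedBuffer("Hell", CharsetUtil.US_ASCII);
        System.out.println(msg.refCnt()); // 1

        // Hand the message to a second consumer: retain first (1 -> 2),
        // which is what the patched encoder now does before out.add(res).
        ByteBuf forwarded = ReferenceCountUtil.retain(msg);

        ReferenceCountUtil.release(forwarded); // 2 -> 1
        ReferenceCountUtil.release(msg);       // 1 -> 0, freed exactly once

        // Without the retain, the second release() would underflow the
        // count and throw IllegalReferenceCountException, as reported above.
    }
}
```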
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java index c2bac013a10..901e424dd6a 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java @@ -15,6 +15,7 @@ */ package io.netty.handler.codec.http; +import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; @@ -31,7 +32,11 @@ import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; public class HttpContentCompressorTest { @@ -151,6 +156,47 @@ public void testChunkedContent() throws Exception { assertThat(ch.readOutbound(), is(nullValue())); } + @Test + public void testChunkedContentWithAssembledResponse() throws Exception { + EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor()); + ch.writeInbound(newRequest()); + + HttpResponse res = new AssembledHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, + Unpooled.copiedBuffer("Hell", CharsetUtil.US_ASCII)); + res.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); + ch.writeOutbound(res); + + assertAssembledEncodedResponse(ch); + + ch.writeOutbound(new DefaultHttpContent(Unpooled.copiedBuffer("o, w", CharsetUtil.US_ASCII))); + ch.writeOutbound(new DefaultLastHttpContent(Unpooled.copiedBuffer("orld", CharsetUtil.US_ASCII))); + + HttpContent chunk; + chunk = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(chunk.content()), is("1f8b0800000000000000f248cdc901000000ffff")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(chunk.content()), is("cad7512807000000ffff")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(chunk.content()), is("ca2fca4901000000ffff")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(chunk.content()), is("0300c2a99ae70c000000")); + assertThat(chunk, is(instanceOf(HttpContent.class))); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().isReadable(), is(false)); + assertThat(chunk, is(instanceOf(LastHttpContent.class))); + chunk.release(); + + assertThat(ch.readOutbound(), is(nullValue())); + } + @Test public void testChunkedContentWithTrailingHeader() throws Exception { EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor()); @@ -550,4 +596,92 @@ private static void assertEncodedResponse(EmbeddedChannel ch) { assertThat(res.headers().get(HttpHeaderNames.CONTENT_LENGTH), is(nullValue())); assertThat(res.headers().get(HttpHeaderNames.CONTENT_ENCODING), is("gzip")); } + private static void assertAssembledEncodedResponse(EmbeddedChannel ch) { + Object o = ch.readOutbound(); + assertThat(o, is(instanceOf(AssembledHttpResponse.class))); + + AssembledHttpResponse res = (AssembledHttpResponse) o; + try { + assertThat(res, is(instanceOf(HttpContent.class))); + assertThat(res.headers().get(HttpHeaderNames.TRANSFER_ENCODING), is("chunked")); + assertThat(res.headers().get(HttpHeaderNames.CONTENT_LENGTH), 
is(nullValue())); + assertThat(res.headers().get(HttpHeaderNames.CONTENT_ENCODING), is("gzip")); + } finally { + res.release(); + } + } + + static class AssembledHttpResponse extends DefaultHttpResponse implements HttpContent { + + private final ByteBuf content; + + AssembledHttpResponse(HttpVersion version, HttpResponseStatus status, ByteBuf content) { + super(version, status); + this.content = content; + } + + @Override + public HttpContent copy() { + throw new UnsupportedOperationException(); + } + + @Override + public HttpContent duplicate() { + throw new UnsupportedOperationException(); + } + + @Override + public HttpContent retainedDuplicate() { + throw new UnsupportedOperationException(); + } + + @Override + public HttpContent replace(ByteBuf content) { + throw new UnsupportedOperationException(); + } + + @Override + public AssembledHttpResponse retain() { + content.retain(); + return this; + } + + @Override + public AssembledHttpResponse retain(int increment) { + content.retain(increment); + return this; + } + + @Override + public ByteBuf content() { + return content; + } + + @Override + public int refCnt() { + return content.refCnt(); + } + + @Override + public boolean release() { + return content.release(); + } + + @Override + public boolean release(int decrement) { + return content.release(decrement); + } + + @Override + public AssembledHttpResponse touch() { + content.touch(); + return this; + } + + @Override + public AssembledHttpResponse touch(Object hint) { + content.touch(hint); + return this; + } + } }
train
test
"2021-03-18T17:54:06"
"2021-03-17T05:14:50Z"
stuartwdouglas
val
netty/netty/10908_11100
netty/netty
netty/netty/10908
netty/netty/11100
[ "keyword_pr_to_issue" ]
bd62a9d6ffd9941d9e399cbbd73a10f06e38f217
9f242d27e05651a909571f232dd1f5afb1aed20c
[ "@srinidhibashyam I don't think byte discarding is intended to be used in conjunction with derived buffers (slices/duplicates) since it moves things around in the underlying buffer without derived buffers' knowledge. There should probably be a check to disallow those operations if the buffer's ref count is > 1 (as it would be in your case), and I recall some discussion about that a very long time ago.\r\n\r\nDepending on your use case, you could just use `copy()` instead of `retainedSlice()`. Or if the data is very large and you want to avoid copying bytes you could do:\r\n```java\r\nsliced = composite.alloc().compositeBuffer().addFlattenedComponents(true, composite)\r\n```\r\nafter which you can call `discardReadBytes()` on `composite` without it affecting `sliced`'s content. However this only works if `composite` really is a `CompositeByteBuf`. ", "The ref count check would guard against this issue when you use `retainedSlice`, but not if you just use `slice`, so I'm not sure it's worth it to add. I could also imagine some code that works today would be broken by stricter checks." ]
[]
"2021-03-18T13:49:07Z"
[]
NullPointerException while accessing a readable slice of CompositeByteBuf after discarding some bytes
### Expected behavior After skipping and discarding some bytes using `skipBytes()` and `discardSomeReadBytes()` functions in a `CompositeByteBuf`, the readable bytes in the buffer should still be accessible. ### Actual behavior Throws NullPointerException. ``` Exception in thread "main" java.lang.NullPointerException at io.netty.buffer.CompositeByteBuf.findIt(CompositeByteBuf.java:1604) at io.netty.buffer.CompositeByteBuf.findComponent0(CompositeByteBuf.java:1597) at io.netty.buffer.CompositeByteBuf._getByte(CompositeByteBuf.java:949) at io.netty.buffer.UnpooledSlicedByteBuf._getByte(UnpooledSlicedByteBuf.java:39) at io.netty.buffer.AbstractByteBuf.readByte(AbstractByteBuf.java:734) ... ``` ### Steps to reproduce 1. Allocate a container using CompositeByteBuf 2. Fill it with a segment 3. Retain a slice of payload from the container for future access 4. Skip retained number of bytes in the container 5. Call `discardSomeReadBytes()` on the container 6. Try to access the sliced buffer from Step 3. ### Minimal yet complete reproducer code (or URL to code) ``` import io.netty.buffer.ByteBuf; import io.netty.buffer.CompositeByteBuf; import io.netty.buffer.Unpooled; import io.netty.util.ResourceLeakDetector; public class BufBug { static { ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID); } private static final boolean EXPECTED = true; public static void main(String[] args) { // allocate a container final CompositeByteBuf composite = Unpooled.compositeBuffer(); // fill it with a segment composite.addComponent(true, Unpooled.copyBoolean(EXPECTED)); // take the whole payload (weirdly) final ByteBuf sliced = composite.retainedSlice(); composite.skipBytes(sliced.readableBytes()); composite.discardSomeReadBytes(); // observe that "sliced" still claims to have readable bytes System.out.println("readerIndex = " + sliced.readerIndex() + ", writerIndex = " + sliced.writerIndex() + ", readableBytes = " + sliced.readableBytes()); // this should work, but throws System.out.println("first byte = " + sliced.readBoolean()); sliced.release(); // cleanup composite.release(); } } ``` ### Netty version Affects 4.1.53.Final and 4.1.56.Final ### JVM version (e.g. `java -version`) java.version: 11.0.2 java.vm.name: OpenJDK 64-Bit Server VM ### OS version (e.g. `uname -a`) os.name: Mac OS X os.arch: x86_64 os.version: 10.15.7
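Following the `copy()` suggestion in the hints above, here is a runnable sketch (derived from the reproducer) showing that an independent copy survives a later `discardSomeReadBytes()` on the composite, at the cost of duplicating the bytes:

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;

public class CopyInsteadOfSlice {
    public static void main(String[] args) {
        CompositeByteBuf composite = Unpooled.compositeBuffer();
        composite.addComponent(true, Unpooled.copyBoolean(true));

        // Instead of composite.retainedSlice(): copy() owns its own memory,
        // so mutating the composite's layout cannot invalidate it.
        ByteBuf copied = composite.copy();

        composite.skipBytes(copied.readableBytes());
        composite.discardSomeReadBytes(); // does not affect 'copied'

        System.out.println("first byte = " + copied.readBoolean()); // true
        copied.release();
        composite.release();
    }
}
```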
[ "buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java" ]
[ "buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java" ]
[ "buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java" ]
diff --git a/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java b/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java index 2b03b801cb1..6dc0a2cbf73 100644 --- a/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java @@ -1601,6 +1601,10 @@ private Component findIt(int offset) { for (int low = 0, high = componentCount; low <= high;) { int mid = low + high >>> 1; Component c = components[mid]; + if (c == null) { + throw new IllegalStateException("No component found for offset. " + + "Composite buffer layout might be outdated, e.g. from a discardReadBytes call."); + } if (offset >= c.endOffset) { low = mid + 1; } else if (offset < c.offset) {
diff --git a/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java index 6991fabd539..4af2e2efbd4 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java @@ -1614,4 +1614,24 @@ public void testOverflowWhileAddingComponentsViaIterable() { compositeByteBuf.release(); } } + + @Test + public void sliceOfCompositeBufferMustThrowISEAfterDiscardBytes() { + CompositeByteBuf composite = compositeBuffer(); + composite.addComponent(true, buffer(8).writeZero(8)); + + ByteBuf slice = composite.retainedSlice(); + composite.skipBytes(slice.readableBytes()); + composite.discardSomeReadBytes(); + + try { + slice.readByte(); + fail("Expected readByte of discarded slice to throw."); + } catch (IllegalStateException ignore) { + // Good. + } finally { + slice.release(); + composite.release(); + } + } }
val
test
"2021-03-17T20:18:50"
"2021-01-04T01:21:09Z"
srinidhibashyam
val
netty/netty/11101_11106
netty/netty
netty/netty/11101
netty/netty/11106
[ "keyword_pr_to_issue" ]
4f316b7cbd8af2bbf47319c82941a6f80afcb609
207108691976ac8ef646156b4289776d7966c4f7
[ "@alalag1 Oof! Nice catch. We'll take a look." ]
[ "So this means we may waste `directMemoryCacheAlignment` bytes in the worse case, correct ? ", "Should we better not make this static but create a new one in the test ? Otherwise we may end up with using more memory then we really need (due the fact that the ALLOCATOR will not be collected until the JVM dies).", "should we require that `alignment` is a power of two and so we can use `&` as a replacement for `%` for perf reasons ?", "Yes, that was always the case when alignment was requested.", "I switched to using JCTools Pow2 for this, which do these checks." ]
"2021-03-22T16:38:12Z"
[]
Direct memory allocation error in PooledByteBufAllocator when setting directMemoryCacheAlignment to "1"
Define a `PooledByteBufAllocator` with `directMemoryCacheAlignment` set to "1" and try to allocate some `ByteBuf`s like ```java PooledByteBufAllocator pooledByteBufAllocator = new PooledByteBufAllocator( PlatformDependent.directBufferPreferred(), PooledByteBufAllocator.defaultNumHeapArena(), PooledByteBufAllocator.defaultNumDirectArena(), PooledByteBufAllocator.defaultPageSize(), 11, PooledByteBufAllocator.defaultSmallCacheSize(), 64, PooledByteBufAllocator.defaultUseCacheForAllThreads(), 1 ); ByteBuf buf1 = pooledByteBufAllocator.directBuffer(16384); for (int i = 0; i < 16384; i++) { buf1.writeByte(1); } ByteBuf buf2 = pooledByteBufAllocator.directBuffer(65536); buf2.writeByte(2); System.out.println("last byte of buf1: " + buf1.getByte(buf1.capacity() - 1)); ``` ### Expected behavior `last byte of buf1: 1` ### Actual behavior `last byte of buf1: 2` ### Steps to reproduce ```java PooledByteBufAllocator pooledByteBufAllocator = new PooledByteBufAllocator( PlatformDependent.directBufferPreferred(), PooledByteBufAllocator.defaultNumHeapArena(), PooledByteBufAllocator.defaultNumDirectArena(), PooledByteBufAllocator.defaultPageSize(), 11, PooledByteBufAllocator.defaultSmallCacheSize(), 64, PooledByteBufAllocator.defaultUseCacheForAllThreads(), 1 ); ByteBuf buf1 = pooledByteBufAllocator.directBuffer(16384); ByteBuf buf2 = pooledByteBufAllocator.directBuffer(65536); System.out.println("memory address of buf1: [" + buf1.memoryAddress() + ", " + (buf1.memoryAddress() + buf1.capacity()) + ")"); System.out.println("memory address of buf2: [" + buf2.memoryAddress() + ", " + (buf2.memoryAddress() + buf2.capacity()) + ")"); ``` This yields 2 overlapping ranges (on my PC): memory address of buf1: [2047716884545, 2047716900929) memory address of buf2: [2047716900928, 2047716966464) ### Netty version 4.1.60.Final ### JVM version (e.g. `java -version`) 1.8.0_271 ### OS version (e.g. `uname -a`) win10
[ "buffer/src/main/java/io/netty/buffer/PoolArena.java", "buffer/src/main/java/io/netty/buffer/PoolChunk.java", "buffer/src/main/java/io/netty/buffer/PooledByteBuf.java", "buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java", "common/src/main/java/io/netty/util/internal/PlatformDependent.java", "common/src/main/java/io/netty/util/internal/PlatformDependent0.java" ]
[ "buffer/src/main/java/io/netty/buffer/PoolArena.java", "buffer/src/main/java/io/netty/buffer/PoolChunk.java", "buffer/src/main/java/io/netty/buffer/PooledByteBuf.java", "buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java", "common/src/main/java/io/netty/util/internal/PlatformDependent.java", "common/src/main/java/io/netty/util/internal/PlatformDependent0.java" ]
[ "buffer/src/test/java/io/netty/buffer/AbstractPooledByteBufTest.java", "buffer/src/test/java/io/netty/buffer/AlignedPooledByteBufAllocatorTest.java", "buffer/src/test/java/io/netty/buffer/PoolArenaTest.java", "buffer/src/test/java/io/netty/buffer/PooledAlignedBigEndianDirectByteBufTest.java" ]
diff --git a/buffer/src/main/java/io/netty/buffer/PoolArena.java b/buffer/src/main/java/io/netty/buffer/PoolArena.java index cab3224760f..fba1127b5d7 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolArena.java +++ b/buffer/src/main/java/io/netty/buffer/PoolArena.java @@ -41,7 +41,6 @@ enum SizeClass { final int numSmallSubpagePools; final int directMemoryCacheAlignment; - final int directMemoryCacheAlignmentMask; private final PoolSubpage<T>[] smallSubpagePools; private final PoolChunkList<T> q050; @@ -77,7 +76,6 @@ protected PoolArena(PooledByteBufAllocator parent, int pageSize, super(pageSize, pageShifts, chunkSize, cacheAlignment); this.parent = parent; directMemoryCacheAlignment = cacheAlignment; - directMemoryCacheAlignmentMask = cacheAlignment - 1; numSmallSubpagePools = nSubpages; smallSubpagePools = newSubpagePoolArray(numSmallSubpagePools); @@ -152,7 +150,7 @@ private void tcacheAllocateSmall(PoolThreadCache cache, PooledByteBuf<T> buf, fi return; } - /** + /* * Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and * {@link PoolChunk#free(long)} may modify the doubly linked list as well. */ @@ -235,7 +233,7 @@ void free(PoolChunk<T> chunk, ByteBuffer nioBuffer, long handle, int normCapacit } } - private SizeClass sizeClass(long handle) { + private static SizeClass sizeClass(long handle) { return isSubpage(handle) ? SizeClass.Small : SizeClass.Normal; } @@ -559,12 +557,13 @@ boolean isDirect() { @Override protected PoolChunk<byte[]> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) { - return new PoolChunk<byte[]>(this, newByteArray(chunkSize), pageSize, pageShifts, chunkSize, maxPageIdx, 0); + return new PoolChunk<byte[]>( + this, null, newByteArray(chunkSize), pageSize, pageShifts, chunkSize, maxPageIdx); } @Override protected PoolChunk<byte[]> newUnpooledChunk(int capacity) { - return new PoolChunk<byte[]>(this, newByteArray(capacity), capacity, 0); + return new PoolChunk<byte[]>(this, null, newByteArray(capacity), capacity); } @Override @@ -601,43 +600,31 @@ boolean isDirect() { return true; } - // mark as package-private, only for unit test - int offsetCacheLine(ByteBuffer memory) { - // We can only calculate the offset if Unsafe is present as otherwise directBufferAddress(...) will - // throw an NPE. - int remainder = HAS_UNSAFE - ? 
(int) (PlatformDependent.directBufferAddress(memory) & directMemoryCacheAlignmentMask) - : 0; - - // offset = alignment - address & (alignment - 1) - return directMemoryCacheAlignment - remainder; - } - @Override protected PoolChunk<ByteBuffer> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) { if (directMemoryCacheAlignment == 0) { - return new PoolChunk<ByteBuffer>(this, - allocateDirect(chunkSize), pageSize, pageShifts, - chunkSize, maxPageIdx, 0); + ByteBuffer memory = allocateDirect(chunkSize); + return new PoolChunk<ByteBuffer>(this, memory, memory, pageSize, pageShifts, + chunkSize, maxPageIdx); } - final ByteBuffer memory = allocateDirect(chunkSize - + directMemoryCacheAlignment); - return new PoolChunk<ByteBuffer>(this, memory, pageSize, - pageShifts, chunkSize, maxPageIdx, - offsetCacheLine(memory)); + + final ByteBuffer base = allocateDirect(chunkSize + directMemoryCacheAlignment); + final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, directMemoryCacheAlignment); + return new PoolChunk<ByteBuffer>(this, base, memory, pageSize, + pageShifts, chunkSize, maxPageIdx); } @Override protected PoolChunk<ByteBuffer> newUnpooledChunk(int capacity) { if (directMemoryCacheAlignment == 0) { - return new PoolChunk<ByteBuffer>(this, - allocateDirect(capacity), capacity, 0); + ByteBuffer memory = allocateDirect(capacity); + return new PoolChunk<ByteBuffer>(this, memory, memory, capacity); } - final ByteBuffer memory = allocateDirect(capacity - + directMemoryCacheAlignment); - return new PoolChunk<ByteBuffer>(this, memory, capacity, - offsetCacheLine(memory)); + + final ByteBuffer base = allocateDirect(capacity + directMemoryCacheAlignment); + final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, directMemoryCacheAlignment); + return new PoolChunk<ByteBuffer>(this, base, memory, capacity); } private static ByteBuffer allocateDirect(int capacity) { @@ -648,9 +635,9 @@ private static ByteBuffer allocateDirect(int capacity) { @Override protected void destroyChunk(PoolChunk<ByteBuffer> chunk) { if (PlatformDependent.useDirectBufferNoCleaner()) { - PlatformDependent.freeDirectNoCleaner(chunk.memory); + PlatformDependent.freeDirectNoCleaner((ByteBuffer) chunk.base); } else { - PlatformDependent.freeDirectBuffer(chunk.memory); + PlatformDependent.freeDirectBuffer((ByteBuffer) chunk.base); } } diff --git a/buffer/src/main/java/io/netty/buffer/PoolChunk.java b/buffer/src/main/java/io/netty/buffer/PoolChunk.java index b9604b31302..b9a1b4cb49b 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolChunk.java +++ b/buffer/src/main/java/io/netty/buffer/PoolChunk.java @@ -141,9 +141,9 @@ final class PoolChunk<T> implements PoolChunkMetric { static final int RUN_OFFSET_SHIFT = SIZE_BIT_LENGTH + SIZE_SHIFT; final PoolArena<T> arena; + final Object base; final T memory; final boolean unpooled; - final int offset; /** * store the first page and last page of each avail run @@ -181,14 +181,14 @@ final class PoolChunk<T> implements PoolChunkMetric { //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; @SuppressWarnings("unchecked") - PoolChunk(PoolArena<T> arena, T memory, int pageSize, int pageShifts, int chunkSize, int maxPageIdx, int offset) { + PoolChunk(PoolArena<T> arena, Object base, T memory, int pageSize, int pageShifts, int chunkSize, int maxPageIdx) { unpooled = false; this.arena = arena; + this.base = base; this.memory = memory; this.pageSize = pageSize; this.pageShifts = pageShifts; this.chunkSize = chunkSize; - this.offset = offset; freeBytes 
= chunkSize; runsAvail = newRunsAvailqueueArray(maxPageIdx); @@ -204,11 +204,11 @@ final class PoolChunk<T> implements PoolChunkMetric { } /** Creates a special chunk that is not pooled. */ - PoolChunk(PoolArena<T> arena, T memory, int size, int offset) { + PoolChunk(PoolArena<T> arena, Object base, T memory, int size) { unpooled = true; this.arena = arena; + this.base = base; this.memory = memory; - this.offset = offset; pageSize = 0; pageShifts = 0; runsAvailMap = null; @@ -569,9 +569,8 @@ void initBufWithSubpage(PooledByteBuf<T> buf, ByteBuffer nioBuffer, long handle, assert s.doNotDestroy; assert reqCapacity <= s.elemSize; - buf.init(this, nioBuffer, handle, - (runOffset << pageShifts) + bitmapIdx * s.elemSize + offset, - reqCapacity, s.elemSize, threadCache); + int offset = (runOffset << pageShifts) + bitmapIdx * s.elemSize; + buf.init(this, nioBuffer, handle, offset, reqCapacity, s.elemSize, threadCache); } @Override diff --git a/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java b/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java index 2c3aef6fc4b..f0190850734 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java @@ -52,7 +52,7 @@ void init(PoolChunk<T> chunk, ByteBuffer nioBuffer, } void initUnpooled(PoolChunk<T> chunk, int length) { - init0(chunk, null, 0, chunk.offset, length, length, null); + init0(chunk, null, 0, 0, length, length, null); } private void init0(PoolChunk<T> chunk, ByteBuffer nioBuffer, diff --git a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java index a3c8c605f41..dd9e36f55b7 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java +++ b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java @@ -63,15 +63,19 @@ public void run() { }; static { + int defaultAlignment = SystemPropertyUtil.getInt( + "io.netty.allocator.directMemoryCacheAlignment", 0); int defaultPageSize = SystemPropertyUtil.getInt("io.netty.allocator.pageSize", 8192); Throwable pageSizeFallbackCause = null; try { - validateAndCalculatePageShifts(defaultPageSize); + validateAndCalculatePageShifts(defaultPageSize, defaultAlignment); } catch (Throwable t) { pageSizeFallbackCause = t; defaultPageSize = 8192; + defaultAlignment = 0; } DEFAULT_PAGE_SIZE = defaultPageSize; + DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT = defaultAlignment; int defaultMaxOrder = SystemPropertyUtil.getInt("io.netty.allocator.maxOrder", 11); Throwable maxOrderFallbackCause = null; @@ -142,9 +146,6 @@ public void run() { DEFAULT_USE_CACHE_FOR_ALL_THREADS = SystemPropertyUtil.getBoolean( "io.netty.allocator.useCacheForAllThreads", true); - DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT = SystemPropertyUtil.getInt( - "io.netty.allocator.directMemoryCacheAlignment", 0); - // Use 1023 by default as we use an ArrayDeque as backing storage which will then allocate an internal array // of 1024 elements. Otherwise we would allocate 2048 and only use 1024 which is wasteful. DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK = SystemPropertyUtil.getInt( @@ -266,6 +267,17 @@ public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectA threadCache = new PoolThreadLocalCache(useCacheForAllThreads); this.smallCacheSize = smallCacheSize; this.normalCacheSize = normalCacheSize; + + if (directMemoryCacheAlignment != 0) { + if (!PlatformDependent.hasAlignDirectByteBuffer()) { + throw new UnsupportedOperationException("Buffer alignment is not supported. 
" + + "Either Unsafe or ByteBuffer.alignSlice() must be available."); + } + + // Ensure page size is a whole multiple of the alignment, or bump it to the next whole multiple. + pageSize = (int) PlatformDependent.align(pageSize, directMemoryCacheAlignment); + } + chunkSize = validateAndCalculateChunkSize(pageSize, maxOrder); checkPositiveOrZero(nHeapArena, "nHeapArena"); @@ -281,7 +293,7 @@ public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectA + directMemoryCacheAlignment + " (expected: power of two)"); } - int pageShifts = validateAndCalculatePageShifts(pageSize); + int pageShifts = validateAndCalculatePageShifts(pageSize, directMemoryCacheAlignment); if (nHeapArena > 0) { heapArenas = newArenaArray(nHeapArena); @@ -321,15 +333,20 @@ private static <T> PoolArena<T>[] newArenaArray(int size) { return new PoolArena[size]; } - private static int validateAndCalculatePageShifts(int pageSize) { + private static int validateAndCalculatePageShifts(int pageSize, int alignment) { if (pageSize < MIN_PAGE_SIZE) { - throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: " + MIN_PAGE_SIZE + ")"); + throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: " + MIN_PAGE_SIZE + ')'); } if ((pageSize & pageSize - 1) != 0) { throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: power of 2)"); } + if (pageSize < alignment) { + throw new IllegalArgumentException("Alignment cannot be greater than page size. " + + "Alignment: " + alignment + ", page size: " + pageSize + '.'); + } + // Logarithm base 2. At this point we know that pageSize is a power of two. return Integer.SIZE - 1 - Integer.numberOfLeadingZeros(pageSize); } diff --git a/common/src/main/java/io/netty/util/internal/PlatformDependent.java b/common/src/main/java/io/netty/util/internal/PlatformDependent.java index 89daf214b00..3e752ad0434 100644 --- a/common/src/main/java/io/netty/util/internal/PlatformDependent.java +++ b/common/src/main/java/io/netty/util/internal/PlatformDependent.java @@ -768,6 +768,32 @@ public static void freeDirectNoCleaner(ByteBuffer buffer) { decrementMemoryCounter(capacity); } + public static boolean hasAlignDirectByteBuffer() { + return hasUnsafe() || PlatformDependent0.hasAlignSliceMethod(); + } + + public static ByteBuffer alignDirectBuffer(ByteBuffer buffer, int alignment) { + if (!buffer.isDirect()) { + throw new IllegalArgumentException("Cannot get aligned slice of non-direct byte buffer."); + } + if (PlatformDependent0.hasAlignSliceMethod()) { + return PlatformDependent0.alignSlice(buffer, alignment); + } + if (hasUnsafe()) { + long address = directBufferAddress(buffer); + long aligned = align(address, alignment); + buffer.position((int) (aligned - address)); + return buffer.slice(); + } + // We don't have enough information to be able to align any buffers. + throw new UnsupportedOperationException("Cannot align direct buffer. 
" + + "Needs either Unsafe or ByteBuffer.alignSlice method available."); + } + + public static long align(long value, int alignment) { + return Pow2.align(value, alignment); + } + private static void incrementMemoryCounter(int capacity) { if (DIRECT_MEMORY_COUNTER != null) { long newUsedMemory = DIRECT_MEMORY_COUNTER.addAndGet(capacity); diff --git a/common/src/main/java/io/netty/util/internal/PlatformDependent0.java b/common/src/main/java/io/netty/util/internal/PlatformDependent0.java index e5eb2f5b109..0609d0090e6 100644 --- a/common/src/main/java/io/netty/util/internal/PlatformDependent0.java +++ b/common/src/main/java/io/netty/util/internal/PlatformDependent0.java @@ -46,6 +46,7 @@ final class PlatformDependent0 { private static final Constructor<?> DIRECT_BUFFER_CONSTRUCTOR; private static final Throwable EXPLICIT_NO_UNSAFE_CAUSE = explicitNoUnsafeCause0(); private static final Method ALLOCATE_ARRAY_METHOD; + private static final Method ALIGN_SLICE; private static final int JAVA_VERSION = javaVersion0(); private static final boolean IS_ANDROID = isAndroid0(); @@ -398,6 +399,21 @@ public Object run() { ALLOCATE_ARRAY_METHOD = allocateArrayMethod; } + if (javaVersion() > 9) { + ALIGN_SLICE = (Method) AccessController.doPrivileged(new PrivilegedAction<Object>() { + @Override + public Object run() { + try { + return ByteBuffer.class.getDeclaredMethod("alignedSlice", int.class); + } catch (Exception e) { + return null; + } + } + }); + } else { + ALIGN_SLICE = null; + } + INTERNAL_UNSAFE = internalUnsafe; logger.debug("java.nio.DirectByteBuffer.<init>(long, int): {}", @@ -474,6 +490,20 @@ static ByteBuffer allocateDirectNoCleaner(int capacity) { return newDirectBuffer(UNSAFE.allocateMemory(Math.max(1, capacity)), capacity); } + static boolean hasAlignSliceMethod() { + return ALIGN_SLICE != null; + } + + static ByteBuffer alignSlice(ByteBuffer buffer, int alignment) { + try { + return (ByteBuffer) ALIGN_SLICE.invoke(buffer, alignment); + } catch (IllegalAccessException e) { + throw new Error(e); + } catch (InvocationTargetException e) { + throw new Error(e); + } + } + static boolean hasAllocateArrayMethod() { return ALLOCATE_ARRAY_METHOD != null; }
diff --git a/buffer/src/test/java/io/netty/buffer/AbstractPooledByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AbstractPooledByteBufTest.java index f7c16661a67..54797b25647 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractPooledByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractPooledByteBufTest.java @@ -125,4 +125,18 @@ public void testIsContiguous() { assertTrue(buf.isContiguous()); buf.release(); } + + @Test + public void distinctBuffersMustNotOverlap() { + ByteBuf a = newBuffer(16384); + ByteBuf b = newBuffer(65536); + a.setByte(a.capacity() - 1, 1); + b.setByte(0, 2); + try { + assertEquals(1, a.getByte(a.capacity() - 1)); + } finally { + a.release(); + b.release(); + } + } } diff --git a/buffer/src/test/java/io/netty/buffer/AlignedPooledByteBufAllocatorTest.java b/buffer/src/test/java/io/netty/buffer/AlignedPooledByteBufAllocatorTest.java new file mode 100644 index 00000000000..59fcf77c0e0 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/AlignedPooledByteBufAllocatorTest.java @@ -0,0 +1,33 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer; + +public class AlignedPooledByteBufAllocatorTest extends PooledByteBufAllocatorTest { + @Override + protected PooledByteBufAllocator newAllocator(boolean preferDirect) { + int directMemoryCacheAlignment = 1; + return new PooledByteBufAllocator( + preferDirect, + PooledByteBufAllocator.defaultNumHeapArena(), + PooledByteBufAllocator.defaultNumDirectArena(), + PooledByteBufAllocator.defaultPageSize(), + 11, + PooledByteBufAllocator.defaultSmallCacheSize(), + 64, + PooledByteBufAllocator.defaultUseCacheForAllThreads(), + directMemoryCacheAlignment); + } +} diff --git a/buffer/src/test/java/io/netty/buffer/PoolArenaTest.java b/buffer/src/test/java/io/netty/buffer/PoolArenaTest.java index 590dce11666..a598c910f38 100644 --- a/buffer/src/test/java/io/netty/buffer/PoolArenaTest.java +++ b/buffer/src/test/java/io/netty/buffer/PoolArenaTest.java @@ -103,26 +103,6 @@ public void testPageIdx2size() { } } - @Test - public void testDirectArenaOffsetCacheLine() throws Exception { - assumeTrue(PlatformDependent.hasUnsafe()); - int capacity = 5; - int alignment = 128; - - for (int i = 0; i < 1000; i++) { - ByteBuffer bb = PlatformDependent.useDirectBufferNoCleaner() - ? 
PlatformDependent.allocateDirectNoCleaner(capacity + alignment) - : ByteBuffer.allocateDirect(capacity + alignment); - - PoolArena.DirectArena arena = new PoolArena.DirectArena(null, 512, 9, 512, alignment); - int offset = arena.offsetCacheLine(bb); - long address = PlatformDependent.directBufferAddress(bb); - - Assert.assertEquals(0, (offset + address) & (alignment - 1)); - PlatformDependent.freeDirectBuffer(bb); - } - } - @Test public void testAllocationCounter() { final PooledByteBufAllocator allocator = new PooledByteBufAllocator( diff --git a/buffer/src/test/java/io/netty/buffer/PooledAlignedBigEndianDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/PooledAlignedBigEndianDirectByteBufTest.java new file mode 100644 index 00000000000..6ac7740a190 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/PooledAlignedBigEndianDirectByteBufTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer; + +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.nio.ByteOrder; + +import static org.junit.Assert.assertSame; + +public class PooledAlignedBigEndianDirectByteBufTest extends PooledBigEndianDirectByteBufTest { + private static final int directMemoryCacheAlignment = 1; + private static PooledByteBufAllocator allocator; + + @BeforeClass + public static void setUpAllocator() { + allocator = new PooledByteBufAllocator( + true, + PooledByteBufAllocator.defaultNumHeapArena(), + PooledByteBufAllocator.defaultNumDirectArena(), + PooledByteBufAllocator.defaultPageSize(), + 11, + PooledByteBufAllocator.defaultSmallCacheSize(), + 64, + PooledByteBufAllocator.defaultUseCacheForAllThreads(), + directMemoryCacheAlignment); + } + + @AfterClass + public static void releaseAllocator() { + allocator = null; + } + + @Override + protected ByteBuf alloc(int length, int maxCapacity) { + ByteBuf buffer = allocator.directBuffer(length, maxCapacity); + assertSame(ByteOrder.BIG_ENDIAN, buffer.order()); + return buffer; + } +}
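As discussed in the review comments above, the final implementation requires the alignment to be a power of two so that the modulo can be replaced by a bitmask; this is the arithmetic behind JCTools' `Pow2.align`, which the patch's `PlatformDependent.align` delegates to. A small illustrative sketch of that rounding, with made-up sample values:

```java
public class Pow2AlignSketch {
    // Round 'value' up to the next multiple of 'alignment'. Only valid when
    // 'alignment' is a power of two, so -alignment equals the mask ~(alignment - 1).
    static long align(long value, int alignment) {
        if (Integer.bitCount(alignment) != 1) {
            throw new IllegalArgumentException("alignment must be a power of two: " + alignment);
        }
        return (value + alignment - 1) & -(long) alignment;
    }

    public static void main(String[] args) {
        System.out.println(align(8192, 64)); // 8192: already aligned
        System.out.println(align(8193, 64)); // 8256: bumped to the next multiple
        System.out.println(align(1, 1));     // 1: alignment of 1 is a no-op
    }
}
```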
train
test
"2021-03-21T14:51:56"
"2021-03-19T12:14:19Z"
alalag1
val
netty/netty/11143_11145
netty/netty
netty/netty/11143
netty/netty/11145
[ "keyword_pr_to_issue", "connected" ]
16b40d8a37937fd6e73858db2f05bd1e778a1d6f
93f021141d93e6529215601ba6b5938e44f499c8
[ "@jameskleeh \r\nFrom the beginning: https://github.com/fredericBregier/netty/blob/6daeb0cc51d8689805c1a657e61d395450afec47/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L1160\r\n\r\nIt tries to find out the `delimiter` in the full buffer (multiple chunks up to now):\r\n- if `posDelimiter<0`, the `delimiter` is not found. So the upload cannot be finalized, but maybe we can add some bytes already.\r\n - So, then it tries to search for CRLF or LF\r\n - If not found (`posDelimiter<0`), the buffer can be appened fully (since the `CRLF/LF + delimiter` is not there)\r\n - If found (`posDelimiter>0`), only up to CRLF/LF (without) can be appened (the `delimiter` could be just after in the next chunk)\r\n - If `posDelimiter==0`, the CRLF/LF is found but at position 0, so nothing can be added (the `delimiter` could be just after in the next chunk)\r\n - It returns false in all cases since delimiter is not found, so the next chunk will be added to the current buffer to try to find out the `CRLF/LF + delimiter`.\r\n- If `posDelimiter >= 0`, it means it found the `CRLF/LF + delimiter`, so it finalizes the upload.\r\n\r\n\r\nSo I see nothing wrong there.\r\nCould you propose a reproducer with a simpler code ? It is quite too heavy to check in this huge git.", "@fredericBregier I don't have confidence I could create something outside of Micronaut to reproduce the issue. I'm not sure what you mean by \"It is quite too heavy to check in this huge git\". I clone and run the tests in that codebase everyday so surely you can as well. \r\n\r\nI don't understand the importance of CRLF/LF in this context so I really can't comment on whether it makes sense or not. All I can say is that it worked correctly prior to this change.", "I verified that LF is found at position 0, however your claim of \"nothing can be added (the delimiter could be just after in the next chunk)\" seems invalid to me. Can the contents of a file not contain line feeds? How can you assume the delimiter is coming after a LF? Is that in the RFC for multipart requests somewhere?", "@jameskleeh Hi, your code is quite heavy, so difficult to search for the reason behind and to be able to reproduce and fixing it.\r\n\r\nSo the reason if you have a reproducer, with simple code (one client and one server code), then we can extract a reproducer and fix it.\r\n\r\nOn the RFC part, on multipart side, each part is separated by a CRLF or LF following by the delimiter string.\r\nSo this method objective is to find out the first occurence of the CRLF/LF + delimiter in the buffer. Once found, it has found the final delimiter for the current part. And then it continues to the next part.\r\n\r\nOf course a part can contains a CRLF/LF. But if this is found, there is a risk such that the next bytes are the delimiter but not yet there (due to HTTP chunk by chunk reception).\r\nNote that if a file contains exactly the CRLF/LF+delimiter string, it can be considered as a end of a part. This is of course not wanted but that's how is the RFC.\r\n\r\nMaybe your issue is that the upload is beginning by a CRLF/LF, but not fitting within one HTTP chunk, so the delimiter will come in a later chunk. If this is that, then, once all chunks concerning this upload will arrive on the server side, it will find out the delimite preceeded by CRLF/LF, and therefore ending the part (the upload).\r\n\r\nI believe we could try to seak for the \"last\" CRLF/LF, not the first one when no delimiter is found. 
It might be better from a memory point of view, but it will not change the logic.\r\n", "The idea would be to change:\r\nhttps://github.com/netty/netty/blob/1529ef1794e0a6654fa4334fd979b769d6940e61/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L1163\r\n\r\n posDelimiter = HttpPostBodyUtil.findLineBreak(undecodedChunk, startReaderIndex);\r\n\r\nby something that will search for \"last\" CRLF/LF: (not yet implemented)\r\n\r\n posDelimiter = HttpPostBodyUtil.findLastLineBreak(undecodedChunk, startReaderIndex);\r\n\r\nWe might even check if the new posDelimiter has more space than delimiter size, in order to ensure that the delimiter is not splitten across 2 HTTP chunks as:\r\n\r\n- HTTP Chunk 1\r\n\r\n file body example line 1CRLFThen line 2CRLF--del\r\n\r\nIt might takes `file body example line 1CRLFThen line 2` as first content for file.\r\n\r\n- HTTP Chunk 2\r\n\r\n imiter\r\n\r\nThe delimiter is then found fully and the file is filled with a 0 length buffer.\r\nTherefore, the file contains `file body example line 1CRLFThen line 2`.\r\n\r\nCurrently, on first step, it will gives: `file body example line 1`, then after Chunk 2: ``file body example line 1CRLFThen line 2`\r\n\r\n\r\nBut not that this will not change the behaviour: if there is never a CRLF/LF + delimiter, then the part is never ending. The difference is that, if there is a CRLF/LF in the middle (or start) of the file, it will however populate the file behind, but still waiting to find out the CRLF/LF+delimiter.", "@fredericBregier I'd be happy to test any local branch with the changes", "OK, I will try to make it, but I'm quite sure it will not change anything since I do not have a simple reproducer test code to test it.\r\nBut if it does the trick, well, I would be surprised since it seems to me not a fix but an improvement.\r\n\r\nIn other words, it seems your upload does not end up with a CRLF/LF + delimiter... ? But as I don't know the code, I cannot say for sure of course.", "@fredericBregier I'm not quite sure but its not getting to the end of the file. I'm uploading 15 megabytes and I'm seeing this behavior on the second chunk of data being received. I'm fairly confident all of the data in the buffer when `posDelimiter` is 0 is all data for the file and does not contain any delimiters or anything specific to HTTP", "If you'd like to do a screen share I'd be glad to show you", "Well, could you at least point me out directly to the code:\r\n- client side (I suppose the test itself)\r\n- server side\r\nI may try to do a reproducer such that I understand where the problem is. 
My previous point seems to me only an optimisation.\r\nI would be glad to help to fix this out.\r\n\r\nOf course, I could donwload all your project and following your original guidance, but perhaps I could read the code itself first ?\r\nWhen I go to your github repo, I found out it is so huge, that it was difficult for me to find out where it is...\r\n\r\nFor the screen sharing, might be useful next, if I don't get it ;-)", "@fredericBregier Sure\r\n\r\nClient\r\n--------\r\nHere is the client that is sending the data\r\nhttps://github.com/micronaut-projects/micronaut-core/blob/upgrade-netty/test-suite/src/test/groovy/io/micronaut/upload/StreamUploadSpec.groovy#L552\r\nYou can see its a random set of bytes 15mb long\r\n\r\nHere is where the Netty specific multipart stuff is created from our API\r\nhttps://github.com/micronaut-projects/micronaut-core/blob/upgrade-netty/http-client/src/main/java/io/micronaut/http/client/netty/DefaultHttpClient.java#L1525\r\n\r\nServer\r\n--------\r\nHere is where the data is offered to the decoder\r\nhttps://github.com/micronaut-projects/micronaut-core/blob/upgrade-netty/http-server-netty/src/main/java/io/micronaut/http/server/netty/FormDataHttpContentProcessor.java#L113\r\n\r\nIn the second invocation of the `onData` method above, the `currentPartialHttpData()` returned from the decoder will have a null buffer. See https://github.com/micronaut-projects/micronaut-core/blob/upgrade-netty/http-server-netty/src/main/java/io/micronaut/http/server/netty/FormDataHttpContentProcessor.java#L134\r\n\r\nThe buffer is null because the data previously received has already been read and released and no new data was added to the upload.\r\n\r\nThe data is released via https://github.com/micronaut-projects/micronaut-core/blob/upgrade-netty/http-server-netty/src/main/java/io/micronaut/http/server/netty/multipart/NettyStreamingFileUpload.java#L136\r\n", "OK, I think I got it.\r\nFor the current Http Chunk, once `while (postRequestDecoder.hasNext())` is false, it does not mean it has nothing, it could be in the middle of creation of a new `HttpData`. It just means that there is not enough data to create a new one yet.\r\n\r\nSo I believe that maybe https://github.com/micronaut-projects/micronaut-core/blob/e67ca50cf2a778cb6c7354b1ecd2c7fe7d2910ed/http-server-netty/src/main/java/io/micronaut/http/server/netty/FormDataHttpContentProcessor.java#L134 is wrong. Current `HttpData` might be null (empty or not yet any new `HttpData`).\r\n\r\nFor instance, let say the last chunk was ending like this:\r\n\r\n my previous Http DataCRLF--delimiterCRLFcontent-disposition: form-data; na\r\n\r\nSuch that the next Part from multipart is not yet available to know what it is.\r\nTherefore, the previous one is ready (`hasNext()` is true once, but not twice).\r\nThe second one is not yet created, so no new `HttpData` yet.\r\n\r\nAnother example:\r\n\r\n my previous Http DataCRLF--delimiterCRLFcontent-disposition: form-data; name=\"field1\"CRLF\r\n\r\nThen the second one `HttpData` is created but empty (no data yet).\r\n\r\nI don't know why you are adding like this `messages.add(currentPartialHttpData);` on line 134-135, because for me, it has no meaning. 
As long as the decoder is not giving you something new through `postRequestDecoder.hasNext()`, you don't have to care about a new `HttpData`; the decoder will keep the already available data inside and will give it to you as soon as it has the complete next `HttpData`.\r\n\r\nI'm on my way however to try to improve the decoder part to optimize it, but I feel like you are not using the decoder correctly, however.", "> I don't know why you are adding messages.add(currentPartialHttpData); like this on lines 134-135, because for me, it has no meaning. As long as the decoder is not giving you something new through postRequestDecoder.hasNext(), you don't have to care about a new HttpData; the decoder will keep the already available data inside and will give it to you as soon as it has the complete next HttpData.\r\n\r\nBecause we need to notify the downstream that new data is available on the upload. Users can read and release pieces of uploads as they become available. It is often the case we don't want to buffer any data beyond a single chunk\r\n\r\n> So I believe that maybe https://github.com/micronaut-projects/micronaut-core/blob/e67ca50cf2a778cb6c7354b1ecd2c7fe7d2910ed/http-server-netty/src/main/java/io/micronaut/http/server/netty/FormDataHttpContentProcessor.java#L134 is wrong. The current HttpData might be null (empty, or no new HttpData yet).\r\n\r\nIf it's null it wouldn't be passed through, so I don't think it's wrong. This code has been in place for some time and working well\r\n\r\n> Then the second HttpData is created but empty (no data yet).\r\n\r\nIt's possible this is a valid case and we need to handle it, however I don't think it's relevant to this issue", "You can check out the 2.4.x branch of micronaut-core to see the difference in behavior. In Netty 4.1.59 the partial upload is populated with a buffer on the second invocation, and with 4.1.60+ it is not.", "Yes, I understood. So the \"API behavior\" changed. But what I'm saying is that the goal of the decoder is not to give you a \"partial HttpData\" but a full one when ready. You have made assumptions about what are internal operations.\r\nIt is almost like a Queue: when the Queue has no further element yet, it says hasNext is false. You have to wait to see the next available element when it is ready.\r\nThere you assume the Queue will tell you it has \"almost\" something. That is implementation dependent.\r\n\r\nI will try to make the current API \"look like\" what it was before, but my feeling IMHO is that you rely too much on the underlying implementation and not the API contract (which is not the implementation).", ">But what I'm saying is that the goal of the decoder is not to give you a \"partial HttpData\" but a full one when ready.\r\n\r\nIf that is the case then why is there a method to retrieve the partial data?", "Good point ;-)\r\nFor me, it is the opportunity to know if there is any new HttpData ongoing, but not for sure.\r\n\r\nHowever, I was able to easily reproduce this bug (the changed behavior), by setting the first bytes of the new file to be CRLF. 
\r\nWhile it is not really a bug, since the decoder carries on and correctly finishes building the full `HttpData`, it indeed has an `HttpData` through `currentPartialHttpData()` but with an empty buffer at the first step, while it is ok at the end.\r\n\r\nNote that for a Disk based `HttpData`, using `HttpData.content()` or `HttpData.getByteBuf()` will read the `File` and create a newly allocated `ByteBuf` for it, giving a chance of an Out Of Memory exception (for a large file).\r\n\r\nI also fixed the default behavior such that when a Memory based `HttpData` has no data yet, it returns an empty buffer.\r\nNote that the current implementation does not ensure this buffer will remain the same until the `HttpData` is fully decoded.", "@jameskleeh You can check out my proposal to fix the behaviour.\r\n\r\nI believe it will run as expected now, even if it is not really a bug.", "@jameskleeh Could you try https://github.com/fredericBregier/netty/tree/improveMultipartAddingBuffer ?", "Yeah I’ll give it a go today\n\nOn Thu, Apr 8, 2021 at 7:56 AM Frédéric Brégier ***@***.***>\nwrote:\n\n> @jameskleeh <https://github.com/jameskleeh> Could you try\n> https://github.com/fredericBregier/netty/tree/improveMultipartAddingBuffer\n> ?\n>\n> —\n> You are receiving this because you were mentioned.\n> Reply to this email directly, view it on GitHub\n> <https://github.com/netty/netty/issues/11143#issuecomment-815712194>, or\n> unsubscribe\n> <https://github.com/notifications/unsubscribe-auth/AAMCVLIOCP4ZVP3MLRIIBS3THWKVVANCNFSM42PB7GYA>\n> .\n>\n", "@fredericBregier This is better, but still not ideal. The very first time the decoder is offered data, it results in the upload being created with a composite buffer where the first buffer is empty and the second one contains the data.\r\n\r\n<img width=\"795\" alt=\"Screen Shot 2021-04-08 at 3 39 03 PM\" src=\"https://user-images.githubusercontent.com/1583789/114086536-b8499d00-9880-11eb-8bde-b2275f063f23.png\">\r\n\r\nThis is a deviation from the previous behavior, where it would be a non-composite buffer. The buffer being a composite would not necessarily be a problem; however, its having 2 components is, because I'm relying on a chunk of data being offered to the decoder only resulting in a single chunk of data being added to any single upload.", "Hmm, I see. I will try to optimize one more time.\r\nI cannot prevent CompositeByteBuf due to the algorithm, but I can ensure that, if the first and current buffer is an empty one, then the second one will replace the first one. But if there is a third one, then it will necessarily be a Composite one.", "@jameskleeh I've done the new fix. You can rebase and recheck when you can.", "@fredericBregier Another step in the right direction, however I'm now finding that the upload never completes.", "@jameskleeh Very strange, since the only difference from the previous version is the following:\r\n\r\n } else if (byteBuf.readableBytes() == 0) {\r\n // Previous buffer is empty, so just replace it\r\n byteBuf.release();\r\n byteBuf = buffer;\r\n\r\nI will check, but I don't understand. Do you have a trace (error log)? It might be the release of the first empty buffer, but very strange then.", "Moreover, all JUnit tests are passing... So I don't get the reason. 
I double-checked, and I see nothing wrong.\r\nDo you have traces or error logs?", "Just to be sure I understand your code well: https://github.com/micronaut-projects/micronaut-core/blob/e67ca50cf2a778cb6c7354b1ecd2c7fe7d2910ed/http-server-netty/src/main/java/io/micronaut/http/server/netty/FormDataHttpContentProcessor.java#L111\r\n\r\n while (postRequestDecoder.hasNext()) {\r\n InterfaceHttpData data = postRequestDecoder.next();\r\n switch (data.getHttpDataType()) {\r\n case Attribute:\r\n Attribute attribute = (Attribute) data;\r\n messages.add(attribute); //2\r\n break;\r\n case FileUpload:\r\n FileUpload fileUpload = (FileUpload) data;\r\n if (fileUpload.isCompleted()) { //1\r\n messages.add(fileUpload);\r\n }\r\n break;\r\n default:\r\n // no-op\r\n }\r\n }\r\n\r\n InterfaceHttpData currentPartialHttpData = postRequestDecoder.currentPartialHttpData();\r\n if (currentPartialHttpData instanceof HttpData) {\r\n messages.add(currentPartialHttpData); //3\r\n }\r\n\r\nIn 1) you are checking if the FileUpload is completed, and if so, you add it to the messages list.\r\nIn 2) you are not checking this, while nothing ensures that this Attribute is completed (it can be, as for a file, in the middle of an HttpChunk). Can it be an issue?\r\nIn 3) you are adding a partial HttpData; it can be an Attribute already added in 2) just before. Moreover, if it is a FileUpload, when the FileUpload is over, it will be added another time.\r\n\r\nCould those be an issue?\r\n\r\n", "@fredericBregier That isn't an issue. I believe the check at 1) is actually redundant given only completed items get passed through the iterable of the decoder.\r\n\r\nBasically if you put a breakpoint at\r\n\r\n```\r\n postRequestDecoder.offer(httpContent);\r\n\r\n while (postRequestDecoder.hasNext()) {\r\n InterfaceHttpData data = postRequestDecoder.next(); //after here\r\n```\r\n\r\nYou will find that `data` is never set to the file upload", "@fredericBregier I've done some debugging and found this to be the issue: \r\n\r\nhttps://github.com/fredericBregier/netty/blob/improveMultipartAddingBuffer/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L1168\r\n\r\nThe readable bytes in this case were 6, but for some reason it is subtracting the delimiter length. That doesn't make sense to me since the delimiter wasn't found. The 6 bytes should be added to the upload, yes?", "@fredericBregier I changed this specific section of code back to how it is on the netty repo and my test is green\r\n\r\nhttps://github.com/netty/netty/blob/6724786dcc9fa38ba516dab97a04e2bbc17e81d9/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L1161\r\n", "@jameskleeh Thanks ! 
I will check and update ;-)\r\nGood catch !", "@fredericBregier Sorry, I forgot to mention I did change it to `lastLineBreak`", "@jameskleeh Could you give me the changes you've made?\r\n\r\nThe reason for subtracting the delimiter size is the following:\r\n- once we don't find the delimiter, it can mean\r\n - either it is not within the buffer at all\r\n - or it is only partially present (missing some bytes)\r\n- Therefore we could search for CRLF/LF only from this maximum position (delimiter length from the end)\r\n - If no CRLF/LF is found: the full buffer can be added\r\n - If a CRLF/LF is found: the buffer can be added only up to this position (the CRLF/LF can be followed by an incomplete delimiter)\r\n\r\nI may have made an overly strong assumption, so I've now changed it in loadDataMultipartOptimized:\r\n\r\n // Not found but however perhaps because incomplete so search LF or CRLF from the end within delimiter\r\n // possible partial last bytes\r\n int lastPosition = undecodedChunk.readableBytes() - bdelimiter.length;\r\n if (lastPosition < 0) {\r\n // Not enough bytes but can still try to find CRLF\r\n lastPosition = 0; // Change HERE\r\n }\r\n\r\nIndeed, if the lastPosition is < 0, then it means the buffer has less than the delimiter length, so we can afford to check for CRLF/LF from relative position 0.\r\n", "@jameskleeh With this change, I've got the following:\r\n\r\n- the delimiter is of size 42\r\n- the previous last chunk is of size 24 with half real data, half the beginning of the delimiter => the half is indeed added\r\n - decoder.currentPartialHttpData() is not null\r\n- the last chunk is of size 30 (the last part of the delimiter) => the HttpData is over\r\n - decoder.currentPartialHttpData() is null\r\n", "@fredericBregier I'll try with your updated branch now", "@jameskleeh Use the next one ;-) Bad commit", "@fredericBregier All tests green on my end. I had to do a couple of tweaks to handle no data being added to the upload, however that was probably something that should have been done regardless.", "Note that there are 2 other issues we've found during testing, but I haven't gotten to the bottom of them yet and they aren't related to this.", "@jameskleeh OK, thanks for your feedback!\r\nI will leave the issue open until you've gotten to the bottom of the new issues, in order to ensure they are not related.\r\n\r\nThank you for your help! It was really helpful!!", "@fredericBregier I have the other issues resolved. I appreciate your help getting this resolved. I hope this change can go into 4.1.64", "@jameskleeh Great! We will close this one as soon as the merge is done after review.\r\nThank you again!", "@jameskleeh The current review introduces some changes. When you have time, maybe you can try again to ensure it does not change the behavior again (it should not)?", "@fredericBregier I tried with the latest commit and it's still good", "@jameskleeh Thanks a lot!" ]
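To make the boundary arithmetic from the discussion above concrete, here is a minimal, hedged sketch of the clamping step; the chunk and delimiter sizes (24 and 42) come from the worked example in the comments, the extra `- 1` follows the CR-handling correction noted later in the review, and the class and method names are illustrative only, not Netty API.

```java
public final class DelimiterWindowSketch {
    /**
     * Earliest relative offset from which a trailing CRLF/LF must be searched,
     * assuming an incomplete delimiter may straddle two chunks. A partial
     * delimiter is at most delimiterLength - 1 bytes; one more byte is kept
     * so a CR preceding the LF is not missed.
     */
    static int safeSearchStart(int readableBytes, int delimiterLength) {
        int lastPosition = readableBytes - delimiterLength - 1;
        return lastPosition < 0 ? 0 : lastPosition; // clamp: tiny buffers are searched fully
    }

    public static void main(String[] args) {
        System.out.println(safeSearchStart(24, 42));   // 0 -> the whole (small) chunk is searched
        System.out.println(safeSearchStart(8192, 42)); // 8149 -> only the tail can hide a partial delimiter
    }
}
```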
[ "Why this change? There are a number of places in the code where we check if `byteBuf` is `null`. Also, when `delete` is called, the `byteBuf` is set to `null`.", "The English for these two comments reads weird. How about:\n\n> Note: this method will allocate a lot of memory, if the data is currently stored on the file system.", "I'd prefer this method was iterative rather than recursive. Recursion, especially when based on external input, can cause unpredictable stack usage. It's also harder on the JIT.", "Since we scan start to end, this method can be implemented in terms of `findLineBreak`, right?", "The `else` branch can be unwrapped since the `if` branch above never completes normally.", "So… we didn't find a complete delimiter, _but_ if we see a CRLF within a delimiters length of the end, then we _might_ have a delimiter that straddles two read calls i.e. two buffers. And that means we can't be sure that we won't have a delimiter in that byte range, so we can only add the content prior to the CRLF to the HttpData. And then in the next call, the data we didn't read here will be carried over, and get more added to it, and then we'll be able to tell if there's really a delimiter there or not.\n\nIs my understanding correct?", "The reason is the different behavior between Memory and Disk. When we allocate a Disk based, the buffer, not here, is like it is there. In Memory, it was not.\r\nTherefore, for those realying on existing buffer, even empty, is not correct (as reporting by the user in his issue). He used the `decoder.currentPartialHttpData()` in his code (legal), but then when he asked for the buffer, he's got \"null\" instead of an empty buffer when the buffer has not yet data pushed by the decoder.\r\n\r\nWhen the buffer is released, the buffer is set to null after releasing it.\r\nSo it does not change the global behavior but makes both implementation seemless the same, which was the main issue.\r\n\r\nNote that the change also gives more chance to use the current decoded buffer to fill the `currentHttpData`.", "It's the reverse. I would then propose:\r\n\r\n Note: this method will allocate a lot of memory, if the data is not currently stored on the file system.", "OK, let me try to break the recursion.", "Let me see (double check) if those two methods are indeed the same, just by changing the index value.\r\n\r\nI'm not that sure since `fineLineBreak` finds the \"first\" one, while `findLastLineBreak` finds the \"last\" one (so the recursion for the moment). And as it is not optimized to make a reverse reading (we cannot use `bytesBefore`)...", "So the response is no, the two methods differ (first vs last).\r\nHowever, since it is only from the end-delimiter.length, we can perhaps afford to use the same (small number of bytes to search in).", "? I don't get it.\r\nThe previous check is \"do we found CRLF/LF?\" : no, so all the buffer is OK.\r\nThe second check is \"do we found CRLF/LF?\": yes, so all the buffer up to this index is OK.\r\nIf the position is 0: no buffer to add since position is at startup of index\r\n\r\nNote: Same error than before here: missing the case where it is 0 but `lastPosition is > 0` (or -1 due to current error on CRLF instead of simple LF) => as the second check.", "Yes, exactly. The delimiter might be in the middle of 2 HttpChunks, so we cannot ensure the full buffer is OK.\r\n- We then search only between `end-delimiter.length` and `end` (as it is incomplete, at least 1 byte is missing there, so the LF at least at `end-delimiter` position). 
This prevents a CRLF/LF in the first position of the current buffer from being taken as the start of a delimiter (while it is probably not). So first we compute this new start position (`lastPosition`).\r\n- if `lastPosition < 0`, the buffer is too small to contain the `delimiter.length`, so we can afford to search within the full buffer (very small), so `lastPosition = 0`\r\n- if `lastPosition >= 0`\r\n  - If there is no CRLF/LF (`<0`), then OK, the full buffer is OK\r\n  - If there is a CRLF/LF (`>0`), then up to this position, the buffer is OK\r\n  - If the answer is 0, no part of the buffer shall be added at all.\r\n\r\n\r\nNote I found a possible error (not a real one, but not optimized): if there is a CRLF (not LF), but the delimiter is almost there (`delimiter.length-1`, just missing 1 byte), then the CR is at position -1, which is not good. So we should start at `end-delimiter.length-1`.", "In `AbstractMemoryHttpData`, the `getByteBuf` method just returns the contained `ByteBuf` instance, so no memory is allocated just from calling that method. Though for the `get` method, it does allocate a new byte array.\r\n\r\nFor `AbstractDiskHttpData`, both of those methods allocate a byte array.\r\n\r\nSo the only case that doesn't allocate a lot is `getByteBuf` on `AbstractMemoryHttpData`. So maybe the warning should be unconditional.", "Yes, right! I just wrote the exact opposite of my own logic, and of the reason I wrote this warning.", "Shall be kept", "Done", "Not the same (first vs last)", "Done", "Correction done", "I mean that\r\n\r\n```\r\nif … {\r\n    return …;\r\n} else if … {\r\n    return …;\r\n}\r\nreturn …;\r\n```\r\n\r\nCan be written as\r\n\r\n```\r\nif … {\r\n    return …;\r\n}\r\nif … {\r\n    return …;\r\n}\r\nreturn …;\r\n```\r\n\r\nIntellij likes to point this out.", "I was thinking something like this:\r\n\r\n```java\r\n    static int findLastLineBreak(ByteBuf buffer, int index) {\r\n        int candidate = findLineBreak(buffer, index);\r\n        int next;\r\n        while (candidate >= 0 && (next = findLineBreak(buffer, index + candidate)) > 0) {\r\n            candidate += next;\r\n        }\r\n        return candidate;\r\n    }\r\n```\r\n\r\nAny reason that can't work?", "Yes indeed, but only almost, since we need to recheck whether there was a CR at the first position, or an LF, in order to restart at candidate + 2 or + 1.\r\nHowever, simpler than the current version, I agree.", "I will change and check, of course.", "@chrisvest I checked with this version, which works. Is it ok for you?\r\n\r\n    static int findLastLineBreak(ByteBuf buffer, int index) {\r\n        int candidate = findLineBreak(buffer, index);\r\n        int findCRLF = 0;\r\n        if (candidate >= 0) {\r\n            if (buffer.getByte(index + candidate) == HttpConstants.CR) {\r\n                findCRLF = 2;\r\n            } else {\r\n                findCRLF = 1;\r\n            }\r\n            candidate += findCRLF;\r\n        }\r\n        int next;\r\n        while (candidate > 0 && (next = findLineBreak(buffer, index + candidate)) >= 0) {\r\n            candidate += next;\r\n            if (buffer.getByte(index + candidate) == HttpConstants.CR) {\r\n                findCRLF = 2;\r\n            } else {\r\n                findCRLF = 1;\r\n            }\r\n            candidate += findCRLF;\r\n        }\r\n        return candidate - findCRLF;\r\n    }", "Yes, that looks good. Thanks for explaining.", "Committed ;-)" ]
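As a sanity check of the first-vs-last semantics the review converged on, the following hedged sketch shows what the two helpers are expected to return on a concrete buffer. It assumes it is compiled in the `io.netty.handler.codec.http.multipart` package so the package-private `HttpPostBodyUtil` methods are visible, as the module's own tests do.

```java
package io.netty.handler.codec.http.multipart;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

// Sketch only: relies on package-private access to HttpPostBodyUtil.
public final class LineBreakSemanticsDemo {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.copiedBuffer("a\r\nb\r\nc", CharsetUtil.US_ASCII);
        // First line break: the CR of the first CRLF, at relative position 1.
        System.out.println(HttpPostBodyUtil.findLineBreak(buf, 0));     // prints 1
        // Last line break: the CR of the second CRLF, at relative position 4.
        System.out.println(HttpPostBodyUtil.findLastLineBreak(buf, 0)); // prints 4
        buf.release();
    }
}
```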
"2021-04-07T06:52:01Z"
[]
HttpPostMultipartRequestDecoder may not add content to an existing upload after being offered data
### Expected behavior Once a file upload object exists in the multipart request decoder but is not yet finished, offering more data to the decoder should populate the buffer of the file object ### Actual behavior The buffer is not created/updated ### Steps to reproduce - `git clone https://github.com/micronaut-projects/micronaut-core` - `git checkout upgrade-netty` - `./gradlew test-suite:test --tests "io.micronaut.upload.StreamUploadSpec.test the file is not corrupted with transferTo"` ### Netty version 4.1.60+ due to https://github.com/netty/netty/pull/11001 ### JVM version (e.g. `java -version`) openjdk version "1.8.0_282" OpenJDK Runtime Environment (AdoptOpenJDK)(build 1.8.0_282-b08) OpenJDK 64-Bit Server VM (AdoptOpenJDK)(build 25.282-b08, mixed mode) ### OS version (e.g. `uname -a`) Darwin MacBook-Pro.local 20.3.0 Darwin Kernel Version 20.3.0: Thu Jan 21 00:07:06 PST 2021; root:xnu-7195.81.3~1/RELEASE_X86_64 x86_64 This issue is a blocker for Micronaut to upgrade Netty. With the functionality as it is, it is impossible to read a chunk of the file and release it immediately, because new buffers are not set on the underlying file upload object. This line is the culprit: https://github.com/fredericBregier/netty/blob/6daeb0cc51d8689805c1a657e61d395450afec47/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java#L1187 In my case `posDelimiter` is 0, so the content is never added to the upload.
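Condensed from the regression test added below, the failing pattern looks roughly like this hedged sketch; the decoder is assumed to have already been fed the multipart headers of an in-flight file part, and the chunk size and class name are illustrative.

```java
import java.util.Arrays;

import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.DefaultHttpContent;
import io.netty.handler.codec.http.multipart.HttpData;
import io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder;

final class CrlfChunkSketch {
    // Sketch: `decoder` must already be mid-way through decoding a file part.
    static void offerChunkStartingWithCrlf(HttpPostMultipartRequestDecoder decoder) {
        byte[] body = new byte[8192];
        Arrays.fill(body, (byte) 1);
        body[0] = '\r'; // a CRLF at the very start is what drove posDelimiter to 0
        body[1] = '\n';
        decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(body)));
        // With the fix, the partial upload keeps exposing the accumulated bytes:
        HttpData partial = (HttpData) decoder.currentPartialHttpData();
        assert partial != null && partial.content() != null;
    }
}
```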
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java index 14071dad606..dc8dd02f1fa 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java @@ -43,6 +43,7 @@ public abstract class AbstractMemoryHttpData extends AbstractHttpData { protected AbstractMemoryHttpData(String name, Charset charset, long size) { super(name, charset, size); + byteBuf = EMPTY_BUFFER; } @Override @@ -109,6 +110,10 @@ public void addContent(ByteBuf buffer, boolean last) } else if (localsize == 0) { // Nothing to add and byteBuf already exists buffer.release(); + } else if (byteBuf.readableBytes() == 0) { + // Previous buffer is empty, so just replace it + byteBuf.release(); + byteBuf = buffer; } else if (byteBuf instanceof CompositeByteBuf) { CompositeByteBuf cbb = (CompositeByteBuf) byteBuf; cbb.addComponent(true, buffer); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java index 3f730c9e777..254ff2f964b 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java @@ -19,7 +19,6 @@ import io.netty.handler.codec.http.HttpConstants; import io.netty.handler.codec.http.HttpRequest; -import java.io.File; import java.io.IOException; import java.nio.charset.Charset; import java.util.ArrayList; @@ -38,6 +37,15 @@ * <li>MemoryAttribute, DiskAttribute or MixedAttribute</li> * <li>MemoryFileUpload, DiskFileUpload or MixedFileUpload</li> * </ul> + * A good example of releasing HttpData once all work is done is as follow:<br> + * <pre>{@code + * for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) { + * httpData.release(); + * factory.removeHttpDataFromClean(request, httpData); + * } + * factory.cleanAllHttpData(); + * decoder.destroy(); + * }</pre> */ public class DefaultHttpDataFactory implements HttpDataFactory { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java index 2e464225be1..266e566523a 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java @@ -121,7 +121,8 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { void delete(); /** - * Returns the contents of the file item as an array of bytes. + * Returns the contents of the file item as an array of bytes.<br> + * Note: this method will allocate a lot of memory, if the data is currently stored on the file system. * * @return the contents of the file item as an array of bytes. * @throws IOException @@ -129,7 +130,8 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { byte[] get() throws IOException; /** - * Returns the content of the file item as a ByteBuf + * Returns the content of the file item as a ByteBuf.<br> + * Note: this method will allocate a lot of memory, if the data is currently stored on the file system. 
* * @return the content of the file item as a ByteBuf * @throws IOException diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java index 13f75f55bf2..f174327d912 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java @@ -152,7 +152,7 @@ static int findEndOfString(String sb) { } /** - * Try to find LF or CRLF as Line Breaking + * Try to find first LF or CRLF as Line Breaking * * @param buffer the buffer to search in * @param index the index to start from in the buffer @@ -164,7 +164,7 @@ static int findLineBreak(ByteBuf buffer, int index) { int posFirstChar = buffer.bytesBefore(index, toRead, HttpConstants.LF); if (posFirstChar == -1) { // No LF, so neither CRLF - return -1; + return -1; } if (posFirstChar > 0 && buffer.getByte(index + posFirstChar - 1) == HttpConstants.CR) { posFirstChar--; @@ -172,6 +172,38 @@ static int findLineBreak(ByteBuf buffer, int index) { return posFirstChar; } + /** + * Try to find last LF or CRLF as Line Breaking + * + * @param buffer the buffer to search in + * @param index the index to start from in the buffer + * @return a relative position from index > 0 if LF or CRLF is found + * or < 0 if not found + */ + static int findLastLineBreak(ByteBuf buffer, int index) { + int candidate = findLineBreak(buffer, index); + int findCRLF = 0; + if (candidate >= 0) { + if (buffer.getByte(index + candidate) == HttpConstants.CR) { + findCRLF = 2; + } else { + findCRLF = 1; + } + candidate += findCRLF; + } + int next; + while (candidate > 0 && (next = findLineBreak(buffer, index + candidate)) >= 0) { + candidate += next; + if (buffer.getByte(index + candidate) == HttpConstants.CR) { + findCRLF = 2; + } else { + findCRLF = 1; + } + candidate += findCRLF; + } + return candidate - findCRLF; + } + /** * Try to find the delimiter, with LF or CRLF in front of it (added as delimiters) if needed * diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index ebf88f23360..52229b8297f 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -41,6 +41,7 @@ import java.util.Map; import java.util.TreeMap; +import static io.netty.buffer.Unpooled.EMPTY_BUFFER; import static io.netty.util.internal.ObjectUtil.*; /** @@ -1159,8 +1160,16 @@ private static boolean loadDataMultipartOptimized(ByteBuf undecodedChunk, String final byte[] bdelimiter = delimiter.getBytes(httpData.getCharset()); int posDelimiter = HttpPostBodyUtil.findDelimiter(undecodedChunk, startReaderIndex, bdelimiter, true); if (posDelimiter < 0) { - // Not found but however perhaps because incomplete so search LF or CRLF - posDelimiter = HttpPostBodyUtil.findLineBreak(undecodedChunk, startReaderIndex); + // Not found but however perhaps because incomplete so search LF or CRLF from the end. 
+ // Possible last bytes contain partially delimiter + // (delimiter is possibly partially there, at least 1 missing byte), + // therefore searching last delimiter.length +1 (+1 for CRLF instead of LF) + int lastPosition = undecodedChunk.readableBytes() - bdelimiter.length - 1; + if (lastPosition < 0) { + // Not enough bytes, but at most delimiter.length bytes available so can still try to find CRLF there + lastPosition = 0; + } + posDelimiter = HttpPostBodyUtil.findLastLineBreak(undecodedChunk, startReaderIndex + lastPosition); if (posDelimiter < 0) { // not found so this chunk can be fully added ByteBuf content = undecodedChunk.copy(); @@ -1172,18 +1181,21 @@ private static boolean loadDataMultipartOptimized(ByteBuf undecodedChunk, String undecodedChunk.readerIndex(startReaderIndex); undecodedChunk.writerIndex(startReaderIndex); return false; - } else if (posDelimiter > 0) { - // Not fully but still some bytes to provide: httpData is not yet finished since delimiter not found - ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter); - try { - httpData.addContent(content, false); - } catch (IOException e) { - throw new ErrorDataDecoderException(e); - } - rewriteCurrentBuffer(undecodedChunk, posDelimiter); + } + // posDelimiter is not from startReaderIndex but from startReaderIndex + lastPosition + posDelimiter += lastPosition; + if (posDelimiter == 0) { + // Nothing to add return false; } - // Empty chunk or so + // Not fully but still some bytes to provide: httpData is not yet finished since delimiter not found + ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter); + try { + httpData.addContent(content, false); + } catch (IOException e) { + throw new ErrorDataDecoderException(e); + } + rewriteCurrentBuffer(undecodedChunk, posDelimiter); return false; } // Delimiter found at posDelimiter, including LF or CRLF, so httpData has its last chunk
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java index 93991c375a5..40f2f400a4e 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java @@ -25,6 +25,7 @@ import io.netty.handler.codec.http.DefaultHttpRequest; import io.netty.handler.codec.http.DefaultLastHttpContent; import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpConstants; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpMethod; @@ -1003,27 +1004,44 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8)))); + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); byte[] body = new byte[bytesPerChunk]; Arrays.fill(body, (byte) 1); + // Set first bytes as CRLF to ensure it is correctly getting the last CRLF + body[0] = HttpConstants.CR; + body[1] = HttpConstants.LF; for (int i = 0; i < nbChunks; i++) { ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerChunk); - decoder.offer(new DefaultHttpContent(content)); // **OutOfMemory here** + decoder.offer(new DefaultHttpContent(content)); // **OutOfMemory previously here** + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); content.release(); } byte[] bsuffix1 = suffix1.getBytes(CharsetUtil.UTF_8); - byte[] lastbody = new byte[bytesLastChunk + bsuffix1.length]; - Arrays.fill(body, (byte) 1); + byte[] previousLastbody = new byte[bytesLastChunk - bsuffix1.length]; + byte[] lastbody = new byte[2 * bsuffix1.length]; + Arrays.fill(previousLastbody, (byte) 1); + previousLastbody[0] = HttpConstants.CR; + previousLastbody[1] = HttpConstants.LF; + Arrays.fill(lastbody, (byte) 1); + lastbody[0] = HttpConstants.CR; + lastbody[1] = HttpConstants.LF; for (int i = 0; i < bsuffix1.length; i++) { - lastbody[bytesLastChunk + i] = bsuffix1[i]; + lastbody[bsuffix1.length + i] = bsuffix1[i]; } - ByteBuf content2 = Unpooled.wrappedBuffer(lastbody, 0, lastbody.length); + ByteBuf content2 = Unpooled.wrappedBuffer(previousLastbody, 0, previousLastbody.length); + decoder.offer(new DefaultHttpContent(content2)); + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); + content2.release(); + content2 = Unpooled.wrappedBuffer(lastbody, 0, lastbody.length); decoder.offer(new DefaultHttpContent(content2)); + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); content2.release(); content2 = Unpooled.wrappedBuffer(suffix2.getBytes(CharsetUtil.UTF_8)); decoder.offer(new DefaultHttpContent(content2)); + assertNull(decoder.currentPartialHttpData()); content2.release(); decoder.offer(new DefaultLastHttpContent());
train
test
"2021-04-15T17:18:54"
"2021-04-06T16:23:49Z"
jameskleeh
val
netty/netty/11147_11150
netty/netty
netty/netty/11147
netty/netty/11150
[ "keyword_pr_to_issue" ]
6724786dcc9fa38ba516dab97a04e2bbc17e81d9
df53de5b687cd25ba1079b556318da2b773cf4f1
[ "`available()` only tells you how many bytes you can read without blocking; it doesn't tell if you're at the end of the stream.", "Actually it returns -1 or an error when at end of stream" ]
[]
"2021-04-09T10:54:51Z"
[]
ChunkedInputStream isEndOfInput blocks event loop
In all versions of Netty, the `isEndOfInput` implementation of `ChunkedStream` (the `ChunkedInput` wrapper around an `InputStream`) uses `in.read()`, which is blocking. It should instead use `available()`.
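The difference matters because `InputStream.read()` parks the calling thread until a byte arrives (or the stream ends), while `available()` answers immediately. The following self-contained sketch (the pipe and the one-second delay are illustrative, not part of Netty) shows the stall that would hit an event loop:

```java
import java.io.PipedInputStream;
import java.io.PipedOutputStream;

public final class AvailableVsRead {
    public static void main(String[] args) throws Exception {
        PipedOutputStream out = new PipedOutputStream();
        PipedInputStream in = new PipedInputStream(out);

        // available() returns at once: 0 bytes buffered, but not end-of-stream.
        System.out.println("available() = " + in.available());

        new Thread(() -> {
            try {
                Thread.sleep(1000); // simulate a slow producer
                out.write('x');
            } catch (Exception ignored) {
                // demo only
            }
        }).start();

        // read() blocks the caller until the producer writes -- on an event
        // loop thread this is exactly the stall described in the issue.
        long start = System.nanoTime();
        int b = in.read();
        System.out.printf("read() -> %d after ~%d ms%n", b, (System.nanoTime() - start) / 1_000_000);
    }
}
```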
[ "handler/src/main/java/io/netty/handler/stream/ChunkedStream.java" ]
[ "handler/src/main/java/io/netty/handler/stream/ChunkedStream.java" ]
[ "transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java" ]
diff --git a/handler/src/main/java/io/netty/handler/stream/ChunkedStream.java b/handler/src/main/java/io/netty/handler/stream/ChunkedStream.java index 8f67df20f8d..86e3f5d5c71 100644 --- a/handler/src/main/java/io/netty/handler/stream/ChunkedStream.java +++ b/handler/src/main/java/io/netty/handler/stream/ChunkedStream.java @@ -79,6 +79,9 @@ public boolean isEndOfInput() throws Exception { if (closed) { return true; } + if (in.available() > 0) { + return false; + } int b = in.read(); if (b < 0) {
diff --git a/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java b/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java index a859085aeae..299183d89d6 100644 --- a/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java +++ b/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelConfigTest.java @@ -155,7 +155,7 @@ public void testGetOptionWhenClosed() { if (!(e.getCause() instanceof ClosedChannelException)) { AssertionError error = new AssertionError( "Expected the suppressed exception to be an instance of ClosedChannelException."); - error.addSuppressed(e.getCause()); + error.addSuppressed(e); throw error; } }
val
test
"2021-04-01T12:50:23"
"2021-04-08T16:40:22Z"
davydotcom
val
netty/netty/11142_11167
netty/netty
netty/netty/11142
netty/netty/11167
[ "keyword_pr_to_issue" ]
e2daae9ac8e4b5c0a04f47b163157445915b949a
d34212439068091bcec29a8fad4df82f0a82c638
[ "Is it possible to have a solution similar to the one for `localhost`?\r\n\r\nhttps://github.com/netty/netty/blob/6724786dcc9fa38ba516dab97a04e2bbc17e81d9/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java#L684-L697", "@violetagg not sure what you are suggesting... can you maybe do a PR to show the idea ?", "@normanmaurer I see two options but none of them is good enough.\r\n\r\n- To use `System.getenv(\"COMPUTERNAME\")` and implement it in a way `localhost` is handled, but we use environment variable which I don't think is reliable and secure.\r\n- Another option is to use `InetAddress.getLocalHost().getHostName()` but this is blocking\r\n\r\nWdyt?\r\nI don't like both of them", "@violetagg couldn't we just call `InetAddress.getLocalHost().getHostName()` once in a static block ?", "> @violetagg couldn't we just call `InetAddress.getLocalHost().getHostName()` once in a static block ?\r\n\r\nYep and then apply the solution for `localhost`", "@violetagg can you do a PR ?", "> @violetagg can you do a PR ?\r\n\r\nok" ]
[ "Let add WINDOWS to the field name to make it clear this is only used on windows ", "done" ]
"2021-04-19T18:49:52Z"
[]
Netty DNS resolver fails to resolve computer's name on Windows
When running on a VM with Windows 10, the Netty DNS resolver cannot resolve the computer's name. ### Expected behavior When on Windows, the Netty DNS resolver is able to resolve the computer's name ### Actual behavior The current implementation throws an exception when one tries to resolve the computer's name ``` Caused by: java.net.UnknownHostException: failed to resolve 'DESKTOP-BHE9K15' after 2 queries at io.netty.resolver.dns.DnsResolveContext.finishResolve(DnsResolveContext.java:1013) at io.netty.resolver.dns.DnsResolveContext.tryToFinishResolve(DnsResolveContext.java:966) at io.netty.resolver.dns.DnsResolveContext.query(DnsResolveContext.java:414) at io.netty.resolver.dns.DnsResolveContext.tryToFinishResolve(DnsResolveContext.java:938) at io.netty.resolver.dns.DnsResolveContext.access$700(DnsResolveContext.java:63) at io.netty.resolver.dns.DnsResolveContext$2.operationComplete(DnsResolveContext.java:467) ``` ### Minimal yet complete reproducer code (or URL to code) ``` public class Application { public static void main(String[] args) throws Exception { // JDK InetAddress[] addresses = InetAddress.getAllByName("DESKTOP-BHE9K15"); System.out.println(Arrays.asList(addresses)); // JDK InetAddress localhost = InetAddress.getLocalHost(); System.out.println(localhost); NioEventLoopGroup group = new NioEventLoopGroup(); try { List<InetSocketAddress> result = DefaultAddressResolverGroup.INSTANCE .getResolver(group.next()) .resolveAll(InetSocketAddress.createUnresolved("DESKTOP-BHE9K15", 80)) .get(); System.out.println("Netty DefaultAddressResolverGroup " + result); DnsNameResolverBuilder builder = new DnsNameResolverBuilder(group.next()) .channelType(NioDatagramChannel.class); DnsNameResolver resolver = builder.build(); resolver.resolveAll("DESKTOP-BHE9K15") .get(); } finally { group.shutdownGracefully(); } } } ``` When the above code is executed, the result is: ``` [DESKTOP-BHE9K15/IPv4, DESKTOP-BHE9K15/IPv6] DESKTOP-BHE9K15/IPv4 Netty DefaultAddressResolverGroup [DESKTOP-BHE9K15/IPv4:80, DESKTOP-BHE9K15/[IPv6]:80] Exception in thread "main" java.util.concurrent.ExecutionException: java.net.UnknownHostException: failed to resolve 'DESKTOP-BHE9K15' after 2 queries ``` ### Netty version 4.1.63.Final ### JVM version (e.g. `java -version`) java 11 ### OS version (e.g. `uname -a`) Windows 10
[ "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java" ]
[ "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java" ]
[ "resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java" ]
diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java index bf594db0627..a3a514f2039 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java @@ -91,6 +91,7 @@ public class DnsNameResolver extends InetNameResolver { private static final InternalLogger logger = InternalLoggerFactory.getInstance(DnsNameResolver.class); private static final String LOCALHOST = "localhost"; + private static final String WINDOWS_HOST_NAME; private static final InetAddress LOCALHOST_ADDRESS; private static final DnsRecord[] EMPTY_ADDITIONALS = new DnsRecord[0]; private static final DnsRecordType[] IPV4_ONLY_RESOLVED_RECORD_TYPES = @@ -127,6 +128,14 @@ public class DnsNameResolver extends InetNameResolver { LOCALHOST_ADDRESS = NetUtil.LOCALHOST4; } } + + String hostName; + try { + hostName = PlatformDependent.isWindows() ? InetAddress.getLocalHost().getHostName() : null; + } catch (Exception ignore) { + hostName = null; + } + WINDOWS_HOST_NAME = hostName; } static { @@ -686,10 +695,14 @@ private InetAddress resolveHostsFileEntry(String hostname) { return null; } else { InetAddress address = hostsFileEntriesResolver.address(hostname, resolvedAddressTypes); - if (address == null && PlatformDependent.isWindows() && LOCALHOST.equalsIgnoreCase(hostname)) { + if (address == null && PlatformDependent.isWindows() && + (LOCALHOST.equalsIgnoreCase(hostname) || + (WINDOWS_HOST_NAME != null && WINDOWS_HOST_NAME.equalsIgnoreCase(hostname)))) { // If we tried to resolve localhost we need workaround that windows removed localhost from its // hostfile in later versions. // See https://github.com/netty/netty/issues/5386 + // Need a workaround for resolving the host (computer) name in case it cannot be resolved from hostfile + // See https://github.com/netty/netty/issues/11142 return LOCALHOST_ADDRESS; } return address;
diff --git a/resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java b/resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java index 3ba8f7aecae..bd16d47ba19 100644 --- a/resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java +++ b/resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java @@ -110,8 +110,10 @@ import static io.netty.handler.codec.dns.DnsRecordType.AAAA; import static io.netty.handler.codec.dns.DnsRecordType.CNAME; import static io.netty.handler.codec.dns.DnsRecordType.SRV; +import static io.netty.resolver.dns.DnsNameResolver.DEFAULT_RESOLVE_ADDRESS_TYPES; import static io.netty.resolver.dns.DnsServerAddresses.sequential; import static java.util.Collections.singletonList; +import static org.assertj.core.api.Assumptions.assumeThat; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; @@ -319,6 +321,18 @@ public class DnsNameResolverTest { StringUtil.EMPTY_STRING); } + private static final String HOST_NAME; + + static { + String hostName; + try { + hostName = PlatformDependent.isWindows() ? InetAddress.getLocalHost().getHostName() : null; + } catch (Exception ignore) { + hostName = null; + } + HOST_NAME = hostName; + } + private static final TestDnsServer dnsServer = new TestDnsServer(DOMAINS_ALL); private static final EventLoopGroup group = new NioEventLoopGroup(1); @@ -743,6 +757,34 @@ public void testResolveEmptyIpv6() { testResolve0(ResolvedAddressTypes.IPV6_ONLY, NetUtil.LOCALHOST6, StringUtil.EMPTY_STRING); } + @Test + public void testResolveLocalhostIpv4() { + assumeThat(PlatformDependent.isWindows()).isTrue(); + assumeThat(DEFAULT_RESOLVE_ADDRESS_TYPES).isNotEqualTo(ResolvedAddressTypes.IPV6_PREFERRED); + testResolve0(ResolvedAddressTypes.IPV4_ONLY, NetUtil.LOCALHOST4, "localhost"); + } + + @Test + public void testResolveLocalhostIpv6() { + assumeThat(PlatformDependent.isWindows()).isTrue(); + assumeThat(DEFAULT_RESOLVE_ADDRESS_TYPES).isEqualTo(ResolvedAddressTypes.IPV6_PREFERRED); + testResolve0(ResolvedAddressTypes.IPV6_ONLY, NetUtil.LOCALHOST6, "localhost"); + } + + @Test + public void testResolveHostNameIpv4() { + assumeThat(PlatformDependent.isWindows()).isTrue(); + assumeThat(DEFAULT_RESOLVE_ADDRESS_TYPES).isNotEqualTo(ResolvedAddressTypes.IPV6_PREFERRED); + testResolve0(ResolvedAddressTypes.IPV4_ONLY, NetUtil.LOCALHOST4, HOST_NAME); + } + + @Test + public void testResolveHostNameIpv6() { + assumeThat(PlatformDependent.isWindows()).isTrue(); + assumeThat(DEFAULT_RESOLVE_ADDRESS_TYPES).isEqualTo(ResolvedAddressTypes.IPV6_PREFERRED); + testResolve0(ResolvedAddressTypes.IPV6_ONLY, NetUtil.LOCALHOST6, HOST_NAME); + } + @Test public void testResolveNullIpv4() { testResolve0(ResolvedAddressTypes.IPV4_ONLY, NetUtil.LOCALHOST4, null); @@ -776,6 +818,34 @@ public void testResolveAllEmptyIpv6() { testResolveAll0(ResolvedAddressTypes.IPV6_ONLY, NetUtil.LOCALHOST6, StringUtil.EMPTY_STRING); } + @Test + public void testResolveAllLocalhostIpv4() { + assumeThat(PlatformDependent.isWindows()).isTrue(); + assumeThat(DEFAULT_RESOLVE_ADDRESS_TYPES).isNotEqualTo(ResolvedAddressTypes.IPV6_PREFERRED); + testResolveAll0(ResolvedAddressTypes.IPV4_ONLY, NetUtil.LOCALHOST4, "localhost"); + } + + @Test + public void testResolveAllLocalhostIpv6() { + assumeThat(PlatformDependent.isWindows()).isTrue(); + assumeThat(DEFAULT_RESOLVE_ADDRESS_TYPES).isEqualTo(ResolvedAddressTypes.IPV6_PREFERRED); + 
testResolveAll0(ResolvedAddressTypes.IPV6_ONLY, NetUtil.LOCALHOST6, "localhost"); + } + + @Test + public void testResolveAllHostNameIpv4() { + assumeThat(PlatformDependent.isWindows()).isTrue(); + assumeThat(DEFAULT_RESOLVE_ADDRESS_TYPES).isNotEqualTo(ResolvedAddressTypes.IPV6_PREFERRED); + testResolveAll0(ResolvedAddressTypes.IPV4_ONLY, NetUtil.LOCALHOST4, HOST_NAME); + } + + @Test + public void testResolveAllHostNameIpv6() { + assumeThat(PlatformDependent.isWindows()).isTrue(); + assumeThat(DEFAULT_RESOLVE_ADDRESS_TYPES).isEqualTo(ResolvedAddressTypes.IPV6_PREFERRED); + testResolveAll0(ResolvedAddressTypes.IPV6_ONLY, NetUtil.LOCALHOST6, HOST_NAME); + } + @Test public void testCNAMEResolveAllIpv4() throws IOException { testCNAMERecursiveResolve(true);
test
test
"2021-04-19T16:05:53"
"2021-04-05T16:29:40Z"
violetagg
val
netty/netty/11184_11188
netty/netty
netty/netty/11184
netty/netty/11188
[ "keyword_pr_to_issue" ]
63352b135a2a49ec91b5cf0d183ef744aa53a7a6
e7330490e3ff29146b7658e1e2a23593edfb4d5e
[ "Related to #11174 \r\n\r\nA fix will be proposed soon\r\n" ]
[ "@fredericBregier can you do this change in a separate PR and also add a unit test ?", "@normanmaurer I'm not sure I can. Let me explain.\r\nFirst, this is already in place in HttpPostStandardRequestDecoder, so it should be also within Multipart too.\r\n\r\nI notice this during PR since the deploy was wrong due to some Junit having a Leak detection error due to this missing.\r\n\r\nSo, in fact, there are already tests for that, but relying on leak detection step in CI/CD.\r\n\r\nFor Multipart, such as: \r\n- testMultipartRequestWithFileInvalidCharset\r\n- testMultipartRequestWithFieldInvalidCharset\r\n- testDecodeMalformedNotEncodedContentDispositionFieldParameters\r\n- testDecodeMalformedBadCharsetContentDispositionFieldParameters\r\n\r\nWhile on Standard one, we have:\r\n- testNotLeak\r\n- testNotLeakDirectBufferWhenWrapIllegalArgumentException\r\n- testNotLeakWhenWrapIllegalArgumentException\r\n- testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte0\r\n- testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte1\r\n- testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleHi\r\n- testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleLo\r\n\r\nThey all share the same spirit: allocating a Decoder with a wrong parameter, but request shall be released and as there is no Decoder, we cannot check it but the check on memory leak i the CI/CD get them if they are not correctly handled.\r\n\r\nIf I removed this, it will certainly failed during leak detection, as it was the case before I added this, and with equal action than to the other standard implementation.\r\n\r\nWDYT?", "Sounds good.. But please just do it in a separate PR. This will make it easier to review / revert etc if needed. ", "OK, I will remove it, but not sure it will not generate leak detetection errors on CI/CD then. \r\nAnd if is so, this PR could not be successful, we shall probably have both but in separate commit if you want. Let see what's happening when removing it.", "As I suspected, removing this is causing leak detection.\r\n\r\nThis should have been done when the standard decoder was changed, but it wasn't. Now, it's necessary.\r\nSo how can we proceed?", "All tests that goes to \"Checking for detected leak\" have the following similar output:\r\n\r\n Run ./.github/scripts/check_leak.sh build-leak.output\r\n ./.github/scripts/check_leak.sh build-leak.output\r\n shell: /usr/bin/bash -e {0}\r\n Leak detected, please inspect build log\r\n Error: Process completed with exit code 1.\r\n\r\nI repushed the commit with the correction on constructor, as it should be and in line with the other one for Standard decoder. If you have any suggestion, I will go through them.", "Reference of CI/CD was: https://github.com/netty/netty/pull/11188/checks?check_run_id=2440801961", "I factored out this change in its own PR and added a unit test... 
please have a look https://github.com/netty/netty/pull/11207", "OK, I will rebase once merged (review is OK ;-) )", "Rebase done, CI/CD on going...", "@fredericBregier can you explain me why this change ?", "Several reasons:\r\n\r\n- The decoder is not acting in the same way on every chunk : only the first one was `retainedSlice`\r\n- This introduces some bad usages on user's end where the original buffer (from Chunk) were retained and released as soon as possible (the original Chunk), involving memory leak\r\n- It simplifies a lot the mechanism, while it allocates a duplicate ByteBuf, it is as for the next chunks, and performances were not degraded, neither memory pressure (cf Flame on #11175 )" ]
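Since the review above is about buffer ownership, it is worth restating the cleanup pattern that avoids exactly this kind of leak. The hedged sketch below follows the usage documented on `DefaultHttpDataFactory`, with `factory`, `request` and `decoder` assumed to be the instances used while decoding the body.

```java
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.multipart.HttpDataFactory;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
import io.netty.handler.codec.http.multipart.InterfaceHttpData;

final class MultipartCleanupSketch {
    // Release everything once all work on the decoded body is done.
    static void releaseAll(HttpDataFactory factory, HttpRequest request, HttpPostRequestDecoder decoder) {
        for (InterfaceHttpData httpData : decoder.getBodyHttpDatas()) {
            httpData.release();                                 // drop our reference
            factory.removeHttpDataFromClean(request, httpData); // the factory must not re-release it
        }
        factory.cleanAllHttpData(); // dispose anything still tracked by the factory
        decoder.destroy();          // release the decoder's internal undecoded buffers
    }
}
```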
"2021-04-22T18:30:31Z"
[]
Memleak
### Expected behavior ### Actual behavior ![image](https://user-images.githubusercontent.com/20749545/115715628-e8c62680-a3aa-11eb-9373-e2dea504a1f6.png) 2021-04-22 19:17:28 805[ERROR][ResourceLeakDetector.java-319][lettuce-epollEventLoop-22-4]-LEAK: ByteBuf.release() was not called before it's garbage-collected. See https://netty.io/wiki/reference-counted-objects.html for more information. Recent access records: #1: io.netty.buffer.AdvancedLeakAwareByteBuf.toString(AdvancedLeakAwareByteBuf.java:742) io.netty.handler.codec.http.multipart.MemoryAttribute.getValue(MemoryAttribute.java:65) io.vertx.core.http.impl.HttpServerRequestImpl.endDecode(HttpServerRequestImpl.java:535) io.vertx.core.http.impl.HttpServerRequestImpl.onEnd(HttpServerRequestImpl.java:517) io.vertx.core.http.impl.HttpServerRequestImpl.handleEnd(HttpServerRequestImpl.java:509) io.vertx.core.http.impl.Http1xServerConnection.handleEnd(Http1xServerConnection.java:176) io.vertx.core.http.impl.Http1xServerConnection.handleContent(Http1xServerConnection.java:163) io.vertx.core.http.impl.Http1xServerConnection.handleMessage(Http1xServerConnection.java:140) io.vertx.core.impl.ContextImpl.executeTask(ContextImpl.java:366) io.vertx.core.impl.EventLoopContext.execute(EventLoopContext.java:43) io.vertx.core.impl.ContextImpl.executeFromIO(ContextImpl.java:229) io.vertx.core.net.impl.VertxHandler.channelRead(VertxHandler.java:163) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:93) io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtensionHandler.channelRead(WebSocketServerExtensionHandler.java:102) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:286) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:324) io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:296) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1533) io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1282) io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1329) io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:508) io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:447) 
io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:719) io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:655) io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:581) io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.lang.Thread.run(Thread.java:748) #2: io.netty.buffer.AdvancedLeakAwareByteBuf.writeByte(AdvancedLeakAwareByteBuf.java:544) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder$UrlDecoder.process(HttpPostStandardRequestDecoder.java:747) io.netty.buffer.AbstractByteBuf.forEachByteAsc0(AbstractByteBuf.java:1303) io.netty.buffer.CompositeByteBuf.forEachByteAsc0(CompositeByteBuf.java:678) io.netty.buffer.AbstractByteBuf.forEachByte(AbstractByteBuf.java:1283) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.decodeAttribute(HttpPostStandardRequestDecoder.java:666) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.setFinalBuffer(HttpPostStandardRequestDecoder.java:637) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.parseBodyAttributes(HttpPostStandardRequestDecoder.java:561) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.parseBody(HttpPostStandardRequestDecoder.java:375) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.offer(HttpPostStandardRequestDecoder.java:304) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.offer(HttpPostStandardRequestDecoder.java:49) io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.offer(HttpPostRequestDecoder.java:223) io.vertx.core.http.impl.HttpServerRequestImpl.onData(HttpServerRequestImpl.java:488) io.vertx.core.http.impl.HttpServerRequestImpl.handleContent(HttpServerRequestImpl.java:138) io.vertx.core.http.impl.Http1xServerConnection.handleContent(Http1xServerConnection.java:160) io.vertx.core.http.impl.Http1xServerConnection.handleMessage(Http1xServerConnection.java:140) io.vertx.core.impl.ContextImpl.executeTask(ContextImpl.java:366) io.vertx.core.impl.EventLoopContext.execute(EventLoopContext.java:43) io.vertx.core.impl.ContextImpl.executeFromIO(ContextImpl.java:229) io.vertx.core.net.impl.VertxHandler.channelRead(VertxHandler.java:163) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:93) io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtensionHandler.channelRead(WebSocketServerExtensionHandler.java:102) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:286) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:324) io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:296) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1533) io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1282) io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1329) io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:508) io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:447) io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:719) io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:655) io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:581) io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.lang.Thread.run(Thread.java:748) #3: 
io.netty.buffer.AdvancedLeakAwareByteBuf.writeByte(AdvancedLeakAwareByteBuf.java:544) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder$UrlDecoder.process(HttpPostStandardRequestDecoder.java:755) io.netty.buffer.AbstractByteBuf.forEachByteAsc0(AbstractByteBuf.java:1303) io.netty.buffer.CompositeByteBuf.forEachByteAsc0(CompositeByteBuf.java:678) io.netty.buffer.AbstractByteBuf.forEachByte(AbstractByteBuf.java:1283) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.decodeAttribute(HttpPostStandardRequestDecoder.java:666) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.setFinalBuffer(HttpPostStandardRequestDecoder.java:637) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.parseBodyAttributes(HttpPostStandardRequestDecoder.java:561) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.parseBody(HttpPostStandardRequestDecoder.java:375) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.offer(HttpPostStandardRequestDecoder.java:304) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.offer(HttpPostStandardRequestDecoder.java:49) io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.offer(HttpPostRequestDecoder.java:223) io.vertx.core.http.impl.HttpServerRequestImpl.onData(HttpServerRequestImpl.java:488) io.vertx.core.http.impl.HttpServerRequestImpl.handleContent(HttpServerRequestImpl.java:138) io.vertx.core.http.impl.Http1xServerConnection.handleContent(Http1xServerConnection.java:160) io.vertx.core.http.impl.Http1xServerConnection.handleMessage(Http1xServerConnection.java:140) io.vertx.core.impl.ContextImpl.executeTask(ContextImpl.java:366) io.vertx.core.impl.EventLoopContext.execute(EventLoopContext.java:43) io.vertx.core.impl.ContextImpl.executeFromIO(ContextImpl.java:229) io.vertx.core.net.impl.VertxHandler.channelRead(VertxHandler.java:163) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:93) io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtensionHandler.channelRead(WebSocketServerExtensionHandler.java:102) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:286) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:324) io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:296) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1533) io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1282) io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1329) io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:508) io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:447) io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:719) io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:655) io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:581) io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.lang.Thread.run(Thread.java:748) Created at: io.netty.buffer.UnpooledByteBufAllocator.newDirectBuffer(UnpooledByteBufAllocator.java:96) io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:187) io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:178) io.netty.buffer.AbstractByteBufAllocator.buffer(AbstractByteBufAllocator.java:115) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.decodeAttribute(HttpPostStandardRequestDecoder.java:664) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.setFinalBuffer(HttpPostStandardRequestDecoder.java:637) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.parseBodyAttributes(HttpPostStandardRequestDecoder.java:561) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.parseBody(HttpPostStandardRequestDecoder.java:375) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.offer(HttpPostStandardRequestDecoder.java:304) io.netty.handler.codec.http.multipart.HttpPostStandardRequestDecoder.offer(HttpPostStandardRequestDecoder.java:49) io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.offer(HttpPostRequestDecoder.java:223) io.vertx.core.http.impl.HttpServerRequestImpl.onData(HttpServerRequestImpl.java:488) io.vertx.core.http.impl.HttpServerRequestImpl.handleContent(HttpServerRequestImpl.java:138) io.vertx.core.http.impl.Http1xServerConnection.handleContent(Http1xServerConnection.java:160) io.vertx.core.http.impl.Http1xServerConnection.handleMessage(Http1xServerConnection.java:140) 
io.vertx.core.impl.ContextImpl.executeTask(ContextImpl.java:366) io.vertx.core.impl.EventLoopContext.execute(EventLoopContext.java:43) io.vertx.core.impl.ContextImpl.executeFromIO(ContextImpl.java:229) io.vertx.core.net.impl.VertxHandler.channelRead(VertxHandler.java:163) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:93) io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtensionHandler.channelRead(WebSocketServerExtensionHandler.java:102) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:286) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:324) io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:296) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1533) io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1282) io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1329) io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:508) io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:447) io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:719) io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:655) io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:581) 
io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.lang.Thread.run(Thread.java:748) ### Steps to reproduce ### Minimal yet complete reproducer code (or URL to code) ### Netty version 4.1.59.Final ### JVM version (e.g. `java -version`) openjdk version "1.8.0_262" ### OS version (e.g. `uname -a`) Linux host-10-33-176-104 4.18.0-147.5.1.2.h340.eulerosv2r9.x86_64 #1 SMP Sat Jan 30 09:00:44 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index 5099ee60ac4..490b1ffcc14 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -43,7 +43,6 @@ import java.util.Map; import java.util.TreeMap; -import static io.netty.buffer.Unpooled.EMPTY_BUFFER; import static io.netty.util.internal.ObjectUtil.*; /** @@ -336,13 +335,8 @@ public HttpPostMultipartRequestDecoder offer(HttpContent content) { ByteBuf buf = content.content(); if (undecodedChunk == null) { - undecodedChunk = isLastChunk ? - // Take a slice instead of copying when the first chunk is also the last - // as undecodedChunk.writeBytes will never be called. - buf.retainedSlice() : - // Maybe we should better not copy here for performance reasons but this will need - // more care by the caller to release the content in a correct manner later - // So maybe something to optimize on a later stage + undecodedChunk = + // Since the Handler will release the incoming later on, we need to copy it // // We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity // which is not really usable for us as we may exceed it once we add more bytes. @@ -958,8 +952,15 @@ protected InterfaceHttpData getFileUpload(String delimiter) { */ @Override public void destroy() { - // Release all data items, including those not yet pulled + // Release all data items, including those not yet pulled, only file based items cleanFiles(); + // Clean Memory based data + for (InterfaceHttpData httpData : bodyListHttpData) { + // Might have been already released by the user + if (httpData.refCnt() > 0) { + httpData.release(); + } + } destroyed = true; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java index d2ef677dbf0..bdbccfa9f2f 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java @@ -153,7 +153,7 @@ public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest reque this.factory = checkNotNull(factory, "factory"); try { if (request instanceof HttpContent) { - // Offer automatically if the given request is als type of HttpContent + // Offer automatically if the given request is as type of HttpContent // See #1089 offer((HttpContent) request); } else { @@ -287,13 +287,8 @@ public HttpPostStandardRequestDecoder offer(HttpContent content) { ByteBuf buf = content.content(); if (undecodedChunk == null) { - undecodedChunk = isLastChunk ? - // Take a slice instead of copying when the first chunk is also the last - // as undecodedChunk.writeBytes will never be called. - buf.retainedSlice() : - // Maybe we should better not copy here for performance reasons but this will need - // more care by the caller to release the content in a correct manner later - // So maybe something to optimize on a later stage. 
+ undecodedChunk = + // Since the Handler will release the incoming later on, we need to copy it // // We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity // which is not really usable for us as we may exceed it once we add more bytes. @@ -693,8 +688,15 @@ private static ByteBuf decodeAttribute(ByteBuf b, Charset charset) { */ @Override public void destroy() { - // Release all data items, including those not yet pulled + // Release all data items, including those not yet pulled, only file based items cleanFiles(); + // Clean Memory based data + for (InterfaceHttpData httpData : bodyListHttpData) { + // Might have been already released by the user + if (httpData.refCnt() > 0) { + httpData.release(); + } + } destroyed = true;
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java index 40f2f400a4e..665606fad83 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java @@ -88,7 +88,8 @@ private static void testBinaryStreamUpload(boolean withSpace) throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); - decoder.offer(new DefaultHttpContent(Unpooled.copiedBuffer(body, CharsetUtil.UTF_8))); + ByteBuf buf = Unpooled.copiedBuffer(body, CharsetUtil.UTF_8); + decoder.offer(new DefaultHttpContent(buf)); decoder.offer(new DefaultHttpContent(Unpooled.EMPTY_BUFFER)); // Validate it's enough chunks to decode upload. @@ -102,6 +103,7 @@ private static void testBinaryStreamUpload(boolean withSpace) throws Exception { data, upload.getString(CharsetUtil.UTF_8)); upload.release(); decoder.destroy(); + buf.release(); } } @@ -134,8 +136,8 @@ public void testFullHttpRequestUpload() throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // See https://github.com/netty/netty/issues/2544 @@ -181,8 +183,8 @@ public void testMultipartCodecWithCRasEndOfAttribute() throws Exception { assertNotNull(datar); assertEquals(datas[i].getBytes(CharsetUtil.UTF_8).length, datar.length); - req.release(); decoder.destroy(); + assertTrue(req.release()); } } @@ -215,8 +217,8 @@ public void testQuotedBoundary() throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // See https://github.com/netty/netty/issues/1848 @@ -276,6 +278,8 @@ public void testNoZeroOut() throws Exception { aDecodedData.release(); aDecoder.destroy(); + aSmallBuf.release(); + aLargeBuf.release(); } // See https://github.com/netty/netty/issues/2305 @@ -373,8 +377,8 @@ public void testFilenameContainingSemicolon() throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -403,8 +407,8 @@ public void testFilenameContainingSemicolon2() throws Exception { assertTrue(part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("tmp 0.txt", fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -434,8 +438,8 @@ public void testMultipartRequestWithoutContentTypeBody() { // Create decoder instance to test without any exception. 
final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -467,8 +471,8 @@ public void testDecodeOtherMimeHeaderFields() throws Exception { FileUpload fileUpload = (FileUpload) part1; byte[] fileBytes = fileUpload.get(); assertTrue("the filecontent should not be decoded", filecontent.equals(new String(fileBytes))); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -497,7 +501,7 @@ public void testMultipartRequestWithFileInvalidCharset() throws Exception { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof UnsupportedCharsetException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -530,7 +534,7 @@ public void testMultipartRequestWithFieldInvalidCharset() throws Exception { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof UnsupportedCharsetException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -581,8 +585,8 @@ public void testDecodeContentDispositionFieldParameters() throws Exception { assertTrue("the item should be a FileUpload", part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // https://github.com/netty/netty/pull/7265 @@ -617,8 +621,8 @@ public void testDecodeWithLanguageContentDispositionFieldParameters() throws Exc assertTrue("the item should be a FileUpload", part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // https://github.com/netty/netty/pull/7265 @@ -649,7 +653,7 @@ public void testDecodeMalformedNotEncodedContentDispositionFieldParameters() thr } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof ArrayIndexOutOfBoundsException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -681,7 +685,7 @@ public void testDecodeMalformedBadCharsetContentDispositionFieldParameters() thr } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof UnsupportedCharsetException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -712,8 +716,8 @@ public void testDecodeMalformedEmptyContentTypeFieldParameters() throws Exceptio assertTrue(part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("tmp-0.txt", fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // https://github.com/netty/netty/issues/8575 @@ -832,8 +836,8 @@ public void testDecodeWithLanguageContentDispositionFieldParametersForFix() thro FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -869,8 +873,8 @@ public void testDecodeFullHttpRequestWithUrlEncodedBody() throws Exception { assertTrue(attr.getByteBuf().isDirect()); assertEquals("los angeles", attr.getValue()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -987,7 +991,8 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo int 
bytesLastChunk = 10000; int fileSize = bytesPerChunk * nbChunks + bytesLastChunk; // set Xmx to a number lower than this and it crashes - String prefix = "--861fbeab-cd20-470c-9609-d40a0f704466\n" + + String delimiter = "--861fbeab-cd20-470c-9609-d40a0f704466"; + String prefix = delimiter + "\n" + "Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n" + "Content-Type: image/jpeg\n" + "Content-Length: " + fileSize + "\n" + @@ -1003,8 +1008,10 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo request.headers().set("content-length", prefix.length() + fileSize + suffix.length()); HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); - decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8)))); + ByteBuf buf = Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8)); + decoder.offer(new DefaultHttpContent(buf)); assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); + buf.release(); byte[] body = new byte[bytesPerChunk]; Arrays.fill(body, (byte) 1); @@ -1020,11 +1027,16 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo byte[] bsuffix1 = suffix1.getBytes(CharsetUtil.UTF_8); byte[] previousLastbody = new byte[bytesLastChunk - bsuffix1.length]; + byte[] bdelimiter = delimiter.getBytes(CharsetUtil.UTF_8); byte[] lastbody = new byte[2 * bsuffix1.length]; Arrays.fill(previousLastbody, (byte) 1); previousLastbody[0] = HttpConstants.CR; previousLastbody[1] = HttpConstants.LF; Arrays.fill(lastbody, (byte) 1); + // put somewhere a not valid delimiter + for (int i = 0; i < bdelimiter.length; i++) { + previousLastbody[i + 10] = bdelimiter[i]; + } lastbody[0] = HttpConstants.CR; lastbody[1] = HttpConstants.LF; for (int i = 0; i < bsuffix1.length; i++) { @@ -1055,12 +1067,18 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo } assertTrue("Capacity should be less than 1M", decoder.getCurrentAllocatedCapacity() < 1024 * 1024); - for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) { - httpData.release(); - factory.removeHttpDataFromClean(request, httpData); + InterfaceHttpData[] httpDatas = decoder.getBodyHttpDatas().toArray(new InterfaceHttpData[0]); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals("Before cleanAllHttpData should be 1", 1, httpData.refCnt()); } factory.cleanAllHttpData(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals("Before cleanAllHttpData should be 1 if in Memory", inMemory? 1 : 0, httpData.refCnt()); + } decoder.destroy(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals("RefCnt should be 0", 0, httpData.refCnt()); + } } @Test @@ -1121,7 +1139,9 @@ private void commonNotBadReleaseBuffersDuringDecoding(HttpDataFactory factory, b for (int i = 0; i < bp2.length; i++) { prefix[bp1.length + 2 + i] = bp2[i]; } - decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix))); + ByteBuf buf = Unpooled.wrappedBuffer(prefix); + decoder.offer(new DefaultHttpContent(buf)); + buf.release(); byte[] body = new byte[bytesPerItem]; Arrays.fill(body, (byte) rank); ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerItem);
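Editor's note on the patches above: they change two ownership rules at once — offer() now always copies the incoming chunk (instead of taking a retainedSlice() for a first-and-last chunk), and destroy() now also releases memory-backed items whose refCnt is still positive. A minimal, hypothetical caller sketch of that lifecycle follows; the class and method names (MultipartUploadSketch, onRequest, onContent) are illustrative only and not part of the record:

```java
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
import io.netty.handler.codec.http.multipart.InterfaceHttpData;

final class MultipartUploadSketch {
    // Memory-backed factory: its items are what the patched destroy() now releases.
    private final DefaultHttpDataFactory factory = new DefaultHttpDataFactory(false);
    private HttpPostRequestDecoder decoder;

    void onRequest(HttpRequest request) {
        decoder = new HttpPostRequestDecoder(factory, request);
    }

    void onContent(HttpContent content) {
        // After the patch, offer() copies the incoming buffer for every chunk,
        // so the caller keeps ownership of 'content' and releases it as usual.
        decoder.offer(content);
        try {
            while (decoder.hasNext()) {
                InterfaceHttpData data = decoder.next();
                // ... consume data; releasing it here is optional, because the
                // patched destroy() skips items whose refCnt already dropped to 0.
            }
        } catch (HttpPostRequestDecoder.EndOfDataDecoderException e) {
            // no more complete items available yet
        }
        boolean last = content instanceof LastHttpContent;
        content.release();
        if (last) {
            decoder.destroy(); // now also releases remaining memory-based items
        }
    }
}
```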
train
test
"2021-04-29T10:26:44"
"2021-04-22T12:42:13Z"
jr981008
val
netty/netty/11175_11188
netty/netty
netty/netty/11175
netty/netty/11188
[ "keyword_issue_to_pr", "keyword_pr_to_issue" ]
63352b135a2a49ec91b5cf0d183ef744aa53a7a6
e7330490e3ff29146b7658e1e2a23593edfb4d5e
[ "There are some benchmarks within Netty that shows exactly the contrary (whatever Memory, Mixed or Disk Factory based).\r\nThe results are not using big file however, just 512KB but with more than 6 000 upload/s, so very quick, and about 3 GB/s.\r\nI've tested by changing size to 25 MB, increasing Factory size to 1GB, and it gives 131 upload/s, so 3.2 GB/s (so less than 1s to upload).\r\n\r\nTrace of benchmark using this 25 MB file test: (HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel is the reference in your cases)\r\n\r\n Benchmark Mode Cnt Score Error Units\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 0,132 ± 0,005 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 0,131 ± 0,005 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,118 ± 0,003 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 0,131 ± 0,001 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 0,124 ± 0,004 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 0,125 ± 0,005 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,075 ± 0,002 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 0,125 ± 0,004 ops/ms\r\n \r\n [817.169s][info ][gc,heap,exit ] Heap\r\n [817.169s][info ][gc,heap,exit ] garbage-first heap total 522240K, used 69233K [0x0000000602400000, 0x0000000800000000)\r\n [817.169s][info ][gc,heap,exit ] region size 2048K, 35 young (71680K), 2 survivors (4096K)\r\n [817.169s][info ][gc,heap,exit ] Metaspace used 17320K, capacity 17729K, committed 17792K, reserved 1064960K\r\n [817.169s][info ][gc,heap,exit ] class space used 1790K, capacity 1906K, committed 1920K, reserved 1048576K\r\n\r\nI also test using the Junit tests testBIgFileUploadDelimiterInMiddleChunkDecoderXxxxFactory (where Xxx can be Disk, Memory, Mixed) using a 25 MB file, and it gives less than 0.5s (even 0,05s with Memory Factory) and only 1 MB of memory allocated bu the Factory.\r\n\r\nMaybe you are using a Factory disk based (or mixed with a lower limit than file size), such that it depends on write disk speed (therefore on your disk speed)?\r\n\r\nAlso, regarding your other issue, have you clean the factory and HttpData correctly ?\r\n\r\n for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) {\r\n httpData.release();\r\n factory.removeHttpDataFromClean(request, httpData);\r\n }\r\n factory.cleanAllHttpData();\r\n decoder.destroy();\r\n\r\nCould you setup a simple test without your Micronaut code ?\r\n", "@fredericBregier Our test cases that exercise the same code seem to perform OK as well. I've only experienced this with curl. I don't know how to setup a test given the complexity and the number of factors. Simply changing the Netty version, with the Micronaut code exactly the same, shows the issue. That leads me to believe the issue lies in Netty.", "But When I did tests using either Junit or Benchmark from Netty code, it appears that it seems not an issue of Netty.\r\nSo I believe there is something wrong in the way you create the Factory or free up the factory and the decoder and the HttpData.\r\nBut as the code or Micronaut is huge, I cannot deep into it. 
;-)", "@fredericBregier Unfortunately if that is the approach then I believe we will be stuck on a relatively old version of Netty until we can investigate alternatives", "There is also an example within Netty that starts a simple Http Web server with multipart activated.\r\nSee https://github.com/netty/netty/tree/4.1/example/src/main/java/io/netty/example/http/upload\r\nYou can use your own browser (using inspection to see the speed from browser point of view).\r\nI've uploaded a file of 420 MB in 3.2s (131 MB/s) and I repeat the operation, with still 370 MB max memory associated with the Java Web server.\r\n\r\nSo I believe the issue is not within Netty but the way you use it, sorry.", "@jameskleeh what a reproducer + run the profiler in the parallel?", "Guys the fact remains that in a patch release of Netty behaviour has regressed such that we have memory leaks and performance problems which cannot be acceptable. One thing is these changes go into a new minor or major versions, but patch versions surely have to maintain behaviour and if a regression happens in a patch it needs to be taken seriously. \r\n\r\nCan the changes that caused this regression be rolled back if they are not production ready? Micronaut is a large user of Netty with many users and it is disheartening to see feedback addressed with \"well our tests pass so sorry\"", "It seems as well that Vertx is having Memory leak issues in a similar area #11184", "@graemerocher I agree for most of what you wrote, but where is the reproducer?\r\n\r\nI cannot go deep within either Micronaut or Vertx. I have to have a reproducer to understand.\r\n\r\nNote that the previous version (58 or before) was leaking a lot (memory, speed, wrong assignment with loss of data, unkown from end user). And we put much effort to add as much as possible Junit tests (or benchmarks) to ensure nothing is broken and even that those issues (memoey, speed, wrong assignment with loss of data, unkown from end user) were fixed.\r\n\r\nIf this so reproducible (VertX, Micronaut), then it should be easy to have a simple reproducer, right?\r\n\r\nLook at the previous issue that was raised by @jameskleeh , he gives me enough elements to have a reproducer, and then I was able to fix it.\r\n\r\nIt seems, for what I believe, a bad usage of the Factory and Decoder, and in particular, the way to release all items.\r\nI proposed to make a change to simplify it, but as it was a change of behavior (the way Memory based vs Disk based Factories are working), and most of all because I use a guard such as `buffer.refCnt() > 0` to release the underlying buffers, it was not accepted and I just then ensure the documentation says how it should be done.\r\n\r\n for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) {\r\n httpData.release();\r\n factory.removeHttpDataFromClean(request, httpData);\r\n }\r\n factory.cleanAllHttpData();\r\n decoder.destroy();\r\n\r\nIt might be releated to a wrong usage of the API, due to its complexity.\r\n\r\nI believe that we should add (to be backward compatible) a special release method on both Factory and Decoder, to ensure that everything is clean up. But then, you will have to adapt the code, whatever the way.\r\n\r\nSo don't take me bad. Without help, I cannot help either. That's all I wrote.", "Thanks for the clarification. A reproducer was provided with the issue report which currently reproduces the issue. You are asking for a pure Netty reproducer? 
That is very complex to set up given that Micronaut has a complete pipeline that is not easy to reproduce standalone. ", "From the other thread on the other issue (I wrongly mixed the comments):\r\n\r\nJavadoc for Factory:\r\n\r\n /**\r\n * Remove all InterfaceHttpData from virtual File storage from clean list for all requests\r\n */\r\n void cleanAllHttpData();\r\n\r\nIt means that only \"file\" based ones are released there, not \"memory\" based ones.\r\n\r\nThen on Decoder:\r\n\r\n /**\r\n * Destroy the {@link HttpPostMultipartRequestDecoder} and release all it resources. After this method\r\n * was called it is not possible to operate on it anymore.\r\n */\r\n @Override\r\n public void destroy() {\r\n // Release all data items, including those not yet pulled\r\n cleanFiles();\r\n\r\n destroyed = true;\r\n\r\n if (undecodedChunk != null && undecodedChunk.refCnt() > 0) {\r\n undecodedChunk.release();\r\n undecodedChunk = null;\r\n }\r\n }\r\n\r\nExcept that it calls `cleanFiles()` which calls ` factory.cleanRequestHttpData(request);` whose Javadoc says:\r\n\r\n /**\r\n * Remove all InterfaceHttpData from virtual File storage from clean list for the request\r\n *\r\n * @param request associated request\r\n */\r\n void cleanRequestHttpData(HttpRequest request);\r\n\r\nSo yes, there is an issue, since only \"Disk\" based ones are freed. And your code is wrong.\r\nThis is what I point out.\r\nFor your information, this issue was introduced 2 years ago (not by the last changes).\r\n\r\nSo, in order to propose something, what I can propose is to change it as follows:\r\n\r\n\r\n /**\r\n * Destroy the {@link HttpPostMultipartRequestDecoder} and release all it resources. After this method\r\n * was called it is not possible to operate on it anymore.\r\n */\r\n @Override\r\n public void destroy() {\r\n // Release all data items, including those not yet pulled\r\n cleanFiles();\r\n \r\n // Explicitly remove all HttpData but only if refCnt is > 0\r\n for (InterfaceHttpData httpData: getBodyHttpDatas()) {\r\n if (httpData.refCnt() > 0) {\r\n httpData.release();\r\n }\r\n factory.removeHttpDataFromClean(request, httpData);\r\n }\r\n destroyed = true;\r\n\r\n if (undecodedChunk != null && undecodedChunk.refCnt() > 0) {\r\n undecodedChunk.release();\r\n undecodedChunk = null;\r\n }\r\n }\r\n", "You can do that however that also isn't related to this issue. This is about the speed of the upload, which can be demonstrated before any data is released/destroyed", "@jameskleeh as @graemerocher referred to a memory issue, I believe that it was also a good place to put the same.\r\n\r\nNow considering this: I really mismatched between your 2 issues. Too fast reading and answering, my bad.\r\n\r\nThis exact issue is related to the performance issue, for which I already demonstrated that currently no issue is found without a reproducer.\r\nSorry about that...", "I referred to both. To quote\r\n\r\n> Netty behaviour has regressed such that we have memory leaks and performance problems ", "Hey I am late as I just returned from vacation and have a huge backlog. That said I can try to have a look next week as well. 
As we know in which release the issue started it should be possible to figure out what is wrong ", "@normanmaurer Thanks, since on performance issue, I don't know how to find out in such a huge code the reason.\r\nOn memory issue (other issue), I figured out (2 years ago change that I didn't do) and propose a fix right now.", "@jameskleeh I checked your reproducer and got the memory leak as well with error:\r\n\r\n```\r\njava.lang.OutOfMemoryError: Direct buffer memory\r\n at java.base/java.nio.Bits.reserveMemory(Bits.java:175)\r\n at java.base/java.nio.DirectByteBuffer.<init>(DirectByteBuffer.java:118)\r\n at java.base/java.nio.ByteBuffer.allocateDirect(ByteBuffer.java:317)\r\n at io.netty.buffer.PoolArena$DirectArena.allocateDirect(PoolArena.java:645)\r\n at io.netty.buffer.PoolArena$DirectArena.newChunk(PoolArena.java:621)\r\n at io.netty.buffer.PoolArena.allocateNormal(PoolArena.java:204)\r\n at io.netty.buffer.PoolArena.tcacheAllocateNormal(PoolArena.java:188)\r\n at io.netty.buffer.PoolArena.allocate(PoolArena.java:138)\r\n at io.netty.buffer.PoolArena.reallocate(PoolArena.java:288)\r\n at io.netty.buffer.PooledByteBuf.capacity(PooledByteBuf.java:118)\r\n at io.netty.buffer.AbstractByteBuf.ensureWritable0(AbstractByteBuf.java:307)\r\n at io.netty.buffer.AbstractByteBuf.ensureWritable(AbstractByteBuf.java:282)\r\n at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1105)\r\n at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1098)\r\n at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1089)\r\n at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.offer(HttpPostMultipartRequestDecoder.java:340)\r\n at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.offer(HttpPostMultipartRequestDecoder.java:52)\r\n at io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.offer(HttpPostRequestDecoder.java:223)\r\n at io.micronaut.http.server.netty.FormDataHttpContentProcessor.onData(FormDataHttpContentProcessor.java:113)\r\n at io.micronaut.http.server.netty.AbstractHttpContentProcessor.doOnNext(AbstractHttpContentProcessor.java:78)\r\n at io.micronaut.http.server.netty.AbstractHttpContentProcessor.doOnNext(AbstractHttpContentProcessor.java:36)\r\n at io.micronaut.core.async.subscriber.CompletionAwareSubscriber.onNext(CompletionAwareSubscriber.java:52)\r\n at io.micronaut.http.netty.reactive.HandlerPublisher.publishMessage(HandlerPublisher.java:378)\r\n at io.micronaut.http.netty.reactive.HandlerPublisher.channelRead(HandlerPublisher.java:334)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.micronaut.http.netty.stream.HttpStreamsHandler.handleReadHttpContent(HttpStreamsHandler.java:292)\r\n at io.micronaut.http.netty.stream.HttpStreamsHandler.channelRead(HttpStreamsHandler.java:257)\r\n at io.micronaut.http.netty.stream.HttpStreamsServerHandler.channelRead(HttpStreamsServerHandler.java:121)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:103)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:103)\r\n at io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:93)\r\n at io.netty.handler.codec.http.HttpServerKeepAliveHandler.channelRead(HttpServerKeepAliveHandler.java:64)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.netty.handler.flow.FlowControlHandler.dequeue(FlowControlHandler.java:200)\r\n at io.netty.handler.flow.FlowControlHandler.read(FlowControlHandler.java:139)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeRead(AbstractChannelHandlerContext.java:686)\r\n at io.netty.channel.AbstractChannelHandlerContext.read(AbstractChannelHandlerContext.java:671)\r\n at io.micronaut.http.netty.reactive.HandlerPublisher.requestDemand(HandlerPublisher.java:163)\r\n at io.micronaut.http.netty.stream.HttpStreamsHandler$2.requestDemand(HttpStreamsHandler.java:248)\r\n\r\n```\r\n\r\nHowever, reproducer says it uses` 4.1.59`, while in the ticket description you say \"This started happening in 4.1.60 and is present in 4.1.63\". Could you please clarify?\r\n\r\nAlso, I can't reproduce the memory leak with uncommented \"4.1.63.Final\". But I see visible performance degradation. Could you please clarify - with `4.1.63.Final` do you see a memory leak or performance issue or both?\r\n\r\nStacktrace reminds me of this issue - https://github.com/netty/netty/issues/10281 I fixed it by overriding the `HttpPostMultipartRequestDecoder`. We have a lot of upload tests with paranoid level and everything is ok with the overrided version (the use case is the same as in your example).", "@doom369 The `upload_perf` branch of the repo uses `4.1.63`, so that is designed to show the performance issue. I was relatively certain I did reproduce the memory leak in `4.1.63`, however I just went through the exercise of waiting for x requests and I can not reproduce it either. I'll close the other issue and we can just leave this one for the performance issue. 
I appreciate your help\r\n", "@jameskleeh @fredericBregier here are my findings so far.\r\n\r\nFlame Graph for (netty 63 where performance issue is present):\r\n\r\n[flame_63.zip](https://github.com/netty/netty/files/6369135/flame_63.zip)\r\n\r\nFlame Graph for (netty 59 where no performance issue, but memory leak is present):\r\n\r\n[flame_59.zip](https://github.com/netty/netty/files/6369137/flame_59.zip)\r\n\r\nSo the problem seems to be in the `io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.loadDataMultipartOptimized` method. @fredericBregier could you please take a look?\r\n\r\nI wasn't able to reproduce it in our tests. (Probably because we have overridden `HttpPostMultipartRequestDecoder`.)\r\nBut in the provided reproducer of @jameskleeh the upload is ~100 times slower than it should be.\r\n", "@doom369 Thank you!\r\nStrange, but as there is also another improvement in the current stream (#11145), could you recheck against the current 4.1 branch?\r\n\r\nStrange, since the difference between the previous behaviour (59) and (63) is:\r\n- 59 is using getByte for each and every byte when searching for the delimiter, which causes a huge cost, in particular using PARANOID detection mode, but even on SIMPLE or NONE detection mode\r\n- 63 is using bytesBefore to search for the delimiter, which is really more efficient, whatever the detection mode, even if PARANOID is still slower (independent of this codec, related to the way detection mode changed)\r\n- current 4.1 (including the patch) is in addition using a more efficient way to search for CRLF when no delimiter is found, by limiting the search interval from the end of the buffer (for instance if the buffer is 10K, it will only search in the last \"delimiter.size\" interval)\r\n\r\nAnd I do use a benchmark with async-profiler and it shows the great improvement (comparing 59 to after), not the contrary.\r\n\r\nI will try to redo an async-profiler run to try to understand, but if you can at the same time redo it using the last 4.1 version from github (including the last merged patch), that could be useful.", "@fredericBregier I'd be happy to test with the snapshot, however it was a great pain to get that working before. Are there instructions somewhere on how to generate snapshot artifacts locally? I can run `mvn install` but that doesn't produce the OS-specific binaries for epoll etc..
Running a slightly modified Junit within `HttpPostRequestDecoderTest`: `testBIgFileUploadDelimiterInMiddleChunkDecoderMemoryFactory` and also Mixed and Disk Factory\r\n\r\n### Junit tests as performance tests\r\n\r\nChanges in `HttpPostRequestDecoderTest` renamed into `HttpMultipartBenchamrkTemporaryTest`:\r\n\r\n\r\n @Test\r\n public void testBIgFileUploadDelimiterInMiddleChunkDecoderDiskFactory() throws IOException {\r\n // Factory using Disk mode\r\n HttpDataFactory factory = new DefaultHttpDataFactory(true);\r\n for (int i = 0; i < 100; i++) { // CHANGE: Add LOOP but only 100 since too long due to disk access\r\n commonTestBigFileDelimiterInMiddleChunk(factory, false);\r\n }\r\n }\r\n\r\n @Test\r\n public void testBIgFileUploadDelimiterInMiddleChunkDecoderMemoryFactory() throws IOException, InterruptedException {\r\n // Factory using Memory mode\r\n Thread.sleep(20000);\r\n HttpDataFactory factory = new DefaultHttpDataFactory(false);\r\n for (int i = 0; i < 6000; i++) { // CHANGE: Add LOOP\r\n commonTestBigFileDelimiterInMiddleChunk(factory, true);\r\n }\r\n }\r\n\r\n @Test\r\n public void testBIgFileUploadDelimiterInMiddleChunkDecoderMixedFactory() throws IOException {\r\n // Factory using Mixed mode, where file shall be on Disk\r\n HttpDataFactory factory = new DefaultHttpDataFactory(10000);\r\n for (int i = 0; i < 100; i++) { // CHANGE: Add LOOP but only 100 since too long due to disk access\r\n commonTestBigFileDelimiterInMiddleChunk(factory, false);\r\n }\r\n }\r\n\r\n private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, boolean inMemory)\r\n throws IOException {\r\n int nbChunks = 300; // CHANGE here from 30 to 300 to get 30 MB transfer\r\n int bytesPerChunk = 100000;\r\n int bytesLastChunk = 10000;\r\n\r\nTimes are the following:\r\n- V59 : about 12 minutes for each (Disk or Mixed Factory with 100 about the same time than 6000 using Memory Factory) (except PARANOID mode where more than 2 hours and need to stop)\r\n- V64 : about 2 minutes for each (whatever PARANOID or not)\r\n\r\nI generate only for Memory based the Flame and standard async-profile outputs, both for V59 and V64, using or not PARANOID level. 
I had to stop after 2 hours the V59 in PARANOID mode since still not ending.\r\n\r\n[TestV59vsV64.zip](https://github.com/netty/netty/files/6380056/TestV59vsV64.zip)\r\n\r\n\r\n### On Benchmarks side\r\n\r\nIt uses lot of small files or a big one, depending on the tests, and use all kind of leak detection from NONE to PARANOID\r\n\r\n- V59\r\n\r\n Benchmark Mode Cnt Score Error Units\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 0,110 ± 0,115 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 0,678 ± 0,295 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,001 ± 0,001 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 0,442 ± 0,150 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 0,063 ± 0,021 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 0,460 ± 0,055 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,001 ± 0,001 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 0,504 ± 0,070 ops/ms\r\n\r\n Benchmark result is saved to /home/frederic/Waarp2/netty/microbench/target/reports/performance/HttpPostMultipartRequestDecoderBenchmark.json\r\n [835,175s][info ][gc,heap,exit ] Heap\r\n [835,175s][info ][gc,heap,exit ] garbage-first heap total 258048K, used 55694K [0x0000000707800000, 0x0000000800000000)\r\n [835,175s][info ][gc,heap,exit ] region size 1024K, 54 young (55296K), 2 survivors (2048K)\r\n [835,175s][info ][gc,heap,exit ] Metaspace used 9371K, capacity 9763K, committed 10112K, reserved 1058816K\r\n [835,175s][info ][gc,heap,exit ] class space used 1036K, capacity 1132K, committed 1152K, reserved 1048576K\r\n\r\n- V64:\r\n\r\n Benchmark Mode Cnt Score Error Units\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 5,318 ± 0,199 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 4,978 ± 1,401 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,620 ± 0,164 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 4,362 ± 0,355 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 1,900 ± 0,619 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 2,463 ± 0,176 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,146 ± 0,014 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 2,352 ± 0,139 ops/ms\r\n\r\n Benchmark result is saved to /home/frederic/Waarp2/netty/microbench/target/reports/performance/HttpPostMultipartRequestDecoderBenchmark.json\r\n [811,531s][info ][gc,heap,exit ] Heap\r\n [811,531s][info ][gc,heap,exit ] garbage-first heap total 258048K, used 56735K [0x0000000707800000, 0x0000000800000000)\r\n [811,531s][info ][gc,heap,exit ] region size 1024K, 55 young (56320K), 2 survivors (2048K)\r\n [811,531s][info ][gc,heap,exit ] Metaspace used 9349K, capacity 9715K, committed 9856K, reserved 1058816K\r\n [811,531s][info ][gc,heap,exit ] class space used 1035K, capacity 1132K, committed 1152K, reserved 1048576K\r\n\r\nSo roughly 
10 times better with V64.\r\n\r\nSo I cannot explain this performance issue.\r\n", "@doom369 \r\nAs I cannot reproduce within Netty, I can try to manually execute (through IntelliJ) the test from Micronaut to see exactly what is going on on your side.\r\n\r\nCould you give me the setup and which test to run?", "@fredericBregier @doom369 \r\nI've given a quick look at the flamegraphs from @doom369 and, as the author of the optimized index of, I can say:\r\n\r\n- it could be an issue with the index of implementation, failing to find some specific byte and leaving the algo to go into some weird logic code path\r\n- it could be an uncovered logical (corner) case with the find delimiter logic and/or caller, making it call index of more and more (or on larger and larger chunks)\r\n\r\nThe former should be safe thanks to the coverage we already have on the test suite re index of.\r\nThe latter seems the most realistic candidate, although it's surprising there is no way to isolate this with vanilla Netty.", "@franz1981 I agree, so that is the reason I would try to manually check the test from Micronaut under IntelliJ, to check the underlying corner case that I can't figure out within vanilla Netty.\r\n\r\n@doom369 So that is the reason I would like some help (a guideline) to run this (or those) test(s) (which ones?) from Micronaut on my own to do debugging and tracing.\r\nIt was the case for instance with another bug (fixed recently) from Micronaut too, where I was able to find out the logic in vanilla Netty.\r\nMy goal is to be able to reproduce within Micronaut, then within vanilla Netty.", "@fabienrenaud I would be able to test only on Saturday. I'm not familiar with Micronaut as well, so it will require some time to set it up with the snapshot version. I just ran the provided reproducer from the initial description, I was expecting to see some obvious thing to fix :). 
But it looks like that's not the case.", "@doom369 @fredericBregier Once you have Netty in your local maven cache it's simply a matter of changing the `build.gradle` file to point to that version.\r\n\r\nJust change the string here https://github.com/jameskleeh/netty-memory-leak/blob/upload_perf/build.gradle#L28 then follow the steps in the README to run the app and upload a file:\r\n\r\nhttps://github.com/jameskleeh/netty-memory-leak/blob/upload_perf/README.md\r\n", "@doom369 @jameskleeh @franz1981 \r\nI did run your code: with a file of 25 MB (zip file of 25577548 bytes)\r\n- vs 59 : 0.727s at best for each curl (the 5th threw a Direct buffer memory exception)\r\n- vs 64 : 0.115s for each curl on average with 1000 curl commands (with a for loop)\r\n\r\nSo first, the time is good with v64.\r\n\r\nNow here are the flame and tree files.\r\n\r\n[profile-micronaut-netty.zip](https://github.com/netty/netty/files/6388095/profile-micronaut-netty.zip)\r\n\r\nWhat I see:\r\n- V59 : 56% from loadDataMultipartStandard with self 8.45%\r\n\r\n [53] 56.41% 387 self: 8.45% 58 io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.loadDataMultipartStandard\r\n\r\n- V64: 32% from loadDataMultipartOptimized with self 0.36% and findDelimiter 1.08%, so almost half\r\n\r\n [52] 32.43% 180 self: 0.36% 2 io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.loadDataMultipartOptimized\r\n [53] 25.23% 140 self: 1.08% 6 io/netty/handler/codec/http/multipart/HttpPostBodyUtil.findDelimiter\r\n\r\nSo I still can't explain what you have, since even with your reproducer, I see no issue.\r\nI surely missed something.
;-)", "The result with V63 is very bad:\r\n- v63 : 61.500s in average\r\n\r\nThe flame (with 5 transfers with the very same file):\r\n\r\n[profile-micronaut-netty63-flame.zip](https://github.com/netty/netty/files/6392779/profile-micronaut-netty63-flame.zip)\r\n\r\nIt seems the change made in V64 are very critical (probably linked to both previous patch on your 15 days first ssue and your 2 last current issues).", "@fredericBregier Then what is the chance we can get a 64 release in the near future?", "@jameskleeh pretty good :) I think next week should be possible easily ", "@fredericBregier do you know which change ? Also did the testing include your pending PR or not ?", "@normanmaurer The tests are including the 3 PR:\r\n- the current one: #11188 (Fix Memory release not correctly in Multipart Decoder)\r\n- the 2 weeks ago one: #11145 (Fix behavior of HttpPostMultipartRequestDecoder for Memory based Factory)\r\n- yours #11207 (Destroy HttpPostMultipartRequestDecoder if contructor throws)\r\n\r\nThe changes are about:\r\n- speed up in CRLF finding (and fix) within `loadDataMultipartOptimized` (HttpPostMultipartRequestDecoder) and `findLastLineBreak` (HttpPostBodyUtil)\r\n- freeing Memory each time possible (mainly was in constructor but you did included it in another one and also in `destroy` (HttpPostMultipartRequestDecoder)", "@normanmaurer Do you know when 64 will go out? I'd like to do a new release of Micronaut with it asap", "@jameskleeh next week" ]
[ "@fredericBregier can you do this change in a separate PR and also add a unit test ?", "@normanmaurer I'm not sure I can. Let me explain.\r\nFirst, this is already in place in HttpPostStandardRequestDecoder, so it should be also within Multipart too.\r\n\r\nI notice this during PR since the deploy was wrong due to some Junit having a Leak detection error due to this missing.\r\n\r\nSo, in fact, there are already tests for that, but relying on leak detection step in CI/CD.\r\n\r\nFor Multipart, such as: \r\n- testMultipartRequestWithFileInvalidCharset\r\n- testMultipartRequestWithFieldInvalidCharset\r\n- testDecodeMalformedNotEncodedContentDispositionFieldParameters\r\n- testDecodeMalformedBadCharsetContentDispositionFieldParameters\r\n\r\nWhile on Standard one, we have:\r\n- testNotLeak\r\n- testNotLeakDirectBufferWhenWrapIllegalArgumentException\r\n- testNotLeakWhenWrapIllegalArgumentException\r\n- testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte0\r\n- testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte1\r\n- testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleHi\r\n- testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleLo\r\n\r\nThey all share the same spirit: allocating a Decoder with a wrong parameter, but request shall be released and as there is no Decoder, we cannot check it but the check on memory leak i the CI/CD get them if they are not correctly handled.\r\n\r\nIf I removed this, it will certainly failed during leak detection, as it was the case before I added this, and with equal action than to the other standard implementation.\r\n\r\nWDYT?", "Sounds good.. But please just do it in a separate PR. This will make it easier to review / revert etc if needed. ", "OK, I will remove it, but not sure it will not generate leak detetection errors on CI/CD then. \r\nAnd if is so, this PR could not be successful, we shall probably have both but in separate commit if you want. Let see what's happening when removing it.", "As I suspected, removing this is causing leak detection.\r\n\r\nThis should have been done when the standard decoder was changed, but it wasn't. Now, it's necessary.\r\nSo how can we proceed?", "All tests that goes to \"Checking for detected leak\" have the following similar output:\r\n\r\n Run ./.github/scripts/check_leak.sh build-leak.output\r\n ./.github/scripts/check_leak.sh build-leak.output\r\n shell: /usr/bin/bash -e {0}\r\n Leak detected, please inspect build log\r\n Error: Process completed with exit code 1.\r\n\r\nI repushed the commit with the correction on constructor, as it should be and in line with the other one for Standard decoder. If you have any suggestion, I will go through them.", "Reference of CI/CD was: https://github.com/netty/netty/pull/11188/checks?check_run_id=2440801961", "I factored out this change in its own PR and added a unit test... 
please have a look https://github.com/netty/netty/pull/11207", "OK, I will rebase once merged (review is OK ;-) )", "Rebase done, CI/CD on going...", "@fredericBregier can you explain me why this change ?", "Several reasons:\r\n\r\n- The decoder is not acting in the same way on every chunk : only the first one was `retainedSlice`\r\n- This introduces some bad usages on user's end where the original buffer (from Chunk) were retained and released as soon as possible (the original Chunk), involving memory leak\r\n- It simplifies a lot the mechanism, while it allocates a duplicate ByteBuf, it is as for the next chunks, and performances were not degraded, neither memory pressure (cf Flame on #11175 )" ]
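The review above settles on `offer(...)` always copying the incoming buffer, which leaves ownership of each `HttpContent` with the caller. A minimal sketch of that discipline in a hypothetical inbound handler (names and wiring are illustrative, error handling elided):

```java
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;

public class UploadHandler extends SimpleChannelInboundHandler<HttpObject> {
    private HttpPostRequestDecoder decoder;

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
        if (msg instanceof HttpRequest) {
            // Offers automatically if this is also a FullHttpRequest.
            decoder = new HttpPostRequestDecoder((HttpRequest) msg);
        } else if (msg instanceof HttpContent) {
            // offer() copies internally, so SimpleChannelInboundHandler may
            // release the original content as usual after this method returns.
            decoder.offer((HttpContent) msg);
            if (msg instanceof LastHttpContent) {
                // ... drain decoder.getBodyHttpDatas() here ...
                decoder.destroy(); // releases whatever was not released above
            }
        }
    }
}
```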
"2021-04-22T18:30:31Z"
[]
File upload extremely slow
### Expected behavior Uploading a file should be almost instantaneous in a local environment ### Actual behavior Uploading a file is taking approximately 1 minute ### Steps to reproduce - `./gradlew run` - Upload a 25-30mb file (the project README has an example curl) ### Minimal yet complete reproducer code (or URL to code) https://github.com/jameskleeh/netty-memory-leak Checkout the `upload_perf` branch ### Netty version This started happening in 4.1.60 and is present in 4.1.63 ### JVM version (e.g. `java -version`) JDK 8 ### OS version (e.g. `uname -a`) Darwin MacBook-Pro.local 20.3.0 Darwin Kernel Version 20.3.0: Thu Jan 21 00:07:06 PST 2021; root:xnu-7195.81.3~1/RELEASE_X86_64 x86_64
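The exact curl command lives in the repository README linked above; a typical multipart upload for this kind of test looks like the following (the endpoint path and file name here are made up):

```
curl -v -F "file=@some-25mb-file.zip" http://localhost:8080/upload
```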
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index 5099ee60ac4..490b1ffcc14 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -43,7 +43,6 @@ import java.util.Map; import java.util.TreeMap; -import static io.netty.buffer.Unpooled.EMPTY_BUFFER; import static io.netty.util.internal.ObjectUtil.*; /** @@ -336,13 +335,8 @@ public HttpPostMultipartRequestDecoder offer(HttpContent content) { ByteBuf buf = content.content(); if (undecodedChunk == null) { - undecodedChunk = isLastChunk ? - // Take a slice instead of copying when the first chunk is also the last - // as undecodedChunk.writeBytes will never be called. - buf.retainedSlice() : - // Maybe we should better not copy here for performance reasons but this will need - // more care by the caller to release the content in a correct manner later - // So maybe something to optimize on a later stage + undecodedChunk = + // Since the Handler will release the incoming later on, we need to copy it // // We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity // which is not really usable for us as we may exceed it once we add more bytes. @@ -958,8 +952,15 @@ protected InterfaceHttpData getFileUpload(String delimiter) { */ @Override public void destroy() { - // Release all data items, including those not yet pulled + // Release all data items, including those not yet pulled, only file based items cleanFiles(); + // Clean Memory based data + for (InterfaceHttpData httpData : bodyListHttpData) { + // Might have been already released by the user + if (httpData.refCnt() > 0) { + httpData.release(); + } + } destroyed = true; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java index d2ef677dbf0..bdbccfa9f2f 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java @@ -153,7 +153,7 @@ public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest reque this.factory = checkNotNull(factory, "factory"); try { if (request instanceof HttpContent) { - // Offer automatically if the given request is als type of HttpContent + // Offer automatically if the given request is as type of HttpContent // See #1089 offer((HttpContent) request); } else { @@ -287,13 +287,8 @@ public HttpPostStandardRequestDecoder offer(HttpContent content) { ByteBuf buf = content.content(); if (undecodedChunk == null) { - undecodedChunk = isLastChunk ? - // Take a slice instead of copying when the first chunk is also the last - // as undecodedChunk.writeBytes will never be called. - buf.retainedSlice() : - // Maybe we should better not copy here for performance reasons but this will need - // more care by the caller to release the content in a correct manner later - // So maybe something to optimize on a later stage. 
+ undecodedChunk = + // Since the Handler will release the incoming later on, we need to copy it // // We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity // which is not really usable for us as we may exceed it once we add more bytes. @@ -693,8 +688,15 @@ private static ByteBuf decodeAttribute(ByteBuf b, Charset charset) { */ @Override public void destroy() { - // Release all data items, including those not yet pulled + // Release all data items, including those not yet pulled, only file based items cleanFiles(); + // Clean Memory based data + for (InterfaceHttpData httpData : bodyListHttpData) { + // Might have been already released by the user + if (httpData.refCnt() > 0) { + httpData.release(); + } + } destroyed = true;
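The `httpData.refCnt() > 0` guard in the patched `destroy()` exists because the user may already have released an item; isolated as a tiny helper, the idiom looks like this (hypothetical class and method names):

```java
import io.netty.util.ReferenceCounted;

final class ReleaseUtil {
    // Release only if still retained, so a user release followed by
    // destroy() does not turn into a double-release
    // (IllegalReferenceCountException).
    static void releaseIfRetained(ReferenceCounted data) {
        if (data.refCnt() > 0) {
            data.release();
        }
    }
}
```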
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java index 40f2f400a4e..665606fad83 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java @@ -88,7 +88,8 @@ private static void testBinaryStreamUpload(boolean withSpace) throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); - decoder.offer(new DefaultHttpContent(Unpooled.copiedBuffer(body, CharsetUtil.UTF_8))); + ByteBuf buf = Unpooled.copiedBuffer(body, CharsetUtil.UTF_8); + decoder.offer(new DefaultHttpContent(buf)); decoder.offer(new DefaultHttpContent(Unpooled.EMPTY_BUFFER)); // Validate it's enough chunks to decode upload. @@ -102,6 +103,7 @@ private static void testBinaryStreamUpload(boolean withSpace) throws Exception { data, upload.getString(CharsetUtil.UTF_8)); upload.release(); decoder.destroy(); + buf.release(); } } @@ -134,8 +136,8 @@ public void testFullHttpRequestUpload() throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // See https://github.com/netty/netty/issues/2544 @@ -181,8 +183,8 @@ public void testMultipartCodecWithCRasEndOfAttribute() throws Exception { assertNotNull(datar); assertEquals(datas[i].getBytes(CharsetUtil.UTF_8).length, datar.length); - req.release(); decoder.destroy(); + assertTrue(req.release()); } } @@ -215,8 +217,8 @@ public void testQuotedBoundary() throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // See https://github.com/netty/netty/issues/1848 @@ -276,6 +278,8 @@ public void testNoZeroOut() throws Exception { aDecodedData.release(); aDecoder.destroy(); + aSmallBuf.release(); + aLargeBuf.release(); } // See https://github.com/netty/netty/issues/2305 @@ -373,8 +377,8 @@ public void testFilenameContainingSemicolon() throws Exception { // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -403,8 +407,8 @@ public void testFilenameContainingSemicolon2() throws Exception { assertTrue(part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("tmp 0.txt", fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -434,8 +438,8 @@ public void testMultipartRequestWithoutContentTypeBody() { // Create decoder instance to test without any exception. 
final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -467,8 +471,8 @@ public void testDecodeOtherMimeHeaderFields() throws Exception { FileUpload fileUpload = (FileUpload) part1; byte[] fileBytes = fileUpload.get(); assertTrue("the filecontent should not be decoded", filecontent.equals(new String(fileBytes))); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -497,7 +501,7 @@ public void testMultipartRequestWithFileInvalidCharset() throws Exception { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof UnsupportedCharsetException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -530,7 +534,7 @@ public void testMultipartRequestWithFieldInvalidCharset() throws Exception { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof UnsupportedCharsetException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -581,8 +585,8 @@ public void testDecodeContentDispositionFieldParameters() throws Exception { assertTrue("the item should be a FileUpload", part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // https://github.com/netty/netty/pull/7265 @@ -617,8 +621,8 @@ public void testDecodeWithLanguageContentDispositionFieldParameters() throws Exc assertTrue("the item should be a FileUpload", part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // https://github.com/netty/netty/pull/7265 @@ -649,7 +653,7 @@ public void testDecodeMalformedNotEncodedContentDispositionFieldParameters() thr } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof ArrayIndexOutOfBoundsException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -681,7 +685,7 @@ public void testDecodeMalformedBadCharsetContentDispositionFieldParameters() thr } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof UnsupportedCharsetException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -712,8 +716,8 @@ public void testDecodeMalformedEmptyContentTypeFieldParameters() throws Exceptio assertTrue(part1 instanceof FileUpload); FileUpload fileUpload = (FileUpload) part1; assertEquals("tmp-0.txt", fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } // https://github.com/netty/netty/issues/8575 @@ -832,8 +836,8 @@ public void testDecodeWithLanguageContentDispositionFieldParametersForFix() thro FileUpload fileUpload = (FileUpload) part1; assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -869,8 +873,8 @@ public void testDecodeFullHttpRequestWithUrlEncodedBody() throws Exception { assertTrue(attr.getByteBuf().isDirect()); assertEquals("los angeles", attr.getValue()); - req.release(); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -987,7 +991,8 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo int 
bytesLastChunk = 10000; int fileSize = bytesPerChunk * nbChunks + bytesLastChunk; // set Xmx to a number lower than this and it crashes - String prefix = "--861fbeab-cd20-470c-9609-d40a0f704466\n" + + String delimiter = "--861fbeab-cd20-470c-9609-d40a0f704466"; + String prefix = delimiter + "\n" + "Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n" + "Content-Type: image/jpeg\n" + "Content-Length: " + fileSize + "\n" + @@ -1003,8 +1008,10 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo request.headers().set("content-length", prefix.length() + fileSize + suffix.length()); HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); - decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8)))); + ByteBuf buf = Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8)); + decoder.offer(new DefaultHttpContent(buf)); assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); + buf.release(); byte[] body = new byte[bytesPerChunk]; Arrays.fill(body, (byte) 1); @@ -1020,11 +1027,16 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo byte[] bsuffix1 = suffix1.getBytes(CharsetUtil.UTF_8); byte[] previousLastbody = new byte[bytesLastChunk - bsuffix1.length]; + byte[] bdelimiter = delimiter.getBytes(CharsetUtil.UTF_8); byte[] lastbody = new byte[2 * bsuffix1.length]; Arrays.fill(previousLastbody, (byte) 1); previousLastbody[0] = HttpConstants.CR; previousLastbody[1] = HttpConstants.LF; Arrays.fill(lastbody, (byte) 1); + // put somewhere a not valid delimiter + for (int i = 0; i < bdelimiter.length; i++) { + previousLastbody[i + 10] = bdelimiter[i]; + } lastbody[0] = HttpConstants.CR; lastbody[1] = HttpConstants.LF; for (int i = 0; i < bsuffix1.length; i++) { @@ -1055,12 +1067,18 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo } assertTrue("Capacity should be less than 1M", decoder.getCurrentAllocatedCapacity() < 1024 * 1024); - for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) { - httpData.release(); - factory.removeHttpDataFromClean(request, httpData); + InterfaceHttpData[] httpDatas = decoder.getBodyHttpDatas().toArray(new InterfaceHttpData[0]); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals("Before cleanAllHttpData should be 1", 1, httpData.refCnt()); } factory.cleanAllHttpData(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals("Before cleanAllHttpData should be 1 if in Memory", inMemory? 1 : 0, httpData.refCnt()); + } decoder.destroy(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals("RefCnt should be 0", 0, httpData.refCnt()); + } } @Test @@ -1121,7 +1139,9 @@ private void commonNotBadReleaseBuffersDuringDecoding(HttpDataFactory factory, b for (int i = 0; i < bp2.length; i++) { prefix[bp1.length + 2 + i] = bp2[i]; } - decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix))); + ByteBuf buf = Unpooled.wrappedBuffer(prefix); + decoder.offer(new DefaultHttpContent(buf)); + buf.release(); byte[] body = new byte[bytesPerItem]; Arrays.fill(body, (byte) rank); ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerItem);
train
test
"2021-04-29T10:26:44"
"2021-04-20T16:40:17Z"
jameskleeh
val
netty/netty/11175_11207
netty/netty
netty/netty/11175
netty/netty/11207
[ "keyword_issue_to_pr" ]
3657805711668ff1249b4c19e227bedb94d55417
def8a3f17d875ac188ed1d3364618f559a0b59ce
[ "There are some benchmarks within Netty that shows exactly the contrary (whatever Memory, Mixed or Disk Factory based).\r\nThe results are not using big file however, just 512KB but with more than 6 000 upload/s, so very quick, and about 3 GB/s.\r\nI've tested by changing size to 25 MB, increasing Factory size to 1GB, and it gives 131 upload/s, so 3.2 GB/s (so less than 1s to upload).\r\n\r\nTrace of benchmark using this 25 MB file test: (HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel is the reference in your cases)\r\n\r\n Benchmark Mode Cnt Score Error Units\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 0,132 ± 0,005 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 0,131 ± 0,005 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,118 ± 0,003 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 0,131 ± 0,001 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 0,124 ± 0,004 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 0,125 ± 0,005 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,075 ± 0,002 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 0,125 ± 0,004 ops/ms\r\n \r\n [817.169s][info ][gc,heap,exit ] Heap\r\n [817.169s][info ][gc,heap,exit ] garbage-first heap total 522240K, used 69233K [0x0000000602400000, 0x0000000800000000)\r\n [817.169s][info ][gc,heap,exit ] region size 2048K, 35 young (71680K), 2 survivors (4096K)\r\n [817.169s][info ][gc,heap,exit ] Metaspace used 17320K, capacity 17729K, committed 17792K, reserved 1064960K\r\n [817.169s][info ][gc,heap,exit ] class space used 1790K, capacity 1906K, committed 1920K, reserved 1048576K\r\n\r\nI also test using the Junit tests testBIgFileUploadDelimiterInMiddleChunkDecoderXxxxFactory (where Xxx can be Disk, Memory, Mixed) using a 25 MB file, and it gives less than 0.5s (even 0,05s with Memory Factory) and only 1 MB of memory allocated bu the Factory.\r\n\r\nMaybe you are using a Factory disk based (or mixed with a lower limit than file size), such that it depends on write disk speed (therefore on your disk speed)?\r\n\r\nAlso, regarding your other issue, have you clean the factory and HttpData correctly ?\r\n\r\n for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) {\r\n httpData.release();\r\n factory.removeHttpDataFromClean(request, httpData);\r\n }\r\n factory.cleanAllHttpData();\r\n decoder.destroy();\r\n\r\nCould you setup a simple test without your Micronaut code ?\r\n", "@fredericBregier Our test cases that exercise the same code seem to perform OK as well. I've only experienced this with curl. I don't know how to setup a test given the complexity and the number of factors. Simply changing the Netty version, with the Micronaut code exactly the same, shows the issue. That leads me to believe the issue lies in Netty.", "But When I did tests using either Junit or Benchmark from Netty code, it appears that it seems not an issue of Netty.\r\nSo I believe there is something wrong in the way you create the Factory or free up the factory and the decoder and the HttpData.\r\nBut as the code or Micronaut is huge, I cannot deep into it. 
;-)", "@fredericBregier Unfortunately if that is the approach then I believe we will be stuck on a relatively old version of Netty until we can investigate alternatives", "There is also an example within Netty that starts a simple Http Web server with multipart activated.\r\nSee https://github.com/netty/netty/tree/4.1/example/src/main/java/io/netty/example/http/upload\r\nYou can use your own browser (using inspection to see the speed from browser point of view).\r\nI've uploaded a file of 420 MB in 3.2s (131 MB/s) and I repeat the operation, with still 370 MB max memory associated with the Java Web server.\r\n\r\nSo I believe the issue is not within Netty but the way you use it, sorry.", "@jameskleeh what a reproducer + run the profiler in the parallel?", "Guys the fact remains that in a patch release of Netty behaviour has regressed such that we have memory leaks and performance problems which cannot be acceptable. One thing is these changes go into a new minor or major versions, but patch versions surely have to maintain behaviour and if a regression happens in a patch it needs to be taken seriously. \r\n\r\nCan the changes that caused this regression be rolled back if they are not production ready? Micronaut is a large user of Netty with many users and it is disheartening to see feedback addressed with \"well our tests pass so sorry\"", "It seems as well that Vertx is having Memory leak issues in a similar area #11184", "@graemerocher I agree for most of what you wrote, but where is the reproducer?\r\n\r\nI cannot go deep within either Micronaut or Vertx. I have to have a reproducer to understand.\r\n\r\nNote that the previous version (58 or before) was leaking a lot (memory, speed, wrong assignment with loss of data, unkown from end user). And we put much effort to add as much as possible Junit tests (or benchmarks) to ensure nothing is broken and even that those issues (memoey, speed, wrong assignment with loss of data, unkown from end user) were fixed.\r\n\r\nIf this so reproducible (VertX, Micronaut), then it should be easy to have a simple reproducer, right?\r\n\r\nLook at the previous issue that was raised by @jameskleeh , he gives me enough elements to have a reproducer, and then I was able to fix it.\r\n\r\nIt seems, for what I believe, a bad usage of the Factory and Decoder, and in particular, the way to release all items.\r\nI proposed to make a change to simplify it, but as it was a change of behavior (the way Memory based vs Disk based Factories are working), and most of all because I use a guard such as `buffer.refCnt() > 0` to release the underlying buffers, it was not accepted and I just then ensure the documentation says how it should be done.\r\n\r\n for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) {\r\n httpData.release();\r\n factory.removeHttpDataFromClean(request, httpData);\r\n }\r\n factory.cleanAllHttpData();\r\n decoder.destroy();\r\n\r\nIt might be releated to a wrong usage of the API, due to its complexity.\r\n\r\nI believe that we should add (to be backward compatible) a special release method on both Factory and Decoder, to ensure that everything is clean up. But then, you will have to adapt the code, whatever the way.\r\n\r\nSo don't take me bad. Without help, I cannot help either. That's all I wrote.", "Thanks for the clarification. A reproducer was provided with the issue report which currently reproduces the issue. You are asking for a pure Netty reproducer? 
That is very complex to setup given that Micronaut has a complete pipeline that is not easy to reproduce standalone. ", "From other thread on other issue (I mixed wrongly the comments):\r\n\r\nJavadoc for Factory:\r\n\r\n /**\r\n * Remove all InterfaceHttpData from virtual File storage from clean list for all requests\r\n */\r\n void cleanAllHttpData();\r\n\r\nIt means that only \"file\" based are released there, not \"memory\" based one.\r\n\r\nThen on Decoder:\r\n\r\n /**\r\n * Destroy the {@link HttpPostMultipartRequestDecoder} and release all it resources. After this method\r\n * was called it is not possible to operate on it anymore.\r\n */\r\n @Override\r\n public void destroy() {\r\n // Release all data items, including those not yet pulled\r\n cleanFiles();\r\n\r\n destroyed = true;\r\n\r\n if (undecodedChunk != null && undecodedChunk.refCnt() > 0) {\r\n undecodedChunk.release();\r\n undecodedChunk = null;\r\n }\r\n }\r\n\r\nExcept that it calls `cleanFiles()` which calls ` factory.cleanRequestHttpData(request);` which Javadoc says:\r\n\r\n /**\r\n * Remove all InterfaceHttpData from virtual File storage from clean list for the request\r\n *\r\n * @param request associated request\r\n */\r\n void cleanRequestHttpData(HttpRequest request);\r\n\r\nSo yes, there is an issue, since only \"Disk\" based are free. And your codes are wrong.\r\nThis is what I point out.\r\nFor your intention, this issue was introduced 2 years ago (not by the last changes).\r\n\r\nSo, in order to propose something, what I can propose is to change as the following:\r\n\r\n\r\n /**\r\n * Destroy the {@link HttpPostMultipartRequestDecoder} and release all it resources. After this method\r\n * was called it is not possible to operate on it anymore.\r\n */\r\n @Override\r\n public void destroy() {\r\n // Release all data items, including those not yet pulled\r\n cleanFiles();\r\n \r\n // Explicitely remove all HttpData but only if refCnt is > 0\r\n for (InterfaceHttpData httpData: getBodyHttpDatas()) {\r\n if (httpData.refCnt > 0) {\r\n httpData.release();\r\n }\r\n factory.removeHttpDataFromClean(request, httpData);\r\n }\r\n destroyed = true;\r\n\r\n if (undecodedChunk != null && undecodedChunk.refCnt() > 0) {\r\n undecodedChunk.release();\r\n undecodedChunk = null;\r\n }\r\n }\r\n", "You can do that however that also isn't related to this issue. This is about the speed of the upload which can be demonstrated before any data is released/destroyed", "@jameskleeh as @graemerocher refered to a memory issue, I believe that it was also the good place to put the same.\r\n\r\nNow considering this: I really mismatched between your 2 issues. Too fast reading and answearing, my bad.\r\n\r\nThis exact issue is related to performance issue, which I already demonstrated that currently no issue is found without a reproducer.\r\nSorry about that...", "I referred to both. To quote\r\n\r\n> Netty behaviour has regressed such that we have memory leaks and performance problems ", "Hey I am late as I just returned from vacation and have a huge backlog. That said I can try to have a look next week as well. 
As we know in which release the issue started it should be possible to figure out what is wrong ", "@normanmaurer Thanks, since on performance issue, I don't know how to find out in such a huge code the reason.\r\nOn memory issue (other issue), I figured out (2 years ago change that I didn't do) and propose a fix right now.", "@jameskleeh I checked your reproducer and got the memory leak as well with error:\r\n\r\n```\r\njava.lang.OutOfMemoryError: Direct buffer memory\r\n at java.base/java.nio.Bits.reserveMemory(Bits.java:175)\r\n at java.base/java.nio.DirectByteBuffer.<init>(DirectByteBuffer.java:118)\r\n at java.base/java.nio.ByteBuffer.allocateDirect(ByteBuffer.java:317)\r\n at io.netty.buffer.PoolArena$DirectArena.allocateDirect(PoolArena.java:645)\r\n at io.netty.buffer.PoolArena$DirectArena.newChunk(PoolArena.java:621)\r\n at io.netty.buffer.PoolArena.allocateNormal(PoolArena.java:204)\r\n at io.netty.buffer.PoolArena.tcacheAllocateNormal(PoolArena.java:188)\r\n at io.netty.buffer.PoolArena.allocate(PoolArena.java:138)\r\n at io.netty.buffer.PoolArena.reallocate(PoolArena.java:288)\r\n at io.netty.buffer.PooledByteBuf.capacity(PooledByteBuf.java:118)\r\n at io.netty.buffer.AbstractByteBuf.ensureWritable0(AbstractByteBuf.java:307)\r\n at io.netty.buffer.AbstractByteBuf.ensureWritable(AbstractByteBuf.java:282)\r\n at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1105)\r\n at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1098)\r\n at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1089)\r\n at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.offer(HttpPostMultipartRequestDecoder.java:340)\r\n at io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder.offer(HttpPostMultipartRequestDecoder.java:52)\r\n at io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.offer(HttpPostRequestDecoder.java:223)\r\n at io.micronaut.http.server.netty.FormDataHttpContentProcessor.onData(FormDataHttpContentProcessor.java:113)\r\n at io.micronaut.http.server.netty.AbstractHttpContentProcessor.doOnNext(AbstractHttpContentProcessor.java:78)\r\n at io.micronaut.http.server.netty.AbstractHttpContentProcessor.doOnNext(AbstractHttpContentProcessor.java:36)\r\n at io.micronaut.core.async.subscriber.CompletionAwareSubscriber.onNext(CompletionAwareSubscriber.java:52)\r\n at io.micronaut.http.netty.reactive.HandlerPublisher.publishMessage(HandlerPublisher.java:378)\r\n at io.micronaut.http.netty.reactive.HandlerPublisher.channelRead(HandlerPublisher.java:334)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.micronaut.http.netty.stream.HttpStreamsHandler.handleReadHttpContent(HttpStreamsHandler.java:292)\r\n at io.micronaut.http.netty.stream.HttpStreamsHandler.channelRead(HttpStreamsHandler.java:257)\r\n at io.micronaut.http.netty.stream.HttpStreamsServerHandler.channelRead(HttpStreamsServerHandler.java:121)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:103)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:103)\r\n at io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:93)\r\n at io.netty.handler.codec.http.HttpServerKeepAliveHandler.channelRead(HttpServerKeepAliveHandler.java:64)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n at io.netty.handler.flow.FlowControlHandler.dequeue(FlowControlHandler.java:200)\r\n at io.netty.handler.flow.FlowControlHandler.read(FlowControlHandler.java:139)\r\n at io.netty.channel.AbstractChannelHandlerContext.invokeRead(AbstractChannelHandlerContext.java:686)\r\n at io.netty.channel.AbstractChannelHandlerContext.read(AbstractChannelHandlerContext.java:671)\r\n at io.micronaut.http.netty.reactive.HandlerPublisher.requestDemand(HandlerPublisher.java:163)\r\n at io.micronaut.http.netty.stream.HttpStreamsHandler$2.requestDemand(HttpStreamsHandler.java:248)\r\n\r\n```\r\n\r\nHowever, reproducer says it uses` 4.1.59`, while in the ticket description you say \"This started happening in 4.1.60 and is present in 4.1.63\". Could you please clarify?\r\n\r\nAlso, I can't reproduce the memory leak with uncommented \"4.1.63.Final\". But I see visible performance degradation. Could you please clarify - with `4.1.63.Final` do you see a memory leak or performance issue or both?\r\n\r\nStacktrace reminds me of this issue - https://github.com/netty/netty/issues/10281 I fixed it by overriding the `HttpPostMultipartRequestDecoder`. We have a lot of upload tests with paranoid level and everything is ok with the overrided version (the use case is the same as in your example).", "@doom369 The `upload_perf` branch of the repo uses `4.1.63`, so that is designed to show the performance issue. I was relatively certain I did reproduce the memory leak in `4.1.63`, however I just went through the exercise of waiting for x requests and I can not reproduce it either. I'll close the other issue and we can just leave this one for the performance issue. 
I appreciate your help\r\n", "@jameskleeh @fredericBregier here are my findings so far.\r\n\r\nFlame Graph for (netty 63 where performance issue is present):\r\n\r\n[flame_63.zip](https://github.com/netty/netty/files/6369135/flame_63.zip)\r\n\r\nFlame Graph for (netty 59 where no performance issue, but memory leak is present):\r\n\r\n[flame_59.zip](https://github.com/netty/netty/files/6369137/flame_59.zip)\r\n\r\nSo the problem seems like in `io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.loadDataMultipartOptimized` method. @fredericBregier could you please take a look?\r\n\r\nI wasn't able to reproduce it in our tests. (Probably because we have overridden `HttpPostMultipartRequestDecoder`).\r\nBut in the provided reproducer of @jameskleeh upload is ~100 times slower, than it should be.\r\n\r\n\r\n\r\n\r\n\r\n", "@doom369 Thank you !\r\nStrange, but as there is also another improvement in the current stream (#11145), could you recheck against the current 4.1 branch?\r\n\r\nStrange since the difference between previous behaviour (59) and (63) is:\r\n- 59 is usung getByte for each and every bytes when searching for delimiter, which cause a huge cost, and in particular using PARANOID detection mode, but even on SIMPLE or NONE detection mode\r\n- 63 is using bytesBfore to search delimiter, which is really more efficient, whatever detection mode, even if PARANOID is still slower (independent of this codec, related to the way detection mode changed)\r\n- current 4.1 (including patch) is using in addition a more efficient way to search for CRLF when no delimiter is found, by limiting the search interval from the end of the buffer (for instance if the buffer is 10K, it will only search in the last \"delimiter.size\" interval)\r\n\r\nAnd I do use benchmark with async-profile and it shows the great improvement (comparing 59 to after), not the contrary.\r\n\r\nI will try to redo an async-profile ton try to understand, but if you can in the same time redo it using last 4.1 version from github (including the last merged patch), that could be useful.", "@fredericBregier I'd be happy to test with the snapshot, however it was a great pain to get that working before. Are there instructions somewhere on how to generate snapshot artifacts locally? I can run `mvn install` but that doesn't produce the os specific binaries for xpoll etc..", "@jameskleeh I'm using directly IntelliJ to make my tests. But obviously, within Netty code (since I think we cannot shared multiple projects as micronaut and netty at once, such as in Eclispe).\r\nI believe there is a Wiki page that presents how to build Netty.", "@fredericBregier I'm unable to find that info", "@jameskleeh Maybe the following ones ?\r\n- https://netty.io/wiki/developer-guide.html\r\n- https://netty.io/wiki/native-transports.html", "@doom369 I did tests again V59 and current v64.\r\n\r\n1. Running Benchmark: `HttpPostMultipartRequestDecoderBenchmark`\r\n\r\n2. 
Running a slightly modified Junit within `HttpPostRequestDecoderTest`: `testBIgFileUploadDelimiterInMiddleChunkDecoderMemoryFactory` and also Mixed and Disk Factory\r\n\r\n### Junit tests as performance tests\r\n\r\nChanges in `HttpPostRequestDecoderTest` renamed into `HttpMultipartBenchamrkTemporaryTest`:\r\n\r\n\r\n @Test\r\n public void testBIgFileUploadDelimiterInMiddleChunkDecoderDiskFactory() throws IOException {\r\n // Factory using Disk mode\r\n HttpDataFactory factory = new DefaultHttpDataFactory(true);\r\n for (int i = 0; i < 100; i++) { // CHANGE: Add LOOP but only 100 since too long due to disk access\r\n commonTestBigFileDelimiterInMiddleChunk(factory, false);\r\n }\r\n }\r\n\r\n @Test\r\n public void testBIgFileUploadDelimiterInMiddleChunkDecoderMemoryFactory() throws IOException, InterruptedException {\r\n // Factory using Memory mode\r\n Thread.sleep(20000);\r\n HttpDataFactory factory = new DefaultHttpDataFactory(false);\r\n for (int i = 0; i < 6000; i++) { // CHANGE: Add LOOP\r\n commonTestBigFileDelimiterInMiddleChunk(factory, true);\r\n }\r\n }\r\n\r\n @Test\r\n public void testBIgFileUploadDelimiterInMiddleChunkDecoderMixedFactory() throws IOException {\r\n // Factory using Mixed mode, where file shall be on Disk\r\n HttpDataFactory factory = new DefaultHttpDataFactory(10000);\r\n for (int i = 0; i < 100; i++) { // CHANGE: Add LOOP but only 100 since too long due to disk access\r\n commonTestBigFileDelimiterInMiddleChunk(factory, false);\r\n }\r\n }\r\n\r\n private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, boolean inMemory)\r\n throws IOException {\r\n int nbChunks = 300; // CHANGE here from 30 to 300 to get 30 MB transfer\r\n int bytesPerChunk = 100000;\r\n int bytesLastChunk = 10000;\r\n\r\nTimes are the following:\r\n- V59 : about 12 minutes for each (Disk or Mixed Factory with 100 about the same time than 6000 using Memory Factory) (except PARANOID mode where more than 2 hours and need to stop)\r\n- V64 : about 2 minutes for each (whatever PARANOID or not)\r\n\r\nI generate only for Memory based the Flame and standard async-profile outputs, both for V59 and V64, using or not PARANOID level. 
I had to stop after 2 hours the V59 in PARANOID mode since still not ending.\r\n\r\n[TestV59vsV64.zip](https://github.com/netty/netty/files/6380056/TestV59vsV64.zip)\r\n\r\n\r\n### On Benchmarks side\r\n\r\nIt uses lot of small files or a big one, depending on the tests, and use all kind of leak detection from NONE to PARANOID\r\n\r\n- V59\r\n\r\n Benchmark Mode Cnt Score Error Units\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 0,110 ± 0,115 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 0,678 ± 0,295 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,001 ± 0,001 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 0,442 ± 0,150 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 0,063 ± 0,021 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 0,460 ± 0,055 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,001 ± 0,001 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 0,504 ± 0,070 ops/ms\r\n\r\n Benchmark result is saved to /home/frederic/Waarp2/netty/microbench/target/reports/performance/HttpPostMultipartRequestDecoderBenchmark.json\r\n [835,175s][info ][gc,heap,exit ] Heap\r\n [835,175s][info ][gc,heap,exit ] garbage-first heap total 258048K, used 55694K [0x0000000707800000, 0x0000000800000000)\r\n [835,175s][info ][gc,heap,exit ] region size 1024K, 54 young (55296K), 2 survivors (2048K)\r\n [835,175s][info ][gc,heap,exit ] Metaspace used 9371K, capacity 9763K, committed 10112K, reserved 1058816K\r\n [835,175s][info ][gc,heap,exit ] class space used 1036K, capacity 1132K, committed 1152K, reserved 1048576K\r\n\r\n- V64:\r\n\r\n Benchmark Mode Cnt Score Error Units\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 5,318 ± 0,199 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 4,978 ± 1,401 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,620 ± 0,164 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 4,362 ± 0,355 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 1,900 ± 0,619 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 2,463 ± 0,176 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,146 ± 0,014 ops/ms\r\n HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 2,352 ± 0,139 ops/ms\r\n\r\n Benchmark result is saved to /home/frederic/Waarp2/netty/microbench/target/reports/performance/HttpPostMultipartRequestDecoderBenchmark.json\r\n [811,531s][info ][gc,heap,exit ] Heap\r\n [811,531s][info ][gc,heap,exit ] garbage-first heap total 258048K, used 56735K [0x0000000707800000, 0x0000000800000000)\r\n [811,531s][info ][gc,heap,exit ] region size 1024K, 55 young (56320K), 2 survivors (2048K)\r\n [811,531s][info ][gc,heap,exit ] Metaspace used 9349K, capacity 9715K, committed 9856K, reserved 1058816K\r\n [811,531s][info ][gc,heap,exit ] class space used 1035K, capacity 1132K, committed 1152K, reserved 1048576K\r\n\r\nSo roughly 
10 times better with V64.\r\n\r\nSo I cannot explain this performance issue.\r\n", "@doom369 \r\nAs I cannot reproduce this within Netty, I can try to execute manually (through IntelliJ) the test from Micronaut to see exactly what is going on on your side.\r\n\r\nCould you give me the setup and which test to run?", "@fredericBregier @doom369 \r\nI've given a quick look at the flamegraphs from @doom369 and, as the author of the optimized index of, I can say:\r\n\r\n- it could be an issue with the index of implementation, failing to find some specific byte and leaving the algo to go into some weird logic code path\r\n- it could be an uncovered logical (corner) case with the find delimiter logic and/or caller, making it call index of more and more (or on larger and larger chunks)\r\n\r\nThe former should be ruled out thanks to the coverage we already have on the test suite re index of.\r\nThe latter seems the most realistic candidate, although it's surprising there is no way to isolate this with vanilla Netty.", "@franz1981 I agree, which is why I would try to manually check the test from Micronaut under IntelliJ, to find the underlying corner case that I can't figure out within vanilla Netty.\r\n\r\n@doom369 So that is why I would like some help (a guideline) to run this test (or tests, and which ones?) from Micronaut on my own, to do debugging and tracing.\r\nIt was the case for instance with another bug (fixed recently) from Micronaut too, but there I was able to find out the logic in vanilla Netty.\r\nMy goal is to be able to reproduce within Micronaut, then within vanilla Netty.", "@fabienrenaud I would be able to test only on Saturday. I'm not familiar with Micronaut either, so it will require some time to set it up with the snapshot version. I just ran the provided reproducer from the initial description, I was expecting to see some obvious thing to fix :). 
But it looks like that is not the case.", "@doom369 @fredericBregier Once you have Netty in your local maven cache it's simply a matter of changing the `build.gradle` file to point to that version.\r\n\r\nJust change the string here https://github.com/jameskleeh/netty-memory-leak/blob/upload_perf/build.gradle#L28 then follow the steps in the README to run the app and upload a file:\r\n\r\nhttps://github.com/jameskleeh/netty-memory-leak/blob/upload_perf/README.md\r\n", "@doom369 @jameskleeh @franz1981 \r\nI did run your code: with a file of 25 MB (zip file of 25577548 bytes)\r\n- vs 59 : 0.727s at best per curl (the 5th threw a memory Direct buffer exception)\r\n- vs 64 : 0.115s per curl on average over 1000 curl commands (in a for loop)\r\n\r\nSo first, the time is good with v64.\r\n\r\nNow here are the flame and tree files.\r\n\r\n[profile-micronaut-netty.zip](https://github.com/netty/netty/files/6388095/profile-micronaut-netty.zip)\r\n\r\nWhat I see:\r\n- V59 : 56% from loadDataMultipartStandard with self 8.45%\r\n\r\n [53] 56.41% 387 self: 8.45% 58 io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.loadDataMultipartStandard\r\n\r\n- V64: 32% from loadDataMultipartOptimized with self 0.36% and findDelimiter 1.08%, so almost half\r\n\r\n [52] 32.43% 180 self: 0.36% 2 io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.loadDataMultipartOptimized\r\n [53] 25.23% 140 self: 1.08% 6 io/netty/handler/codec/http/multipart/HttpPostBodyUtil.findDelimiter\r\n\r\nSo I still can't explain what you see, since even with your reproducer I see no issue.\r\nI surely missed something.", "Just given a quick look at the flames from @fredericBregier and the sample counts match my expectation (if I am reading it right) for cases where finding the delimiter is the bottleneck, see results on https://github.com/netty/netty/pull/10737#issuecomment-720481198 with a high number of permutations and input sizes: the SWAR index of is at best twice as fast as reading a single byte each time (for 32-long inputs, yet to see with more).\r\nThe samples on the flamegraphs are 381 samples for loadDataMultiPartStandard vs 175 samples for loadDataMultiPartOptimized, meaning that the time spent on it is halved (i.e. assuming the same outcomes and the amount of work performed during the elapsed time is the same, it consumes half the CPU time and/or is twice as fast). Results related to the elapsed time show a 6X shorter elapsed time to perform the same work, so probably the gap is far more than the expected 2X because the original readByte used on V59 was getting a bound check + ref cnt check for each byte read.\r\n\r\nI am a bit surprised how GC is stressed by 59 and how different the I/O utilization of the 2 versions is for both file read and networking. \r\n", "@fredericBregier It may be that the changes that were made after 63 was released fixed the issue. Can you verify you see the problem with 63?", "@jameskleeh I will check when I have some time, but yes, I will ! 
;-)", "The result with V63 is very bad:\r\n- v63 : 61.500s in average\r\n\r\nThe flame (with 5 transfers with the very same file):\r\n\r\n[profile-micronaut-netty63-flame.zip](https://github.com/netty/netty/files/6392779/profile-micronaut-netty63-flame.zip)\r\n\r\nIt seems the change made in V64 are very critical (probably linked to both previous patch on your 15 days first ssue and your 2 last current issues).", "@fredericBregier Then what is the chance we can get a 64 release in the near future?", "@jameskleeh pretty good :) I think next week should be possible easily ", "@fredericBregier do you know which change ? Also did the testing include your pending PR or not ?", "@normanmaurer The tests are including the 3 PR:\r\n- the current one: #11188 (Fix Memory release not correctly in Multipart Decoder)\r\n- the 2 weeks ago one: #11145 (Fix behavior of HttpPostMultipartRequestDecoder for Memory based Factory)\r\n- yours #11207 (Destroy HttpPostMultipartRequestDecoder if contructor throws)\r\n\r\nThe changes are about:\r\n- speed up in CRLF finding (and fix) within `loadDataMultipartOptimized` (HttpPostMultipartRequestDecoder) and `findLastLineBreak` (HttpPostBodyUtil)\r\n- freeing Memory each time possible (mainly was in constructor but you did included it in another one and also in `destroy` (HttpPostMultipartRequestDecoder)", "@normanmaurer Do you know when 64 will go out? I'd like to do a new release of Micronaut with it asap", "@jameskleeh next week" ]
[]
"2021-04-28T09:01:13Z"
[]
File upload extremely slow
### Expected behavior Uploading a file should be almost instantaneous in a local environment ### Actual behavior Uploading a file is taking approximately 1 minute ### Steps to reproduce - `./gradlew run` - Upload a 25-30mb file (the project README has an example curl) ### Minimal yet complete reproducer code (or URL to code) https://github.com/jameskleeh/netty-memory-leak Checkout the `upload_perf` branch ### Netty version This started happening in 4.1.60 and is present in 4.1.63 ### JVM version (e.g. `java -version`) JDK 8 ### OS version (e.g. `uname -a`) Darwin MacBook-Pro.local 20.3.0 Darwin Kernel Version 20.3.0: Thu Jan 21 00:07:06 PST 2021; root:xnu-7195.81.3~1/RELEASE_X86_64 x86_64
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index af3b5022a23..5099ee60ac4 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -31,6 +31,7 @@ import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.NotEnoughDataDecoderException; import io.netty.util.CharsetUtil; import io.netty.util.internal.InternalThreadLocalMap; +import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.StringUtil; import java.io.IOException; @@ -199,12 +200,17 @@ public HttpPostMultipartRequestDecoder(HttpDataFactory factory, HttpRequest requ } currentStatus = MultiPartStatus.HEADERDELIMITER; - if (request instanceof HttpContent) { - // Offer automatically if the given request is als type of HttpContent - // See #1089 - offer((HttpContent) request); - } else { - parseBody(); + try { + if (request instanceof HttpContent) { + // Offer automatically if the given request is als type of HttpContent + // See #1089 + offer((HttpContent) request); + } else { + parseBody(); + } + } catch (Throwable e) { + destroy(); + PlatformDependent.throwException(e); } }
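The fix wraps the constructor body so that a throwing constructor cannot leak what it has already allocated; the same pattern in isolation, on a hypothetical class rather than the actual decoder:

```java
import io.netty.buffer.ByteBuf;

public final class OwningParser {
    private final ByteBuf copy;

    public OwningParser(ByteBuf input) {
        copy = input.copy();    // the constructor takes ownership of a resource
        try {
            parse();            // may throw on malformed input
        } catch (Throwable t) {
            copy.release();     // undo ownership instead of leaking
            throw t;            // precise rethrow; fine since parse() declares nothing checked
        }
    }

    private void parse() { /* ... */ }
}
```

The actual patch rethrows via `PlatformDependent.throwException(e)`, Netty's utility for rethrowing a `Throwable` without declaring it.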
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java index 696c2cb20a9..419e93beb1d 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java @@ -15,11 +15,13 @@ */ package io.netty.handler.codec.http.multipart; +import io.netty.buffer.Unpooled; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpVersion; +import io.netty.util.CharsetUtil; import org.junit.Test; import static org.junit.Assert.assertTrue; @@ -55,4 +57,28 @@ public void testDecodeFullHttpRequestWithInvalidCharset() { assertTrue(req.release()); } } + + @Test + public void testDecodeFullHttpRequestWithInvalidPayloadReleaseBuffer() { + String content = "\n--861fbeab-cd20-470c-9609-d40a0f704466\n" + + "Content-Disposition: form-data; name=\"image1\"; filename*=\"'some.jpeg\"\n" + + "Content-Type: image/jpeg\n" + + "Content-Length: 1\n" + + "x\n" + + "--861fbeab-cd20-470c-9609-d40a0f704466--\n"; + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload", + Unpooled.copiedBuffer(content, CharsetUtil.US_ASCII)); + req.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); + req.headers().set("content-length", content.length()); + + try { + new HttpPostMultipartRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException expected) { + // expected + } finally { + assertTrue(req.release()); + } + } }
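For context on why this payload must be rejected: `filename*` uses the RFC 5987 extended-parameter syntax, in which the value is unquoted and must begin with a charset, for example:

```
Content-Disposition: form-data; name="image1"; filename*=UTF-8''some.jpeg
```

`filename*="'some.jpeg"` satisfies neither rule, so the decoder throws `ErrorDataDecoderException`, and the real point of the test is that the request buffer is still released afterwards.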
test
test
"2021-04-27T16:37:37"
"2021-04-20T16:40:17Z"
jameskleeh
val
netty/netty/11240_11245
netty/netty
netty/netty/11240
netty/netty/11245
[ "keyword_pr_to_issue" ]
d984d4422ca36aac4ddcdb54362f12b589e52f71
11e6a77fba9ec7184a558d869373d0ce506d7236
[ "@normanmaurer I can provide a PR for this.", "@hyperxpro sure lets do it :)" ]
[ "I think to be safe you want to use something like :\n\nreplaceAll(\"[^\\\\w.-]\", \"_\");", "With: `fqdn = fqdn.replaceFirst(\"\\\\*\", \"X\");`, Output: `X.shieldblaze.com`\r\nWith: `fqdn = fqdn.replaceAll(\"[^\\\\w.-]\", \"_\");`, Output: `_.shieldblaze.com`\r\n\r\nBoth are acceptable in terms of safety. However, we're already appending `_` (underscore) after `keyutil`. We'll end up with 2 underscores.\r\nhttps://github.com/netty/netty/blob/86765c840cf85284780a0be90191fc530c73ce05/handler/src/main/java/io/netty/handler/ssl/util/SelfSignedCertificate.java#L368\r\n\r\nWhich one is good? @normanmaurer @chrisvest ", "I think we should use `fqdn.replaceAll(\"[^\\\\w.-]\", \"x\")`", "Done, PTAL.", "why only `replaceFirst` and not `replaceAll` ? ", "Well, wildcards are expected to have an asterisk at first char. Like this: `*.netty.io`. However, invalid fqdn such as `*.me*ow.io` is an invalid hostname. And `SelfSignedCertificate` must throw an exception.", "true... that said I think it may be better safe then sorry and so I would just use `replaceAll` and call it a day ", "Alright, will change to `replaceAll`. :)" ]
"2021-05-11T16:10:17Z"
[]
SelfSignedCertificate should use valid characters for file names
### Expected behavior `SelfSignedCertificate` should filter out the asterisk `*` when saving the certificate and private key files. ### Actual behavior `SelfSignedCertificate` creates certificate and private key files and stores them in a temporary directory. However, if the certificate uses a wildcard hostname containing an asterisk `*`, e.g. `*.shieldblaze.com`, it throws an error because `*` is not a valid character in file names. ``` 11 May 2021 14:34:22.457 [main] DEBUG i.n.h.s.u.SelfSignedCertificate - Failed to generate a self-signed X.509 certificate using sun.security.x509: java.nio.file.InvalidPathException: Illegal char <*> at index 8: keyutil_*.shieldblaze.com_4376271060691248819.key at sun.nio.fs.WindowsPathParser.normalize(WindowsPathParser.java:182) ~[?:?] at sun.nio.fs.WindowsPathParser.parse(WindowsPathParser.java:153) ~[?:?] at sun.nio.fs.WindowsPathParser.parse(WindowsPathParser.java:77) ~[?:?] at sun.nio.fs.WindowsPath.parse(WindowsPath.java:92) ~[?:?] at sun.nio.fs.WindowsFileSystem.getPath(WindowsFileSystem.java:229) ~[?:?] at java.nio.file.TempFileHelper.generatePath(TempFileHelper.java:59) ~[?:?] at java.nio.file.TempFileHelper.create(TempFileHelper.java:126) ~[?:?] at java.nio.file.TempFileHelper.createTempFile(TempFileHelper.java:160) ~[?:?] at java.nio.file.Files.createTempFile(Files.java:913) ~[?:?] at io.netty.util.internal.PlatformDependent.createTempFile(PlatformDependent.java:1423) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.SelfSignedCertificate.newSelfSignedCertificate(SelfSignedCertificate.java:335) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator.generate(OpenJdkSelfSignedCertGenerator.java:82) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.SelfSignedCertificate.<init>(SelfSignedCertificate.java:241) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.SelfSignedCertificate.<init>(SelfSignedCertificate.java:167) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.SelfSignedCertificate.<init>(SelfSignedCertificate.java:137) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at com.shieldblaze.expressgateway.configuration.tls.TLSConfigurationTest.addMappingWildcardTest(TLSConfigurationTest.java:84) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:566) ~[?:?]
... (intermediate JUnit 5 platform frames elided as repeated boilerplate) ...
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.invokeAll(SameThreadHierarchicalTestExecutorService.java:38) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$5(NodeTestTask.java:143) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$7(NodeTestTask.java:129) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:137) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:127) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:126) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:84) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.submit(SameThreadHierarchicalTestExecutorService.java:32) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.HierarchicalTestExecutor.execute(HierarchicalTestExecutor.java:57) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.HierarchicalTestEngine.execute(HierarchicalTestEngine.java:51) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:108) ~[junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:88) ~[junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.lambda$execute$0(EngineExecutionOrchestrator.java:54) ~[junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.withInterceptedStreams(EngineExecutionOrchestrator.java:67) [junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:52) [junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:96) [junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:75) [junit-platform-launcher-1.7.0.jar:1.7.0] at com.intellij.junit5.JUnit5IdeaTestRunner.startRunnerWithArgs(JUnit5IdeaTestRunner.java:71) [junit5-rt.jar:?] at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:33) [junit-rt.jar:?] at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:221) [junit-rt.jar:?] at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:54) [junit-rt.jar:?] 
11 May 2021 14:34:24.018 [main] DEBUG i.n.h.s.u.SelfSignedCertificate - Failed to generate a self-signed X.509 certificate using Bouncy Castle: java.nio.file.InvalidPathException: Illegal char <*> at index 8: keyutil_*.shieldblaze.com_2205665342087087069.key at sun.nio.fs.WindowsPathParser.normalize(WindowsPathParser.java:182) ~[?:?] at sun.nio.fs.WindowsPathParser.parse(WindowsPathParser.java:153) ~[?:?] at sun.nio.fs.WindowsPathParser.parse(WindowsPathParser.java:77) ~[?:?] at sun.nio.fs.WindowsPath.parse(WindowsPath.java:92) ~[?:?] at sun.nio.fs.WindowsFileSystem.getPath(WindowsFileSystem.java:229) ~[?:?] at java.nio.file.TempFileHelper.generatePath(TempFileHelper.java:59) ~[?:?] at java.nio.file.TempFileHelper.create(TempFileHelper.java:126) ~[?:?] at java.nio.file.TempFileHelper.createTempFile(TempFileHelper.java:160) ~[?:?] at java.nio.file.Files.createTempFile(Files.java:913) ~[?:?] at io.netty.util.internal.PlatformDependent.createTempFile(PlatformDependent.java:1423) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.SelfSignedCertificate.newSelfSignedCertificate(SelfSignedCertificate.java:335) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator.generate(BouncyCastleSelfSignedCertGenerator.java:60) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.SelfSignedCertificate.<init>(SelfSignedCertificate.java:246) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.SelfSignedCertificate.<init>(SelfSignedCertificate.java:167) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at io.netty.handler.ssl.util.SelfSignedCertificate.<init>(SelfSignedCertificate.java:137) ~[netty-all-4.1.63.Final.jar:4.1.63.Final] at com.shieldblaze.expressgateway.configuration.tls.TLSConfigurationTest.addMappingWildcardTest(TLSConfigurationTest.java:84) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:566) ~[?:?] 
... (intermediate JUnit 5 platform frames elided; identical to the first stack trace) ...
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.invokeAll(SameThreadHierarchicalTestExecutorService.java:38) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$5(NodeTestTask.java:143) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$7(NodeTestTask.java:129) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:137) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:127) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:126) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:84) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.submit(SameThreadHierarchicalTestExecutorService.java:32) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.HierarchicalTestExecutor.execute(HierarchicalTestExecutor.java:57) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.engine.support.hierarchical.HierarchicalTestEngine.execute(HierarchicalTestEngine.java:51) ~[junit-platform-engine-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:108) ~[junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:88) ~[junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.lambda$execute$0(EngineExecutionOrchestrator.java:54) ~[junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.withInterceptedStreams(EngineExecutionOrchestrator.java:67) [junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:52) [junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:96) [junit-platform-launcher-1.7.0.jar:1.7.0] at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:75) [junit-platform-launcher-1.7.0.jar:1.7.0] at com.intellij.junit5.JUnit5IdeaTestRunner.startRunnerWithArgs(JUnit5IdeaTestRunner.java:71) [junit5-rt.jar:?] at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:33) [junit-rt.jar:?] at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:221) [junit-rt.jar:?] at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:54) [junit-rt.jar:?] ``` ### Steps to reproduce ```java SelfSignedCertificate ssc = new SelfSignedCertificate("*.shieldblaze.com", "EC", 256); ``` ### Netty version 4.1.63.Final ### JVM version (e.g. 
`java -version`) openjdk version "1.8.0_292" OpenJDK Runtime Environment Corretto-8.292.10.1 (build 1.8.0_292-b10) OpenJDK 64-Bit Server VM Corretto-8.292.10.1 (build 25.292-b10, mixed mode) ### OS version (e.g. `uname -a`) Windows 10 Pro x64
[ "handler/src/main/java/io/netty/handler/ssl/util/SelfSignedCertificate.java" ]
[ "handler/src/main/java/io/netty/handler/ssl/util/SelfSignedCertificate.java" ]
[ "handler/src/test/java/io/netty/handler/ssl/util/SelfSignedCertificateTest.java" ]
diff --git a/handler/src/main/java/io/netty/handler/ssl/util/SelfSignedCertificate.java b/handler/src/main/java/io/netty/handler/ssl/util/SelfSignedCertificate.java index 46a28d272f6..be57cd60caf 100644 --- a/handler/src/main/java/io/netty/handler/ssl/util/SelfSignedCertificate.java +++ b/handler/src/main/java/io/netty/handler/ssl/util/SelfSignedCertificate.java @@ -332,6 +332,9 @@ static String[] newSelfSignedCertificate( wrappedBuf.release(); } + // Change all asterisk to 'x' for file name safety. + fqdn = fqdn.replaceAll("[^\\w.-]", "x"); + File keyFile = PlatformDependent.createTempFile("keyutil_" + fqdn + '_', ".key", null); keyFile.deleteOnExit();
diff --git a/handler/src/test/java/io/netty/handler/ssl/util/SelfSignedCertificateTest.java b/handler/src/test/java/io/netty/handler/ssl/util/SelfSignedCertificateTest.java new file mode 100644 index 00000000000..59ca37635bc --- /dev/null +++ b/handler/src/test/java/io/netty/handler/ssl/util/SelfSignedCertificateTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.ssl.util; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; + +import java.security.cert.CertificateException; + +import static org.junit.jupiter.api.Assertions.*; + +class SelfSignedCertificateTest { + + @Test + void fqdnAsteriskDoesNotThrowTest() { + assertDoesNotThrow(new Executable() { + @Override + public void execute() throws Throwable { + new SelfSignedCertificate("*.netty.io", "EC", 256); + } + }); + + assertDoesNotThrow(new Executable() { + @Override + public void execute() throws Throwable { + new SelfSignedCertificate("*.netty.io", "RSA", 2048); + } + }); + } + + @Test + void fqdnAsteriskFileNameTest() throws CertificateException { + SelfSignedCertificate ssc = new SelfSignedCertificate("*.netty.io", "EC", 256); + assertFalse(ssc.certificate().getName().contains("*")); + assertFalse(ssc.privateKey().getName().contains("*")); + } +}
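With the patch applied, the wildcard remains in the certificate's subject and only the temporary file names are sanitized. A hedged usage sketch (the exact temp-file naming is an implementation detail and may vary):

```java
import io.netty.handler.ssl.util.SelfSignedCertificate;

import java.security.cert.CertificateException;

final class WildcardCertSketch {
    public static void main(String[] args) throws CertificateException {
        SelfSignedCertificate ssc = new SelfSignedCertificate("*.netty.io", "EC", 256);
        // The X.509 subject keeps the wildcard ...
        System.out.println(ssc.cert().getSubjectDN());    // e.g. CN=*.netty.io
        // ... but the files written to the temp directory no longer contain '*'.
        System.out.println(ssc.certificate().getName());
        System.out.println(ssc.privateKey().getName());
        ssc.delete();                                     // clean up the temp files
    }
}
```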
train
test
"2021-05-11T14:07:15"
"2021-05-11T09:11:56Z"
hyperxpro
val
netty/netty/10834_11246
netty/netty
netty/netty/10834
netty/netty/11246
[ "keyword_pr_to_issue" ]
d984d4422ca36aac4ddcdb54362f12b589e52f71
fa8f7a3510d0dbb57488272cf5ba92cca8076794
[ "That seems none of Netty`s business, cause just connect 127.0.0.1:37655 successfully but not 127.0.1.1:37655", "@violetagg can you connect to the service via telnet on `127.0.1.1` ?", "> @violetagg can you connect to the service via telnet on `127.0.1.1` ?\r\n\r\n```\r\n# python -m SimpleHTTPServer 8000 &> /dev/null & pid=$!\r\n[1] 1143\r\n# telnet 127.0.1.1 8000\r\nTrying 127.0.1.1...\r\nConnected to 127.0.1.1.\r\nEscape character is '^]'.\r\n^C\r\nConnection closed by foreign host.\r\n# telnet 127.0.0.1 8000\r\nTrying 127.0.0.1...\r\nConnected to 127.0.0.1.\r\nEscape character is '^]'.\r\n^C\r\nConnection closed by foreign host.\r\n# telnet localhost 8000\r\nTrying 127.0.0.1...\r\nConnected to localhost.\r\nEscape character is '^]'.\r\n^C\r\nConnection closed by foreign host.\r\n```", "Thanks, @violetagg. I've also added the test case to [the repo](https://github.com/hisener/playground/blob/master/spring-webclient-connection-failure-test/src/test/java/com/github/hisener/spring/webclient/WebClientTest.java#L83). Do you have some time to have a look at this issue? @normanmaurer \r\n\r\n```\r\n[ERROR] Failures: \r\n[ERROR] WebClientTest.reactorNetty expectation \"assertNext\" failed (expected: onNext(); actual: onError(io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: localhost/127.0.1.1:42891))\r\n[ERROR] WebClientTest.webClient expectation \"expectComplete\" failed (expected: onComplete(); actual: onError(org.springframework.web.reactive.function.client.WebClientRequestException: finishConnect(..) failed: Connection refused: localhost/127.0.1.1:42891; nested exception is io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: localhost/127.0.1.1:42891))\r\n[ERROR] Errors: \r\n[ERROR] WebClientTest.nettyClient » AnnotatedConnect Connection refused: localhost/127...\r\n[INFO] \r\n[ERROR] Tests run: 4, Failures: 2, Errors: 1, Skipped: 0\r\n```\r\nhttps://travis-ci.com/github/hisener/playground/builds/209949385#L1163-L1169", "@normanmaurer The current Netty implementations returns always only one address for `localhost` when `resolveAll` is invoked. 
If Netty is able to return all resolved addresses for `localhost`, Reactor Netty will be able to iterate over the result.\r\n\r\nhttps://github.com/netty/netty/blob/6724786dcc9fa38ba516dab97a04e2bbc17e81d9/resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java#L46-L63\r\n\r\nhttps://github.com/netty/netty/blob/6724786dcc9fa38ba516dab97a04e2bbc17e81d9/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java#L1023\r\n\r\nhttps://github.com/netty/netty/blob/6724786dcc9fa38ba516dab97a04e2bbc17e81d9/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java#L684-L697\r\n\r\nAlso JDK's `HostsFileNameService` returns all entries IPv4/IPv6 for `localhost`, while Netty's `HostsFileParser` always returns the first for IPv4/IPv6.\r\nhttps://github.com/openjdk/jdk/blob/e8eda655bb27ab1b8d806a64622ea7ee9719a85d/src/java.base/share/classes/java/net/InetAddress.java#L1047-L1054\r\n\r\nhttps://github.com/netty/netty/blob/6724786dcc9fa38ba516dab97a04e2bbc17e81d9/resolver/src/main/java/io/netty/resolver/HostsFileParser.java#L198-L209\r\n\r\n\r\nHere is a simple test and its results:\r\n\r\n```\r\n@Test\r\nvoid test() throws Exception {\r\n InetAddress[] addresses = InetAddress.getAllByName(\"localhost\");\r\n System.out.println(\"JDK \" + Arrays.asList(addresses));\r\n\r\n\r\n NioEventLoopGroup group = new NioEventLoopGroup();\r\n try {\r\n DefaultAddressResolverGroup.INSTANCE\r\n .getResolver(group.next())\r\n .resolveAll(InetSocketAddress.createUnresolved(\"localhost\", 80))\r\n .addListener(future -> System.out.println(\"Netty DefaultAddressResolverGroup \" + future.get()));\r\n\r\n DnsNameResolverBuilder builder = new DnsNameResolverBuilder(group.next())\r\n .channelType(NioDatagramChannel.class);\r\n DnsNameResolver resolver = builder.build();\r\n resolver.resolveAll(\"localhost\")\r\n .addListener(future -> System.out.println(\"Netty DnsNameResolver \" + future.get()));\r\n }\r\n finally {\r\n group.shutdownGracefully();\r\n }\r\n}\r\n```\r\n\r\n```\r\nJDK [localhost/127.0.0.1, localhost/127.0.1.1]\r\nNetty DefaultAddressResolverGroup [localhost/127.0.0.1:80, localhost/127.0.1.1:80]\r\nNetty DnsNameResolver [localhost/127.0.1.1]\r\n```\r\n\r\nAlso I'm seeing that when I use JDK's `PlatformNameService`, the result is sorted, while when I force the usage of `HostsFileNameService`, they are not.\r\n\r\nSo I need a method that returns all entries for `localhost` like in JDK's implementation (preferably sorted). If it is not possible to modify the current interface (io.netty.resolver.HostsFileEntriesResolver), is it possible to introduce a new one? May be even `completeOncePreferredResolved` can be used.", "@violetagg I think we don't want to break the API ... So maybe add a \"sup-interface\" and implement it in our implementation ? We could expose a method like `List<InetAddress> addressess(...)` or something like this ", "or we could just add an extra method to DefaultHostsFileEntriesResolver and do an instanceof check and call the extra method. ", "> or we could just add an extra method to DefaultHostsFileEntriesResolver and do an instanceof check and call the extra method.\r\n\r\nEven with `sup-interface` we will need `instance of` so may be let's add this functionality to `DefaultHostsFileEntriesResolver`, if later somebody wants also such functionality, we can always add `sup-interface`", "I see similar weird behavior when using Redisson 3.15.3 on top of Netty. 
Netty says:\r\n```\r\n1376 [main] [DEBUG] Loopback interface: lo1 (lo1, 127.0.1.1)\r\n```\r\nwhile `ifconfig` says:\r\n```\r\n$ ifconfig lo0\r\nlo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384\r\n options=680003<RXCSUM,TXCSUM,LINKSTATE,RXCSUM_IPV6,TXCSUM_IPV6>\r\n inet6 ::1 prefixlen 128\r\n inet6 fe80::1%lo0 prefixlen 64 scopeid 0x5\r\n inet 127.0.0.1 netmask 0xff000000\r\n groups: lo\r\n nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>\r\nosipovmi@deblndw013x:~/var/Projekte/MRESOLVER-25\r\n$ ifconfig lo1\r\nlo1: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384\r\n options=680003<RXCSUM,TXCSUM,LINKSTATE,RXCSUM_IPV6,TXCSUM_IPV6>\r\n inet 127.0.1.1 netmask 0xffffffff\r\n groups: lo\r\n nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>\r\n```\r\nNote that host `deblndw013x` uses `lo0` while `lo1` is a cloned loopback interface passed onto jails with a shared network stack as the jailhost.\r\n\r\nRedis listens on:\r\n```\r\n$ sockstat | grep redis\r\nmenz_i redis-serv 77878 6 tcp4 127.0.0.1:6379 *:*\r\n```", "@normanmaurer PTAL #11246 , I made some tricks to keep the backwards compatibility." ]
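The first-entry-only behaviour described in the hints is easy to reproduce with the pre-fix parser. A small sketch (class name is made up) feeding the hosts entries from the issue into `HostsFileParser.parse(Reader)`:

```java
import io.netty.resolver.HostsFileEntries;
import io.netty.resolver.HostsFileParser;

import java.io.IOException;
import java.io.StringReader;

final class HostsFileParserDemo {
    public static void main(String[] args) throws IOException {
        HostsFileEntries entries = HostsFileParser.parse(
                new StringReader("127.0.1.1 localhost\n127.0.0.1 localhost\n"));
        // Only the first entry per hostname survives, so 127.0.0.1 is dropped:
        System.out.println(entries.inet4Entries().get("localhost")); // localhost/127.0.1.1
    }
}
```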
[ "nit: you can remove the else as we return in the if.", "nit: should we use a different initial capacity ?", "please add javadocs", "imho it would be better to use `new ArrayList<InetAddress>(a.size() + (b == null ? 0 : b.size()));`. Otherwise you may need to expand the array after creation ", "imho its a bit strange that you can modify the `ParserImpl` and `parse()` depends on the \"state\". Either make this a proper builder or change `parse(...)` to take arguments (which I think may be the best). ", "I was thinking about that but it is not guaranteed that for every `InetAddress` from `hostsFileEntries` we will create a `DnsRecord`", "fixed", "fixed", "fixed", "fixed", "@violetagg I think the two methods above should not exists but this should be just arguments to `parse`", "Not really fixed... I think both methods above should also not exist.", "So instead of configuration I should add the same signature as in `HostsFileParser`, is that correct?\r\n\r\n```\r\nparseSilently()\r\nparseSilently(Charset... charsets)\r\nparse()\r\nparse(File file)\r\nparse(File file, Charset... charsets)\r\n```", "yes... Or alternative just one which take all parameter and allow \"null\".", "Please check the new commit", "nit: either we should tell that this method returns `null` as well or return an empty List in this case. ", "fixed\r\n", "@violetagg question... should we use a `LinkedHashMap` here or doesn't it matter ?", "I don't think that we have a use case for `LinkedHashMap` where we will need the insertion order. We always need the info for one particular `hostname`", "nit: Since it is used more than once and has context in comments, should we extract this longish condition and comment in a method like `isLocalhost()`? ", "let me do that", "please check the new commit" ]
"2021-05-11T18:25:27Z"
[]
AnnotatedConnectException: Connection refused: localhost/127.0.1.1:37655
When there is more than one entry for localhost in `/etc/hosts`, the connection is refused. This happens only when using `DnsAddressResolverGroup`. If `DefaultAddressResolverGroup` is used, there is no problem with connection establishment. ``` # /etc/hosts 127.0.1.1 localhost 127.0.0.1 localhost ``` If `127.0.0.1` comes first, there is no problem: ``` # /etc/hosts 127.0.0.1 localhost 127.0.1.1 localhost ``` Original issue https://github.com/reactor/reactor-netty/issues/1405 ### Expected behavior Connection to be established regardless of the `AddressResolverGroup` that is used. ### Actual behavior Exception is thrown with `DnsAddressResolverGroup`: `AnnotatedConnectException: Connection refused: localhost/127.0.1.1:37655` ### Steps to reproduce A reproducible example and steps are described in the issue https://github.com/reactor/reactor-netty/issues/1405 I added an additional test to the existing test cases here https://github.com/hisener/playground/blob/master/spring-webclient-connection-failure-test/src/test/java/com/github/hisener/spring/webclient/WebClientTest.java ``` @RepeatedTest(10) void nettyClient() throws Exception { NioEventLoopGroup group = new NioEventLoopGroup(); try { Bootstrap bootstrap = new Bootstrap(); bootstrap.group(group) .channel(NioSocketChannel.class) // There is no issue with DefaultAddressResolverGroup //.resolver(DefaultAddressResolverGroup.INSTANCE) .resolver(new DnsAddressResolverGroup(new DnsNameResolverBuilder(group.next()).channelType(NioDatagramChannel.class))) .handler(new ChannelInitializer<SocketChannel>() { @Override protected void initChannel(SocketChannel socketChannel) { socketChannel.pipeline() .addLast(new HttpClientCodec()); } }); Channel ch = bootstrap.connect(baseUrl.getHost(), baseUrl.getPort()) .sync() .channel(); ch.close(); } finally { group.shutdownGracefully(); } } ``` ### Netty version 4.1.55.Final-SNAPSHOT ### JVM version (e.g. `java -version`) openjdk version "11.0.9.1" 2020-11-04 OpenJDK Runtime Environment 18.9 (build 11.0.9.1+1) OpenJDK 64-Bit Server VM 18.9 (build 11.0.9.1+1, mixed mode) ### OS version (e.g. `uname -a`) Linux b732417723fd 5.4.39-linuxkit #1 SMP Fri May 8 23:03:06 UTC 2020 x86_64 GNU/Linux
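For context, the behaviour the reporter ultimately needs is a resolver that surfaces every address so a client can fall back to the next one when a connection attempt fails. A hedged sketch of such a fallback loop (`connectAny` is a hypothetical helper, not Netty API):

```java
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.resolver.dns.DnsNameResolver;

import java.net.ConnectException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.List;

final class FallbackConnect {
    // Hypothetical helper: try each address resolveAll() returns, in order.
    static Channel connectAny(Bootstrap bootstrap, DnsNameResolver resolver,
                              String host, int port) throws Exception {
        List<InetAddress> addresses = resolver.resolveAll(host).sync().getNow();
        for (InetAddress address : addresses) {
            ChannelFuture f = bootstrap.connect(new InetSocketAddress(address, port))
                                       .awaitUninterruptibly();
            if (f.isSuccess()) {
                return f.channel();
            }
        }
        throw new ConnectException(host + ": no resolved address accepted the connection");
    }
}
```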
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java", "resolver/src/main/java/io/netty/resolver/HostsFileEntries.java", "resolver/src/main/java/io/netty/resolver/HostsFileParser.java" ]
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java", "resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java", "resolver/src/main/java/io/netty/resolver/HostsFileEntries.java", "resolver/src/main/java/io/netty/resolver/HostsFileEntriesProvider.java", "resolver/src/main/java/io/netty/resolver/HostsFileParser.java" ]
[ "resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java", "resolver/src/test/java/io/netty/resolver/HostsFileEntriesProviderTest.java" ]
diff --git a/common/src/main/java/io/netty/util/internal/Hidden.java b/common/src/main/java/io/netty/util/internal/Hidden.java index abef31287cc..da64a8b1c21 100644 --- a/common/src/main/java/io/netty/util/internal/Hidden.java +++ b/common/src/main/java/io/netty/util/internal/Hidden.java @@ -128,7 +128,7 @@ public void applyTo(BlockHound.Builder builder) { "parseEtcResolverOptions"); builder.allowBlockingCallsInside( - "io.netty.resolver.HostsFileParser", + "io.netty.resolver.HostsFileEntriesProvider$ParserImpl", "parse"); builder.nonBlockingThreadPredicate(new Function<Predicate<Thread>, Predicate<Thread>>() { diff --git a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java index a3a514f2039..76518f188c0 100644 --- a/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java +++ b/resolver-dns/src/main/java/io/netty/resolver/dns/DnsNameResolver.java @@ -46,6 +46,7 @@ import io.netty.handler.codec.dns.DnsResponse; import io.netty.handler.codec.dns.TcpDnsQueryEncoder; import io.netty.handler.codec.dns.TcpDnsResponseDecoder; +import io.netty.resolver.DefaultHostsFileEntriesResolver; import io.netty.resolver.HostsFileEntries; import io.netty.resolver.HostsFileEntriesResolver; import io.netty.resolver.InetNameResolver; @@ -693,20 +694,38 @@ protected EventLoop executor() { private InetAddress resolveHostsFileEntry(String hostname) { if (hostsFileEntriesResolver == null) { return null; + } + InetAddress address = hostsFileEntriesResolver.address(hostname, resolvedAddressTypes); + return address == null && isLocalWindowsHost(hostname) ? LOCALHOST_ADDRESS : address; + } + + private List<InetAddress> resolveHostsFileEntries(String hostname) { + if (hostsFileEntriesResolver == null) { + return null; + } + List<InetAddress> addresses; + if (hostsFileEntriesResolver instanceof DefaultHostsFileEntriesResolver) { + addresses = ((DefaultHostsFileEntriesResolver) hostsFileEntriesResolver) + .addresses(hostname, resolvedAddressTypes); } else { InetAddress address = hostsFileEntriesResolver.address(hostname, resolvedAddressTypes); - if (address == null && PlatformDependent.isWindows() && - (LOCALHOST.equalsIgnoreCase(hostname) || - (WINDOWS_HOST_NAME != null && WINDOWS_HOST_NAME.equalsIgnoreCase(hostname)))) { - // If we tried to resolve localhost we need workaround that windows removed localhost from its - // hostfile in later versions. - // See https://github.com/netty/netty/issues/5386 - // Need a workaround for resolving the host (computer) name in case it cannot be resolved from hostfile - // See https://github.com/netty/netty/issues/11142 - return LOCALHOST_ADDRESS; - } - return address; + addresses = address != null ? Collections.singletonList(address) : null; } + return addresses == null && isLocalWindowsHost(hostname) ? + Collections.singletonList(LOCALHOST_ADDRESS) : addresses; + } + + /** + * Checks whether the given hostname is the localhost/host (computer) name on Windows OS. + * Windows OS removed the localhost/host (computer) name information from the hosts file in the later versions + * and such hostname cannot be resolved from hosts file. 
+ * See https://github.com/netty/netty/issues/5386 + * See https://github.com/netty/netty/issues/11142 + */ + private static boolean isLocalWindowsHost(String hostname) { + return PlatformDependent.isWindows() && + (LOCALHOST.equalsIgnoreCase(hostname) || + (WINDOWS_HOST_NAME != null && WINDOWS_HOST_NAME.equalsIgnoreCase(hostname))); } /** @@ -840,24 +859,29 @@ private Future<List<DnsRecord>> resolveAll(DnsQuestion question, DnsRecord[] add final String hostname = question.name(); if (type == DnsRecordType.A || type == DnsRecordType.AAAA) { - final InetAddress hostsFileEntry = resolveHostsFileEntry(hostname); - if (hostsFileEntry != null) { - ByteBuf content = null; - if (hostsFileEntry instanceof Inet4Address) { - if (type == DnsRecordType.A) { - content = Unpooled.wrappedBuffer(hostsFileEntry.getAddress()); + final List<InetAddress> hostsFileEntries = resolveHostsFileEntries(hostname); + if (hostsFileEntries != null) { + List<DnsRecord> result = new ArrayList<DnsRecord>(); + for (InetAddress hostsFileEntry : hostsFileEntries) { + ByteBuf content = null; + if (hostsFileEntry instanceof Inet4Address) { + if (type == DnsRecordType.A) { + content = Unpooled.wrappedBuffer(hostsFileEntry.getAddress()); + } + } else if (hostsFileEntry instanceof Inet6Address) { + if (type == DnsRecordType.AAAA) { + content = Unpooled.wrappedBuffer(hostsFileEntry.getAddress()); + } } - } else if (hostsFileEntry instanceof Inet6Address) { - if (type == DnsRecordType.AAAA) { - content = Unpooled.wrappedBuffer(hostsFileEntry.getAddress()); + if (content != null) { + // Our current implementation does not support reloading the hosts file, + // so use a fairly large TTL (1 day, i.e. 86400 seconds). + result.add(new DefaultDnsRawRecord(hostname, type, 86400, content)); } } - if (content != null) { - // Our current implementation does not support reloading the hosts file, - // so use a fairly large TTL (1 day, i.e. 86400 seconds). 
- trySuccess(promise, Collections.<DnsRecord>singletonList( - new DefaultDnsRawRecord(hostname, type, 86400, content))); + if (!result.isEmpty()) { + trySuccess(promise, result); return promise; } } @@ -1033,9 +1057,9 @@ protected void doResolveAll(String inetHost, final String hostname = hostname(inetHost); - InetAddress hostsFileEntry = resolveHostsFileEntry(hostname); - if (hostsFileEntry != null) { - promise.setSuccess(Collections.singletonList(hostsFileEntry)); + List<InetAddress> hostsFileEntries = resolveHostsFileEntries(hostname); + if (hostsFileEntries != null) { + promise.setSuccess(hostsFileEntries); return; } diff --git a/resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java b/resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java index 5063c8006ed..378fd98d87e 100644 --- a/resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java +++ b/resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java @@ -18,10 +18,10 @@ import io.netty.util.CharsetUtil; import io.netty.util.internal.PlatformDependent; -import java.net.Inet4Address; -import java.net.Inet6Address; import java.net.InetAddress; import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.List; import java.util.Locale; import java.util.Map; @@ -30,21 +30,47 @@ */ public final class DefaultHostsFileEntriesResolver implements HostsFileEntriesResolver { - private final Map<String, Inet4Address> inet4Entries; - private final Map<String, Inet6Address> inet6Entries; + private final Map<String, List<InetAddress>> inet4Entries; + private final Map<String, List<InetAddress>> inet6Entries; public DefaultHostsFileEntriesResolver() { this(parseEntries()); } // for testing purpose only - DefaultHostsFileEntriesResolver(HostsFileEntries entries) { - inet4Entries = entries.inet4Entries(); - inet6Entries = entries.inet6Entries(); + DefaultHostsFileEntriesResolver(HostsFileEntriesProvider entries) { + inet4Entries = entries.ipv4Entries(); + inet6Entries = entries.ipv6Entries(); } @Override public InetAddress address(String inetHost, ResolvedAddressTypes resolvedAddressTypes) { + String normalized = normalize(inetHost); + switch (resolvedAddressTypes) { + case IPV4_ONLY: + return firstAddress(inet4Entries.get(normalized)); + case IPV6_ONLY: + return firstAddress(inet6Entries.get(normalized)); + case IPV4_PREFERRED: + InetAddress inet4Address = firstAddress(inet4Entries.get(normalized)); + return inet4Address != null ? inet4Address : firstAddress(inet6Entries.get(normalized)); + case IPV6_PREFERRED: + InetAddress inet6Address = firstAddress(inet6Entries.get(normalized)); + return inet6Address != null ? inet6Address : firstAddress(inet4Entries.get(normalized)); + default: + throw new IllegalArgumentException("Unknown ResolvedAddressTypes " + resolvedAddressTypes); + } + } + + /** + * Resolves all addresses of a hostname against the entries in a hosts file, depending on the specified + * {@link ResolvedAddressTypes}. 
+ * + * @param inetHost the hostname to resolve + * @param resolvedAddressTypes the address types to resolve + * @return all matching addresses or {@code null} in case the hostname cannot be resolved + */ + public List<InetAddress> addresses(String inetHost, ResolvedAddressTypes resolvedAddressTypes) { String normalized = normalize(inetHost); switch (resolvedAddressTypes) { case IPV4_ONLY: @@ -52,11 +78,13 @@ public InetAddress address(String inetHost, ResolvedAddressTypes resolvedAddress case IPV6_ONLY: return inet6Entries.get(normalized); case IPV4_PREFERRED: - Inet4Address inet4Address = inet4Entries.get(normalized); - return inet4Address != null? inet4Address : inet6Entries.get(normalized); + List<InetAddress> allInet4Addresses = inet4Entries.get(normalized); + return allInet4Addresses != null ? allAddresses(allInet4Addresses, inet6Entries.get(normalized)) : + inet6Entries.get(normalized); case IPV6_PREFERRED: - Inet6Address inet6Address = inet6Entries.get(normalized); - return inet6Address != null? inet6Address : inet4Entries.get(normalized); + List<InetAddress> allInet6Addresses = inet6Entries.get(normalized); + return allInet6Addresses != null ? allAddresses(allInet6Addresses, inet4Entries.get(normalized)) : + inet4Entries.get(normalized); default: throw new IllegalArgumentException("Unknown ResolvedAddressTypes " + resolvedAddressTypes); } @@ -67,13 +95,27 @@ String normalize(String inetHost) { return inetHost.toLowerCase(Locale.ENGLISH); } - private static HostsFileEntries parseEntries() { + private static List<InetAddress> allAddresses(List<InetAddress> a, List<InetAddress> b) { + List<InetAddress> result = new ArrayList<InetAddress>(a.size() + (b == null ? 0 : b.size())); + result.addAll(a); + if (b != null) { + result.addAll(b); + } + return result; + } + + private static InetAddress firstAddress(List<InetAddress> addresses) { + return addresses != null && !addresses.isEmpty() ? addresses.get(0) : null; + } + + private static HostsFileEntriesProvider parseEntries() { if (PlatformDependent.isWindows()) { // Ony windows there seems to be no standard for the encoding used for the hosts file, so let us // try multiple until we either were able to parse it or there is none left and so we return an - // empty intstance. - return HostsFileParser.parseSilently(Charset.defaultCharset(), CharsetUtil.UTF_16, CharsetUtil.UTF_8); + // empty instance. + return HostsFileEntriesProvider.parser() + .parseSilently(Charset.defaultCharset(), CharsetUtil.UTF_16, CharsetUtil.UTF_8); } - return HostsFileParser.parseSilently(); + return HostsFileEntriesProvider.parser().parseSilently(); } } diff --git a/resolver/src/main/java/io/netty/resolver/HostsFileEntries.java b/resolver/src/main/java/io/netty/resolver/HostsFileEntries.java index dd47bd4c1e2..7c795225520 100644 --- a/resolver/src/main/java/io/netty/resolver/HostsFileEntries.java +++ b/resolver/src/main/java/io/netty/resolver/HostsFileEntries.java @@ -22,7 +22,9 @@ import java.util.Map; /** - * A container of hosts file entries + * A container of hosts file entries. + * The mappings contain only the first entry per hostname. + * Consider using {@link HostsFileEntriesProvider} when mappings with all entries per hostname are needed. 
*/ public final class HostsFileEntries { diff --git a/resolver/src/main/java/io/netty/resolver/HostsFileEntriesProvider.java b/resolver/src/main/java/io/netty/resolver/HostsFileEntriesProvider.java new file mode 100644 index 00000000000..8f0edb1fc6e --- /dev/null +++ b/resolver/src/main/java/io/netty/resolver/HostsFileEntriesProvider.java @@ -0,0 +1,311 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.resolver; + +import io.netty.util.NetUtil; +import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.net.Inet4Address; +import java.net.InetAddress; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.regex.Pattern; + +import static io.netty.util.internal.ObjectUtil.checkNotNull; + +/** + * A container of hosts file entries + */ +public final class HostsFileEntriesProvider { + + public interface Parser { + + /** + * Parses the hosts file at standard OS location using the system default {@link Charset} for decoding. + * + * @return a new {@link HostsFileEntriesProvider} + * @throws IOException file could not be read + */ + HostsFileEntriesProvider parse() throws IOException; + + /** + * Parses the hosts file at standard OS location using the given {@link Charset}s one after another until + * parse something or none is left. + * + * @param charsets the {@link Charset}s to try as file encodings when parsing + * @return a new {@link HostsFileEntriesProvider} + * @throws IOException file could not be read + */ + HostsFileEntriesProvider parse(Charset... charsets) throws IOException; + + /** + * Parses the provided hosts file using the given {@link Charset}s one after another until + * parse something or none is left. In case {@link Charset}s are not provided, + * the system default {@link Charset} is used for decoding. + * + * @param file the file to be parsed + * @param charsets the {@link Charset}s to try as file encodings when parsing, in case {@link Charset}s + * are not provided, the system default {@link Charset} is used for decoding + * @return a new {@link HostsFileEntriesProvider} + * @throws IOException file could not be read + */ + HostsFileEntriesProvider parse(File file, Charset... charsets) throws IOException; + + /** + * Performs the parsing operation using the provided reader of hosts file format. 
+ * + * @param reader the reader of hosts file format + * @return a new {@link HostsFileEntriesProvider} + */ + HostsFileEntriesProvider parse(Reader reader) throws IOException; + + /** + * Parses the hosts file at standard OS location using the system default {@link Charset} for decoding. + * + * @return a new {@link HostsFileEntriesProvider} + */ + HostsFileEntriesProvider parseSilently(); + + /** + * Parses the hosts file at standard OS location using the given {@link Charset}s one after another until + * parse something or none is left. + * + * @param charsets the {@link Charset}s to try as file encodings when parsing + * @return a new {@link HostsFileEntriesProvider} + */ + HostsFileEntriesProvider parseSilently(Charset... charsets); + + /** + * Parses the provided hosts file using the given {@link Charset}s one after another until + * parse something or none is left. In case {@link Charset}s are not provided, + * the system default {@link Charset} is used for decoding. + * + * @param file the file to be parsed + * @param charsets the {@link Charset}s to try as file encodings when parsing, in case {@link Charset}s + * are not provided, the system default {@link Charset} is used for decoding + * @return a new {@link HostsFileEntriesProvider} + */ + HostsFileEntriesProvider parseSilently(File file, Charset... charsets); + } + + /** + * Creates a parser for {@link HostsFileEntriesProvider}. + * + * @return a new {@link HostsFileEntriesProvider.Parser} + */ + public static Parser parser() { + return new ParserImpl(); + } + + static final HostsFileEntriesProvider EMPTY = + new HostsFileEntriesProvider( + Collections.<String, List<InetAddress>>emptyMap(), + Collections.<String, List<InetAddress>>emptyMap()); + + private final Map<String, List<InetAddress>> ipv4Entries; + private final Map<String, List<InetAddress>> ipv6Entries; + + HostsFileEntriesProvider(Map<String, List<InetAddress>> ipv4Entries, Map<String, List<InetAddress>> ipv6Entries) { + this.ipv4Entries = Collections.unmodifiableMap(new HashMap<String, List<InetAddress>>(ipv4Entries)); + this.ipv6Entries = Collections.unmodifiableMap(new HashMap<String, List<InetAddress>>(ipv6Entries)); + } + + /** + * The IPv4 entries. + * + * @return the IPv4 entries + */ + public Map<String, List<InetAddress>> ipv4Entries() { + return ipv4Entries; + } + + /** + * The IPv6 entries. + * + * @return the IPv6 entries + */ + public Map<String, List<InetAddress>> ipv6Entries() { + return ipv6Entries; + } + + private static final class ParserImpl implements Parser { + + private static final String WINDOWS_DEFAULT_SYSTEM_ROOT = "C:\\Windows"; + private static final String WINDOWS_HOSTS_FILE_RELATIVE_PATH = "\\system32\\drivers\\etc\\hosts"; + private static final String X_PLATFORMS_HOSTS_FILE_PATH = "/etc/hosts"; + + private static final Pattern WHITESPACES = Pattern.compile("[ \t]+"); + + private static final InternalLogger logger = InternalLoggerFactory.getInstance(Parser.class); + + @Override + public HostsFileEntriesProvider parse() throws IOException { + return parse(locateHostsFile(), Charset.defaultCharset()); + } + + @Override + public HostsFileEntriesProvider parse(Charset... charsets) throws IOException { + return parse(locateHostsFile(), charsets); + } + + @Override + public HostsFileEntriesProvider parse(File file, Charset... 
charsets) throws IOException { + checkNotNull(file, "file"); + checkNotNull(charsets, "charsets"); + if (charsets.length == 0) { + charsets = new Charset[]{Charset.defaultCharset()}; + } + if (file.exists() && file.isFile()) { + for (Charset charset : charsets) { + BufferedReader reader = new BufferedReader( + new InputStreamReader(new FileInputStream(file), charset)); + try { + HostsFileEntriesProvider entries = parse(reader); + if (entries != HostsFileEntriesProvider.EMPTY) { + return entries; + } + } finally { + reader.close(); + } + } + } + return HostsFileEntriesProvider.EMPTY; + } + + @Override + public HostsFileEntriesProvider parse(Reader reader) throws IOException { + checkNotNull(reader, "reader"); + BufferedReader buff = new BufferedReader(reader); + try { + Map<String, List<InetAddress>> ipv4Entries = new HashMap<String, List<InetAddress>>(); + Map<String, List<InetAddress>> ipv6Entries = new HashMap<String, List<InetAddress>>(); + String line; + while ((line = buff.readLine()) != null) { + // remove comment + int commentPosition = line.indexOf('#'); + if (commentPosition != -1) { + line = line.substring(0, commentPosition); + } + // skip empty lines + line = line.trim(); + if (line.isEmpty()) { + continue; + } + + // split + List<String> lineParts = new ArrayList<String>(); + for (String s : WHITESPACES.split(line)) { + if (!s.isEmpty()) { + lineParts.add(s); + } + } + + // a valid line should be [IP, hostname, alias*] + if (lineParts.size() < 2) { + // skip invalid line + continue; + } + + byte[] ipBytes = NetUtil.createByteArrayFromIpAddressString(lineParts.get(0)); + + if (ipBytes == null) { + // skip invalid IP + continue; + } + + // loop over hostname and aliases + for (int i = 1; i < lineParts.size(); i++) { + String hostname = lineParts.get(i); + String hostnameLower = hostname.toLowerCase(Locale.ENGLISH); + InetAddress address = InetAddress.getByAddress(hostname, ipBytes); + List<InetAddress> addresses; + if (address instanceof Inet4Address) { + addresses = ipv4Entries.get(hostnameLower); + if (addresses == null) { + addresses = new ArrayList<InetAddress>(); + ipv4Entries.put(hostnameLower, addresses); + } + } else { + addresses = ipv6Entries.get(hostnameLower); + if (addresses == null) { + addresses = new ArrayList<InetAddress>(); + ipv6Entries.put(hostnameLower, addresses); + } + } + addresses.add(address); + } + } + return ipv4Entries.isEmpty() && ipv6Entries.isEmpty() ? + HostsFileEntriesProvider.EMPTY : + new HostsFileEntriesProvider(ipv4Entries, ipv6Entries); + } finally { + try { + buff.close(); + } catch (IOException e) { + logger.warn("Failed to close a reader", e); + } + } + } + + @Override + public HostsFileEntriesProvider parseSilently() { + return parseSilently(locateHostsFile(), Charset.defaultCharset()); + } + + @Override + public HostsFileEntriesProvider parseSilently(Charset... charsets) { + return parseSilently(locateHostsFile(), charsets); + } + + @Override + public HostsFileEntriesProvider parseSilently(File file, Charset... 
charsets) { + try { + return parse(file, charsets); + } catch (IOException e) { + if (logger.isWarnEnabled()) { + logger.warn("Failed to load and parse hosts file at " + file.getPath(), e); + } + return HostsFileEntriesProvider.EMPTY; + } + } + + private static File locateHostsFile() { + File hostsFile; + if (PlatformDependent.isWindows()) { + hostsFile = new File(System.getenv("SystemRoot") + WINDOWS_HOSTS_FILE_RELATIVE_PATH); + if (!hostsFile.exists()) { + hostsFile = new File(WINDOWS_DEFAULT_SYSTEM_ROOT + WINDOWS_HOSTS_FILE_RELATIVE_PATH); + } + } else { + hostsFile = new File(X_PLATFORMS_HOSTS_FILE_PATH); + } + return hostsFile; + } + } +} diff --git a/resolver/src/main/java/io/netty/resolver/HostsFileParser.java b/resolver/src/main/java/io/netty/resolver/HostsFileParser.java index c73e3480298..af181f940c3 100644 --- a/resolver/src/main/java/io/netty/resolver/HostsFileParser.java +++ b/resolver/src/main/java/io/netty/resolver/HostsFileParser.java @@ -15,63 +15,31 @@ */ package io.netty.resolver; -import io.netty.util.NetUtil; -import io.netty.util.internal.PlatformDependent; -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; - -import java.io.BufferedReader; import java.io.File; -import java.io.FileInputStream; import java.io.IOException; -import java.io.InputStreamReader; import java.io.Reader; import java.net.Inet4Address; import java.net.Inet6Address; import java.net.InetAddress; import java.nio.charset.Charset; -import java.util.ArrayList; import java.util.List; -import java.util.Locale; import java.util.HashMap; import java.util.Map; -import java.util.regex.Pattern; - -import static io.netty.util.internal.ObjectUtil.*; /** * A parser for hosts files. + * The produced mappings contain only the first entry per hostname. + * Consider using {@link HostsFileEntriesProvider} when mappings with all entries per hostname are needed. */ public final class HostsFileParser { - private static final String WINDOWS_DEFAULT_SYSTEM_ROOT = "C:\\Windows"; - private static final String WINDOWS_HOSTS_FILE_RELATIVE_PATH = "\\system32\\drivers\\etc\\hosts"; - private static final String X_PLATFORMS_HOSTS_FILE_PATH = "/etc/hosts"; - - private static final Pattern WHITESPACES = Pattern.compile("[ \t]+"); - - private static final InternalLogger logger = InternalLoggerFactory.getInstance(HostsFileParser.class); - - private static File locateHostsFile() { - File hostsFile; - if (PlatformDependent.isWindows()) { - hostsFile = new File(System.getenv("SystemRoot") + WINDOWS_HOSTS_FILE_RELATIVE_PATH); - if (!hostsFile.exists()) { - hostsFile = new File(WINDOWS_DEFAULT_SYSTEM_ROOT + WINDOWS_HOSTS_FILE_RELATIVE_PATH); - } - } else { - hostsFile = new File(X_PLATFORMS_HOSTS_FILE_PATH); - } - return hostsFile; - } - /** * Parse hosts file at standard OS location using the systems default {@link Charset} for decoding. * * @return a {@link HostsFileEntries} */ public static HostsFileEntries parseSilently() { - return parseSilently(Charset.defaultCharset()); + return hostsFileEntries(HostsFileEntriesProvider.parser().parseSilently()); } /** @@ -82,15 +50,7 @@ public static HostsFileEntries parseSilently() { * @return a {@link HostsFileEntries} */ public static HostsFileEntries parseSilently(Charset... 
charsets) { - File hostsFile = locateHostsFile(); - try { - return parse(hostsFile, charsets); - } catch (IOException e) { - if (logger.isWarnEnabled()) { - logger.warn("Failed to load and parse hosts file at " + hostsFile.getPath(), e); - } - return HostsFileEntries.EMPTY; - } + return hostsFileEntries(HostsFileEntriesProvider.parser().parseSilently(charsets)); } /** @@ -100,7 +60,7 @@ public static HostsFileEntries parseSilently(Charset... charsets) { * @throws IOException file could not be read */ public static HostsFileEntries parse() throws IOException { - return parse(locateHostsFile()); + return hostsFileEntries(HostsFileEntriesProvider.parser().parse()); } /** @@ -111,7 +71,7 @@ public static HostsFileEntries parse() throws IOException { * @throws IOException file could not be read */ public static HostsFileEntries parse(File file) throws IOException { - return parse(file, Charset.defaultCharset()); + return hostsFileEntries(HostsFileEntriesProvider.parser().parse(file)); } /** @@ -123,23 +83,7 @@ public static HostsFileEntries parse(File file) throws IOException { * @throws IOException file could not be read */ public static HostsFileEntries parse(File file, Charset... charsets) throws IOException { - checkNotNull(file, "file"); - checkNotNull(charsets, "charsets"); - if (file.exists() && file.isFile()) { - for (Charset charset: charsets) { - BufferedReader reader = new BufferedReader( - new InputStreamReader(new FileInputStream(file), charset)); - try { - HostsFileEntries entries = parse(reader); - if (entries != HostsFileEntries.EMPTY) { - return entries; - } - } finally { - reader.close(); - } - } - } - return HostsFileEntries.EMPTY; + return hostsFileEntries(HostsFileEntriesProvider.parser().parse(file, charsets)); } /** @@ -150,75 +94,7 @@ public static HostsFileEntries parse(File file, Charset... 
charsets) throws IOEx * @throws IOException file could not be read */ public static HostsFileEntries parse(Reader reader) throws IOException { - checkNotNull(reader, "reader"); - BufferedReader buff = new BufferedReader(reader); - try { - Map<String, Inet4Address> ipv4Entries = new HashMap<String, Inet4Address>(); - Map<String, Inet6Address> ipv6Entries = new HashMap<String, Inet6Address>(); - String line; - while ((line = buff.readLine()) != null) { - // remove comment - int commentPosition = line.indexOf('#'); - if (commentPosition != -1) { - line = line.substring(0, commentPosition); - } - // skip empty lines - line = line.trim(); - if (line.isEmpty()) { - continue; - } - - // split - List<String> lineParts = new ArrayList<String>(); - for (String s: WHITESPACES.split(line)) { - if (!s.isEmpty()) { - lineParts.add(s); - } - } - - // a valid line should be [IP, hostname, alias*] - if (lineParts.size() < 2) { - // skip invalid line - continue; - } - - byte[] ipBytes = NetUtil.createByteArrayFromIpAddressString(lineParts.get(0)); - - if (ipBytes == null) { - // skip invalid IP - continue; - } - - // loop over hostname and aliases - for (int i = 1; i < lineParts.size(); i ++) { - String hostname = lineParts.get(i); - String hostnameLower = hostname.toLowerCase(Locale.ENGLISH); - InetAddress address = InetAddress.getByAddress(hostname, ipBytes); - if (address instanceof Inet4Address) { - Inet4Address previous = ipv4Entries.put(hostnameLower, (Inet4Address) address); - if (previous != null) { - // restore, we want to keep the first entry - ipv4Entries.put(hostnameLower, previous); - } - } else { - Inet6Address previous = ipv6Entries.put(hostnameLower, (Inet6Address) address); - if (previous != null) { - // restore, we want to keep the first entry - ipv6Entries.put(hostnameLower, previous); - } - } - } - } - return ipv4Entries.isEmpty() && ipv6Entries.isEmpty() ? - HostsFileEntries.EMPTY : - new HostsFileEntries(ipv4Entries, ipv6Entries); - } finally { - try { - buff.close(); - } catch (IOException e) { - logger.warn("Failed to close a reader", e); - } - } + return hostsFileEntries(HostsFileEntriesProvider.parser().parse(reader)); } /** @@ -226,4 +102,22 @@ public static HostsFileEntries parse(Reader reader) throws IOException { */ private HostsFileParser() { } + + @SuppressWarnings("unchecked") + private static HostsFileEntries hostsFileEntries(HostsFileEntriesProvider provider) { + return provider == HostsFileEntriesProvider.EMPTY ? HostsFileEntries.EMPTY : + new HostsFileEntries((Map<String, Inet4Address>) toMapWithSingleValue(provider.ipv4Entries()), + (Map<String, Inet6Address>) toMapWithSingleValue(provider.ipv6Entries())); + } + + private static Map<String, ?> toMapWithSingleValue(Map<String, List<InetAddress>> fromMapWithListValue) { + Map<String, InetAddress> result = new HashMap<String, InetAddress>(); + for (Map.Entry<String, List<InetAddress>> entry : fromMapWithListValue.entrySet()) { + List<InetAddress> value = entry.getValue(); + if (!value.isEmpty()) { + result.put(entry.getKey(), value.get(0)); + } + } + return result; + } }
diff --git a/resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java b/resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java index b3485f5978d..4a79cd2cdce 100644 --- a/resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java +++ b/resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java @@ -22,9 +22,16 @@ import java.net.Inet4Address; import java.net.Inet6Address; import java.net.InetAddress; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + public class DefaultHostsFileEntriesResolverTest { /** @@ -32,53 +39,103 @@ public class DefaultHostsFileEntriesResolverTest { * HostsFileParser tries to resolve hostnames as case-sensitive */ @Test - public void testCaseInsensitivity() throws Exception { + public void testCaseInsensitivity() { DefaultHostsFileEntriesResolver resolver = new DefaultHostsFileEntriesResolver(); //normalized somehow - Assert.assertEquals(resolver.normalize("localhost"), resolver.normalize("LOCALHOST")); + assertEquals(resolver.normalize("localhost"), resolver.normalize("LOCALHOST")); } @Test public void shouldntFindWhenAddressTypeDoesntMatch() { - Map<String, Inet4Address> inet4Entries = new HashMap<String, Inet4Address>(); - Map<String, Inet6Address> inet6Entries = new HashMap<String, Inet6Address>(); + Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); + Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); - inet4Entries.put("localhost", NetUtil.LOCALHOST4); + inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); DefaultHostsFileEntriesResolver resolver = - new DefaultHostsFileEntriesResolver(new HostsFileEntries(inet4Entries, inet6Entries)); + new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); InetAddress address = resolver.address("localhost", ResolvedAddressTypes.IPV6_ONLY); - Assert.assertNull("Should pick an IPv6 address", address); + assertNull("Should pick an IPv6 address", address); } @Test public void shouldPickIpv4WhenBothAreDefinedButIpv4IsPreferred() { - Map<String, Inet4Address> inet4Entries = new HashMap<String, Inet4Address>(); - Map<String, Inet6Address> inet6Entries = new HashMap<String, Inet6Address>(); + Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); + Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); - inet4Entries.put("localhost", NetUtil.LOCALHOST4); - inet6Entries.put("localhost", NetUtil.LOCALHOST6); + inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); + inet6Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); DefaultHostsFileEntriesResolver resolver = - new DefaultHostsFileEntriesResolver(new HostsFileEntries(inet4Entries, inet6Entries)); + new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); InetAddress address = resolver.address("localhost", ResolvedAddressTypes.IPV4_PREFERRED); - Assert.assertTrue("Should pick an IPv4 address", address instanceof Inet4Address); + assertTrue("Should pick an IPv4 address", address instanceof Inet4Address); } @Test public void 
shouldPickIpv6WhenBothAreDefinedButIpv6IsPreferred() { - Map<String, Inet4Address> inet4Entries = new HashMap<String, Inet4Address>(); - Map<String, Inet6Address> inet6Entries = new HashMap<String, Inet6Address>(); + Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); + Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); - inet4Entries.put("localhost", NetUtil.LOCALHOST4); - inet6Entries.put("localhost", NetUtil.LOCALHOST6); + inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); + inet6Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); DefaultHostsFileEntriesResolver resolver = - new DefaultHostsFileEntriesResolver(new HostsFileEntries(inet4Entries, inet6Entries)); + new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); InetAddress address = resolver.address("localhost", ResolvedAddressTypes.IPV6_PREFERRED); - Assert.assertTrue("Should pick an IPv6 address", address instanceof Inet6Address); + assertTrue("Should pick an IPv6 address", address instanceof Inet6Address); + } + + @Test + public void shouldntFindWhenAddressesTypeDoesntMatch() { + Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); + Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); + + inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); + + DefaultHostsFileEntriesResolver resolver = + new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); + + List<InetAddress> addresses = resolver.addresses("localhost", ResolvedAddressTypes.IPV6_ONLY); + assertNull("Should pick an IPv6 address", addresses); + } + + @Test + public void shouldPickIpv4FirstWhenBothAreDefinedButIpv4IsPreferred() { + Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); + Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); + + inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); + inet6Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); + + DefaultHostsFileEntriesResolver resolver = + new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); + + List<InetAddress> addresses = resolver.addresses("localhost", ResolvedAddressTypes.IPV4_PREFERRED); + assertNotNull(addresses); + assertEquals(2, addresses.size()); + assertTrue("Should pick an IPv4 address", addresses.get(0) instanceof Inet4Address); + assertTrue("Should pick an IPv6 address", addresses.get(1) instanceof Inet6Address); + } + + @Test + public void shouldPickIpv6FirstWhenBothAreDefinedButIpv6IsPreferred() { + Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); + Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); + + inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); + inet6Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); + + DefaultHostsFileEntriesResolver resolver = + new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); + + List<InetAddress> addresses = resolver.addresses("localhost", ResolvedAddressTypes.IPV6_PREFERRED); + assertNotNull(addresses); + assertEquals(2, addresses.size()); + assertTrue("Should pick 
an IPv6 address", addresses.get(0) instanceof Inet6Address); + assertTrue("Should pick an IPv4 address", addresses.get(1) instanceof Inet4Address); } } diff --git a/resolver/src/test/java/io/netty/resolver/HostsFileEntriesProviderTest.java b/resolver/src/test/java/io/netty/resolver/HostsFileEntriesProviderTest.java new file mode 100644 index 00000000000..2b5c5414f79 --- /dev/null +++ b/resolver/src/test/java/io/netty/resolver/HostsFileEntriesProviderTest.java @@ -0,0 +1,146 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.resolver; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.Reader; +import java.io.StringReader; +import java.net.InetAddress; +import java.nio.charset.Charset; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +class HostsFileEntriesProviderTest { + + @Test + void testParse() throws IOException { + String hostsString = new StringBuilder() + .append("127.0.0.1 host1").append("\n") // single hostname, separated with blanks + .append("::1 host1").append("\n") // same as above, but IPv6 + .append("\n") // empty line + .append("192.168.0.1\thost2").append("\n") // single hostname, separated with tabs + .append("#comment").append("\n") // comment at the beginning of the line + .append(" #comment ").append("\n") // comment in the middle of the line + .append("192.168.0.2 host3 #comment").append("\n") // comment after hostname + .append("192.168.0.3 host4 host5 host6").append("\n") // multiple aliases + .append("192.168.0.4 host4").append("\n") // host mapped to a second address, must be considered + .append("192.168.0.5 HOST7").append("\n") // uppercase host, should match lowercase host + .append("192.168.0.6 host7").append("\n") // must be considered + .toString(); + + HostsFileEntriesProvider entries = HostsFileEntriesProvider.parser() + .parse(new BufferedReader(new StringReader(hostsString))); + Map<String, List<InetAddress>> inet4Entries = entries.ipv4Entries(); + Map<String, List<InetAddress>> inet6Entries = entries.ipv6Entries(); + + assertEquals(7, inet4Entries.size(), "Expected 7 IPv4 entries"); + assertEquals(1, inet6Entries.size(), "Expected 1 IPv6 entries"); + + assertEquals(1, inet4Entries.get("host1").size()); + assertEquals("127.0.0.1", inet4Entries.get("host1").get(0).getHostAddress()); + + assertEquals(1, inet4Entries.get("host2").size()); + assertEquals("192.168.0.1", inet4Entries.get("host2").get(0).getHostAddress()); + + assertEquals(1, inet4Entries.get("host3").size()); + assertEquals("192.168.0.2", inet4Entries.get("host3").get(0).getHostAddress()); + + assertEquals(2, inet4Entries.get("host4").size()); + 
assertEquals("192.168.0.3", inet4Entries.get("host4").get(0).getHostAddress()); + assertEquals("192.168.0.4", inet4Entries.get("host4").get(1).getHostAddress()); + + assertEquals(1, inet4Entries.get("host5").size()); + assertEquals("192.168.0.3", inet4Entries.get("host5").get(0).getHostAddress()); + + assertEquals(1, inet4Entries.get("host6").size()); + assertEquals("192.168.0.3", inet4Entries.get("host6").get(0).getHostAddress()); + + assertNotNull(inet4Entries.get("host7"), "Uppercase host doesn't resolve"); + assertEquals(2, inet4Entries.get("host7").size()); + assertEquals("192.168.0.5", inet4Entries.get("host7").get(0).getHostAddress()); + assertEquals("192.168.0.6", inet4Entries.get("host7").get(1).getHostAddress()); + + assertEquals(1, inet6Entries.get("host1").size()); + assertEquals("0:0:0:0:0:0:0:1", inet6Entries.get("host1").get(0).getHostAddress()); + } + + @Test + void testCharsetInputValidation() { + assertThrows(NullPointerException.class, new Executable() { + @Override + public void execute() throws IOException { + HostsFileEntriesProvider.parser().parse((Charset[]) null); + } + }); + + assertThrows(NullPointerException.class, new Executable() { + @Override + public void execute() throws IOException { + HostsFileEntriesProvider.parser().parse(new File(""), (Charset[]) null); + } + }); + + assertThrows(NullPointerException.class, new Executable() { + @Override + public void execute() { + HostsFileEntriesProvider.parser().parseSilently((Charset[]) null); + } + }); + + assertThrows(NullPointerException.class, new Executable() { + @Override + public void execute() { + HostsFileEntriesProvider.parser().parseSilently(new File(""), (Charset[]) null); + } + }); + } + + @Test + void testFileInputValidation() { + assertThrows(NullPointerException.class, new Executable() { + @Override + public void execute() throws IOException { + HostsFileEntriesProvider.parser().parse((File) null); + } + }); + + assertThrows(NullPointerException.class, new Executable() { + @Override + public void execute() { + HostsFileEntriesProvider.parser().parseSilently((File) null); + } + }); + } + + @Test + void testReaderInputValidation() { + assertThrows(NullPointerException.class, new Executable() { + @Override + public void execute() throws IOException { + HostsFileEntriesProvider.parser().parse((Reader) null); + } + }); + } +}
train
test
"2021-05-11T14:07:15"
"2020-12-03T10:51:50Z"
violetagg
val
netty/netty/11263_11264
netty/netty
netty/netty/11263
netty/netty/11264
[ "keyword_pr_to_issue" ]
ccca3959fd64b133edca855925fb68d077c3a8a2
7c955a19dce2e7ea95e15a5c578e7e71d0d4a238
[]
[]
"2021-05-17T23:08:19Z"
[]
Remove dependency on log4J 1.x branch
Netty, and thus all downstream projects (e.g. vert.x, Quarkus, etc.), show a dependency on log4j-1.2.17, which has been flagged with CVE-2019-17571 (published 20-12-2019). From a vulnerability scan: > Severity: High > > CVE: 20-12-2019 > > Description: Included in Log4j 1.2 is a SocketServer class that is vulnerable to deserialization of untrusted data which can be exploited to remotely execute arbitrary code when combined with a deserialization gadget when listening to untrusted network traffic for log data. This affects Log4j versions up to 1.2 up to 1.2.17. > > Published: 20-12-2019 > > Top Fix: > Upgrade to version org.apache.logging.log4j:log4j-core:2.0.  The reported vulnerability was not checked for vulnerability effectiveness and is suggested to be examined using Effective Usage Analysis. There are 2 occurrences that need upgrading: https://github.com/netty/netty/blob/ccca3959fd64b133edca855925fb68d077c3a8a2/common/src/main/java/io/netty/util/internal/logging/Log4JLoggerFactory.java#L18 Replace with: ```java import org.apache.logging.log4j.Logger; ``` https://github.com/netty/netty/blob/d34212439068091bcec29a8fad4df82f0a82c638/common/src/main/java/io/netty/util/internal/logging/Log4JLogger.java#L42-L43 Replace with: ```java import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; ```
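The merged fix (gold_patch below) takes a different route than rewriting the imports: it replaces the `log4j:log4j` artifact with the `org.apache.logging.log4j:log4j-1.2-api` bridge, so the existing `org.apache.log4j` imports keep compiling while calls are delegated to Log4j 2 at runtime. A minimal sketch of that idea; the class name is illustrative, and it assumes the bridge is on the classpath:

```java
// Code written against the Log4j 1.x API, unchanged. With log4j-1.2-api on
// the classpath (instead of log4j:log4j), these calls are routed to Log4j 2.
import org.apache.log4j.Logger;

public final class BridgeDemo { // hypothetical class name
    private static final Logger logger = Logger.getLogger(BridgeDemo.class);

    public static void main(String[] args) {
        logger.info("handled by Log4j 2 via the log4j-1.2-api bridge");
    }
}
```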
[ "common/pom.xml", "pom.xml" ]
[ "common/pom.xml", "pom.xml" ]
[]
diff --git a/common/pom.xml b/common/pom.xml index 15a5dc12679..a930b94f1bd 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -64,8 +64,8 @@ <optional>true</optional> </dependency> <dependency> - <groupId>log4j</groupId> - <artifactId>log4j</artifactId> + <groupId>org.apache.logging.log4j</groupId> + <artifactId>log4j-1.2-api</artifactId> <optional>true</optional> </dependency> <dependency> diff --git a/pom.xml b/pom.xml index e677a89ff05..1478851ace8 100644 --- a/pom.xml +++ b/pom.xml @@ -728,9 +728,9 @@ <version>${log4j2.version}</version> </dependency> <dependency> - <groupId>log4j</groupId> - <artifactId>log4j</artifactId> - <version>1.2.17</version> + <groupId>org.apache.logging.log4j</groupId> + <artifactId>log4j-1.2-api</artifactId> + <version>2.14.1</version> <exclusions> <exclusion> <artifactId>mail</artifactId>
null
train
test
"2021-05-17T20:16:25"
"2021-05-17T22:05:05Z"
Stwissel
val
netty/netty/11280_11281
netty/netty
netty/netty/11280
netty/netty/11281
[ "keyword_pr_to_issue" ]
bed04fc5975c305750ffbdabcd66b352616d0c1d
9747739962dc9ca620417568d4f4de4d5659547a
[]
[]
"2021-05-19T03:07:24Z"
[]
Method 'io.netty.util.internal.MathUtil#isOutOfBounds' needs to check the 'capacity' parameter
### Expected behavior As `io.netty.util.internal.MathUtil#isOutOfBounds` is a public utility method, it should pass all kinds of test cases. For example: ``` import io.netty.util.internal.MathUtil; public static void main(String[] args) { boolean isOutOfBounds = MathUtil.isOutOfBounds(1, 2, Integer.MIN_VALUE); System.out.println(isOutOfBounds); } ``` It should print `true`, which means it is out of bounds. ### Actual behavior It printed `false`, which means it is NOT out of bounds. ### Steps to reproduce see code above. ### Minimal yet complete reproducer code (or URL to code) see code above. ### Netty version 4.1. ### JVM version (e.g. `java -version`) 1.8. ### OS version (e.g. `uname -a`) Darwin.
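A sketch of why the reported call returns `false`; the wrapper class is hypothetical, but the two predicates are copied from the pre- and post-fix versions of `MathUtil` in this record. With `capacity = Integer.MIN_VALUE`, the subtraction `capacity - (index + length)` overflows to a positive value, and because `capacity` itself is never OR-ed into the original expression, every term ends up non-negative:

```java
public final class OverflowDemo { // hypothetical wrapper class
    public static void main(String[] args) {
        int index = 1, length = 2, capacity = Integer.MIN_VALUE;
        int sum = index + length;        // 3
        int remaining = capacity - sum;  // Integer.MIN_VALUE - 3 overflows to 2147483645
        // Pre-fix predicate: capacity is missing, so all terms are >= 0 -> false.
        System.out.println((index | length | sum | remaining) < 0);            // false
        // Post-fix predicate: OR-ing in capacity preserves the sign bit -> true.
        System.out.println((index | length | capacity | sum | remaining) < 0); // true
    }
}
```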
[ "common/src/main/java/io/netty/util/internal/MathUtil.java" ]
[ "common/src/main/java/io/netty/util/internal/MathUtil.java" ]
[ "common/src/test/java/io/netty/util/internal/MathUtilTest.java" ]
diff --git a/common/src/main/java/io/netty/util/internal/MathUtil.java b/common/src/main/java/io/netty/util/internal/MathUtil.java index 68777e6d8e2..fd8e165eccf 100644 --- a/common/src/main/java/io/netty/util/internal/MathUtil.java +++ b/common/src/main/java/io/netty/util/internal/MathUtil.java @@ -57,11 +57,11 @@ public static int safeFindNextPositivePowerOfTwo(final int value) { * @param index The starting index. * @param length The length which will be utilized (starting from {@code index}). * @param capacity The capacity that {@code index + length} is allowed to be within. - * @return {@code true} if the requested {@code index} and {@code length} will fit within {@code capacity}. - * {@code false} if this would result in an index out of bounds exception. + * @return {@code false} if the requested {@code index} and {@code length} will fit within {@code capacity}. + * {@code true} if this would result in an index out of bounds exception. */ public static boolean isOutOfBounds(int index, int length, int capacity) { - return (index | length | (index + length) | (capacity - (index + length))) < 0; + return (index | length | capacity | (index + length) | (capacity - (index + length))) < 0; } /**
diff --git a/common/src/test/java/io/netty/util/internal/MathUtilTest.java b/common/src/test/java/io/netty/util/internal/MathUtilTest.java index 32628114dd9..2fd44ced4bc 100644 --- a/common/src/test/java/io/netty/util/internal/MathUtilTest.java +++ b/common/src/test/java/io/netty/util/internal/MathUtilTest.java @@ -68,6 +68,9 @@ public void testIsOutOfBounds() { assertTrue(isOutOfBounds(Integer.MAX_VALUE - 1, 1, Integer.MAX_VALUE - 1)); assertTrue(isOutOfBounds(Integer.MAX_VALUE - 1, 2, Integer.MAX_VALUE)); assertTrue(isOutOfBounds(1, Integer.MAX_VALUE, Integer.MAX_VALUE)); + assertTrue(isOutOfBounds(0, 1, Integer.MIN_VALUE)); + assertTrue(isOutOfBounds(0, 1, -1)); + assertTrue(isOutOfBounds(0, Integer.MAX_VALUE, 0)); } @Test
train
test
"2021-05-19T16:28:05"
"2021-05-19T02:28:26Z"
laosijikaichele
val
netty/netty/11279_11281
netty/netty
netty/netty/11279
netty/netty/11281
[ "keyword_pr_to_issue" ]
bed04fc5975c305750ffbdabcd66b352616d0c1d
9747739962dc9ca620417568d4f4de4d5659547a
[]
[]
"2021-05-19T03:07:24Z"
[]
Docs of method 'io.netty.util.internal.MathUtil#isOutOfBounds' needs to be modified
### Expected behavior The docs about the 'return value' of the method `io.netty.util.internal.MathUtil#isOutOfBounds` are not correct. It should be: > * @return {@code false} if the requested {@code index} and {@code length} will fit within {@code capacity}. > * {@code true} if this would result in an index out of bounds exception. ### Actual behavior Actually it is the opposite: > * @return {@code true} if the requested {@code index} and {@code length} will fit within {@code capacity}. > * {@code false} if this would result in an index out of bounds exception. The original doc and code are as follows: > /** > * Determine if the requested {@code index} and {@code length} will fit within {@code capacity}. > * @param index The starting index. > * @param length The length which will be utilized (starting from {@code index}). > * @param capacity The capacity that {@code index + length} is allowed to be within. > * @return {@code true} if the requested {@code index} and {@code length} will fit within {@code capacity}. > * {@code false} if this would result in an index out of bounds exception. > */ > public static boolean isOutOfBounds(int index, int length, int capacity) { > return (index | length | (index + length) | (capacity - (index + length))) < 0; > } ### Steps to reproduce see above. ### Minimal yet complete reproducer code (or URL to code) see above. ### Netty version 4.1. ### JVM version (e.g. `java -version`) does not matter. ### OS version (e.g. `uname -a`) does not matter.
[ "common/src/main/java/io/netty/util/internal/MathUtil.java" ]
[ "common/src/main/java/io/netty/util/internal/MathUtil.java" ]
[ "common/src/test/java/io/netty/util/internal/MathUtilTest.java" ]
diff --git a/common/src/main/java/io/netty/util/internal/MathUtil.java b/common/src/main/java/io/netty/util/internal/MathUtil.java index 68777e6d8e2..fd8e165eccf 100644 --- a/common/src/main/java/io/netty/util/internal/MathUtil.java +++ b/common/src/main/java/io/netty/util/internal/MathUtil.java @@ -57,11 +57,11 @@ public static int safeFindNextPositivePowerOfTwo(final int value) { * @param index The starting index. * @param length The length which will be utilized (starting from {@code index}). * @param capacity The capacity that {@code index + length} is allowed to be within. - * @return {@code true} if the requested {@code index} and {@code length} will fit within {@code capacity}. - * {@code false} if this would result in an index out of bounds exception. + * @return {@code false} if the requested {@code index} and {@code length} will fit within {@code capacity}. + * {@code true} if this would result in an index out of bounds exception. */ public static boolean isOutOfBounds(int index, int length, int capacity) { - return (index | length | (index + length) | (capacity - (index + length))) < 0; + return (index | length | capacity | (index + length) | (capacity - (index + length))) < 0; } /**
diff --git a/common/src/test/java/io/netty/util/internal/MathUtilTest.java b/common/src/test/java/io/netty/util/internal/MathUtilTest.java index 32628114dd9..2fd44ced4bc 100644 --- a/common/src/test/java/io/netty/util/internal/MathUtilTest.java +++ b/common/src/test/java/io/netty/util/internal/MathUtilTest.java @@ -68,6 +68,9 @@ public void testIsOutOfBounds() { assertTrue(isOutOfBounds(Integer.MAX_VALUE - 1, 1, Integer.MAX_VALUE - 1)); assertTrue(isOutOfBounds(Integer.MAX_VALUE - 1, 2, Integer.MAX_VALUE)); assertTrue(isOutOfBounds(1, Integer.MAX_VALUE, Integer.MAX_VALUE)); + assertTrue(isOutOfBounds(0, 1, Integer.MIN_VALUE)); + assertTrue(isOutOfBounds(0, 1, -1)); + assertTrue(isOutOfBounds(0, Integer.MAX_VALUE, 0)); } @Test
test
test
"2021-05-19T16:28:05"
"2021-05-19T02:27:28Z"
laosijikaichele
val
netty/netty/11302_11317
netty/netty
netty/netty/11302
netty/netty/11317
[ "keyword_issue_to_pr" ]
0ff93519a43c7ef1e299f531461a5b665a85f143
f81149324ff42ee375d094b20ac481e40206f678
[ "@mariaccc let me fix this ... ", "Fixed by https://github.com/netty/netty/pull/11317 ... You can workaround it by using:\r\n\r\n```\r\n-Pboringssl\r\n```", "> Fixed by #11317 ... You can workaround it by using:\r\n> \r\n> ```\r\n> -Pboringssl\r\n> ```\r\n\r\nIt works!" ]
[]
"2021-05-26T10:21:44Z"
[]
Setting up development environment failed on Windows
### Expected behavior Setting up development environment on Windows ### Actual behavior Failure to find io.netty:netty-tcnative:jar:windows-x86_64:2.0.39.Final maven clean install failed ### Steps to reproduce maven clean install -X ### Minimal yet complete reproducer code (or URL to code) ``` [INFO] BUILD FAILURE [INFO] ------------------------------------------------------------------------ [INFO] Total time: 01:14 min [INFO] Finished at: 2021-05-26T10:09:00+08:00 [INFO] ------------------------------------------------------------------------ [ERROR] Failed to execute goal on project netty-handler: Could not resolve dependencies for project io.netty:netty-handler:jar:4.1.65.Final: Failure to find io.netty:netty-tcnative:jar:windows-x86_64:2.0.39.Final in https://repo.maven.apache.org/maven2 was cached in the local repository, resolution will not be reattempted until the update interval of central has elapsed or updates are forced -> [Help 1] org.apache.maven.lifecycle.LifecycleExecutionException: Failed to execute goal on project netty-handler: Could not resolve dependencies for project io.netty:netty-handler:jar:4.1.65.Final: Failure to find io.netty:netty-tcnative:jar:windows-x86_64:2.0.39.Final in https://repo.maven.apache.org/maven2 was cached in the local repository, resolution will not be reattempted until the update interval of central has elapsed or updates are forced at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.getDependencies (LifecycleDependencyResolver.java:269) at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.resolveProjectDependencies (LifecycleDependencyResolver.java:147) at org.apache.maven.lifecycle.internal.MojoExecutor.ensureDependenciesAreResolved (MojoExecutor.java:248) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:202) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:156) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:148) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:117) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:81) at org.apache.maven.lifecycle.internal.builder.singlethreaded.SingleThreadedBuilder.build (SingleThreadedBuilder.java:56) at org.apache.maven.lifecycle.internal.LifecycleStarter.execute (LifecycleStarter.java:128) at org.apache.maven.DefaultMaven.doExecute (DefaultMaven.java:305) at org.apache.maven.DefaultMaven.doExecute (DefaultMaven.java:192) at org.apache.maven.DefaultMaven.execute (DefaultMaven.java:105) at org.apache.maven.cli.MavenCli.execute (MavenCli.java:957) at org.apache.maven.cli.MavenCli.doMain (MavenCli.java:289) at org.apache.maven.cli.MavenCli.main (MavenCli.java:193) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0 (Native Method) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke (NativeMethodAccessorImpl.java:62) at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke (DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke (Method.java:566) at org.codehaus.plexus.classworlds.launcher.Launcher.launchEnhanced (Launcher.java:282) at org.codehaus.plexus.classworlds.launcher.Launcher.launch (Launcher.java:225) at org.codehaus.plexus.classworlds.launcher.Launcher.mainWithExitCode (Launcher.java:406) at org.codehaus.plexus.classworlds.launcher.Launcher.main (Launcher.java:347) at org.codehaus.classworlds.Launcher.main (Launcher.java:47) Caused by: 
org.apache.maven.project.DependencyResolutionException: Could not resolve dependencies for project io.netty:netty-handler:jar:4.1.65.Final: Failure to find io.netty:netty-tcnative:jar:windows-x86_64:2.0.39.Final in https://repo.maven.apache.org/maven2 was cached in the local repository, resolution will not be reattempted until the update interval of central has elapsed or updates are forced at org.apache.maven.project.DefaultProjectDependenciesResolver.resolve (DefaultProjectDependenciesResolver.java:209) at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.getDependencies (LifecycleDependencyResolver.java:243) at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.resolveProjectDependencies (LifecycleDependencyResolver.java:147) at org.apache.maven.lifecycle.internal.MojoExecutor.ensureDependenciesAreResolved (MojoExecutor.java:248) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:202) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:156) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:148) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:117) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:81) at org.apache.maven.lifecycle.internal.builder.singlethreaded.SingleThreadedBuilder.build (SingleThreadedBuilder.java:56) at org.apache.maven.lifecycle.internal.LifecycleStarter.execute (LifecycleStarter.java:128) at org.apache.maven.DefaultMaven.doExecute (DefaultMaven.java:305) at org.apache.maven.DefaultMaven.doExecute (DefaultMaven.java:192) at org.apache.maven.DefaultMaven.execute (DefaultMaven.java:105) at org.apache.maven.cli.MavenCli.execute (MavenCli.java:957) at org.apache.maven.cli.MavenCli.doMain (MavenCli.java:289) at org.apache.maven.cli.MavenCli.main (MavenCli.java:193) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0 (Native Method) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke (NativeMethodAccessorImpl.java:62) at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke (DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke (Method.java:566) at org.codehaus.plexus.classworlds.launcher.Launcher.launchEnhanced (Launcher.java:282) at org.codehaus.plexus.classworlds.launcher.Launcher.launch (Launcher.java:225) at org.codehaus.plexus.classworlds.launcher.Launcher.mainWithExitCode (Launcher.java:406) at org.codehaus.plexus.classworlds.launcher.Launcher.main (Launcher.java:347) at org.codehaus.classworlds.Launcher.main (Launcher.java:47) Caused by: org.eclipse.aether.resolution.DependencyResolutionException: Failure to find io.netty:netty-tcnative:jar:windows-x86_64:2.0.39.Final in https://repo.maven.apache.org/maven2 was cached in the local repository, resolution will not be reattempted until the update interval of central has elapsed or updates are forced at org.eclipse.aether.internal.impl.DefaultRepositorySystem.resolveDependencies (DefaultRepositorySystem.java:357) at org.apache.maven.project.DefaultProjectDependenciesResolver.resolve (DefaultProjectDependenciesResolver.java:202) at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.getDependencies (LifecycleDependencyResolver.java:243) at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.resolveProjectDependencies (LifecycleDependencyResolver.java:147) at org.apache.maven.lifecycle.internal.MojoExecutor.ensureDependenciesAreResolved 
(MojoExecutor.java:248) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:202) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:156) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:148) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:117) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:81) at org.apache.maven.lifecycle.internal.builder.singlethreaded.SingleThreadedBuilder.build (SingleThreadedBuilder.java:56) at org.apache.maven.lifecycle.internal.LifecycleStarter.execute (LifecycleStarter.java:128) at org.apache.maven.DefaultMaven.doExecute (DefaultMaven.java:305) at org.apache.maven.DefaultMaven.doExecute (DefaultMaven.java:192) at org.apache.maven.DefaultMaven.execute (DefaultMaven.java:105) at org.apache.maven.cli.MavenCli.execute (MavenCli.java:957) at org.apache.maven.cli.MavenCli.doMain (MavenCli.java:289) at org.apache.maven.cli.MavenCli.main (MavenCli.java:193) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0 (Native Method) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke (NativeMethodAccessorImpl.java:62) at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke (DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke (Method.java:566) at org.codehaus.plexus.classworlds.launcher.Launcher.launchEnhanced (Launcher.java:282) at org.codehaus.plexus.classworlds.launcher.Launcher.launch (Launcher.java:225) at org.codehaus.plexus.classworlds.launcher.Launcher.mainWithExitCode (Launcher.java:406) at org.codehaus.plexus.classworlds.launcher.Launcher.main (Launcher.java:347) at org.codehaus.classworlds.Launcher.main (Launcher.java:47) Caused by: org.eclipse.aether.resolution.ArtifactResolutionException: Failure to find io.netty:netty-tcnative:jar:windows-x86_64:2.0.39.Final in https://repo.maven.apache.org/maven2 was cached in the local repository, resolution will not be reattempted until the update interval of central has elapsed or updates are forced at org.eclipse.aether.internal.impl.DefaultArtifactResolver.resolve (DefaultArtifactResolver.java:424) at org.eclipse.aether.internal.impl.DefaultArtifactResolver.resolveArtifacts (DefaultArtifactResolver.java:229) at org.eclipse.aether.internal.impl.DefaultRepositorySystem.resolveDependencies (DefaultRepositorySystem.java:340) at org.apache.maven.project.DefaultProjectDependenciesResolver.resolve (DefaultProjectDependenciesResolver.java:202) at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.getDependencies (LifecycleDependencyResolver.java:243) at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.resolveProjectDependencies (LifecycleDependencyResolver.java:147) at org.apache.maven.lifecycle.internal.MojoExecutor.ensureDependenciesAreResolved (MojoExecutor.java:248) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:202) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:156) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:148) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:117) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:81) at org.apache.maven.lifecycle.internal.builder.singlethreaded.SingleThreadedBuilder.build (SingleThreadedBuilder.java:56) at 
org.apache.maven.lifecycle.internal.LifecycleStarter.execute (LifecycleStarter.java:128) at org.apache.maven.DefaultMaven.doExecute (DefaultMaven.java:305) at org.apache.maven.DefaultMaven.doExecute (DefaultMaven.java:192) at org.apache.maven.DefaultMaven.execute (DefaultMaven.java:105) at org.apache.maven.cli.MavenCli.execute (MavenCli.java:957) at org.apache.maven.cli.MavenCli.doMain (MavenCli.java:289) at org.apache.maven.cli.MavenCli.main (MavenCli.java:193) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0 (Native Method) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke (NativeMethodAccessorImpl.java:62) at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke (DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke (Method.java:566) at org.codehaus.plexus.classworlds.launcher.Launcher.launchEnhanced (Launcher.java:282) at org.codehaus.plexus.classworlds.launcher.Launcher.launch (Launcher.java:225) at org.codehaus.plexus.classworlds.launcher.Launcher.mainWithExitCode (Launcher.java:406) at org.codehaus.plexus.classworlds.launcher.Launcher.main (Launcher.java:347) at org.codehaus.classworlds.Launcher.main (Launcher.java:47) Caused by: org.eclipse.aether.transfer.ArtifactNotFoundException: Failure to find io.netty:netty-tcnative:jar:windows-x86_64:2.0.39.Final in https://repo.maven.apache.org/maven2 was cached in the local repository, resolution will not be reattempted until the update interval of central has elapsed or updates are forced at org.eclipse.aether.internal.impl.DefaultUpdateCheckManager.newException (DefaultUpdateCheckManager.java:218) at org.eclipse.aether.internal.impl.DefaultUpdateCheckManager.checkArtifact (DefaultUpdateCheckManager.java:193) at org.eclipse.aether.internal.impl.DefaultArtifactResolver.gatherDownloads (DefaultArtifactResolver.java:559) at org.eclipse.aether.internal.impl.DefaultArtifactResolver.performDownloads (DefaultArtifactResolver.java:483) at org.eclipse.aether.internal.impl.DefaultArtifactResolver.resolve (DefaultArtifactResolver.java:401) at org.eclipse.aether.internal.impl.DefaultArtifactResolver.resolveArtifacts (DefaultArtifactResolver.java:229) at org.eclipse.aether.internal.impl.DefaultRepositorySystem.resolveDependencies (DefaultRepositorySystem.java:340) at org.apache.maven.project.DefaultProjectDependenciesResolver.resolve (DefaultProjectDependenciesResolver.java:202) at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.getDependencies (LifecycleDependencyResolver.java:243) at org.apache.maven.lifecycle.internal.LifecycleDependencyResolver.resolveProjectDependencies (LifecycleDependencyResolver.java:147) at org.apache.maven.lifecycle.internal.MojoExecutor.ensureDependenciesAreResolved (MojoExecutor.java:248) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:202) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:156) at org.apache.maven.lifecycle.internal.MojoExecutor.execute (MojoExecutor.java:148) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:117) at org.apache.maven.lifecycle.internal.LifecycleModuleBuilder.buildProject (LifecycleModuleBuilder.java:81) at org.apache.maven.lifecycle.internal.builder.singlethreaded.SingleThreadedBuilder.build (SingleThreadedBuilder.java:56) at org.apache.maven.lifecycle.internal.LifecycleStarter.execute (LifecycleStarter.java:128) at org.apache.maven.DefaultMaven.doExecute (DefaultMaven.java:305) at org.apache.maven.DefaultMaven.doExecute 
(DefaultMaven.java:192) at org.apache.maven.DefaultMaven.execute (DefaultMaven.java:105) at org.apache.maven.cli.MavenCli.execute (MavenCli.java:957) at org.apache.maven.cli.MavenCli.doMain (MavenCli.java:289) at org.apache.maven.cli.MavenCli.main (MavenCli.java:193) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0 (Native Method) at jdk.internal.reflect.NativeMethodAccessorImpl.invoke (NativeMethodAccessorImpl.java:62) at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke (DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke (Method.java:566) at org.codehaus.plexus.classworlds.launcher.Launcher.launchEnhanced (Launcher.java:282) at org.codehaus.plexus.classworlds.launcher.Launcher.launch (Launcher.java:225) at org.codehaus.plexus.classworlds.launcher.Launcher.mainWithExitCode (Launcher.java:406) at org.codehaus.plexus.classworlds.launcher.Launcher.main (Launcher.java:347) at org.codehaus.classworlds.Launcher.main (Launcher.java:47) [ERROR] [ERROR] [ERROR] For more information about the errors and possible solutions, please read the following articles: [ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/DependencyResolutionException [ERROR] [ERROR] After correcting the problems, you can resume the build with the command [ERROR] mvn <args> -rf :netty-handler ``` ### Netty version netty-4.1.65.Final ### JVM version (e.g. `java -version`) adopt-openjdk-11 ### OS version (e.g. `uname -a`) Windows10 20H2 19042.985
[ "pom.xml" ]
[ "pom.xml" ]
[]
diff --git a/pom.xml b/pom.xml index fb63f491991..a1282890a47 100644 --- a/pom.xml +++ b/pom.xml @@ -361,6 +361,15 @@ <profile> <id>boringssl</id> + <activation> + <!-- + Automatically active on windows as we only release static boringssl version of + netty-tcnative for windows. + --> + <os> + <family>windows</family> + </os> + </activation> <properties> <tcnative.artifactId>netty-tcnative-boringssl-static</tcnative.artifactId> <tcnative.classifier />
null
train
test
"2021-05-26T12:12:11"
"2021-05-26T02:27:33Z"
mariaccc
val
netty/netty/11334_11335
netty/netty
netty/netty/11334
netty/netty/11335
[ "keyword_pr_to_issue" ]
18e92304a700c1b3664f5a3cf24ee3ed58bafbdd
103f1d269ed0e4cb335241286e53973703574626
[ "@liubao68 I think I have found the reason.\r\n\r\n@normanmaurer except a check that was missing in the multipartDecoder, i've found a possible issue while debugging within `UnpooledUnsafeDirectByteBuf` (at least there).\r\nThe reason we did not get this issue before in the Junit test was because the underlying check of access to the `undecodedChunk` while using `buffer.getByte(fullPosition)` is only checking the following the following in MathUtil:\r\n\r\n /**\r\n * Determine if the requested {@code index} and {@code length} will fit within {@code capacity}.\r\n * @param index The starting index.\r\n * @param length The length which will be utilized (starting from {@code index}).\r\n * @param capacity The capacity that {@code index + length} is allowed to be within.\r\n * @return {@code false} if the requested {@code index} and {@code length} will fit within {@code capacity}.\r\n * {@code true} if this would result in an index out of bounds exception.\r\n */\r\n public static boolean isOutOfBounds(int index, int length, int capacity) {\r\n return (index | length | capacity | (index + length) | (capacity - (index + length))) < 0;\r\n }\r\n\r\nBut the underlying buffer has a capacity bigger than writerIndex, so accessing `buffer.getByte(writerIndex)` does not thrown an `IndexOutOfBoundsException` (index = writerIndex, length = 1, capacity = 2048).\r\n\r\nSo when I try to reproduce this issue, I am in fact unable to do so, since there is never such an issue but it should.\r\n\r\nI will propose first a fix for the multipart decoder (simple).\r\nBut I don't know how to adress this second issue (it should raized an `IndexOutOfBoundsException` if it is a `read` operation). The implication could be very high (since used everywhere).\r\n\r\nThe stacktrace to understand where it comes:\r\n\r\n isOutOfBounds:64, MathUtil (io.netty.util.internal)\r\n checkRangeBounds:1389, AbstractByteBuf (io.netty.buffer)\r\n checkIndex0:1397, AbstractByteBuf (io.netty.buffer)\r\n checkIndex:1384, AbstractByteBuf (io.netty.buffer)\r\n checkIndex:1379, AbstractByteBuf (io.netty.buffer)\r\n getByte:85, UnpooledUnsafeDirectByteBuf (io.netty.buffer)\r\n findDelimiter:239, HttpPostBodyUtil (io.netty.handler.codec.http.multipart)\r\n loadDataMultipartOptimized:1172, HttpPostMultipartRequestDecoder (io.netty.handler.codec.http.multipart)\r\n getFileUpload:926, HttpPostMultipartRequestDecoder (io.netty.handler.codec.http.multipart)\r\n decodeMultipart:572, HttpPostMultipartRequestDecoder (io.netty.handler.codec.http.multipart)\r\n parseBodyMultipart:463, HttpPostMultipartRequestDecoder (io.netty.handler.codec.http.multipart)\r\n parseBody:432, HttpPostMultipartRequestDecoder (io.netty.handler.codec.http.multipart)\r\n offer:347, HttpPostMultipartRequestDecoder (io.netty.handler.codec.http.multipart)\r\n\r\n" ]
[]
"2021-05-29T10:59:02Z"
[]
HttpPostMultipartRequestDecoder IndexOutOfBoundsException error with netty 4.1.65.Final
The problem happens when using the latest vert.x version; see https://github.com/eclipse-vertx/vert.x/issues/3949. But I guess it is a problem with Netty. ### Expected behavior Uploading multiple form data parts/files will succeed. ### Actual behavior Fails randomly. ### Steps to reproduce See original issue. I have a project test case, but do not have a minimal test case with Netty yet. ### Minimal yet complete reproducer code (or URL to code) ### Netty version 4.1.65.Final. After I change to 4.1.59.Final, this problem is gone. ### JVM version (e.g. `java -version`) ### OS version (e.g. `uname -a`)
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java index f174327d912..e93dfaf216a 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java @@ -233,13 +233,15 @@ static int findDelimiter(ByteBuf buffer, int index, byte[] delimiter, boolean pr newOffset += posDelimiter; toRead -= posDelimiter; // Now check for delimiter - delimiterNotFound = false; - for (int i = 0; i < delimiterLength; i++) { - if (buffer.getByte(newOffset + i) != delimiter[i]) { - newOffset++; - toRead--; - delimiterNotFound = true; - break; + if (toRead >= delimiterLength) { + delimiterNotFound = false; + for (int i = 0; i < delimiterLength; i++) { + if (buffer.getByte(newOffset + i) != delimiter[i]) { + newOffset++; + toRead--; + delimiterNotFound = true; + break; + } } } if (!delimiterNotFound) {
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java index d7d3ab8342b..fb7e08202dd 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java @@ -95,6 +95,63 @@ public void testDecodeFullHttpRequestWithInvalidPayloadReleaseBuffer() { } } + @Test + public void testDelimiterExceedLeftSpaceInCurrentBuffer() { + String delimiter = "--861fbeab-cd20-470c-9609-d40a0f704466"; + String suffix = '\n' + delimiter + "--\n"; + byte[] bsuffix = suffix.getBytes(CharsetUtil.UTF_8); + int partOfDelimiter = bsuffix.length / 2; + int bytesLastChunk = 355 - partOfDelimiter; // to try to have an out of bound since content is > delimiter + byte[] bsuffix1 = Arrays.copyOf(bsuffix, partOfDelimiter); + byte[] bsuffix2 = Arrays.copyOfRange(bsuffix, partOfDelimiter, bsuffix.length); + String prefix = delimiter + "\n" + + "Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n" + + "Content-Type: image/jpeg\n" + + "Content-Length: " + bytesLastChunk + "\n\n"; + HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload"); + request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); + request.headers().set("content-length", prefix.length() + bytesLastChunk + suffix.length()); + + // Factory using Memory mode + HttpDataFactory factory = new DefaultHttpDataFactory(false); + HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); + ByteBuf buf = Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8)); + decoder.offer(new DefaultHttpContent(buf)); + assertNotNull((HttpData) decoder.currentPartialHttpData()); + buf.release(); + // Chunk less than Delimiter size but containing part of delimiter + byte[] body = new byte[bytesLastChunk + bsuffix1.length]; + Arrays.fill(body, (byte) 2); + for (int i = 0; i < bsuffix1.length; i++) { + body[bytesLastChunk + i] = bsuffix1[i]; + } + ByteBuf content = Unpooled.wrappedBuffer(body); + decoder.offer(new DefaultHttpContent(content)); // Ouf of range before here + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); + content.release(); + content = Unpooled.wrappedBuffer(bsuffix2); + decoder.offer(new DefaultHttpContent(content)); + assertNull((HttpData) decoder.currentPartialHttpData()); + content.release(); + decoder.offer(new DefaultLastHttpContent()); + FileUpload data = (FileUpload) decoder.getBodyHttpDatas().get(0); + assertEquals(data.length(), bytesLastChunk); + assertEquals(true, data.isInMemory()); + + InterfaceHttpData[] httpDatas = decoder.getBodyHttpDatas().toArray(new InterfaceHttpData[0]); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals(1, httpData.refCnt(), "Before cleanAllHttpData should be 1"); + } + factory.cleanAllHttpData(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals(1, httpData.refCnt(), "After cleanAllHttpData should be 1 if in Memory"); + } + decoder.destroy(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals(0, httpData.refCnt(), "RefCnt should be 0"); + } + } + private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, boolean inMemory) throws IOException { int nbChunks = 100; @@ -184,7 
+241,7 @@ private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, bo } factory.cleanAllHttpData(); for (InterfaceHttpData httpData : httpDatas) { - assertEquals(inMemory? 1 : 0, httpData.refCnt(), "Before cleanAllHttpData should be 1 if in Memory"); + assertEquals(inMemory? 1 : 0, httpData.refCnt(), "After cleanAllHttpData should be 1 if in Memory"); } decoder.destroy(); for (InterfaceHttpData httpData : httpDatas) {
train
test
"2021-05-28T15:03:18"
"2021-05-29T02:21:26Z"
liubao68
val
netty/netty/11357_11358
netty/netty
netty/netty/11357
netty/netty/11358
[ "keyword_pr_to_issue" ]
6621e4a60f24064e8e5757678179056b42a31436
9f8bfa348e5437417cdfce0cd9d5a3c8ecec71a9
[]
[ "Wrong annotation type\r\n```suggestion\r\n @Test\r\n```", "```suggestion\r\n @Test\r\n```" ]
"2021-06-03T02:44:28Z"
[]
IllegalReferenceCountException with compression enabled but not used if HttpResponse implements HttpContent and not LastHttpContent
It turns out my fix for https://github.com/netty/netty/issues/11092 was incomplete. It fixed the case where the content is compressed, but the handler still fails if the client does not specify an Accept-Encoding header.
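For context, here is a minimal reproducer condensed from the test added in the test patch below. `AssembledHttpResponse` is a helper in Netty's test suite (not a public API class): an `HttpResponse` that also implements `HttpContent` but not `LastHttpContent`, which is exactly the shape that hits the pass-through branch.

```java
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.DefaultLastHttpContent;
import io.netty.handler.codec.http.HttpContentCompressor;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.util.CharsetUtil;
import org.junit.jupiter.api.Test;

public class IdentityEncodingReproducer {
    @Test
    public void responseImplementingHttpContentWithoutAcceptEncoding() {
        EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor());
        // The request carries no Accept-Encoding header, so the encoder takes
        // the identity/pass-through branch instead of compressing.
        ch.writeInbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"));

        // AssembledHttpResponse (test-suite helper) implements both HttpResponse
        // and HttpContent, but not LastHttpContent.
        HttpResponse res = new AssembledHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK,
                Unpooled.copiedBuffer("Hell", CharsetUtil.US_ASCII));
        res.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
        // Before the retain() fix, the content of res was released twice on this
        // path and later surfaced as an IllegalReferenceCountException.
        ch.writeOutbound(res);
        ch.writeOutbound(new DefaultLastHttpContent(Unpooled.copiedBuffer("o, world", CharsetUtil.US_ASCII)));
        ch.finishAndReleaseAll();
    }
}
```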
[ "codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java index e9c50260f34..49d263655de 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java @@ -142,7 +142,7 @@ protected void encode(ChannelHandlerContext ctx, HttpObject msg, List<Object> ou if (isFull) { out.add(ReferenceCountUtil.retain(res)); } else { - out.add(res); + out.add(ReferenceCountUtil.retain(res)); // Pass through all following contents. state = State.PASS_THROUGH; } @@ -165,7 +165,7 @@ protected void encode(ChannelHandlerContext ctx, HttpObject msg, List<Object> ou if (isFull) { out.add(ReferenceCountUtil.retain(res)); } else { - out.add(res); + out.add(ReferenceCountUtil.retain(res)); // Pass through all following contents. state = State.PASS_THROUGH; }
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java index 5663e62ca6e..43184b8b001 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java @@ -44,6 +44,8 @@ import org.junit.jupiter.api.Test; +import java.nio.charset.StandardCharsets; + import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.is; @@ -217,6 +219,65 @@ public void testChunkedContentWithAssembledResponse() throws Exception { assertThat(ch.readOutbound(), is(nullValue())); } + @Test + public void testChunkedContentWithAssembledResponseIdentityEncoding() throws Exception { + EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor()); + ch.writeInbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/")); + + HttpResponse res = new AssembledHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, + Unpooled.copiedBuffer("Hell", CharsetUtil.US_ASCII)); + res.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); + ch.writeOutbound(res); + + ch.writeOutbound(new DefaultHttpContent(Unpooled.copiedBuffer("o, w", CharsetUtil.US_ASCII))); + ch.writeOutbound(new DefaultLastHttpContent(Unpooled.copiedBuffer("orld", CharsetUtil.US_ASCII))); + + HttpContent chunk; + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("Hell")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("o, w")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("orld")); + assertThat(chunk, is(instanceOf(LastHttpContent.class))); + chunk.release(); + + assertThat(ch.readOutbound(), is(nullValue())); + } + + @Test + public void testContentWithAssembledResponseIdentityEncodingHttp10() throws Exception { + EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor()); + ch.writeInbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/")); + + HttpResponse res = new AssembledHttpResponse(HttpVersion.HTTP_1_0, HttpResponseStatus.OK, + Unpooled.copiedBuffer("Hell", CharsetUtil.US_ASCII)); + ch.writeOutbound(res); + + ch.writeOutbound(new DefaultHttpContent(Unpooled.copiedBuffer("o, w", CharsetUtil.US_ASCII))); + ch.writeOutbound(new DefaultLastHttpContent(Unpooled.copiedBuffer("orld", CharsetUtil.US_ASCII))); + + HttpContent chunk; + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("Hell")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("o, w")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("orld")); + assertThat(chunk, is(instanceOf(LastHttpContent.class))); + chunk.release(); + + assertThat(ch.readOutbound(), is(nullValue())); + } + @Test public void testChunkedContentWithTrailingHeader() throws Exception { EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor());
train
test
"2021-06-02T08:41:17"
"2021-06-03T02:39:29Z"
stuartwdouglas
val
netty/netty/11391_11400
netty/netty
netty/netty/11391
netty/netty/11400
[ "keyword_pr_to_issue" ]
8d76f402b1d27eba44cf98877c86128280df4a0a
2abe20a6b5f53a5e8c508b6abb9eeed9562cb5d7
[ "@mostroverkhov SGTM, would you like to send a PR?" ]
[ "nit: using `if (args.length > 3)` will remove redundancy for common arguments:\r\n\r\n```java\r\nif (args.length > 3) {\r\n taskQueueFactory = (EventLoopTaskQueueFactory) args[3];\r\n}\r\nif (args.length > 4) {\r\n tailTaskQueueFactory = (EventLoopTaskQueueFactory) args[4];\r\n}\r\n```", "Javadoc?", "How about adding a protected final getter method with the same name instead of exposing a field?", "How about adding a protected final getter method with the same name instead of exposing a field?", "There is always from 3 to 5 arguments for `EventLoopGroups` that exist in this project, so my thinking was that current form is simpler. Conditionals form looks better where there is wider range of optional arguments (>2) - can change if It is more convenient.\r\n\r\n", "Most \"non-convenience\" constructors lacked javadocs, so I omitted them for uniformity - will add.", "Child classes are defined in own packages outside `io.netty.util.concurrent`, so such getters are inaccessible by children test code - children would need helper method that only delegates to said getter.\r\n", "see above comment", "> Can we make the same changes to all `EventLoop(Group)` implementations such as `NioEventLoop(Group)`?\r\n\r\nYes, I will add them once approach for single implementation is accepted", "Sounds good, just a nit so feel free to ignore.", "Or for example\r\n```\r\nSingleThreadEventLoop {\r\n protected final Queue<Runnable> tailTaskQueue0() {}\r\n}\r\n```\r\n```\r\nEpollEventLoop extends SingleThreadEventLoop {\r\n /*package-private*/ final Queue<Runnable> tailTaskQueue() {\r\n return tailTaskQueue0();\r\n }\r\n}\r\n```\r\nIs this what you are suggesting @trustin ?", "Not thrilled about `protected` visibility for methods only required for tests, can we just have package local visibility?", "Me neither - but java does not allow narrowing visibility in subclasses (keep in mind `SingleThreadEventExecutor` and `SingleThreadEventLoop` are in different packages). \r\n\r\nI know about 2 approaches for this - using single package across multiple modules so `package-private` members are accessible across whole project, or `internal` package with public visibility by default - with convention that only project code calls `internal` classes. However, `netty` follows neither ¯\\_(ツ)_/¯ - logically related classes are scattered across multiple packages. Can you hint a simpler solution?\r\n\r\nGiven \"mechanical\" nature of this change (just exposing constructor parameter in child classes from parent) I am thinking about removing tests after verifying they pass - does It make sense? It seems every way of having this functionality tested is \"awkward\" (under existing package organization)", "> keep in mind SingleThreadEventExecutor and SingleThreadEventLoop are in different packages\r\n\r\nAah I didn't realize this, thanks.\r\n\r\n> Given \"mechanical\" nature of this change (just exposing constructor parameter in child classes from parent) I am thinking about removing tests after verifying they pass - does It make sense?\r\n\r\nAgreed, these tests aren't super useful specially when we have to expose `protected` methods for them.", "Also put an `@UnstableApi` on it", "+1... Let's just get rid of it then", "nit: you can replace nested `if` blocks with sequential which makes it easier to read IMHO.\r\n\r\n```java\r\nif (argsLength > 3) {\r\n taskQueueFactory = (EventLoopTaskQueueFactory) args[3];\r\n}\r\nif (argsLength > 4) {\r\n tailTaskQueueFactory = (EventLoopTaskQueueFactory) args[4];\r\n}\r\n```" ]
"2021-06-18T19:34:31Z"
[]
Tail tasks queue: configure separately from tasks queue
IO transports (primarily epoll, but this also applies to kqueue and nio) can't be configured with a separate `tail tasks` queue factory - instead a single queue factory is used for both normal tasks and tail tasks. Their parent [SingleThreadEventLoop](https://github.com/netty/netty/blob/0d5774a82b470833c5f8a84fa4708142a4801f52/transport/src/main/java/io/netty/channel/SingleThreadEventLoop.java#L63) does have a constructor accepting both `taskQueue` and `tailTaskQueue`. I use `SingleThreadEventLoop.executeAfterEventLoopIteration(Runnable)` for internal tasks executed strictly from the event loop, and would prefer to configure a queue other than MPSC.
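A sketch of the requested configuration, assuming the constructor shape ultimately added by the linked PR (separate `taskQueueFactory` and `tailTaskQueueFactory` parameters, with `null` falling back to Netty's default MPSC queue). The `LinkedBlockingQueue` is an arbitrary stand-in for "a queue other than MPSC":

```java
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;

import io.netty.channel.DefaultSelectStrategyFactory;
import io.netty.channel.EventLoopTaskQueueFactory;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.util.concurrent.DefaultEventExecutorChooserFactory;
import io.netty.util.concurrent.RejectedExecutionHandlers;

public final class SeparateTailQueueExample {
    public static void main(String[] args) {
        // Dedicated factory for the tail-task queue only; tasks submitted via
        // executeAfterEventLoopIteration(Runnable) land in this queue.
        EventLoopTaskQueueFactory tailTaskQueueFactory = new EventLoopTaskQueueFactory() {
            @Override
            public Queue<Runnable> newTaskQueue(int maxCapacity) {
                return new LinkedBlockingQueue<Runnable>(maxCapacity);
            }
        };
        EpollEventLoopGroup group = new EpollEventLoopGroup(
                1,
                null,                                        // default Executor
                DefaultEventExecutorChooserFactory.INSTANCE,
                DefaultSelectStrategyFactory.INSTANCE,
                RejectedExecutionHandlers.reject(),
                null,                                        // default MPSC queue for execute(Runnable)
                tailTaskQueueFactory);
        group.shutdownGracefully();
    }
}
```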
[ "transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoop.java", "transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoopGroup.java", "transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoop.java", "transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoopGroup.java", "transport/src/main/java/io/netty/channel/nio/NioEventLoop.java", "transport/src/main/java/io/netty/channel/nio/NioEventLoopGroup.java" ]
[ "transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoop.java", "transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoopGroup.java", "transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoop.java", "transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoopGroup.java", "transport/src/main/java/io/netty/channel/nio/NioEventLoop.java", "transport/src/main/java/io/netty/channel/nio/NioEventLoopGroup.java" ]
[ "transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollEventLoopTest.java" ]
diff --git a/transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoop.java b/transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoop.java index c9cb838ed80..c3b2f6016ac 100644 --- a/transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoop.java +++ b/transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoop.java @@ -86,8 +86,8 @@ public int get() throws Exception { EpollEventLoop(EventLoopGroup parent, Executor executor, int maxEvents, SelectStrategy strategy, RejectedExecutionHandler rejectedExecutionHandler, - EventLoopTaskQueueFactory queueFactory) { - super(parent, executor, false, newTaskQueue(queueFactory), newTaskQueue(queueFactory), + EventLoopTaskQueueFactory taskQueueFactory, EventLoopTaskQueueFactory tailTaskQueueFactory) { + super(parent, executor, false, newTaskQueue(taskQueueFactory), newTaskQueue(tailTaskQueueFactory), rejectedExecutionHandler); selectStrategy = ObjectUtil.checkNotNull(strategy, "strategy"); if (maxEvents == 0) { diff --git a/transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoopGroup.java b/transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoopGroup.java index 80de7fe0fad..ee1467d9149 100644 --- a/transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoopGroup.java +++ b/transport-native-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoopGroup.java @@ -21,6 +21,7 @@ import io.netty.channel.EventLoopTaskQueueFactory; import io.netty.channel.MultithreadEventLoopGroup; import io.netty.channel.SelectStrategyFactory; +import io.netty.channel.SingleThreadEventLoop; import io.netty.util.concurrent.EventExecutorChooserFactory; import io.netty.util.concurrent.RejectedExecutionHandler; import io.netty.util.concurrent.RejectedExecutionHandlers; @@ -134,6 +135,28 @@ public EpollEventLoopGroup(int nThreads, Executor executor, EventExecutorChooser super(nThreads, executor, chooserFactory, 0, selectStrategyFactory, rejectedExecutionHandler, queueFactory); } + /** + * @param nThreads the number of threads that will be used by this instance. + * @param executor the Executor to use, or {@code null} if default one should be used. + * @param chooserFactory the {@link EventExecutorChooserFactory} to use. + * @param selectStrategyFactory the {@link SelectStrategyFactory} to use. + * @param rejectedExecutionHandler the {@link RejectedExecutionHandler} to use. + * @param taskQueueFactory the {@link EventLoopTaskQueueFactory} to use for + * {@link SingleThreadEventLoop#execute(Runnable)}, + * or {@code null} if default one should be used. + * @param tailTaskQueueFactory the {@link EventLoopTaskQueueFactory} to use for + * {@link SingleThreadEventLoop#executeAfterEventLoopIteration(Runnable)}, + * or {@code null} if default one should be used. + */ + public EpollEventLoopGroup(int nThreads, Executor executor, EventExecutorChooserFactory chooserFactory, + SelectStrategyFactory selectStrategyFactory, + RejectedExecutionHandler rejectedExecutionHandler, + EventLoopTaskQueueFactory taskQueueFactory, + EventLoopTaskQueueFactory tailTaskQueueFactory) { + super(nThreads, executor, chooserFactory, 0, selectStrategyFactory, rejectedExecutionHandler, taskQueueFactory, + tailTaskQueueFactory); + } + /** * @deprecated This method will be removed in future releases, and is not guaranteed to have any impacts. */ @@ -146,9 +169,21 @@ public void setIoRatio(int ioRatio) { @Override protected EventLoop newChild(Executor executor, Object... 
args) throws Exception { - EventLoopTaskQueueFactory queueFactory = args.length == 4 ? (EventLoopTaskQueueFactory) args[3] : null; - return new EpollEventLoop(this, executor, (Integer) args[0], - ((SelectStrategyFactory) args[1]).newSelectStrategy(), - (RejectedExecutionHandler) args[2], queueFactory); + Integer maxEvents = (Integer) args[0]; + SelectStrategyFactory selectStrategyFactory = (SelectStrategyFactory) args[1]; + RejectedExecutionHandler rejectedExecutionHandler = (RejectedExecutionHandler) args[2]; + EventLoopTaskQueueFactory taskQueueFactory = null; + EventLoopTaskQueueFactory tailTaskQueueFactory = null; + + int argsLength = args.length; + if (argsLength > 3) { + taskQueueFactory = (EventLoopTaskQueueFactory) args[3]; + } + if (argsLength > 4) { + tailTaskQueueFactory = (EventLoopTaskQueueFactory) args[4]; + } + return new EpollEventLoop(this, executor, maxEvents, + selectStrategyFactory.newSelectStrategy(), + rejectedExecutionHandler, taskQueueFactory, tailTaskQueueFactory); } } diff --git a/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoop.java b/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoop.java index 6791768a7d2..cdd57b30ae8 100644 --- a/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoop.java +++ b/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoop.java @@ -73,8 +73,8 @@ public int get() throws Exception { KQueueEventLoop(EventLoopGroup parent, Executor executor, int maxEvents, SelectStrategy strategy, RejectedExecutionHandler rejectedExecutionHandler, - EventLoopTaskQueueFactory queueFactory) { - super(parent, executor, false, newTaskQueue(queueFactory), newTaskQueue(queueFactory), + EventLoopTaskQueueFactory taskQueueFactory, EventLoopTaskQueueFactory tailTaskQueueFactory) { + super(parent, executor, false, newTaskQueue(taskQueueFactory), newTaskQueue(tailTaskQueueFactory), rejectedExecutionHandler); this.selectStrategy = ObjectUtil.checkNotNull(strategy, "strategy"); this.kqueueFd = Native.newKQueue(); diff --git a/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoopGroup.java b/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoopGroup.java index 1be9de7574d..30c726da503 100644 --- a/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoopGroup.java +++ b/transport-native-kqueue/src/main/java/io/netty/channel/kqueue/KQueueEventLoopGroup.java @@ -20,6 +20,7 @@ import io.netty.channel.EventLoopTaskQueueFactory; import io.netty.channel.MultithreadEventLoopGroup; import io.netty.channel.SelectStrategyFactory; +import io.netty.channel.SingleThreadEventLoop; import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.EventExecutorChooserFactory; import io.netty.util.concurrent.RejectedExecutionHandler; @@ -133,6 +134,28 @@ public KQueueEventLoopGroup(int nThreads, Executor executor, EventExecutorChoose rejectedExecutionHandler, queueFactory); } + /** + * @param nThreads the number of threads that will be used by this instance. + * @param executor the Executor to use, or {@code null} if default one should be used. + * @param chooserFactory the {@link EventExecutorChooserFactory} to use. + * @param selectStrategyFactory the {@link SelectStrategyFactory} to use. + * @param rejectedExecutionHandler the {@link RejectedExecutionHandler} to use. 
+ * @param taskQueueFactory the {@link EventLoopTaskQueueFactory} to use for + * {@link SingleThreadEventLoop#execute(Runnable)}, + * or {@code null} if default one should be used. + * @param tailTaskQueueFactory the {@link EventLoopTaskQueueFactory} to use for + * {@link SingleThreadEventLoop#executeAfterEventLoopIteration(Runnable)}, + * or {@code null} if default one should be used. + */ + public KQueueEventLoopGroup(int nThreads, Executor executor, EventExecutorChooserFactory chooserFactory, + SelectStrategyFactory selectStrategyFactory, + RejectedExecutionHandler rejectedExecutionHandler, + EventLoopTaskQueueFactory taskQueueFactory, + EventLoopTaskQueueFactory tailTaskQueueFactory) { + super(nThreads, executor, chooserFactory, 0, selectStrategyFactory, rejectedExecutionHandler, taskQueueFactory, + tailTaskQueueFactory); + } + /** * Sets the percentage of the desired amount of time spent for I/O in the child event loops. The default value is * {@code 50}, which means the event loop will try to spend the same amount of time for I/O as for non-I/O tasks. @@ -145,10 +168,21 @@ public void setIoRatio(int ioRatio) { @Override protected EventLoop newChild(Executor executor, Object... args) throws Exception { - EventLoopTaskQueueFactory queueFactory = args.length == 4 ? (EventLoopTaskQueueFactory) args[3] : null; - - return new KQueueEventLoop(this, executor, (Integer) args[0], - ((SelectStrategyFactory) args[1]).newSelectStrategy(), - (RejectedExecutionHandler) args[2], queueFactory); + Integer maxEvents = (Integer) args[0]; + SelectStrategyFactory selectStrategyFactory = (SelectStrategyFactory) args[1]; + RejectedExecutionHandler rejectedExecutionHandler = (RejectedExecutionHandler) args[2]; + EventLoopTaskQueueFactory taskQueueFactory = null; + EventLoopTaskQueueFactory tailTaskQueueFactory = null; + + int argsLength = args.length; + if (argsLength > 3) { + taskQueueFactory = (EventLoopTaskQueueFactory) args[3]; + } + if (argsLength > 4) { + tailTaskQueueFactory = (EventLoopTaskQueueFactory) args[4]; + } + return new KQueueEventLoop(this, executor, maxEvents, + selectStrategyFactory.newSelectStrategy(), + rejectedExecutionHandler, taskQueueFactory, tailTaskQueueFactory); } } diff --git a/transport/src/main/java/io/netty/channel/nio/NioEventLoop.java b/transport/src/main/java/io/netty/channel/nio/NioEventLoop.java index 5acdff64d5f..719037d9417 100644 --- a/transport/src/main/java/io/netty/channel/nio/NioEventLoop.java +++ b/transport/src/main/java/io/netty/channel/nio/NioEventLoop.java @@ -134,8 +134,8 @@ public Void run() { NioEventLoop(NioEventLoopGroup parent, Executor executor, SelectorProvider selectorProvider, SelectStrategy strategy, RejectedExecutionHandler rejectedExecutionHandler, - EventLoopTaskQueueFactory queueFactory) { - super(parent, executor, false, newTaskQueue(queueFactory), newTaskQueue(queueFactory), + EventLoopTaskQueueFactory taskQueueFactory, EventLoopTaskQueueFactory tailTaskQueueFactory) { + super(parent, executor, false, newTaskQueue(taskQueueFactory), newTaskQueue(tailTaskQueueFactory), rejectedExecutionHandler); this.provider = ObjectUtil.checkNotNull(selectorProvider, "selectorProvider"); this.selectStrategy = ObjectUtil.checkNotNull(strategy, "selectStrategy"); diff --git a/transport/src/main/java/io/netty/channel/nio/NioEventLoopGroup.java b/transport/src/main/java/io/netty/channel/nio/NioEventLoopGroup.java index 89f7b40bf1b..9854e9c03c0 100644 --- a/transport/src/main/java/io/netty/channel/nio/NioEventLoopGroup.java +++ 
b/transport/src/main/java/io/netty/channel/nio/NioEventLoopGroup.java @@ -16,11 +16,12 @@ package io.netty.channel.nio; import io.netty.channel.Channel; -import io.netty.channel.EventLoop; import io.netty.channel.DefaultSelectStrategyFactory; +import io.netty.channel.EventLoop; import io.netty.channel.EventLoopTaskQueueFactory; import io.netty.channel.MultithreadEventLoopGroup; import io.netty.channel.SelectStrategyFactory; +import io.netty.channel.SingleThreadEventLoop; import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.EventExecutorChooserFactory; import io.netty.util.concurrent.RejectedExecutionHandler; @@ -119,6 +120,30 @@ public NioEventLoopGroup(int nThreads, Executor executor, EventExecutorChooserFa rejectedExecutionHandler, taskQueueFactory); } + /** + * @param nThreads the number of threads that will be used by this instance. + * @param executor the Executor to use, or {@code null} if default one should be used. + * @param chooserFactory the {@link EventExecutorChooserFactory} to use. + * @param selectorProvider the {@link SelectorProvider} to use. + * @param selectStrategyFactory the {@link SelectStrategyFactory} to use. + * @param rejectedExecutionHandler the {@link RejectedExecutionHandler} to use. + * @param taskQueueFactory the {@link EventLoopTaskQueueFactory} to use for + * {@link SingleThreadEventLoop#execute(Runnable)}, + * or {@code null} if default one should be used. + * @param tailTaskQueueFactory the {@link EventLoopTaskQueueFactory} to use for + * {@link SingleThreadEventLoop#executeAfterEventLoopIteration(Runnable)}, + * or {@code null} if default one should be used. + */ + public NioEventLoopGroup(int nThreads, Executor executor, EventExecutorChooserFactory chooserFactory, + SelectorProvider selectorProvider, + SelectStrategyFactory selectStrategyFactory, + RejectedExecutionHandler rejectedExecutionHandler, + EventLoopTaskQueueFactory taskQueueFactory, + EventLoopTaskQueueFactory tailTaskQueueFactory) { + super(nThreads, executor, chooserFactory, selectorProvider, selectStrategyFactory, + rejectedExecutionHandler, taskQueueFactory, tailTaskQueueFactory); + } + /** * Sets the percentage of the desired amount of time spent for I/O in the child event loops. The default value is * {@code 50}, which means the event loop will try to spend the same amount of time for I/O as for non-I/O tasks. @@ -141,8 +166,21 @@ public void rebuildSelectors() { @Override protected EventLoop newChild(Executor executor, Object... args) throws Exception { - EventLoopTaskQueueFactory queueFactory = args.length == 4 ? (EventLoopTaskQueueFactory) args[3] : null; - return new NioEventLoop(this, executor, (SelectorProvider) args[0], - ((SelectStrategyFactory) args[1]).newSelectStrategy(), (RejectedExecutionHandler) args[2], queueFactory); + SelectorProvider selectorProvider = (SelectorProvider) args[0]; + SelectStrategyFactory selectStrategyFactory = (SelectStrategyFactory) args[1]; + RejectedExecutionHandler rejectedExecutionHandler = (RejectedExecutionHandler) args[2]; + EventLoopTaskQueueFactory taskQueueFactory = null; + EventLoopTaskQueueFactory tailTaskQueueFactory = null; + + int argsLength = args.length; + if (argsLength > 3) { + taskQueueFactory = (EventLoopTaskQueueFactory) args[3]; + } + if (argsLength > 4) { + tailTaskQueueFactory = (EventLoopTaskQueueFactory) args[4]; + } + return new NioEventLoop(this, executor, selectorProvider, + selectStrategyFactory.newSelectStrategy(), + rejectedExecutionHandler, taskQueueFactory, tailTaskQueueFactory); } }
diff --git a/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollEventLoopTest.java b/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollEventLoopTest.java index 02e55fd6aaa..f3d42ae3f24 100644 --- a/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollEventLoopTest.java +++ b/transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollEventLoopTest.java @@ -15,13 +15,13 @@ */ package io.netty.channel.epoll; +import io.netty.testsuite.transport.AbstractSingleThreadEventLoopTest; import io.netty.channel.DefaultSelectStrategyFactory; import io.netty.channel.EventLoop; import io.netty.channel.EventLoopGroup; import io.netty.channel.ServerChannel; import io.netty.channel.socket.ServerSocketChannel; import io.netty.channel.unix.FileDescriptor; -import io.netty.testsuite.transport.AbstractSingleThreadEventLoopTest; import io.netty.util.concurrent.DefaultThreadFactory; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.RejectedExecutionHandlers; @@ -61,7 +61,8 @@ public void testScheduleBigDelayNotOverflow() { final EventLoopGroup group = new EpollEventLoop(null, new ThreadPerTaskExecutor(new DefaultThreadFactory(getClass())), 0, - DefaultSelectStrategyFactory.INSTANCE.newSelectStrategy(), RejectedExecutionHandlers.reject(), null) { + DefaultSelectStrategyFactory.INSTANCE.newSelectStrategy(), RejectedExecutionHandlers.reject(), + null, null) { @Override void handleLoopException(Throwable t) { capture.set(t);
train
test
"2021-06-30T16:17:57"
"2021-06-15T18:22:16Z"
mostroverkhov
val
netty/netty/11398_11404
netty/netty
netty/netty/11398
netty/netty/11404
[ "keyword_pr_to_issue" ]
0c9a86db812573fbdc9f39b4ec4f29188248ff3a
956e1da2adb597164ff4a69aa19f97e0d7dde0be
[ "@dapengzhang0 ouch... that slipped through. Let me take care. ", "@pmlopes @vietj any idea on how to remove the usage of the annotations and just do everything with config files etc ?", "Also @trustin ", "@normanmaurer Oracle changed the artifact coordinates of the artifacts to https://search.maven.org/artifact/org.graalvm.nativeimage/svm/21.1.0/jar it seems these ones are licensed under GPL2 + Classpath Exception. \r\n\r\nAnd the sdk one is licensed under the Universal Permissive Licence 1.0.\r\n\r\nI'm not sure which one would be best. The annotations are only needed for substitution classes, the remaining configuration can be done using property files only.\r\n\r\nPerhaps a new review of which substitutions are in use should be done and we could try to see if they're still needed or we can achieve the same with just property config.", "@pmlopes I have basically 0 % knowledge of graal / svm so I am a bit lost what to do... I think if we cant figure stuff out we may just need to remove the whole substitution stuff.\r\n\r\n\r\nThat said I was also wondering if it is a license problem at all as it is a \"provided\" dependency and so 100 % optional, but I am not a lawyer so 🤷 ", "So, I had a look at the usages and if we were to apply the patch:\r\n\r\n```patch\r\ndiff --git a/common/pom.xml b/common/pom.xml\r\nindex 8c9fd060b6..f812999b52 100644\r\n--- a/common/pom.xml\r\n+++ b/common/pom.xml\r\n@@ -39,7 +39,7 @@\r\n \r\n <dependencies>\r\n <dependency>\r\n- <groupId>com.oracle.substratevm</groupId>\r\n+ <groupId>org.graalvm.nativeimage</groupId>\r\n <artifactId>svm</artifactId>\r\n <version>${graalvm.version}</version>\r\n <!-- Provided scope as it is only needed for compiling the SVM substitution classes -->\r\ndiff --git a/pom.xml b/pom.xml\r\nindex 6a13ce1f6a..bc6118e9f2 100644\r\n--- a/pom.xml\r\n+++ b/pom.xml\r\n@@ -495,7 +495,7 @@\r\n <skipAutobahnTestsuite>false</skipAutobahnTestsuite>\r\n <skipHttp2Testsuite>false</skipHttp2Testsuite>\r\n <skipJapicmp>false</skipJapicmp>\r\n- <graalvm.version>19.2.1</graalvm.version>\r\n+ <graalvm.version>19.3.6</graalvm.version>\r\n <brotli4j.version>1.4.2</brotli4j.version>\r\n <!-- By default skip native testsuite as it requires a custom environment with graalvm installed -->\r\n <skipNativeImageTestsuite>true</skipNativeImageTestsuite>\r\ndiff --git a/testsuite-native-image-client/pom.xml b/testsuite-native-image-client/pom.xml\r\nindex 3aeee97540..bc752c24a7 100644\r\n--- a/testsuite-native-image-client/pom.xml\r\n+++ b/testsuite-native-image-client/pom.xml\r\n@@ -65,7 +65,7 @@\r\n <build>\r\n <plugins>\r\n <plugin>\r\n- <groupId>com.oracle.substratevm</groupId>\r\n+ <groupId>org.graalvm.nativeimage</groupId>\r\n <artifactId>native-image-maven-plugin</artifactId>\r\n <version>${graalvm.version}</version>\r\n <executions>\r\n```\r\n\r\nThis would make the change the license from GPL2 to GPL2+CE http://openjdk.java.net/legal/gplv2+ce.html (which by the way is the same as the OpenJDK, but I make your words, mine too, I'm not a lawyer).\r\n\r\nAbout going the extra mile and get rid of the annotations, it would be possible but it means some (maybe large) refactoring in `netty-common`, the affected classes are:\r\n\r\n* NetUtil\r\n* CleanerJava6\r\n* PlatformDependent0\r\n* PlatformDependent\r\n* UnsafeRefArrayAccess (shaded jctools)\r\n", "@pmlopes this sounds like all we need (I think)... Can you open a PR for it so we get the attribution right ?" ]
[]
"2021-06-21T09:30:18Z"
[]
Netty depends on GraalVM which is under GPL license
### Netty version 4.1.63.Final Because Netty started to [depend](https://github.com/netty/netty/blob/netty-4.1.63.Final/common/pom.xml#L42) on [GraalVM](https://search.maven.org/artifact/com.oracle.substratevm/svm/19.2.1/jar), which is under the GPL license, we cannot update the Netty version in our internal repository while mirroring exactly the same source code and its dependencies' source code, even though only the annotations are used. Is it possible not to use those annotations in the source code?
[ "common/pom.xml", "pom.xml" ]
[ "common/pom.xml", "pom.xml" ]
[ "testsuite-native-image-client-runtime-init/pom.xml", "testsuite-native-image-client/pom.xml", "testsuite-native-image/pom.xml" ]
diff --git a/common/pom.xml b/common/pom.xml index 8c9fd060b63..f812999b52f 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -39,7 +39,7 @@ <dependencies> <dependency> - <groupId>com.oracle.substratevm</groupId> + <groupId>org.graalvm.nativeimage</groupId> <artifactId>svm</artifactId> <version>${graalvm.version}</version> <!-- Provided scope as it is only needed for compiling the SVM substitution classes --> diff --git a/pom.xml b/pom.xml index 6a13ce1f6a7..bc6118e9f26 100644 --- a/pom.xml +++ b/pom.xml @@ -495,7 +495,7 @@ <skipAutobahnTestsuite>false</skipAutobahnTestsuite> <skipHttp2Testsuite>false</skipHttp2Testsuite> <skipJapicmp>false</skipJapicmp> - <graalvm.version>19.2.1</graalvm.version> + <graalvm.version>19.3.6</graalvm.version> <brotli4j.version>1.4.2</brotli4j.version> <!-- By default skip native testsuite as it requires a custom environment with graalvm installed --> <skipNativeImageTestsuite>true</skipNativeImageTestsuite>
diff --git a/testsuite-native-image-client-runtime-init/pom.xml b/testsuite-native-image-client-runtime-init/pom.xml index aaf9bf623f8..c29f9e2e116 100644 --- a/testsuite-native-image-client-runtime-init/pom.xml +++ b/testsuite-native-image-client-runtime-init/pom.xml @@ -59,7 +59,7 @@ <build> <plugins> <plugin> - <groupId>com.oracle.substratevm</groupId> + <groupId>org.graalvm.nativeimage</groupId> <artifactId>native-image-maven-plugin</artifactId> <version>${graalvm.version}</version> <executions> diff --git a/testsuite-native-image-client/pom.xml b/testsuite-native-image-client/pom.xml index 3aeee97540e..bc752c24a7f 100644 --- a/testsuite-native-image-client/pom.xml +++ b/testsuite-native-image-client/pom.xml @@ -65,7 +65,7 @@ <build> <plugins> <plugin> - <groupId>com.oracle.substratevm</groupId> + <groupId>org.graalvm.nativeimage</groupId> <artifactId>native-image-maven-plugin</artifactId> <version>${graalvm.version}</version> <executions> diff --git a/testsuite-native-image/pom.xml b/testsuite-native-image/pom.xml index 57b694b792a..3647de6923d 100644 --- a/testsuite-native-image/pom.xml +++ b/testsuite-native-image/pom.xml @@ -80,7 +80,7 @@ <build> <plugins> <plugin> - <groupId>com.oracle.substratevm</groupId> + <groupId>org.graalvm.nativeimage</groupId> <artifactId>native-image-maven-plugin</artifactId> <version>${graalvm.version}</version> <executions>
train
test
"2021-06-21T08:54:58"
"2021-06-17T23:03:21Z"
dapengzhang0
val
netty/netty/11393_11457
netty/netty
netty/netty/11393
netty/netty/11457
[ "keyword_pr_to_issue" ]
0b2e955aff1555616e0d5b1d5047f1737347bdfe
ede7a604f185cd716032ecbb356b6ea5130f7d0d
[ "Enum sounds like a good idea. \r\n\r\n@normanmaurer WDYT? I can do a PR.", "Following the pattern of `ApplicationProtocolNames`[1] will keep it consistent, i.e. define a class containing the constants for the names instead of a java enum.\r\n\r\n[1] https://github.com/netty/netty/blob/4.1/handler/src/main/java/io/netty/handler/ssl/ApplicationProtocolNames.java", "@NiteshKant Sounds a good idea." ]
[ "I think we should add `@Deprecated` annotations on the protocols that aren't TLS v1.2 or TLS v1.3. Use of those older protocols is discouraged now a days, as far as I understand.", "Are there any of these that should be discouraged?", "Cipher comes below SSL/TLS protocols. If we deprecate them then ciphers will fall under the same category. So marking ciphers `@Deprecated` is not needed.", "@normanmaurer This looks like a very good practice. WDYT?", "Okay, sounds good.", "Yep... Also add a `@deprecated` javadoc tag and tell why it is deprecated " ]
"2021-07-06T15:32:36Z"
[]
No constant for SSL protocol selection
This is a minor issue but one that I think would be easy to fix. Please educate me if there's a complexity or nuance that I'm not seeing. Netty lets the user choose the TLS version.

```java
clientSslContext = SslContextBuilder
    .forClient()
    .protocols("TLSv1.3")
    ...
    .build();
```

There's no constant/enum defined for this that I can find in Netty. Should there be a public string/enum for users to use when making this selection? The only definition I can find is this constant, but it is package-private: https://github.com/netty/netty/blob/a98c60283b38830607780e039c7dd52715ec5814/handler/src/main/java/io/netty/handler/ssl/SslUtils.java#L66
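With the constants introduced by the linked PR, the selection can be written against a compile-checked name instead of an error-prone string literal. A small sketch (the constant name `SslProtocols.TLS_v1_3` is taken from the gold patch below):

```java
import javax.net.ssl.SSLException;

import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslProtocols;

public final class TlsProtocolSelection {
    public static SslContext clientContext() throws SSLException {
        // SslProtocols.TLS_v1_3 replaces the hand-typed "TLSv1.3" literal,
        // so a typo like "TSLv1.3" fails at compile time rather than at runtime.
        return SslContextBuilder
                .forClient()
                .protocols(SslProtocols.TLS_v1_3)
                .build();
    }
}
```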
[ "handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java", "handler/src/main/java/io/netty/handler/ssl/OpenSsl.java", "handler/src/main/java/io/netty/handler/ssl/OpenSslTlsv13X509ExtendedTrustManager.java", "handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslEngine.java", "handler/src/main/java/io/netty/handler/ssl/SslUtils.java" ]
[ "handler/src/main/java/io/netty/handler/ssl/Ciphers.java", "handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java", "handler/src/main/java/io/netty/handler/ssl/OpenSsl.java", "handler/src/main/java/io/netty/handler/ssl/OpenSslTlsv13X509ExtendedTrustManager.java", "handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslEngine.java", "handler/src/main/java/io/netty/handler/ssl/SslProtocols.java", "handler/src/main/java/io/netty/handler/ssl/SslUtils.java" ]
[ "handler/src/test/java/io/netty/handler/ssl/CipherSuiteCanaryTest.java", "handler/src/test/java/io/netty/handler/ssl/CloseNotifyTest.java", "handler/src/test/java/io/netty/handler/ssl/DelegatingSslContextTest.java", "handler/src/test/java/io/netty/handler/ssl/JdkSslEngineTest.java", "handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java", "handler/src/test/java/io/netty/handler/ssl/OpenSslPrivateKeyMethodTest.java", "handler/src/test/java/io/netty/handler/ssl/OpenSslServerContextTest.java", "handler/src/test/java/io/netty/handler/ssl/ParameterizedSslHandlerTest.java", "handler/src/test/java/io/netty/handler/ssl/RenegotiateTest.java", "handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java", "handler/src/test/java/io/netty/handler/ssl/SniHandlerTest.java", "handler/src/test/java/io/netty/handler/ssl/SslHandlerTest.java" ]
diff --git a/handler/src/main/java/io/netty/handler/ssl/Ciphers.java b/handler/src/main/java/io/netty/handler/ssl/Ciphers.java new file mode 100644 index 00000000000..caa390a447e --- /dev/null +++ b/handler/src/main/java/io/netty/handler/ssl/Ciphers.java @@ -0,0 +1,758 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.ssl; + +import io.netty.util.internal.ObjectUtil; + +import java.util.Arrays; + +/** + * Cipher suites + */ +public final class Ciphers { + + /** + * TLS_AES_256_GCM_SHA384 + */ + public static final String TLS_AES_256_GCM_SHA384 = "TLS_AES_256_GCM_SHA384"; + + /** + * TLS_CHACHA20_POLY1305_SHA256 + */ + public static final String TLS_CHACHA20_POLY1305_SHA256 = "TLS_CHACHA20_POLY1305_SHA256"; + + /** + * TLS_AES_128_GCM_SHA256 + */ + public static final String TLS_AES_128_GCM_SHA256 = "TLS_AES_128_GCM_SHA256"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"; + + /** + * TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + */ + public static final String TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"; + + /** + * TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 + */ + public static final String TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384"; + + /** + * TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 + */ + public static final String TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"; + + /** + * TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + */ + public static final String TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"; + + /** + * TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + */ + public static final String TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"; + + /** + * TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + */ + public static final String TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_256_CBC_CCM8 + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_256_CBC_CCM8 = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_CCM8"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_256_CBC_CCM + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_256_CBC_CCM = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_CCM"; + + /** + * TLS_DHE_RSA_WITH_AES_256_CBC_CCM8 + */ + public static final String TLS_DHE_RSA_WITH_AES_256_CBC_CCM8 = "TLS_DHE_RSA_WITH_AES_256_CBC_CCM8"; + + /** + * TLS_DHE_RSA_WITH_AES_256_CBC_CCM + */ + public static final String TLS_DHE_RSA_WITH_AES_256_CBC_CCM = "TLS_DHE_RSA_WITH_AES_256_CBC_CCM"; + + /** + * TLS_ECDHE_ECDSA_WITH_ARIA256_GCM_SHA384 + */ + public static final String TLS_ECDHE_ECDSA_WITH_ARIA256_GCM_SHA384 = "TLS_ECDHE_ECDSA_WITH_ARIA256_GCM_SHA384"; + + /** + * TLS_RSA_WITH_ECDHE_ARIA256_GCM_SHA384 + */ + public static final 
String TLS_RSA_WITH_ECDHE_ARIA256_GCM_SHA384 = "TLS_RSA_WITH_ECDHE_ARIA256_GCM_SHA384"; + + /** + * TLS_DHE_DSS_WITH_ARIA256_GCM_SHA384 + */ + public static final String TLS_DHE_DSS_WITH_ARIA256_GCM_SHA384 = "TLS_DHE_DSS_WITH_ARIA256_GCM_SHA384"; + + /** + * TLS_DHE_RSA_WITH_ARIA256_GCM_SHA384 + */ + public static final String TLS_DHE_RSA_WITH_ARIA256_GCM_SHA384 = "TLS_DHE_RSA_WITH_ARIA256_GCM_SHA384"; + + /** + * TLS_DH_anon_WITH_AES_256_GCM_SHA384 + */ + public static final String TLS_DH_anon_WITH_AES_256_GCM_SHA384 = "TLS_DH_anon_WITH_AES_256_GCM_SHA384"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"; + + /** + * TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + */ + public static final String TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"; + + /** + * TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 + */ + public static final String TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256"; + + /** + * TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 + */ + public static final String TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_128_CBC_CCM8 + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_128_CBC_CCM8 = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_CCM8"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_128_CBC_CCM + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_128_CBC_CCM = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_CCM"; + + /** + * TLS_DHE_RSA_WITH_AES_128_CBC_CCM8 + */ + public static final String TLS_DHE_RSA_WITH_AES_128_CBC_CCM8 = "TLS_DHE_RSA_WITH_AES_128_CBC_CCM8"; + + /** + * TLS_DHE_RSA_WITH_AES_128_CBC_CCM + */ + public static final String TLS_DHE_RSA_WITH_AES_128_CBC_CCM = "TLS_DHE_RSA_WITH_AES_128_CBC_CCM"; + + /** + * TLS_ECDHE_ECDSA_WITH_ARIA128_GCM_SHA256 + */ + public static final String TLS_ECDHE_ECDSA_WITH_ARIA128_GCM_SHA256 = "TLS_ECDHE_ECDSA_WITH_ARIA128_GCM_SHA256"; + + /** + * TLS_RSA_WITH_ECDHE_ARIA128_GCM_SHA256 + */ + public static final String TLS_RSA_WITH_ECDHE_ARIA128_GCM_SHA256 = "TLS_RSA_WITH_ECDHE_ARIA128_GCM_SHA256"; + + /** + * TLS_DHE_DSS_WITH_ARIA128_GCM_SHA256 + */ + public static final String TLS_DHE_DSS_WITH_ARIA128_GCM_SHA256 = "TLS_DHE_DSS_WITH_ARIA128_GCM_SHA256"; + + /** + * TLS_DHE_RSA_WITH_ARIA128_GCM_SHA256 + */ + public static final String TLS_DHE_RSA_WITH_ARIA128_GCM_SHA256 = "TLS_DHE_RSA_WITH_ARIA128_GCM_SHA256"; + + /** + * TLS_DH_anon_WITH_AES_128_GCM_SHA256 + */ + public static final String TLS_DH_anon_WITH_AES_128_GCM_SHA256 = "TLS_DH_anon_WITH_AES_128_GCM_SHA256"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"; + + /** + * TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 + */ + public static final String TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"; + + /** + * TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 + */ + public static final String TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256"; + + /** + * TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 + */ + public static final String TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256"; + + /** + * TLS_ECDHE_ECDSA_WITH_CAMELLIA256_SHA384 + */ + public static final String TLS_ECDHE_ECDSA_WITH_CAMELLIA256_SHA384 = "TLS_ECDHE_ECDSA_WITH_CAMELLIA256_SHA384"; + + /** + * TLS_ECDHE_RSA_WITH_CAMELLIA256_SHA384 + */ + public 
static final String TLS_ECDHE_RSA_WITH_CAMELLIA256_SHA384 = "TLS_ECDHE_RSA_WITH_CAMELLIA256_SHA384"; + + /** + * TLS_DHE_RSA_WITH_CAMELLIA256_SHA256 + */ + public static final String TLS_DHE_RSA_WITH_CAMELLIA256_SHA256 = "TLS_DHE_RSA_WITH_CAMELLIA256_SHA256"; + + /** + * TLS_DHE_DSS_WITH_CAMELLIA256_SHA256 + */ + public static final String TLS_DHE_DSS_WITH_CAMELLIA256_SHA256 = "TLS_DHE_DSS_WITH_CAMELLIA256_SHA256"; + + /** + * TLS_DH_anon_WITH_AES_256_CBC_SHA256 + */ + public static final String TLS_DH_anon_WITH_AES_256_CBC_SHA256 = "TLS_DH_anon_WITH_AES_256_CBC_SHA256"; + + /** + * TLS_DH_anon_WITH_CAMELLIA256_SHA256 + */ + public static final String TLS_DH_anon_WITH_CAMELLIA256_SHA256 = "TLS_DH_anon_WITH_CAMELLIA256_SHA256"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_ECDHE_ECDSA_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_ECDHE_ECDSA_WITH_CAMELLIA128_SHA256 = "TLS_ECDHE_ECDSA_WITH_CAMELLIA128_SHA256"; + + /** + * TLS_ECDHE_RSA_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_ECDHE_RSA_WITH_CAMELLIA128_SHA256 = "TLS_ECDHE_RSA_WITH_CAMELLIA128_SHA256"; + + /** + * TLS_DHE_RSA_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_DHE_RSA_WITH_CAMELLIA128_SHA256 = "TLS_DHE_RSA_WITH_CAMELLIA128_SHA256"; + + /** + * TLS_DHE_DSS_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_DHE_DSS_WITH_CAMELLIA128_SHA256 = "TLS_DHE_DSS_WITH_CAMELLIA128_SHA256"; + + /** + * TLS_DH_anon_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_DH_anon_WITH_AES_128_CBC_SHA256 = "TLS_DH_anon_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_DH_anon_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_DH_anon_WITH_CAMELLIA128_SHA256 = "TLS_DH_anon_WITH_CAMELLIA128_SHA256"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"; + + /** + * TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA + */ + public static final String TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"; + + /** + * TLS_DHE_RSA_WITH_AES_256_CBC_SHA + */ + public static final String TLS_DHE_RSA_WITH_AES_256_CBC_SHA = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"; + + /** + * TLS_DHE_DSS_WITH_AES_256_CBC_SHA + */ + public static final String TLS_DHE_DSS_WITH_AES_256_CBC_SHA = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"; + + /** + * TLS_DHE_RSA_WITH_CAMELLIA256_SHA + */ + public static final String TLS_DHE_RSA_WITH_CAMELLIA256_SHA = "TLS_DHE_RSA_WITH_CAMELLIA256_SHA"; + + /** + * TLS_DHE_DSS_WITH_CAMELLIA256_SHA + */ + public static final String TLS_DHE_DSS_WITH_CAMELLIA256_SHA = "TLS_DHE_DSS_WITH_CAMELLIA256_SHA"; + + /** + * TLS_ECDH_anon_WITH_AES_256_CBC_SHA + */ + public static final String TLS_ECDH_anon_WITH_AES_256_CBC_SHA = "TLS_ECDH_anon_WITH_AES_256_CBC_SHA"; + + /** + * TLS_DH_anon_WITH_AES_256_CBC_SHA + */ + public static final String TLS_DH_anon_WITH_AES_256_CBC_SHA = 
"TLS_DH_anon_WITH_AES_256_CBC_SHA"; + + /** + * TLS_DH_anon_WITH_CAMELLIA256_SHA + */ + public static final String TLS_DH_anon_WITH_CAMELLIA256_SHA = "TLS_DH_anon_WITH_CAMELLIA256_SHA"; + + /** + * TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA + */ + public static final String TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"; + + /** + * TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA + */ + public static final String TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"; + + /** + * TLS_DHE_RSA_WITH_AES_128_CBC_SHA + */ + public static final String TLS_DHE_RSA_WITH_AES_128_CBC_SHA = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"; + + /** + * TLS_DHE_DSS_WITH_AES_128_CBC_SHA + */ + public static final String TLS_DHE_DSS_WITH_AES_128_CBC_SHA = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"; + + /** + * TLS_DHE_RSA_WITH_SEED_SHA + */ + public static final String TLS_DHE_RSA_WITH_SEED_SHA = "TLS_DHE_RSA_WITH_SEED_SHA"; + + /** + * TLS_DHE_DSS_WITH_SEED_SHA + */ + public static final String TLS_DHE_DSS_WITH_SEED_SHA = "TLS_DHE_DSS_WITH_SEED_SHA"; + + /** + * TLS_DHE_RSA_WITH_CAMELLIA128_SHA + */ + public static final String TLS_DHE_RSA_WITH_CAMELLIA128_SHA = "TLS_DHE_RSA_WITH_CAMELLIA128_SHA"; + + /** + * TLS_DHE_DSS_WITH_CAMELLIA128_SHA + */ + public static final String TLS_DHE_DSS_WITH_CAMELLIA128_SHA = "TLS_DHE_DSS_WITH_CAMELLIA128_SHA"; + + /** + * TLS_ECDH_anon_WITH_AES_128_CBC_SHA + */ + public static final String TLS_ECDH_anon_WITH_AES_128_CBC_SHA = "TLS_ECDH_anon_WITH_AES_128_CBC_SHA"; + + /** + * TLS_DH_anon_WITH_AES_128_CBC_SHA + */ + public static final String TLS_DH_anon_WITH_AES_128_CBC_SHA = "TLS_DH_anon_WITH_AES_128_CBC_SHA"; + + /** + * TLS_DH_anon_WITH_SEED_SHA + */ + public static final String TLS_DH_anon_WITH_SEED_SHA = "TLS_DH_anon_WITH_SEED_SHA"; + + /** + * TLS_DH_anon_WITH_CAMELLIA128_SHA + */ + public static final String TLS_DH_anon_WITH_CAMELLIA128_SHA = "TLS_DH_anon_WITH_CAMELLIA128_SHA"; + + /** + * TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 + */ + public static final String TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = "TLS_RSA_PSK_WITH_AES_256_GCM_SHA384"; + + /** + * TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 + */ + public static final String TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = "TLS_DHE_PSK_WITH_AES_256_GCM_SHA384"; + + /** + * TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 + */ + public static final String TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = "TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256"; + + /** + * TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 + */ + public static final String TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = "TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256"; + + /** + * TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 + */ + public static final String TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = + "TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256"; + + /** + * TLS_DHE_PSK_WITH_AES_256_CBC_CCM8 + */ + public static final String TLS_DHE_PSK_WITH_AES_256_CBC_CCM8 = "TLS_DHE_PSK_WITH_AES_256_CBC_CCM8"; + + /** + * TLS_DHE_PSK_WITH_AES_256_CBC_CCM + */ + public static final String TLS_DHE_PSK_WITH_AES_256_CBC_CCM = "TLS_DHE_PSK_WITH_AES_256_CBC_CCM"; + + /** + * TLS_RSA_PSK_WITH_ARIA256_GCM_SHA384 + */ + public static final String TLS_RSA_PSK_WITH_ARIA256_GCM_SHA384 = "TLS_RSA_PSK_WITH_ARIA256_GCM_SHA384"; + + /** + * TLS_DHE_PSK_WITH_ARIA256_GCM_SHA384 + */ + public static final String TLS_DHE_PSK_WITH_ARIA256_GCM_SHA384 = "TLS_DHE_PSK_WITH_ARIA256_GCM_SHA384"; + + /** + * TLS_RSA_WITH_AES_256_GCM_SHA384 + */ + public static final String TLS_RSA_WITH_AES_256_GCM_SHA384 = 
"TLS_RSA_WITH_AES_256_GCM_SHA384"; + + /** + * TLS_RSA_WITH_AES_256_CBC_CCM8 + */ + public static final String TLS_RSA_WITH_AES_256_CBC_CCM8 = "TLS_RSA_WITH_AES_256_CBC_CCM8"; + + /** + * TLS_RSA_WITH_AES_256_CBC_CCM + */ + public static final String TLS_RSA_WITH_AES_256_CBC_CCM = "TLS_RSA_WITH_AES_256_CBC_CCM"; + + /** + * TLS_RSA_WITH_ARIA256_GCM_SHA384 + */ + public static final String TLS_RSA_WITH_ARIA256_GCM_SHA384 = "TLS_RSA_WITH_ARIA256_GCM_SHA384"; + + /** + * TLS_PSK_WITH_AES_256_GCM_SHA384 + */ + public static final String TLS_PSK_WITH_AES_256_GCM_SHA384 = "TLS_PSK_WITH_AES_256_GCM_SHA384"; + + /** + * TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 + */ + public static final String TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = "TLS_PSK_WITH_CHACHA20_POLY1305_SHA256"; + + /** + * TLS_PSK_WITH_AES_256_CBC_CCM8 + */ + public static final String TLS_PSK_WITH_AES_256_CBC_CCM8 = "TLS_PSK_WITH_AES_256_CBC_CCM8"; + + /** + * TLS_PSK_WITH_AES_256_CBC_CCM + */ + public static final String TLS_PSK_WITH_AES_256_CBC_CCM = "TLS_PSK_WITH_AES_256_CBC_CCM"; + + /** + * TLS_PSK_WITH_ARIA256_GCM_SHA384 + */ + public static final String TLS_PSK_WITH_ARIA256_GCM_SHA384 = "TLS_PSK_WITH_ARIA256_GCM_SHA384"; + + /** + * TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 + */ + public static final String TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = "TLS_RSA_PSK_WITH_AES_128_GCM_SHA256"; + + /** + * TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 + */ + public static final String TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = "TLS_DHE_PSK_WITH_AES_128_GCM_SHA256"; + + /** + * TLS_DHE_PSK_WITH_AES_128_CBC_CCM8 + */ + public static final String TLS_DHE_PSK_WITH_AES_128_CBC_CCM8 = "TLS_DHE_PSK_WITH_AES_128_CBC_CCM8"; + + /** + * TLS_DHE_PSK_WITH_AES_128_CBC_CCM + */ + public static final String TLS_DHE_PSK_WITH_AES_128_CBC_CCM = "TLS_DHE_PSK_WITH_AES_128_CBC_CCM"; + + /** + * TLS_RSA_PSK_WITH_ARIA128_GCM_SHA256 + */ + public static final String TLS_RSA_PSK_WITH_ARIA128_GCM_SHA256 = "TLS_RSA_PSK_WITH_ARIA128_GCM_SHA256"; + + /** + * TLS_DHE_PSK_WITH_ARIA128_GCM_SHA256 + */ + public static final String TLS_DHE_PSK_WITH_ARIA128_GCM_SHA256 = "TLS_DHE_PSK_WITH_ARIA128_GCM_SHA256"; + + /** + * TLS_RSA_WITH_AES_128_GCM_SHA256 + */ + public static final String TLS_RSA_WITH_AES_128_GCM_SHA256 = "TLS_RSA_WITH_AES_128_GCM_SHA256"; + + /** + * TLS_RSA_WITH_AES_128_CBC_CCM8 + */ + public static final String TLS_RSA_WITH_AES_128_CBC_CCM8 = "TLS_RSA_WITH_AES_128_CBC_CCM8"; + + /** + * TLS_RSA_WITH_AES_128_CBC_CCM + */ + public static final String TLS_RSA_WITH_AES_128_CBC_CCM = "TLS_RSA_WITH_AES_128_CBC_CCM"; + + /** + * TLS_RSA_WITH_ARIA128_GCM_SHA256 + */ + public static final String TLS_RSA_WITH_ARIA128_GCM_SHA256 = "TLS_RSA_WITH_ARIA128_GCM_SHA256"; + + /** + * TLS_PSK_WITH_AES_128_GCM_SHA256 + */ + public static final String TLS_PSK_WITH_AES_128_GCM_SHA256 = "TLS_PSK_WITH_AES_128_GCM_SHA256"; + + /** + * TLS_PSK_WITH_AES_128_CBC_CCM8 + */ + public static final String TLS_PSK_WITH_AES_128_CBC_CCM8 = "TLS_PSK_WITH_AES_128_CBC_CCM8"; + + /** + * TLS_PSK_WITH_AES_128_CBC_CCM + */ + public static final String TLS_PSK_WITH_AES_128_CBC_CCM = "TLS_PSK_WITH_AES_128_CBC_CCM"; + + /** + * TLS_PSK_WITH_ARIA128_GCM_SHA256 + */ + public static final String TLS_PSK_WITH_ARIA128_GCM_SHA256 = "TLS_PSK_WITH_ARIA128_GCM_SHA256"; + + /** + * TLS_RSA_WITH_AES_256_CBC_SHA256 + */ + public static final String TLS_RSA_WITH_AES_256_CBC_SHA256 = "TLS_RSA_WITH_AES_256_CBC_SHA256"; + + /** + * TLS_RSA_WITH_CAMELLIA256_SHA256 + */ + public static final String TLS_RSA_WITH_CAMELLIA256_SHA256 = 
"TLS_RSA_WITH_CAMELLIA256_SHA256"; + + /** + * TLS_RSA_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_RSA_WITH_AES_128_CBC_SHA256 = "TLS_RSA_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_RSA_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_RSA_WITH_CAMELLIA128_SHA256 = "TLS_RSA_WITH_CAMELLIA128_SHA256"; + + /** + * TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 + */ + public static final String TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384"; + + /** + * TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA + */ + public static final String TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA"; + + /** + * TLS_SRP_DSS_WITH_AES_256_CBC_SHA + */ + public static final String TLS_SRP_DSS_WITH_AES_256_CBC_SHA = "TLS_SRP_DSS_WITH_AES_256_CBC_SHA"; + + /** + * TLS_SRP_RSA_WITH_AES_256_CBC_SHA + */ + public static final String TLS_SRP_RSA_WITH_AES_256_CBC_SHA = "TLS_SRP_RSA_WITH_AES_256_CBC_SHA"; + + /** + * TLS_SRP_WITH_AES_256_CBC_SHA + */ + public static final String TLS_SRP_WITH_AES_256_CBC_SHA = "TLS_SRP_WITH_AES_256_CBC_SHA"; + + /** + * TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 + */ + public static final String TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA384"; + + /** + * TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 + */ + public static final String TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA384"; + + /** + * TLS_RSA_PSK_WITH_AES_256_CBC_SHA + */ + public static final String TLS_RSA_PSK_WITH_AES_256_CBC_SHA = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA"; + + /** + * TLS_DHE_PSK_WITH_AES_256_CBC_SHA + */ + public static final String TLS_DHE_PSK_WITH_AES_256_CBC_SHA = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA"; + + /** + * TLS_ECDHE_PSK_WITH_CAMELLIA256_SHA384 + */ + public static final String TLS_ECDHE_PSK_WITH_CAMELLIA256_SHA384 = "TLS_ECDHE_PSK_WITH_CAMELLIA256_SHA384"; + + /** + * TLS_RSA_PSK_WITH_CAMELLIA256_SHA384 + */ + public static final String TLS_RSA_PSK_WITH_CAMELLIA256_SHA384 = "TLS_RSA_PSK_WITH_CAMELLIA256_SHA384"; + + /** + * TLS_DHE_PSK_WITH_CAMELLIA256_SHA384 + */ + public static final String TLS_DHE_PSK_WITH_CAMELLIA256_SHA384 = "TLS_DHE_PSK_WITH_CAMELLIA256_SHA384"; + + /** + * TLS_RSA_WITH_AES_256_CBC_SHA + */ + public static final String TLS_RSA_WITH_AES_256_CBC_SHA = "TLS_RSA_WITH_AES_256_CBC_SHA"; + + /** + * TLS_RSA_WITH_CAMELLIA256_SHA + */ + public static final String TLS_RSA_WITH_CAMELLIA256_SHA = "TLS_RSA_WITH_CAMELLIA256_SHA"; + + /** + * TLS_PSK_WITH_AES_256_CBC_SHA384 + */ + public static final String TLS_PSK_WITH_AES_256_CBC_SHA384 = "TLS_PSK_WITH_AES_256_CBC_SHA384"; + + /** + * TLS_PSK_WITH_AES_256_CBC_SHA + */ + public static final String TLS_PSK_WITH_AES_256_CBC_SHA = "TLS_PSK_WITH_AES_256_CBC_SHA"; + + /** + * TLS_PSK_WITH_CAMELLIA256_SHA384 + */ + public static final String TLS_PSK_WITH_CAMELLIA256_SHA384 = "TLS_PSK_WITH_CAMELLIA256_SHA384"; + + /** + * TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA + */ + public static final String TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA"; + + /** + * TLS_SRP_DSS_WITH_AES_128_CBC_SHA + */ + public static final String TLS_SRP_DSS_WITH_AES_128_CBC_SHA = "TLS_SRP_DSS_WITH_AES_128_CBC_SHA"; + + /** + * TLS_SRP_RSA_WITH_AES_128_CBC_SHA + */ + public static final String TLS_SRP_RSA_WITH_AES_128_CBC_SHA = "TLS_SRP_RSA_WITH_AES_128_CBC_SHA"; + + /** + * 
TLS_SRP_WITH_AES_128_CBC_SHA + */ + public static final String TLS_SRP_WITH_AES_128_CBC_SHA = "TLS_SRP_WITH_AES_128_CBC_SHA"; + + /** + * TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_RSA_PSK_WITH_AES_128_CBC_SHA + */ + public static final String TLS_RSA_PSK_WITH_AES_128_CBC_SHA = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA"; + + /** + * TLS_DHE_PSK_WITH_AES_128_CBC_SHA + */ + public static final String TLS_DHE_PSK_WITH_AES_128_CBC_SHA = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA"; + + /** + * TLS_ECDHE_PSK_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_ECDHE_PSK_WITH_CAMELLIA128_SHA256 = "TLS_ECDHE_PSK_WITH_CAMELLIA128_SHA256"; + + /** + * TLS_RSA_PSK_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_RSA_PSK_WITH_CAMELLIA128_SHA256 = "TLS_RSA_PSK_WITH_CAMELLIA128_SHA256"; + + /** + * TLS_DHE_PSK_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_DHE_PSK_WITH_CAMELLIA128_SHA256 = "TLS_DHE_PSK_WITH_CAMELLIA128_SHA256"; + + /** + * TLS_RSA_WITH_AES_128_CBC_SHA + */ + public static final String TLS_RSA_WITH_AES_128_CBC_SHA = "TLS_RSA_WITH_AES_128_CBC_SHA"; + + /** + * TLS_RSA_WITH_SEED_SHA + */ + public static final String TLS_RSA_WITH_SEED_SHA = "TLS_RSA_WITH_SEED_SHA"; + + /** + * TLS_RSA_WITH_CAMELLIA128_SHA + */ + public static final String TLS_RSA_WITH_CAMELLIA128_SHA = "TLS_RSA_WITH_CAMELLIA128_SHA"; + + /** + * TLS_RSA_WITH_IDEA_CBC_SHA + */ + public static final String TLS_RSA_WITH_IDEA_CBC_SHA = "TLS_RSA_WITH_IDEA_CBC_SHA"; + + /** + * TLS_PSK_WITH_AES_128_CBC_SHA256 + */ + public static final String TLS_PSK_WITH_AES_128_CBC_SHA256 = "TLS_PSK_WITH_AES_128_CBC_SHA256"; + + /** + * TLS_PSK_WITH_AES_128_CBC_SHA + */ + public static final String TLS_PSK_WITH_AES_128_CBC_SHA = "TLS_PSK_WITH_AES_128_CBC_SHA"; + + /** + * TLS_PSK_WITH_CAMELLIA128_SHA256 + */ + public static final String TLS_PSK_WITH_CAMELLIA128_SHA256 = "TLS_PSK_WITH_CAMELLIA128_SHA256"; + + private Ciphers() { + // Prevent outside initialization + } +} diff --git a/handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java b/handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java index b5d5e3431b4..a452924829a 100644 --- a/handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java +++ b/handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java @@ -107,8 +107,8 @@ private static String[] defaultProtocols(SSLContext context, SSLEngine engine) { List<String> protocols = new ArrayList<String>(); addIfSupported( supportedProtocolsSet, protocols, - SslUtils.PROTOCOL_TLS_V1_3, SslUtils.PROTOCOL_TLS_V1_2, - SslUtils.PROTOCOL_TLS_V1_1, SslUtils.PROTOCOL_TLS_V1); + SslProtocols.TLS_v1_3, SslProtocols.TLS_v1_2, + SslProtocols.TLS_v1_1, SslProtocols.TLS_v1); if (!protocols.isEmpty()) { return protocols.toArray(EmptyArrays.EMPTY_STRINGS); @@ -154,7 +154,7 @@ private static List<String> defaultCiphers(SSLEngine engine, Set<String> support private static boolean isTlsV13Supported(String[] protocols) { for (String protocol: protocols) { - if (SslUtils.PROTOCOL_TLS_V1_3.equals(protocol)) { + if (SslProtocols.TLS_v1_3.equals(protocol)) { return true; } } diff --git a/handler/src/main/java/io/netty/handler/ssl/OpenSsl.java b/handler/src/main/java/io/netty/handler/ssl/OpenSsl.java index 92f1ff7a91d..b425b36c152 100644 --- 
a/handler/src/main/java/io/netty/handler/ssl/OpenSsl.java +++ b/handler/src/main/java/io/netty/handler/ssl/OpenSsl.java @@ -348,26 +348,26 @@ public final class OpenSsl { Set<String> protocols = new LinkedHashSet<String>(6); // Seems like there is no way to explicitly disable SSLv2Hello in openssl so it is always enabled - protocols.add(PROTOCOL_SSL_V2_HELLO); + protocols.add(SslProtocols.SSL_v2_HELLO); if (doesSupportProtocol(SSL.SSL_PROTOCOL_SSLV2, SSL.SSL_OP_NO_SSLv2)) { - protocols.add(PROTOCOL_SSL_V2); + protocols.add(SslProtocols.SSL_v2); } if (doesSupportProtocol(SSL.SSL_PROTOCOL_SSLV3, SSL.SSL_OP_NO_SSLv3)) { - protocols.add(PROTOCOL_SSL_V3); + protocols.add(SslProtocols.SSL_v3); } if (doesSupportProtocol(SSL.SSL_PROTOCOL_TLSV1, SSL.SSL_OP_NO_TLSv1)) { - protocols.add(PROTOCOL_TLS_V1); + protocols.add(SslProtocols.TLS_v1); } if (doesSupportProtocol(SSL.SSL_PROTOCOL_TLSV1_1, SSL.SSL_OP_NO_TLSv1_1)) { - protocols.add(PROTOCOL_TLS_V1_1); + protocols.add(SslProtocols.TLS_v1_1); } if (doesSupportProtocol(SSL.SSL_PROTOCOL_TLSV1_2, SSL.SSL_OP_NO_TLSv1_2)) { - protocols.add(PROTOCOL_TLS_V1_2); + protocols.add(SslProtocols.TLS_v1_2); } // This is only supported by java11 and later. if (tlsv13Supported && doesSupportProtocol(SSL.SSL_PROTOCOL_TLSV1_3, SSL.SSL_OP_NO_TLSv1_3)) { - protocols.add(PROTOCOL_TLS_V1_3); + protocols.add(SslProtocols.TLS_v1_3); TLSV13_SUPPORTED = true; } else { TLSV13_SUPPORTED = false; diff --git a/handler/src/main/java/io/netty/handler/ssl/OpenSslTlsv13X509ExtendedTrustManager.java b/handler/src/main/java/io/netty/handler/ssl/OpenSslTlsv13X509ExtendedTrustManager.java index c55543a71d1..1b3bed6ba2a 100644 --- a/handler/src/main/java/io/netty/handler/ssl/OpenSslTlsv13X509ExtendedTrustManager.java +++ b/handler/src/main/java/io/netty/handler/ssl/OpenSslTlsv13X509ExtendedTrustManager.java @@ -66,7 +66,7 @@ public void checkServerTrusted(X509Certificate[] x509Certificates, String s, Soc private static SSLEngine wrapEngine(final SSLEngine engine) { final SSLSession session = engine.getHandshakeSession(); - if (session != null && SslUtils.PROTOCOL_TLS_V1_3.equals(session.getProtocol())) { + if (session != null && SslProtocols.TLS_v1_3.equals(session.getProtocol())) { return new JdkSslEngine(engine) { @Override public String getNegotiatedApplicationProtocol() { @@ -93,7 +93,7 @@ public String[] getPeerSupportedSignatureAlgorithms() { @Override public String getProtocol() { - return SslUtils.PROTOCOL_TLS_V1_2; + return SslProtocols.TLS_v1_2; } }; } else { @@ -181,7 +181,7 @@ public String getCipherSuite() { @Override public String getProtocol() { - return SslUtils.PROTOCOL_TLS_V1_2; + return SslProtocols.TLS_v1_2; } @Override diff --git a/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslEngine.java b/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslEngine.java index a37a14898f5..f033e644a40 100644 --- a/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslEngine.java +++ b/handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslEngine.java @@ -65,13 +65,6 @@ import javax.security.cert.X509Certificate; import static io.netty.handler.ssl.OpenSsl.memoryAddress; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_SSL_V2; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_SSL_V2_HELLO; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_SSL_V3; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_TLS_V1; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_TLS_V1_1; -import static 
io.netty.handler.ssl.SslUtils.PROTOCOL_TLS_V1_2; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_TLS_V1_3; import static io.netty.handler.ssl.SslUtils.SSL_RECORD_HEADER_LENGTH; import static io.netty.util.internal.ObjectUtil.checkNotNull; import static io.netty.util.internal.ObjectUtil.checkNotNullArrayParam; @@ -360,7 +353,7 @@ public List<byte[]> getStatusResponses() { SSL.setMode(ssl, SSL.getMode(ssl) | SSL.SSL_MODE_ENABLE_PARTIAL_WRITE); } - if (isProtocolEnabled(SSL.getOptions(ssl), SSL.SSL_OP_NO_TLSv1_3, PROTOCOL_TLS_V1_3)) { + if (isProtocolEnabled(SSL.getOptions(ssl), SSL.SSL_OP_NO_TLSv1_3, SslProtocols.TLS_v1_3)) { final boolean enableTickets = clientMode ? ReferenceCountedOpenSslContext.CLIENT_ENABLE_SESSION_TICKET_TLSV13 : ReferenceCountedOpenSslContext.SERVER_ENABLE_SESSION_TICKET_TLSV13; @@ -1368,7 +1361,7 @@ private void rejectRemoteInitiatedRenegotiation() throws SSLHandshakeException { if (!isDestroyed() && SSL.getHandshakeCount(ssl) > 1 && // As we may count multiple handshakes when TLSv1.3 is used we should just ignore this here as // renegotiation is not supported in TLSv1.3 as per spec. - !SslUtils.PROTOCOL_TLS_V1_3.equals(session.getProtocol()) && handshakeState == HandshakeState.FINISHED) { + !SslProtocols.TLS_v1_3.equals(session.getProtocol()) && handshakeState == HandshakeState.FINISHED) { // TODO: In future versions me may also want to send a fatal_alert to the client and so notify it // that the renegotiation failed. shutdown(); @@ -1556,7 +1549,7 @@ public final String[] getEnabledCipherSuites() { if (!isDestroyed()) { enabled = SSL.getCiphers(ssl); int opts = SSL.getOptions(ssl); - if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1_3, PROTOCOL_TLS_V1_3)) { + if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1_3, SslProtocols.TLS_v1_3)) { extraCiphers = OpenSsl.EXTRA_SUPPORTED_TLS_1_3_CIPHERS; tls13Enabled = true; } else { @@ -1618,16 +1611,16 @@ public final void setEnabledCipherSuites(String[] cipherSuites) { // We have no ciphers that are compatible with none-TLSv1.3, let us explicit disable all other // protocols. if (cipherSuiteSpec.isEmpty()) { - protocols.remove(PROTOCOL_TLS_V1); - protocols.remove(PROTOCOL_TLS_V1_1); - protocols.remove(PROTOCOL_TLS_V1_2); - protocols.remove(PROTOCOL_SSL_V3); - protocols.remove(PROTOCOL_SSL_V2); - protocols.remove(PROTOCOL_SSL_V2_HELLO); + protocols.remove(SslProtocols.TLS_v1); + protocols.remove(SslProtocols.TLS_v1_1); + protocols.remove(SslProtocols.TLS_v1_2); + protocols.remove(SslProtocols.SSL_v3); + protocols.remove(SslProtocols.SSL_v2); + protocols.remove(SslProtocols.SSL_v2_HELLO); } // We have no ciphers that are compatible with TLSv1.3, let us explicit disable it. if (cipherSuiteSpecTLSv13.isEmpty()) { - protocols.remove(PROTOCOL_TLS_V1_3); + protocols.remove(SslProtocols.TLS_v1_3); } // Update the protocols but not cache the value. We only cache when we call it from the user // code or when we construct the engine. 
@@ -1650,7 +1643,7 @@ public final String[] getSupportedProtocols() { public final String[] getEnabledProtocols() { List<String> enabled = new ArrayList<String>(6); // Seems like there is no way to explicit disable SSLv2Hello in openssl so it is always enabled - enabled.add(PROTOCOL_SSL_V2_HELLO); + enabled.add(SslProtocols.SSL_v2_HELLO); int opts; synchronized (this) { @@ -1660,23 +1653,23 @@ public final String[] getEnabledProtocols() { return enabled.toArray(new String[0]); } } - if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1, PROTOCOL_TLS_V1)) { - enabled.add(PROTOCOL_TLS_V1); + if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1, SslProtocols.TLS_v1)) { + enabled.add(SslProtocols.TLS_v1); } - if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1_1, PROTOCOL_TLS_V1_1)) { - enabled.add(PROTOCOL_TLS_V1_1); + if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1_1, SslProtocols.TLS_v1_1)) { + enabled.add(SslProtocols.TLS_v1_1); } - if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1_2, PROTOCOL_TLS_V1_2)) { - enabled.add(PROTOCOL_TLS_V1_2); + if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1_2, SslProtocols.TLS_v1_2)) { + enabled.add(SslProtocols.TLS_v1_2); } - if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1_3, PROTOCOL_TLS_V1_3)) { - enabled.add(PROTOCOL_TLS_V1_3); + if (isProtocolEnabled(opts, SSL.SSL_OP_NO_TLSv1_3, SslProtocols.TLS_v1_3)) { + enabled.add(SslProtocols.TLS_v1_3); } - if (isProtocolEnabled(opts, SSL.SSL_OP_NO_SSLv2, PROTOCOL_SSL_V2)) { - enabled.add(PROTOCOL_SSL_V2); + if (isProtocolEnabled(opts, SSL.SSL_OP_NO_SSLv2, SslProtocols.SSL_v2)) { + enabled.add(SslProtocols.SSL_v2); } - if (isProtocolEnabled(opts, SSL.SSL_OP_NO_SSLv3, PROTOCOL_SSL_V3)) { - enabled.add(PROTOCOL_SSL_V3); + if (isProtocolEnabled(opts, SSL.SSL_OP_NO_SSLv3, SslProtocols.SSL_v3)) { + enabled.add(SslProtocols.SSL_v3); } return enabled.toArray(new String[0]); } @@ -1710,42 +1703,42 @@ private void setEnabledProtocols0(String[] protocols, boolean cache) { if (!OpenSsl.SUPPORTED_PROTOCOLS_SET.contains(p)) { throw new IllegalArgumentException("Protocol " + p + " is not supported."); } - if (p.equals(PROTOCOL_SSL_V2)) { + if (p.equals(SslProtocols.SSL_v2)) { if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV2) { minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV2; } if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV2) { // lgtm[java/constant-comparison] maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV2; } - } else if (p.equals(PROTOCOL_SSL_V3)) { + } else if (p.equals(SslProtocols.SSL_v3)) { if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV3) { minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV3; } if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV3) { maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_SSLV3; } - } else if (p.equals(PROTOCOL_TLS_V1)) { + } else if (p.equals(SslProtocols.TLS_v1)) { if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1) { minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1; } if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1) { maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1; } - } else if (p.equals(PROTOCOL_TLS_V1_1)) { + } else if (p.equals(SslProtocols.TLS_v1_1)) { if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_1) { minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_1; } if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_1) { maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_1; } - } else if (p.equals(PROTOCOL_TLS_V1_2)) { + } else if (p.equals(SslProtocols.TLS_v1_2)) { if (minProtocolIndex > 
OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_2) { minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_2; } if (maxProtocolIndex < OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_2) { maxProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_2; } - } else if (p.equals(PROTOCOL_TLS_V1_3)) { + } else if (p.equals(SslProtocols.TLS_v1_3)) { if (minProtocolIndex > OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_3) { minProtocolIndex = OPENSSL_OP_NO_PROTOCOL_INDEX_TLSv1_3; } diff --git a/handler/src/main/java/io/netty/handler/ssl/SslProtocols.java b/handler/src/main/java/io/netty/handler/ssl/SslProtocols.java new file mode 100644 index 00000000000..c38e1cfab7c --- /dev/null +++ b/handler/src/main/java/io/netty/handler/ssl/SslProtocols.java @@ -0,0 +1,76 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.ssl; + +/** + * SSL/TLS protocols + */ +public final class SslProtocols { + + /** + * SSL v2 Hello + * + * @deprecated SSLv2Hello is no longer secure. Consider using {@link #TLS_v1_2} or {@link #TLS_v1_3} + */ + @Deprecated + public static final String SSL_v2_HELLO = "SSLv2Hello"; + + /** + * SSL v2 + * + * @deprecated SSLv2 is no longer secure. Consider using {@link #TLS_v1_2} or {@link #TLS_v1_3} + */ + @Deprecated + public static final String SSL_v2 = "SSLv2"; + + /** + * SSLv3 + * + * @deprecated SSLv3 is no longer secure. Consider using {@link #TLS_v1_2} or {@link #TLS_v1_3} + */ + @Deprecated + public static final String SSL_v3 = "SSLv3"; + + /** + * TLS v1 + * + * @deprecated TLSv1 is no longer secure. Consider using {@link #TLS_v1_2} or {@link #TLS_v1_3} + */ + @Deprecated + public static final String TLS_v1 = "TLSv1"; + + /** + * TLS v1.1 + * + * @deprecated TLSv1.1 is no longer secure. 
Consider using {@link #TLS_v1_2} or {@link #TLS_v1_3} + */ + @Deprecated + public static final String TLS_v1_1 = "TLSv1.1"; + + /** + * TLS v1.2 + */ + public static final String TLS_v1_2 = "TLSv1.2"; + + /** + * TLS v1.3 + */ + public static final String TLS_v1_3 = "TLSv1.3"; + + private SslProtocols() { + // Prevent outside initialization + } +} diff --git a/handler/src/main/java/io/netty/handler/ssl/SslUtils.java b/handler/src/main/java/io/netty/handler/ssl/SslUtils.java index 79bd2fc2579..9f3e9246305 100644 --- a/handler/src/main/java/io/netty/handler/ssl/SslUtils.java +++ b/handler/src/main/java/io/netty/handler/ssl/SslUtils.java @@ -56,14 +56,10 @@ final class SslUtils { asList("TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", "TLS_AES_128_GCM_SHA256", "TLS_AES_128_CCM_8_SHA256", "TLS_AES_128_CCM_SHA256"))); - // Protocols - static final String PROTOCOL_SSL_V2_HELLO = "SSLv2Hello"; - static final String PROTOCOL_SSL_V2 = "SSLv2"; - static final String PROTOCOL_SSL_V3 = "SSLv3"; - static final String PROTOCOL_TLS_V1 = "TLSv1"; - static final String PROTOCOL_TLS_V1_1 = "TLSv1.1"; - static final String PROTOCOL_TLS_V1_2 = "TLSv1.2"; - static final String PROTOCOL_TLS_V1_3 = "TLSv1.3"; + + /** + * GMSSL Protocol Version + */ static final int GMSSL_PROTOCOL_VERSION = 0x101; static final String INVALID_CIPHER = "SSL_NULL_WITH_NULL_NULL"; @@ -157,7 +153,7 @@ static boolean isTLSv13SupportedByJDK(Provider provider) { private static boolean isTLSv13SupportedByJDK0(Provider provider) { try { return arrayContains(newInitContext(provider) - .getSupportedSSLParameters().getProtocols(), PROTOCOL_TLS_V1_3); + .getSupportedSSLParameters().getProtocols(), SslProtocols.TLS_v1_3); } catch (Throwable cause) { logger.debug("Unable to detect if JDK SSLEngine with provider {} supports TLSv1.3, assuming no", provider, cause); @@ -178,7 +174,7 @@ static boolean isTLSv13EnabledByJDK(Provider provider) { private static boolean isTLSv13EnabledByJDK0(Provider provider) { try { return arrayContains(newInitContext(provider) - .getDefaultSSLParameters().getProtocols(), PROTOCOL_TLS_V1_3); + .getDefaultSSLParameters().getProtocols(), SslProtocols.TLS_v1_3); } catch (Throwable cause) { logger.debug("Unable to detect if JDK SSLEngine with provider {} enables TLSv1.3 by default," + " assuming no", provider, cause); @@ -211,7 +207,7 @@ static SSLContext getSSLContext(String provider) } private static String getTlsVersion() { - return TLSV1_3_JDK_SUPPORTED ? PROTOCOL_TLS_V1_3 : PROTOCOL_TLS_V1_2; + return TLSV1_3_JDK_SUPPORTED ? SslProtocols.TLS_v1_3 : SslProtocols.TLS_v1_2; } static boolean arrayContains(String[] array, String value) {
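For readers skimming this patch, a minimal usage sketch of the new public SslProtocols constants it introduces (the client-context wrapper class is illustrative and not part of the patch; the protocols(...) builder call mirrors the call sites updated above):

```java
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslProtocols;

import javax.net.ssl.SSLException;

public final class SslProtocolsUsage {

    // Pin a client context to modern TLS versions via the shared public
    // constants instead of hard-coded "TLSv1.2"/"TLSv1.3" strings.
    public static SslContext clientContext() throws SSLException {
        return SslContextBuilder.forClient()
                .protocols(SslProtocols.TLS_v1_3, SslProtocols.TLS_v1_2)
                .build();
    }

    private SslProtocolsUsage() {
        // Prevent outside initialization
    }
}
```

Centralizing the protocol strings in one public holder replaces the package-private SslUtils constants and gives users a typo-proof way to pin protocol versions, with the insecure ones flagged via @Deprecated.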
diff --git a/handler/src/test/java/io/netty/handler/ssl/CipherSuiteCanaryTest.java b/handler/src/test/java/io/netty/handler/ssl/CipherSuiteCanaryTest.java index ff491d3a734..9b7398ba5ef 100644 --- a/handler/src/test/java/io/netty/handler/ssl/CipherSuiteCanaryTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/CipherSuiteCanaryTest.java @@ -124,7 +124,7 @@ public void testHandshake(SslProvider serverSslProvider, SslProvider clientSslPr .sslProvider(serverSslProvider) .ciphers(ciphers) // As this is not a TLSv1.3 cipher we should ensure we talk something else. - .protocols(SslUtils.PROTOCOL_TLS_V1_2) + .protocols(SslProtocols.TLS_v1_2) .build(); final ExecutorService executorService = delegate ? Executors.newCachedThreadPool() : null; @@ -134,7 +134,7 @@ public void testHandshake(SslProvider serverSslProvider, SslProvider clientSslPr .sslProvider(clientSslProvider) .ciphers(ciphers) // As this is not a TLSv1.3 cipher we should ensure we talk something else. - .protocols(SslUtils.PROTOCOL_TLS_V1_2) + .protocols(SslProtocols.TLS_v1_2) .trustManager(InsecureTrustManagerFactory.INSTANCE) .build(); diff --git a/handler/src/test/java/io/netty/handler/ssl/CloseNotifyTest.java b/handler/src/test/java/io/netty/handler/ssl/CloseNotifyTest.java index 362de40a38e..ebf080c93ac 100644 --- a/handler/src/test/java/io/netty/handler/ssl/CloseNotifyTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/CloseNotifyTest.java @@ -37,8 +37,6 @@ import static io.netty.buffer.ByteBufUtil.writeAscii; import static io.netty.buffer.Unpooled.EMPTY_BUFFER; import static io.netty.handler.codec.ByteToMessageDecoder.MERGE_CUMULATOR; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_TLS_V1_2; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_TLS_V1_3; import static java.nio.charset.StandardCharsets.US_ASCII; import static java.util.Arrays.asList; import static org.hamcrest.MatcherAssert.assertThat; @@ -62,10 +60,10 @@ public String toString() { static Collection<Object[]> data() { return asList(new Object[][] { - { SslProvider.JDK, PROTOCOL_TLS_V1_2 }, - { SslProvider.JDK, PROTOCOL_TLS_V1_3 }, - { SslProvider.OPENSSL, PROTOCOL_TLS_V1_2 }, - { SslProvider.OPENSSL, PROTOCOL_TLS_V1_3 }, + { SslProvider.JDK, SslProtocols.TLS_v1_2 }, + { SslProvider.JDK, SslProtocols.TLS_v1_3 }, + { SslProvider.OPENSSL, SslProtocols.TLS_v1_2 }, + { SslProvider.OPENSSL, SslProtocols.TLS_v1_3 }, }); } @@ -75,7 +73,7 @@ static Collection<Object[]> data() { public void eventsOrder(SslProvider provider, String protocol) throws Exception { assumeTrue(provider != SslProvider.OPENSSL || OpenSsl.isAvailable(), "OpenSSL is not available"); - if (PROTOCOL_TLS_V1_3.equals(protocol)) { + if (SslProtocols.TLS_v1_3.equals(protocol)) { // Ensure we support TLSv1.3 assumeTrue(SslProvider.isTlsv13Supported(provider)); } @@ -144,7 +142,7 @@ public void eventsOrder(SslProvider provider, String protocol) throws Exception } private static boolean jdkTls13(SslProvider provider, String protocol) { - return provider == SslProvider.JDK && PROTOCOL_TLS_V1_3.equals(protocol); + return provider == SslProvider.JDK && SslProtocols.TLS_v1_3.equals(protocol); } private static EmbeddedChannel initChannel(SslProvider provider, String protocol, final boolean useClientMode, diff --git a/handler/src/test/java/io/netty/handler/ssl/DelegatingSslContextTest.java b/handler/src/test/java/io/netty/handler/ssl/DelegatingSslContextTest.java index 7316420f945..56b36dee37b 100644 --- a/handler/src/test/java/io/netty/handler/ssl/DelegatingSslContextTest.java +++ 
b/handler/src/test/java/io/netty/handler/ssl/DelegatingSslContextTest.java @@ -25,7 +25,7 @@ import static org.junit.jupiter.api.Assertions.assertArrayEquals; public class DelegatingSslContextTest { - private static final String[] EXPECTED_PROTOCOLS = { SslUtils.PROTOCOL_TLS_V1_1 }; + private static final String[] EXPECTED_PROTOCOLS = { SslProtocols.TLS_v1_1 }; @Test public void testInitEngineOnNewEngine() throws Exception { diff --git a/handler/src/test/java/io/netty/handler/ssl/JdkSslEngineTest.java b/handler/src/test/java/io/netty/handler/ssl/JdkSslEngineTest.java index 3b44f5155c6..f91dc148ade 100644 --- a/handler/src/test/java/io/netty/handler/ssl/JdkSslEngineTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/JdkSslEngineTest.java @@ -299,7 +299,7 @@ public void testAlpnCompatibleProtocolsDifferentClientOrder() throws Exception { @Test public void testEnablingAnAlreadyDisabledSslProtocol() throws Exception { - testEnablingAnAlreadyDisabledSslProtocol(new String[]{}, new String[]{ SslUtils.PROTOCOL_TLS_V1_2 }); + testEnablingAnAlreadyDisabledSslProtocol(new String[]{}, new String[]{ SslProtocols.TLS_v1_2 }); } @Ignore /* Does the JDK support a "max certificate chain length"? */ diff --git a/handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java b/handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java index cf367fd8cf3..fdabd3a850f 100644 --- a/handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java @@ -58,11 +58,6 @@ import static io.netty.handler.ssl.OpenSslTestUtils.checkShouldUseKeyManagerFactory; import static io.netty.handler.ssl.ReferenceCountedOpenSslEngine.MAX_PLAINTEXT_LENGTH; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_SSL_V2_HELLO; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_SSL_V3; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_TLS_V1; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_TLS_V1_1; -import static io.netty.handler.ssl.SslUtils.PROTOCOL_TLS_V1_2; import static io.netty.internal.tcnative.SSL.SSL_CVERIFY_IGNORED; import static java.lang.Integer.MAX_VALUE; import static org.junit.Assert.assertArrayEquals; @@ -241,8 +236,8 @@ public void testAlpnCompatibleProtocolsDifferentClientOrder() throws Exception { @Test public void testEnablingAnAlreadyDisabledSslProtocol() throws Exception { - testEnablingAnAlreadyDisabledSslProtocol(new String[]{PROTOCOL_SSL_V2_HELLO}, - new String[]{PROTOCOL_SSL_V2_HELLO, PROTOCOL_TLS_V1_2}); + testEnablingAnAlreadyDisabledSslProtocol(new String[]{SslProtocols.SSL_v2_HELLO}, + new String[]{SslProtocols.SSL_v2_HELLO, SslProtocols.TLS_v1_2}); } @Test public void testWrapBuffersNoWritePendingError() throws Exception { @@ -546,24 +541,24 @@ public void testWrapWithDifferentSizesTLSv1() throws Exception { .sslProvider(sslServerProvider()) .build()); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "AES128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "ECDHE-RSA-AES128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "AECDH-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "CAMELLIA128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "SEED-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "RC4-MD5"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "ADH-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "EDH-RSA-DES-CBC3-SHA"); - 
testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "ADH-RC4-MD5"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "IDEA-CBC-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "RC4-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "CAMELLIA256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "AECDH-RC4-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "ECDHE-RSA-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "ECDHE-RSA-AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1, "ECDHE-RSA-RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "AES128-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "ECDHE-RSA-AES128-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "AECDH-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "CAMELLIA128-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "SEED-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "RC4-MD5"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "ADH-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "EDH-RSA-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "ADH-RC4-MD5"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "IDEA-CBC-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "CAMELLIA256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "AECDH-RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "ECDHE-RSA-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "ECDHE-RSA-AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1, "ECDHE-RSA-RC4-SHA"); } @Test @@ -577,21 +572,21 @@ public void testWrapWithDifferentSizesTLSv1_1() throws Exception { .sslProvider(sslServerProvider()) .build()); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "ECDHE-RSA-AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "CAMELLIA256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "ECDHE-RSA-AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "SEED-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "CAMELLIA128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "IDEA-CBC-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "AECDH-RC4-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "ADH-RC4-MD5"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "RC4-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "ECDHE-RSA-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "EDH-RSA-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "AECDH-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "ADH-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_1, "DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "ECDHE-RSA-AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "CAMELLIA256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "ECDHE-RSA-AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "SEED-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "CAMELLIA128-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "IDEA-CBC-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "AECDH-RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, 
"ADH-RC4-MD5"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "ECDHE-RSA-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "EDH-RSA-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "AECDH-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "ADH-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_1, "DES-CBC3-SHA"); } @Test @@ -605,31 +600,31 @@ public void testWrapWithDifferentSizesTLSv1_2() throws Exception { .sslProvider(sslServerProvider()) .build()); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "AES128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ECDHE-RSA-AES128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "AES128-GCM-SHA256"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ECDHE-RSA-AES256-SHA384"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "AECDH-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "AES256-GCM-SHA384"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "AES256-SHA256"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ECDHE-RSA-AES128-GCM-SHA256"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ECDHE-RSA-AES128-SHA256"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "CAMELLIA128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "SEED-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "RC4-MD5"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ADH-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "EDH-RSA-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ADH-RC4-MD5"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "RC4-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "CAMELLIA256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "AES128-SHA256"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "AECDH-RC4-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ECDHE-RSA-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ECDHE-RSA-AES256-GCM-SHA384"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ECDHE-RSA-AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_TLS_V1_2, "ECDHE-RSA-RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "AES128-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "ECDHE-RSA-AES128-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "AES128-GCM-SHA256"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "ECDHE-RSA-AES256-SHA384"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "AECDH-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "AES256-GCM-SHA384"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "AES256-SHA256"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "ECDHE-RSA-AES128-GCM-SHA256"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "ECDHE-RSA-AES128-SHA256"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "CAMELLIA128-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "SEED-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "RC4-MD5"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "ADH-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "EDH-RSA-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, 
"ADH-RC4-MD5"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "CAMELLIA256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "AES128-SHA256"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "AECDH-RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "ECDHE-RSA-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "ECDHE-RSA-AES256-GCM-SHA384"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "ECDHE-RSA-AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.TLS_v1_2, "ECDHE-RSA-RC4-SHA"); } @Test @@ -643,31 +638,31 @@ public void testWrapWithDifferentSizesSSLv3() throws Exception { .sslProvider(sslServerProvider()) .build()); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "ADH-AES128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "ADH-CAMELLIA128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "AECDH-AES128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "AECDH-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "CAMELLIA128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "DHE-RSA-AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "SEED-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "RC4-MD5"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "ADH-AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "ADH-SEED-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "ADH-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "EDH-RSA-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "ADH-RC4-MD5"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "IDEA-CBC-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "DHE-RSA-AES128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "RC4-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "CAMELLIA256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "AECDH-RC4-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "DHE-RSA-SEED-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "AECDH-AES256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "ECDHE-RSA-DES-CBC3-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "ADH-CAMELLIA256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "DHE-RSA-CAMELLIA256-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "DHE-RSA-CAMELLIA128-SHA"); - testWrapWithDifferentSizes(PROTOCOL_SSL_V3, "ECDHE-RSA-RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "ADH-AES128-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "ADH-CAMELLIA128-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "AECDH-AES128-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "AECDH-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "CAMELLIA128-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "DHE-RSA-AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "SEED-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "RC4-MD5"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "ADH-AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "ADH-SEED-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "ADH-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "EDH-RSA-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "ADH-RC4-MD5"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "IDEA-CBC-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "DHE-RSA-AES128-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "RC4-SHA"); + 
testWrapWithDifferentSizes(SslProtocols.SSL_v3, "CAMELLIA256-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "AECDH-RC4-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "DHE-RSA-SEED-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "AECDH-AES256-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "ECDHE-RSA-DES-CBC3-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "ADH-CAMELLIA256-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "DHE-RSA-CAMELLIA256-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "DHE-RSA-CAMELLIA128-SHA"); + testWrapWithDifferentSizes(SslProtocols.SSL_v3, "ECDHE-RSA-RC4-SHA"); } @Test @@ -1394,22 +1389,22 @@ public void testSessionLocalWhenNonMutualWithoutKeyManager() throws Exception { @Test public void testDefaultTLS1NotAcceptedByDefaultServer() throws Exception { - testDefaultTLS1NotAcceptedByDefault(null, PROTOCOL_TLS_V1); + testDefaultTLS1NotAcceptedByDefault(null, SslProtocols.TLS_v1); } @Test public void testDefaultTLS11NotAcceptedByDefaultServer() throws Exception { - testDefaultTLS1NotAcceptedByDefault(null, PROTOCOL_TLS_V1_1); + testDefaultTLS1NotAcceptedByDefault(null, SslProtocols.TLS_v1_1); } @Test public void testDefaultTLS1NotAcceptedByDefaultClient() throws Exception { - testDefaultTLS1NotAcceptedByDefault(PROTOCOL_TLS_V1, null); + testDefaultTLS1NotAcceptedByDefault(SslProtocols.TLS_v1, null); } @Test public void testDefaultTLS11NotAcceptedByDefaultClient() throws Exception { - testDefaultTLS1NotAcceptedByDefault(PROTOCOL_TLS_V1_1, null); + testDefaultTLS1NotAcceptedByDefault(SslProtocols.TLS_v1_1, null); } private void testDefaultTLS1NotAcceptedByDefault(String clientProtocol, String serverProtocol) throws Exception { diff --git a/handler/src/test/java/io/netty/handler/ssl/OpenSslPrivateKeyMethodTest.java b/handler/src/test/java/io/netty/handler/ssl/OpenSslPrivateKeyMethodTest.java index a966e4fe9aa..3e9a327f147 100644 --- a/handler/src/test/java/io/netty/handler/ssl/OpenSslPrivateKeyMethodTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/OpenSslPrivateKeyMethodTest.java @@ -117,10 +117,10 @@ private static void assumeCipherAvailable(SslProvider provider) throws NoSuchAlg if (provider == SslProvider.JDK) { SSLEngine engine = SSLContext.getDefault().createSSLEngine(); for (String c: engine.getSupportedCipherSuites()) { - if (RFC_CIPHER_NAME.equals(c)) { - cipherSupported = true; - break; - } + if (RFC_CIPHER_NAME.equals(c)) { + cipherSupported = true; + break; + } } } else { cipherSupported = OpenSsl.isCipherSuiteAvailable(RFC_CIPHER_NAME); @@ -141,11 +141,11 @@ private SslContext buildServerContext(OpenSslPrivateKeyMethod method) throws Exc final KeyManagerFactory kmf = OpenSslX509KeyManagerFactory.newKeyless(CERT.cert()); - return SslContextBuilder.forServer(kmf) + return SslContextBuilder.forServer(kmf) .sslProvider(SslProvider.OPENSSL) .ciphers(ciphers) // As this is not a TLSv1.3 cipher we should ensure we talk something else. - .protocols(SslUtils.PROTOCOL_TLS_V1_2) + .protocols(SslProtocols.TLS_v1_2) .option(OpenSslContextOption.PRIVATE_KEY_METHOD, method) .build(); } @@ -155,13 +155,13 @@ private SslContext buildClientContext() throws Exception { .sslProvider(SslProvider.JDK) .ciphers(Collections.singletonList(RFC_CIPHER_NAME)) // As this is not a TLSv1.3 cipher we should ensure we talk something else. 
- .protocols(SslUtils.PROTOCOL_TLS_V1_2) + .protocols(SslProtocols.TLS_v1_2) .trustManager(InsecureTrustManagerFactory.INSTANCE) .build(); } private static Executor delegateExecutor(boolean delegate) { - return delegate ? EXECUTOR : null; + return delegate ? EXECUTOR : null; } private static void assertThread(boolean delegate) { @@ -282,7 +282,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { Channel client = client(server, clientHandler); try { client.writeAndFlush(Unpooled.wrappedBuffer(new byte[] {'P', 'I', 'N', 'G'})) - .syncUninterruptibly(); + .syncUninterruptibly(); assertTrue(clientPromise.await(5L, TimeUnit.SECONDS), "client timeout"); assertTrue(serverPromise.await(5L, TimeUnit.SECONDS), "server timeout"); diff --git a/handler/src/test/java/io/netty/handler/ssl/OpenSslServerContextTest.java b/handler/src/test/java/io/netty/handler/ssl/OpenSslServerContextTest.java index a1b23436f38..17c0921f50c 100644 --- a/handler/src/test/java/io/netty/handler/ssl/OpenSslServerContextTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/OpenSslServerContextTest.java @@ -13,17 +13,13 @@ * License for the specific language governing permissions and limitations * under the License. */ - package io.netty.handler.ssl; -import org.junit.Assume; import org.junit.BeforeClass; import javax.net.ssl.SSLException; import java.io.File; -import static org.junit.Assume.assumeTrue; - public class OpenSslServerContextTest extends SslContextTest { @BeforeClass diff --git a/handler/src/test/java/io/netty/handler/ssl/ParameterizedSslHandlerTest.java b/handler/src/test/java/io/netty/handler/ssl/ParameterizedSslHandlerTest.java index db5da0c5ec0..13e1bd8f089 100644 --- a/handler/src/test/java/io/netty/handler/ssl/ParameterizedSslHandlerTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/ParameterizedSslHandlerTest.java @@ -405,7 +405,7 @@ private void testCloseNotify(SslProvider clientProvider, SslProvider serverProvi // Use TLSv1.2 as we depend on the fact that the handshake // is done in an extra round trip in the test which // is not true in TLSv1.3 - .protocols(SslUtils.PROTOCOL_TLS_V1_2) + .protocols(SslProtocols.TLS_v1_2) .build(); final SslContext sslClientCtx = SslContextBuilder.forClient() @@ -414,7 +414,7 @@ private void testCloseNotify(SslProvider clientProvider, SslProvider serverProvi // Use TLSv1.2 as we depend on the fact that the handshake // is done in an extra round trip in the test which // is not true in TLSv1.3 - .protocols(SslUtils.PROTOCOL_TLS_V1_2) + .protocols(SslProtocols.TLS_v1_2) .build(); EventLoopGroup group = new NioEventLoopGroup(); diff --git a/handler/src/test/java/io/netty/handler/ssl/RenegotiateTest.java b/handler/src/test/java/io/netty/handler/ssl/RenegotiateTest.java index a3f2c0cfde7..cd2bd44e3a7 100644 --- a/handler/src/test/java/io/netty/handler/ssl/RenegotiateTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/RenegotiateTest.java @@ -50,7 +50,7 @@ public void testRenegotiateServer() throws Throwable { try { final SslContext context = SslContextBuilder.forServer(cert.key(), cert.cert()) .sslProvider(serverSslProvider()) - .protocols(SslUtils.PROTOCOL_TLS_V1_2) + .protocols(SslProtocols.TLS_v1_2) .build(); ServerBootstrap sb = new ServerBootstrap(); @@ -105,7 +105,7 @@ public void operationComplete(Future<Channel> future) throws Exception { final SslContext clientContext = SslContextBuilder.forClient() .trustManager(InsecureTrustManagerFactory.INSTANCE) .sslProvider(SslProvider.JDK) - .protocols(SslUtils.PROTOCOL_TLS_V1_2) + 
.protocols(SslProtocols.TLS_v1_2) .build(); Bootstrap bootstrap = new Bootstrap(); diff --git a/handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java b/handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java index d04a1ef9a7b..07fa482ab3a 100644 --- a/handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java @@ -184,9 +184,9 @@ enum BufferType { static final class ProtocolCipherCombo { private static final ProtocolCipherCombo TLSV12 = new ProtocolCipherCombo( - PROTOCOL_TLS_V1_2, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"); + SslProtocols.TLS_v1_2, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"); private static final ProtocolCipherCombo TLSV13 = new ProtocolCipherCombo( - PROTOCOL_TLS_V1_3, "TLS_AES_128_GCM_SHA256"); + SslProtocols.TLS_v1_3, "TLS_AES_128_GCM_SHA256"); final String protocol; final String cipher; @@ -532,13 +532,13 @@ public void testIncompatibleCiphers() throws Exception { // due to no shared/supported cipher. clientSslCtx = wrapContext(SslContextBuilder.forClient() .trustManager(InsecureTrustManagerFactory.INSTANCE) - .protocols(PROTOCOL_TLS_V1_3, PROTOCOL_TLS_V1_2, PROTOCOL_TLS_V1) + .protocols(SslProtocols.TLS_v1_3, SslProtocols.TLS_v1_2, SslProtocols.TLS_v1) .sslContextProvider(clientSslContextProvider()) .sslProvider(sslClientProvider()) .build()); serverSslCtx = wrapContext(SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()) - .protocols(PROTOCOL_TLS_V1_3, PROTOCOL_TLS_V1_2, PROTOCOL_TLS_V1) + .protocols(SslProtocols.TLS_v1_3, SslProtocols.TLS_v1_2, SslProtocols.TLS_v1) .sslContextProvider(serverSslContextProvider()) .sslProvider(sslServerProvider()) .build()); @@ -1513,7 +1513,7 @@ protected void testEnablingAnAlreadyDisabledSslProtocol(String[] protocols1, Str assertArrayEquals(protocols1, enabledProtocols); // Enable a protocol that is currently disabled - sslEngine.setEnabledProtocols(new String[]{ PROTOCOL_TLS_V1_2 }); + sslEngine.setEnabledProtocols(new String[]{ SslProtocols.TLS_v1_2 }); // The protocol that was just enabled should be returned enabledProtocols = sslEngine.getEnabledProtocols(); @@ -1584,7 +1584,7 @@ protected void handshake(SSLEngine clientEngine, SSLEngine serverEngine) throws if (!clientHandshakeFinished || // After the handshake completes it is possible we have more data that was send by the server as // the server will send session updates after the handshake. In this case continue to unwrap. 
- SslUtils.PROTOCOL_TLS_V1_3.equals(clientEngine.getSession().getProtocol())) { + SslProtocols.TLS_v1_3.equals(clientEngine.getSession().getProtocol())) { int clientAppReadBufferPos = clientAppReadBuffer.position(); clientResult = clientEngine.unwrap(sTOc, clientAppReadBuffer); @@ -1700,7 +1700,7 @@ protected void setupHandlers(ApplicationProtocolConfig serverApn, ApplicationPro if (serverApn.protocol() == Protocol.NPN || serverApn.protocol() == Protocol.NPN_AND_ALPN) { // NPN is not really well supported with TLSv1.3 so force to use TLSv1.2 // See https://github.com/openssl/openssl/issues/3665 - serverCtxBuilder.protocols(PROTOCOL_TLS_V1_2); + serverCtxBuilder.protocols(SslProtocols.TLS_v1_2); } SslContextBuilder clientCtxBuilder = SslContextBuilder.forClient() @@ -1715,7 +1715,7 @@ protected void setupHandlers(ApplicationProtocolConfig serverApn, ApplicationPro if (clientApn.protocol() == Protocol.NPN || clientApn.protocol() == Protocol.NPN_AND_ALPN) { // NPN is not really well supported with TLSv1.3 so force to use TLSv1.2 // See https://github.com/openssl/openssl/issues/3665 - clientCtxBuilder.protocols(PROTOCOL_TLS_V1_2); + clientCtxBuilder.protocols(SslProtocols.TLS_v1_2); } setupHandlers(wrapContext(serverCtxBuilder.build()), wrapContext(clientCtxBuilder.build())); @@ -2043,9 +2043,9 @@ private String[] nonContiguousProtocols(SslProvider provider) { if (provider != null) { // conscrypt not correctly filters out TLSv1 and TLSv1.1 which is required now by the JDK. // https://github.com/google/conscrypt/issues/1013 - return new String[] { PROTOCOL_TLS_V1_2 }; + return new String[] { SslProtocols.TLS_v1_2 }; } - return new String[] {PROTOCOL_TLS_V1_2, PROTOCOL_TLS_V1}; + return new String[] {SslProtocols.TLS_v1_2, SslProtocols.TLS_v1}; } @Test @@ -2333,7 +2333,7 @@ public void testCloseNotifySequence() throws Exception { .sslContextProvider(clientSslContextProvider()) .sslProvider(sslClientProvider()) // This test only works for non TLSv1.3 for now - .protocols(PROTOCOL_TLS_V1_2) + .protocols(SslProtocols.TLS_v1_2) .build()); SSLEngine client = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT)); @@ -2342,7 +2342,7 @@ public void testCloseNotifySequence() throws Exception { .sslContextProvider(serverSslContextProvider()) .sslProvider(sslServerProvider()) // This test only works for non TLSv1.3 for now - .protocols(PROTOCOL_TLS_V1_2) + .protocols(SslProtocols.TLS_v1_2) .build()); SSLEngine server = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT)); @@ -2803,12 +2803,13 @@ public void testWrapDoesNotZeroOutSrc() throws Exception { @Test public void testDisableProtocols() throws Exception { - testDisableProtocols(PROTOCOL_SSL_V2, PROTOCOL_SSL_V2); - testDisableProtocols(PROTOCOL_SSL_V3, PROTOCOL_SSL_V2, PROTOCOL_SSL_V3); - testDisableProtocols(PROTOCOL_TLS_V1, PROTOCOL_SSL_V2, PROTOCOL_SSL_V3, PROTOCOL_TLS_V1); - testDisableProtocols(PROTOCOL_TLS_V1_1, PROTOCOL_SSL_V2, PROTOCOL_SSL_V3, PROTOCOL_TLS_V1, PROTOCOL_TLS_V1_1); - testDisableProtocols(PROTOCOL_TLS_V1_2, PROTOCOL_SSL_V2, - PROTOCOL_SSL_V3, PROTOCOL_TLS_V1, PROTOCOL_TLS_V1_1, PROTOCOL_TLS_V1_2); + testDisableProtocols(SslProtocols.SSL_v2, SslProtocols.SSL_v2); + testDisableProtocols(SslProtocols.SSL_v3, SslProtocols.SSL_v2, SslProtocols.SSL_v3); + testDisableProtocols(SslProtocols.TLS_v1, SslProtocols.SSL_v2, SslProtocols.SSL_v3, SslProtocols.TLS_v1); + testDisableProtocols(SslProtocols.TLS_v1_1, SslProtocols.SSL_v2, SslProtocols.SSL_v3, SslProtocols.TLS_v1, + SslProtocols.TLS_v1_1); + 
testDisableProtocols(SslProtocols.TLS_v1_2, SslProtocols.SSL_v2, + SslProtocols.SSL_v3, SslProtocols.TLS_v1, SslProtocols.TLS_v1_1, SslProtocols.TLS_v1_2); } private void testDisableProtocols(String protocol, String... disabledProtocols) throws Exception { @@ -2832,7 +2833,7 @@ private void testDisableProtocols(String protocol, String... disabledProtocols) for (String disabled : disabledProtocols) { supported.remove(disabled); } - if (supported.contains(PROTOCOL_SSL_V2_HELLO) && supported.size() == 1) { + if (supported.contains(SslProtocols.SSL_v2_HELLO) && supported.size() == 1) { // It's not allowed to set only PROTOCOL_SSL_V2_HELLO if using JDK SSLEngine. return; } @@ -4010,9 +4011,9 @@ public void testDefaultProtocolsIncludeTLSv13() throws Exception { } assertEquals(SslProvider.isTlsv13EnabledByDefault(sslClientProvider(), clientSslContextProvider()), - arrayContains(clientProtocols, PROTOCOL_TLS_V1_3)); + arrayContains(clientProtocols, SslProtocols.TLS_v1_3)); assertEquals(SslProvider.isTlsv13EnabledByDefault(sslServerProvider(), serverSslContextProvider()), - arrayContains(serverProtocols, PROTOCOL_TLS_V1_3)); + arrayContains(serverProtocols, SslProtocols.TLS_v1_3)); } protected SSLEngine wrapEngine(SSLEngine engine) { diff --git a/handler/src/test/java/io/netty/handler/ssl/SniHandlerTest.java b/handler/src/test/java/io/netty/handler/ssl/SniHandlerTest.java index 390ac684f39..d640eadf6c1 100644 --- a/handler/src/test/java/io/netty/handler/ssl/SniHandlerTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/SniHandlerTest.java @@ -611,7 +611,7 @@ protected void initChannel(Channel ch) throws Exception { * This is a {@link SslHandler} that will call {@code release()} on the {@link SslContext} when * the client disconnects. * - * @see SniHandlerTest#testReplaceHandler() + * @see SniHandlerTest#testReplaceHandler(SslProvider) */ private static class CustomSslHandler extends SslHandler { private final SslContext sslContext; diff --git a/handler/src/test/java/io/netty/handler/ssl/SslHandlerTest.java b/handler/src/test/java/io/netty/handler/ssl/SslHandlerTest.java index aec0c341731..efe401d5162 100644 --- a/handler/src/test/java/io/netty/handler/ssl/SslHandlerTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/SslHandlerTest.java @@ -836,7 +836,7 @@ public void testOutboundClosedAfterChannelInactive() throws Exception { @Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) public void testHandshakeFailedByWriteBeforeChannelActive() throws Exception { final SslContext sslClientCtx = SslContextBuilder.forClient() - .protocols(SslUtils.PROTOCOL_SSL_V3) + .protocols(SslProtocols.SSL_v3) .trustManager(InsecureTrustManagerFactory.INSTANCE) .sslProvider(SslProvider.JDK).build(); @@ -1145,27 +1145,27 @@ protected void initChannel(Channel ch) { @Test @Timeout(value = 5000, unit = TimeUnit.MILLISECONDS) public void testSessionTicketsWithTLSv12() throws Throwable { - testSessionTickets(SslProvider.OPENSSL, SslUtils.PROTOCOL_TLS_V1_2, true); + testSessionTickets(SslProvider.OPENSSL, SslProtocols.TLS_v1_2, true); } @Test @Timeout(value = 5000, unit = TimeUnit.MILLISECONDS) public void testSessionTicketsWithTLSv13() throws Throwable { assumeTrue(SslProvider.isTlsv13Supported(SslProvider.OPENSSL)); - testSessionTickets(SslProvider.OPENSSL, SslUtils.PROTOCOL_TLS_V1_3, true); + testSessionTickets(SslProvider.OPENSSL, SslProtocols.TLS_v1_3, true); } @Test @Timeout(value = 5000, unit = TimeUnit.MILLISECONDS) public void testSessionTicketsWithTLSv12AndNoKey() throws Throwable { - 
testSessionTickets(SslProvider.OPENSSL, SslUtils.PROTOCOL_TLS_V1_2, false); + testSessionTickets(SslProvider.OPENSSL, SslProtocols.TLS_v1_2, false); } @Test @Timeout(value = 5000, unit = TimeUnit.MILLISECONDS) public void testSessionTicketsWithTLSv13AndNoKey() throws Throwable { assumeTrue(OpenSsl.isTlsv13Supported()); - testSessionTickets(SslProvider.OPENSSL, SslUtils.PROTOCOL_TLS_V1_3, false); + testSessionTickets(SslProvider.OPENSSL, SslProtocols.TLS_v1_3, false); } private static void testSessionTickets(SslProvider provider, String protocol, boolean withKey) throws Throwable { @@ -1223,7 +1223,7 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { // This test only works for non TLSv1.3 as TLSv1.3 will establish sessions after // the handshake is done. // See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_sess_set_get_cb.html - if (!SslUtils.PROTOCOL_TLS_V1_3.equals(engine.getSession().getProtocol())) { + if (!SslProtocols.TLS_v1_3.equals(engine.getSession().getProtocol())) { // First should not re-use the session try { assertEquals(handshakeCount > 1, engine.isSessionReused()); @@ -1297,7 +1297,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { // This test only works for non TLSv1.3 as TLSv1.3 will establish sessions after // the handshake is done. // See https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_sess_set_get_cb.html - if (!SslUtils.PROTOCOL_TLS_V1_3.equals(engine.getSession().getProtocol())) { + if (!SslProtocols.TLS_v1_3.equals(engine.getSession().getProtocol())) { assertEquals(isReused, engine.isSessionReused()); } Object obj = queue.take(); @@ -1472,11 +1472,11 @@ private static void testHandshakeFailureCipherMissmatch(SslProvider provider, bo if (tls13) { clientCipher = "TLS_AES_128_GCM_SHA256"; serverCipher = "TLS_AES_256_GCM_SHA384"; - protocol = SslUtils.PROTOCOL_TLS_V1_3; + protocol = SslProtocols.TLS_v1_3; } else { clientCipher = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"; serverCipher = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"; - protocol = SslUtils.PROTOCOL_TLS_V1_2; + protocol = SslProtocols.TLS_v1_2; } final SslContext sslClientCtx = SslContextBuilder.forClient() .trustManager(InsecureTrustManagerFactory.INSTANCE) @@ -1566,26 +1566,26 @@ protected void initChannel(Channel ch) { @Test public void testHandshakeEventsTls12JDK() throws Exception { - testHandshakeEvents(SslProvider.JDK, SslUtils.PROTOCOL_TLS_V1_2); + testHandshakeEvents(SslProvider.JDK, SslProtocols.TLS_v1_2); } @Test public void testHandshakeEventsTls12Openssl() throws Exception { OpenSsl.ensureAvailability(); - testHandshakeEvents(SslProvider.OPENSSL, SslUtils.PROTOCOL_TLS_V1_2); + testHandshakeEvents(SslProvider.OPENSSL, SslProtocols.TLS_v1_2); } @Test public void testHandshakeEventsTls13JDK() throws Exception { assumeTrue(SslProvider.isTlsv13Supported(SslProvider.JDK)); - testHandshakeEvents(SslProvider.JDK, SslUtils.PROTOCOL_TLS_V1_3); + testHandshakeEvents(SslProvider.JDK, SslProtocols.TLS_v1_3); } @Test public void testHandshakeEventsTls13Openssl() throws Exception { OpenSsl.ensureAvailability(); assumeTrue(SslProvider.isTlsv13Supported(SslProvider.OPENSSL)); - testHandshakeEvents(SslProvider.OPENSSL, SslUtils.PROTOCOL_TLS_V1_3); + testHandshakeEvents(SslProvider.OPENSSL, SslProtocols.TLS_v1_3); } private void testHandshakeEvents(SslProvider provider, String protocol) throws Exception {
train
test
"2021-07-07T08:26:27"
"2021-06-16T15:52:50Z"
danielbaniel
val
netty/netty/11568_11569
netty/netty
netty/netty/11568
netty/netty/11569
[ "keyword_pr_to_issue" ]
3f8dab5ad00a718dc8ad6c00e27b740783ea23c5
056eba4db4c1b466d6b45d261642a6aa6134fe38
[ "Work around can be fond in #11267\r\nImplement your own `shouldHandleUpgradeRequest`, and check `connection` header there.", "@dpy1123 Yeah, a missing `isEmpty()` looks about right. Would you like to make a PR for this?" ]
[ "Add a call to `ReferenceCountUtil.release(req)` to prevent the contained buffer from leaking." ]
"2021-08-11T08:41:42Z"
[]
HttpServerUpgradeHandler causing StringIndexOutOfBoundsException
We are using Netty as a proxy server and added HttpServerUpgradeHandler in case the client wants to talk h2c. But with a normal HTTP/1 request, we see the following exception:

```
io.netty.handler.codec.DecoderException: java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:98)
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)
    at io.netty.channel.CombinedChannelDuplexHandler$DelegatingChannelHandlerContext.fireChannelRead(CombinedChannelDuplexHandler.java:436)
    at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:324)
    at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:296)
    at io.netty.channel.CombinedChannelDuplexHandler.channelRead(CombinedChannelDuplexHandler.java:251)
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)
    at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410)
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
    at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919)
    at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166)
    at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:714)
    at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:650)
    at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:576)
    at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493)
    at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989)
    at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at java.base/java.lang.Thread.run(Thread.java:829)
Caused by: java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    at java.base/java.lang.AbstractStringBuilder.setLength(AbstractStringBuilder.java:275)
    at java.base/java.lang.StringBuilder.setLength(StringBuilder.java:85)
    at io.netty.handler.codec.http.HttpServerUpgradeHandler.upgrade(HttpServerUpgradeHandler.java:297)
    at io.netty.handler.codec.http.HttpServerUpgradeHandler.decode(HttpServerUpgradeHandler.java:239)
    at io.netty.handler.codec.http.HttpServerUpgradeHandler.decode(HttpServerUpgradeHandler.java:40)
    at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:88)
    ... 22 more
```

As far as I can see, it seems like the if condition should also check isEmpty():

```
if (connectionHeaderValues == null || connectionHeaderValues.isEmpty()) {
    return false;
}
```

https://github.com/netty/netty/blob/3f8dab5ad00a718dc8ad6c00e27b740783ea23c5/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java#L326
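The hints above describe a workaround for affected versions: override shouldHandleUpgradeRequest so the upgrade path is skipped entirely when no Connection header is present. A minimal sketch of that workaround, assuming a Netty 4.1 release that includes the shouldHandleUpgradeRequest hook from #11267 (the wrapper class and factory parameter are illustrative, not part of the issue):

```java
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.handler.codec.http.HttpServerUpgradeHandler;
import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodecFactory;

public final class UpgradeWorkaround {

    public static HttpServerUpgradeHandler create(HttpServerCodec sourceCodec,
                                                  UpgradeCodecFactory upgradeCodecFactory) {
        return new HttpServerUpgradeHandler(sourceCodec, upgradeCodecFactory) {
            @Override
            protected boolean shouldHandleUpgradeRequest(HttpRequest req) {
                // Only attempt the upgrade when a Connection header is present,
                // so the failing header parsing is never reached.
                return req.headers().contains(HttpHeaderNames.CONNECTION);
            }
        };
    }

    private UpgradeWorkaround() {
        // Prevent outside initialization
    }
}
```

On fixed versions this override is unnecessary: the patched isEmpty() guard below makes the handler fall through to normal HTTP/1 handling instead of throwing.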
[ "codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/HttpServerUpgradeHandlerTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java index 88aa739b298..2aa6e157d56 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java @@ -323,7 +323,7 @@ private boolean upgrade(final ChannelHandlerContext ctx, final FullHttpRequest r // Make sure the CONNECTION header is present. List<String> connectionHeaderValues = request.headers().getAll(HttpHeaderNames.CONNECTION); - if (connectionHeaderValues == null) { + if (connectionHeaderValues == null || connectionHeaderValues.isEmpty()) { return false; }
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerUpgradeHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerUpgradeHandlerTest.java index bc4552afb4f..a2e27e787f6 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerUpgradeHandlerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerUpgradeHandlerTest.java @@ -31,6 +31,7 @@ import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodec; import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodecFactory; import io.netty.util.CharsetUtil; +import io.netty.util.ReferenceCountUtil; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -189,4 +190,42 @@ protected boolean shouldHandleUpgradeRequest(HttpRequest req) { assertNull(channel.readOutbound()); assertFalse(channel.finishAndReleaseAll()); } + + @Test + public void upgradeFail() { + final HttpServerCodec httpServerCodec = new HttpServerCodec(); + final UpgradeCodecFactory factory = new UpgradeCodecFactory() { + @Override + public UpgradeCodec newUpgradeCodec(CharSequence protocol) { + return new TestUpgradeCodec(); + } + }; + + HttpServerUpgradeHandler upgradeHandler = new HttpServerUpgradeHandler(httpServerCodec, factory); + + EmbeddedChannel channel = new EmbeddedChannel(httpServerCodec, upgradeHandler); + + // Build a h2c upgrade request, but without connection header. + String upgradeString = "GET / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "Upgrade: h2c\r\n\r\n"; + ByteBuf upgrade = Unpooled.copiedBuffer(upgradeString, CharsetUtil.US_ASCII); + + assertTrue(channel.writeInbound(upgrade)); + assertNotNull(channel.pipeline().get(HttpServerCodec.class)); + assertNotNull(channel.pipeline().get(HttpServerUpgradeHandler.class)); // Should not be removed. + assertNull(channel.pipeline().get("marker")); + + HttpRequest req = channel.readInbound(); + assertEquals(HttpVersion.HTTP_1_1, req.protocolVersion()); + assertTrue(req.headers().contains(HttpHeaderNames.UPGRADE, "h2c", false)); + assertFalse(req.headers().contains(HttpHeaderNames.CONNECTION)); + ReferenceCountUtil.release(req); + assertNull(channel.readInbound()); + + // No response should be written because we're just passing through. + channel.flushOutbound(); + assertNull(channel.readOutbound()); + assertFalse(channel.finishAndReleaseAll()); + } }
val
test
"2021-08-10T19:53:15"
"2021-08-11T07:01:18Z"
dpy1123
val
netty/netty/11567_11609
netty/netty
netty/netty/11567
netty/netty/11609
[ "keyword_pr_to_issue" ]
9d25bc8a4e222ae7b938f1d90829d29e12b810c4
5e7eb3748daaf68a85616bad283489a59b44c4b7
[ "@jameskleeh I am currently not sure how to do this as the user can either use `netty-tcnative` or `netty-tcnative-boringssl-static`. Or you are saying we should add both ?", "Yeah I think both", "@jameskleeh want to provide a PR ?", "Sure", "@jameskleeh ping me once something is ready for review ", "@jameskleeh https://github.com/netty/netty/pull/11609" ]
[ "Should the `dependencyManagement` section here list all other artifacts produced by tcnative, like `netty-tcnative-openssl-static`, etc.? ", "Can netty use `netty-bom-ssl` instead of defining another constant for that?", "We only publish a static artifact for boringssl ", "I think this not works .. what exactly do you have in mind ?", "Ok, should this doc be updated: https://netty.io/wiki/forked-tomcat-native.html#artifacts?", "Like depending on the `bom-ssl` and not specifying a version for `tcnative`, letting it to be inferred through the bom", "From what I know this shouldn't be done ... I also don't think it's a big problem to just keep these in sync ", "If the `bom-ssl` is not used by this repo, it can be moved to https://github.com/netty/netty-tcnative", "Well you can still build these but we don't release them ", "Ok, sounds good", "Perhaps `${tcnative.artifactId}` for consistency.", "Where is `${tcnative.version}` being set? The pom says \"keep in sync\" but I don't see a property section here.", "Could the Netty version be specified in a property so that you only have to modify it in one place?", "`bom-ssl/pom.xml` ? to just `bom/pom.xml` now, correct?", "`netty-tcnative-boringssl-static` also has jars with different classifiers, should we list them all?\r\nhttps://repo1.maven.org/maven2/io/netty/netty-tcnative-boringssl-static/2.0.41.Final/", "Leftover from the previous iteration", "Same", "I guess we could ... Let me do this ", "This one is updated automatically by the release plugin ", "parent pom.xml ", "No we want to define these explicit" ]
"2021-08-23T13:34:06Z"
[]
Module compatibility
Netty 4.1.66 introduced a requirement on tc-native 2.0.40 with this commit https://github.com/netty/netty/commit/40fb6026efd2a9fc67049b437e4db5d6d81d688c This led to a user being confronted with an error after upgrading Micronaut (which includes 4.1.66) https://github.com/micronaut-projects/micronaut-core/issues/5920 Can we get the tc-native modules added to the netty bom so we can avoid this?
[ "bom/pom.xml" ]
[ "bom/pom.xml" ]
[]
diff --git a/bom/pom.xml b/bom/pom.xml index 429c477cffe..e05844a8d51 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -253,6 +253,60 @@ <version>4.1.68.Final-SNAPSHOT</version> <classifier>osx-x86_64</classifier> </dependency> + + <!-- Add netty-tcnative* as well as users need to ensure they use the correct version --> + <dependency> + <groupId>io.netty</groupId> + <artifactId>netty-tcnative</artifactId> + <version>${tcnative.version}</version> + </dependency> + <dependency> + <groupId>io.netty</groupId> + <artifactId>netty-tcnative</artifactId> + <version>${tcnative.version}</version> + <classifier>linux-x86_64</classifier> + </dependency> + <dependency> + <groupId>io.netty</groupId> + <artifactId>netty-tcnative</artifactId> + <version>${tcnative.version}</version> + <classifier>linux-aarch_64</classifier> + </dependency> + <dependency> + <groupId>io.netty</groupId> + <artifactId>netty-tcnative</artifactId> + <version>${tcnative.version}</version> + <classifier>osx-x86_64</classifier> + </dependency> + <dependency> + <groupId>io.netty</groupId> + <artifactId>netty-tcnative-boringssl-static</artifactId> + <version>${tcnative.version}</version> + </dependency> + <dependency> + <groupId>io.netty</groupId> + <artifactId>netty-tcnative-boringssl-static</artifactId> + <version>${tcnative.version}</version> + <classifier>linux-x86_64</classifier> + </dependency> + <dependency> + <groupId>io.netty</groupId> + <artifactId>netty-tcnative-boringssl-static</artifactId> + <version>${tcnative.version}</version> + <classifier>linux-aarch_64</classifier> + </dependency> + <dependency> + <groupId>io.netty</groupId> + <artifactId>netty-tcnative-boringssl-static</artifactId> + <version>${tcnative.version}</version> + <classifier>osx_64</classifier> + </dependency> + <dependency> + <groupId>io.netty</groupId> + <artifactId>netty-tcnative-boringssl-static</artifactId> + <version>${tcnative.version}</version> + <classifier>windows_64</classifier> + </dependency> </dependencies> </dependencyManagement> </project>
null
train
test
"2021-08-20T19:10:54"
"2021-08-10T18:35:12Z"
jameskleeh
val
netty/netty/11618_11621
netty/netty
netty/netty/11618
netty/netty/11621
[ "keyword_pr_to_issue" ]
6a7cccded07a46a0a7b50ac27ff3f86eb977859f
47bababfc718eaa676212f39d547dc8aefb1683b
[ "@violetagg sounds like a bug... want to provide a PR ?", "@normanmaurer yep let me do that" ]
[ "Looks like `diskAttribute.addContent()` may throw below in which case we may not release the `buffer`. I would suggest put a `try-catch(Exception)` in these methods before we assign `buffer` to an instance variable. This will make sure we release for any unexpected errors.", "mmm DiskAttribute itself releases?\r\nSee \r\nio.netty.handler.codec.http.multipart.DiskAttribute#addContent\r\nio.netty.handler.codec.http.multipart.AbstractDiskHttpData#addContent \r\n\r\nWe need to be careful to not release twice", "Yeah but if I read it correctly, the `ByteBuf` passed to it is different than what is used here:\r\n\r\n```java\r\ndiskAttribute.addContent(((MemoryAttribute) attribute).getByteBuf(), false)\r\n```", "Agree, it should be fixed now" ]
"2021-08-26T12:14:27Z"
[]
Memory leak in HttpPostMultipartRequestDecoder#loadDataMultipartOptimized
### Expected behavior No memory leak ### Actual behavior There is a memory leak in `io.netty.handler.codec.http.multipart.HttpPostMultipartRequestDecoder#loadDataMultipartOptimized` when the size exceeds `maxLimit`. Or the issue might be related to `io.netty.handler.codec.http.multipart.AbstractMemoryHttpData#addContent` when checking the size limit. ### Minimal yet complete reproducer code (or URL to code) Apply the patch below and run the command `mvn test -pl codec-http -Dtest=io.netty.handler.codec.http.multipart.HttpPostMultiPartRequestDecoderTest#testNotBadReleaseBuffersDuringDecodingMemoryFactory` Then you will be able to see that when `IOException: Size exceed allowed maximum capacity` is thrown the buffer is not released. ``` diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index a8566c0f7f..29106237cb 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -1187,7 +1187,8 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest try { httpData.addContent(content, false); } catch (IOException e) { - throw new ErrorDataDecoderException(e); + throw new ErrorDataDecoderException("ByteBuf.refCnt() >>> " + content.refCnt(), e); + //throw new ErrorDataDecoderException(e); } undecodedChunk.readerIndex(startReaderIndex); undecodedChunk.writerIndex(startReaderIndex); diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java index fb7e08202d..d339c422a3 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java @@ -311,6 +311,7 @@ public class HttpPostMultiPartRequestDecoderTest { request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); request.headers().set("content-length", nbItems * (prefix1.length() + prefix2.length() + 2 + bytesPerItem) + suffix.length()); + factory.setMaxLimit(500); HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); decoder.setDiscardThreshold(maxMemory); for (int rank = 0; rank < nbItems; rank++) { ``` ### Netty version 4.1.68.Final-SNAPSHOT ### JVM version (e.g. `java -version`) ### OS version (e.g. `uname -a`)
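The pattern underlying the eventual fix is release-on-failure: when a size check throws after buffer ownership has been transferred, the receiver must release the buffer before rethrowing. A hedged stand-alone sketch of that pattern follows; `SizeCheckedSink` and its `checkSize` are stand-ins, not the actual `HttpData` code.

```
import io.netty.buffer.ByteBuf;

import java.io.IOException;

// Hedged sketch of the release-on-failure pattern; not the actual HttpData code.
final class SizeCheckedSink {
    private final long maxSize;
    private long size;

    SizeCheckedSink(long maxSize) {
        this.maxSize = maxSize;
    }

    void addContent(ByteBuf buffer) throws IOException {
        try {
            checkSize(size + buffer.readableBytes());
        } catch (IOException e) {
            // Ownership of buffer was transferred to us, so failure must release it.
            buffer.release();
            throw e;
        }
        size += buffer.readableBytes();
        buffer.release(); // a real sink would copy or retain the bytes instead
    }

    private void checkSize(long newSize) throws IOException {
        if (maxSize >= 0 && newSize > maxSize) {
            throw new IOException("Size exceed allowed maximum capacity");
        }
    }
}
```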
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java" ]
[ "codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java", "codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java" ]
[ "codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpDataTest.java" ]
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java index dc8dd02f1fa..f25801bd504 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java @@ -50,8 +50,14 @@ protected AbstractMemoryHttpData(String name, Charset charset, long size) { public void setContent(ByteBuf buffer) throws IOException { ObjectUtil.checkNotNull(buffer, "buffer"); long localsize = buffer.readableBytes(); - checkSize(localsize); + try { + checkSize(localsize); + } catch (IOException e) { + buffer.release(); + throw e; + } if (definedSize > 0 && definedSize < localsize) { + buffer.release(); throw new IOException("Out of size: " + localsize + " > " + definedSize); } @@ -99,8 +105,14 @@ public void addContent(ByteBuf buffer, boolean last) throws IOException { if (buffer != null) { long localsize = buffer.readableBytes(); - checkSize(size + localsize); + try { + checkSize(size + localsize); + } catch (IOException e) { + buffer.release(); + throw e; + } if (definedSize > 0 && definedSize < size + localsize) { + buffer.release(); throw new IOException("Out of size: " + (size + localsize) + " > " + definedSize); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java index f4f77f62b2b..6673ac619cb 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java @@ -126,7 +126,12 @@ public void setValue(String value) throws IOException { @Override public void addContent(ByteBuf buffer, boolean last) throws IOException { final long newDefinedSize = size + buffer.readableBytes(); - checkSize(newDefinedSize); + try { + checkSize(newDefinedSize); + } catch (IOException e) { + buffer.release(); + throw e; + } if (definedSize > 0 && definedSize < newDefinedSize) { definedSize = newDefinedSize; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java index 266e566523a..72ac59c19a1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java @@ -43,12 +43,13 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { /** * Check if the new size is not reaching the max limit allowed. - * The limit is always computed in term of bytes. + * The limit is always computed in terms of bytes. */ void checkSize(long newSize) throws IOException; /** * Set the content from the ChannelBuffer (erase any previous data) + * <p>{@link ByteBuf#release()} ownership of {@code buffer} is transferred to this {@link HttpData}. * * @param buffer * must be not null @@ -58,6 +59,7 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { /** * Add the content from the ChannelBuffer + * <p>{@link ByteBuf#release()} ownership of {@code buffer} is transferred to this {@link HttpData}. 
* * @param buffer * must be not null except if last is set to False diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java index 4f323a94970..ac1db7490b0 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java @@ -80,7 +80,12 @@ public void setValue(String value) throws IOException { @Override public void addContent(ByteBuf buffer, boolean last) throws IOException { int localsize = buffer.readableBytes(); - checkSize(size + localsize); + try { + checkSize(size + localsize); + } catch (IOException e) { + buffer.release(); + throw e; + } if (definedSize > 0 && definedSize < size + localsize) { definedSize = size + localsize; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java index fbd23fd2fba..f148e4e2563 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java @@ -122,16 +122,21 @@ public void checkSize(long newSize) throws IOException { @Override public void addContent(ByteBuf buffer, boolean last) throws IOException { if (attribute instanceof MemoryAttribute) { - checkSize(attribute.length() + buffer.readableBytes()); - if (attribute.length() + buffer.readableBytes() > limitSize) { - DiskAttribute diskAttribute = new DiskAttribute(attribute - .getName(), attribute.definedLength(), baseDir, deleteOnExit); - diskAttribute.setMaxSize(maxSize); - if (((MemoryAttribute) attribute).getByteBuf() != null) { - diskAttribute.addContent(((MemoryAttribute) attribute) - .getByteBuf(), false); + try { + checkSize(attribute.length() + buffer.readableBytes()); + if (attribute.length() + buffer.readableBytes() > limitSize) { + DiskAttribute diskAttribute = new DiskAttribute(attribute + .getName(), attribute.definedLength(), baseDir, deleteOnExit); + diskAttribute.setMaxSize(maxSize); + if (((MemoryAttribute) attribute).getByteBuf() != null) { + diskAttribute.addContent(((MemoryAttribute) attribute) + .getByteBuf(), false); + } + attribute = diskAttribute; } - attribute = diskAttribute; + } catch (IOException e) { + buffer.release(); + throw e; } } attribute.addContent(buffer, last); @@ -199,7 +204,12 @@ public void setCharset(Charset charset) { @Override public void setContent(ByteBuf buffer) throws IOException { - checkSize(buffer.readableBytes()); + try { + checkSize(buffer.readableBytes()); + } catch (IOException e) { + buffer.release(); + throw e; + } if (buffer.readableBytes() > limitSize) { if (attribute instanceof MemoryAttribute) { // change to Disk diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java index b5d5e673ce2..547a3639832 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java @@ -83,22 +83,27 @@ public void checkSize(long newSize) throws IOException { public void addContent(ByteBuf buffer, boolean last) throws IOException { if (fileUpload instanceof MemoryFileUpload) { - checkSize(fileUpload.length() + 
buffer.readableBytes()); - if (fileUpload.length() + buffer.readableBytes() > limitSize) { - DiskFileUpload diskFileUpload = new DiskFileUpload(fileUpload - .getName(), fileUpload.getFilename(), fileUpload - .getContentType(), fileUpload - .getContentTransferEncoding(), fileUpload.getCharset(), - definedSize, baseDir, deleteOnExit); - diskFileUpload.setMaxSize(maxSize); - ByteBuf data = fileUpload.getByteBuf(); - if (data != null && data.isReadable()) { - diskFileUpload.addContent(data.retain(), false); + try { + checkSize(fileUpload.length() + buffer.readableBytes()); + if (fileUpload.length() + buffer.readableBytes() > limitSize) { + DiskFileUpload diskFileUpload = new DiskFileUpload(fileUpload + .getName(), fileUpload.getFilename(), fileUpload + .getContentType(), fileUpload + .getContentTransferEncoding(), fileUpload.getCharset(), + definedSize, baseDir, deleteOnExit); + diskFileUpload.setMaxSize(maxSize); + ByteBuf data = fileUpload.getByteBuf(); + if (data != null && data.isReadable()) { + diskFileUpload.addContent(data.retain(), false); + } + // release old upload + fileUpload.release(); + + fileUpload = diskFileUpload; } - // release old upload - fileUpload.release(); - - fileUpload = diskFileUpload; + } catch (IOException e) { + buffer.release(); + throw e; } } fileUpload.addContent(buffer, last); @@ -181,7 +186,12 @@ public void setCharset(Charset charset) { @Override public void setContent(ByteBuf buffer) throws IOException { - checkSize(buffer.readableBytes()); + try { + checkSize(buffer.readableBytes()); + } catch (IOException e) { + buffer.release(); + throw e; + } if (buffer.readableBytes() > limitSize) { if (fileUpload instanceof MemoryFileUpload) { FileUpload memoryUpload = fileUpload;
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpDataTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpDataTest.java new file mode 100644 index 00000000000..17f90e70fa3 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpDataTest.java @@ -0,0 +1,135 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.multipart; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.util.CharsetUtil; +import org.assertj.core.api.ThrowableAssert; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; + +class HttpDataTest { + private static final byte[] BYTES = new byte[64]; + + @Retention(RetentionPolicy.RUNTIME) + @Target(ElementType.METHOD) + @ParameterizedTest(name = "{displayName}({0})") + @MethodSource("data") + @interface ParameterizedHttpDataTest { + } + + static HttpData[] data() { + return new HttpData[]{ + new MemoryAttribute("test", 10), + new MemoryFileUpload("test", "", "text/plain", null, CharsetUtil.UTF_8, 10), + new MixedAttribute("test", 10, -1), + new MixedFileUpload("test", "", "text/plain", null, CharsetUtil.UTF_8, 10, -1), + new DiskAttribute("test", 10), + new DiskFileUpload("test", "", "text/plain", null, CharsetUtil.UTF_8, 10) + }; + } + + @BeforeAll + static void setUp() { + Random rndm = new Random(); + rndm.nextBytes(BYTES); + } + + @ParameterizedHttpDataTest + void testAddContentEmptyBuffer(HttpData httpData) throws IOException { + ByteBuf content = PooledByteBufAllocator.DEFAULT.buffer(); + httpData.addContent(content, false); + assertThat(content.refCnt()).isEqualTo(0); + } + + @Test + void testAddContentExceedsDefinedSizeDiskFileUpload() { + doTestAddContentExceedsSize( + new DiskFileUpload("test", "", "application/json", null, CharsetUtil.UTF_8, 10), + "Out of size: 64 > 10"); + } + + @Test + void testAddContentExceedsDefinedSizeMemoryFileUpload() { + doTestAddContentExceedsSize( + new MemoryFileUpload("test", "", "application/json", null, CharsetUtil.UTF_8, 10), + "Out of size: 64 > 10"); + } + + @ParameterizedHttpDataTest + void testAddContentExceedsMaxSize(final HttpData httpData) { + httpData.setMaxSize(10); + doTestAddContentExceedsSize(httpData, "Size exceed allowed maximum capacity"); + } + + @ParameterizedHttpDataTest + void testSetContentExceedsDefinedSize(final HttpData httpData) { + doTestSetContentExceedsSize(httpData, "Out 
of size: 64 > 10"); + } + + @ParameterizedHttpDataTest + void testSetContentExceedsMaxSize(final HttpData httpData) { + httpData.setMaxSize(10); + doTestSetContentExceedsSize(httpData, "Size exceed allowed maximum capacity"); + } + + private static void doTestAddContentExceedsSize(final HttpData httpData, String expectedMessage) { + final ByteBuf content = PooledByteBufAllocator.DEFAULT.buffer(); + content.writeBytes(BYTES); + + assertThatExceptionOfType(IOException.class) + .isThrownBy(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws Throwable { + httpData.addContent(content, false); + } + }) + .withMessage(expectedMessage); + + assertThat(content.refCnt()).isEqualTo(0); + } + + private static void doTestSetContentExceedsSize(final HttpData httpData, String expectedMessage) { + final ByteBuf content = PooledByteBufAllocator.DEFAULT.buffer(); + content.writeBytes(BYTES); + + assertThatExceptionOfType(IOException.class) + .isThrownBy(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws Throwable { + httpData.setContent(content); + } + }) + .withMessage(expectedMessage); + + assertThat(content.refCnt()).isEqualTo(0); + } +}
train
test
"2021-08-26T11:19:11"
"2021-08-25T19:00:19Z"
violetagg
val
netty/netty/11360_11626
netty/netty
netty/netty/11360
netty/netty/11626
[ "keyword_pr_to_issue" ]
88fcb5124b6d2fbe117e5cad66a4b50ab46c4a7a
7b8050ae5348d9de2a8b27590994046552cd4916
[]
[]
"2021-08-30T06:25:59Z"
[]
Support for "RSASSA-PSS" algorithm
Hi, while we are trying to validate a user certificate that was generated using the RSASSA-PSS algorithm, the Netty OpenSSL engine throws the exception "sun.security.validator.ValidatorException: Certificate signature algorithm disabled". ### Actual behavior As OpenSSL now supports this algorithm, Netty should also allow it. ### Netty version 4.1.65.Final Netty-tcnative = 2.0.36.Final ### JVM version (e.g. `java -version`) openjdk 11.0.10 2021-01-19 LTS OpenJDK Runtime Environment Zulu11.45+52-SA (build 11.0.10+9-LTS) OpenJDK 64-Bit Server VM Zulu11.45+52-SA (build 11.0.10+9-LTS, mixed mode) ### OS version (e.g. `uname -a`) 1-photon SMP Wed May 26 15:55:27 UTC 2021 x86_64 GNU/Linux
[ "handler/src/main/java/io/netty/handler/ssl/ExtendedOpenSslSession.java" ]
[ "handler/src/main/java/io/netty/handler/ssl/ExtendedOpenSslSession.java", "handler/src/test/resources/io/netty/handler/ssl/rsapss-signing-ext.txt" ]
[ "handler/src/test/java/io/netty/handler/ssl/ConscryptOpenSslEngineInteropTest.java", "handler/src/test/java/io/netty/handler/ssl/ConscryptSslEngineTest.java", "handler/src/test/java/io/netty/handler/ssl/JdkConscryptSslEngineInteropTest.java", "handler/src/test/java/io/netty/handler/ssl/JdkOpenSslEngineInteroptTest.java", "handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java", "handler/src/test/java/io/netty/handler/ssl/OpenSslJdkSslEngineInteroptTest.java", "handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java", "handler/src/test/resources/io/netty/handler/ssl/generate-certificate.sh", "handler/src/test/resources/io/netty/handler/ssl/rsapss-ca-cert.cert" ]
diff --git a/handler/src/main/java/io/netty/handler/ssl/ExtendedOpenSslSession.java b/handler/src/main/java/io/netty/handler/ssl/ExtendedOpenSslSession.java index 963a9b616ab..5924325893d 100644 --- a/handler/src/main/java/io/netty/handler/ssl/ExtendedOpenSslSession.java +++ b/handler/src/main/java/io/netty/handler/ssl/ExtendedOpenSslSession.java @@ -42,6 +42,7 @@ abstract class ExtendedOpenSslSession extends ExtendedSSLSession implements Open private static final String[] LOCAL_SUPPORTED_SIGNATURE_ALGORITHMS = { "SHA512withRSA", "SHA512withECDSA", "SHA384withRSA", "SHA384withECDSA", "SHA256withRSA", "SHA256withECDSA", "SHA224withRSA", "SHA224withECDSA", "SHA1withRSA", "SHA1withECDSA", + "RSASSA-PSS", }; private final OpenSslSession wrapped; diff --git a/handler/src/test/resources/io/netty/handler/ssl/rsapss-signing-ext.txt b/handler/src/test/resources/io/netty/handler/ssl/rsapss-signing-ext.txt new file mode 100644 index 00000000000..9716541c0d9 --- /dev/null +++ b/handler/src/test/resources/io/netty/handler/ssl/rsapss-signing-ext.txt @@ -0,0 +1,21 @@ +[ ext ] +extendedKeyUsage = clientAuth +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +#subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer + + +[ exts ] +extendedKeyUsage = serverAuth +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +#subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +#subjectAltName = @alt_names + +[ extca ] +authorityKeyIdentifier = keyid,issuer +basicConstraints=CA:TRUE +subjectKeyIdentifier = hash + +[alt_names] +DNS.1 = aws-dev-node.skylo.local
diff --git a/handler/src/test/java/io/netty/handler/ssl/ConscryptOpenSslEngineInteropTest.java b/handler/src/test/java/io/netty/handler/ssl/ConscryptOpenSslEngineInteropTest.java index 8084220ca30..9800c21e76a 100644 --- a/handler/src/test/java/io/netty/handler/ssl/ConscryptOpenSslEngineInteropTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/ConscryptOpenSslEngineInteropTest.java @@ -18,6 +18,8 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.condition.DisabledIf; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLSessionContext; diff --git a/handler/src/test/java/io/netty/handler/ssl/ConscryptSslEngineTest.java b/handler/src/test/java/io/netty/handler/ssl/ConscryptSslEngineTest.java index 6e825f6c7e6..7ecb8fa83fc 100644 --- a/handler/src/test/java/io/netty/handler/ssl/ConscryptSslEngineTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/ConscryptSslEngineTest.java @@ -69,8 +69,15 @@ protected void invalidateSessionsAndAssert(SSLSessionContext context) { } @Disabled("Possible Conscrypt bug") + @Override public void testSessionCacheTimeout(SSLEngineTestParam param) throws Exception { // Skip // https://github.com/google/conscrypt/issues/851 } + + @Disabled("Not supported") + @Override + public void testRSASSAPSS(SSLEngineTestParam param) { + // skip + } } diff --git a/handler/src/test/java/io/netty/handler/ssl/JdkConscryptSslEngineInteropTest.java b/handler/src/test/java/io/netty/handler/ssl/JdkConscryptSslEngineInteropTest.java index 5a2f91c86c5..aa44fce7401 100644 --- a/handler/src/test/java/io/netty/handler/ssl/JdkConscryptSslEngineInteropTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/JdkConscryptSslEngineInteropTest.java @@ -81,8 +81,15 @@ protected void invalidateSessionsAndAssert(SSLSessionContext context) { } @Disabled("Possible Conscrypt bug") + @Override public void testSessionCacheTimeout(SSLEngineTestParam param) { // Skip // https://github.com/google/conscrypt/issues/851 } + + @Disabled("Not supported") + @Override + public void testRSASSAPSS(SSLEngineTestParam param) { + // skip + } } diff --git a/handler/src/test/java/io/netty/handler/ssl/JdkOpenSslEngineInteroptTest.java b/handler/src/test/java/io/netty/handler/ssl/JdkOpenSslEngineInteroptTest.java index cf4565635ce..2d2830ba397 100644 --- a/handler/src/test/java/io/netty/handler/ssl/JdkOpenSslEngineInteroptTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/JdkOpenSslEngineInteroptTest.java @@ -18,6 +18,8 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.condition.DisabledOnOs; import org.junit.jupiter.api.condition.OS; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import javax.net.ssl.SSLEngine; import java.util.ArrayList; @@ -192,6 +194,14 @@ public void testSessionCacheSize(SSLEngineTestParam param) throws Exception { super.testSessionCacheSize(param); } + @MethodSource("newTestParams") + @ParameterizedTest + @Override + public void testRSASSAPSS(SSLEngineTestParam param) throws Exception { + checkShouldUseKeyManagerFactory(); + super.testRSASSAPSS(param); + } + @Override protected SSLEngine wrapEngine(SSLEngine engine) { return Java8SslTestUtils.wrapSSLEngineForTesting(engine); diff --git a/handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java b/handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java index 
709cdcd3a82..2b5c2d04cf7 100644 --- a/handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/OpenSslEngineTest.java @@ -25,6 +25,7 @@ import io.netty.util.CharsetUtil; import io.netty.util.internal.EmptyArrays; import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.ResourcesUtil; import org.junit.AssumptionViolatedException; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -35,6 +36,7 @@ import javax.crypto.Cipher; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; +import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLEngineResult; import javax.net.ssl.SSLEngineResult.HandshakeStatus; @@ -42,12 +44,14 @@ import javax.net.ssl.SSLHandshakeException; import javax.net.ssl.SSLParameters; import javax.net.ssl.X509ExtendedKeyManager; +import java.io.File; import java.net.Socket; import java.nio.ByteBuffer; import java.security.AlgorithmConstraints; import java.security.AlgorithmParameters; import java.security.CryptoPrimitive; import java.security.Key; +import java.security.KeyStore; import java.security.Principal; import java.security.PrivateKey; import java.security.cert.X509Certificate; @@ -55,6 +59,7 @@ import java.util.Arrays; import java.util.List; import java.util.Set; +import java.util.concurrent.TimeUnit; import static io.netty.handler.ssl.OpenSslTestUtils.checkShouldUseKeyManagerFactory; import static io.netty.handler.ssl.ReferenceCountedOpenSslEngine.MAX_PLAINTEXT_LENGTH; @@ -1559,4 +1564,12 @@ protected void assertSessionReusedForEngine(SSLEngine clientEngine, SSLEngine se protected boolean isSessionMaybeReused(SSLEngine engine) { return unwrapEngine(engine).isSessionReused(); } + + @MethodSource("newTestParams") + @ParameterizedTest + @Override + public void testRSASSAPSS(SSLEngineTestParam param) throws Exception { + checkShouldUseKeyManagerFactory(); + super.testRSASSAPSS(param); + } } diff --git a/handler/src/test/java/io/netty/handler/ssl/OpenSslJdkSslEngineInteroptTest.java b/handler/src/test/java/io/netty/handler/ssl/OpenSslJdkSslEngineInteroptTest.java index f4d43e4b4c9..6d4e175b439 100644 --- a/handler/src/test/java/io/netty/handler/ssl/OpenSslJdkSslEngineInteroptTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/OpenSslJdkSslEngineInteroptTest.java @@ -17,6 +17,8 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import javax.net.ssl.SSLEngine; @@ -148,6 +150,14 @@ public void testSessionCacheSize(SSLEngineTestParam param) throws Exception { super.testSessionCacheSize(param); } + @MethodSource("newTestParams") + @ParameterizedTest + @Override + public void testRSASSAPSS(SSLEngineTestParam param) throws Exception { + checkShouldUseKeyManagerFactory(); + super.testRSASSAPSS(param); + } + @Override protected SSLEngine wrapEngine(SSLEngine engine) { return Java8SslTestUtils.wrapSSLEngineForTesting(engine); diff --git a/handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java b/handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java index c2995b7fd11..a6fa867697d 100644 --- a/handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java +++ b/handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java @@ -780,11 +780,11 @@ protected boolean mySetupMutualAuthServerIsValidException(Throwable cause) { protected void 
mySetupMutualAuthServerInitSslHandler(SslHandler handler) { } - private void mySetupMutualAuth(final SSLEngineTestParam param, KeyManagerFactory serverKMF, - final File serverTrustManager, - KeyManagerFactory clientKMF, File clientTrustManager, - ClientAuth clientAuth, final boolean failureExpected, - final boolean serverInitEngine) + protected void mySetupMutualAuth(final SSLEngineTestParam param, KeyManagerFactory serverKMF, + final File serverTrustManager, + KeyManagerFactory clientKMF, File clientTrustManager, + ClientAuth clientAuth, final boolean failureExpected, + final boolean serverInitEngine) throws SSLException, InterruptedException { serverSslCtx = wrapContext(param, SslContextBuilder.forServer(serverKMF) @@ -908,7 +908,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E clientChannel = ccf.channel(); } - private static void rethrowIfNotNull(Throwable error) { + protected static void rethrowIfNotNull(Throwable error) { if (error != null) { throw new AssertionFailedError("Expected no error", error); } @@ -4162,6 +4162,36 @@ public void testDefaultProtocolsIncludeTLSv13(SSLEngineTestParam param) throws E arrayContains(serverProtocols, SslProtocols.TLS_v1_3)); } + @MethodSource("newTestParams") + @ParameterizedTest + public void testRSASSAPSS(SSLEngineTestParam param) throws Exception { + char[] password = "password".toCharArray(); + + final KeyStore serverKeyStore = KeyStore.getInstance("PKCS12"); + serverKeyStore.load(getClass().getResourceAsStream("rsaValidations-server-keystore.p12"), password); + + final KeyStore clientKeyStore = KeyStore.getInstance("PKCS12"); + clientKeyStore.load(getClass().getResourceAsStream("rsaValidation-user-certs.p12"), password); + + final KeyManagerFactory serverKeyManagerFactory = + KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + serverKeyManagerFactory.init(serverKeyStore, password); + final KeyManagerFactory clientKeyManagerFactory = + KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + clientKeyManagerFactory.init(clientKeyStore, password); + + File commonChain = ResourcesUtil.getFile(getClass(), "rsapss-ca-cert.cert"); + ClientAuth auth = ClientAuth.REQUIRE; + + mySetupMutualAuth(param, serverKeyManagerFactory, commonChain, clientKeyManagerFactory, commonChain, + auth, false, true); + + assertTrue(clientLatch.await(10, TimeUnit.SECONDS)); + rethrowIfNotNull(clientException); + assertTrue(serverLatch.await(5, TimeUnit.SECONDS)); + rethrowIfNotNull(serverException); + } + protected SSLEngine wrapEngine(SSLEngine engine) { return engine; } diff --git a/handler/src/test/resources/io/netty/handler/ssl/generate-certificate.sh b/handler/src/test/resources/io/netty/handler/ssl/generate-certificate.sh new file mode 100755 index 00000000000..b9c5bd119b6 --- /dev/null +++ b/handler/src/test/resources/io/netty/handler/ssl/generate-certificate.sh @@ -0,0 +1,18 @@ +# Generate CA key and certificate. +openssl req -x509 -newkey rsa:2048 -days 3650 -keyout rsapss-ca-key.pem -out rsapss-ca-cert.cert -subj "/C=GB/O=Netty/OU=netty-parent/CN=west.int" -sigopt rsa_padding_mode:pss -sha256 -sigopt rsa_pss_saltlen:20 + +# Generate user key nand. +openssl req -newkey rsa:2048 -keyout rsapss-user-key.pem -out rsaValidation-req.pem -subj "/C=GB/O=Netty/OU=netty-parent/CN=c1" -sigopt rsa_padding_mode:pss -sha256 -sigopt rsa_pss_saltlen:20 + +# Sign user cert request using CA certificate. 
+openssl x509 -req -in rsaValidation-req.pem -days 365 -extensions ext -extfile rsapss-signing-ext.txt -CA rsapss-ca-cert.cert -CAkey rsapss-ca-key.pem -CAcreateserial -out rsapss-user-singed.cert -sigopt rsa_padding_mode:pss -sha256 -sigopt rsa_pss_saltlen:20 + +# Create user certificate keystore. +openssl pkcs12 -export -out rsaValidation-user-certs.p12 -inkey rsapss-user-key.pem -in rsapss-user-singed.cert + +# create keystore for the +openssl pkcs12 -in rsapss-ca-cert.cert -inkey rsapss-ca-key.pem -passin pass:password -certfile rsapss-ca-cert.cert -export -out rsaValidations-server-keystore.p12 -passout pass:password -name localhost + + +# Create Trustore to verify the EndEntity certificate we have created. +keytool -importcert -storetype PKCS12 -keystore rsaValidations-truststore.p12 -storepass password -alias ca -file rsapss-ca-cert.cert -noprompt diff --git a/handler/src/test/resources/io/netty/handler/ssl/rsapss-ca-cert.cert b/handler/src/test/resources/io/netty/handler/ssl/rsapss-ca-cert.cert new file mode 100644 index 00000000000..f767e738d3b --- /dev/null +++ b/handler/src/test/resources/io/netty/handler/ssl/rsapss-ca-cert.cert @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDxTCCAoKgAwIBAgIUJ2aZ084kIATHBPDJFXVu7SJ4uVcwOAYJKoZIhvcNAQEK +MCugDTALBglghkgBZQMEAgGhGjAYBgkqhkiG9w0BAQgwCwYJYIZIAWUDBAIBMEcx +CzAJBgNVBAYTAkdCMQ4wDAYDVQQKDAVOZXR0eTEVMBMGA1UECwwMbmV0dHktcGFy +ZW50MREwDwYDVQQDDAh3ZXN0LmludDAeFw0yMTA4MjkwNjAxMTNaFw0zMTA4Mjcw +NjAxMTNaMEcxCzAJBgNVBAYTAkdCMQ4wDAYDVQQKDAVOZXR0eTEVMBMGA1UECwwM +bmV0dHktcGFyZW50MREwDwYDVQQDDAh3ZXN0LmludDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAL+xcxKjWgbYHIRqnP3Sw91SNTwY85ocb+3D4xh7/F6w +cYgXwxgaHRKlk97HUzYZGFEb34BG89EOdDa1DvwxAMaN8sirefjrMLpvmfUD3Yti +kGKj+CM3gh5wFSb9mPPoY/S61+KoRSAeMKeYyFQh5IIJyVqN5mrziu0+t04X4YEw +9nATkmoS1V27Ucmo3OTkNNamqlXqVeiLKhvHtMViRGua8HwfEmjvFOTfyFHudcAz +NFFH9JR9C2g9wuokcWFD3sdFfOZ4DJVN35NrXCO4FhxxcjHOXKRdbtsucFHqCPaE +fVL0qrlkAm3pd9jKnBujC5sQbritg0uvmVuoxzy1jIUCAwEAAaNTMFEwHQYDVR0O +BBYEFAzguQlpxd/3TPhYZqEryBQ6lUdJMB8GA1UdIwQYMBaAFAzguQlpxd/3TPhY +ZqEryBQ6lUdJMA8GA1UdEwEB/wQFMAMBAf8wOAYJKoZIhvcNAQEKMCugDTALBglg +hkgBZQMEAgGhGjAYBgkqhkiG9w0BAQgwCwYJYIZIAWUDBAIBA4IBAQB3jsUwdyFO +9u/abLBGuETWbyuLX7NA9yvQL7cei40fJdsZZpZkHDJvNnrblpdaeFjuAI4vmAqz +odiHzZodSaFCwODFX8oYyBcMTHW99UYiGywskF1NnJKq13r4kP7+7w7ZaE/5YukW +VSeCXTHp1c0umuieluG87MZH4dCZgrvzpZwBeGoLLNyMyo4qHwYfkZiG2rTRpVX3 ++VsWnMOaRVMYrzTB2tPZyAZyRMEfTd0fNi7ufSu6ywrOdziTu6Y1qVh18qDKpPsG +eaSCNQoO5D9vUbiFjxKPJe8hZ0bDWTbVKRpeIrQMeHXnXGPEV5rPOcJUzwnDsGqI +gqr6XlcEs+lp +-----END CERTIFICATE-----
test
test
"2021-08-26T21:53:07"
"2021-06-03T05:11:21Z"
kushalagrawal
val
netty/netty/11655_11656
netty/netty
netty/netty/11655
netty/netty/11656
[ "keyword_pr_to_issue" ]
21fb48e4d22b5d030b165200e663c11f545f5576
a329857ec20cc1b93ceead6307c6849f93b3f101
[]
[ "revert ?", "Not here... add it to `CipherSuiteConverter `", "sorry, didn't understand this point. Failure which I see in the last build is : \r\n```\r\nFailures: \r\n2021-09-04T19:13:09.0782449Z [ERROR] JdkOpenSslEngineInteroptTest.testMutualAuthDiffCerts:78->SSLEngineTest.testMutualAuthDiffCerts:633->SSLEngineTest.runTest:1267->SSLEngineTest.writeAndVerifyReceived:1296 expected: <true> but was: <false>\r\n```\r\n\r\nBut why we are adding `@UnstableApi` to `CipherSuitesConverter` class?\r\n", "I see the point after reading the documentation about it.", "updated." ]
"2021-09-04T11:09:12Z"
[]
ability to use CipherSuiteConverter outside of io.netty.handler.ssl
In our use case, the Java cipher suites are required to be converted to their OpenSSL counterparts. I found that Netty already has a `CipherSuiteConverter`. As we are already using Netty, we could have reused the class if it had not been made package-private. IMO there would be no harm in making it public so that projects already using Netty can reuse it. I will raise a pull request for the same.
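A hedged sketch of how a downstream project could call the converters once they are public; the method signatures match the patch below, and the cipher-suite strings are just examples.

```
import io.netty.handler.ssl.CipherSuiteConverter;

// Hedged usage sketch; the suite names are illustrative examples.
public final class CipherNames {
    public static void main(String[] args) {
        // Java/JSSE name -> OpenSSL name (the boolean selects BoringSSL naming).
        String openSsl = CipherSuiteConverter.toOpenSsl(
                "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", false);
        // OpenSSL name -> Java/JSSE name, given the protocol prefix to use.
        String java = CipherSuiteConverter.toJava(
                "ECDHE-RSA-AES128-GCM-SHA256", "TLS");
        System.out.println(openSsl + " <-> " + java);
    }
}
```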
[ "handler/src/main/java/io/netty/handler/ssl/CipherSuiteConverter.java" ]
[ "handler/src/main/java/io/netty/handler/ssl/CipherSuiteConverter.java" ]
[]
diff --git a/handler/src/main/java/io/netty/handler/ssl/CipherSuiteConverter.java b/handler/src/main/java/io/netty/handler/ssl/CipherSuiteConverter.java index 2dd81371adb..a9ac8307c1d 100644 --- a/handler/src/main/java/io/netty/handler/ssl/CipherSuiteConverter.java +++ b/handler/src/main/java/io/netty/handler/ssl/CipherSuiteConverter.java @@ -17,6 +17,7 @@ package io.netty.handler.ssl; import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.UnstableApi; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; @@ -34,7 +35,8 @@ * * @see <a href="https://en.wikipedia.org/wiki/Cipher_suite">Wikipedia page about cipher suite</a> */ -final class CipherSuiteConverter { +@UnstableApi +public final class CipherSuiteConverter { private static final InternalLogger logger = InternalLoggerFactory.getInstance(CipherSuiteConverter.class); @@ -150,7 +152,7 @@ static boolean isO2JCached(String key, String protocol, String value) { * * @return {@code null} if the conversion has failed */ - static String toOpenSsl(String javaCipherSuite, boolean boringSSL) { + public static String toOpenSsl(String javaCipherSuite, boolean boringSSL) { String converted = j2o.get(javaCipherSuite); if (converted != null) { return converted; @@ -279,7 +281,7 @@ private static String toOpenSslHmacAlgo(String hmacAlgo) { * @param protocol The cryptographic protocol (i.e. SSL, TLS, ...). * @return The translated cipher suite name according to java conventions. This will not be {@code null}. */ - static String toJava(String openSslCipherSuite, String protocol) { + public static String toJava(String openSslCipherSuite, String protocol) { Map<String, String> p2j = o2j.get(openSslCipherSuite); if (p2j == null) { p2j = cacheFromOpenSsl(openSslCipherSuite);
null
test
test
"2021-09-05T20:05:15"
"2021-09-04T10:57:07Z"
kushalagrawal
val
netty/netty/11652_11663
netty/netty
netty/netty/11652
netty/netty/11663
[ "keyword_pr_to_issue" ]
a329857ec20cc1b93ceead6307c6849f93b3f101
a53eb80901b8bf678e2ff65e4715d25c4fec638c
[ "Sounds good . Will provide more feedback on the PR" ]
[]
"2021-09-07T17:01:45Z"
[]
Issues on UDS path binding
I think there are a couple of issues related to the UDS path binding logic: 1. In the JNI `netty_unix_socket_bindDomainSocket` method, when the upper level passes in a [longer-than-limit UDS path](https://github.com/netty/netty/blob/d58d8a1df832a5185193368e589cb4a488709ffe/transport-native-unix-common/src/main/c/netty_unix_socket.c#L768): a. `unlink()` is called on a different [path](https://github.com/netty/netty/blob/d58d8a1df832a5185193368e589cb4a488709ffe/transport-native-unix-common/src/main/c/netty_unix_socket.c#L772) than the [one](https://github.com/netty/netty/blob/d58d8a1df832a5185193368e589cb4a488709ffe/transport-native-unix-common/src/main/c/netty_unix_socket.c#L776) the code later `bind`s to. b. Netty is [quietly truncating the path](https://github.com/netty/netty/blob/d58d8a1df832a5185193368e589cb4a488709ffe/transport-native-unix-common/src/main/c/netty_unix_socket.c#L770) if the user provides an over-the-limit path. This can be an issue if we have a non-Netty client with different path handling logic. 2. According to the manual, "a UNIX domain socket can be bound to a *null-terminated* filesystem pathname", but Netty is not doing anything about this requirement. Although there is some [messiness](https://man.archlinux.org/man/core/man-pages/unix.7.en#BUGS) around handling this requirement, we should not simply ignore it. This is also an issue if we have a non-Netty client. I am proposing a possible fix for all these issues. We can just validate the `socketPath` passed in and let Java throw exceptions if: 1) the `socketPath` is longer than `sizeof(addr.sun_path)`, 2) or the `socketPath` has the same length as `sizeof(addr.sun_path)` but the last byte is not '\0'. A similar thing also needs to be done on the client side. This can ensure the UDS paths Netty can use have maximum compatibility with other UDS servers/clients. I can send a PR if the change mentioned above is desirable. Thank you.
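A hedged Java-side sketch of the proposed reject-instead-of-truncate validation; the 108-byte limit is an assumption, since the real bound is the platform's `sizeof(addr.sun_path)`.

```
import java.nio.charset.StandardCharsets;

// Hedged sketch; SUN_PATH_SIZE is assumed, the real limit is platform-specific.
final class UdsPathValidator {
    private static final int SUN_PATH_SIZE = 108;

    static void validate(String socketPath) {
        byte[] bytes = socketPath.getBytes(StandardCharsets.UTF_8);
        // Reject over-limit paths instead of silently truncating them, so bind()
        // and unlink() can never operate on a different file than requested.
        if (bytes.length > SUN_PATH_SIZE
                || (bytes.length == SUN_PATH_SIZE && bytes[bytes.length - 1] != 0)) {
            throw new IllegalArgumentException(
                    "Unix domain socket path too long: " + socketPath);
        }
    }
}
```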
[ "transport-native-unix-common/src/main/c/netty_unix_socket.c" ]
[ "transport-native-unix-common/src/main/c/netty_unix_socket.c" ]
[ "transport-native-epoll/src/test/java/io/netty/channel/epoll/LinuxSocketTest.java" ]
diff --git a/transport-native-unix-common/src/main/c/netty_unix_socket.c b/transport-native-unix-common/src/main/c/netty_unix_socket.c index 2524b412209..a6cd08f3086 100644 --- a/transport-native-unix-common/src/main/c/netty_unix_socket.c +++ b/transport-native-unix-common/src/main/c/netty_unix_socket.c @@ -764,12 +764,13 @@ static jint netty_unix_socket_bindDomainSocket(JNIEnv* env, jclass clazz, jint f jbyte* socket_path = (*env)->GetByteArrayElements(env, socketPath, 0); jint socket_path_len = (*env)->GetArrayLength(env, socketPath); - if (socket_path_len > sizeof(addr.sun_path)) { - socket_path_len = sizeof(addr.sun_path); + + if (socket_path_len > sizeof(addr.sun_path) || (socket_path_len == sizeof(addr.sun_path) && socket_path[socket_path_len] != '\0')) { + return -ENAMETOOLONG; } memcpy(addr.sun_path, socket_path, socket_path_len); - if (unlink((const char*) socket_path) == -1 && errno != ENOENT) { + if (unlink((const char*) addr.sun_path) == -1 && errno != ENOENT) { return -errno; } @@ -791,8 +792,9 @@ static jint netty_unix_socket_connectDomainSocket(JNIEnv* env, jclass clazz, jin jbyte* socket_path = (*env)->GetByteArrayElements(env, socketPath, 0); socket_path_len = (*env)->GetArrayLength(env, socketPath); - if (socket_path_len > sizeof(addr.sun_path)) { - socket_path_len = sizeof(addr.sun_path); + + if (socket_path_len > sizeof(addr.sun_path) || (socket_path_len == sizeof(addr.sun_path) && socket_path[socket_path_len] != '\0')) { + return -ENAMETOOLONG; } memcpy(addr.sun_path, socket_path, socket_path_len);
diff --git a/transport-native-epoll/src/test/java/io/netty/channel/epoll/LinuxSocketTest.java b/transport-native-epoll/src/test/java/io/netty/channel/epoll/LinuxSocketTest.java index ff85d3ac12c..9f16b2bfbfe 100644 --- a/transport-native-epoll/src/test/java/io/netty/channel/epoll/LinuxSocketTest.java +++ b/transport-native-epoll/src/test/java/io/netty/channel/epoll/LinuxSocketTest.java @@ -15,6 +15,13 @@ */ package io.netty.channel.epoll; +import io.netty.channel.unix.DomainSocketAddress; +import io.netty.channel.unix.Errors.NativeIoException; +import io.netty.channel.unix.Socket; +import java.nio.charset.Charset; +import java.util.Random; +import java.util.UUID; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import java.io.IOException; @@ -65,4 +72,28 @@ public void execute() throws Throwable { socket.close(); } } + + @Test + public void testUnixDomainSocketTooLongPathFails() throws IOException { + // Most systems has a limit for UDS path of 108, 255 is generally too long. + StringBuilder socketPath = new StringBuilder("/tmp/"); + while (socketPath.length() < 255) { + socketPath.append(UUID.randomUUID()); + } + + final DomainSocketAddress domainSocketAddress = new DomainSocketAddress( + socketPath.toString()); + final Socket socket = Socket.newSocketDomain(); + try { + Exception exception = Assertions.assertThrows(NativeIoException.class, new Executable() { + @Override + public void execute() throws Throwable { + socket.bind(domainSocketAddress); + } + }); + Assertions.assertTrue(exception.getMessage().contains("too long")); + } finally { + socket.close(); + } + } }
val
test
"2021-09-06T14:57:19"
"2021-09-02T23:10:55Z"
ran-su
val
netty/netty/11637_11667
netty/netty
netty/netty/11637
netty/netty/11667
[ "keyword_pr_to_issue" ]
a0c9b2f9e15112cb52001f26fa3cb6772f53d674
4377ece51640fe0eb0b21fac512a86443cf409ab
[ "For the heap _in general_, the free memory on the heap is not the same as the amount of memory that could be freed if the GC was to do a run, so we can only realistically do this for direct memory where we have precise accounting of what memory is in use and what is not. But then there's also fragmentation; having X free bytes doesn't necessarily mean there's room for an X-sized allocation.\r\n\r\nBut to count the free bytes, you can iterate the arenas (`pool.arenaMetrics()`), and then iterate the chunk lists for each (`arena.chunkLists`) and then for each chunk in those you can get the `freeBytes()`. Not super convenient, but if you are up for it you could make a PR to improve that." ]
[]
"2021-09-08T14:10:42Z"
[]
Add support for getting the pooled memory size for each PooledByteBufAllocator instance
### Motivation Our server has its own implementation to decide whether to provide service for users according to the real-time total free memory of the heap and the direct memory (mainly the memory used by `io.netty.buffer.PooledByteBufAllocator` for our server). If our server detects it is running out of physical memory, it will just respond to client requests with something like `Service Unavailable`. But the problem is that we cannot know the free memory of a PooledByteBufAllocator efficiently. For example, we may have allocated and cached 8GB of chunks in a `PooledByteBufAllocator`, but there is no property telling us what the free memory actually is: 1GB or 8GB? ### Solution The [PR](https://github.com/netty/netty/pull/5133/files) from 2016 is similar to what we want: it provides a new property `pooledBytes` to record the free memory of the arena, but we want the free memory in the scope of each allocator instance. And `io.netty.maxDirectMemory` is similar but isn't what we want, because it applies the limit to all buffers while we just want to know the free memory size of a PooledByteBufAllocator instance.
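With the accessors this issue led to (visible in the gold patch below), the per-allocator view becomes direct; a hedged sketch using those method names follows.

```
import io.netty.buffer.PooledByteBufAllocator;

// Hedged sketch using the pinnedDirectMemory() accessor added by the patch below.
public final class PinnedMemoryCheck {
    public static void main(String[] args) {
        PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
        long usedDirect = alloc.metric().usedDirectMemory(); // bytes held in pooled chunks
        long pinnedDirect = alloc.pinnedDirectMemory();      // bytes pinned by live buffers
        // Roughly "free within the pool", modulo fragmentation and thread caches.
        System.out.println("unpinned pooled direct bytes: " + (usedDirect - pinnedDirect));
    }
}
```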
[ "buffer/src/main/java/io/netty/buffer/PoolArena.java", "buffer/src/main/java/io/netty/buffer/PoolChunk.java", "buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java" ]
[ "buffer/src/main/java/io/netty/buffer/PoolArena.java", "buffer/src/main/java/io/netty/buffer/PoolChunk.java", "buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java" ]
[ "buffer/src/test/java/io/netty/buffer/AbstractByteBufAllocatorTest.java", "buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java" ]
diff --git a/buffer/src/main/java/io/netty/buffer/PoolArena.java b/buffer/src/main/java/io/netty/buffer/PoolArena.java index fba1127b5d7..beff6e115bd 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolArena.java +++ b/buffer/src/main/java/io/netty/buffer/PoolArena.java @@ -455,6 +455,22 @@ public long numActiveBytes() { return max(0, val); } + /** + * Return the number of bytes that are currently pinned to buffer instances, by the arena. The pinned memory is not + * accessible for use by any other allocation, until the buffers using have all been released. + */ + public long numPinnedBytes() { + long val = activeBytesHuge.value(); // Huge chunks are exact-sized for the buffers they were allocated to. + synchronized (this) { + for (int i = 0; i < chunkListMetrics.size(); i++) { + for (PoolChunkMetric m: chunkListMetrics.get(i)) { + val += ((PoolChunk<?>) m).pinnedBytes(); + } + } + } + return max(0, val); + } + protected abstract PoolChunk<T> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize); protected abstract PoolChunk<T> newUnpooledChunk(int capacity); protected abstract PooledByteBuf<T> newByteBuf(int maxCapacity); diff --git a/buffer/src/main/java/io/netty/buffer/PoolChunk.java b/buffer/src/main/java/io/netty/buffer/PoolChunk.java index b9a1b4cb49b..5cf495b974b 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolChunk.java +++ b/buffer/src/main/java/io/netty/buffer/PoolChunk.java @@ -172,6 +172,7 @@ final class PoolChunk<T> implements PoolChunkMetric { private final Deque<ByteBuffer> cachedNioBuffers; int freeBytes; + int pinnedBytes; PoolChunkList<T> parent; PoolChunk<T> prev; @@ -339,7 +340,9 @@ private long allocateRun(int runSize) { handle = splitLargeRun(handle, pages); } - freeBytes -= runSize(pageShifts, handle); + int pinnedSize = runSize(pageShifts, handle); + freeBytes -= pinnedSize; + pinnedBytes += pinnedSize; return handle; } } @@ -448,6 +451,8 @@ private long allocateSubpage(int sizeIdx) { * @param handle handle to free */ void free(long handle, int normCapacity, ByteBuffer nioBuffer) { + int runSize = runSize(pageShifts, handle); + pinnedBytes -= runSize; if (isSubpage(handle)) { int sizeIdx = arena.size2SizeIdx(normCapacity); PoolSubpage<T> head = arena.findSubpagePoolHead(sizeIdx); @@ -470,8 +475,6 @@ void free(long handle, int normCapacity, ByteBuffer nioBuffer) { } //start free run - int pages = runPages(handle); - synchronized (runsAvail) { // collapse continuous runs, successfully collapsed runs // will be removed from runsAvail and runsAvailMap @@ -483,7 +486,7 @@ void free(long handle, int normCapacity, ByteBuffer nioBuffer) { finalRun &= ~(1L << IS_SUBPAGE_SHIFT); insertAvailRun(runOffset(finalRun), runPages(finalRun), finalRun); - freeBytes += pages << pageShifts; + freeBytes += runSize; } if (nioBuffer != null && cachedNioBuffers != null && @@ -585,6 +588,12 @@ public int freeBytes() { } } + public int pinnedBytes() { + synchronized (arena) { + return pinnedBytes; + } + } + @Override public String toString() { final int freeBytes; diff --git a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java index dd9e36f55b7..8fd7b1fd0a7 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java +++ b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java @@ -677,6 +677,40 @@ private static long usedMemory(PoolArena<?>[] arenas) { return used; } + /** + * Returns the number of bytes of heap memory that is currently pinned to heap buffers 
allocated by a + * {@link ByteBufAllocator}, or {@code -1} if unknown. + * A buffer can pin more memory than its {@linkplain ByteBuf#capacity() capacity} might indicate, + * due to implementation details of the allocator. + */ + public final long pinnedHeapMemory() { + return pinnedMemory(heapArenas); + } + + /** + * Returns the number of bytes of direct memory that is currently pinned to direct buffers allocated by a + * {@link ByteBufAllocator}, or {@code -1} if unknown. + * A buffer can pin more memory than its {@linkplain ByteBuf#capacity() capacity} might indicate, + * due to implementation details of the allocator. + */ + public final long pinnedDirectMemory() { + return pinnedMemory(directArenas); + } + + private static long pinnedMemory(PoolArena<?>[] arenas) { + if (arenas == null) { + return -1; + } + long used = 0; + for (PoolArena<?> arena : arenas) { + used += arena.numPinnedBytes(); + if (used < 0) { + return Long.MAX_VALUE; + } + } + return used; + } + final PoolThreadCache threadCache() { PoolThreadCache cache = threadCache.get(); assert cache != null;
diff --git a/buffer/src/test/java/io/netty/buffer/AbstractByteBufAllocatorTest.java b/buffer/src/test/java/io/netty/buffer/AbstractByteBufAllocatorTest.java index e2f3ab72ef3..e6e064b40cc 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractByteBufAllocatorTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractByteBufAllocatorTest.java @@ -95,7 +95,6 @@ protected static void assertInstanceOf(ByteBuf buffer, Class<? extends ByteBuf> assertTrue(clazz.isInstance(buffer instanceof SimpleLeakAwareByteBuf ? buffer.unwrap() : buffer)); } - @SuppressWarnings("unchecked") @Test public void testUsedDirectMemory() { T allocator = newAllocator(true); @@ -114,7 +113,6 @@ public void testUsedDirectMemory() { assertEquals(expectedUsedMemoryAfterRelease(allocator, capacity), metric.usedDirectMemory()); } - @SuppressWarnings("unchecked") @Test public void testUsedHeapMemory() { T allocator = newAllocator(true); @@ -141,4 +139,7 @@ protected long expectedUsedMemory(T allocator, int capacity) { protected long expectedUsedMemoryAfterRelease(T allocator, int capacity) { return 0; } + + protected void trimCaches(T allocator) { + } } diff --git a/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java b/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java index ef3a73d7789..16cb9927b9e 100644 --- a/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java +++ b/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Queue; +import java.util.Random; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; @@ -37,6 +38,7 @@ import static io.netty.buffer.PoolChunk.runOffset; import static io.netty.buffer.PoolChunk.runPages; import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -67,6 +69,11 @@ protected long expectedUsedMemoryAfterRelease(PooledByteBufAllocator allocator, return allocator.metric().chunkSize(); } + @Override + protected void trimCaches(PooledByteBufAllocator allocator) { + allocator.trimCurrentThreadCache(); + } + @Test public void testTrim() { PooledByteBufAllocator allocator = newAllocator(true); @@ -689,4 +696,129 @@ public void testNormalPoolSubpageRelease() { assertTrue(beforeFreeBytes < afterFreeBytes); } + + @Override + @Test + public void testUsedDirectMemory() { + for (int power = 0; power < 8; power++) { + int initialCapacity = 1024 << power; + testUsedDirectMemory(initialCapacity); + } + } + + private void testUsedDirectMemory(int initialCapacity) { + PooledByteBufAllocator allocator = newAllocator(true); + ByteBufAllocatorMetric metric = allocator.metric(); + assertEquals(0, metric.usedDirectMemory()); + assertEquals(0, allocator.pinnedDirectMemory()); + ByteBuf buffer = allocator.directBuffer(initialCapacity, 4 * initialCapacity); + int capacity = buffer.capacity(); + assertEquals(expectedUsedMemory(allocator, capacity), metric.usedDirectMemory()); + assertThat(allocator.pinnedDirectMemory()) + .isGreaterThanOrEqualTo(capacity) + .isLessThanOrEqualTo(metric.usedDirectMemory()); + + // Double the size of the buffer + buffer.capacity(capacity << 1); + capacity = buffer.capacity(); + 
assertEquals(expectedUsedMemory(allocator, capacity), metric.usedDirectMemory(), buffer.toString()); + assertThat(allocator.pinnedDirectMemory()) + .isGreaterThanOrEqualTo(capacity) + .isLessThanOrEqualTo(metric.usedDirectMemory()); + + buffer.release(); + assertEquals(expectedUsedMemoryAfterRelease(allocator, capacity), metric.usedDirectMemory()); + assertThat(allocator.pinnedDirectMemory()) + .isGreaterThanOrEqualTo(0) + .isLessThanOrEqualTo(metric.usedDirectMemory()); + trimCaches(allocator); + assertEquals(0, allocator.pinnedDirectMemory()); + + int[] capacities = new int[30]; + Random rng = new Random(); + for (int i = 0; i < capacities.length; i++) { + capacities[i] = initialCapacity / 4 + rng.nextInt(8 * initialCapacity); + } + ByteBuf[] bufs = new ByteBuf[capacities.length]; + for (int i = 0; i < 20; i++) { + bufs[i] = allocator.directBuffer(capacities[i], 2 * capacities[i]); + } + for (int i = 0; i < 10; i++) { + bufs[i].release(); + } + for (int i = 20; i < 30; i++) { + bufs[i] = allocator.directBuffer(capacities[i], 2 * capacities[i]); + } + for (int i = 0; i < 10; i++) { + bufs[i] = allocator.directBuffer(capacities[i], 2 * capacities[i]); + } + for (int i = 0; i < 30; i++) { + bufs[i].release(); + } + trimCaches(allocator); + assertEquals(0, allocator.pinnedDirectMemory()); + } + + @Override + @Test + public void testUsedHeapMemory() { + for (int power = 0; power < 8; power++) { + int initialCapacity = 1024 << power; + testUsedHeapMemory(initialCapacity); + } + } + + private void testUsedHeapMemory(int initialCapacity) { + PooledByteBufAllocator allocator = newAllocator(true); + ByteBufAllocatorMetric metric = allocator.metric(); + + assertEquals(0, metric.usedHeapMemory()); + assertEquals(0, allocator.pinnedDirectMemory()); + ByteBuf buffer = allocator.heapBuffer(initialCapacity, 4 * initialCapacity); + int capacity = buffer.capacity(); + assertEquals(expectedUsedMemory(allocator, capacity), metric.usedHeapMemory()); + assertThat(allocator.pinnedHeapMemory()) + .isGreaterThanOrEqualTo(capacity) + .isLessThanOrEqualTo(metric.usedHeapMemory()); + + // Double the size of the buffer + buffer.capacity(capacity << 1); + capacity = buffer.capacity(); + assertEquals(expectedUsedMemory(allocator, capacity), metric.usedHeapMemory()); + assertThat(allocator.pinnedHeapMemory()) + .isGreaterThanOrEqualTo(capacity) + .isLessThanOrEqualTo(metric.usedHeapMemory()); + + buffer.release(); + assertEquals(expectedUsedMemoryAfterRelease(allocator, capacity), metric.usedHeapMemory()); + assertThat(allocator.pinnedHeapMemory()) + .isGreaterThanOrEqualTo(0) + .isLessThanOrEqualTo(metric.usedHeapMemory()); + trimCaches(allocator); + assertEquals(0, allocator.pinnedHeapMemory()); + + int[] capacities = new int[30]; + Random rng = new Random(); + for (int i = 0; i < capacities.length; i++) { + capacities[i] = initialCapacity / 4 + rng.nextInt(8 * initialCapacity); + } + ByteBuf[] bufs = new ByteBuf[capacities.length]; + for (int i = 0; i < 20; i++) { + bufs[i] = allocator.heapBuffer(capacities[i], 2 * capacities[i]); + } + for (int i = 0; i < 10; i++) { + bufs[i].release(); + } + for (int i = 20; i < 30; i++) { + bufs[i] = allocator.heapBuffer(capacities[i], 2 * capacities[i]); + } + for (int i = 0; i < 10; i++) { + bufs[i] = allocator.heapBuffer(capacities[i], 2 * capacities[i]); + } + for (int i = 0; i < 30; i++) { + bufs[i].release(); + } + trimCaches(allocator); + assertEquals(0, allocator.pinnedDirectMemory()); + } }
test
test
"2021-09-09T15:08:08"
"2021-08-31T20:26:19Z"
JamesChenX
val
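A minimal sketch of how the pinned-memory accessors added by the patch above can be queried, assuming a Netty release that includes `pinnedDirectMemory()`/`pinnedHeapMemory()`; the class name `PinnedMemoryProbe` is illustrative:

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public final class PinnedMemoryProbe {
    public static void main(String[] args) {
        PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
        ByteBuf buf = alloc.directBuffer(8192);
        // Pinned memory counts bytes tied up by live buffers; it can exceed the
        // buffer's capacity because the allocator pins whole runs, not exact sizes.
        System.out.println("pinned direct: " + alloc.pinnedDirectMemory());
        System.out.println("used direct:   " + alloc.metric().usedDirectMemory());
        buf.release();
        // After release, trimming the thread-local cache lets pinned memory drop back.
        alloc.trimCurrentThreadCache();
        System.out.println("pinned direct after release: " + alloc.pinnedDirectMemory());
    }
}
```

This is why the tests above only bound the pinned value between the buffer capacity and the allocator's used memory, rather than asserting an exact figure.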
netty/netty/11700_11706
netty/netty
netty/netty/11700
netty/netty/11706
[ "keyword_issue_to_pr", "keyword_pr_to_issue" ]
23405e2000427e9cad104913e7071d8d08e91a3c
83278c50005cfa58762167505aa9be8f19814433
[ "Actually your this test has bug in line 26, 27.", "> Actually your this test has bug in line 26, 27.\r\n\r\n@forchid The test where line 26 and 27 are is passing. What bug does it have? The issue is that the other test is failing.", "> The test where line 26 and 27 are is passing. What bug does it have? The issue is that the other test is failing.\r\n\r\nIn the method endianTestCallingOrder(), the byte order is LITTLE_ENDIAN, so these asserts should be:\r\n```java\r\nassertThat(b.getShort(0)).isEqualTo(message);\r\nassertThat(b.getShortLE(0)).isEqualTo(reverse);\r\n```\r\n# The detail of these steps:\r\n## Step-1 writeShortLE()\r\n```java\r\nByteBuf b = Unpooled.buffer(1024).order(ByteOrder.LITTLE_ENDIAN).writeShortLE(message)\r\n```\r\n-> Storage arrange: 0x50,0xD0\r\n\r\n## Step-2 getShort() in LITTLE_ENDIAN ByteBuf\r\n-> Result: 0x50 | (0xD0 >> 8) for the java short byte order BIG_ENDIAN!\r\n\r\n## Step-3 getShortLe() in LITTLE_ENDIAN ByteBuf\r\n-> Result: (0x50 >> 8) | 0xD0\r\n", "> > The test where line 26 and 27 are is passing. What bug does it have? The issue is that the other test is failing.\r\n\r\n> In the method endianTestCallingOrder(), the byte order is LITTLE_ENDIAN, so these asserts should be:\r\n> ```\r\n> assertThat(b.getShort(0)).isEqualTo(message);\r\n> assertThat(b.getShortLE(0)).isEqualTo(reverse);\r\n> ```\r\n\r\nLine 26 and 27 are not in method `endianTestCallingOrder()`. Is there still a bug in line 26 and 27 in method `endianTestWithoutCallingOrder()`?\r\n\r\nThe method `endianTestCallingOrder()` was passing in Netty 4.1.53.Final, does that mean there was a bug in Netty 4.1.53.Final? (Is it related to #10747 ?)", "Yes, this bug was existing before Netty 4.1.54.Final. Actually getShort() in java is getShortBE() for instruction set BIG_ENDIAN in jvm specification.", "The `*LE` methods have different behaviour depending on the concrete buffer implementation and how it's put together with wrappers and so on.", "@chrisvest, \"different behavior depending on the implementation\" sounds like a bug or a useless API. It seems especially bad with the methods being encouraged over `order()`. We had assumed \"maybe it is a bug with order()\", but if the API is \"a box of chocolates and you never know what you are going to get\" then that is a more severe issue.\r\n\r\nBut even if there is some variation in implementation, it seems particularly surprising that `writeShortLE` and `getShortLE` disagree with each other; I don't know how to interpret that other than there's a bug.", "Oh, I didn't catch that those two were disagreeing, but either way, yeah I think it's a bug.", "Looks like the entirety of `SwappedByteBuf` needs an audit. But it looks like it has always been broken https://github.com/netty/netty/commit/0f9492c9affc528c766f9677952412564d4a3f6d . It seems what changed is https://github.com/netty/netty/commit/96da45de2d53d74ad3a77e83c840410eb3f847c7 fixed many of the methods, but also missed many of them. Before the methods were wrong, but in agreement. Now some methods are just a bit more obviously broken.\r\n\r\nhttps://github.com/netty/netty/blob/23405e2000427e9cad104913e7071d8d08e91a3c/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java#L247-L250\r\n\r\nAnd getMediumLE, getIntLE, and getLongLE look busted. 
And then I see some other busted methods like:\r\n\r\nhttps://github.com/netty/netty/blob/23405e2000427e9cad104913e7071d8d08e91a3c/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java#L402-L406", "Opened #11706 to fix this.\r\n\r\n@dapengzhang0 Note the tests I added to `UnpooledTest` are different from the ones you posted above. Specifically, the test you said was failing is now making the _opposite_ assertion, namely that `LE` methods always perform little-endian accesses." ]
[]
"2021-09-23T11:47:19Z"
[]
ByteBuf.order() seems to have wrong behavior
Found unexpected behavior when calling ```java Unpooled.buffer(1024).order(ByteOrder.LITTLE_ENDIAN).writeShortLE(message) ``` since netty 4.1.54.Final. The `order()` method has already been deprecated, but nevertheless it shouldn't be broken. ### Expected behavior Test passes ### Actual behavior Test failed ``` expected: -12208 but was : 20688 at NettyEndianTest.endianTestCallingOrder(NettyEndianTest.java:26) ``` ### Steps to reproduce Create the following project *./build.gradle* ```gradle plugins { id 'java' } group 'org.example' version '1.0-SNAPSHOT' repositories { mavenCentral() } dependencies { testImplementation 'com.google.truth:truth:1.0.1' testImplementation 'io.netty:netty-buffer:4.1.68.Final' testImplementation 'org.junit.jupiter:junit-jupiter-api:5.7.0' testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine:5.7.0' } test { useJUnitPlatform() } ``` *./src/NettyEndianTest.java* ```java import static com.google.common.truth.Truth.assertThat; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import java.nio.ByteOrder; import org.junit.jupiter.api.Test; class NettyEndianTest { // Test will fail for netty 4.1.54.Final and above @SuppressWarnings("deprecation") @Test void endianTestCallingOrder() { short message = (short) 0xD050; short reverse = (short) 0x50D0; ByteBuf b = Unpooled.buffer(1024).order(ByteOrder.LITTLE_ENDIAN).writeShortLE(message); assertThat(b.getShortLE(0)).isEqualTo(message); assertThat(b.getShort(0)).isEqualTo(reverse); } // Test is passing @Test void endianTestWithoutCallingOrder() { short message = (short) 0xD050; short reverse = (short) 0x50D0; ByteBuf b = Unpooled.buffer(1024).writeShortLE(message); assertThat(b.getShortLE(0)).isEqualTo(message); assertThat(b.getShort(0)).isEqualTo(reverse); } } ``` Run *gradle test* ### Minimal yet complete reproducer code (or URL to code) ### Netty version 4.1.54.Final - 4.1.68.Final Earlier versions before 4.1.54 were good. ### JVM version (e.g. `java -version`) openjdk version "11.0.11" 2021-04-20 LTS OpenJDK Runtime Environment Zulu11.48+21-CA (build 11.0.11+9-LTS) OpenJDK 64-Bit Server VM Zulu11.48+21-CA (build 11.0.11+9-LTS, mixed mode) Other JVMs the same. ### OS version (e.g. `uname -a`) Darwin Kernel Version 20.6.0 Other platforms the same.
[ "buffer/src/main/java/io/netty/buffer/ByteBufUtil.java", "buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java", "handler/src/main/java/io/netty/handler/ssl/SslUtils.java" ]
[ "buffer/src/main/java/io/netty/buffer/ByteBufUtil.java", "buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java", "handler/src/main/java/io/netty/handler/ssl/SslUtils.java" ]
[ "buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java", "buffer/src/test/java/io/netty/buffer/UnpooledTest.java" ]
diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java b/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java index 7f7a4902cc3..68a0ab5aaa0 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java @@ -474,7 +474,7 @@ private static long compareUintBigEndian( private static long compareUintLittleEndian( ByteBuf bufferA, ByteBuf bufferB, int aIndex, int bIndex, int uintCountIncrement) { for (int aEnd = aIndex + uintCountIncrement; aIndex < aEnd; aIndex += 4, bIndex += 4) { - long comp = bufferA.getUnsignedIntLE(aIndex) - bufferB.getUnsignedIntLE(bIndex); + long comp = uintFromLE(bufferA.getUnsignedIntLE(aIndex)) - uintFromLE(bufferB.getUnsignedIntLE(bIndex)); if (comp != 0) { return comp; } @@ -485,7 +485,9 @@ private static long compareUintLittleEndian( private static long compareUintBigEndianA( ByteBuf bufferA, ByteBuf bufferB, int aIndex, int bIndex, int uintCountIncrement) { for (int aEnd = aIndex + uintCountIncrement; aIndex < aEnd; aIndex += 4, bIndex += 4) { - long comp = bufferA.getUnsignedInt(aIndex) - bufferB.getUnsignedIntLE(bIndex); + long a = bufferA.getUnsignedInt(aIndex); + long b = uintFromLE(bufferB.getUnsignedIntLE(bIndex)); + long comp = a - b; if (comp != 0) { return comp; } @@ -496,7 +498,9 @@ private static long compareUintBigEndianA( private static long compareUintBigEndianB( ByteBuf bufferA, ByteBuf bufferB, int aIndex, int bIndex, int uintCountIncrement) { for (int aEnd = aIndex + uintCountIncrement; aIndex < aEnd; aIndex += 4, bIndex += 4) { - long comp = bufferA.getUnsignedIntLE(aIndex) - bufferB.getUnsignedInt(bIndex); + long a = uintFromLE(bufferA.getUnsignedIntLE(aIndex)); + long b = bufferB.getUnsignedInt(bIndex); + long comp = a - b; if (comp != 0) { return comp; } @@ -504,6 +508,10 @@ private static long compareUintBigEndianB( return 0; } + private static long uintFromLE(long value) { + return Long.reverseBytes(value) >>> Integer.SIZE; + } + private static final class SWARByteSearch { private static long compilePattern(byte byteToFind) { diff --git a/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java b/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java index 7e038a6e87b..3b1b4d53854 100644 --- a/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java @@ -246,7 +246,7 @@ public short getShort(int index) { @Override public short getShortLE(int index) { - return buf.getShort(index); + return buf.getShortLE(index); } @Override @@ -266,7 +266,7 @@ public int getMedium(int index) { @Override public int getMediumLE(int index) { - return buf.getMedium(index); + return buf.getMediumLE(index); } @Override @@ -286,7 +286,7 @@ public int getInt(int index) { @Override public int getIntLE(int index) { - return buf.getInt(index); + return buf.getIntLE(index); } @Override @@ -306,7 +306,7 @@ public long getLong(int index) { @Override public long getLongLE(int index) { - return buf.getLong(index); + return buf.getLongLE(index); } @Override @@ -401,7 +401,7 @@ public ByteBuf setShort(int index, int value) { @Override public ByteBuf setShortLE(int index, int value) { - buf.setShort(index, (short) value); + buf.setShortLE(index, (short) value); return this; } @@ -413,7 +413,7 @@ public ByteBuf setMedium(int index, int value) { @Override public ByteBuf setMediumLE(int index, int value) { - buf.setMedium(index, value); + buf.setMediumLE(index, value); return this; } @@ -425,7 +425,7 @@ public ByteBuf setInt(int index, int 
value) { @Override public ByteBuf setIntLE(int index, int value) { - buf.setInt(index, value); + buf.setIntLE(index, value); return this; } @@ -437,7 +437,7 @@ public ByteBuf setLong(int index, long value) { @Override public ByteBuf setLongLE(int index, long value) { - buf.setLong(index, value); + buf.setLongLE(index, value); return this; } diff --git a/handler/src/main/java/io/netty/handler/ssl/SslUtils.java b/handler/src/main/java/io/netty/handler/ssl/SslUtils.java index 49665002bad..c9d06ed4386 100644 --- a/handler/src/main/java/io/netty/handler/ssl/SslUtils.java +++ b/handler/src/main/java/io/netty/handler/ssl/SslUtils.java @@ -327,15 +327,21 @@ static int getEncryptedPacketLength(ByteBuf buffer, int offset) { // Reads a big-endian unsigned short integer from the buffer @SuppressWarnings("deprecation") private static int unsignedShortBE(ByteBuf buffer, int offset) { - return buffer.order() == ByteOrder.BIG_ENDIAN ? - buffer.getUnsignedShort(offset) : buffer.getUnsignedShortLE(offset); + int value = buffer.getUnsignedShort(offset); + if (buffer.order() == ByteOrder.LITTLE_ENDIAN) { + value = Integer.reverseBytes(value) >>> Short.SIZE; + } + return value; } // Reads a big-endian short integer from the buffer @SuppressWarnings("deprecation") private static short shortBE(ByteBuf buffer, int offset) { - return buffer.order() == ByteOrder.BIG_ENDIAN ? - buffer.getShort(offset) : buffer.getShortLE(offset); + short value = buffer.getShort(offset); + if (buffer.order() == ByteOrder.LITTLE_ENDIAN) { + value = Short.reverseBytes(value); + } + return value; } private static short unsignedByte(byte b) {
diff --git a/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java index c61dd334d16..6a2c98fbdf6 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java @@ -5943,6 +5943,16 @@ public void explicitLittleEndianReadMethodsMustAlwaysUseLittleEndianByteOrder() buffer.readerIndex(0); assertEquals(0x0807060504030201L, Double.doubleToRawLongBits(buffer.readDoubleLE())); buffer.readerIndex(0); + + assertEquals(0x0201, buffer.getShortLE(0)); + assertEquals(0x0201, buffer.getUnsignedShortLE(0)); + assertEquals(0x030201, buffer.getMediumLE(0)); + assertEquals(0x030201, buffer.getUnsignedMediumLE(0)); + assertEquals(0x04030201, buffer.getIntLE(0)); + assertEquals(0x04030201, buffer.getUnsignedIntLE(0)); + assertEquals(0x04030201, Float.floatToRawIntBits(buffer.getFloatLE(0))); + assertEquals(0x0807060504030201L, buffer.getLongLE(0)); + assertEquals(0x0807060504030201L, Double.doubleToRawLongBits(buffer.getDoubleLE(0))); } @Test @@ -5965,5 +5975,18 @@ public void explicitLittleEndianWriteMethodsMustAlwaysUseLittleEndianByteOrder() buffer.clear(); buffer.writeDoubleLE(Double.longBitsToDouble(0x0102030405060708L)); assertEquals(0x0102030405060708L, Double.doubleToRawLongBits(buffer.readDoubleLE())); + + buffer.setShortLE(0, 0x0102); + assertEquals(0x0102, buffer.getShortLE(0)); + buffer.setMediumLE(0, 0x010203); + assertEquals(0x010203, buffer.getMediumLE(0)); + buffer.setIntLE(0, 0x01020304); + assertEquals(0x01020304, buffer.getIntLE(0)); + buffer.setFloatLE(0, Float.intBitsToFloat(0x01020304)); + assertEquals(0x01020304, Float.floatToRawIntBits(buffer.getFloatLE(0))); + buffer.setLongLE(0, 0x0102030405060708L); + assertEquals(0x0102030405060708L, buffer.getLongLE(0)); + buffer.setDoubleLE(0, Double.longBitsToDouble(0x0102030405060708L)); + assertEquals(0x0102030405060708L, Double.doubleToRawLongBits(buffer.getDoubleLE(0))); } } diff --git a/buffer/src/test/java/io/netty/buffer/UnpooledTest.java b/buffer/src/test/java/io/netty/buffer/UnpooledTest.java index 0d1fc2131a3..efc1dafd1ed 100644 --- a/buffer/src/test/java/io/netty/buffer/UnpooledTest.java +++ b/buffer/src/test/java/io/netty/buffer/UnpooledTest.java @@ -22,6 +22,7 @@ import java.io.InputStream; import java.nio.ByteBuffer; +import java.nio.ByteOrder; import java.nio.channels.ScatteringByteChannel; import java.nio.charset.Charset; import java.util.ArrayList; @@ -768,4 +769,53 @@ public void execute() throws Throwable { wrappedBuffer.release(); } } + + @SuppressWarnings("deprecation") + @Test + public void littleEndianWriteOnLittleEndianBufferMustStoreLittleEndianValue() { + ByteBuf b = buffer(1024).order(ByteOrder.LITTLE_ENDIAN); + + b.writeShortLE(0x0102); + assertEquals((short) 0x0102, b.getShortLE(0)); + assertEquals((short) 0x0102, b.getShort(0)); + b.clear(); + + b.writeMediumLE(0x010203); + assertEquals(0x010203, b.getMediumLE(0)); + assertEquals(0x010203, b.getMedium(0)); + b.clear(); + + b.writeIntLE(0x01020304); + assertEquals(0x01020304, b.getIntLE(0)); + assertEquals(0x01020304, b.getInt(0)); + b.clear(); + + b.writeLongLE(0x0102030405060708L); + assertEquals(0x0102030405060708L, b.getLongLE(0)); + assertEquals(0x0102030405060708L, b.getLong(0)); + } + + @Test + public void littleEndianWriteOnDefaultBufferMustStoreLittleEndianValue() { + ByteBuf b = buffer(1024); + + b.writeShortLE(0x0102); + assertEquals((short) 0x0102, b.getShortLE(0)); + assertEquals((short) 0x0201, 
b.getShort(0)); + b.clear(); + + b.writeMediumLE(0x010203); + assertEquals(0x010203, b.getMediumLE(0)); + assertEquals(0x030201, b.getMedium(0)); + b.clear(); + + b.writeIntLE(0x01020304); + assertEquals(0x01020304, b.getIntLE(0)); + assertEquals(0x04030201, b.getInt(0)); + b.clear(); + + b.writeLongLE(0x0102030405060708L); + assertEquals(0x0102030405060708L, b.getLongLE(0)); + assertEquals(0x0807060504030201L, b.getLong(0)); + } }
train
test
"2021-09-22T08:39:24"
"2021-09-21T19:09:38Z"
dapengzhang0
val
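The fixed semantics mirror the assertions added to `UnpooledTest` above: explicit `*LE` accessors always perform little-endian accesses, whatever the buffer's deprecated `order()` says. A minimal check (run with `-ea`; the class name is illustrative):

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.ByteOrder;

public final class LittleEndianCheck {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        ByteBuf b = Unpooled.buffer(16).order(ByteOrder.LITTLE_ENDIAN);
        b.writeShortLE(0x0102);
        // With the fix, writeShortLE and getShortLE agree on every buffer:
        assert b.getShortLE(0) == 0x0102;
        // getShort() on a LITTLE_ENDIAN-ordered buffer also reads little-endian,
        // so it returns the same value here.
        assert b.getShort(0) == 0x0102;
    }
}
```

Note that this asserts the opposite of the failing expectation in the original report: after the fix the `*LE` methods are consistently little-endian rather than flipped by the swapped wrapper.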
netty/netty/11701_11722
netty/netty
netty/netty/11701
netty/netty/11722
[ "keyword_pr_to_issue" ]
4a1297ffc2124b2266fbea295e406e31c2140993
f87abab779613cbe730fc0fa8fd32fa7c90609d1
[ "The binary compatibility baseline for our native code is the GLIBC version that's used on CentOS 6. The work-around is to use the Nio transport.\r\n\r\nIt's not clear from the error reports how this is really showing up. If it's an ABI problem, then I'm not sure what we can do to mitigate it. If we are calling glibc specific functions, then I wonder if we can somehow check what libc we are using at runtime.", ">It's not clear from the error reports how this is really showing up.\r\n\r\nI'm not sure what your question is. The error happens when Netty native is used without having glibc.\r\n\r\n>If it's an ABI problem...\r\n\r\nI don't think it is an ABI problem since glibc is missing in Alpine, there is nothing to be compatible with (and based on [this comment ](https://github.com/netty/netty/issues/6841#issuecomment-307716433), other libc implementations are out of question).\r\n\r\nI think checking if glibc is available or not and/or fixing `Epoll.isAvailable()` would fix the issue (it seems it returns true even if glibc is missing).", "I put together a little test for this: https://github.com/jonatan-ivanov/netty-gh-11701\r\n\r\nI called `Epoll.isAvailable()` and `Epoll.ensureAvailability()` on different distros to see if they return false/fail in case glibc is missing. It seems `isAvailable` does not return false and `ensureAvailability` does not fail even if glibc is missing.\r\n\r\nImage | `isAvailable` | `ensureAvailability` | Comment\r\n------ | ------------------- | --------------------------- | -------------\r\nbellsoft_liberica-openjre-debian:17 | `true` | `OK` | works\r\nbellsoft_liberica-openjre-alpine:17 | `true` | `OK` | works since it contains glibc\r\n**bellsoft_liberica-openjre-alpine-musl:17** | `true` | `OK` | **does not work as expected**\r\nazul_zulu-openjdk:17 | `true` | `OK` | works as expected\r\n**azul_zulu-openjdk-alpine:17-jre** | `true` | `OK` | **does not work as expected**\r\n\r\n", "There's no way to detect the libc vendor from within a process, as far as I can see. And calling out to `lld`, or something like that, is quite awkward and can be a security liability. I think the best we can do for the moment is to document that only the Nio transport is supported on musl.\r\n\r\nWhat do you think, @normanmaurer ?", "I agree with @chrisvest here ", "I'm not very familiar with this domain but do you think is it at least possible to throw an exception instead of crashing a JVM if such a thing occur?\r\n\r\nAlso can you use the `__GLIBC__ ` from [`features.h`](https://sourceware.org/git/?p=glibc.git;a=blob;f=include/features.h;h=d974eabfafc24ffb9ff4cd066b650134df32048b;hb=refs/heads/master#l477) to detect if you are dealing with glibc or not or is this only possible compile time?\r\n", "@jonatan-ivanov That's only at compile time. At runtime you can feature detect by inspecting linked symbols, except you cant do that without calling into libc which in this case would cause a crash. Alternatively we shell out to a program to inspect libc, but that is also bad for multiple reasons, and we don't want to do that.", "I have updated the documentation on the native transports to note that musl is not officially supported: https://github.com/netty/netty/wiki/Native-transports\r\n\r\nI think that's the best we can do for now.", "musl generally tries to support glibc-linked binaries invoking POSIX and some BSD extensions. errors related to GNU-specific extensions (and glibc header bastardizations) should be caught at load time, as in #6841 \"__strndup: symbol not found\". 
that particular issue can be fixed by installing gcompat package in alpine linux. segfaults are, as you say, caused by some ABI incompatibility.\r\n\r\ni briefly glanced through netty c code and found some suspicious items: https://github.com/netty/netty/blob/6b3ec62e6e4e19a85304aecd8cb5331a3fcc70be/transport-native-unix-common/src/main/c/netty_unix_errors.c#L41 is suspicious: application code should never check feature test macros, because it should be defining it themselves. https://github.com/netty/netty/blob/51ebcbd9aba7eac854716e45f07765744f8dcdaf/transport-native-epoll/src/main/c/netty_epoll_native.c#L140 is suspicious: sysctl values should always be accessed using `read`/`write`, never stdio. some sysctls will not be read correctly if using stdio. neither of these should cause segfault at run time though.", "@Hello71 The netty_unit_errors code is because we wish to use a thread-safe strerror variant, which is not immediately portable. (I'm not familiar with the other one you point out)\r\nIf you know of a way to improve it, we'd love to review a PR!", "Fix reverted because it negatively impacts musl-based systems that also use the glibc compatibility layer.", "@chrisvest Would you consider reopening this issue so that we can track another fix (if any)?", "reopened ", "fwiw if you want to do this in the dumbest possible way, you can check for `gnu_get_libc_version`. if your program is segfaulting and not just failing with symbol not found, then most likely there is some more serious issue with your program, not related to musl.", "@Hello71 The JVM itself might rely on signals and tie them to internal semantics, i.e. rare NullPointerExceptions sometimes rely on segfaults to optimise a non-null fast path. So I wouldn't feel very confident trying to trap signals in JNI library code.", "I think this would be fixed by https://github.com/netty/netty/pull/12272 ... " ]
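As the discussion above notes, there is no clean in-process way to ask which libc a JVM is linked against. A hedged sketch of the runtime heuristic the patch below settles on, scanning `/proc/self/maps` for a musl loader mapping; the class name `LibcProbe` is hypothetical and the check is best-effort, not authoritative:

```java
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

final class LibcProbe {
    // Heuristic: on musl-based systems the dynamic loader mapping typically
    // contains "-musl-" (e.g. /lib/ld-musl-x86_64.so.1).
    static boolean looksLikeMusl() {
        try (BufferedReader reader = new BufferedReader(new FileReader("/proc/self/maps"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                if (line.contains("-musl-")) {
                    return true;
                }
            }
        } catch (IOException ignore) {
            // /proc may be unavailable; treat the result as unknown.
        }
        return false;
    }
}
```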
[ "either we should rename the variable or rename the property", "nit: we could just move the whole `throw` in the if block that is contained in the while loop and so get rid of the boolean at all." ]
"2021-09-29T17:32:58Z"
[]
Throwing an exception in case glibc is missing instead of segfaulting the JVM
It seems Netty's native support depends on glibc (btw is this documented?). When glibc is missing (for example using vanilla Alpine), the JVM could end up with a crash (`SIGSEGV`), see details in this issue: https://github.com/micrometer-metrics/micrometer/issues/2776 It also seems that `Epoll.isAvailable()` returns `true` even if glibc is missing. ### Expected behavior Throwing an exception ### Actual behavior JVM crash ### Steps to reproduce Using Netty native with an environment that does not have glibc, e.g.: vanilla Alpine ### Minimal yet complete reproducer code (or URL to code) This seems to be a known issue, please let me know if you really need a reproducer. ### Netty version `4.1.68` (latest) ### JVM version (e.g. `java -version`) Latest 11: ```bash ❯ docker run --rm 'azul/zulu-openjdk-alpine:11-jre' 'sh' '-c' 'java --version' openjdk 11.0.12 2021-07-20 LTS OpenJDK Runtime Environment Zulu11.50+19-CA (build 11.0.12+7-LTS) OpenJDK 64-Bit Server VM Zulu11.50+19-CA (build 11.0.12+7-LTS, mixed mode) ``` Or also latest 17: ```bash ❯ docker run --rm 'bellsoft/liberica-openjdk-alpine-musl:17' 'sh' '-c' 'java --version' openjdk 17 2021-09-14 LTS OpenJDK Runtime Environment (build 17+35-LTS) OpenJDK 64-Bit Server VM (build 17+35-LTS, mixed mode) ``` I assume this is an issue in every JRE that is currently supported. ### OS version (e.g. `uname -a`) Both images above have the same output: ```bash ❯ docker run --rm 'azul/zulu-openjdk-alpine:11-jre' 'sh' '-c' 'uname -a' Linux 1ee8d1090f14 5.10.47-linuxkit #1 SMP Sat Jul 3 21:51:47 UTC 2021 x86_64 Linux ``` I assume this is an issue in every OS where glibc is missing.
[ "transport-native-epoll/src/main/c/netty_epoll_native.c", "transport-native-epoll/src/main/java/io/netty/channel/epoll/Native.java", "transport-native-epoll/src/main/java/io/netty/channel/epoll/NativeStaticallyReferencedJniMethods.java" ]
[ "transport-native-epoll/src/main/c/netty_epoll_native.c", "transport-native-epoll/src/main/java/io/netty/channel/epoll/Native.java", "transport-native-epoll/src/main/java/io/netty/channel/epoll/NativeStaticallyReferencedJniMethods.java" ]
[]
diff --git a/transport-native-epoll/src/main/c/netty_epoll_native.c b/transport-native-epoll/src/main/c/netty_epoll_native.c index 5fed0889596..531d13879b8 100644 --- a/transport-native-epoll/src/main/c/netty_epoll_native.c +++ b/transport-native-epoll/src/main/c/netty_epoll_native.c @@ -326,6 +326,7 @@ static jint netty_epoll_native_epollCtlAdd0(JNIEnv* env, jclass clazz, jint efd, } return res; } + static jint netty_epoll_native_epollCtlMod0(JNIEnv* env, jclass clazz, jint efd, jint fd, jint flags) { int res = epollCtl(env, efd, EPOLL_CTL_MOD, fd, flags); if (res < 0) { @@ -540,6 +541,15 @@ static jstring netty_epoll_native_kernelVersion(JNIEnv* env, jclass clazz) { return NULL; } +static jint netty_epoll_native_gnulibc(JNIEnv* env, jclass clazz) { +#ifdef __GLIBC__ + return 1; +#else + // We are using an alternative libc, possibly musl but could be anything. + return 0; +#endif // __GLIBC__ +} + static jboolean netty_epoll_native_isSupportingSendmmsg(JNIEnv* env, jclass clazz) { if (SYS_sendmmsg == -1) { return JNI_FALSE; @@ -658,7 +668,8 @@ static const JNINativeMethod statically_referenced_fixed_method_table[] = { { "isSupportingSendmmsg", "()Z", (void *) netty_epoll_native_isSupportingSendmmsg }, { "isSupportingRecvmmsg", "()Z", (void *) netty_epoll_native_isSupportingRecvmmsg }, { "tcpFastopenMode", "()I", (void *) netty_epoll_native_tcpFastopenMode }, - { "kernelVersion", "()Ljava/lang/String;", (void *) netty_epoll_native_kernelVersion } + { "kernelVersion", "()Ljava/lang/String;", (void *) netty_epoll_native_kernelVersion }, + { "gnulibc", "()I", (void *) netty_epoll_native_gnulibc } }; static const jint statically_referenced_fixed_method_table_size = sizeof(statically_referenced_fixed_method_table) / sizeof(statically_referenced_fixed_method_table[0]); static const JNINativeMethod fixed_method_table[] = { diff --git a/transport-native-epoll/src/main/java/io/netty/channel/epoll/Native.java b/transport-native-epoll/src/main/java/io/netty/channel/epoll/Native.java index 8a409dd108f..775a037f50e 100644 --- a/transport-native-epoll/src/main/java/io/netty/channel/epoll/Native.java +++ b/transport-native-epoll/src/main/java/io/netty/channel/epoll/Native.java @@ -23,11 +23,15 @@ import io.netty.util.internal.ClassInitializerUtil; import io.netty.util.internal.NativeLibraryLoader; import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.SystemPropertyUtil; import io.netty.util.internal.ThrowableUtil; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; +import java.io.BufferedReader; +import java.io.FileInputStream; import java.io.IOException; +import java.io.InputStreamReader; import java.nio.channels.FileChannel; import java.nio.channels.Selector; @@ -36,6 +40,7 @@ import static io.netty.channel.epoll.NativeStaticallyReferencedJniMethods.epollin; import static io.netty.channel.epoll.NativeStaticallyReferencedJniMethods.epollout; import static io.netty.channel.epoll.NativeStaticallyReferencedJniMethods.epollrdhup; +import static io.netty.channel.epoll.NativeStaticallyReferencedJniMethods.gnulibc; import static io.netty.channel.epoll.NativeStaticallyReferencedJniMethods.isSupportingRecvmmsg; import static io.netty.channel.epoll.NativeStaticallyReferencedJniMethods.isSupportingSendmmsg; import static io.netty.channel.epoll.NativeStaticallyReferencedJniMethods.kernelVersion; @@ -50,6 +55,7 @@ * <p>Static members which call JNI methods must be defined in {@link NativeStaticallyReferencedJniMethods}. 
*/ public final class Native { + private static final boolean checkMusl = SystemPropertyUtil.getBoolean("io.netty.native.musl.check", true); private static final InternalLogger logger = InternalLoggerFactory.getInstance(Native.class); static { @@ -77,9 +83,9 @@ public final class Native { ); try { - // First, try calling a side-effect free JNI method to see if the library was already + // First, try calling a side effect free JNI method to see if the library was already // loaded by the application. - offsetofEpollData(); + gnulibc(); } catch (UnsatisfiedLinkError ignore) { // The library was not previously loaded, load it now. loadNativeLibrary(); @@ -92,6 +98,33 @@ public final class Native { // Just ignore } } + if (checkMusl && gnulibc() == 1) { + // Our binary is compiled for linking with GLIBC. + // Let's check that we don't have anything that looks like Musl libc in our runtime. + try { + FileInputStream fis = new FileInputStream("/proc/self/maps"); + try { + BufferedReader reader = new BufferedReader(new InputStreamReader(fis)); + String line; + while ((line = reader.readLine()) != null) { + if (line.contains("-musl-")) { + throw new LinkageError("Native library was compiled for linking with GLIBC, but GLIBC " + + "was not found among library mappings. This likely means the OS/JVM uses an " + + "alternative libc, such as musl. To fix, either use NIO transport, or build a " + + "native transport for your platform."); + } + } + } finally { + try { + fis.close(); + } catch (IOException e) { + logger.debug("Failed to close /proc/self/maps file.", e); + } + } + } catch (IOException e) { + logger.debug("Unable to check libc compatibility.", e); + } + } Unix.registerInternal(new Runnable() { @Override public void run() { diff --git a/transport-native-epoll/src/main/java/io/netty/channel/epoll/NativeStaticallyReferencedJniMethods.java b/transport-native-epoll/src/main/java/io/netty/channel/epoll/NativeStaticallyReferencedJniMethods.java index b52721b26f6..ba1df58e815 100644 --- a/transport-native-epoll/src/main/java/io/netty/channel/epoll/NativeStaticallyReferencedJniMethods.java +++ b/transport-native-epoll/src/main/java/io/netty/channel/epoll/NativeStaticallyReferencedJniMethods.java @@ -43,4 +43,5 @@ private NativeStaticallyReferencedJniMethods() { } static native boolean isSupportingRecvmmsg(); static native int tcpFastopenMode(); static native String kernelVersion(); + static native int gnulibc(); }
null
val
test
"2021-09-29T16:55:36"
"2021-09-22T02:15:46Z"
jonatan-ivanov
val
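Until the native library refuses to load on a foreign libc, the practical workaround mentioned above is to fall back to the NIO transport. A sketch using only public Netty API (the class name `TransportChooser` is illustrative):

```java
import io.netty.channel.EventLoopGroup;
import io.netty.channel.ServerChannel;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

final class TransportChooser {
    // Prefer the native epoll transport when it is usable, else fall back to NIO.
    static EventLoopGroup newGroup() {
        return Epoll.isAvailable() ? new EpollEventLoopGroup() : new NioEventLoopGroup();
    }

    static Class<? extends ServerChannel> channelType() {
        return Epoll.isAvailable() ? EpollServerSocketChannel.class : NioServerSocketChannel.class;
    }
}
```

Before the fix this fallback alone did not help, because `Epoll.isAvailable()` wrongly reported `true` on musl, as the reproducer table in the discussion above shows.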
netty/netty/11784_11793
netty/netty
netty/netty/11784
netty/netty/11793
[ "keyword_pr_to_issue" ]
77b7c1a56dcd4fc964137f21caf321a1ca19c0ed
3a1fb3f10df18eece68e88a86d4561fdd1b7723a
[ "Hi @wo883721 \r\n\r\nI read through the snippet you posted, but I don't see the issue. Can you explain a bit more?", "@chrisvest \r\nI don't understand,Why can this method change the value of `writerIndex`?\r\n\r\n`setCharSequence` method implementation\r\n```\r\n public int setCharSequence(int index, CharSequence sequence, Charset charset) {\r\n return setCharSequence0(index, sequence, charset, false);\r\n }\r\n```\r\n\r\n`writeCharSequence` method implementation\r\n```\r\n @Override\r\n public int writeCharSequence(CharSequence sequence, Charset charset) {\r\n int written = setCharSequence0(writerIndex, sequence, charset, true);\r\n writerIndex += written;\r\n return written;\r\n }\r\n```", "@wo883721 It's a \"set\" method. They take a target index as an argument, and leaves the bytebuf writer index unchanged.\n\nIf it changed the writer index, it would have to be a \"write\" method.", "Oh I see. The javadocs have been copy-pasted from the write method. " ]
[]
"2021-10-25T08:51:56Z"
[]
Incorrect setCharSequence method description in ByteBuf.class
This is just a minor problem: I found that the setCharSequence method description in ByteBuf.class is wrong. ``` /** * Writes the specified {@link CharSequence} at the current {@code writerIndex} and increases * the {@code writerIndex} by the written bytes. * * @param index on which the sequence should be written * @param sequence to write * @param charset that should be used. * @return the written number of bytes. * @throws IndexOutOfBoundsException * if {@code this.writableBytes} is not large enough to write the whole sequence */ public abstract int setCharSequence(int index, CharSequence sequence, Charset charset); ```
[ "buffer/src/main/java/io/netty/buffer/ByteBuf.java" ]
[ "buffer/src/main/java/io/netty/buffer/ByteBuf.java" ]
[]
diff --git a/buffer/src/main/java/io/netty/buffer/ByteBuf.java b/buffer/src/main/java/io/netty/buffer/ByteBuf.java index fef30f125ae..908e110a57f 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBuf.java @@ -1338,15 +1338,15 @@ public ByteBuf setDoubleLE(int index, double value) { public abstract ByteBuf setZero(int index, int length); /** - * Writes the specified {@link CharSequence} at the current {@code writerIndex} and increases - * the {@code writerIndex} by the written bytes. + * Writes the specified {@link CharSequence} at the given {@code index}. + * The {@code writerIndex} is not modified by this method. * * @param index on which the sequence should be written * @param sequence to write * @param charset that should be used. * @return the written number of bytes. * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is not large enough to write the whole sequence + * if the sequence at the given index would be out of bounds of the buffer capacity */ public abstract int setCharSequence(int index, CharSequence sequence, Charset charset);
null
val
test
"2021-10-15T15:18:48"
"2021-10-21T08:54:57Z"
wo883721
val
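The corrected contract is easy to demonstrate: `set*` methods take an explicit index and leave `writerIndex` untouched, while `write*` methods advance it by the bytes written. A minimal sketch (run with `-ea`; the class name is illustrative):

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public final class SetVsWrite {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(32);
        int written = buf.setCharSequence(0, "hello", CharsetUtil.UTF_8);
        // "set" methods leave the writer index alone and just return the byte count...
        assert written == 5 && buf.writerIndex() == 0;
        // ...while "write" methods advance the writer index by the bytes written.
        buf.writeCharSequence("hello", CharsetUtil.UTF_8);
        assert buf.writerIndex() == 5;
    }
}
```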
netty/netty/10821_11858
netty/netty
netty/netty/10821
netty/netty/11858
[ "connected" ]
2b4ac369669116d152ec8893910a04cdf4d94c65
40196a6305f7bfb5a3490196360b0862c9a0470d
[ "Is this reproducible for you ?", "Also I wonder if there was another exception before this as well .", "We are not able to reproduce this behavior. There was no other exception thrown before this.", "I was not able to spot any bug yet :/ ", "We saw this issue re-occur with the same exact stack trace. There were no other exceptions logged before this. @normanmaurer are you looking to see some specific exception messaging from a specific file ? Also what is the log level for that line ?", "Just share whatever exception was logged ", "Since we're suspecting the Recycler is at fault here, I'm going to close this because the Recycler has been rewritten for Netty 4.1.71.", "@chrisvest I am facing similar issue while PooledByteBuAllocator while writing Http/2 request\r\n\r\nAttaching the stack trace\r\n\r\njava.lang.ArrayIndexOutOfBoundsException: Index 39 out of bounds for length 39\r\n\tat io.netty.buffer.PoolArena.findSubpagePoolHead(PoolArena.java:267) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolChunk.free(PoolChunk.java:453) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolChunkList.free(PoolChunkList.java:120) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolArena.freeChunk(PoolArena.java:258) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolThreadCache$MemoryRegionCache.freeEntry(PoolThreadCache.java:430) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolThreadCache$MemoryRegionCache.free(PoolThreadCache.java:396) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolThreadCache$MemoryRegionCache.trim(PoolThreadCache.java:414) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:277) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:269) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:258) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolThreadCache.allocate(PoolThreadCache.java:170) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolThreadCache.allocateSmall(PoolThreadCache.java:151) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolArena.tcacheAllocateSmall(PoolArena.java:148) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolArena.allocate(PoolArena.java:134) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PoolArena.allocate(PoolArena.java:126) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.PooledByteBufAllocator.newDirectBuffer(PooledByteBufAllocator.java:395) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:188) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:179) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.buffer.AbstractByteBufAllocator.buffer(AbstractByteBufAllocator.java:116) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.handler.codec.http2.DefaultHttp2FrameWriter.writeHeadersInternal(DefaultHttp2FrameWriter.java:515) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.handler.codec.http2.DefaultHttp2FrameWriter.writeHeaders(DefaultHttp2FrameWriter.java:260) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat 
io.netty.handler.codec.http2.DefaultHttp2ConnectionEncoder.sendHeaders(DefaultHttp2ConnectionEncoder.java:184) ~[netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.handler.codec.http2.DefaultHttp2ConnectionEncoder.writeHeaders0(DefaultHttp2ConnectionEncoder.java:233) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.handler.codec.http2.DefaultHttp2ConnectionEncoder.writeHeaders(DefaultHttp2ConnectionEncoder.java:151) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.handler.codec.http2.DecoratingHttp2FrameWriter.writeHeaders(DecoratingHttp2FrameWriter.java:45) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat com.rt.jio.scp.http2.client.active.standby.ScpHttp2EgressActiveStandbyClientRequestHandler.write(ScpHttp2EgressActiveStandbyClientRequestHandler.java:118) [rtSDP_Run.jar:?]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:717) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeWrite(AbstractChannelHandlerContext.java:709) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.channel.AbstractChannelHandlerContext$WriteTask.run(AbstractChannelHandlerContext.java:1069) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:469) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:384) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:986) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) [netty-all-4.1.68.Final.jar:4.1.68.Final]\r\n\tat java.lang.Thread.run(Thread.java:832) [?:?]", "@EzajAnsari Did this fix your issue?\r\n\r\n@chrisvest When will 4.1.71 be released? I have been facing similar issue as @EzajAnsari for a couple of months and its blocking a production rollout. I am hoping [this](https://github.com/netty/netty/pull/11858) fixes the below issue too. 
\r\n\r\n```\r\njava.lang.ArrayIndexOutOfBoundsException: 39, exiting...\r\nio.netty.handler.codec.DecoderException: java.lang.ArrayIndexOutOfBoundsException: 39\r\nat io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:478)\r\nat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\nat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\nat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\nat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919)\r\nat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166)\r\nat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:719)\r\nat io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:655)\r\nat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:581)\r\nat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493)\r\nat io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989)\r\nat io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)\r\nat java.lang.Thread.run(Thread.java:748)\r\nCaused by: java.lang.ArrayIndexOutOfBoundsException: 39\r\nat io.netty.buffer.PoolArena.findSubpagePoolHead(PoolArena.java:267)\r\nat io.netty.buffer.PoolChunk.free(PoolChunk.java:453)\r\nat io.netty.buffer.PoolChunkList.free(PoolChunkList.java:120)\r\nat io.netty.buffer.PoolArena.freeChunk(PoolArena.java:258)\r\nat io.netty.buffer.PoolThreadCache$MemoryRegionCache.freeEntry(PoolThreadCache.java:430)\r\nat io.netty.buffer.PoolThreadCache$MemoryRegionCache.free(PoolThreadCache.java:396)\r\nat io.netty.buffer.PoolThreadCache$MemoryRegionCache.trim(PoolThreadCache.java:414)\r\nat io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:277)\r\nat io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:269)\r\nat io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:258)\r\nat io.netty.buffer.PoolThreadCache.allocate(PoolThreadCache.java:170)\r\nat io.netty.buffer.PoolThreadCache.allocateSmall(PoolThreadCache.java:151)\r\nat io.netty.buffer.PoolArena.tcacheAllocateSmall(PoolArena.java:148)\r\nat io.netty.buffer.PoolArena.allocate(PoolArena.java:134)\r\nat io.netty.buffer.PoolArena.allocate(PoolArena.java:126)\r\nat io.netty.buffer.PooledByteBufAllocator.newDirectBuffer(PooledByteBufAllocator.java:395)\r\nat io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:187)\r\nat io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:178)\r\nat io.netty.handler.ssl.SslHandler.allocate(SslHandler.java:2194)\r\nat io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1402)\r\nat io.netty.handler.ssl.SslHandler.decodeNonJdkCompatible(SslHandler.java:1265)\r\nat io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1302)\r\nat 
io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:508)\r\nat io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:447)\r\n... 16 more\r\n```", "@atanu1991 today ", "> @EzajAnsari Did this fix your issue?\r\n> \r\n> @chrisvest When will 4.1.71 be released? I have been facing similar issue as @EzajAnsari for a couple of months and its blocking a production rollout. I am hoping [this](https://github.com/netty/netty/pull/11858) fixes the below issue too.\r\n> \r\n> ```\r\n> java.lang.ArrayIndexOutOfBoundsException: 39, exiting...\r\n> io.netty.handler.codec.DecoderException: java.lang.ArrayIndexOutOfBoundsException: 39\r\n> at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:478)\r\n> at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276)\r\n> at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n> at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n> at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\r\n> at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410)\r\n> at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\r\n> at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\r\n> at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919)\r\n> at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166)\r\n> at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:719)\r\n> at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:655)\r\n> at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:581)\r\n> at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493)\r\n> at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989)\r\n> at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)\r\n> at java.lang.Thread.run(Thread.java:748)\r\n> Caused by: java.lang.ArrayIndexOutOfBoundsException: 39\r\n> at io.netty.buffer.PoolArena.findSubpagePoolHead(PoolArena.java:267)\r\n> at io.netty.buffer.PoolChunk.free(PoolChunk.java:453)\r\n> at io.netty.buffer.PoolChunkList.free(PoolChunkList.java:120)\r\n> at io.netty.buffer.PoolArena.freeChunk(PoolArena.java:258)\r\n> at io.netty.buffer.PoolThreadCache$MemoryRegionCache.freeEntry(PoolThreadCache.java:430)\r\n> at io.netty.buffer.PoolThreadCache$MemoryRegionCache.free(PoolThreadCache.java:396)\r\n> at io.netty.buffer.PoolThreadCache$MemoryRegionCache.trim(PoolThreadCache.java:414)\r\n> at io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:277)\r\n> at io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:269)\r\n> at io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:258)\r\n> at io.netty.buffer.PoolThreadCache.allocate(PoolThreadCache.java:170)\r\n> at io.netty.buffer.PoolThreadCache.allocateSmall(PoolThreadCache.java:151)\r\n> at io.netty.buffer.PoolArena.tcacheAllocateSmall(PoolArena.java:148)\r\n> at io.netty.buffer.PoolArena.allocate(PoolArena.java:134)\r\n> at io.netty.buffer.PoolArena.allocate(PoolArena.java:126)\r\n> at 
io.netty.buffer.PooledByteBufAllocator.newDirectBuffer(PooledByteBufAllocator.java:395)\r\n> at io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:187)\r\n> at io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:178)\r\n> at io.netty.handler.ssl.SslHandler.allocate(SslHandler.java:2194)\r\n> at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1402)\r\n> at io.netty.handler.ssl.SslHandler.decodeNonJdkCompatible(SslHandler.java:1265)\r\n> at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1302)\r\n> at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:508)\r\n> at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:447)\r\n> ... 16 more\r\n> ```\r\n\r\n@chrisvest The error seems to still exist with 4.1.71 version of Netty. Any clue why this could be happening? Why is the number 39 (ArrayIndexOutOfBoundsException) always fixed?", "@atanu1991 can you give us a stacktrace with 4.1.71.Final in use ?", "@atanu1991 Also, if you could enable debug logging and collect the `-Dio.netty.allocator.*` lines, that would be helpful.", "@chrisvest @normanmaurer \r\n\r\nThis is the log from 4.1.71.Final\r\n\r\n```\r\njava.lang.ArrayIndexOutOfBoundsException: 39\r\nat io.netty.buffer.PoolArena.findSubpagePoolHead(PoolArena.java:267)\r\nat io.netty.buffer.PoolChunk.free(PoolChunk.java:458)\r\nat io.netty.buffer.PoolChunkList.free(PoolChunkList.java:120)\r\nat io.netty.buffer.PoolArena.freeChunk(PoolArena.java:258)\r\nat io.netty.buffer.PoolThreadCache$MemoryRegionCache.freeEntry(PoolThreadCache.java:430)\r\nat io.netty.buffer.PoolThreadCache$MemoryRegionCache.free(PoolThreadCache.java:396)\r\nat io.netty.buffer.PoolThreadCache$MemoryRegionCache.trim(PoolThreadCache.java:414)\r\nat io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:277)\r\nat io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:269)\r\nat io.netty.buffer.PoolThreadCache.trim(PoolThreadCache.java:258)\r\nat io.netty.buffer.PoolThreadCache.allocate(PoolThreadCache.java:170)\r\nat io.netty.buffer.PoolThreadCache.allocateSmall(PoolThreadCache.java:151)\r\nat io.netty.buffer.PoolArena.tcacheAllocateSmall(PoolArena.java:148)\r\nat io.netty.buffer.PoolArena.allocate(PoolArena.java:134)\r\nat io.netty.buffer.PoolArena.allocate(PoolArena.java:126)\r\nat io.netty.buffer.PooledByteBufAllocator.newDirectBuffer(PooledByteBufAllocator.java:395)\r\nat io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:188)\r\nat io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:179)\r\nat io.netty.handler.ssl.SslHandler.wrap(SslHandler.java:1031)\r\nat io.netty.handler.ssl.SslHandler.wrap(SslHandler.java:826)\r\nat io.netty.handler.ssl.SslHandler.wrapAndFlush(SslHandler.java:797)\r\nat io.netty.handler.ssl.SslHandler.flush(SslHandler.java:778)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:750)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:742)\r\nat io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:728)\r\nat io.netty.handler.flush.FlushConsolidationHandler.flushNow(FlushConsolidationHandler.java:204)\r\nat io.netty.handler.flush.FlushConsolidationHandler.flushIfNeeded(FlushConsolidationHandler.java:197)\r\nat 
io.netty.handler.flush.FlushConsolidationHandler.resetReadAndFlushIfNeeded(FlushConsolidationHandler.java:192)\r\nat io.netty.handler.flush.FlushConsolidationHandler.channelReadComplete(FlushConsolidationHandler.java:145)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelReadComplete(AbstractChannelHandlerContext.java:410)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelReadComplete(AbstractChannelHandlerContext.java:397)\r\nat io.netty.channel.AbstractChannelHandlerContext.fireChannelReadComplete(AbstractChannelHandlerContext.java:390)\r\nat io.netty.handler.ssl.SslHandler.channelReadComplete0(SslHandler.java:1302)\r\nat io.netty.handler.ssl.SslHandler.channelReadComplete(SslHandler.java:1291)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelReadComplete(AbstractChannelHandlerContext.java:410)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelReadComplete(AbstractChannelHandlerContext.java:397)\r\nat io.netty.channel.AbstractChannelHandlerContext.fireChannelReadComplete(AbstractChannelHandlerContext.java:390)\r\nat io.netty.channel.DefaultChannelPipeline$HeadContext.channelReadComplete(DefaultChannelPipeline.java:1415)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelReadComplete(AbstractChannelHandlerContext.java:410)\r\nat io.netty.channel.AbstractChannelHandlerContext.invokeChannelReadComplete(AbstractChannelHandlerContext.java:397)\r\nat io.netty.channel.DefaultChannelPipeline.fireChannelReadComplete(DefaultChannelPipeline.java:925)\r\nat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:171)\r\nat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:722)\r\nat io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:658)\r\nat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:584)\r\nat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:496)\r\nat io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:986)\r\nat io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)\r\nat java.lang.Thread.run(Thread.java:748)\r\n```\r\n\r\nFor context, we are using Netty via Apache Giraph: https://github.com/apache/giraph\r\nAnother point to note is that this is flaky: the same job sometimes passes and sometimes throws the above exception and fails.\r\n\r\nI did start our service in debug mode but I don't see the io.netty.allocator log lines. Is there some special command I need to pass?", "@chrisvest @normanmaurer \r\n\r\nAny thoughts on this?", "@atanu1991 didn't have time yet. Are you able to share a reproducer?", "https://github.com/netty/netty/pull/11939" ]
[ "why not use an mpsc unbounded queue from JCTools here?\r\n\r\n`claim()` is supposed to be called by the same thread (and its corresponding drain too), while filling it could happen from several threads; it looks like an easy way to avoid explicit synchronization\r\n\r\n", "draining the inbox would be worthwhile if there isn't anything on pooledHandles, so we can defer draining until it is necessary or until the external mailbox gets \"too big\"?", "Good call. This simplifies the code even more. Performance is roughly unchanged in my measurements.", "No longer relevant.", "I would expose the chunk size of the queue: it affects the memory footprint while the pool is in steady state, i.e. when it contains fewer than the next power of 2 of the chunk size elements\r\n\r\nhttps://github.com/JCTools/JCTools/blob/master/jctools-core/src/main/java/org/jctools/queues/MpscGrowableArrayQueue.java#L28 is a good fit as well: it would let the chunk capacity change according to the load (max reached capacity) until it becomes stable; but that means its footprint will be the max capacity ever reached.", "Nice one: if the release vs acquire rate isn't that high it shouldn't indeed, but if there are many interleaving release/acquire operations it would, because of the contention between draining from the inbox and releasing to it", "nit: why not just create this directly? I think there is really no need for the extra static block here.", "Because that would require two casts in a row in order to get the generics to line up. And that would look ugly. We've done static blocks for this situation in other places, so it's not a new pattern." ]
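To make the queue-shape argument in these review comments concrete, here is a rough sketch of the claim/release split over a JCTools MPSC queue: any thread may release, but only the owning thread claims. The `MpscBackedPool` name and the capacities are invented for illustration; the actual patch (below) additionally tags each handle with a claimed/available state.

```java
import org.jctools.queues.MpscChunkedArrayQueue;

import java.util.Queue;

// Illustrative pool core: release() may run on any thread (multi-producer),
// claim() only on the owning thread (single consumer).
final class MpscBackedPool<T> {
    private final Queue<T> handles = new MpscChunkedArrayQueue<T>(32, 4096); // chunk size, max capacity

    T claim() {
        return handles.poll(); // returns null when the pool is empty
    }

    boolean release(T handle) {
        return handles.offer(handle); // silently drops objects once maxCapacity is reached
    }
}
```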
"2021-11-24T17:06:09Z"
[]
ArrayIndexOutOfBoundsException With PooledByteBufAllocator Recycler implementation
### Issue Description While trying to write an HttpMessage using HTTP1 with HttpClientCodec, i.e. HttpObjectEncoder, Netty is throwing an ArrayIndexOutOfBoundsException while trying to allocate a buffer for headers. The exception seems to be happening in the PooledByteBufAllocator Recycler implementation. ``` Caused by: java.lang.ArrayIndexOutOfBoundsException: Index -841651488 out of bounds for length 256 at io.netty.util.Recycler$Stack.pushNow(Recycler.java:666) at io.netty.util.Recycler$Stack.push(Recycler.java:642) at io.netty.util.Recycler$DefaultHandle.recycle(Recycler.java:236) at io.netty.buffer.PoolThreadCache$MemoryRegionCache$Entry.recycle(PoolThreadCache.java:482) at io.netty.buffer.PoolThreadCache$MemoryRegionCache.allocate(PoolThreadCache.java:412) at io.netty.buffer.PoolThreadCache.allocate(PoolThreadCache.java:187) at io.netty.buffer.PoolThreadCache.allocateSmall(PoolThreadCache.java:171) at io.netty.buffer.PoolArena.allocate(PoolArena.java:189) at io.netty.buffer.PoolArena.allocate(PoolArena.java:147) at io.netty.buffer.PooledByteBufAllocator.newDirectBuffer(PooledByteBufAllocator.java:356) at io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:187) at io.netty.buffer.AbstractByteBufAllocator.directBuffer(AbstractByteBufAllocator.java:178) at io.netty.buffer.AbstractByteBufAllocator.buffer(AbstractByteBufAllocator.java:115) at io.netty.handler.codec.http.HttpObjectEncoder.encode(HttpObjectEncoder.java:93) at io.netty.handler.codec.http.HttpClientCodec$Encoder.encode(HttpClientCodec.java:189) at io.netty.handler.codec.MessageToMessageEncoder.write(MessageToMessageEncoder.java:89) ... 111 more ``` A new buffer is being allocated in the encoder here - https://github.com/netty/netty/blob/ee3b9a5f7b1829e1095fdbbccb5490949ac3e94e/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectEncoder.java#L93. Recycler code triggering the exception - https://github.com/netty/netty/blob/ee3b9a5f7b1829e1095fdbbccb5490949ac3e94e/common/src/main/java/io/netty/util/Recycler.java#L666 A similar issue was seen with the Recycler in the past - https://github.com/netty/netty/issues/9608 ### Netty version 4.1.51.Final ### JVM version Java 11 ### OS version x86_64 GNU/Linux
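For context, the `Recycler` usage implicated by this stack trace, an object claimed on one thread and recycled on another, can be sketched as follows. This is only a hedged illustration: the `Pooled` class is invented here, and the snippet is not a guaranteed reproducer of the reported race.

```java
import io.netty.util.Recycler;

public final class RecyclerCrossThreadSketch {
    // Hypothetical pooled type; Netty internals such as PoolThreadCache entries follow this shape.
    private static final class Pooled {
        private final Recycler.Handle<Pooled> handle;
        Pooled(Recycler.Handle<Pooled> handle) { this.handle = handle; }
        void recycle() { handle.recycle(this); }
    }

    private static final Recycler<Pooled> RECYCLER = new Recycler<Pooled>() {
        @Override
        protected Pooled newObject(Recycler.Handle<Pooled> handle) {
            return new Pooled(handle);
        }
    };

    public static void main(String[] args) throws InterruptedException {
        final Pooled obj = RECYCLER.get(); // claimed on the main thread
        Thread releaser = new Thread(new Runnable() {
            @Override
            public void run() {
                obj.recycle(); // released on a different thread: the cross-thread push path
            }
        });
        releaser.start();
        releaser.join();
    }
}
```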
[ "common/src/main/java/io/netty/util/Recycler.java", "common/src/main/java/io/netty/util/internal/PlatformDependent.java" ]
[ "common/src/main/java/io/netty/util/Recycler.java", "common/src/main/java/io/netty/util/internal/PlatformDependent.java", "microbench/src/main/java/io/netty/microbench/util/RecyclerBenchmark.java" ]
[ "common/src/test/java/io/netty/util/RecyclerTest.java" ]
diff --git a/common/src/main/java/io/netty/util/Recycler.java b/common/src/main/java/io/netty/util/Recycler.java index eec543f2d22..373a79251b4 100644 --- a/common/src/main/java/io/netty/util/Recycler.java +++ b/common/src/main/java/io/netty/util/Recycler.java @@ -13,23 +13,18 @@ * License for the specific language governing permissions and limitations * under the License. */ - package io.netty.util; import io.netty.util.concurrent.FastThreadLocal; import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.SystemPropertyUtil; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; -import java.lang.ref.WeakReference; -import java.util.Arrays; -import java.util.Map; -import java.util.WeakHashMap; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.Queue; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import static io.netty.util.internal.MathUtil.safeFindNextPositivePowerOfTwo; import static java.lang.Math.max; import static java.lang.Math.min; @@ -39,26 +34,22 @@ * @param <T> the type of the pooled object */ public abstract class Recycler<T> { - private static final InternalLogger logger = InternalLoggerFactory.getInstance(Recycler.class); - - @SuppressWarnings("rawtypes") - private static final Handle NOOP_HANDLE = new Handle() { + private static final Handle<?> NOOP_HANDLE = new Handle<Object>() { @Override public void recycle(Object object) { // NOOP } + + @Override + public String toString() { + return "NOOP_HANDLE"; + } }; - private static final AtomicInteger ID_GENERATOR = new AtomicInteger(Integer.MIN_VALUE); - private static final int OWN_THREAD_ID = ID_GENERATOR.getAndIncrement(); private static final int DEFAULT_INITIAL_MAX_CAPACITY_PER_THREAD = 4 * 1024; // Use 4k instances as default. private static final int DEFAULT_MAX_CAPACITY_PER_THREAD; - private static final int INITIAL_CAPACITY; - private static final int MAX_SHARED_CAPACITY_FACTOR; - private static final int MAX_DELAYED_QUEUES_PER_THREAD; - private static final int LINK_CAPACITY; private static final int RATIO; - private static final int DELAYED_QUEUE_RATIO; + private static final int DEFAULT_QUEUE_CHUNK_SIZE_PER_THREAD; static { // In the future, we might have different maxCapacity for different object types. @@ -71,65 +62,33 @@ public void recycle(Object object) { } DEFAULT_MAX_CAPACITY_PER_THREAD = maxCapacityPerThread; - - MAX_SHARED_CAPACITY_FACTOR = max(2, - SystemPropertyUtil.getInt("io.netty.recycler.maxSharedCapacityFactor", - 2)); - - MAX_DELAYED_QUEUES_PER_THREAD = max(0, - SystemPropertyUtil.getInt("io.netty.recycler.maxDelayedQueuesPerThread", - // We use the same value as default EventLoop number - NettyRuntime.availableProcessors() * 2)); - - LINK_CAPACITY = safeFindNextPositivePowerOfTwo( - max(SystemPropertyUtil.getInt("io.netty.recycler.linkCapacity", 16), 16)); + DEFAULT_QUEUE_CHUNK_SIZE_PER_THREAD = SystemPropertyUtil.getInt("io.netty.recycler.chunkSize", 32); // By default we allow one push to a Recycler for each 8th try on handles that were never recycled before. // This should help to slowly increase the capacity of the recycler while not be too sensitive to allocation // bursts. 
RATIO = max(0, SystemPropertyUtil.getInt("io.netty.recycler.ratio", 8)); - DELAYED_QUEUE_RATIO = max(0, SystemPropertyUtil.getInt("io.netty.recycler.delayedQueue.ratio", RATIO)); - - INITIAL_CAPACITY = min(DEFAULT_MAX_CAPACITY_PER_THREAD, 256); if (logger.isDebugEnabled()) { if (DEFAULT_MAX_CAPACITY_PER_THREAD == 0) { logger.debug("-Dio.netty.recycler.maxCapacityPerThread: disabled"); - logger.debug("-Dio.netty.recycler.maxSharedCapacityFactor: disabled"); - logger.debug("-Dio.netty.recycler.linkCapacity: disabled"); logger.debug("-Dio.netty.recycler.ratio: disabled"); - logger.debug("-Dio.netty.recycler.delayedQueue.ratio: disabled"); + logger.debug("-Dio.netty.recycler.chunkSize: disabled"); } else { logger.debug("-Dio.netty.recycler.maxCapacityPerThread: {}", DEFAULT_MAX_CAPACITY_PER_THREAD); - logger.debug("-Dio.netty.recycler.maxSharedCapacityFactor: {}", MAX_SHARED_CAPACITY_FACTOR); - logger.debug("-Dio.netty.recycler.linkCapacity: {}", LINK_CAPACITY); logger.debug("-Dio.netty.recycler.ratio: {}", RATIO); - logger.debug("-Dio.netty.recycler.delayedQueue.ratio: {}", DELAYED_QUEUE_RATIO); + logger.debug("-Dio.netty.recycler.chunkSize: {}", DEFAULT_QUEUE_CHUNK_SIZE_PER_THREAD); } } } private final int maxCapacityPerThread; - private final int maxSharedCapacityFactor; private final int interval; - private final int maxDelayedQueuesPerThread; - private final int delayedQueueInterval; - - private final FastThreadLocal<Stack<T>> threadLocal = new FastThreadLocal<Stack<T>>() { + private final int chunkSize; + private final FastThreadLocal<LocalPool<T>> threadLocal = new FastThreadLocal<LocalPool<T>>() { @Override - protected Stack<T> initialValue() { - return new Stack<T>(Recycler.this, Thread.currentThread(), maxCapacityPerThread, maxSharedCapacityFactor, - interval, maxDelayedQueuesPerThread, delayedQueueInterval); - } - - @Override - protected void onRemoval(Stack<T> value) { - // Let us remove the WeakOrderQueue from the WeakHashMap directly if its safe to remove some overhead - if (value.threadRef.get() == Thread.currentThread()) { - if (DELAYED_RECYCLED.isSet()) { - DELAYED_RECYCLED.get().remove(value); - } - } + protected LocalPool<T> initialValue() { + return new LocalPool<T>(maxCapacityPerThread, interval, chunkSize); } }; @@ -138,31 +97,49 @@ protected Recycler() { } protected Recycler(int maxCapacityPerThread) { - this(maxCapacityPerThread, MAX_SHARED_CAPACITY_FACTOR); + this(maxCapacityPerThread, RATIO, DEFAULT_QUEUE_CHUNK_SIZE_PER_THREAD); } + /** + * @deprecated Use one of the following instead: + * {@link #Recycler()}, {@link #Recycler(int)}, {@link #Recycler(int, int, int)}. + */ + @Deprecated + @SuppressWarnings("unused") // Parameters we can't remove due to compatibility. protected Recycler(int maxCapacityPerThread, int maxSharedCapacityFactor) { - this(maxCapacityPerThread, maxSharedCapacityFactor, RATIO, MAX_DELAYED_QUEUES_PER_THREAD); + this(maxCapacityPerThread, RATIO, DEFAULT_QUEUE_CHUNK_SIZE_PER_THREAD); } + /** + * @deprecated Use one of the following instead: + * {@link #Recycler()}, {@link #Recycler(int)}, {@link #Recycler(int, int, int)}. + */ + @Deprecated + @SuppressWarnings("unused") // Parameters we can't remove due to compatibility. 
protected Recycler(int maxCapacityPerThread, int maxSharedCapacityFactor, int ratio, int maxDelayedQueuesPerThread) { - this(maxCapacityPerThread, maxSharedCapacityFactor, ratio, maxDelayedQueuesPerThread, - DELAYED_QUEUE_RATIO); + this(maxCapacityPerThread, ratio, DEFAULT_QUEUE_CHUNK_SIZE_PER_THREAD); } + /** + * @deprecated Use one of the following instead: + * {@link #Recycler()}, {@link #Recycler(int)}, {@link #Recycler(int, int, int)}. + */ + @Deprecated + @SuppressWarnings("unused") // Parameters we can't remove due to compatibility. protected Recycler(int maxCapacityPerThread, int maxSharedCapacityFactor, int ratio, int maxDelayedQueuesPerThread, int delayedQueueRatio) { + this(maxCapacityPerThread, ratio, DEFAULT_QUEUE_CHUNK_SIZE_PER_THREAD); + } + + protected Recycler(int maxCapacityPerThread, int ratio, int chunkSize) { interval = max(0, ratio); - delayedQueueInterval = max(0, delayedQueueRatio); if (maxCapacityPerThread <= 0) { this.maxCapacityPerThread = 0; - this.maxSharedCapacityFactor = 1; - this.maxDelayedQueuesPerThread = 0; + this.chunkSize = 0; } else { - this.maxCapacityPerThread = maxCapacityPerThread; - this.maxSharedCapacityFactor = max(1, maxSharedCapacityFactor); - this.maxDelayedQueuesPerThread = max(0, maxDelayedQueuesPerThread); + this.maxCapacityPerThread = max(4, maxCapacityPerThread); + this.chunkSize = max(2, min(chunkSize, this.maxCapacityPerThread >> 1)); } } @@ -171,13 +148,22 @@ public final T get() { if (maxCapacityPerThread == 0) { return newObject((Handle<T>) NOOP_HANDLE); } - Stack<T> stack = threadLocal.get(); - DefaultHandle<T> handle = stack.pop(); + LocalPool<T> localPool = threadLocal.get(); + DefaultHandle<T> handle = localPool.claim(); + T obj; if (handle == null) { - handle = stack.newHandle(); - handle.value = newObject(handle); + handle = localPool.newHandle(); + if (handle != null) { + obj = newObject(handle); + handle.set(obj); + } else { + obj = newObject((Handle<T>) NOOP_HANDLE); + } + } else { + obj = handle.get(); } - return (T) handle.value; + + return obj; } /** @@ -189,46 +175,36 @@ public final boolean recycle(T o, Handle<T> handle) { return false; } - DefaultHandle<T> h = (DefaultHandle<T>) handle; - if (h.stack.parent != this) { - return false; - } - - h.recycle(o); + handle.recycle(o); return true; } - final int threadLocalCapacity() { - return threadLocal.get().elements.length; - } - final int threadLocalSize() { - return threadLocal.get().size; + return threadLocal.get().pooledHandles.size(); } protected abstract T newObject(Handle<T> handle); + @SuppressWarnings("ClassNameSameAsAncestorName") // Can't change this due to compatibility. 
public interface Handle<T> extends ObjectPool.Handle<T> { } - @SuppressWarnings("unchecked") private static final class DefaultHandle<T> implements Handle<T> { - private static final AtomicIntegerFieldUpdater<DefaultHandle<?>> LAST_RECYCLED_ID_UPDATER; + private static final int STATE_CLAIMED = 0; + private static final int STATE_AVAILABLE = 1; + private static final AtomicIntegerFieldUpdater<DefaultHandle<?>> STATE_UPDATER; static { - AtomicIntegerFieldUpdater<?> updater = AtomicIntegerFieldUpdater.newUpdater( - DefaultHandle.class, "lastRecycledId"); - LAST_RECYCLED_ID_UPDATER = (AtomicIntegerFieldUpdater<DefaultHandle<?>>) updater; + AtomicIntegerFieldUpdater<?> updater = AtomicIntegerFieldUpdater.newUpdater(DefaultHandle.class, "state"); + //noinspection unchecked + STATE_UPDATER = (AtomicIntegerFieldUpdater<DefaultHandle<?>>) updater; } - volatile int lastRecycledId; - int recycleId; - - boolean hasBeenRecycled; - - Stack<?> stack; - Object value; + @SuppressWarnings({"FieldMayBeFinal", "unused"}) // Updated by STATE_UPDATER. + private volatile int state; // State is initialised to STATE_CLAIMED (aka. 0) so they can be released. + private final LocalPool<T> localPool; + private T value; - DefaultHandle(Stack<?> stack) { - this.stack = stack; + DefaultHandle(LocalPool<T> localPool) { + this.localPool = localPool; } @Override @@ -236,511 +212,63 @@ public void recycle(Object object) { if (object != value) { throw new IllegalArgumentException("object does not belong to handle"); } - - Stack<?> stack = this.stack; - if (lastRecycledId != recycleId || stack == null) { - throw new IllegalStateException("recycled already"); - } - - stack.push(this); - } - - public boolean compareAndSetLastRecycledId(int expectLastRecycledId, int updateLastRecycledId) { - // Use "weak…" because we do not need synchronize-with ordering, only atomicity. - // Also, spurious failures are fine, since no code should rely on recycling for correctness. - return LAST_RECYCLED_ID_UPDATER.weakCompareAndSet(this, expectLastRecycledId, updateLastRecycledId); + localPool.release(this); } - } - private static final FastThreadLocal<Map<Stack<?>, WeakOrderQueue>> DELAYED_RECYCLED = - new FastThreadLocal<Map<Stack<?>, WeakOrderQueue>>() { - @Override - protected Map<Stack<?>, WeakOrderQueue> initialValue() { - return new WeakHashMap<Stack<?>, WeakOrderQueue>(); + T get() { + return value; } - }; - - // a queue that makes only moderate guarantees about visibility: items are seen in the correct order, - // but we aren't absolutely guaranteed to ever see anything at all, thereby keeping the queue cheap to maintain - private static final class WeakOrderQueue extends WeakReference<Thread> { - - static final WeakOrderQueue DUMMY = new WeakOrderQueue(); - - // Let Link extend AtomicInteger for intrinsics. The Link itself will be used as writerIndex. - @SuppressWarnings("serial") - static final class Link extends AtomicInteger { - final DefaultHandle<?>[] elements = new DefaultHandle[LINK_CAPACITY]; - int readIndex; - Link next; + void set(T value) { + this.value = value; } - // Its important this does not hold any reference to either Stack or WeakOrderQueue. - private static final class Head { - private final AtomicInteger availableSharedCapacity; - - Link link; - - Head(AtomicInteger availableSharedCapacity) { - this.availableSharedCapacity = availableSharedCapacity; - } - - /** - * Reclaim all used space and also unlink the nodes to prevent GC nepotism. 
- */ - void reclaimAllSpaceAndUnlink() { - Link head = link; - link = null; - int reclaimSpace = 0; - while (head != null) { - reclaimSpace += LINK_CAPACITY; - Link next = head.next; - // Unlink to help GC and guard against GC nepotism. - head.next = null; - head = next; - } - if (reclaimSpace > 0) { - reclaimSpace(reclaimSpace); - } - } - - private void reclaimSpace(int space) { - availableSharedCapacity.addAndGet(space); - } - - void relink(Link link) { - reclaimSpace(LINK_CAPACITY); - this.link = link; - } - - /** - * Creates a new {@link} and returns it if we can reserve enough space for it, otherwise it - * returns {@code null}. - */ - Link newLink() { - return reserveSpaceForLink(availableSharedCapacity) ? new Link() : null; - } - - static boolean reserveSpaceForLink(AtomicInteger availableSharedCapacity) { - for (;;) { - int available = availableSharedCapacity.get(); - if (available < LINK_CAPACITY) { - return false; - } - if (availableSharedCapacity.compareAndSet(available, available - LINK_CAPACITY)) { - return true; - } - } - } - } - - // chain of data items - private final Head head; - private Link tail; - // pointer to another queue of delayed items for the same stack - private WeakOrderQueue next; - private final int id = ID_GENERATOR.getAndIncrement(); - private final int interval; - private int handleRecycleCount; - - private WeakOrderQueue() { - super(null); - head = new Head(null); - interval = 0; - } - - private WeakOrderQueue(Stack<?> stack, Thread thread) { - super(thread); - tail = new Link(); - - // Its important that we not store the Stack itself in the WeakOrderQueue as the Stack also is used in - // the WeakHashMap as key. So just store the enclosed AtomicInteger which should allow to have the - // Stack itself GCed. - head = new Head(stack.availableSharedCapacity); - head.link = tail; - interval = stack.delayedQueueInterval; - handleRecycleCount = interval; // Start at interval so the first one will be recycled. - } - - static WeakOrderQueue newQueue(Stack<?> stack, Thread thread) { - // We allocated a Link so reserve the space - if (!Head.reserveSpaceForLink(stack.availableSharedCapacity)) { - return null; - } - final WeakOrderQueue queue = new WeakOrderQueue(stack, thread); - // Done outside of the constructor to ensure WeakOrderQueue.this does not escape the constructor and so - // may be accessed while its still constructed. - stack.setHead(queue); - - return queue; - } - - WeakOrderQueue getNext() { - return next; - } - - void setNext(WeakOrderQueue next) { - assert next != this; - this.next = next; - } - - void reclaimAllSpaceAndUnlink() { - head.reclaimAllSpaceAndUnlink(); - next = null; - } - - void add(DefaultHandle<?> handle) { - if (!handle.compareAndSetLastRecycledId(0, id)) { - // Separate threads could be racing to add the handle to each their own WeakOrderQueue. - // We only add the handle to the queue if we win the race and observe that lastRecycledId is zero. - return; - } - - // While we also enforce the recycling ratio when we transfer objects from the WeakOrderQueue to the Stack - // we better should enforce it as well early. Missing to do so may let the WeakOrderQueue grow very fast - // without control - if (!handle.hasBeenRecycled) { - if (handleRecycleCount < interval) { - handleRecycleCount++; - // Drop the item to prevent from recycling too aggressively. 
- return; - } - handleRecycleCount = 0; - } - - Link tail = this.tail; - int writeIndex; - if ((writeIndex = tail.get()) == LINK_CAPACITY) { - Link link = head.newLink(); - if (link == null) { - // Drop it. - return; - } - // We allocate a Link so reserve the space - this.tail = tail = tail.next = link; - - writeIndex = tail.get(); - } - tail.elements[writeIndex] = handle; - handle.stack = null; - // we lazy set to ensure that setting stack to null appears before we unnull it in the owning thread; - // this also means we guarantee visibility of an element in the queue if we see the index updated - tail.lazySet(writeIndex + 1); - } - - boolean hasFinalData() { - return tail.readIndex != tail.get(); - } - - // transfer as many items as we can from this queue to the stack, returning true if any were transferred - @SuppressWarnings("rawtypes") - boolean transfer(Stack<?> dst) { - Link head = this.head.link; - if (head == null) { + boolean availableToClaim() { + if (state != STATE_AVAILABLE) { return false; } - - if (head.readIndex == LINK_CAPACITY) { - if (head.next == null) { - return false; - } - head = head.next; - this.head.relink(head); - } - - final int srcStart = head.readIndex; - int srcEnd = head.get(); - final int srcSize = srcEnd - srcStart; - if (srcSize == 0) { - return false; - } - - final int dstSize = dst.size; - final int expectedCapacity = dstSize + srcSize; - - if (expectedCapacity > dst.elements.length) { - final int actualCapacity = dst.increaseCapacity(expectedCapacity); - srcEnd = min(srcStart + actualCapacity - dstSize, srcEnd); - } - - if (srcStart != srcEnd) { - final DefaultHandle[] srcElems = head.elements; - final DefaultHandle[] dstElems = dst.elements; - int newDstSize = dstSize; - for (int i = srcStart; i < srcEnd; i++) { - DefaultHandle<?> element = srcElems[i]; - if (element.recycleId == 0) { - element.recycleId = element.lastRecycledId; - } else if (element.recycleId != element.lastRecycledId) { - throw new IllegalStateException("recycled already"); - } - srcElems[i] = null; - - if (dst.dropHandle(element)) { - // Drop the object. - continue; - } - element.stack = dst; - dstElems[newDstSize ++] = element; - } - - if (srcEnd == LINK_CAPACITY && head.next != null) { - // Add capacity back as the Link is GCed. - this.head.relink(head.next); - } - - head.readIndex = srcEnd; - if (dst.size == newDstSize) { - return false; - } - dst.size = newDstSize; - return true; - } else { - // The destination stack is full already. - return false; - } - } - } - - private static final class Stack<T> { - - // we keep a queue of per-thread queues, which is appended to once only, each time a new thread other - // than the stack owner recycles: when we run out of items in our stack we iterate this collection - // to scavenge those that can be reused. this permits us to incur minimal thread synchronisation whilst - // still recycling all items. - final Recycler<T> parent; - - // We store the Thread in a WeakReference as otherwise we may be the only ones that still hold a strong - // Reference to the Thread itself after it died because DefaultHandle will hold a reference to the Stack. - // - // The biggest issue is if we do not use a WeakReference the Thread may not be able to be collected at all if - // the user will store a reference to the DefaultHandle somewhere and never clear this reference (or not clear - // it in a timely manner). 
- final WeakReference<Thread> threadRef; - final AtomicInteger availableSharedCapacity; - private final int maxDelayedQueues; - - private final int maxCapacity; - private final int interval; - private final int delayedQueueInterval; - DefaultHandle<?>[] elements; - int size; - private int handleRecycleCount; - private WeakOrderQueue cursor, prev; - private volatile WeakOrderQueue head; - - Stack(Recycler<T> parent, Thread thread, int maxCapacity, int maxSharedCapacityFactor, - int interval, int maxDelayedQueues, int delayedQueueInterval) { - this.parent = parent; - threadRef = new WeakReference<Thread>(thread); - this.maxCapacity = maxCapacity; - availableSharedCapacity = new AtomicInteger(max(maxCapacity / maxSharedCapacityFactor, LINK_CAPACITY)); - elements = new DefaultHandle[min(INITIAL_CAPACITY, maxCapacity)]; - this.interval = interval; - this.delayedQueueInterval = delayedQueueInterval; - handleRecycleCount = interval; // Start at interval so the first one will be recycled. - this.maxDelayedQueues = maxDelayedQueues; - } - - // Marked as synchronized to ensure this is serialized. - synchronized void setHead(WeakOrderQueue queue) { - queue.setNext(head); - head = queue; + return STATE_UPDATER.compareAndSet(this, STATE_AVAILABLE, STATE_CLAIMED); } - int increaseCapacity(int expectedCapacity) { - int newCapacity = elements.length; - int maxCapacity = this.maxCapacity; - do { - newCapacity <<= 1; - } while (newCapacity < expectedCapacity && newCapacity < maxCapacity); - - newCapacity = min(newCapacity, maxCapacity); - if (newCapacity != elements.length) { - elements = Arrays.copyOf(elements, newCapacity); + void toAvailable() { + int prev = STATE_UPDATER.getAndSet(this, STATE_AVAILABLE); + if (prev == STATE_AVAILABLE) { + throw new IllegalStateException("Object has been recycled already."); } - - return newCapacity; - } - - @SuppressWarnings({ "unchecked", "rawtypes" }) - DefaultHandle<T> pop() { - int size = this.size; - if (size == 0) { - if (!scavenge()) { - return null; - } - size = this.size; - if (size <= 0) { - // double check, avoid races - return null; - } - } - size --; - DefaultHandle ret = elements[size]; - elements[size] = null; - // As we already set the element[size] to null we also need to store the updated size before we do - // any validation. Otherwise we may see a null value when later try to pop again without a new element - // added before. - this.size = size; - - if (ret.lastRecycledId != ret.recycleId) { - throw new IllegalStateException("recycled multiple times"); - } - ret.recycleId = 0; - ret.lastRecycledId = 0; - return ret; } + } - private boolean scavenge() { - // continue an existing scavenge, if any - if (scavengeSome()) { - return true; - } + private static final class LocalPool<T> { + private final int ratioInterval; + private final Queue<DefaultHandle<T>> pooledHandles; + private int ratioCounter; - // reset our scavenge cursor - prev = null; - cursor = head; - return false; + LocalPool(int maxCapacity, int ratioInterval, int chunkSize) { + this.ratioInterval = ratioInterval; + pooledHandles = PlatformDependent.newMpscQueue(chunkSize, maxCapacity); + ratioCounter = ratioInterval; // Start at interval so the first one will be recycled. 
} - private boolean scavengeSome() { - WeakOrderQueue prev; - WeakOrderQueue cursor = this.cursor; - if (cursor == null) { - prev = null; - cursor = head; - if (cursor == null) { - return false; - } - } else { - prev = this.prev; - } - - boolean success = false; + DefaultHandle<T> claim() { + Queue<DefaultHandle<T>> pooledHandles = this.pooledHandles; + DefaultHandle<T> handle; do { - if (cursor.transfer(this)) { - success = true; - break; - } - WeakOrderQueue next = cursor.getNext(); - if (cursor.get() == null) { - // If the thread associated with the queue is gone, unlink it, after - // performing a volatile read to confirm there is no data left to collect. - // We never unlink the first queue, as we don't want to synchronize on updating the head. - if (cursor.hasFinalData()) { - for (;;) { - if (cursor.transfer(this)) { - success = true; - } else { - break; - } - } - } - - if (prev != null) { - // Ensure we reclaim all space before dropping the WeakOrderQueue to be GC'ed. - cursor.reclaimAllSpaceAndUnlink(); - prev.setNext(next); - } - } else { - prev = cursor; - } - - cursor = next; - - } while (cursor != null && !success); - - this.prev = prev; - this.cursor = cursor; - return success; - } - - void push(DefaultHandle<?> item) { - Thread currentThread = Thread.currentThread(); - if (threadRef.get() == currentThread) { - // The current Thread is the thread that belongs to the Stack, we can try to push the object now. - pushNow(item); - } else { - // The current Thread is not the one that belongs to the Stack - // (or the Thread that belonged to the Stack was collected already), we need to signal that the push - // happens later. - pushLater(item, currentThread); - } - } - - private void pushNow(DefaultHandle<?> item) { - if (item.recycleId != 0 || !item.compareAndSetLastRecycledId(0, OWN_THREAD_ID)) { - throw new IllegalStateException("recycled already"); - } - item.recycleId = OWN_THREAD_ID; - - int size = this.size; - if (size >= maxCapacity || dropHandle(item)) { - // Hit the maximum capacity or should drop - drop the possibly youngest object. - return; - } - if (size == elements.length) { - elements = Arrays.copyOf(elements, min(size << 1, maxCapacity)); - } - - elements[size] = item; - this.size = size + 1; + handle = pooledHandles.poll(); + } while (handle != null && !handle.availableToClaim()); + return handle; } - private void pushLater(DefaultHandle<?> item, Thread thread) { - if (maxDelayedQueues == 0) { - // We don't support recycling across threads and should just drop the item on the floor. - return; - } - - // we don't want to have a ref to the queue as the value in our weak map - // so we null it out; to ensure there are no races with restoring it later - // we impose a memory ordering here (no-op on x86) - Map<Stack<?>, WeakOrderQueue> delayedRecycled = DELAYED_RECYCLED.get(); - WeakOrderQueue queue = delayedRecycled.get(this); - if (queue == null) { - if (delayedRecycled.size() >= maxDelayedQueues) { - // Add a dummy queue so we know we should drop the object - delayedRecycled.put(this, WeakOrderQueue.DUMMY); - return; - } - // Check if we already reached the maximum number of delayed queues and if we can allocate at all. - if ((queue = newWeakOrderQueue(thread)) == null) { - // drop object - return; - } - delayedRecycled.put(this, queue); - } else if (queue == WeakOrderQueue.DUMMY) { - // drop object - return; - } - - queue.add(item); - } - - /** - * Allocate a new {@link WeakOrderQueue} or return {@code null} if not possible. 
- */ - private WeakOrderQueue newWeakOrderQueue(Thread thread) { - return WeakOrderQueue.newQueue(this, thread); - } - - boolean dropHandle(DefaultHandle<?> handle) { - if (!handle.hasBeenRecycled) { - if (handleRecycleCount < interval) { - handleRecycleCount++; - // Drop the object. - return true; - } - handleRecycleCount = 0; - handle.hasBeenRecycled = true; - } - return false; + void release(DefaultHandle<T> handle) { + handle.toAvailable(); + pooledHandles.offer(handle); } DefaultHandle<T> newHandle() { - return new DefaultHandle<T>(this); + if (++ratioCounter >= ratioInterval) { + ratioCounter = 0; + return new DefaultHandle<T>(this); + } + return null; } } } diff --git a/common/src/main/java/io/netty/util/internal/PlatformDependent.java b/common/src/main/java/io/netty/util/internal/PlatformDependent.java index 099aba86765..a116da2451f 100644 --- a/common/src/main/java/io/netty/util/internal/PlatformDependent.java +++ b/common/src/main/java/io/netty/util/internal/PlatformDependent.java @@ -974,8 +974,12 @@ static <T> Queue<T> newMpscQueue(final int maxCapacity) { // This is forced by the MpscChunkedArrayQueue implementation as will try to round it // up to the next power of two and so will overflow otherwise. final int capacity = max(min(maxCapacity, MAX_ALLOWED_MPSC_CAPACITY), MIN_MAX_MPSC_CAPACITY); - return USE_MPSC_CHUNKED_ARRAY_QUEUE ? new MpscChunkedArrayQueue<T>(MPSC_CHUNK_SIZE, capacity) - : new MpscChunkedAtomicArrayQueue<T>(MPSC_CHUNK_SIZE, capacity); + return newChunkedMpscQueue(MPSC_CHUNK_SIZE, capacity); + } + + static <T> Queue<T> newChunkedMpscQueue(final int chunkSize, final int capacity) { + return USE_MPSC_CHUNKED_ARRAY_QUEUE ? new MpscChunkedArrayQueue<T>(chunkSize, capacity) + : new MpscChunkedAtomicArrayQueue<T>(chunkSize, capacity); } static <T> Queue<T> newMpscQueue() { @@ -1001,6 +1005,15 @@ public static <T> Queue<T> newMpscQueue(final int maxCapacity) { return Mpsc.newMpscQueue(maxCapacity); } + /** + * Create a new {@link Queue} which is safe to use for multiple producers (different threads) and a single + * consumer (one thread!). + * The queue will grow and shrink its capacity in units of the given chunk size. + */ + public static <T> Queue<T> newMpscQueue(final int chunkSize, final int maxCapacity) { + return Mpsc.newChunkedMpscQueue(chunkSize, maxCapacity); + } + /** * Create a new {@link Queue} which is safe to use for single producer (one thread!) and a single * consumer (one thread!). diff --git a/microbench/src/main/java/io/netty/microbench/util/RecyclerBenchmark.java b/microbench/src/main/java/io/netty/microbench/util/RecyclerBenchmark.java new file mode 100644 index 00000000000..eb02774dbb6 --- /dev/null +++ b/microbench/src/main/java/io/netty/microbench/util/RecyclerBenchmark.java @@ -0,0 +1,125 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.microbench.util; + +import io.netty.util.Recycler; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Group; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Control; +import org.openjdk.jmh.runner.options.ChainedOptionsBuilder; + +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.TimeUnit; + +@Warmup(iterations = AbstractMicrobenchmarkBase.DEFAULT_WARMUP_ITERATIONS, time = 1) +@Measurement(iterations = AbstractMicrobenchmarkBase.DEFAULT_MEASURE_ITERATIONS, time = 1) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +public class RecyclerBenchmark extends AbstractMicrobenchmark { + private Recycler<DummyObject> recycler = new Recycler<DummyObject>() { + @Override + protected DummyObject newObject(Recycler.Handle<DummyObject> handle) { + return new DummyObject(handle); + } + }; + + @Override + protected ChainedOptionsBuilder newOptionsBuilder() throws Exception { + return super.newOptionsBuilder().addProfiler("gc"); + } + + @Benchmark + public DummyObject plainNew() { + return new DummyObject(); + } + + @Benchmark + public DummyObject recyclerGetAndOrphan() { + return recycler.get(); + } + + @Benchmark + public DummyObject recyclerGetAndRecycle() { + DummyObject o = recycler.get(); + o.recycle(); + return o; + } + + @State(Scope.Benchmark) + public static class ProducerConsumerState { + final ArrayBlockingQueue<DummyObject> queue = new ArrayBlockingQueue<DummyObject>(100); + } + + // The allocation stats are the main thing interesting about this benchmark + @Benchmark + @Group("producerConsumer") + public void producer(ProducerConsumerState state, Control control) throws Exception { + ArrayBlockingQueue<DummyObject> queue = state.queue; + DummyObject object = recycler.get(); + while (!control.stopMeasurement) { + if (queue.offer(object)) { + break; + } + } + } + + @Benchmark + @Group("producerConsumer") + public void consumer(ProducerConsumerState state, Control control) throws Exception { + DummyObject object; + do { + object = state.queue.poll(); + if (object != null) { + object.recycle(); + return; + } + } while (!control.stopMeasurement); + } + + @SuppressWarnings("unused") + private static final class DummyObject { + private final Recycler.Handle<DummyObject> handle; + private long l1; + private long l2; + private long l3; + private long l4; + private long l5; + private Object o1; + private Object o2; + private Object o3; + private Object o4; + private Object o5; + + DummyObject() { + this(null); + } + + DummyObject(Recycler.Handle<DummyObject> handle) { + this.handle = handle; + } + + public void recycle() { + handle.recycle(this); + } + } +}
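For downstream code, the visible API change in this patch is the new three-argument constructor that supersedes the deprecated four- and five-argument forms. A hedged usage sketch follows; the `MyObject` type and the tuning values are arbitrary, not taken from the patch.

```java
import io.netty.util.Recycler;

final class MyObject {
    final Recycler.Handle<MyObject> handle;

    MyObject(Recycler.Handle<MyObject> handle) {
        this.handle = handle;
    }

    // max 4096 pooled objects per thread, pool roughly every 8th fresh handle,
    // and an MPSC queue that grows/shrinks in 32-element chunks
    static final Recycler<MyObject> RECYCLER = new Recycler<MyObject>(4096, 8, 32) {
        @Override
        protected MyObject newObject(Recycler.Handle<MyObject> handle) {
            return new MyObject(handle);
        }
    };
}
```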
diff --git a/common/src/test/java/io/netty/util/RecyclerTest.java b/common/src/test/java/io/netty/util/RecyclerTest.java index 5d50e5981d1..76fdd465f32 100644 --- a/common/src/test/java/io/netty/util/RecyclerTest.java +++ b/common/src/test/java/io/netty/util/RecyclerTest.java @@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -39,14 +40,17 @@ public class RecyclerTest { private static Recycler<HandledObject> newRecycler(int maxCapacityPerThread) { - return newRecycler(maxCapacityPerThread, 2, 8, 2, 8); + return newRecycler(maxCapacityPerThread, 8, maxCapacityPerThread >> 1); } private static Recycler<HandledObject> newRecycler(int maxCapacityPerThread, int maxSharedCapacityFactor, int ratio, int maxDelayedQueuesPerThread, - int delayedQueueRatio) { - return new Recycler<HandledObject>(maxCapacityPerThread, maxSharedCapacityFactor, ratio, - maxDelayedQueuesPerThread, delayedQueueRatio) { + int delayedQueueRatio, int chunkSize) { + return newRecycler(maxCapacityPerThread, ratio, chunkSize); + } + + private static Recycler<HandledObject> newRecycler(int maxCapacityPerThread, int ratio, int chunkSize) { + return new Recycler<HandledObject>(maxCapacityPerThread, ratio, chunkSize) { @Override protected HandledObject newObject( Recycler.Handle<HandledObject> handle) { @@ -93,6 +97,11 @@ protected void finalize() throws Throwable { reference.getAndSet(null).recycle(); } + @Test + public void verySmallRecycer() { + newRecycler(2, 0, 1).get(); + } + @Test public void testMultipleRecycle() { Recycler<HandledObject> recycler = newRecycler(1024); @@ -187,7 +196,7 @@ public void run() { assertNotSame(a, b); IllegalStateException exception = exceptionStore.get(); if (exception != null) { - assertEquals("recycled already", exception.getMessage()); + assertThat(exception).hasMessageContaining("recycled already"); assertEquals(0, exception.getSuppressed().length); } } finally { @@ -235,9 +244,7 @@ public void run() { HandledObject b = recycler.get(); assertNotSame(a, b); IllegalStateException exception = exceptionStore.get(); - if (exception != null) { - throw exception; - } + assertNotNull(exception); // Object got recycled twice, so at least one of the calls must throw. 
} finally { thread1.join(1000); } @@ -265,7 +272,7 @@ public void testRecycleDisable() { @Test public void testRecycleDisableDrop() { - Recycler<HandledObject> recycler = newRecycler(1024, 2, 0, 2, 0); + Recycler<HandledObject> recycler = newRecycler(1024, 0, 16); HandledObject object = recycler.get(); object.recycle(); HandledObject object2 = recycler.get(); @@ -276,27 +283,6 @@ public void testRecycleDisableDrop() { object3.recycle(); } - @Test - public void testRecycleDisableDelayedQueueDrop() throws Exception { - final Recycler<HandledObject> recycler = newRecycler(1024, 2, 1, 2, 0); - final HandledObject o = recycler.get(); - final HandledObject o2 = recycler.get(); - final HandledObject o3 = recycler.get(); - final Thread thread = new Thread() { - @Override - public void run() { - o.recycle(); - o2.recycle(); - o3.recycle(); - } - }; - thread.start(); - thread.join(); - // In reverse order - assertSame(o3, recycler.get()); - assertSame(o, recycler.get()); - } - /** * Test to make sure bug #2848 never happens again * https://github.com/netty/netty/issues/2848 @@ -322,14 +308,14 @@ private static void testMaxCapacity(int maxCapacity) { objects[i] = null; } - assertTrue(maxCapacity >= recycler.threadLocalCapacity(), - "The threadLocalCapacity (" + recycler.threadLocalCapacity() + ") must be <= maxCapacity (" + assertTrue(maxCapacity >= recycler.threadLocalSize(), + "The threadLocalSize (" + recycler.threadLocalSize() + ") must be <= maxCapacity (" + maxCapacity + ") as we not pool all new handles internally"); } @Test public void testRecycleAtDifferentThread() throws Exception { - final Recycler<HandledObject> recycler = newRecycler(256, 10, 2, 10, 2); + final Recycler<HandledObject> recycler = newRecycler(256, 2, 16); final HandledObject o = recycler.get(); final HandledObject o2 = recycler.get(); @@ -387,7 +373,7 @@ public void run() { @Test public void testMaxCapacityWithRecycleAtDifferentThread() throws Exception { final int maxCapacity = 4; // Choose the number smaller than WeakOrderQueue.LINK_CAPACITY - final Recycler<HandledObject> recycler = newRecycler(maxCapacity); + final Recycler<HandledObject> recycler = newRecycler(maxCapacity, 4, 4); // Borrow 2 * maxCapacity objects. // Return the half from the same thread. @@ -413,14 +399,12 @@ public void run() { thread.start(); thread.join(); - assertEquals(maxCapacity, recycler.threadLocalCapacity()); - assertEquals(1, recycler.threadLocalSize()); + assertEquals(maxCapacity * 3 / 4, recycler.threadLocalSize()); for (int i = 0; i < array.length; i ++) { recycler.get(); } - assertEquals(maxCapacity, recycler.threadLocalCapacity()); assertEquals(0, recycler.threadLocalSize()); } @@ -429,7 +413,7 @@ public void testDiscardingExceedingElementsWithRecycleAtDifferentThread() throws final int maxCapacity = 32; final AtomicInteger instancesCount = new AtomicInteger(0); - final Recycler<HandledObject> recycler = new Recycler<HandledObject>(maxCapacity, 2) { + final Recycler<HandledObject> recycler = new Recycler<HandledObject>(maxCapacity) { @Override protected HandledObject newObject(Recycler.Handle<HandledObject> handle) { instancesCount.incrementAndGet();
val
test
"2021-11-24T08:45:19"
"2020-11-24T23:44:44Z"
patel-bhavik
val
netty/netty/11890_11891
netty/netty
netty/netty/11890
netty/netty/11891
[ "keyword_pr_to_issue" ]
1e189654f913b13be3616c552a04459465b752b3
d80a34ecf84bbe153b60123b059deabcc97a8935
[]
[ "```suggestion\r\n heapBuf = ctx.alloc().heapBuffer(len, len);\r\n```", "Please revert the formatting changes. ", "please revert formatting changes", "please revert formatting changes", "make this final and assign in the `if / else ` blocks", "Thank you; done.", "@normanmaurer ,\r\n> make this final and assign in the if / else blocks\r\n\r\nI introduced a `try / finally` so that we would not leak the reference to the allocated heap buffer; this accounts for the formatting changes above, and it also confuses the Java compiler when making `heapBuf` final; see attached screenshot.\r\n![Screenshot from 2021-12-06 10-00-16](https://user-images.githubusercontent.com/13126037/144897921-5aff2f72-9d7c-40a9-9012-072bf18b20e4.png)\r\n\r\n\r\n", "got it... ", "@normanmaurer ,\r\nAre you referring to the additional indentation? If so, it is due to the addition of the `try / finally` block to ensure that the allocated heapBuf is released on method return. Do you want me to remove the `try / finally`?", "Check the checkstyle errors ", "Got it; the checkstyle errors should now be addressed.", "```suggestion\r\n for (;;) {\r\n```", "```suggestion\r\n for (;;) {\r\n```" ]
"2021-12-04T20:02:07Z"
[]
JdkZlibEncoder: heap allocated arrays for deflater input generate memory pressure under load
### Expected behavior I am writing a high-throughput client for a custom protocol over TCP; the protocol has the option of compressing contents using zlib encoding. In order to reach performance targets I accumulate encoded frames into batches stored in off-heap pooled ByteBufs and then write the contents to the Netty Channel. When running load tests with compression enabled I expected very little GC activity. ### Actual behavior When running load tests with compression enabled I see a lot of GC activity. This is due to byte arrays allocated on the heap to provide input to the zlib Deflater. When the incoming ByteBuf is direct, we allocate a byte array on the heap and copy the contents into it. ### Steps to reproduce - Create a Netty client that uses a ZlibEncoder - Write direct ByteBufs to the Channel in a loop - Observe the increase in memory consumption (using system tools or a profiler) ### Netty version ``` 4.1.70.Final ``` ### JVM version (e.g. `java -version`) ``` openjdk version "1.8.0_292" OpenJDK Runtime Environment (build 1.8.0_292-8u292-b10-0ubuntu1~20.04-b10) OpenJDK 64-Bit Server VM (build 25.292-b10, mixed mode) ``` ### OS version (e.g. `uname -a`) ```Linux peso 5.10.0-1051-oem #53-Ubuntu SMP Thu Oct 28 08:11:53 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux```
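The remedy eventually merged (gold patch below) keeps Deflater's requirement for a backing array but stages the bytes in a pooled heap ByteBuf instead of a fresh `byte[]` per message. Here is a condensed, hedged sketch of that idea; the wrapper class, method name, and scratch output array are invented, and the real encoder deflates straight into the outbound ByteBuf.

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;

import java.util.zip.Deflater;

final class ZlibStagingSketch {
    // Stage a direct buffer's bytes in a pooled, array-backed heap buffer for Deflater input.
    static void deflateDirect(ByteBufAllocator alloc, Deflater deflater, ByteBuf in, ByteBuf out) {
        int len = in.readableBytes();
        ByteBuf heap = alloc.heapBuffer(len, len); // pooled allocators reuse this memory
        try {
            in.readBytes(heap, len);
            deflater.setInput(heap.array(), heap.arrayOffset() + heap.readerIndex(), len);
            byte[] scratch = new byte[8192]; // simplified: the real patch writes into 'out' directly
            while (!deflater.needsInput()) {
                int n = deflater.deflate(scratch, 0, scratch.length, Deflater.SYNC_FLUSH);
                out.writeBytes(scratch, 0, n);
            }
        } finally {
            heap.release(); // safe here: the staged input has been fully consumed by now
        }
    }
}
```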
[ "codec/src/main/java/io/netty/handler/codec/compression/JdkZlibEncoder.java" ]
[ "codec/src/main/java/io/netty/handler/codec/compression/JdkZlibEncoder.java" ]
[]
diff --git a/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibEncoder.java b/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibEncoder.java index 08292ef9546..892f6ccf3d3 100644 --- a/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibEncoder.java +++ b/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibEncoder.java @@ -200,42 +200,50 @@ protected void encode(ChannelHandlerContext ctx, ByteBuf uncompressed, ByteBuf o int offset; byte[] inAry; - if (uncompressed.hasArray()) { - // if it is backed by an array we not need to to do a copy at all - inAry = uncompressed.array(); - offset = uncompressed.arrayOffset() + uncompressed.readerIndex(); - // skip all bytes as we will consume all of them - uncompressed.skipBytes(len); - } else { - inAry = new byte[len]; - uncompressed.readBytes(inAry); - offset = 0; - } + ByteBuf heapBuf = null; + try { + if (uncompressed.hasArray()) { + // if it is backed by an array we not need to do a copy at all + inAry = uncompressed.array(); + offset = uncompressed.arrayOffset() + uncompressed.readerIndex(); + // skip all bytes as we will consume all of them + uncompressed.skipBytes(len); + } else { + heapBuf = ctx.alloc().heapBuffer(len, len); + uncompressed.readBytes(heapBuf, len); + inAry = heapBuf.array(); + offset = heapBuf.arrayOffset() + heapBuf.readerIndex(); + } - if (writeHeader) { - writeHeader = false; - if (wrapper == ZlibWrapper.GZIP) { - out.writeBytes(gzipHeader); + if (writeHeader) { + writeHeader = false; + if (wrapper == ZlibWrapper.GZIP) { + out.writeBytes(gzipHeader); + } } - } - if (wrapper == ZlibWrapper.GZIP) { - crc.update(inAry, offset, len); - } + if (wrapper == ZlibWrapper.GZIP) { + crc.update(inAry, offset, len); + } - deflater.setInput(inAry, offset, len); - for (;;) { - deflate(out); - if (deflater.needsInput()) { - // Consumed everything - break; - } else { - if (!out.isWritable()) { - // We did not consume everything but the buffer is not writable anymore. Increase the capacity to - // make more room. - out.ensureWritable(out.writerIndex()); + deflater.setInput(inAry, offset, len); + for (;;) { + deflate(out); + if (deflater.needsInput()) { + // Consumed everything + break; + } else { + if (!out.isWritable()) { + // We did not consume everything but the buffer is not writable anymore. Increase the capacity + // to make more room. + out.ensureWritable(out.writerIndex()); + } } } + } finally { + if (heapBuf != null) { + heapBuf.release(); + } } }
null
train
test
"2021-12-03T10:00:11"
"2021-12-04T19:54:48Z"
dferstay
val
netty/netty/11661_11922
netty/netty
netty/netty/11661
netty/netty/11922
[ "keyword_pr_to_issue" ]
cccce01497db4ac33b412dfb60630fc5ab443ce1
ea6d48320a007d403c8245bc548c1dd9c5f54e11
[ "Sounds good! Can you do a PR?", "Careful with the current JDK `WatchService` as it will miss changes if the file is _bind mounted_. In container-based production environments it is likely that `/etc/hosts` is bind mounted.\r\n\r\nI wrote a bit about this a few years ago: https://blog.arkey.fr/2019/09/13/watchservice-and-bind-mount/", "@bric3 I think we should just re-read after X seconds.", "Agreed, polling is the cheapest thing to do and it's portable.\r\nThat's what I ended up doing.", "It would be good if we keep this non-blocking ...", "I think what we should do is very similar to here: https://github.com/netty/netty/blob/4.1/resolver-dns-classes-macos/src/main/java/io/netty/resolver/dns/macos/MacOSDnsServerAddressStreamProvider.java#L170", "I am new to this project and I'd like to take this up. Do I wait until this is assigned to me or can I start working on it immediately? Thanks!", "There is already a pending PR for this: https://github.com/netty/netty/pull/11922", "> There is already a pending PR for this: #11922\r\n\r\nOK, I will look into other open issues. Thanks!" ]
[ "Honestly I think every 10 seconds might be a bit aggressive... What about 60 seconds as the default + maybe a system property to change it.", "please don't use wildcard imports", "all of these can be static", "nit: remove the public keyword", "I just wanted to add the question as a separate comment, didn't expect you would be so fast!", "I wasn't able to find a typical pattern across system property names, so please let me know if I need to change it.", "fixed", "fixed", "fixed", "just do this in an init block like we do in other places:\r\n\r\nhttps://github.com/netty/netty/blob/netty-4.1.72.Final/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java#L117", "nit: store `System.nanoTime()` in a local variable and reuse it in the compareAndSet. No need to call the function twice.", "I would still allow passing in the `refreshInterval` so we can use it for testing.", "See above... if we add a constructor that takes the interval we can directly construct the `DefaultHostsFileEntriesResolver` in tests and set the correct interval.", "moved to static init block, also added `debug` log :+1:, thanks", "fixed", "added `refreshInterval` back to the constructor", "replaced this `beforeAll` with `refreshInterval` in constructor params" ]
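The nanoTime/compareAndSet nit in the review above amounts to a small lock-free refresh gate. A hedged sketch of the pattern follows; the class name is invented and the interval is hard-coded for brevity, whereas the merged patch makes it configurable.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

final class RefreshGate {
    private static final long INTERVAL_NANOS = TimeUnit.SECONDS.toNanos(60);
    private final AtomicLong lastRefresh = new AtomicLong(System.nanoTime());

    // Non-blocking: at most one caller per interval wins the CAS and re-parses the file;
    // every other caller keeps serving the previously parsed entries.
    boolean shouldRefresh() {
        long last = lastRefresh.get();
        long now = System.nanoTime(); // read once and reuse, per the review comment
        return now - last > INTERVAL_NANOS && lastRefresh.compareAndSet(last, now);
    }
}
```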
"2021-12-14T15:40:39Z"
[ "help wanted" ]
The resolver does not re-read /etc/hosts upon change
### Expected behavior When /etc/hosts is changed, the netty resolver should re-read it. ### Actual behavior io.netty.resolver.DefaultHostsFileEntriesResolver parses /etc/hosts only once, upon creation. ### Netty version 4.1.67 ### JVM version (e.g. `java -version`) any ### OS version (e.g. `uname -a`) any The WatchService (https://docs.oracle.com/javase/tutorial/essential/io/notification.html) can be used to monitor /etc/hosts and re-read it immediately after it is changed. The same Java API can be used in UnixResolverDnsServerAddressStreamProvider for re-reading /etc/resolv.conf. At the very least, /etc/hosts should be re-read every few minutes (like the current approach for /etc/resolv.conf).
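For reference, the WatchService approach suggested here would look roughly like the sketch below. Note that you register the parent directory rather than the file itself, and that `take()` blocks; those two points, plus the bind-mount caveat raised earlier in the thread, are why the merged fix polls on a timer instead. The wrapper class and callback are invented for illustration.

```java
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardWatchEventKinds;
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;

final class HostsWatchSketch {
    // Blocks the calling thread; a real integration would need a dedicated thread.
    static void watchHosts(Runnable reparse) throws IOException, InterruptedException {
        WatchService watcher = FileSystems.getDefault().newWatchService();
        Path etc = Paths.get("/etc");
        etc.register(watcher, StandardWatchEventKinds.ENTRY_MODIFY);
        for (;;) {
            WatchKey key = watcher.take();
            for (WatchEvent<?> event : key.pollEvents()) {
                if ("hosts".equals(String.valueOf(event.context()))) {
                    reparse.run(); // re-read and re-parse /etc/hosts
                }
            }
            key.reset();
        }
    }
}
```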
[ "resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java" ]
[ "resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java" ]
[ "resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java" ]
diff --git a/resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java b/resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java index 378fd98d87e..d48fe096c37 100644 --- a/resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java +++ b/resolver/src/main/java/io/netty/resolver/DefaultHostsFileEntriesResolver.java @@ -17,6 +17,9 @@ import io.netty.util.CharsetUtil; import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.SystemPropertyUtil; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; import java.net.InetAddress; import java.nio.charset.Charset; @@ -24,42 +27,49 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; /** * Default {@link HostsFileEntriesResolver} that resolves hosts file entries only once. */ public final class DefaultHostsFileEntriesResolver implements HostsFileEntriesResolver { - private final Map<String, List<InetAddress>> inet4Entries; - private final Map<String, List<InetAddress>> inet6Entries; + private static final InternalLogger logger = + InternalLoggerFactory.getInstance(DefaultHostsFileEntriesResolver.class); + private static final long DEFAULT_REFRESH_INTERVAL; + + private final long refreshInterval; + private final AtomicLong lastRefresh = new AtomicLong(System.nanoTime()); + private final HostsFileEntriesProvider.Parser hostsFileParser; + private volatile Map<String, List<InetAddress>> inet4Entries; + private volatile Map<String, List<InetAddress>> inet6Entries; + + static { + DEFAULT_REFRESH_INTERVAL = SystemPropertyUtil.getLong( + "io.netty.hostsFileRefreshInterval", TimeUnit.SECONDS.toNanos(60)); + + if (logger.isDebugEnabled()) { + logger.debug("-Dio.netty.hostsFileRefreshInterval: {}", DEFAULT_REFRESH_INTERVAL); + } + } public DefaultHostsFileEntriesResolver() { - this(parseEntries()); + this(HostsFileEntriesProvider.parser(), DEFAULT_REFRESH_INTERVAL); } // for testing purpose only - DefaultHostsFileEntriesResolver(HostsFileEntriesProvider entries) { + DefaultHostsFileEntriesResolver(HostsFileEntriesProvider.Parser hostsFileParser, long refreshInterval) { + this.hostsFileParser = hostsFileParser; + this.refreshInterval = refreshInterval; + HostsFileEntriesProvider entries = parseEntries(hostsFileParser); inet4Entries = entries.ipv4Entries(); inet6Entries = entries.ipv6Entries(); } @Override public InetAddress address(String inetHost, ResolvedAddressTypes resolvedAddressTypes) { - String normalized = normalize(inetHost); - switch (resolvedAddressTypes) { - case IPV4_ONLY: - return firstAddress(inet4Entries.get(normalized)); - case IPV6_ONLY: - return firstAddress(inet6Entries.get(normalized)); - case IPV4_PREFERRED: - InetAddress inet4Address = firstAddress(inet4Entries.get(normalized)); - return inet4Address != null ? inet4Address : firstAddress(inet6Entries.get(normalized)); - case IPV6_PREFERRED: - InetAddress inet6Address = firstAddress(inet6Entries.get(normalized)); - return inet6Address != null ? 
inet6Address : firstAddress(inet4Entries.get(normalized)); - default: - throw new IllegalArgumentException("Unknown ResolvedAddressTypes " + resolvedAddressTypes); - } + return firstAddress(addresses(inetHost, resolvedAddressTypes)); } /** @@ -72,6 +82,8 @@ public InetAddress address(String inetHost, ResolvedAddressTypes resolvedAddress */ public List<InetAddress> addresses(String inetHost, ResolvedAddressTypes resolvedAddressTypes) { String normalized = normalize(inetHost); + ensureHostsFileEntriesAreFresh(); + switch (resolvedAddressTypes) { case IPV4_ONLY: return inet4Entries.get(normalized); @@ -90,6 +102,18 @@ public List<InetAddress> addresses(String inetHost, ResolvedAddressTypes resolve } } + private void ensureHostsFileEntriesAreFresh() { + long last = lastRefresh.get(); + long currentTime = System.nanoTime(); + if (currentTime - last > refreshInterval) { + if (lastRefresh.compareAndSet(last, currentTime)) { + HostsFileEntriesProvider entries = parseEntries(hostsFileParser); + inet4Entries = entries.ipv4Entries(); + inet6Entries = entries.ipv6Entries(); + } + } + } + // package-private for testing purposes String normalize(String inetHost) { return inetHost.toLowerCase(Locale.ENGLISH); @@ -108,14 +132,13 @@ private static InetAddress firstAddress(List<InetAddress> addresses) { return addresses != null && !addresses.isEmpty() ? addresses.get(0) : null; } - private static HostsFileEntriesProvider parseEntries() { + private static HostsFileEntriesProvider parseEntries(HostsFileEntriesProvider.Parser parser) { if (PlatformDependent.isWindows()) { // Ony windows there seems to be no standard for the encoding used for the hosts file, so let us // try multiple until we either were able to parse it or there is none left and so we return an // empty instance. - return HostsFileEntriesProvider.parser() - .parseSilently(Charset.defaultCharset(), CharsetUtil.UTF_16, CharsetUtil.UTF_8); + return parser.parseSilently(Charset.defaultCharset(), CharsetUtil.UTF_16, CharsetUtil.UTF_8); } - return HostsFileEntriesProvider.parser().parseSilently(); + return parser.parseSilently(); } }
diff --git a/resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java b/resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java index b9e8c25ea41..794d2566a0b 100644 --- a/resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java +++ b/resolver/src/test/java/io/netty/resolver/DefaultHostsFileEntriesResolverTest.java @@ -15,24 +15,37 @@ */ package io.netty.resolver; +import com.google.common.collect.Maps; import io.netty.util.NetUtil; import org.junit.jupiter.api.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import java.net.Inet4Address; import java.net.Inet6Address; import java.net.InetAddress; +import java.nio.charset.Charset; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.instanceOf; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.any; public class DefaultHostsFileEntriesResolverTest { + private static final Map<String, List<InetAddress>> LOCALHOST_V4_ADDRESSES = + Collections.singletonMap("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); + private static final Map<String, List<InetAddress>> LOCALHOST_V6_ADDRESSES = + Collections.singletonMap("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); + private static final long ENTRIES_TTL = TimeUnit.MINUTES.toNanos(1); /** * show issue https://github.com/netty/netty/issues/5182 @@ -47,13 +60,12 @@ public void testCaseInsensitivity() { @Test public void shouldntFindWhenAddressTypeDoesntMatch() { - Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); - Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); + HostsFileEntriesProvider.Parser parser = givenHostsParserWith( + LOCALHOST_V4_ADDRESSES, + Collections.<String, List<InetAddress>>emptyMap() + ); - inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); - - DefaultHostsFileEntriesResolver resolver = - new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); + DefaultHostsFileEntriesResolver resolver = new DefaultHostsFileEntriesResolver(parser, ENTRIES_TTL); InetAddress address = resolver.address("localhost", ResolvedAddressTypes.IPV6_ONLY); assertNull(address, "Should pick an IPv6 address"); @@ -61,14 +73,12 @@ public void shouldntFindWhenAddressTypeDoesntMatch() { @Test public void shouldPickIpv4WhenBothAreDefinedButIpv4IsPreferred() { - Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); - Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); + HostsFileEntriesProvider.Parser parser = givenHostsParserWith( + LOCALHOST_V4_ADDRESSES, + LOCALHOST_V6_ADDRESSES + ); - inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); - inet6Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); - - DefaultHostsFileEntriesResolver resolver = - new 
DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); + DefaultHostsFileEntriesResolver resolver = new DefaultHostsFileEntriesResolver(parser, ENTRIES_TTL); InetAddress address = resolver.address("localhost", ResolvedAddressTypes.IPV4_PREFERRED); assertThat("Should pick an IPv4 address", address, instanceOf(Inet4Address.class)); @@ -76,14 +86,12 @@ public void shouldPickIpv4WhenBothAreDefinedButIpv4IsPreferred() { @Test public void shouldPickIpv6WhenBothAreDefinedButIpv6IsPreferred() { - Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); - Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); + HostsFileEntriesProvider.Parser parser = givenHostsParserWith( + LOCALHOST_V4_ADDRESSES, + LOCALHOST_V6_ADDRESSES + ); - inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); - inet6Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); - - DefaultHostsFileEntriesResolver resolver = - new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); + DefaultHostsFileEntriesResolver resolver = new DefaultHostsFileEntriesResolver(parser, ENTRIES_TTL); InetAddress address = resolver.address("localhost", ResolvedAddressTypes.IPV6_PREFERRED); assertThat("Should pick an IPv6 address", address, instanceOf(Inet6Address.class)); @@ -91,13 +99,12 @@ public void shouldPickIpv6WhenBothAreDefinedButIpv6IsPreferred() { @Test public void shouldntFindWhenAddressesTypeDoesntMatch() { - Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); - Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); + HostsFileEntriesProvider.Parser parser = givenHostsParserWith( + LOCALHOST_V4_ADDRESSES, + Collections.<String, List<InetAddress>>emptyMap() + ); - inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); - - DefaultHostsFileEntriesResolver resolver = - new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); + DefaultHostsFileEntriesResolver resolver = new DefaultHostsFileEntriesResolver(parser, ENTRIES_TTL); List<InetAddress> addresses = resolver.addresses("localhost", ResolvedAddressTypes.IPV6_ONLY); assertNull(addresses, "Should pick an IPv6 address"); @@ -105,14 +112,12 @@ public void shouldntFindWhenAddressesTypeDoesntMatch() { @Test public void shouldPickIpv4FirstWhenBothAreDefinedButIpv4IsPreferred() { - Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); - Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); + HostsFileEntriesProvider.Parser parser = givenHostsParserWith( + LOCALHOST_V4_ADDRESSES, + LOCALHOST_V6_ADDRESSES + ); - inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); - inet6Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); - - DefaultHostsFileEntriesResolver resolver = - new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); + DefaultHostsFileEntriesResolver resolver = new DefaultHostsFileEntriesResolver(parser, ENTRIES_TTL); List<InetAddress> addresses = resolver.addresses("localhost", ResolvedAddressTypes.IPV4_PREFERRED); assertNotNull(addresses); @@ -123,14 +128,12 @@ public void shouldPickIpv4FirstWhenBothAreDefinedButIpv4IsPreferred() { @Test public void 
shouldPickIpv6FirstWhenBothAreDefinedButIpv6IsPreferred() { - Map<String, List<InetAddress>> inet4Entries = new HashMap<String, List<InetAddress>>(); - Map<String, List<InetAddress>> inet6Entries = new HashMap<String, List<InetAddress>>(); + HostsFileEntriesProvider.Parser parser = givenHostsParserWith( + LOCALHOST_V4_ADDRESSES, + LOCALHOST_V6_ADDRESSES + ); - inet4Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); - inet6Entries.put("localhost", Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); - - DefaultHostsFileEntriesResolver resolver = - new DefaultHostsFileEntriesResolver(new HostsFileEntriesProvider(inet4Entries, inet6Entries)); + DefaultHostsFileEntriesResolver resolver = new DefaultHostsFileEntriesResolver(parser, ENTRIES_TTL); List<InetAddress> addresses = resolver.addresses("localhost", ResolvedAddressTypes.IPV6_PREFERRED); assertNotNull(addresses); @@ -138,4 +141,54 @@ public void shouldPickIpv6FirstWhenBothAreDefinedButIpv6IsPreferred() { assertThat("Should pick an IPv6 address", addresses.get(0), instanceOf(Inet6Address.class)); assertThat("Should pick an IPv4 address", addresses.get(1), instanceOf(Inet4Address.class)); } + + @Test + public void shouldNotRefreshHostsFileContentBeforeRefreshIntervalElapsed() { + Map<String, List<InetAddress>> v4Addresses = Maps.newHashMap(LOCALHOST_V4_ADDRESSES); + Map<String, List<InetAddress>> v6Addresses = Maps.newHashMap(LOCALHOST_V6_ADDRESSES); + DefaultHostsFileEntriesResolver resolver = + new DefaultHostsFileEntriesResolver(givenHostsParserWith(v4Addresses, v6Addresses), ENTRIES_TTL); + String newHost = UUID.randomUUID().toString(); + + v4Addresses.put(newHost, Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); + v6Addresses.put(newHost, Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); + + assertNull(resolver.address(newHost, ResolvedAddressTypes.IPV4_ONLY)); + assertNull(resolver.address(newHost, ResolvedAddressTypes.IPV6_ONLY)); + } + + @Test + public void shouldRefreshHostsFileContentAfterRefreshInterval() { + Map<String, List<InetAddress>> v4Addresses = Maps.newHashMap(LOCALHOST_V4_ADDRESSES); + Map<String, List<InetAddress>> v6Addresses = Maps.newHashMap(LOCALHOST_V6_ADDRESSES); + DefaultHostsFileEntriesResolver resolver = + new DefaultHostsFileEntriesResolver(givenHostsParserWith(v4Addresses, v6Addresses), -1); + String newHost = UUID.randomUUID().toString(); + + InetAddress address = resolver.address(newHost, ResolvedAddressTypes.IPV6_ONLY); + assertNull(address); + + v4Addresses.put(newHost, Collections.<InetAddress>singletonList(NetUtil.LOCALHOST4)); + v6Addresses.put(newHost, Collections.<InetAddress>singletonList(NetUtil.LOCALHOST6)); + + assertEquals(NetUtil.LOCALHOST4, resolver.address(newHost, ResolvedAddressTypes.IPV4_ONLY)); + assertEquals(NetUtil.LOCALHOST6, resolver.address(newHost, ResolvedAddressTypes.IPV6_ONLY)); + } + + private HostsFileEntriesProvider.Parser givenHostsParserWith(final Map<String, List<InetAddress>> inet4Entries, + final Map<String, List<InetAddress>> inet6Entries) { + HostsFileEntriesProvider.Parser mockParser = mock(HostsFileEntriesProvider.Parser.class); + + Answer<HostsFileEntriesProvider> mockedAnswer = new Answer<HostsFileEntriesProvider>() { + @Override + public HostsFileEntriesProvider answer(InvocationOnMock invocation) { + return new HostsFileEntriesProvider(inet4Entries, inet6Entries); + } + }; + + when(mockParser.parseSilently()).thenAnswer(mockedAnswer); + 
when(mockParser.parseSilently(any(Charset.class))).thenAnswer(mockedAnswer); + + return mockParser; + } }
val
test
"2021-12-13T11:50:02"
"2021-09-07T13:38:33Z"
gbrdead
val
netty/netty/11965_11966
netty/netty
netty/netty/11965
netty/netty/11966
[ "keyword_pr_to_issue" ]
9ab5e9180253e85eacdb978d436c087615c6e3b1
1cbd3afcdec9591b477ccddc3173afd7b30b7df5
[]
[ "```suggestion\r\n private boolean selfFiredChannelRead;\r\n```", "This is not needed at all. Please remove ", "```suggestion\r\n assertFalse(channel.writeInbound(Unpooled.wrappedBuffer(new byte[]{1})));\r\n```", "```suggestion\r\n assertEquals(0, interceptor.readsTriggered);\r\n assertNotNull(channel.pipeline().get(FixedLengthFrameDecoder.class));\r\n assertFalse(channel.finish());\r\n```", "```suggestion\r\n```", "```suggestion\r\n```", "```suggestion\r\n```", "```suggestion\r\n private static final class ReadInterceptingHandler extends ChannelOutboundHandlerAdapter {\r\n```" ]
"2022-01-02T13:29:06Z"
[]
WebSocket server loses one frame, possibly caused by ByteToMessageDecoder triggering an unexpected read() when channelReadComplete is fired
We're creating a relay-like application with WebSocket. When the WebSocket client sends an upgrade request to the server, the server sets the channel's autoRead to false (or config child channel autoRead to false, and manually trigger first read()). When the upstream connection is ready (which is started by another type of client), and the relay handler would be ok, set autoRead to true. ### Expected behavior No `channelRead` fired after setting autoRead to false ### Actual behavior A frame was read and then discard in TailContext ### Steps to reproduce See the following junit test: 1. start testWebsocketServer 2. start testWebsocketServerClient 3. watch the server's log ``` 18:46:25.176 [nioEventLoopGroup-2-1] DEBUG i.n.channel.DefaultChannelPipeline - Discarded inbound message TextWebSocketFrame(data: PooledUnsafeDirectByteBuf(ridx: 0, widx: 7, cap: 7)) that reached at the tail of the pipeline. Please check your pipeline configuration. 18:46:25.176 [nioEventLoopGroup-2-1] DEBUG i.n.channel.DefaultChannelPipeline - Discarded message pipeline : [wsencoder, wsdecoder, io.netty.handler.codec.http.websocketx.Utf8FrameValidator, WebSocketServerProtocolHandler#0, DefaultChannelPipeline$TailContext#0]. Channel : [id: 0xc1bcc332, L:/127.0.0.1:8100 - R:/127.0.0.1:64710]. 18:46:27.149 [nioEventLoopGroup-2-1] INFO com.example.alexlx.WebSocketTest - read: hello 1 18:46:27.179 [nioEventLoopGroup-2-1] INFO com.example.alexlx.WebSocketTest - read: hello 2 ``` ### Minimal yet complete reproducer code (or URL to code) ```java package com.example.alexlx; import io.netty.bootstrap.Bootstrap; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.*; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.codec.http.HttpClientCodec; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpServerCodec; import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; import io.netty.handler.codec.http.websocketx.WebSocketClientProtocolConfig; import io.netty.handler.codec.http.websocketx.WebSocketClientProtocolHandler; import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.TimeUnit; public class WebSocketTest { private static final Logger log = LoggerFactory.getLogger(WebSocketTest.class); @Test public void testWebsocketServer() { NioEventLoopGroup group = new NioEventLoopGroup(1); new ServerBootstrap() .group(group) .channel(NioServerSocketChannel.class) .childOption(ChannelOption.AUTO_READ, false) .childHandler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ChannelPipeline p = ch.pipeline(); p.addLast( new HttpServerCodec(), new HttpObjectAggregator(1024 * 1024), new WebSocketServerProtocolHandler("/ws"), new SimpleUserEventChannelHandler<WebSocketServerProtocolHandler.HandshakeComplete>() { @Override protected void eventReceived(ChannelHandlerContext ctx, WebSocketServerProtocolHandler.HandshakeComplete evt) throws Exception { log.info("handshake {}", evt); ctx.pipeline().remove(this); // Simulate the relay connection established in 3 seconds ctx.executor().schedule( new Runnable() { @Override public void run() { ctx.channel().config().setAutoRead(true); ctx.pipeline().addLast(new SimpleChannelInboundHandler<TextWebSocketFrame>() { @Override 
protected void channelRead0(ChannelHandlerContext ctx, TextWebSocketFrame msg) throws Exception { log.info("read: " + msg.text()); } }); } }, 3, TimeUnit.SECONDS ); } } ); ch.read(); } }) .bind(8100).syncUninterruptibly().channel().closeFuture().syncUninterruptibly(); } @Test public void testWebsocketServerClient() { new Bootstrap() .group(new NioEventLoopGroup(1)) .channel(NioSocketChannel.class) .handler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast( new HttpClientCodec(), new HttpObjectAggregator(1024 * 1024), new WebSocketClientProtocolHandler( WebSocketClientProtocolConfig.newBuilder() .webSocketUri("ws://127.0.0.1:8100/ws") .build()), new SimpleUserEventChannelHandler<WebSocketClientProtocolHandler.ClientHandshakeStateEvent>() { @Override protected void eventReceived(ChannelHandlerContext ctx, WebSocketClientProtocolHandler.ClientHandshakeStateEvent evt) throws Exception { if (evt == WebSocketClientProtocolHandler.ClientHandshakeStateEvent.HANDSHAKE_COMPLETE) { log.info("HANDSHAKE_COMPLETE"); ctx.channel().eventLoop() .scheduleAtFixedRate(new Runnable() { int i = 0; @Override public void run() { log.info("write {}, writable={}", i, ch.isWritable()); ch.writeAndFlush(new TextWebSocketFrame("hello " + (i++))); if (i >= 10) { ctx.close(); } } }, 1, 1, TimeUnit.SECONDS); } } } ); } }) .connect("127.0.0.1", 8100) .syncUninterruptibly() .channel().closeFuture().syncUninterruptibly(); } } ``` ### Netty version 4.1.52 ### JVM version (e.g. `java -version`) 1.8 ### OS version (e.g. `uname -a`) windows 10 or mac os x 10.14
[ "codec/src/main/java/io/netty/handler/codec/ByteToMessageDecoder.java" ]
[ "codec/src/main/java/io/netty/handler/codec/ByteToMessageDecoder.java" ]
[ "codec/src/test/java/io/netty/handler/codec/ByteToMessageDecoderTest.java" ]
diff --git a/codec/src/main/java/io/netty/handler/codec/ByteToMessageDecoder.java b/codec/src/main/java/io/netty/handler/codec/ByteToMessageDecoder.java index f83209855d9..523aa4700d6 100644 --- a/codec/src/main/java/io/netty/handler/codec/ByteToMessageDecoder.java +++ b/codec/src/main/java/io/netty/handler/codec/ByteToMessageDecoder.java @@ -162,6 +162,8 @@ public ByteBuf cumulate(ByteBufAllocator alloc, ByteBuf cumulation, ByteBuf in) */ private boolean firedChannelRead; + private boolean selfFiredChannelRead; + /** * A bitmask where the bits are defined as * <ul> @@ -268,6 +270,7 @@ protected void handlerRemoved0(ChannelHandlerContext ctx) throws Exception { } @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { if (msg instanceof ByteBuf) { + selfFiredChannelRead = true; CodecOutputList out = CodecOutputList.newInstance(); try { first = cumulation == null; @@ -329,7 +332,7 @@ static void fireChannelRead(ChannelHandlerContext ctx, CodecOutputList msgs, int public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { numReads = 0; discardSomeReadBytes(); - if (!firedChannelRead && !ctx.channel().config().isAutoRead()) { + if (selfFiredChannelRead && !firedChannelRead && !ctx.channel().config().isAutoRead()) { ctx.read(); } firedChannelRead = false;
diff --git a/codec/src/test/java/io/netty/handler/codec/ByteToMessageDecoderTest.java b/codec/src/test/java/io/netty/handler/codec/ByteToMessageDecoderTest.java index 10d4f2f56cc..9f9dac49630 100644 --- a/codec/src/test/java/io/netty/handler/codec/ByteToMessageDecoderTest.java +++ b/codec/src/test/java/io/netty/handler/codec/ByteToMessageDecoderTest.java @@ -25,6 +25,7 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.channel.socket.ChannelInputShutdownEvent; import io.netty.util.internal.PlatformDependent; @@ -38,6 +39,7 @@ import static io.netty.buffer.Unpooled.wrappedBuffer; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -434,17 +436,18 @@ public CompositeByteBuf addFlattenedComponents(boolean increaseWriterIndex, Byte } } - @Test - public void testDoesNotOverRead() { - class ReadInterceptingHandler extends ChannelOutboundHandlerAdapter { - private int readsTriggered; + private static final class ReadInterceptingHandler extends ChannelOutboundHandlerAdapter { + private int readsTriggered; - @Override - public void read(ChannelHandlerContext ctx) throws Exception { - readsTriggered++; - super.read(ctx); - } + @Override + public void read(ChannelHandlerContext ctx) throws Exception { + readsTriggered++; + super.read(ctx); } + } + + @Test + public void testDoesNotOverRead() { ReadInterceptingHandler interceptor = new ReadInterceptingHandler(); EmbeddedChannel channel = new EmbeddedChannel(); @@ -538,4 +541,25 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) { assertBuffer(Unpooled.wrappedBuffer(bytes), (ByteBuf) channel.readInbound()); assertNull(channel.readInbound()); } + + @Test + void testUnexpectRead() { + EmbeddedChannel channel = new EmbeddedChannel(); + channel.config().setAutoRead(false); + ReadInterceptingHandler interceptor = new ReadInterceptingHandler(); + channel.pipeline().addLast( + interceptor, + new SimpleChannelInboundHandler<ByteBuf>() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { + ctx.pipeline().replace(this, "fix", new FixedLengthFrameDecoder(3)); + } + } + ); + + assertFalse(channel.writeInbound(Unpooled.wrappedBuffer(new byte[]{1}))); + assertEquals(0, interceptor.readsTriggered); + assertNotNull(channel.pipeline().get(FixedLengthFrameDecoder.class)); + assertFalse(channel.finish()); + } }
train
test
"2021-12-27T11:00:36"
"2022-01-02T11:38:50Z"
alex-lx
val
netty/netty/11963_11970
netty/netty
netty/netty/11963
netty/netty/11970
[ "keyword_pr_to_issue" ]
bdcf3988c20c9c0dae92850b36ae63d5ddc5b502
195b84910644e51f2323a30d55704f0e361fdcd8
[ "Thanks for the report, preparing a fix.", "good", "this is `ByteBuf` feature,`readByte()` make readIndex + 1,so the first index is `P` rather than `+`." ]
[]
"2022-01-05T00:59:30Z"
[]
the result of `ByteBufUtil.indexOf(ByteBuf needle, ByteBuf haystack)` is wrong
### Expected behavior ``` ByteBuf haystack = Unpooled.copiedBuffer("+PONG\r\n", CharsetUtil.UTF_8); ByteBuf needle = Unpooled.copiedBuffer("\r\n", CharsetUtil.UTF_8); haystack.readByte(); int index = ByteBufUtil.indexOf(needle, haystack); System.out.println(index); ``` The expected result is 5. ### Actual behavior The actual result is 4. ### Steps to reproduce ### Minimal yet complete reproducer code (or URL to code) ### Netty version 4.1.73.Final-SNAPSHOT ### JVM version (e.g. `java -version`) openjdk version "11.0.13" 2021-10-19 OpenJDK Runtime Environment (build 11.0.13+8-Ubuntu-0ubuntu1.21.10) OpenJDK 64-Bit Server VM (build 11.0.13+8-Ubuntu-0ubuntu1.21.10, mixed mode, sharing) ### OS version (e.g. `uname -a`) Linux XPS-13-9310 5.13.0-22-generic #22-Ubuntu SMP Fri Nov 5 13:21:36 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
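A small self-contained sketch of the post-fix semantics (based on the patch below, which returns `j + bStartIndex`): the search starts at the haystack's readerIndex, but the returned index is absolute within the buffer.

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public final class IndexOfDemo {
    public static void main(String[] args) {
        ByteBuf haystack = Unpooled.copiedBuffer("+PONG\r\n", CharsetUtil.UTF_8);
        ByteBuf needle = Unpooled.copiedBuffer("\r\n", CharsetUtil.UTF_8);
        haystack.readByte(); // readerIndex moves from 0 to 1, skipping '+'
        // The '\r' sits at absolute index 5 of "+PONG\r\n".
        System.out.println(ByteBufUtil.indexOf(needle, haystack)); // 5 after the fix (was 4)
        haystack.release();
        needle.release();
    }
}
```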
[ "buffer/src/main/java/io/netty/buffer/ByteBufUtil.java" ]
[ "buffer/src/main/java/io/netty/buffer/ByteBufUtil.java" ]
[ "buffer/src/test/java/io/netty/buffer/ByteBufUtilTest.java" ]
diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java b/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java index 68a0ab5aaa0..8636d539553 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java @@ -279,7 +279,7 @@ public static int indexOf(ByteBuf needle, ByteBuf haystack) { --i; } if (i <= memory) { - return j; + return j + bStartIndex; } j += per; memory = m - per - 1; @@ -304,7 +304,7 @@ public static int indexOf(ByteBuf needle, ByteBuf haystack) { --i; } if (i < 0) { - return j; + return j + bStartIndex; } j += per; } else {
diff --git a/buffer/src/test/java/io/netty/buffer/ByteBufUtilTest.java b/buffer/src/test/java/io/netty/buffer/ByteBufUtilTest.java index c872b2c4e1d..fbf106c4e2b 100644 --- a/buffer/src/test/java/io/netty/buffer/ByteBufUtilTest.java +++ b/buffer/src/test/java/io/netty/buffer/ByteBufUtilTest.java @@ -132,13 +132,13 @@ public void testIndexOf() { final ByteBuf needle = Unpooled.copiedBuffer("abc12", CharsetUtil.UTF_8); haystack.readerIndex(1); needle.readerIndex(1); - assertEquals(0, ByteBufUtil.indexOf(needle, haystack)); + assertEquals(1, ByteBufUtil.indexOf(needle, haystack)); haystack.readerIndex(2); needle.readerIndex(3); - assertEquals(1, ByteBufUtil.indexOf(needle, haystack)); + assertEquals(3, ByteBufUtil.indexOf(needle, haystack)); haystack.readerIndex(1); needle.readerIndex(2); - assertEquals(1, ByteBufUtil.indexOf(needle, haystack)); + assertEquals(2, ByteBufUtil.indexOf(needle, haystack)); haystack.release(); haystack = new WrappedByteBuf(Unpooled.copiedBuffer("abc123", CharsetUtil.UTF_8));
train
test
"2022-01-04T21:43:18"
"2021-12-30T13:46:33Z"
shichaoyuan
val
netty/netty/11981_11982
netty/netty
netty/netty/11981
netty/netty/11982
[ "keyword_pr_to_issue" ]
bd7e0f72bdcae09beccb42f0562a862caaf8c89e
d60ea595bcd2399530d8becbced45df0b9b22aa5
[]
[ "Can we just use no codec for this test ?", "Please follow the same structure for tests as the rest of the file. This ensures the test will be run for NIO,OIO,Epoll and Kqueue. I suspect the same fix will need to go there as well. ", "Like said above there is no need to use a codec here. ", "Use an CountdownLatch this way you can also remove the `Thread.sleep(...)` below.", "See above ", "Call `ReferenceCountUtil.release(msg);`", "Call `ReferenceCountUtil.release(msg);`", "Like said above remove the extra codecs", "```suggestion\r\n // When a side enables SO_LINGER and calls showdownOutput(...) to start TCP half-closure, we can not call doDeregister here\r\n```", "```suggestion\r\n // The shutdown function does not block regardless of the SO_LINGER setting on the socket,so we don't need to use\r\n```", "```suggestion\r\n // GlobalEventExecutor to execute the shutdown\r\n```", "```suggestion\r\n```", "```suggestion\r\n```", "```suggestion\r\n```", "```suggestion\r\n```", "```suggestion\r\n```", "```suggestion\r\n```", "```suggestion\r\n```", "Please don't use wildcard imports", "@huibinliupush I am sure you will need to have the same change in the other transports as well (Epoll, KQueue).", "> Please don't use wildcard imports\r\n\r\nreally sorry for this", "> @huibinliupush I am sure you will need to have the same change in the other transports as well (Epoll, KQueue).\r\n\r\n@normanmaurer OK, i'll check it right now\r\n", "@normanmaurer \r\n\r\ni have checked `EpollSocketChannel` and `KQueueSocketChannel` again,they all call \r\n`((AbstractUnsafe) unsafe()).shutdownOutput(promise)` method to shutdownOutput In their parent class\r\n\r\nAm I missing something?", "ah ok... Sorry I thought they also offload :) \r\n" ]
"2022-01-08T12:47:10Z"
[]
[bug] when SO_LINGER is used in the TCP half-closure scenario, the client in the FIN_WAIT2 state cannot receive the data sent from the server which is in the CLOSE_WAIT state
![SO_LINGERHalfClosureBug](https://user-images.githubusercontent.com/5794695/148641718-987a2774-6d10-4400-8d32-c14feaa0417b.png) As shown in the picture above, **when `SO_LINGER` is used on the client side** and the client calls `NioSocketChannel#shutdownOutput()` to **half-close** the TCP connection, the server side's `OP_READ` becomes active; the server shuts down its input, which triggers a `ChannelInputShutdownEvent`, and then sends some data to the client from the handler that processes the `ChannelInputShutdownEvent`. ### Actual behavior The client in the `FIN_WAIT2` state **cannot read and process the data** sent by the server in the `CLOSE_WAIT` state. ### Expected behavior The client in the `FIN_WAIT2` state can read and process the data sent by the server in the `CLOSE_WAIT` state. ### Steps to reproduce ``` ServerBootstrap server = new ServerBootstrap(); server.childOption(ChannelOption.ALLOW_HALF_CLOSURE, true) Bootstrap client = new Bootstrap(); client.option(ChannelOption.SO_LINGER, 1) ``` ### Minimal yet complete reproducer code (or URL to code) [see unit test method testHalfClosureReceiveDataOnFinalWait2StateWhenSoLingerSet](https://github.com/huibinliupush/netty/blob/afd2de8d81146301465ebc62fa1b5675931dd950/testsuite/src/main/java/io/netty/testsuite/transport/socket/SocketHalfClosedTest.java) ### Netty version 4.1.72.Final
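For orientation, a condensed sketch of the client side of this scenario (based on the test added below; the 16-byte payload is arbitrary). With `ALLOW_HALF_CLOSURE` enabled, receiving the peer's FIN fires `ChannelInputShutdownEvent` instead of closing the channel, so the FIN_WAIT_2 side keeps its outbound open and, per this issue, must still be able to read what the CLOSE_WAIT side sends back:

```java
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.socket.ChannelInputShutdownEvent;
import io.netty.channel.socket.ChannelInputShutdownReadComplete;

final class HalfClosureClientSketch {
    static void configure(Bootstrap cb) {
        cb.option(ChannelOption.ALLOW_HALF_CLOSURE, true)
          .handler(new ChannelInitializer<Channel>() {
              @Override
              protected void initChannel(Channel ch) {
                  ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
                      @Override
                      public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
                          if (evt == ChannelInputShutdownEvent.INSTANCE) {
                              // our inbound side is closed; outbound still works
                              ctx.writeAndFlush(ctx.alloc().buffer().writeZero(16));
                          } else if (evt == ChannelInputShutdownReadComplete.INSTANCE) {
                              ctx.close();
                          }
                      }
                  });
              }
          });
    }
}
```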
[ "transport/src/main/java/io/netty/channel/AbstractChannel.java" ]
[ "transport/src/main/java/io/netty/channel/AbstractChannel.java" ]
[ "testsuite/src/main/java/io/netty/testsuite/transport/socket/SocketHalfClosedTest.java" ]
diff --git a/transport/src/main/java/io/netty/channel/AbstractChannel.java b/transport/src/main/java/io/netty/channel/AbstractChannel.java index 5aa2ab92db0..887a3e3c66e 100644 --- a/transport/src/main/java/io/netty/channel/AbstractChannel.java +++ b/transport/src/main/java/io/netty/channel/AbstractChannel.java @@ -650,38 +650,20 @@ private void shutdownOutput(final ChannelPromise promise, Throwable cause) { final Throwable shutdownCause = cause == null ? new ChannelOutputShutdownException("Channel output shutdown") : new ChannelOutputShutdownException("Channel output shutdown", cause); - Executor closeExecutor = prepareToClose(); - if (closeExecutor != null) { - closeExecutor.execute(new Runnable() { - @Override - public void run() { - try { - // Execute the shutdown. - doShutdownOutput(); - promise.setSuccess(); - } catch (Throwable err) { - promise.setFailure(err); - } finally { - // Dispatch to the EventLoop - eventLoop().execute(new Runnable() { - @Override - public void run() { - closeOutboundBufferForShutdown(pipeline, outboundBuffer, shutdownCause); - } - }); - } - } - }); - } else { - try { - // Execute the shutdown. - doShutdownOutput(); - promise.setSuccess(); - } catch (Throwable err) { - promise.setFailure(err); - } finally { - closeOutboundBufferForShutdown(pipeline, outboundBuffer, shutdownCause); - } + + // When a side enables SO_LINGER and calls showdownOutput(...) to start TCP half-closure + // we can not call doDeregister here because we should ensure this side in fin_wait2 state + // can still receive and process the data which is send by another side in the close_wait state。 + // See https://github.com/netty/netty/issues/11981 + try { + // The shutdown function does not block regardless of the SO_LINGER setting on the socket + // so we don't need to use GlobalEventExecutor to execute the shutdown + doShutdownOutput(); + promise.setSuccess(); + } catch (Throwable err) { + promise.setFailure(err); + } finally { + closeOutboundBufferForShutdown(pipeline, outboundBuffer, shutdownCause); } }
diff --git a/testsuite/src/main/java/io/netty/testsuite/transport/socket/SocketHalfClosedTest.java b/testsuite/src/main/java/io/netty/testsuite/transport/socket/SocketHalfClosedTest.java index f2d6466dc6f..8e90217372a 100644 --- a/testsuite/src/main/java/io/netty/testsuite/transport/socket/SocketHalfClosedTest.java +++ b/testsuite/src/main/java/io/netty/testsuite/transport/socket/SocketHalfClosedTest.java @@ -19,6 +19,7 @@ import io.netty.bootstrap.ServerBootstrap; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.Unpooled; import io.netty.channel.Channel; import io.netty.channel.ChannelConfig; import io.netty.channel.ChannelFuture; @@ -33,6 +34,8 @@ import io.netty.channel.socket.ChannelInputShutdownReadComplete; import io.netty.channel.socket.ChannelOutputShutdownEvent; import io.netty.channel.socket.DuplexChannel; +import io.netty.channel.socket.SocketChannel; +import io.netty.util.ReferenceCountUtil; import io.netty.util.UncheckedBooleanSupplier; import io.netty.util.internal.PlatformDependent; import org.junit.jupiter.api.Test; @@ -51,6 +54,81 @@ import static org.junit.jupiter.api.Assumptions.assumeFalse; public class SocketHalfClosedTest extends AbstractSocketTest { + + @Test + @Timeout(value = 5000, unit = MILLISECONDS) + public void testHalfClosureReceiveDataOnFinalWait2StateWhenSoLingerSet(TestInfo testInfo) throws Throwable { + run(testInfo, new Runner<ServerBootstrap, Bootstrap>() { + @Override + public void run(ServerBootstrap serverBootstrap, Bootstrap bootstrap) throws Throwable { + testHalfClosureReceiveDataOnFinalWait2StateWhenSoLingerSet(serverBootstrap, bootstrap); + } + }); + } + + private void testHalfClosureReceiveDataOnFinalWait2StateWhenSoLingerSet(ServerBootstrap sb, Bootstrap cb) + throws Throwable { + Channel serverChannel = null; + Channel clientChannel = null; + + final CountDownLatch waitHalfClosureDone = new CountDownLatch(1); + try { + sb.childOption(ChannelOption.SO_LINGER, 1) + .childHandler(new ChannelInitializer<Channel>() { + + @Override + protected void initChannel(Channel ch) throws Exception { + ch.pipeline().addLast(new ChannelInboundHandlerAdapter() { + + @Override + public void channelActive(final ChannelHandlerContext ctx) { + SocketChannel channel = (SocketChannel) ctx.channel(); + channel.shutdownOutput(); + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + ReferenceCountUtil.release(msg); + waitHalfClosureDone.countDown(); + } + }); + } + }); + + cb.option(ChannelOption.ALLOW_HALF_CLOSURE, true) + .handler(new ChannelInitializer<Channel>() { + @Override + protected void initChannel(Channel ch) throws Exception { + ch.pipeline().addLast(new ChannelInboundHandlerAdapter() { + + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { + if (ChannelInputShutdownEvent.INSTANCE == evt) { + ctx.writeAndFlush(ctx.alloc().buffer().writeZero(16)); + } + + if (ChannelInputShutdownReadComplete.INSTANCE == evt) { + ctx.close(); + } + } + }); + } + }); + + serverChannel = sb.bind().sync().channel(); + clientChannel = cb.connect(serverChannel.localAddress()).sync().channel(); + waitHalfClosureDone.await(); + } finally { + if (clientChannel != null) { + clientChannel.close().sync(); + } + + if (serverChannel != null) { + serverChannel.close().sync(); + } + } + } + @Test @Timeout(value = 10000, unit = MILLISECONDS) public void testHalfClosureOnlyOneEventWhenAutoRead(TestInfo testInfo) throws Throwable {
train
test
"2022-01-07T13:58:14"
"2022-01-08T12:41:33Z"
huibinliupush
val
netty/netty/11984_11990
netty/netty
netty/netty/11984
netty/netty/11990
[ "keyword_pr_to_issue" ]
d60ea595bcd2399530d8becbced45df0b9b22aa5
33247074a0becce08c1f48eec61e8da0c7bbe4c0
[ "@chrisvest can you have a look at this one ?", "I got the test running and showing the expected failure. It disappears if I add a `alloc.trimCurrentThreadCache();` after the `release()` loop. The accounting is done in a bad place, where we might either miss out on activity happening at the cache layer, or we'll have the same memory activity accounted twice. I need to find a better place to do the accounting of the pinned memory." ]
[ "```suggestion\r\n int result = PINNED_UPDATER.addAndGet(this, -delta);\r\n assert result >= 0;\r\n```", "nit: Not strong about this but we could also just use `AtomicInteger` directly here as I wouldn't expect a massive amount of these instances. That said I am not strong on it at all.", "nit: we could size it correctly from the start ", "I wonder if we want to run the test one time with cache enabled and one time without. ", "I was mostly thinking about the number of memory indirections, tbh.", "I guess it not matters too much... That said I am not strong here like I said. So just keep it.", "Added a second test.", "Changed it anyway." ]
"2022-01-11T17:18:05Z"
[]
PooledByteBufAllocator.pinnedDirectMemory is sometimes returning 0 even if some direct buffers are used
I'm trying to monitor the estimate of memory that is used by in-use direct buffers. To do so, I'm trying to use the PooledByteBufAllocator.pinnedDirectMemory() method but sometimes, it seems that this method is returning 0 even if some buffers are currently allocated and used (not yet released). ### Expected behavior PooledByteBufAllocator.pinnedDirectMemory() should not return 0 in case some buffers are currently allocated. ### Actual behavior Sometimes, not always, it seems that the PooledByteBufAllocator.pinnedDirectMemory() is returning 0 even if some buffers are allocated. ### Steps to reproduce Please check attached sample project: [netty.pinnedMemory.test.tar.gz](https://github.com/netty/netty/files/7835795/netty.pinnedMemory.test.tar.gz) The Test class is allocating some buffers, and before releasing them, it displays the pinned direct memory. It also displays an estimate of used memory by traversing arena of direct buffers, and for each arena, it accumulates the diff between the chunk size and the bytes that are not yet allocated by in-use buffers: ``` static long usedMem(List<PoolArenaMetric> arenas) { long totalUsed = 0; for (PoolArenaMetric arenaMetrics : arenas) { for (PoolChunkListMetric arenaMetric : arenaMetrics.chunkLists()) { for (PoolChunkMetric chunkMetric : arenaMetric) { // chunkMetric.chunkSize() returns maximum of bytes that can be served out of the chunk // and chunkMetric.freeBytes() returns the bytes that are not yet allocated by in-use buffers totalUsed += (chunkMetric.chunkSize() - chunkMetric.freeBytes()); } } } return totalUsed; } ``` To run the program: ./gradlew jar ./gradlew runTest The program displays many logs, like this: ``` used=1810432, pinned=1372971008 used=2097152, pinned=1373257728 used=1974272, pinned=1371643904 ``` _used_ corresponds to the result of the _usedMem_ method shown above, and _pinned_ corresponds to the result of the call to alloc.pinnedDirectMemory(). Now, sometimes, you will notice that the pinned memory is zero: ``` used=2031616, pinned=1835008 used=1892352, pinned=188416 used=1916928, pinned=212992 used=1892352, pinned=0 used=2023424, pinned=0 used=2072576, pinned=0 ... used=2220032, pinned=2146779136 used=2310144, pinned=2146869248 used=1843200, pinned=2144354304 used=1974272, pinned=2144485376 used=2007040, pinned=2144518144 used=1998848, pinned=2143215616 used=2138112, pinned=2143354880 ``` To stop, just press CTRL-C ### Minimal yet complete reproducer code (or URL to code) Please see attached sample project above ### Netty version 4.1.72.Final ### JVM version (e.g. `java -version`) openjdk version "1.8.0_302" OpenJDK Runtime Environment (Zulu 8.56.0.23-CA-macos-aarch64) (build 1.8.0_302-b08) OpenJDK 64-Bit Server VM (Zulu 8.56.0.23-CA-macos-aarch64) (build 25.302-b08, mixed mode) ### OS version (e.g. `uname -a`) MacOS M1: Darwin xxx 21.1.0 Darwin Kernel Version 21.1.0: Wed Oct 13 17:33:24 PDT 2021; root:xnu-8019.41.5~1/RELEASE_ARM64_T8101 arm64 The issue is also observed on linux Ubuntu: Linux xxx 5.13.0-22-generic #22-Ubuntu SMP Fri Nov 5 13:21:36 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
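A minimal sketch of the kind of probe involved, assuming the default pooled allocator; `pinnedDirectMemory()` and `trimCurrentThreadCache()` are the real allocator methods referenced in this issue and the maintainer's note above:

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public final class PinnedMemoryProbe {
    public static void main(String[] args) {
        PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
        ByteBuf buf = alloc.directBuffer(64 * 1024);
        // While the buffer is in use, pinned memory should be non-zero;
        // this issue reports it sometimes drops to 0 anyway.
        System.out.println("pinned=" + alloc.pinnedDirectMemory());
        buf.release();
        // With the fix, pinned drops to zero on release; trimming the
        // thread-local cache additionally returns cached memory to the arenas.
        alloc.trimCurrentThreadCache();
        System.out.println("pinned after release=" + alloc.pinnedDirectMemory());
    }
}
```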
[ "buffer/src/main/java/io/netty/buffer/PoolChunk.java", "buffer/src/main/java/io/netty/buffer/PooledByteBuf.java" ]
[ "buffer/src/main/java/io/netty/buffer/PoolChunk.java", "buffer/src/main/java/io/netty/buffer/PooledByteBuf.java" ]
[ "buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java" ]
diff --git a/buffer/src/main/java/io/netty/buffer/PoolChunk.java b/buffer/src/main/java/io/netty/buffer/PoolChunk.java index 70bc6419275..96df62ffed9 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolChunk.java +++ b/buffer/src/main/java/io/netty/buffer/PoolChunk.java @@ -19,6 +19,7 @@ import java.util.ArrayDeque; import java.util.Deque; import java.util.PriorityQueue; +import java.util.concurrent.atomic.AtomicInteger; /** * Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk @@ -160,6 +161,11 @@ final class PoolChunk<T> implements PoolChunkMetric { */ private final PoolSubpage<T>[] subpages; + /** + * Accounting of pinned memory – memory that is currently in use by ByteBuf instances. + */ + private final AtomicInteger pinnedBytes; + private final int pageSize; private final int pageShifts; private final int chunkSize; @@ -172,7 +178,6 @@ final class PoolChunk<T> implements PoolChunkMetric { private final Deque<ByteBuffer> cachedNioBuffers; int freeBytes; - int pinnedBytes; PoolChunkList<T> parent; PoolChunk<T> prev; @@ -202,6 +207,7 @@ final class PoolChunk<T> implements PoolChunkMetric { insertAvailRun(0, pages, initHandle); cachedNioBuffers = new ArrayDeque<ByteBuffer>(8); + pinnedBytes = new AtomicInteger(); } /** Creates a special chunk that is not pooled. */ @@ -217,6 +223,7 @@ final class PoolChunk<T> implements PoolChunkMetric { subpages = null; chunkSize = size; cachedNioBuffers = null; + pinnedBytes = new AtomicInteger(); } private static LongPriorityQueue[] newRunsAvailqueueArray(int size) { @@ -343,7 +350,6 @@ private long allocateRun(int runSize) { int pinnedSize = runSize(pageShifts, handle); freeBytes -= pinnedSize; - pinnedBytes += pinnedSize; return handle; } } @@ -453,7 +459,6 @@ private long allocateSubpage(int sizeIdx) { */ void free(long handle, int normCapacity, ByteBuffer nioBuffer) { int runSize = runSize(pageShifts, handle); - pinnedBytes -= runSize; if (isSubpage(handle)) { int sizeIdx = arena.size2SizeIdx(normCapacity); PoolSubpage<T> head = arena.findSubpagePoolHead(sizeIdx); @@ -557,8 +562,9 @@ private static long toRunHandle(int runOffset, int runPages, int inUsed) { void initBuf(PooledByteBuf<T> buf, ByteBuffer nioBuffer, long handle, int reqCapacity, PoolThreadCache threadCache) { if (isRun(handle)) { + int maxLength = runSize(pageShifts, handle); buf.init(this, nioBuffer, handle, runOffset(handle) << pageShifts, - reqCapacity, runSize(pageShifts, handle), arena.parent.threadCache()); + reqCapacity, maxLength, arena.parent.threadCache()); } else { initBufWithSubpage(buf, nioBuffer, handle, reqCapacity, threadCache); } @@ -577,6 +583,18 @@ void initBufWithSubpage(PooledByteBuf<T> buf, ByteBuffer nioBuffer, long handle, buf.init(this, nioBuffer, handle, offset, reqCapacity, s.elemSize, threadCache); } + void incrementPinnedMemory(int delta) { + assert delta > 0; + int result = pinnedBytes.addAndGet(delta); + assert result > 0; + } + + void decrementPinnedMemory(int delta) { + assert delta > 0; + int result = pinnedBytes.addAndGet(-delta); + assert result >= 0; + } + @Override public int chunkSize() { return chunkSize; @@ -590,9 +608,7 @@ public int freeBytes() { } public int pinnedBytes() { - synchronized (arena) { - return pinnedBytes; - } + return pinnedBytes.get(); } @Override diff --git a/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java b/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java index bd57f5acfa8..a7fbf48e563 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java +++ 
b/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java @@ -62,6 +62,7 @@ private void init0(PoolChunk<T> chunk, ByteBuffer nioBuffer, assert !PoolChunk.isSubpage(handle) || chunk.arena.size2SizeIdx(maxLength) <= chunk.arena.smallMaxSizeIdx: "Allocated small sub-page handle for a buffer size that isn't \"small.\""; + chunk.incrementPinnedMemory(maxLength); this.chunk = chunk; memory = chunk.memory; tmpNioBuf = nioBuffer; @@ -117,6 +118,7 @@ public final ByteBuf capacity(int newCapacity) { } // Reallocation required. + chunk.decrementPinnedMemory(maxLength); chunk.arena.reallocate(this, newCapacity, true); return this; } @@ -170,6 +172,7 @@ protected final void deallocate() { final long handle = this.handle; this.handle = -1; memory = null; + chunk.decrementPinnedMemory(maxLength); chunk.arena.free(chunk, tmpNioBuf, handle, maxLength, cache); tmpNioBuf = null; chunk = null;
diff --git a/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java b/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java index e2bf4672361..0a977ff0b71 100644 --- a/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java +++ b/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java @@ -30,7 +30,9 @@ import java.util.Random; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.LockSupport; import org.junit.jupiter.api.Timeout; @@ -821,4 +823,92 @@ private void testUsedHeapMemory(int initialCapacity) { trimCaches(allocator); assertEquals(0, allocator.pinnedDirectMemory()); } + + @Test + public void pinnedMemoryMustReflectBuffersInUseWithThreadLocalCaching() { + pinnedMemoryMustReflectBuffersInUse(true); + } + + @Test + public void pinnedMemoryMustReflectBuffersInUseWithoutThreadLocalCaching() { + pinnedMemoryMustReflectBuffersInUse(false); + } + + private static void pinnedMemoryMustReflectBuffersInUse(boolean useThreadLocalCaching) { + int smallCacheSize; + int normalCacheSize; + if (useThreadLocalCaching) { + smallCacheSize = PooledByteBufAllocator.defaultSmallCacheSize(); + normalCacheSize = PooledByteBufAllocator.defaultNormalCacheSize(); + } else { + smallCacheSize = 0; + normalCacheSize = 0; + } + int directMemoryCacheAlignment = 0; + PooledByteBufAllocator alloc = new PooledByteBufAllocator( + PooledByteBufAllocator.defaultPreferDirect(), + PooledByteBufAllocator.defaultNumHeapArena(), + PooledByteBufAllocator.defaultNumDirectArena(), + PooledByteBufAllocator.defaultPageSize(), + PooledByteBufAllocator.defaultMaxOrder(), + smallCacheSize, + normalCacheSize, + useThreadLocalCaching, + directMemoryCacheAlignment); + PooledByteBufAllocatorMetric metric = alloc.metric(); + AtomicLong capSum = new AtomicLong(); + + for (long index = 0; index < 10000; index++) { + ThreadLocalRandom rnd = ThreadLocalRandom.current(); + int bufCount = rnd.nextInt(1, 100); + List<ByteBuf> buffers = new ArrayList<ByteBuf>(bufCount); + + if (index % 2 == 0) { + // ensure that we allocate a small buffer + for (int i = 0; i < bufCount; i++) { + ByteBuf buf = alloc.directBuffer(rnd.nextInt(8, 128)); + buffers.add(buf); + capSum.addAndGet(buf.capacity()); + } + } else { + // allocate a larger buffer + for (int i = 0; i < bufCount; i++) { + ByteBuf buf = alloc.directBuffer(rnd.nextInt(1024, 1024 * 100)); + buffers.add(buf); + capSum.addAndGet(buf.capacity()); + } + } + + if (index % 100 == 0) { + long used = usedMemory(metric.directArenas()); + long pinned = alloc.pinnedDirectMemory(); + assertThat(capSum.get()).isLessThanOrEqualTo(pinned); + assertThat(pinned).isLessThanOrEqualTo(used); + } + + for (ByteBuf buffer : buffers) { + buffer.release(); + } + capSum.set(0); + // After releasing all buffers, pinned memory must be zero + assertThat(alloc.pinnedDirectMemory()).isZero(); + } + } + + /** + * Returns an estimate of bytes used by currently in-use buffers + */ + private static long usedMemory(List<PoolArenaMetric> arenas) { + long totalUsed = 0; + for (PoolArenaMetric arenaMetrics : arenas) { + for (PoolChunkListMetric arenaMetric : arenaMetrics.chunkLists()) { + for (PoolChunkMetric chunkMetric : arenaMetric) { + // chunkMetric.chunkSize() returns maximum of bytes that can be served out 
of the chunk + // and chunkMetric.freeBytes() returns the bytes that are not yet allocated by in-use buffers + totalUsed += chunkMetric.chunkSize() - chunkMetric.freeBytes(); + } + } + } + return totalUsed; + } }
test
test
"2022-01-11T16:55:55"
"2022-01-09T20:17:28Z"
pderop
val
netty/netty/11864_11996
netty/netty
netty/netty/11864
netty/netty/11996
[ "keyword_pr_to_issue" ]
c6e65cc006c4e0623e84ef96a88dc5bd5cb00a82
55cdaa75adfaff89c9929de9893c8ff7bd2891ae
[ "@chrisvest I think the following issue seems still exists in the newest 4.1.72.Final version.\r\n\r\n\"When thread2 is recycling objects which created by thread1, if thread1 was finished/terminated at this moment, then thread2 will still put the objects back to thread1's queue, which is not necessary.\"\r\n\r\nCorrects me if I wrongly understood it.", "@laosijikaichele Right, yeah… I'm not sure it makes sense to salvage those objects. We could be in a system shutdown phase; there might not be other threads to offload the objects to; we'd have to centrally track threads joining and leaving the pool.", "@chrisvest What about we store the thread to `WeakReference` in `io.netty.util.Recycler.LocalPool`, and do the `null` check while recycling, like the following:\r\n```\r\nprivate static final class LocalPool<T> {\r\n ...\r\n private final WeakReference<Thread> threadRef = new WeakReference<>(Thread.currentThread());\r\n ...\r\n}\r\n\r\nprivate static final class DefaultHandle<T> implements Handle<T> {\r\n @Override\r\n public void recycle(Object object) {\r\n ...\r\n if (localPool.threadRef.get() == null) {\r\n return; // Do not need to recycle;\r\n }\r\n ...\r\n }\r\n}\r\n```\r\nWDYT?", "@laosijikaichele Not a fan of adding back `WeakReferences` when we just got rid of them. They have some overhead that can impact GC pause times. But we can mark the local pool in `FastThreadLocal.onRemoval` somehow, and avoid adding handles to the queue based on that.", "@chrisvest It sounds good. :) " ]
[ "Remove ?", "Removed." ]
"2022-01-12T19:35:11Z"
[]
Multithreaded reclamation of objects in the Recycler object pool
### Netty version 4.1.70.Final ### Issue The issue is shown in the picture below: thread 2 is recycling objects which belong to stack1, which was created by thread 1. If thread1 was GC'ed at this moment, I think thread2 no longer needs to add this object to the `weakOrderQueue` node inside `Stack1`, because `stack1` will be GC'ed sooner or later. This object will never be used again even if it is recycled. ![netty recycler issue](https://user-images.githubusercontent.com/5794695/143539720-36080ab2-458e-4c52-b6cb-db0c2f6f602d.png) But the `Recycler`'s recycling code does not handle the `threadRef.get() == null` case and still continues to add objects to the `weakOrderQueue` node. I think this is a waste of time; the sooner we handle the `threadRef.get() == null` case, the better. ``` private static final class Stack<T> { void push(DefaultHandle<?> item) { Thread currentThread = Thread.currentThread(); if (threadRef.get() == currentThread) { // The current Thread is the thread that belongs to the Stack, we can try to push the object now. pushNow(item); } else { // The current Thread is not the one that belongs to the Stack // (or the Thread that belonged to the Stack was collected already), we need to signal that the push // happens later. pushLater(item, currentThread); } } } ```
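A conceptual sketch (names simplified; not Netty's actual classes) of the direction the merged patch takes: instead of checking a `WeakReference` to the owner thread, the owner's pool clears its queue reference on teardown, so late recyclers simply drop the object instead of pushing it into a dead pool:

```java
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

final class LocalPoolSketch<T> {
    // volatile so foreign threads observe the teardown
    private volatile Queue<T> handles = new ConcurrentLinkedQueue<T>();

    void onOwnerThreadRemoved() {
        Queue<T> q = handles;
        handles = null; // publication: late recyclers will observe null
        q.clear();
    }

    void release(T handle) {
        Queue<T> q = handles;
        if (q != null) {
            q.offer(handle); // pool still alive: recycle
        }
        // otherwise: the owner thread is gone, let the object be garbage collected
    }
}
```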
[ "common/src/main/java/io/netty/util/Recycler.java" ]
[ "common/src/main/java/io/netty/util/Recycler.java" ]
[]
diff --git a/common/src/main/java/io/netty/util/Recycler.java b/common/src/main/java/io/netty/util/Recycler.java index b80c749df16..f518aaa09b4 100644 --- a/common/src/main/java/io/netty/util/Recycler.java +++ b/common/src/main/java/io/netty/util/Recycler.java @@ -102,7 +102,9 @@ protected LocalPool<T> initialValue() { @Override protected void onRemoval(LocalPool<T> value) throws Exception { super.onRemoval(value); - value.pooledHandles.clear(); + MessagePassingQueue<DefaultHandle<T>> handles = value.pooledHandles; + value.pooledHandles = null; + handles.clear(); } }; @@ -254,7 +256,7 @@ void toAvailable() { private static final class LocalPool<T> { private final int ratioInterval; - private final MessagePassingQueue<DefaultHandle<T>> pooledHandles; + private volatile MessagePassingQueue<DefaultHandle<T>> pooledHandles; private int ratioCounter; @SuppressWarnings("unchecked") @@ -269,16 +271,23 @@ private static final class LocalPool<T> { } DefaultHandle<T> claim() { + MessagePassingQueue<DefaultHandle<T>> handles = pooledHandles; + if (handles == null) { + return null; + } DefaultHandle<T> handle; do { - handle = pooledHandles.relaxedPoll(); + handle = handles.relaxedPoll(); } while (handle != null && !handle.availableToClaim()); return handle; } void release(DefaultHandle<T> handle) { + MessagePassingQueue<DefaultHandle<T>> handles = pooledHandles; handle.toAvailable(); - pooledHandles.relaxedOffer(handle); + if (handles != null) { + handles.relaxedOffer(handle); + } } DefaultHandle<T> newHandle() {
null
test
test
"2022-01-12T08:20:24"
"2021-11-26T07:32:24Z"
huibinliupush
val
netty/netty/12017_12018
netty/netty
netty/netty/12017
netty/netty/12018
[ "keyword_pr_to_issue" ]
f984708d0b1e1b25b6534a79dc44c438d8a712d4
dbdd0d46622ec68b124d312722002be156adc533
[ "@MrEasy can you provide a PR ?", "> @MrEasy can you provide a PR ?\r\n\r\nSure, can do if you agree.\r\nWould do it for epoll and kqueue. Not for netty-transport-native-unix-common/linux-x86_64, since it does not ship a library, only sources as far as I see.", "@MrEasy yes sounds good... thanks!", "PR#12018 for main. Also applicable for `4.1` branch" ]
[ "@MrEasy this doesn't look correct", "@MrEasy this doesn't look correct", "thx, of course - updated.", "thx, of course - updated.", "```suggestion\r\n <Fragment-Host>io.netty.resolver-dns-classes-macos</Fragment-Host>\r\n```", "```suggestion\r\n <Fragment-Host>io.netty.resolver-dns-classes-macos</Fragment-Host>\r\n```" ]
"2022-01-18T15:11:40Z"
[]
netty-transport-native-* Bundles missing a Fragment-Host
### Initial Position The netty-transport-native-epoll and -kqueue bundles ship native libraries, e.g. for linux_x86_64. They do declare this in their manifest via the Bundle-NativeCode header to make it available in an OSGi environment. ### Actual behavior However, when the native library gets loaded via e.g. io.netty.channel.epoll.Epoll, it is not found, failing with the following error: ``` java.lang.UnsatisfiedLinkError: could not load a native library: netty_transport_native_epoll_x86_64 caused by: java.io.FileNotFoundException: META-INF/native/libnetty_transport_native_epoll_x86_64.so ``` You can see that the file location is correctly retrieved from the manifest header of the netty-transport-native-epoll bundle, but class Epoll tries to load the resource from its own bundle classloader (netty-transport-classes-epoll), where the resource does not reside. ### Evaluation and Proposal for Fix What is missing, from my point of view, is the declaration of the Fragment-Host that the bundle shipping the native lib should be attached to. In the case of epoll: `Fragment-Host: io.netty.transport-classes-epoll` After altering this in my environment (Karaf with Artemis using Netty), it immediately works: class Epoll can then successfully load the native lib shipped in the fragment bundle. ### Netty version 4.1.73 ### JVM version openjdk version "17.0.1" 2021-10-19 LTS ### OS version Ubuntu 21.10, 5.13.0-25-generic
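Roughly, the resulting manifest of the x86_64 epoll native artifact should then carry both headers (values taken from the pom changes below; the line wrapping here is illustrative):

```
Bundle-NativeCode: META-INF/native/libnetty_transport_native_epoll_x86_64.so; osname=Linux; processor=x86_64,*
Fragment-Host: io.netty.transport-classes-epoll
```

With the Fragment-Host header, the OSGi framework attaches the native-code bundle as a fragment of the classes bundle, so the classes bundle's classloader can resolve the `.so` resource.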
[ "resolver-dns-native-macos/pom.xml", "transport-native-epoll/pom.xml", "transport-native-kqueue/pom.xml" ]
[ "resolver-dns-native-macos/pom.xml", "transport-native-epoll/pom.xml", "transport-native-kqueue/pom.xml" ]
[]
diff --git a/resolver-dns-native-macos/pom.xml b/resolver-dns-native-macos/pom.xml index 536a4bf9396..82edc515075 100644 --- a/resolver-dns-native-macos/pom.xml +++ b/resolver-dns-native-macos/pom.xml @@ -107,6 +107,7 @@ </manifest> <manifestEntries> <Bundle-NativeCode>META-INF/native/libnetty_resolver_dns_native_macos_${os.detected.arch}.jnilib; osname=MacOSX; processor=${os.detected.arch}</Bundle-NativeCode> + <Fragment-Host>io.netty.resolver-dns-classes-macos</Fragment-Host> <Automatic-Module-Name>${javaModuleName}</Automatic-Module-Name> </manifestEntries> <index>true</index> @@ -213,6 +214,7 @@ </manifest> <manifestEntries> <Bundle-NativeCode>META-INF/native/libnetty_resolver_dns_native_macos_aarch_64.jnilib; osname=MacOSX; processor=aarch_64</Bundle-NativeCode> + <Fragment-Host>io.netty.resolver-dns-classes-macos</Fragment-Host> <Automatic-Module-Name>${javaModuleName}</Automatic-Module-Name> </manifestEntries> <index>true</index> diff --git a/transport-native-epoll/pom.xml b/transport-native-epoll/pom.xml index aba50c9d968..11ddb94ff38 100644 --- a/transport-native-epoll/pom.xml +++ b/transport-native-epoll/pom.xml @@ -181,6 +181,7 @@ </manifest> <manifestEntries> <Bundle-NativeCode>META-INF/native/libnetty_transport_native_epoll_${os.detected.arch}.so; osname=Linux; processor=${os.detected.arch},*</Bundle-NativeCode> + <Fragment-Host>io.netty.transport-classes-epoll</Fragment-Host> <Automatic-Module-Name>${javaModuleName}</Automatic-Module-Name> </manifestEntries> <index>true</index> @@ -331,6 +332,7 @@ </manifest> <manifestEntries> <Bundle-NativeCode>META-INF/native/libnetty_transport_native_epoll_aarch_64.so; osname=Linux; processor=aarch_64,*</Bundle-NativeCode> + <Fragment-Host>io.netty.transport-classes-epoll</Fragment-Host> <Automatic-Module-Name>${javaModuleName}</Automatic-Module-Name> </manifestEntries> <index>true</index> diff --git a/transport-native-kqueue/pom.xml b/transport-native-kqueue/pom.xml index 91f5ee07d8b..297a0a10cc9 100644 --- a/transport-native-kqueue/pom.xml +++ b/transport-native-kqueue/pom.xml @@ -110,6 +110,7 @@ </manifest> <manifestEntries> <Bundle-NativeCode>META-INF/native/libnetty_transport_native_kqueue_${os.detected.arch}.jnilib; osname=MacOSX; processor=${os.detected.arch}</Bundle-NativeCode> + <Fragment-Host>io.netty.transport-classes-kqueue</Fragment-Host> <Automatic-Module-Name>${javaModuleName}</Automatic-Module-Name> </manifestEntries> <index>true</index> @@ -216,6 +217,7 @@ </manifest> <manifestEntries> <Bundle-NativeCode>META-INF/native/libnetty_transport_native_kqueue_aarch_64.jnilib; osname=MacOSX; processor=aarch_64</Bundle-NativeCode> + <Fragment-Host>io.netty.transport-classes-kqueue</Fragment-Host> <Automatic-Module-Name>${javaModuleName}</Automatic-Module-Name> </manifestEntries> <index>true</index> @@ -320,6 +322,7 @@ </manifest> <manifestEntries> <Bundle-NativeCode>META-INF/native/libnetty_transport_native_kqueue_${os.detected.arch}.jnilib; osname=OpenBSD; processor=${os.detected.arch}</Bundle-NativeCode> + <Fragment-Host>io.netty.transport-classes-kqueue</Fragment-Host> </manifestEntries> <index>true</index> <manifestFile>${project.build.outputDirectory}/META-INF/MANIFEST.MF</manifestFile> @@ -423,6 +426,7 @@ </manifest> <manifestEntries> <Bundle-NativeCode>META-INF/native/libnetty_transport_native_kqueue_${os.detected.arch}.jnilib; osname=FreeBSD; processor=${os.detected.arch}</Bundle-NativeCode> + <Fragment-Host>io.netty.transport-classes-kqueue</Fragment-Host> </manifestEntries> <index>true</index> 
<manifestFile>${project.build.outputDirectory}/META-INF/MANIFEST.MF</manifestFile>
null
val
test
"2022-01-19T09:06:31"
"2022-01-18T13:41:48Z"
MrEasy
val
netty/netty/10801_12066
netty/netty
netty/netty/10801
netty/netty/12066
[ "keyword_pr_to_issue" ]
ac17a516d443f5087ac1bf0e51ef748d8314d0dc
77ed04330487e7b7f319c41dbe8dd96568d46ba2
[ "I will see this for sure.", "> I will see this for sure.\r\n\r\nsorry, is there any conclusion?", "HTTP/2 has chunked data frames already. Do you want some sort of automatic chunking of a big `ByteBuf` or `RandomFileAccess`?", "Yes, just something like the `ChunkedInput` for writing a large file.", "@Ech0Fan It's on my TO-DO list. But super busy these days so I'll tackle this a little later.", "Hi @normanmaurer \r\nAs this ticket is still open, do you confirm there's currently no way to use `ChunkedInput` with HTTP/2 in Netty?\r\nCheers", "lol, I seriously forgot this. I will fire a POC PR by this weekend.", "Actually, I suspect my issue is that I have a `ChunkedInput<ByteBuf>` but it might work if I have a `ChunkedInput<HttpContent>`.", "Confirmed. `ChunkedInput<HttpContent>` works nicely with `HttpToHttp2ConnectionHandler`. My issue was that `HttpToHttp2ConnectionHandler` lets anything else than `HttpMessage` and `HttpContent` pass through instead of generating DataFrames. I just had to change my `ChunkedInput<ByteBuf>` and decorate my chunks with `DefaultHttpContent`.", "There we go, there are three HTTP/2 implementation (if I'm correct). Should I focus on `Http2FrameCodec`?\r\n\r\n@normanmaurer Let me know your opinion. ", "@slandelle this should be fixed now... \r\nThanks to @hyperxpro " ]
[ "```suggestion\r\npublic final class Http2DataChunkedInput implements ChunkedInput<Http2DataFrame> {\r\n```", "```suggestion\r\n return false;\r\n```", "We can optimize this a bit imho.\r\n\r\n```suggestion\r\n public Http2DataFrame readChunk(ByteBufAllocator allocator) throws Exception {\r\n if (endStreamSent) {\r\n return null;\r\n }\r\n if (input.isEndOfInput()) {\r\n endStreamSent = true;\r\n return new DefaultHttp2DataFrame(true).stream(stream);\r\n }\r\n ByteBuf buf = input.readChunk(allocator);\r\n if (buf == null) {\r\n return null;\r\n }\r\n final Http2DataFrame dataFrame = new DefaultHttp2DataFrame(buf, input.isEndOfInput()).stream(stream);\r\n if (dataFrame.isEndStream()) {\r\n endStreamSent = true;\r\n }\r\n return dataFrame;\r\n }\r\n```", "don't use wildcards ", "```suggestion\r\n @Override\r\n public Http2DataFrame readChunk(ByteBufAllocator allocator) throws Exception {\r\n```", "Honestly I think this is not something we need to assert.`\r\n```suggestion\r\n```" ]
"2022-02-02T06:45:57Z"
[]
Consider supporting HTTP/2 chunked input
In HTTP/1, we can use a `ChunkedWriteHandler` to write an `HttpChunkedInput`, but in HTTP/2 we can only use `Http2DataWriter#writeData(xx)` to write a `ByteBuf` instance. Consider supporting a `writeChunked(xx)` method or a utility class (probably implemented on top of `Http2RemoteFlowController#isWritable()` and `Http2RemoteFlowController$Listener#writabilityChanged(stream)`) for writing a large data stream asynchronously, which otherwise risks OOM.
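The resolving PR (see the patch below) adds an `Http2DataChunkedInput` that wraps a `ChunkedInput<ByteBuf>` and emits `Http2DataFrame`s, flagging the last one with `endStream`. A minimal usage sketch, assuming a pipeline of `Http2FrameCodec` followed by `ChunkedWriteHandler`; the handler class name and file path are illustrative only:

```java
import java.io.File;

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.http2.DefaultHttp2Headers;
import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame;
import io.netty.handler.codec.http2.Http2DataChunkedInput;
import io.netty.handler.codec.http2.Http2HeadersFrame;
import io.netty.handler.stream.ChunkedFile;
import io.netty.util.ReferenceCountUtil;

public class ChunkedFileHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        if (msg instanceof Http2HeadersFrame) {
            Http2HeadersFrame request = (Http2HeadersFrame) msg;
            // Answer on the same HTTP/2 stream as the request.
            ctx.write(new DefaultHttp2HeadersFrame(
                    new DefaultHttp2Headers().status("200")).stream(request.stream()));
            // ChunkedWriteHandler pulls one chunk at a time while the channel
            // stays writable; the final Http2DataFrame carries endStream=true.
            ctx.writeAndFlush(new Http2DataChunkedInput(
                    new ChunkedFile(new File("/path/to/large-file.bin")),
                    request.stream()));
        } else {
            ReferenceCountUtil.release(msg);
        }
    }
}
```

Backpressure comes from `ChunkedWriteHandler`, which only fetches the next chunk while the channel is writable, so a large file never has to be resident in memory all at once.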
[]
[ "codec-http2/src/main/java/io/netty/handler/codec/http2/Http2DataChunkedInput.java", "example/src/main/java/io/netty/example/http2/file/Http2StaticFileServer.java", "example/src/main/java/io/netty/example/http2/file/Http2StaticFileServerHandler.java", "example/src/main/java/io/netty/example/http2/file/Http2StaticFileServerInitializer.java" ]
[ "codec-http2/src/test/java/io/netty/handler/codec/http2/Http2DataChunkedInputTest.java" ]
diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2DataChunkedInput.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2DataChunkedInput.java new file mode 100644 index 00000000000..ac382ffde63 --- /dev/null +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2DataChunkedInput.java @@ -0,0 +1,116 @@ +/* + * Copyright 2022 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.handler.codec.http2; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.stream.ChunkedInput; +import io.netty.util.internal.ObjectUtil; + +/** + * A {@link ChunkedInput} that fetches data chunk by chunk for use with HTTP/2 Data Frames. + * <p> + * Each chunk from the input data will be wrapped within a {@link Http2DataFrame}. At the end of the input data, + * {@link Http2DataFrame#isEndStream()} will be set to true and will be written. + * <p> + * <p> + * <pre> + * + * public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + * if (msg instanceof Http2HeadersFrame) { + * Http2HeadersFrame http2HeadersFrame = (Http2HeadersFrame) msg; + * + * Http2HeadersFrame response = new DefaultHttp2HeadersFrame(new DefaultHttp2Headers().status("200")); + * response.stream(http2HeadersFrame.stream()); + * ctx.write(response); + * + * ChannelFuture sendFileFuture = ctx.writeAndFlush(new Http2DataChunkedInput( + * new ChunkedFile(new File(("/home/meow/cats.mp4"))), http2HeadersFrame.stream())); + * } + * } + * </pre> + */ +public final class Http2DataChunkedInput implements ChunkedInput<Http2DataFrame> { + + private final ChunkedInput<ByteBuf> input; + private final Http2FrameStream stream; + private boolean endStreamSent; + + /** + * Creates a new instance using the specified input. 
+ * + * @param input {@link ChunkedInput} containing data to write + * @param stream {@link Http2FrameStream} holding stream info + */ + public Http2DataChunkedInput(ChunkedInput<ByteBuf> input, Http2FrameStream stream) { + this.input = ObjectUtil.checkNotNull(input, "input"); + this.stream = ObjectUtil.checkNotNull(stream, "stream"); + } + + @Override + public boolean isEndOfInput() throws Exception { + if (input.isEndOfInput()) { + // Only end of input after last HTTP chunk has been sent + return endStreamSent; + } + return false; + } + + @Override + public void close() throws Exception { + input.close(); + } + + @Deprecated + @Override + public Http2DataFrame readChunk(ChannelHandlerContext ctx) throws Exception { + return readChunk(ctx.alloc()); + } + + @Override + public Http2DataFrame readChunk(ByteBufAllocator allocator) throws Exception { + if (endStreamSent) { + return null; + } + + if (input.isEndOfInput()) { + endStreamSent = true; + return new DefaultHttp2DataFrame(true).stream(stream); + } + + ByteBuf buf = input.readChunk(allocator); + if (buf == null) { + return null; + } + + final Http2DataFrame dataFrame = new DefaultHttp2DataFrame(buf, input.isEndOfInput()).stream(stream); + if (dataFrame.isEndStream()) { + endStreamSent = true; + } + + return dataFrame; + } + + @Override + public long length() { + return input.length(); + } + + @Override + public long progress() { + return input.progress(); + } +} diff --git a/example/src/main/java/io/netty/example/http2/file/Http2StaticFileServer.java b/example/src/main/java/io/netty/example/http2/file/Http2StaticFileServer.java new file mode 100644 index 00000000000..03a04b60a6a --- /dev/null +++ b/example/src/main/java/io/netty/example/http2/file/Http2StaticFileServer.java @@ -0,0 +1,72 @@ +/* + * Copyright 2022 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.example.http2.file; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.Http2SecurityUtil; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; +import io.netty.handler.ssl.ApplicationProtocolConfig; +import io.netty.handler.ssl.ApplicationProtocolNames; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslProvider; +import io.netty.handler.ssl.SupportedCipherSuiteFilter; +import io.netty.handler.ssl.util.SelfSignedCertificate; + +public final class Http2StaticFileServer { + + private static final int PORT = Integer.parseInt(System.getProperty("port", "8443")); + + public static void main(String[] args) throws Exception { + SelfSignedCertificate ssc = new SelfSignedCertificate(); + SslContext sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()) + .sslProvider(SslProvider.JDK) + .ciphers(Http2SecurityUtil.CIPHERS, SupportedCipherSuiteFilter.INSTANCE) + .applicationProtocolConfig(new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + // NO_ADVERTISE is currently the only mode supported by both OpenSsl and JDK providers. + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + // ACCEPT is currently the only mode supported by both OpenSsl and JDK providers. + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + ApplicationProtocolNames.HTTP_2, + ApplicationProtocolNames.HTTP_1_1)) + .build(); + + EventLoopGroup bossGroup = new NioEventLoopGroup(2); + EventLoopGroup workerGroup = new NioEventLoopGroup(4); + try { + ServerBootstrap b = new ServerBootstrap(); + b.group(bossGroup, workerGroup) + .channel(NioServerSocketChannel.class) + .handler(new LoggingHandler(LogLevel.INFO)) + .childHandler(new Http2StaticFileServerInitializer(sslCtx)); + + Channel ch = b.bind(PORT).sync().channel(); + + System.out.println("Open your web browser and navigate to https://127.0.0.1:" + PORT + '/'); + + ch.closeFuture().sync(); + } finally { + bossGroup.shutdownGracefully(); + workerGroup.shutdownGracefully(); + } + } +} diff --git a/example/src/main/java/io/netty/example/http2/file/Http2StaticFileServerHandler.java b/example/src/main/java/io/netty/example/http2/file/Http2StaticFileServerHandler.java new file mode 100644 index 00000000000..2f4ca8ae587 --- /dev/null +++ b/example/src/main/java/io/netty/example/http2/file/Http2StaticFileServerHandler.java @@ -0,0 +1,380 @@ +/* + * Copyright 2022 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.example.http2.file; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelProgressiveFuture; +import io.netty.channel.ChannelProgressiveFutureListener; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http2.DefaultHttp2DataFrame; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.Http2DataChunkedInput; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2FrameStream; +import io.netty.handler.codec.http2.Http2Headers; +import io.netty.handler.codec.http2.Http2HeadersFrame; +import io.netty.handler.stream.ChunkedFile; +import io.netty.util.CharsetUtil; +import io.netty.util.internal.SystemPropertyUtil; + +import javax.activation.MimetypesFileTypeMap; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.RandomAccessFile; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.text.SimpleDateFormat; +import java.util.Calendar; +import java.util.Date; +import java.util.GregorianCalendar; +import java.util.Locale; +import java.util.TimeZone; +import java.util.regex.Pattern; + +import static io.netty.handler.codec.http.HttpMethod.GET; +import static io.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN; +import static io.netty.handler.codec.http.HttpResponseStatus.FOUND; +import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR; +import static io.netty.handler.codec.http.HttpResponseStatus.METHOD_NOT_ALLOWED; +import static io.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND; +import static io.netty.handler.codec.http.HttpResponseStatus.NOT_MODIFIED; +import static io.netty.handler.codec.http.HttpResponseStatus.OK; + +/** + * A simple handler that serves incoming HTTP requests to send their respective + * HTTP responses. It also implements {@code 'If-Modified-Since'} header to + * take advantage of browser cache, as described in + * <a href="https://tools.ietf.org/html/rfc2616#section-14.25">RFC 2616</a>. + * + * <h3>How Browser Caching Works</h3> + * <p> + * Web browser caching works with HTTP headers as illustrated by the following + * sample: + * <ol> + * <li>Request #1 returns the content of {@code /file1.txt}.</li> + * <li>Contents of {@code /file1.txt} is cached by the browser.</li> + * <li>Request #2 for {@code /file1.txt} does not return the contents of the + * file again. Rather, a 304 Not Modified is returned. 
This tells the + * browser to use the contents stored in its cache.</li> + * <li>The server knows the file has not been modified because the + * {@code If-Modified-Since} date is the same as the file's last + * modified date.</li> + * </ol> + * + * <pre> + * Request #1 Headers + * =================== + * GET /file1.txt HTTP/1.1 + * + * Response #1 Headers + * =================== + * HTTP/1.1 200 OK + * Date: Tue, 01 Mar 2011 22:44:26 GMT + * Last-Modified: Wed, 30 Jun 2010 21:36:48 GMT + * Expires: Tue, 01 Mar 2012 22:44:26 GMT + * Cache-Control: private, max-age=31536000 + * + * Request #2 Headers + * =================== + * GET /file1.txt HTTP/1.1 + * If-Modified-Since: Wed, 30 Jun 2010 21:36:48 GMT + * + * Response #2 Headers + * =================== + * HTTP/1.1 304 Not Modified + * Date: Tue, 01 Mar 2011 22:44:28 GMT + * + * </pre> + */ +public class Http2StaticFileServerHandler extends ChannelDuplexHandler { + + public static final String HTTP_DATE_FORMAT = "EEE, dd MMM yyyy HH:mm:ss zzz"; + public static final String HTTP_DATE_GMT_TIMEZONE = "GMT"; + public static final int HTTP_CACHE_SECONDS = 60; + + private Http2FrameStream stream; + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + if (msg instanceof Http2HeadersFrame) { + Http2HeadersFrame headersFrame = (Http2HeadersFrame) msg; + this.stream = headersFrame.stream(); + + if (!GET.toString().equals(headersFrame.headers().method().toString())) { + sendError(ctx, METHOD_NOT_ALLOWED); + return; + } + + final String uri = headersFrame.headers().path().toString(); + final String path = sanitizeUri(uri); + if (path == null) { + sendError(ctx, FORBIDDEN); + return; + } + + File file = new File(path); + if (file.isHidden() || !file.exists()) { + sendError(ctx, NOT_FOUND); + return; + } + + if (file.isDirectory()) { + if (uri.endsWith("/")) { + sendListing(ctx, file, uri); + } else { + sendRedirect(ctx, uri + '/'); + } + return; + } + + if (!file.isFile()) { + sendError(ctx, FORBIDDEN); + return; + } + + // Cache Validation + CharSequence ifModifiedSince = headersFrame.headers().get(HttpHeaderNames.IF_MODIFIED_SINCE); + if (ifModifiedSince != null && !ifModifiedSince.toString().isEmpty()) { + SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US); + Date ifModifiedSinceDate = dateFormatter.parse(ifModifiedSince.toString()); + + // Only compare up to the second because the datetime format we send to the client + // does not have milliseconds + long ifModifiedSinceDateSeconds = ifModifiedSinceDate.getTime() / 1000; + long fileLastModifiedSeconds = file.lastModified() / 1000; + if (ifModifiedSinceDateSeconds == fileLastModifiedSeconds) { + sendNotModified(ctx); + return; + } + } + + RandomAccessFile raf; + try { + raf = new RandomAccessFile(file, "r"); + } catch (FileNotFoundException ignore) { + sendError(ctx, NOT_FOUND); + return; + } + long fileLength = raf.length(); + + Http2Headers headers = new DefaultHttp2Headers(); + headers.status("200"); + headers.setLong(HttpHeaderNames.CONTENT_LENGTH, fileLength); + + setContentTypeHeader(headers, file); + setDateAndCacheHeaders(headers, file); + + // Write the initial line and the header. + ctx.writeAndFlush(new DefaultHttp2HeadersFrame(headers).stream(stream)); + + // Write the content. 
+ ChannelFuture sendFileFuture; + sendFileFuture = ctx.writeAndFlush(new Http2DataChunkedInput( + new ChunkedFile(raf, 0, fileLength, 8192), stream), ctx.newProgressivePromise()); + + sendFileFuture.addListener(new ChannelProgressiveFutureListener() { + @Override + public void operationProgressed(ChannelProgressiveFuture future, long progress, long total) { + if (total < 0) { // total unknown + System.err.println(future.channel() + " Transfer progress: " + progress); + } else { + System.err.println(future.channel() + " Transfer progress: " + progress + " / " + total); + } + } + + @Override + public void operationComplete(ChannelProgressiveFuture future) { + System.err.println(future.channel() + " Transfer complete."); + } + }); + } else { + // Unsupported message type + System.out.println("Unsupported message type: " + msg); + } + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + cause.printStackTrace(); + if (ctx.channel().isActive()) { + sendError(ctx, INTERNAL_SERVER_ERROR); + } + } + + private static final Pattern INSECURE_URI = Pattern.compile(".*[<>&\"].*"); + + private static String sanitizeUri(String uri) throws UnsupportedEncodingException { + // Decode the path. + uri = URLDecoder.decode(uri, "UTF-8"); + + if (uri.isEmpty() || uri.charAt(0) != '/') { + return null; + } + + // Convert file separators. + uri = uri.replace('/', File.separatorChar); + + // Simplistic dumb security check. + // You will have to do something serious in the production environment. + if (uri.contains(File.separator + '.') || + uri.contains('.' + File.separator) || + uri.charAt(0) == '.' || uri.charAt(uri.length() - 1) == '.' || + INSECURE_URI.matcher(uri).matches()) { + return null; + } + + // Convert to absolute path. + return SystemPropertyUtil.get("user.dir") + File.separator + uri; + } + + private static final Pattern ALLOWED_FILE_NAME = Pattern.compile("[^-\\._]?[^<>&\\\"]*"); + + private void sendListing(ChannelHandlerContext ctx, File dir, String dirPath) { + StringBuilder buf = new StringBuilder() + .append("<!DOCTYPE html>\r\n") + .append("<html><head><meta charset='utf-8' /><title>") + .append("Listing of: ") + .append(dirPath) + .append("</title></head><body>\r\n") + + .append("<h3>Listing of: ") + .append(dirPath) + .append("</h3>\r\n") + + .append("<ul>") + .append("<li><a href=\"../\">..</a></li>\r\n"); + + File[] files = dir.listFiles(); + if (files != null) { + for (File f : files) { + if (f.isHidden() || !f.canRead()) { + continue; + } + + String name = f.getName(); + if (!ALLOWED_FILE_NAME.matcher(name).matches()) { + continue; + } + + buf.append("<li><a href=\"") + .append(name) + .append("\">") + .append(name) + .append("</a></li>\r\n"); + } + } + + buf.append("</ul></body></html>\r\n"); + + ByteBuf buffer = ctx.alloc().buffer(buf.length()); + buffer.writeCharSequence(buf.toString(), CharsetUtil.UTF_8); + + Http2Headers headers = new DefaultHttp2Headers(); + headers.status(OK.toString()); + headers.add(HttpHeaderNames.CONTENT_TYPE, "text/html; charset=UTF-8"); + + ctx.write(new DefaultHttp2HeadersFrame(headers).stream(stream)); + ctx.writeAndFlush(new DefaultHttp2DataFrame(buffer, true).stream(stream)); + } + + private void sendRedirect(ChannelHandlerContext ctx, String newUri) { + Http2Headers headers = new DefaultHttp2Headers(); + headers.status(FOUND.toString()); + headers.add(HttpHeaderNames.LOCATION, newUri); + + ctx.writeAndFlush(new DefaultHttp2HeadersFrame(headers, true).stream(stream)); + } + + private void 
sendError(ChannelHandlerContext ctx, HttpResponseStatus status) { + Http2Headers headers = new DefaultHttp2Headers(); + headers.status(status.toString()); + headers.add(HttpHeaderNames.CONTENT_TYPE, "text/plain; charset=UTF-8"); + + Http2HeadersFrame headersFrame = new DefaultHttp2HeadersFrame(headers); + headersFrame.stream(stream); + + Http2DataFrame dataFrame = new DefaultHttp2DataFrame( + Unpooled.copiedBuffer("Failure: " + status + "\r\n", CharsetUtil.UTF_8), true); + dataFrame.stream(stream); + + ctx.write(headersFrame); + ctx.writeAndFlush(dataFrame); + } + + /** + * When file timestamp is the same as what the browser is sending up, send a "304 Not Modified" + * + * @param ctx Context + */ + private void sendNotModified(ChannelHandlerContext ctx) { + Http2Headers headers = new DefaultHttp2Headers(); + headers.status(NOT_MODIFIED.toString()); + setDateHeader(headers); + + ctx.writeAndFlush(new DefaultHttp2HeadersFrame(headers, true).stream(stream)); + } + + /** + * Sets the Date header for the HTTP response + * + * @param headers Http2 Headers + */ + private static void setDateHeader(Http2Headers headers) { + SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US); + dateFormatter.setTimeZone(TimeZone.getTimeZone(HTTP_DATE_GMT_TIMEZONE)); + + Calendar time = new GregorianCalendar(); + headers.set(HttpHeaderNames.DATE, dateFormatter.format(time.getTime())); + } + + /** + * Sets the Date and Cache headers for the HTTP Response + * + * @param headers Http2 Headers + * @param fileToCache file to extract content type + */ + private static void setDateAndCacheHeaders(Http2Headers headers, File fileToCache) { + SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US); + dateFormatter.setTimeZone(TimeZone.getTimeZone(HTTP_DATE_GMT_TIMEZONE)); + + // Date header + Calendar time = new GregorianCalendar(); + headers.set(HttpHeaderNames.DATE, dateFormatter.format(time.getTime())); + + // Add cache headers + time.add(Calendar.SECOND, HTTP_CACHE_SECONDS); + headers.set(HttpHeaderNames.EXPIRES, dateFormatter.format(time.getTime())); + headers.set(HttpHeaderNames.CACHE_CONTROL, "private, max-age=" + HTTP_CACHE_SECONDS); + headers.set(HttpHeaderNames.LAST_MODIFIED, dateFormatter.format(new Date(fileToCache.lastModified()))); + } + + /** + * Sets the content type header for the HTTP Response + * + * @param headers Http2 Headers + * @param file file to extract content type + */ + private static void setContentTypeHeader(Http2Headers headers, File file) { + MimetypesFileTypeMap mimeTypesMap = new MimetypesFileTypeMap(); + headers.set(HttpHeaderNames.CONTENT_TYPE, mimeTypesMap.getContentType(file.getPath())); + } +} diff --git a/example/src/main/java/io/netty/example/http2/file/Http2StaticFileServerInitializer.java b/example/src/main/java/io/netty/example/http2/file/Http2StaticFileServerInitializer.java new file mode 100644 index 00000000000..c52215eda4d --- /dev/null +++ b/example/src/main/java/io/netty/example/http2/file/Http2StaticFileServerInitializer.java @@ -0,0 +1,41 @@ +/* + * Copyright 2022 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.example.http2.file; + +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.socket.SocketChannel; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.stream.ChunkedWriteHandler; + +public class Http2StaticFileServerInitializer extends ChannelInitializer<SocketChannel> { + + private final SslContext sslCtx; + + public Http2StaticFileServerInitializer(SslContext sslCtx) { + this.sslCtx = sslCtx; + } + + @Override + public void initChannel(SocketChannel ch) { + ChannelPipeline pipeline = ch.pipeline(); + pipeline.addLast(sslCtx.newHandler(ch.alloc())); + pipeline.addLast(Http2FrameCodecBuilder.forServer().build()); + pipeline.addLast(new ChunkedWriteHandler()); + pipeline.addLast(new Http2StaticFileServerHandler()); + } +}
diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2DataChunkedInputTest.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2DataChunkedInputTest.java new file mode 100644 index 00000000000..26ea3fd05d9 --- /dev/null +++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2DataChunkedInputTest.java @@ -0,0 +1,177 @@ +/* + * Copyright 2022 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.handler.codec.http2; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.stream.ChunkedFile; +import io.netty.handler.stream.ChunkedInput; +import io.netty.handler.stream.ChunkedNioFile; +import io.netty.handler.stream.ChunkedNioStream; +import io.netty.handler.stream.ChunkedStream; +import io.netty.handler.stream.ChunkedWriteHandler; +import io.netty.util.internal.PlatformDependent; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.channels.Channels; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class Http2DataChunkedInputTest { + private static final byte[] BYTES = new byte[1024 * 64]; + private static final File TMP; + + // Just a dummy interface implementation of stream + private static final Http2FrameStream STREAM = new Http2FrameStream() { + @Override + public int id() { + return 1; + } + + @Override + public Http2Stream.State state() { + return Http2Stream.State.OPEN; + } + }; + + static { + for (int i = 0; i < BYTES.length; i++) { + BYTES[i] = (byte) i; + } + + FileOutputStream out = null; + try { + TMP = PlatformDependent.createTempFile("netty-chunk-", ".tmp", null); + TMP.deleteOnExit(); + out = new FileOutputStream(TMP); + out.write(BYTES); + out.flush(); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + if (out != null) { + try { + out.close(); + } catch (IOException e) { + // ignore + } + } + } + } + + @Test + public void testChunkedStream() { + check(new Http2DataChunkedInput(new ChunkedStream(new ByteArrayInputStream(BYTES)), STREAM)); + } + + @Test + public void testChunkedNioStream() { + check(new Http2DataChunkedInput(new ChunkedNioStream(Channels.newChannel(new ByteArrayInputStream(BYTES))), + STREAM)); + } + + @Test + public void testChunkedFile() throws IOException { + check(new Http2DataChunkedInput(new ChunkedFile(TMP), STREAM)); + } + + @Test + public void testChunkedNioFile() throws IOException { + check(new Http2DataChunkedInput(new ChunkedNioFile(TMP), STREAM)); + } + + @Test + public void testWrappedReturnNull() throws Exception { + 
Http2DataChunkedInput input = new Http2DataChunkedInput(new ChunkedInput<ByteBuf>() { + + @Override + public boolean isEndOfInput() throws Exception { + return false; + } + + @Override + public void close() throws Exception { + // NOOP + } + + @Override + public ByteBuf readChunk(ChannelHandlerContext ctx) throws Exception { + return null; + } + + @Override + public ByteBuf readChunk(ByteBufAllocator allocator) throws Exception { + return null; + } + + @Override + public long length() { + return 0; + } + + @Override + public long progress() { + return 0; + } + }, STREAM); + assertNull(input.readChunk(ByteBufAllocator.DEFAULT)); + } + + private static void check(ChunkedInput<?>... inputs) { + EmbeddedChannel ch = new EmbeddedChannel(new ChunkedWriteHandler()); + + for (ChunkedInput<?> input : inputs) { + ch.writeOutbound(input); + } + + assertTrue(ch.finish()); + + int i = 0; + int read = 0; + Http2DataFrame http2DataFrame = null; + for (;;) { + Http2DataFrame dataFrame = ch.readOutbound(); + if (dataFrame == null) { + break; + } + + ByteBuf buffer = dataFrame.content(); + while (buffer.isReadable()) { + assertEquals(BYTES[i++], buffer.readByte()); + read++; + if (i == BYTES.length) { + i = 0; + } + } + buffer.release(); + + // Save last chunk + http2DataFrame = dataFrame; + } + + assertEquals(BYTES.length * inputs.length, read); + assertNotNull(http2DataFrame); + assertTrue(http2DataFrame.isEndStream(), "Last chunk must be Http2DataFrame#isEndStream() set to true"); + } +}
train
test
"2022-02-01T15:19:47"
"2020-11-16T08:53:51Z"
Ech0Fan
val
netty/netty/12071_12079
netty/netty
netty/netty/12071
netty/netty/12079
[ "keyword_pr_to_issue" ]
fb4a3206ea49364b5a251029ea9618cd61549380
2ab9d653f6199c1438653e1d28c9948b7b9a541c
[ "@normanmaurer I created the Netty Discord server a few days back. Should we make it public so Netty people can engage with each other?", "I am just a (proud) Netty committer but I love the @hyperxpro idea 👍 \r\nI saw many people struggling to find the right place to share their experience/hints...and with Netty 5 I believe this is going to happen more frequently ", "@hyperxpro sure lets do it. ", "Here we go: https://discord.gg/q4aQ2XjaCa\r\n\r\nLet's spread the word and see how things go." ]
[]
"2022-02-07T08:42:08Z"
[]
Clarify how to engage with the community
_Note: this is kind of a meta-issue, as it is not related to the code_ I have been working with Netty 4 for the past two months and it has been a great experience. At times I would have liked to talk to someone about design choices and general questions about the library, but I wasn't able to find a place where Netty users come together (e.g. an IRC channel, a Discord server, etc). Netty's website brought me to StackOverflow, but my impression was that most questions go unanswered. The website's link to IRC is broken, and when I checked out #netty on liberachat I only saw 3 users in the channel. This leaves me wondering: where does the Netty community hang around? It would be helpful to clarify this somewhere visible, because right now the first impression is that the project is not as active as it used to be. By the way, in case you are curious, I implemented a MySQL proxy from scratch (i.e. it receives MySQL connections and forwards them to arbitrary backends). I have the feeling that the library has helped me follow good design practices and that it has provided me a solid foundation to build upon, so thanks for that 😉 !
[ "README.md" ]
[ "README.md" ]
[]
diff --git a/README.md b/README.md index 887e3c3ce13..0bf67c3c409 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ Netty is an asynchronous event-driven network application framework for rapid de * [Downloads](https://netty.io/downloads.html) * [Documentation](https://netty.io/wiki/) * [@netty_project](https://twitter.com/netty_project) +* [Official Discord server](https://discord.gg/q4aQ2XjaCa) ## How to build
null
train
test
"2022-02-04T17:09:25"
"2022-02-03T14:58:46Z"
aochagavia
val
netty/netty/12080_12091
netty/netty
netty/netty/12080
netty/netty/12091
[ "keyword_pr_to_issue" ]
d1a8cd2dec422f3b8fbfd186ae1ec29e680ffd4e
86603872776e3ff5a60dea3c104358e486eed588
[ "@chrisvest WDYT ?", "@yawkat @normanmaurer In this PR, we would just be adding this protected do-nothing method, and call it in the right places, right? And then expect down-stream clients to use e.g. `ResourceLeakDetectorFactory` to install custom detectors that override this method? If that's the case, I think it sounds fine, and would be happy to review a PR." ]
[ "You don't need to allocate a single huge array. You can instead allocate a large volume of smaller arrays, until the `GarbageCollectorMXBean`s tell you a GC cycle has occurred.", "Ok I've just tried this but can't get this to work reliably either.\r\n\r\nRelying on `GarbageCollectorMXBean` fails for two reasons: \"scavenge\" GC (young GC I assume?) has its own MX Bean, but might not collect all references. So that bean might show a young GC has happened, but references wouldn't be processed yet, because they're only done in old GC. I've also tried excluding beans with the name \"scavenge\", which made it more reliable, but still sometimes failed. I think this might be because references can be processed concurrently: https://developers.redhat.com/articles/2021/05/20/shenandoah-garbage-collection-openjdk-16-concurrent-reference-processing – not sure if this applies to non-shenandoah, but it looks like GC can sometimes do reference processing after the GC has already nominally completed, so we can't rely on the MX bean to tell us when references have been processed. It's also \"fixed\" by adding a `Thread.sleep(10)` just after the enqueue check, which corroborates this.\r\n\r\n```java\r\n private static long getCollectionCount() {\r\n long count = 0;\r\n for (GarbageCollectorMXBean bean : garbageCollectorMxBeans) {\r\n if (bean.getName().equals(\"scavenge\")) {\r\n continue;\r\n }\r\n count += bean.getCollectionCount();\r\n }\r\n return count;\r\n }\r\n\r\n private static void triggerGc() {\r\n Assumptions.assumeFalse(garbageCollectorMxBeans.isEmpty(), \"GarbageCollectorMXBean not available\");\r\n long collectionCountPrevious = getCollectionCount();\r\n\r\n System.gc();\r\n\r\n @SuppressWarnings(\"MismatchedQueryAndUpdateOfCollection\")\r\n List<Object> storage = new ArrayList<Object>();\r\n try {\r\n while (getCollectionCount() == collectionCountPrevious) {\r\n storage.add(new byte[1024 * 1024]); // 1M\r\n }\r\n } catch (OutOfMemoryError ignored) {\r\n }\r\n storage.clear();\r\n\r\n try {\r\n Thread.sleep(10);\r\n } catch (InterruptedException e) {\r\n }\r\n }\r\n```\r\n\r\nAnother approach I tried is to use a second `ReferenceQueue` with a reference that is definitely unreachable, and simply allocating until that reference has been enqueued. However even this approach was not 100% reliable (maybe ~80%?). I think this might also be due to concurrent reference processing, where there is a race condition between the GC thread enqueuing the reference, and the test thread checking for the reference's presence. The same \"fix\" of adding a 10ms delay works here too.\r\n\r\n```java\r\n private static void triggerGc() {\r\n ReferenceQueue<Object> queue = new ReferenceQueue<Object>();\r\n Object obj = new Object();\r\n WeakReference<Object> ref = new WeakReference<Object>(obj, queue);\r\n obj = null;\r\n\r\n System.gc();\r\n\r\n @SuppressWarnings(\"MismatchedQueryAndUpdateOfCollection\")\r\n List<Object> storage = new ArrayList<Object>();\r\n try {\r\n while (queue.poll() == null) {\r\n storage.add(new byte[1024 * 1024]); // 1M\r\n }\r\n } catch (OutOfMemoryError ignored) {\r\n }\r\n ref.hashCode();\r\n storage.clear();\r\n\r\n try {\r\n TimeUnit.MILLISECONDS.sleep(10);\r\n } catch (InterruptedException ignored) {\r\n }\r\n }\r\n```\r\n\r\nHonestly neither solution seems great. The latter is a bit better imo, because it checks that reference processing actually happened, but it's still far from ideal. What do you think?" ]
"2022-02-10T08:56:23Z"
[]
Allow attaching additional metadata to ResourceLeakTrackers
It would be useful to add an API to `ResourceLeakDetector` that allows subclasses to supply custom metadata on `DefaultResourceLeak` creation. This would make it easier to associate detected leaks with parameters of the code that caused the leak, e.g. the test case involved (or in my case, the fuzzer input). Right now there is no way to hook into the `ResourceLeakTracker` creation at all, so the only workaround seems to be a custom allocator that `touch`es any buffer on creation, but that doesn't work with separately tracked objects. IMO a sensible API to implement that would be to add a `protected Object initialHint() { return null; }` method that is called for the "creation" `TraceRecord`. This hint would then be reported alongside the "Created at:" stack trace. Another alternative is to make `track(T obj)` not be final, so that subclasses can `touch` the record with their own information. I can make a PR for this, if you think this is a sensible thing to support. ### Netty version 4.1.73 ### JVM version (e.g. `java -version`) openjdk version "17.0.1" 2021-10-19 OpenJDK Runtime Environment Temurin-17.0.1+12 (build 17.0.1+12) OpenJDK 64-Bit Server VM Temurin-17.0.1+12 (build 17.0.1+12, mixed mode, sharing) ### OS version (e.g. `uname -a`) Linux yawkat-oracle 5.13.0-28-generic #31-Ubuntu SMP Thu Jan 13 17:41:06 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux
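The merged fix exposes exactly this hook as `protected Object getInitialHint(String resourceType)` (see the patch below). A minimal sketch of a subclass that stamps each tracked resource with the currently active test case or fuzzer input; the `CURRENT_INPUT` holder is a hypothetical stand-in for however the caller tracks that state:

```java
import io.netty.util.ResourceLeakDetector;

public class HintedLeakDetector<T> extends ResourceLeakDetector<T> {
    // Hypothetical holder for the test case / fuzzer input currently running.
    public static final ThreadLocal<String> CURRENT_INPUT = new ThreadLocal<String>();

    public HintedLeakDetector(Class<?> resourceType, int samplingInterval) {
        super(resourceType, samplingInterval);
    }

    @Override
    protected Object getInitialHint(String resourceType) {
        // Reported alongside the "Created at:" stack trace when a leak is found.
        return CURRENT_INPUT.get();
    }
}
```

Such a detector would typically be installed through a custom `ResourceLeakDetectorFactory`, as suggested in the discussion above.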
[ "common/src/main/java/io/netty/util/ResourceLeakDetector.java" ]
[ "common/src/main/java/io/netty/util/ResourceLeakDetector.java" ]
[ "common/src/test/java/io/netty/util/ResourceLeakDetectorTest.java" ]
diff --git a/common/src/main/java/io/netty/util/ResourceLeakDetector.java b/common/src/main/java/io/netty/util/ResourceLeakDetector.java index be6845683d9..0408e39975a 100644 --- a/common/src/main/java/io/netty/util/ResourceLeakDetector.java +++ b/common/src/main/java/io/netty/util/ResourceLeakDetector.java @@ -255,12 +255,12 @@ private DefaultResourceLeak track0(T obj) { if (level.ordinal() < Level.PARANOID.ordinal()) { if ((PlatformDependent.threadLocalRandom().nextInt(samplingInterval)) == 0) { reportLeak(); - return new DefaultResourceLeak(obj, refQueue, allLeaks); + return new DefaultResourceLeak(obj, refQueue, allLeaks, getInitialHint(resourceType)); } return null; } reportLeak(); - return new DefaultResourceLeak(obj, refQueue, allLeaks); + return new DefaultResourceLeak(obj, refQueue, allLeaks, getInitialHint(resourceType)); } private void clearRefQueue() { @@ -300,7 +300,7 @@ private void reportLeak() { continue; } - String records = ref.toString(); + String records = ref.getReportAndClearRecords(); if (reportedLeaks.add(records)) { if (records.isEmpty()) { reportUntracedLeak(resourceType); @@ -342,6 +342,15 @@ protected void reportUntracedLeak(String resourceType) { protected void reportInstancesLeak(String resourceType) { } + /** + * Create a hint object to be attached to an object tracked by this record. Similar to the additional information + * supplied to {@link ResourceLeakTracker#record(Object)}, will be printed alongside the stack trace of the + * creation of the resource. + */ + protected Object getInitialHint(String resourceType) { + return null; + } + @SuppressWarnings("deprecation") private static final class DefaultResourceLeak<T> extends WeakReference<Object> implements ResourceLeakTracker<T>, ResourceLeak { @@ -367,7 +376,8 @@ private static final class DefaultResourceLeak<T> DefaultResourceLeak( Object referent, ReferenceQueue<Object> refQueue, - Set<DefaultResourceLeak<?>> allLeaks) { + Set<DefaultResourceLeak<?>> allLeaks, + Object initialHint) { super(referent, refQueue); assert referent != null; @@ -378,7 +388,8 @@ private static final class DefaultResourceLeak<T> trackedHash = System.identityHashCode(referent); allLeaks.add(this); // Create a new Record so we always have the creation stacktrace included. - headUpdater.set(this, new TraceRecord(TraceRecord.BOTTOM)); + headUpdater.set(this, initialHint == null ? + new TraceRecord(TraceRecord.BOTTOM) : new TraceRecord(TraceRecord.BOTTOM, initialHint)); this.allLeaks = allLeaks; } @@ -508,7 +519,16 @@ private static void reachabilityFence0(Object ref) { @Override public String toString() { + TraceRecord oldHead = headUpdater.get(this); + return generateReport(oldHead); + } + + String getReportAndClearRecords() { TraceRecord oldHead = headUpdater.getAndSet(this, null); + return generateReport(oldHead); + } + + private String generateReport(TraceRecord oldHead) { if (oldHead == null) { // Already closed return EMPTY_STRING;
diff --git a/common/src/test/java/io/netty/util/ResourceLeakDetectorTest.java b/common/src/test/java/io/netty/util/ResourceLeakDetectorTest.java index 2a55da2ff78..3947d60f514 100644 --- a/common/src/test/java/io/netty/util/ResourceLeakDetectorTest.java +++ b/common/src/test/java/io/netty/util/ResourceLeakDetectorTest.java @@ -20,12 +20,18 @@ import java.util.ArrayDeque; import java.util.Queue; +import java.util.UUID; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static org.assertj.core.api.Assertions.assertThat; + public class ResourceLeakDetectorTest { + @SuppressWarnings("unused") + private static volatile int sink; @Test @Timeout(value = 60000, unit = TimeUnit.MILLISECONDS) @@ -98,6 +104,34 @@ private boolean closeResources(boolean checkClosed) { assertNoErrors(error); } + @Timeout(10) + @Test + public void testLeakSetupHints() throws Throwable { + DefaultResource.detectorWithSetupHint.initialise(); + leakResource(); + + do { + // Trigger GC. + System.gc(); + // Track another resource to trigger refqueue visiting. + Resource resource2 = new DefaultResource(); + DefaultResource.detectorWithSetupHint.track(resource2).close(resource2); + // Give the GC something to work on. + for (int i = 0; i < 1000; i++) { + sink = System.identityHashCode(new byte[10000]); + } + } while (DefaultResource.detectorWithSetupHint.getLeaksFound() < 1 && !Thread.interrupted()); + + assertThat(DefaultResource.detectorWithSetupHint.getLeaksFound()).isOne(); + DefaultResource.detectorWithSetupHint.assertNoErrors(); + } + + private static void leakResource() { + Resource resource = new DefaultResource(); + // We'll never close this ResourceLeakTracker. 
+ DefaultResource.detectorWithSetupHint.track(resource); + } + // Mimic the way how we implement our classes that should help with leak detection private static final class LeakAwareResource implements Resource { private final Resource resource; @@ -123,6 +157,8 @@ private static final class DefaultResource implements Resource { // Sample every allocation static final TestResourceLeakDetector<Resource> detector = new TestResourceLeakDetector<Resource>( Resource.class, 1, Integer.MAX_VALUE); + static final CreationRecordLeakDetector<Resource> detectorWithSetupHint = + new CreationRecordLeakDetector<Resource>(Resource.class, 1); @Override public boolean close() { @@ -172,4 +208,56 @@ void assertNoErrors() throws Throwable { ResourceLeakDetectorTest.assertNoErrors(error); } } + + private static final class CreationRecordLeakDetector<T> extends ResourceLeakDetector<T> { + private String canaryString; + + private final AtomicReference<Throwable> error = new AtomicReference<Throwable>(); + private final AtomicInteger leaksFound = new AtomicInteger(0); + + CreationRecordLeakDetector(Class<?> resourceType, int samplingInterval) { + super(resourceType, samplingInterval); + } + + public void initialise() { + canaryString = "creation-canary-" + UUID.randomUUID(); + leaksFound.set(0); + } + + @Override + protected boolean needReport() { + return true; + } + + @Override + protected void reportTracedLeak(String resourceType, String records) { + if (!records.contains(canaryString)) { + reportError(new AssertionError("Leak records did not contain canary string")); + } + leaksFound.incrementAndGet(); + } + + @Override + protected void reportUntracedLeak(String resourceType) { + reportError(new AssertionError("Got untraced leak w/o canary string")); + leaksFound.incrementAndGet(); + } + + private void reportError(AssertionError cause) { + error.compareAndSet(null, cause); + } + + @Override + protected Object getInitialHint(String resourceType) { + return canaryString; + } + + int getLeaksFound() { + return leaksFound.get(); + } + + void assertNoErrors() throws Throwable { + ResourceLeakDetectorTest.assertNoErrors(error); + } + } }
val
test
"2022-02-09T10:45:34"
"2022-02-07T09:30:50Z"
yawkat
val
netty/netty/12067_12109
netty/netty
netty/netty/12067
netty/netty/12109
[ "keyword_pr_to_issue" ]
cd0738aa02698a33e1deb30d997f3d0ef84f386e
4797b88d89304763fd92eeff73d077effa53685e
[ "Looking at the git history, this is guarding against issues with class-loaders in e.g. Tomcat containers.\r\n\r\nIf you only allocate from `FastThreadLocalThreads`, then it won't be a problem since those always clear out their thread-locals before terminating, which makes the `PoolThreadCache` free - from within the thread itself - all the memory its holding on to, and properly update the counters.", "Thank you for your reply.\r\nI agree that it's better to use `FastThreadLocalThread` as much as possible. But this is not always possible.\r\nI use the Reactor project (without Tomcat) and some allocation of ByteBuff can take place in a classic `Thread` (use of `Scheduler.elastic()` or use of a `CompletableFuture` from a third-party lib, etc.).\r\nWhen this happens and when the Thread is terminated, the metrics seems to not correctly updated and `poolArenaMetric.numDeallocations()` is wrong and therefore `poolArenaMetric.numActiveAllocations()` is also wrong.", "I found an other source of wrong deallocation metric (also exists with `FastThreadLocalThread`) :\r\n`byteBuf.release()` can be call in some differents Thread than initial allocator Thread.\r\nSo maybe it's better if `deallocationsSmall` and `deallocationsNormal` are `LongCounter` (like `deallocationsHuge`) for be protected against concurent use.\r\n\r\nhttps://github.com/netty/netty/blob/1f357fc00e68091b320ada074d198b90a7c75377/buffer/src/main/java/io/netty/buffer/PoolArena.java#L62-L66", "@xavier-b these are only modified within a synchronized block. So it should be a not a problem or I am missing something ?", "Here is my Test (only one direct pool arena for easy log) : \r\n```java\r\npackage fr.xav.brouillon.netty;\r\n\r\nimport io.netty.buffer.*;\r\nimport io.netty.util.concurrent.FastThreadLocalThread;\r\n\r\nimport java.util.ArrayList;\r\nimport java.util.Deque;\r\nimport java.util.List;\r\nimport java.util.concurrent.ConcurrentLinkedDeque;\r\n\r\npublic class ExampleCache {\r\n\r\n private static volatile boolean releaseIt = false;\r\n\r\n private static Deque<ByteBuf> listAllByteBuff = new ConcurrentLinkedDeque<>();\r\n\r\n public static void main(String[] args) throws InterruptedException {\r\n\r\n final boolean testWithFastThread = false;\r\n final int nbThread = 80;\r\n\r\n System.out.println(\"Start test with \" + nbThread + \" \" + (testWithFastThread ? 
\"FastThreadLocalThread\" : \"classic Thread\"));\r\n // Only one Direct arena for easy test\r\n System.setProperty(\"io.netty.allocator.numDirectArenas\", \"1\");\r\n System.setProperty(\"io.netty.allocator.numHeapArenas\", \"0\");\r\n\r\n System.setProperty(\"io.netty.allocator.cacheTrimIntervalMillis\", \"100\");\r\n\r\n List<Thread> threadList = new ArrayList<>();\r\n for (int i = 0; i < nbThread; i++) {\r\n if (testWithFastThread) {\r\n threadList.add(new FastThreadLocalThread(() -> doInThread(400, 500)));\r\n } else {\r\n threadList.add(new Thread(() -> doInThread(400, 500)));\r\n }\r\n }\r\n\r\n for (Thread thread : threadList) {\r\n thread.start();\r\n }\r\n\r\n threadList.clear();\r\n\r\n System.out.println(\"Sleep 10s\");\r\n Thread.sleep(10_000);\r\n\r\n System.out.println(\"releaseIt=true\");\r\n releaseIt = true;\r\n System.out.println(\"Sleep 15s\");\r\n Thread.sleep(15_000);\r\n\r\n System.out.println(\"GC\");\r\n System.gc();\r\n System.out.println(\"Sleep 5s\");\r\n Thread.sleep(10_000);\r\n\r\n if (!listAllByteBuff.isEmpty()) {\r\n throw new IllegalStateException(\"Error, all ByteBuf was not yet released\");\r\n }\r\n\r\n printMetric(PooledByteBufAllocator.DEFAULT.metric());\r\n }\r\n\r\n\r\n private static void doInThread(int taille, int nb) {\r\n for (int i = 0; i < nb; i++) {\r\n ByteBuf byteBuf = PooledByteBufAllocator.DEFAULT.directBuffer(taille);\r\n listAllByteBuff.addFirst(byteBuf);\r\n }\r\n\r\n while (!releaseIt) {\r\n try {\r\n Thread.sleep(3L);\r\n } catch (InterruptedException e) {\r\n e.printStackTrace();\r\n }\r\n }\r\n\r\n // ByteBuf.release is not necessarily on a ByteBuf created by this Thread\r\n for (int i = 0; i < nb; i++) {\r\n ByteBuf byteBuf = listAllByteBuff.pollLast();\r\n if (byteBuf != null) {\r\n byteBuf.release();\r\n }\r\n }\r\n\r\n }\r\n\r\n private static void printMetric(PooledByteBufAllocatorMetric metric) {\r\n\r\n System.out.println();\r\n System.out.println(\">>> METRICS :\");\r\n System.out.println(\"numThreadLocalCaches : \" + metric.numThreadLocalCaches());\r\n System.out.println(\"smallCacheSize : \" + metric.smallCacheSize());\r\n System.out.println(\"normalCacheSize : \" + metric.normalCacheSize());\r\n System.out.println(String.format(\"chunkSize : %,d\", metric.chunkSize()));\r\n\r\n for (int i = 0; i < metric.directArenas().size(); i++) {\r\n PoolArenaMetric poolArenaMetric = metric.directArenas().get(i);\r\n System.out.println(\"=========== PoolArenaMetric direct \" + i + \" ===========\");\r\n System.out.println(\"numThreadCaches : \" + poolArenaMetric.numThreadCaches());\r\n System.out.println(String.format(\"numSmallSubpages : %,d\", poolArenaMetric.numSmallSubpages()));\r\n System.out.println(String.format(\"numChunkLists : %,d\", poolArenaMetric.numChunkLists()));\r\n System.out.println(String.format(\"numAllocations : %,d\", poolArenaMetric.numAllocations()));\r\n System.out.println(String.format(\"numSmallAllocations : %,d\", poolArenaMetric.numSmallAllocations()));\r\n System.out.println(String.format(\"numNormalAllocations : %,d\", poolArenaMetric.numNormalAllocations()));\r\n System.out.println(String.format(\"numHugeAllocations : %,d\", poolArenaMetric.numHugeAllocations()));\r\n System.out.println(String.format(\"numDeallocations : %,d\", poolArenaMetric.numDeallocations()));\r\n System.out.println(String.format(\"numSmallDeallocations : %,d\", poolArenaMetric.numSmallDeallocations()));\r\n System.out.println(String.format(\"numNormalDeallocations : %,d\", poolArenaMetric.numNormalDeallocations()));\r\n 
System.out.println(String.format(\"numHugeDeallocations : %,d\", poolArenaMetric.numHugeDeallocations()));\r\n System.out.println(String.format(\"numActiveAllocations : %,d\", poolArenaMetric.numActiveAllocations()));\r\n System.out.println(String.format(\"numActiveSmallAllocations : %,d\", poolArenaMetric.numActiveSmallAllocations()));\r\n System.out.println(String.format(\"numActiveNormalAllocations : %,d\", poolArenaMetric.numActiveNormalAllocations()));\r\n System.out.println(String.format(\"numActiveHugeAllocations : %,d\", poolArenaMetric.numActiveHugeAllocations()));\r\n System.out.println(String.format(\"numActiveBytes : %,d\", poolArenaMetric.numActiveBytes()));\r\n\r\n if (poolArenaMetric.numActiveAllocations() != 0) {\r\n throw new IllegalStateException(\"poolArenaMetric.numActiveAllocations() must be equals to 0, here : \" + poolArenaMetric.numActiveAllocations());\r\n } else {\r\n System.out.println(\"OK, poolArenaMetric.numActiveAllocations() is equals to zero\");\r\n }\r\n }\r\n }\r\n}\r\n```\r\n\r\nWhen I use classique Thread (`final boolean testWithFastThread = false;`), I have 20480 wrong active allocation (80 * 256, cad nbThread * io.netty.allocator.smallCacheSize) : \r\n```\r\nStart test with 80 classic Thread\r\nSleep 10s\r\nreleaseIt=true\r\nSleep 15s\r\nGC\r\nSleep 5s\r\n\r\n>>> METRICS :\r\nnumThreadLocalCaches : 0\r\nsmallCacheSize : 256\r\nnormalCacheSize : 64\r\nchunkSize : 16 777 216\r\n=========== PoolArenaMetric direct 0 ===========\r\nnumThreadCaches : 0\r\nnumSmallSubpages : 39\r\nnumChunkLists : 6\r\nnumAllocations : 40 000\r\nnumSmallAllocations : 40 000\r\nnumNormalAllocations : 0\r\nnumHugeAllocations : 0\r\nnumDeallocations : 19 520\r\nnumSmallDeallocations : 19 520\r\nnumNormalDeallocations : 0\r\nnumHugeDeallocations : 0\r\nnumActiveAllocations : 20 480\r\nnumActiveSmallAllocations : 20 480\r\nnumActiveNormalAllocations : 0\r\nnumActiveHugeAllocations : 0\r\nnumActiveBytes : 33 554 432\r\nException in thread \"main\" java.lang.IllegalStateException: poolArenaMetric.numActiveAllocations() must be equals to 0, here : 20480\r\n\tat fr.xav.brouillon.netty.ExampleCache.printMetric(ExampleCache.java:119)\r\n\tat fr.xav.brouillon.netty.ExampleCache.main(ExampleCache.java:61)\r\n\r\nProcess finished with exit code 1\r\n```\r\nAnd when I use FastThreadLocalThread (`final boolean testWithFastThread = true;`), I have ~6000 wrong allocations : \r\n```\r\nStart test with 80 FastThreadLocalThread\r\nSleep 10s\r\nreleaseIt=true\r\nSleep 15s\r\nGC\r\nSleep 5s\r\n\r\n>>> METRICS :\r\nnumThreadLocalCaches : 0\r\nsmallCacheSize : 256\r\nnormalCacheSize : 64\r\nchunkSize : 16 777 216\r\n=========== PoolArenaMetric direct 0 ===========\r\nnumThreadCaches : 0\r\nnumSmallSubpages : 39\r\nnumChunkLists : 6\r\nnumAllocations : 40 000\r\nnumSmallAllocations : 40 000\r\nnumNormalAllocations : 0\r\nnumHugeAllocations : 0\r\nnumDeallocations : 34 085\r\nnumSmallDeallocations : 34 085\r\nnumNormalDeallocations : 0\r\nnumHugeDeallocations : 0\r\nnumActiveAllocations : 5 915\r\nnumActiveSmallAllocations : 5 915\r\nnumActiveNormalAllocations : 0\r\nnumActiveHugeAllocations : 0\r\nnumActiveBytes : 33 554 432\r\nException in thread \"main\" java.lang.IllegalStateException: poolArenaMetric.numActiveAllocations() must be equals to 0, here : 5915\r\n\tat fr.xav.brouillon.netty.ExampleCache.printMetric(ExampleCache.java:119)\r\n\tat fr.xav.brouillon.netty.ExampleCache.main(ExampleCache.java:61)\r\n\r\nProcess finished with exit code 1\r\n```\r\n", "Note that if I 
use\r\n```java\r\nSystem.setProperty(\"io.netty.allocator.smallCacheSize\", \"0\");\r\nSystem.setProperty(\"io.netty.allocator.normalCacheSize\", \"0\");\r\n```\r\nThere is no problem.", "> @xavier-b these are only modified within a synchronized block. So it should be a not a problem or I am missing something ?\r\n\r\n@normanmaurer, effectively.\r\n\r\nI don't understand why I have `poolArenaMetric.numActiveAllocations != 0` when all byteBuf was released and when `PoolArenaMetric.numThreadCaches==0` in the Arena.\r\n", "I'm trying to follow this issue.\r\n\r\nSo, if I understand correctly what @chrisvest described earlier, when you use a FastThreadLocalThreads, then when this one terminates, it will clear the PoolThreadCache which will release the memory it holds and the metrics will then be updated.\r\n\r\nBut, when using a PooledByteBufAllocator from a classic Thread (like the use case from @xavier-b), then when the Thread terminates, it won't clear the PoolThreadCache (because of the issue #8955 if I'm correct).\r\n\r\nSo, this means that even if you release all buffers to the pool before the Thread terminates, then since the PooledByteBufAllocator maintains a cache of buffers, then few buffers won't be released when the Thread terminates (for example, for small buffers, the pool caches at least 256 buffers, see \r\nPooledByteBufAllocator DEFAULT_SMALL_CACHE_SIZE).\r\n\r\n@chrisvest,\r\n\r\nso, in this case, do we have a memory leak in case many threads are terminated and getting re created ? In this case, maybe there is a way to force the PooledByteBufAllocator to release all its cached buffers ? I tried to call the PooledByteBufAllocator.trimCurrentThreadCache() just before exiting from the Thread, but the numActiveAllocations meter is still positive ?\r\n\r\nthanks !\r\n\r\n", "@chrisvest \r\n> But, when using a PooledByteBufAllocator from a classic Thread (like the use case from @xavier-b), then when the Thread terminates, it won't clear the PoolThreadCache (because of the issue #8955 if I'm correct).\r\n\r\nNo, in the case of using a standard thread, it seems only the metric value is wrong. Threads cache seems to be correctly freed.\r\nIn the case of using FastThreadLocalThread, poolArenaMetric.numActiveAllocations != 0 at the end of the test, but I don't know why. (a wrong metric too ? An other cache ?)\r\n", "I think I understood why, with FastThreadLocalThread, my test does not end with 0 active allocation.\r\nIt's because PoolThreadCache is cleaned directly at the end of runnable action :\r\nhttps://github.com/netty/netty/blob/dc8bb40bba2dc4b86fcfb62a4ff02b9ef852014a/common/src/main/java/io/netty/util/concurrent/FastThreadLocalRunnable.java#L28-L34\r\n\r\nA ByteBuf is created in FastThreadLocalThread A and released in FastThreadLocalThread B.\r\nBut if FastThreadLocalThread A is ended before release action in Thread B, there is the problem : an entry is added to the already freed and unused PoolThreadCache. And when GC free PoolThreadCache, PoolThreadCache is already marked internally as freed, so no other entry release action is performed.: \r\nhttps://github.com/netty/netty/blob/dc8bb40bba2dc4b86fcfb62a4ff02b9ef852014a/buffer/src/main/java/io/netty/buffer/PoolThreadCache.java#L214-L217\r\n\r\nWe can have a small leak.\r\nOf course, FastThreadLocalThread is usually used in a pool with fixed size, so never ended.\r\n\r\nFor standard thread, there is no such problem, PoolThreadCache is cleaned only when the PoolThreadCache is released by GC. 
And because a ByteBuf keeps a ref to its PoolThreadCache, it's not possible to have an early release.\r\n\r\nHere is a little test with a FastThreadLocalThread:\r\n```java\r\nimport io.netty.buffer.ByteBuf;\r\nimport io.netty.buffer.PoolArenaMetric;\r\nimport io.netty.buffer.PooledByteBufAllocator;\r\nimport io.netty.buffer.PooledByteBufAllocatorMetric;\r\nimport io.netty.util.concurrent.FastThreadLocalThread;\r\n\r\nimport java.util.concurrent.atomic.AtomicReference;\r\n\r\npublic class ExampleCache2 {\r\n\r\n\r\n    public static void main(String[] args) throws InterruptedException {\r\n        System.out.println(\"Start test\");\r\n        // Only one Direct arena for easy test\r\n        System.setProperty(\"io.netty.allocator.numDirectArenas\", \"1\");\r\n        System.setProperty(\"io.netty.allocator.numHeapArenas\", \"0\");\r\n        System.setProperty(\"io.netty.allocator.cacheTrimIntervalMillis\", \"100\");\r\n        \r\n        AtomicReference<ByteBuf> refByteBuf = new AtomicReference<>();\r\n        \r\n        Thread threadA = new FastThreadLocalThread(() -> {\r\n            refByteBuf.set(PooledByteBufAllocator.DEFAULT.directBuffer(400));\r\n        });\r\n        System.out.println(\"Start Thread A\");\r\n        threadA.start();\r\n        threadA = null;\r\n        System.out.println(\"Sleep 10s\");\r\n        Thread.sleep(10_000);\r\n        \r\n        System.out.println(\"Release in main thread\");\r\n        refByteBuf.getAndSet(null).release();\r\n        \r\n        printMetric(PooledByteBufAllocator.DEFAULT.metric());\r\n    }\r\n    \r\n    private static void printMetric(PooledByteBufAllocatorMetric metric) {\r\n        \r\n        System.out.println();\r\n        System.out.println(\">>> METRICS :\");\r\n        System.out.println(\"numThreadLocalCaches : \" + metric.numThreadLocalCaches());\r\n        \r\n        for (int i = 0; i < metric.directArenas().size(); i++) {\r\n            PoolArenaMetric poolArenaMetric = metric.directArenas().get(i);\r\n            System.out.println(\"=========== PoolArenaMetric direct \" + i + \" ===========\");\r\n            System.out.println(\"numThreadCaches : \" + poolArenaMetric.numThreadCaches());\r\n            System.out.println(String.format(\"numAllocations : %,d\", poolArenaMetric.numAllocations()));\r\n            System.out.println(String.format(\"numDeallocations : %,d\", poolArenaMetric.numDeallocations()));\r\n            System.out.println(String.format(\"numActiveAllocations : %,d\", poolArenaMetric.numActiveAllocations()));\r\n            \r\n            if (poolArenaMetric.numActiveAllocations() != 0) {\r\n                throw new IllegalStateException(\"poolArenaMetric.numActiveAllocations() must be equals to 0, here : \" + poolArenaMetric.numActiveAllocations());\r\n            } else {\r\n                System.out.println(\"OK, poolArenaMetric.numActiveAllocations() is equals to zero\");\r\n            }\r\n        }\r\n    }\r\n}\r\n```" ]
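The comments above mention two concrete knobs that already exist in the 4.1 API: the small/normal cache-size system properties (shown in the work-around) and `PooledByteBufAllocator.trimCurrentThreadCache()` (which one commenter tried). Below is a minimal sketch combining them; the class name and buffer size are illustrative only, and the properties must be set before `PooledByteBufAllocator` is first loaded.

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public final class CacheMitigationSketch {
    public static void main(String[] args) {
        // Disable the per-thread caches entirely, so no entry can be parked in a
        // PoolThreadCache that may already have been freed (the work-around above).
        System.setProperty("io.netty.allocator.smallCacheSize", "0");
        System.setProperty("io.netty.allocator.normalCacheSize", "0");

        PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
        ByteBuf buf = alloc.directBuffer(400);
        try {
            // ... use the buffer on the thread that allocated it ...
        } finally {
            // Release on the allocating thread, then drop whatever the current
            // thread still caches before it exits.
            buf.release();
            alloc.trimCurrentThreadCache();
        }
    }
}
```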
[]
"2022-02-17T21:51:27Z"
[]
PoolArenaMetric.numDeallocations seems to be wrong
PoolArenaMetric.numDeallocations seems to be wrong because the counters are not updated when a free comes from PoolThreadLocal GC. https://github.com/netty/netty/blob/1f357fc00e68091b320ada074d198b90a7c75377/buffer/src/main/java/io/netty/buffer/PoolArena.java#L246-L256
[ "buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java" ]
[ "buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java" ]
[ "buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java" ]
diff --git a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java index d3445b524ea..4a55a469a88 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java +++ b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java @@ -144,7 +144,7 @@ public void run() { } DEFAULT_USE_CACHE_FOR_ALL_THREADS = SystemPropertyUtil.getBoolean( - "io.netty.allocator.useCacheForAllThreads", true); + "io.netty.allocator.useCacheForAllThreads", false); // Use 1023 by default as we use an ArrayDeque as backing storage which will then allocate an internal array // of 1024 elements. Otherwise we would allocate 2048 and only use 1024 which is wasteful.
diff --git a/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java b/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java index fd4c4d3cf0d..f79d8def0ab 100644 --- a/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java +++ b/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java @@ -149,7 +149,7 @@ public void testArenaMetricsNoCache() { @Test public void testArenaMetricsCache() { - testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 9, 1000, 1000, 1000), 100, 1, 1, 0); + testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 9, 1000, 1000, 1000, true, 0), 100, 1, 1, 0); } @Test
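The gold patch above flips the default of `io.netty.allocator.useCacheForAllThreads` to `false`, and the test patch shows the matching constructor that takes `useCacheForAllThreads` and the direct-memory cache alignment explicitly. A brief sketch of opting back in, using only the property name and constructor visible in the diffs:

```java
import io.netty.buffer.PooledByteBufAllocator;

public final class OptBackIntoCaches {
    public static void main(String[] args) {
        // Re-enable per-thread caches for plain (non-FastThreadLocal) threads;
        // must run before PooledByteBufAllocator is first loaded.
        System.setProperty("io.netty.allocator.useCacheForAllThreads", "true");

        // Or configure it per allocator instance, as the updated test does; the
        // last two arguments are useCacheForAllThreads and the cache alignment.
        PooledByteBufAllocator alloc =
                new PooledByteBufAllocator(true, 2, 2, 8192, 9, 1000, 1000, 1000, true, 0);
        System.out.println(alloc.metric());
    }
}
```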
test
test
"2022-02-28T20:34:11"
"2022-02-03T01:06:47Z"
xavier-b
val
netty/netty/12103_12118
netty/netty
netty/netty/12103
netty/netty/12118
[ "keyword_pr_to_issue" ]
ec0fc8fc725c1575be0bfabd79c663a8a60ebbb2
9c77e4021e3296ee445fe1512cf8b9c3ea3a7ab1
[ "@chrisvest what do you think... Not sure we want to expose just another public method. I guess we could do it tho", "@normanmaurer it's -1 to me, but I would like to understand why @Shoothzj need it/the exact use case", "@franz1981 It's started with https://github.com/apache/bookkeeper/pull/2989 when locating a `Out of DirectMemory Error`. We found the the maxDirectMemory read by bookkeeper is not correct with *huawei jdk 11*, but the result read by netty is correct.\r\nI want to improve the maxDirectMemory readed by bookkeepe but without the `io.netty.maxDirectMemory`.\r\n\r\nPlease point out if I didn't make it clear. My English is not good. Thanks\r\ncc @normanmaurer ", "wdyt @normanmaurer ?\r\n\r\nI know that Netty's `OutOfDirectMemoryError` throwing condition is tied with the Netty's notion of max direct memory, hence if we allow the exception to leak ie `OutOfDirectMemoryError` is public, maybe makes sense our limits to leak as well. \r\nAnother option for @Shoothzj could be to react to such information (if structured to be used) while the limit is reached, but we need to change `OutOfDirectMemoryError` in order to expose exception fields that represent our limits/current values ie \r\n`requestedCapacity, usedCapacity, DIRECT_MEMORY_LIMIT as max allowed capacity`", "@normanmaurer @franz1981 I don't see a big problem in exposing `maxDirectMemory0` as `estimateMaxDirectMemory`.", "technically it wouldn't even be part of the public API, last I recall PlatformDependent is in the \"internal\" package, that said could annotate it as UnstableApi?", "Let's wait what @normanmaurer answer on this, but I like both @chrisvest and @johnou answers " ]
[]
"2022-02-22T18:31:37Z"
[ "feature", "discussion" ]
[feature request] Add a util method to get maxDirectMemory
Netty's `PlatformDependent.maxDirectMemory()` is powerful and general. In other projects, we want to read the `maxDirectMemory` too. But we can't directly use `PlatformDependent.maxDirectMemory()`, because it will be wrong if the user sets `io.netty.maxDirectMemory` greater than zero. Instead of copying and pasting Netty's code (under the Apache License), I want a utility method to get the `PlatformDependent.maxDirectMemory0` result. I am not very familiar with the Netty code, but I can work on this with guidance. Thanks for your help.
[ "common/src/main/java/io/netty/util/internal/PlatformDependent.java" ]
[ "common/src/main/java/io/netty/util/internal/PlatformDependent.java" ]
[]
diff --git a/common/src/main/java/io/netty/util/internal/PlatformDependent.java b/common/src/main/java/io/netty/util/internal/PlatformDependent.java index a116da2451f..d6b140988b4 100644 --- a/common/src/main/java/io/netty/util/internal/PlatformDependent.java +++ b/common/src/main/java/io/netty/util/internal/PlatformDependent.java @@ -89,7 +89,7 @@ public final class PlatformDependent { private static final Throwable UNSAFE_UNAVAILABILITY_CAUSE = unsafeUnavailabilityCause0(); private static final boolean DIRECT_BUFFER_PREFERRED; - private static final long MAX_DIRECT_MEMORY = maxDirectMemory0(); + private static final long MAX_DIRECT_MEMORY = estimateMaxDirectMemory(); private static final int MPSC_CHUNK_SIZE = 1024; private static final int MIN_MAX_MPSC_CAPACITY = MPSC_CHUNK_SIZE * 2; @@ -1147,7 +1147,16 @@ private static boolean isIkvmDotNet0() { return vmName.equals("IKVM.NET"); } - private static long maxDirectMemory0() { + /** + * Compute an estimate of the maximum amount of direct memory available to this JVM. + * <p> + * The computation is not cached, so you probably want to use {@link #maxDirectMemory()} instead. + * <p> + * This will produce debug log output when called. + * + * @return The estimated max direct memory, in bytes. + */ + public static long estimateMaxDirectMemory() { long maxDirectMemory = 0; ClassLoader systemClassLoader = null;
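With the patch above, callers such as the BookKeeper use case from the discussion can read the uncached estimate directly. A minimal usage sketch; per the javadoc in the diff, `estimateMaxDirectMemory()` recomputes the value (and logs debug output) on each call, so the cached `maxDirectMemory()` remains the cheaper choice for hot paths:

```java
import io.netty.util.internal.PlatformDependent;

public final class DirectMemoryProbe {
    public static void main(String[] args) {
        // Fresh estimate, recomputed on each call (see the javadoc in the patch).
        long estimated = PlatformDependent.estimateMaxDirectMemory();
        // Cached value, computed once at class initialization.
        long cached = PlatformDependent.maxDirectMemory();
        System.out.printf("estimated=%,d cached=%,d%n", estimated, cached);
    }
}
```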
null
train
test
"2022-02-21T12:23:27"
"2022-02-16T01:05:49Z"
shoothzj
val
netty/netty/12129_12131
netty/netty
netty/netty/12129
netty/netty/12131
[ "keyword_pr_to_issue" ]
f650303911e308dfdf10761273c2b8e4436ea0a4
09dbf99ffaf9b00c1df8e55590d58a524ea639d2
[ "@devsprint I wonder if it would be better to write a `SLF4J ` implement for it and then just use the provided SLF4J implementation for netty ?", "To implement my current task, it is exactly what I did, but I was thinking of removing layers of indirections..." ]
[]
"2022-02-28T15:35:40Z"
[]
Internal Logging API - make MessageFormatter and FormattingTuple public
### Expected behavior While trying to use netty with ZIO 2.x logging support, I found out that I can't properly implement the AbstractInternalLogger interface for integration with ZIO 2.x while relying on the optimised implementation of MessageFormatter. ### Actual behavior Should be able to add new logging integrations. ### Steps to reproduce ### Minimal yet complete reproducer code (or URL to code) ### Netty version 4.1.74 ### JVM version (e.g. `java -version`) Not dependent on Java version ### OS version (e.g. `uname -a`) Not dependent on OS version.
[ "common/src/main/java/io/netty/util/internal/logging/FormattingTuple.java", "common/src/main/java/io/netty/util/internal/logging/MessageFormatter.java" ]
[ "common/src/main/java/io/netty/util/internal/logging/FormattingTuple.java", "common/src/main/java/io/netty/util/internal/logging/MessageFormatter.java" ]
[]
diff --git a/common/src/main/java/io/netty/util/internal/logging/FormattingTuple.java b/common/src/main/java/io/netty/util/internal/logging/FormattingTuple.java index 59862691c82..c18a3a3b2ca 100644 --- a/common/src/main/java/io/netty/util/internal/logging/FormattingTuple.java +++ b/common/src/main/java/io/netty/util/internal/logging/FormattingTuple.java @@ -42,7 +42,7 @@ /** * Holds the results of formatting done by {@link MessageFormatter}. */ -final class FormattingTuple { +public final class FormattingTuple { private final String message; private final Throwable throwable; diff --git a/common/src/main/java/io/netty/util/internal/logging/MessageFormatter.java b/common/src/main/java/io/netty/util/internal/logging/MessageFormatter.java index 82f56dce069..4301c3adea6 100644 --- a/common/src/main/java/io/netty/util/internal/logging/MessageFormatter.java +++ b/common/src/main/java/io/netty/util/internal/logging/MessageFormatter.java @@ -108,7 +108,7 @@ * {@link #format(String, Object, Object)} and * {@link #arrayFormat(String, Object[])} methods for more details. */ -final class MessageFormatter { +public final class MessageFormatter { private static final String DELIM_STR = "{}"; private static final char ESCAPE_CHAR = '\\';
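With `MessageFormatter` and `FormattingTuple` public, a custom `AbstractInternalLogger` integration can reuse the optimised `{}` formatting instead of reimplementing it. A hedged sketch of the delegation; it assumes the `arrayFormat`, `getMessage`, and `getThrowable` signatures referenced in the javadoc of the diff above are accessible (as in the SLF4J original this class derives from):

```java
import io.netty.util.internal.logging.FormattingTuple;
import io.netty.util.internal.logging.MessageFormatter;

public final class FormatterSketch {
    public static void main(String[] args) {
        FormattingTuple tuple = MessageFormatter.arrayFormat(
                "connect to {} failed after {} attempts",
                new Object[] { "example.com", 3 });
        // "connect to example.com failed after 3 attempts"
        String message = tuple.getMessage();
        // Non-null only if the last argument was a Throwable.
        Throwable cause = tuple.getThrowable();
        System.out.println(message + " cause=" + cause);
    }
}
```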
null
train
test
"2022-02-25T22:12:54"
"2022-02-28T12:11:18Z"
devsprint
val
netty/netty/12123_12138
netty/netty
netty/netty/12123
netty/netty/12138
[ "keyword_pr_to_issue" ]
09dbf99ffaf9b00c1df8e55590d58a524ea639d2
4374612a4bedefd528d22daac5094e3c740baeb9
[ "Let me check... ", "I have another question about spliceTo(), when native splice returns EAGAIN, why not suspend spliceTask and resume it when EPOLLIN is triggered next time. Infinite calls to splice will fully load the CPU.", "@AriseFX I am happy to review PRs. Please open a PR with fixes if you have time :)", "> @AriseFX I am happy to review PRs. Please open a PR with fixes if you have time :)\r\n\r\nI'll try :)", "> @AriseFX I am happy to review PRs. Please open a PR with fixes if you have time :)\r\n\r\nhttps://github.com/netty/netty/pull/12138" ]
[]
"2022-03-02T12:15:18Z"
[]
About the bug of spliceTo()
When I use splice for traffic transmission, if the channel still goes to epollInReady when EPOLLRDHUP occurs, this causes me to not receive the channelInactive() event ### Expected behavior when the other party closes the connection, I can receive channelInactive() ### Actual behavior Cannot receive channelInactive(), and the CPU is at 100% ![image](https://user-images.githubusercontent.com/41415014/155658646-0fff6505-f1b9-4066-9852-087f0040fd50.png) ### code ```java channel1.config().setAutoRead(false); channel2.config().setAutoRead(false); EpollSocketChannel ch1 = (EpollSocketChannel) channel1; EpollSocketChannel ch2 = (EpollSocketChannel) channel2; channel1.pipeline().addLast(new ChannelInboundHandlerAdapter() { @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { ctx.channel().close(); } }); channel2.pipeline().addLast(new ChannelInboundHandlerAdapter() { @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { ctx.channel().close(); } }); ch1.spliceTo(ch2, Integer.MAX_VALUE).addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) { if (!future.isSuccess()) { future.cancel(true); future.channel().close(); } } }); ch2.spliceTo(ch1, Integer.MAX_VALUE).addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) { if (!future.isSuccess()) { future.cancel(true); future.channel().close(); } } }); channel1.config().setAutoRead(true); channel2.config().setAutoRead(true); ``` ### Netty version 4.1.70.Final ### JDK version openjdk version "1.8.0_322" ### Linux version Linux 3.10.0-229.el7.x86_64 ### Solution Modify epollRdHupReady() ```java /** * Called once EPOLLRDHUP event is ready to be processed */ final void epollRdHupReady() { // This must happen before we attempt to read. This will ensure reading continues until an error occurs. recvBufAllocHandle().receivedRdHup(); if (isActive()) { // here if (AbstractEpollChannel.this instanceof AbstractEpollStreamChannel) { if (((AbstractEpollStreamChannel) AbstractEpollChannel.this).spliceQueue != null) { shutdownInput(true); } } else { // If it is still active, we need to call epollInReady as otherwise we may miss to // read pending data from the underlying file descriptor. // See https://github.com/netty/netty/issues/3709 epollInReady(); } } else { // Just to be safe make sure the input marked as closed. shutdownInput(true); } // Clear the EPOLLRDHUP flag to prevent continuously getting woken up on this event. clearEpollRdHup(); } ```
[ "transport-classes-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java" ]
[ "transport-classes-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java" ]
[]
diff --git a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java index 011b992b45f..0b7044bb8f3 100644 --- a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java +++ b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/AbstractEpollStreamChannel.java @@ -756,13 +756,18 @@ void epollInReady() { ByteBuf byteBuf = null; boolean close = false; + Queue<SpliceInTask> sQueue = null; try { - Queue<SpliceInTask> sQueue = null; do { if (sQueue != null || (sQueue = spliceQueue) != null) { SpliceInTask spliceTask = sQueue.peek(); if (spliceTask != null) { - if (spliceTask.spliceIn(allocHandle)) { + boolean spliceInResult = spliceTask.spliceIn(allocHandle); + + if (allocHandle.isReceivedRdHup()) { + shutdownInput(true); + } + if (spliceInResult) { // We need to check if it is still active as if not we removed all SpliceTasks in // doClose(...) if (isActive()) { @@ -820,7 +825,13 @@ void epollInReady() { } catch (Throwable t) { handleReadException(pipeline, byteBuf, t, close, allocHandle); } finally { - epollInFinally(config); + if (sQueue == null) { + epollInFinally(config); + } else { + if (!config.isAutoRead()) { + clearEpollIn(); + } + } } } } @@ -856,6 +867,7 @@ protected final int spliceIn(FileDescriptor pipeOut, RecvByteBufAllocator.Handle for (;;) { // Splicing until there is nothing left to splice. int localSplicedIn = Native.splice(socket.intValue(), -1, pipeOut.intValue(), -1, length); + handle.lastBytesRead(localSplicedIn); if (localSplicedIn == 0) { break; }
null
train
test
"2022-03-01T11:32:18"
"2022-02-25T05:13:26Z"
AriseFX
val
netty/netty/12175_12188
netty/netty
netty/netty/12175
netty/netty/12188
[ "keyword_pr_to_issue" ]
5cda0444187c6f7c54b03a97b1956c44a2f24ccc
40382fb1af275e931a765c5001f4b6bd8061991e
[ "The simple solution of just slapping in some random name (`io.netty.all`) is that this will cause the packages within this jar to be exported from more than one Java Module, and that will also cause problems.\r\n\r\nThe other fix is to remove the attribute from the manifest entirely.", "Netty-all is a pom-only dependency now, so it doesn't have any packages in the jar file. Still, it _probably_ shouldn't try to declare any automatic module name." ]
[ "wouldn't it be better to not define a module at all if it not ships any classes ? \r\n\r\n/cc @shs96c", "As long as this jar doesn't contain any classes, it'll be fine. The problems with the module system start when the same class is present in two or more jars: split packages (and therefore duplicate classes) are strictly forbidden.", "ok then it is fine.. Thanks!", "> wouldn't it be better to not define a module at all if it not ships any classes ?\r\n\r\nLike I said above, I tried to make that happen, couldn't get it to work." ]
"2022-03-17T01:11:40Z"
[]
`io.netty:netty-all:4.1.75.Final` declares an empty automatic module name
### Expected behavior If the `Automatic-Module-Name` manifest property is present, it should be set to a meaningful value. This attribute is defined in the manifest for `io.netty:netty-all:4.1.75.Final` ### Actual behavior In `io.netty:netty-all:4.1.75.Final` (and earlier versions), this is declared but not set. This causes any toolchain that attempts to use java modules to get upset. ### Steps to reproduce ```shell jar xvf ~/Downloads/netty-all-4.1.75.Final.jar META-INF/MANIFEST.MF head META-INF/MANIFEST.MF ``` The output is: ``` Manifest-Version: 1.0 Implementation-Title: Netty/All-in-One Bundle-Description: Netty is an asynchronous event-driven network appl ication framework for rapid development of maintainable high perfo rmance protocol servers and clients. Automatic-Module-Name: Bundle-License: https://www.apache.org/licenses/LICENSE-2.0 Bundle-SymbolicName: io.netty.all Implementation-Version: 4.1.75.Final Built-By: norman ``` The problematic thing is that `Automatic-Module-Name` is declared but not set. ### Netty version 4.1.75.Final ### JVM version (e.g. `java -version`) 9+
[ "all/pom.xml" ]
[ "all/pom.xml" ]
[]
diff --git a/all/pom.xml b/all/pom.xml index 7dcd227bd84..1265fe3d3de 100644 --- a/all/pom.xml +++ b/all/pom.xml @@ -29,6 +29,7 @@ <name>Netty/All-in-One</name> <properties> + <javaModuleName>io.netty.all</javaModuleName> <generatedSourceDir>${project.build.directory}/src</generatedSourceDir> <dependencyVersionsDir>${project.build.directory}/versions</dependencyVersionsDir> <japicmp.skip>true</japicmp.skip>
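Assuming the `<javaModuleName>` property in the patch above feeds the `Automatic-Module-Name` manifest entry the same way it does for the other modules of the build, the result can be checked from Java with `ModuleFinder`; with the broken empty attribute, module resolution of the jar should instead fail. A sketch, with the jar path as a placeholder:

```java
import java.lang.module.ModuleFinder;
import java.lang.module.ModuleReference;
import java.nio.file.Path;

public final class ModuleNameCheck {
    public static void main(String[] args) {
        // Path is illustrative; point it at a locally downloaded netty-all artifact.
        ModuleFinder finder = ModuleFinder.of(Path.of("netty-all.jar"));
        for (ModuleReference ref : finder.findAll()) {
            System.out.println(ref.descriptor().name()); // expected: io.netty.all
        }
    }
}
```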
null
train
test
"2022-03-16T12:36:01"
"2022-03-15T16:43:58Z"
shs96c
val
netty/netty/12269_12270
netty/netty
netty/netty/12269
netty/netty/12270
[ "keyword_pr_to_issue" ]
34ae67a5b1fb1f119c1ed509f576c205d1e7fd78
960121db545f2d8aec7a87da8c54a9645fde266c
[]
[]
"2022-04-03T01:05:18Z"
[]
Allow explicitly choosing protocol family
### Expected behavior Netty should be able to bind to a specific protocol family only (IPv4, IPv6) if requested by the caller. ### Actual behavior Netty uses the `java.net.preferIPv4Stack` JVM property to determine whether to bind to IPv4 or IPv6 (possibly IPv6+IPv4, depending on the setting of the IPV6_V6ONLY socket option). ### Steps to reproduce ``` ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup) .channelFactory(configAdapter.getServerChannel(address)) .childHandler(channelInitializer).option(ChannelOption.SO_BACKLOG, Integer.MAX_VALUE) .childOption(ChannelOption.SO_KEEPALIVE, true); bindFuture = b.bind(address); ``` ### Minimal yet complete reproducer code (or URL to code) https://github.com/kvr000/netty-specific-family ### Netty version 4.1.75.Final ### JVM version (e.g. `java -version`) openjdk version "18-ea" 2022-03-15 OpenJDK Runtime Environment (build 18-ea+15-Ubuntu-4) OpenJDK 64-Bit Server VM (build 18-ea+15-Ubuntu-4, mixed mode, sharing) ### OS version (e.g. `uname -a`) Linux ratteburg 5.13.0-37-generic #42-Ubuntu SMP Tue Mar 15 14:34:06 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux ### Solution Add an `InternetProtocolFamily` parameter to the Netty `SocketChannel` classes, so they can construct the underlying socket with the correct family. The JDK supports specifying the family since JDK 15; for Epoll and KQueue, this support needs to be added. A suggestion is provided in the pull request and confirmed as working (so far without automated tests, though). Pull request: https://github.com/netty/netty/pull/12270
[ "transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollDatagramChannel.java", "transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollServerSocketChannel.java", "transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollSocketChannel.java", "transport-classes-epoll/src/main/java/io/netty/channel/epoll/LinuxSocket.java", "transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/BsdSocket.java", "transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueDatagramChannel.java", "transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueSocketChannel.java", "transport-native-unix-common/src/main/java/io/netty/channel/unix/Socket.java", "transport/src/main/java/io/netty/channel/socket/nio/NioServerSocketChannel.java", "transport/src/main/java/io/netty/channel/socket/nio/NioSocketChannel.java" ]
[ "transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollDatagramChannel.java", "transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollServerSocketChannel.java", "transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollSocketChannel.java", "transport-classes-epoll/src/main/java/io/netty/channel/epoll/LinuxSocket.java", "transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/BsdSocket.java", "transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueDatagramChannel.java", "transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueSocketChannel.java", "transport-native-unix-common/src/main/java/io/netty/channel/unix/Socket.java", "transport/src/main/java/io/netty/channel/socket/nio/NioServerSocketChannel.java", "transport/src/main/java/io/netty/channel/socket/nio/NioSocketChannel.java" ]
[]
diff --git a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollDatagramChannel.java b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollDatagramChannel.java index 79e1fbfddc3..0ca449b6654 100644 --- a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollDatagramChannel.java +++ b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollDatagramChannel.java @@ -89,9 +89,7 @@ public EpollDatagramChannel() { * on the Operation Systems default which will be chosen. */ public EpollDatagramChannel(InternetProtocolFamily family) { - this(family == null ? - newSocketDgram(Socket.isIPv6Preferred()) : newSocketDgram(family == InternetProtocolFamily.IPv6), - false); + this(newSocketDgram(family), false); } /** diff --git a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollServerSocketChannel.java b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollServerSocketChannel.java index c794a8f3ec7..a2f188d2211 100644 --- a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollServerSocketChannel.java +++ b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollServerSocketChannel.java @@ -17,6 +17,7 @@ import io.netty.channel.Channel; import io.netty.channel.EventLoop; +import io.netty.channel.socket.InternetProtocolFamily; import io.netty.channel.socket.ServerSocketChannel; import java.io.IOException; @@ -41,7 +42,11 @@ public final class EpollServerSocketChannel extends AbstractEpollServerChannel i private volatile Collection<InetAddress> tcpMd5SigAddresses = Collections.emptyList(); public EpollServerSocketChannel() { - super(newSocketStream(), false); + this((InternetProtocolFamily) null); + } + + public EpollServerSocketChannel(InternetProtocolFamily protocol) { + super(newSocketStream(protocol), false); config = new EpollServerSocketChannelConfig(this); } diff --git a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollSocketChannel.java b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollSocketChannel.java index 48f8a931a3f..ea06b4a1fd3 100644 --- a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollSocketChannel.java +++ b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollSocketChannel.java @@ -19,6 +19,7 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelException; import io.netty.channel.ChannelOutboundBuffer; +import io.netty.channel.socket.InternetProtocolFamily; import io.netty.channel.socket.ServerSocketChannel; import io.netty.channel.socket.SocketChannel; import io.netty.util.concurrent.GlobalEventExecutor; @@ -50,6 +51,11 @@ public EpollSocketChannel() { config = new EpollSocketChannelConfig(this); } + public EpollSocketChannel(InternetProtocolFamily protocol) { + super(newSocketStream(protocol), false); + config = new EpollSocketChannelConfig(this); + } + public EpollSocketChannel(int fd) { super(fd); config = new EpollSocketChannelConfig(this); diff --git a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/LinuxSocket.java b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/LinuxSocket.java index 29271c1290c..d12e60c66f9 100644 --- a/transport-classes-epoll/src/main/java/io/netty/channel/epoll/LinuxSocket.java +++ b/transport-classes-epoll/src/main/java/io/netty/channel/epoll/LinuxSocket.java @@ -331,6 +331,10 @@ public static LinuxSocket newSocketStream(boolean ipv6) { return new LinuxSocket(newSocketStream0(ipv6)); } + public static LinuxSocket newSocketStream(InternetProtocolFamily 
protocol) { + return new LinuxSocket(newSocketStream0(protocol)); + } + public static LinuxSocket newSocketStream() { return newSocketStream(isIPv6Preferred()); } @@ -339,6 +343,10 @@ public static LinuxSocket newSocketDgram(boolean ipv6) { return new LinuxSocket(newSocketDgram0(ipv6)); } + public static LinuxSocket newSocketDgram(InternetProtocolFamily family) { + return new LinuxSocket(newSocketDgram0(family)); + } + public static LinuxSocket newSocketDgram() { return newSocketDgram(isIPv6Preferred()); } diff --git a/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/BsdSocket.java b/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/BsdSocket.java index ad2cd5f4396..97fe4a43c5e 100644 --- a/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/BsdSocket.java +++ b/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/BsdSocket.java @@ -16,6 +16,7 @@ package io.netty.channel.kqueue; import io.netty.channel.DefaultFileRegion; +import io.netty.channel.socket.InternetProtocolFamily; import io.netty.channel.unix.IovArray; import io.netty.channel.unix.PeerCredentials; import io.netty.channel.unix.Socket; @@ -196,10 +197,18 @@ public static BsdSocket newSocketStream() { return new BsdSocket(newSocketStream0()); } + public static BsdSocket newSocketStream(InternetProtocolFamily protocol) { + return new BsdSocket(newSocketStream0(protocol)); + } + public static BsdSocket newSocketDgram() { return new BsdSocket(newSocketDgram0()); } + public static BsdSocket newSocketDgram(InternetProtocolFamily protocol) { + return new BsdSocket(newSocketDgram0(protocol)); + } + public static BsdSocket newSocketDomain() { return new BsdSocket(newSocketDomain0()); } diff --git a/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueDatagramChannel.java b/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueDatagramChannel.java index af99bcc9bdd..7c8dfc9b1ab 100644 --- a/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueDatagramChannel.java +++ b/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueDatagramChannel.java @@ -25,6 +25,7 @@ import io.netty.channel.socket.DatagramChannel; import io.netty.channel.socket.DatagramChannelConfig; import io.netty.channel.socket.DatagramPacket; +import io.netty.channel.socket.InternetProtocolFamily; import io.netty.channel.unix.DatagramSocketAddress; import io.netty.channel.unix.Errors; import io.netty.channel.unix.IovArray; @@ -61,6 +62,11 @@ public KQueueDatagramChannel() { config = new KQueueDatagramChannelConfig(this); } + public KQueueDatagramChannel(InternetProtocolFamily protocol) { + super(null, newSocketDgram(protocol), false); + config = new KQueueDatagramChannelConfig(this); + } + public KQueueDatagramChannel(int fd) { this(new BsdSocket(fd), true); } diff --git a/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueSocketChannel.java b/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueSocketChannel.java index e024a6eaf51..486a895f7ea 100644 --- a/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueSocketChannel.java +++ b/transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/KQueueSocketChannel.java @@ -18,6 +18,7 @@ import io.netty.buffer.ByteBuf; import io.netty.channel.Channel; import io.netty.channel.ChannelOutboundBuffer; +import io.netty.channel.socket.InternetProtocolFamily; import io.netty.channel.socket.ServerSocketChannel; import io.netty.channel.socket.SocketChannel; import 
io.netty.channel.unix.IovArray; @@ -37,6 +38,11 @@ public KQueueSocketChannel() { config = new KQueueSocketChannelConfig(this); } + public KQueueSocketChannel(InternetProtocolFamily protocol) { + super(null, BsdSocket.newSocketStream(protocol), false); + config = new KQueueSocketChannelConfig(this); + } + public KQueueSocketChannel(int fd) { super(new BsdSocket(fd)); config = new KQueueSocketChannelConfig(this); diff --git a/transport-native-unix-common/src/main/java/io/netty/channel/unix/Socket.java b/transport-native-unix-common/src/main/java/io/netty/channel/unix/Socket.java index e58d6342b69..4fb894dc566 100644 --- a/transport-native-unix-common/src/main/java/io/netty/channel/unix/Socket.java +++ b/transport-native-unix-common/src/main/java/io/netty/channel/unix/Socket.java @@ -16,6 +16,7 @@ package io.netty.channel.unix; import io.netty.channel.ChannelException; +import io.netty.channel.socket.InternetProtocolFamily; import io.netty.util.CharsetUtil; import io.netty.util.NetUtil; @@ -499,6 +500,11 @@ public static boolean isIPv6Preferred() { return isIpv6Preferred; } + public static boolean shouldUseIpv6(InternetProtocolFamily family) { + return family == null ? isIPv6Preferred() : + family == InternetProtocolFamily.IPv6; + } + private static native boolean isIPv6Preferred0(boolean ipv4Preferred); private static native boolean isIPv6(int fd); @@ -534,6 +540,10 @@ protected static int newSocketStream0() { return newSocketStream0(isIPv6Preferred()); } + protected static int newSocketStream0(InternetProtocolFamily protocol) { + return newSocketStream0(shouldUseIpv6(protocol)); + } + protected static int newSocketStream0(boolean ipv6) { int res = newSocketStreamFd(ipv6); if (res < 0) { @@ -546,6 +556,10 @@ protected static int newSocketDgram0() { return newSocketDgram0(isIPv6Preferred()); } + protected static int newSocketDgram0(InternetProtocolFamily family) { + return newSocketDgram0(shouldUseIpv6(family)); + } + protected static int newSocketDgram0(boolean ipv6) { int res = newSocketDgramFd(ipv6); if (res < 0) { diff --git a/transport/src/main/java/io/netty/channel/socket/nio/NioServerSocketChannel.java b/transport/src/main/java/io/netty/channel/socket/nio/NioServerSocketChannel.java index a90a4277165..e9304a89098 100644 --- a/transport/src/main/java/io/netty/channel/socket/nio/NioServerSocketChannel.java +++ b/transport/src/main/java/io/netty/channel/socket/nio/NioServerSocketChannel.java @@ -19,6 +19,7 @@ import io.netty.channel.ChannelMetadata; import io.netty.channel.ChannelOption; import io.netty.channel.ChannelOutboundBuffer; +import io.netty.channel.socket.InternetProtocolFamily; import io.netty.util.internal.SocketUtils; import io.netty.channel.nio.AbstractNioMessageChannel; import io.netty.channel.socket.DefaultServerSocketChannelConfig; @@ -29,7 +30,10 @@ import io.netty.util.internal.logging.InternalLoggerFactory; import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.net.InetSocketAddress; +import java.net.ProtocolFamily; import java.net.ServerSocket; import java.net.SocketAddress; import java.nio.channels.SelectionKey; @@ -51,7 +55,21 @@ public class NioServerSocketChannel extends AbstractNioMessageChannel private static final InternalLogger logger = InternalLoggerFactory.getInstance(NioServerSocketChannel.class); - private static ServerSocketChannel newSocket(SelectorProvider provider) { + private static final Method OPEN_SERVER_SOCKET_CHANNEL_WITH_FAMILY; + + static { + Method found = null; + try { + 
found = SelectorProvider.class.getMethod( + "openServerSocketChannel", ProtocolFamily.class); + } catch (Throwable e) { + logger.info("openServerSocketChannel(ProtocolFamily) not available, will use default method", e); + } + OPEN_SERVER_SOCKET_CHANNEL_WITH_FAMILY = found; + } + + @SuppressJava6Requirement(reason = "Usage guarded by java version check") + private static ServerSocketChannel newSocket(SelectorProvider provider, InternetProtocolFamily family) { try { /** * Use the {@link SelectorProvider} to open {@link SocketChannel} and so remove condition in @@ -59,6 +77,16 @@ private static ServerSocketChannel newSocket(SelectorProvider provider) { * * See <a href="https://github.com/netty/netty/issues/2308">#2308</a>. */ + if (family != null && OPEN_SERVER_SOCKET_CHANNEL_WITH_FAMILY != null) { + try { + return (ServerSocketChannel) OPEN_SERVER_SOCKET_CHANNEL_WITH_FAMILY.invoke( + provider, ProtocolFamilyConverter.convert(family)); + } catch (InvocationTargetException e) { + throw new IOException(e); + } catch (IllegalAccessException e) { + throw new IOException(e); + } + } return provider.openServerSocketChannel(); } catch (IOException e) { throw new ChannelException( @@ -72,14 +100,21 @@ private static ServerSocketChannel newSocket(SelectorProvider provider) { * Create a new instance */ public NioServerSocketChannel() { - this(newSocket(DEFAULT_SELECTOR_PROVIDER)); + this(DEFAULT_SELECTOR_PROVIDER); } /** * Create a new instance using the given {@link SelectorProvider}. */ public NioServerSocketChannel(SelectorProvider provider) { - this(newSocket(provider)); + this(provider, null); + } + + /** + * Create a new instance using the given {@link SelectorProvider} and protocol family (supported only since JDK 15). + */ + public NioServerSocketChannel(SelectorProvider provider, InternetProtocolFamily family) { + this(newSocket(provider, family)); } /** diff --git a/transport/src/main/java/io/netty/channel/socket/nio/NioSocketChannel.java b/transport/src/main/java/io/netty/channel/socket/nio/NioSocketChannel.java index b1570a9fa6a..15a745eafbf 100644 --- a/transport/src/main/java/io/netty/channel/socket/nio/NioSocketChannel.java +++ b/transport/src/main/java/io/netty/channel/socket/nio/NioSocketChannel.java @@ -28,6 +28,7 @@ import io.netty.channel.RecvByteBufAllocator; import io.netty.channel.nio.AbstractNioByteChannel; import io.netty.channel.socket.DefaultSocketChannelConfig; +import io.netty.channel.socket.InternetProtocolFamily; import io.netty.channel.socket.ServerSocketChannel; import io.netty.channel.socket.SocketChannelConfig; import io.netty.util.concurrent.GlobalEventExecutor; @@ -39,7 +40,10 @@ import io.netty.util.internal.logging.InternalLoggerFactory; import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.net.InetSocketAddress; +import java.net.ProtocolFamily; import java.net.Socket; import java.net.SocketAddress; import java.nio.ByteBuffer; @@ -58,6 +62,19 @@ public class NioSocketChannel extends AbstractNioByteChannel implements io.netty private static final InternalLogger logger = InternalLoggerFactory.getInstance(NioSocketChannel.class); private static final SelectorProvider DEFAULT_SELECTOR_PROVIDER = SelectorProvider.provider(); + private static final Method OPEN_SOCKET_CHANNEL_WITH_FAMILY; + + static { + Method found = null; + try { + found = SelectorProvider.class.getMethod( + "openSocketChannel", ProtocolFamily.class); + } catch (Throwable e) { + logger.warn("openSocketChannel(ProtocolFamily) not 
available, will use default", e); + } + OPEN_SOCKET_CHANNEL_WITH_FAMILY = found; + } + private static SocketChannel newSocket(SelectorProvider provider) { try { /** @@ -72,6 +89,31 @@ private static SocketChannel newSocket(SelectorProvider provider) { } } + @SuppressJava6Requirement(reason = "Usage guarded by java version check") + private static SocketChannel newSocket(SelectorProvider provider, InternetProtocolFamily family) { + try { + /** + * Use the {@link SelectorProvider} to open {@link SocketChannel} and so remove condition in + * {@link SelectorProvider#provider()} which is called by each SocketChannel.open() otherwise. + * + * See <a href="https://github.com/netty/netty/issues/2308">#2308</a>. + */ + if (family != null && OPEN_SOCKET_CHANNEL_WITH_FAMILY != null) { + try { + return (SocketChannel) OPEN_SOCKET_CHANNEL_WITH_FAMILY.invoke( + provider, ProtocolFamilyConverter.convert(family)); + } catch (InvocationTargetException e) { + throw new IOException(e); + } catch (IllegalAccessException e) { + throw new IOException(e); + } + } + return provider.openSocketChannel(); + } catch (IOException e) { + throw new ChannelException("Failed to open a socket.", e); + } + } + private final SocketChannelConfig config; /** @@ -85,7 +127,14 @@ public NioSocketChannel() { * Create a new instance using the given {@link SelectorProvider}. */ public NioSocketChannel(SelectorProvider provider) { - this(newSocket(provider)); + this(provider, null); + } + + /** + * Create a new instance using the given {@link SelectorProvider} and protocol family (supported only since JDK 15). + */ + public NioSocketChannel(SelectorProvider provider, InternetProtocolFamily family) { + this(newSocket(provider, family)); } /**
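With the constructors added in the patch, the reproducer from the problem statement can pin the server channel to one family. A sketch reusing the names from that snippet (`bossGroup`, `workerGroup`, `channelInitializer`, `address` are assumed to exist, as in the original fragment); on JDK 15+ the reflective `openServerSocketChannel(ProtocolFamily)` path is taken, otherwise the default open method is used:

```java
ServerBootstrap b = new ServerBootstrap();
b.group(bossGroup, workerGroup)
 // IPv4-only bind via the new family-aware constructor.
 .channelFactory(() -> new NioServerSocketChannel(
         SelectorProvider.provider(), InternetProtocolFamily.IPv4))
 .childHandler(channelInitializer)
 .option(ChannelOption.SO_BACKLOG, Integer.MAX_VALUE)
 .childOption(ChannelOption.SO_KEEPALIVE, true);
ChannelFuture bindFuture = b.bind(address);
```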
null
val
test
"2022-03-31T22:48:23"
"2022-04-03T01:03:59Z"
kvr000
val
netty/netty/11701_12272
netty/netty
netty/netty/11701
netty/netty/12272
[ "keyword_pr_to_issue" ]
92599a7ebbff6fcb5a8a2eab8f217ea51ad73f01
81e90785e8ef4596817748648540a52e3e681944
[ "The binary compatibility baseline for our native code is the GLIBC version that's used on CentOS 6. The work-around is to use the Nio transport.\r\n\r\nIt's not clear from the error reports how this is really showing up. If it's an ABI problem, then I'm not sure what we can do to mitigate it. If we are calling glibc specific functions, then I wonder if we can somehow check what libc we are using at runtime.", ">It's not clear from the error reports how this is really showing up.\r\n\r\nI'm not sure what your question is. The error happens when Netty native is used without having glibc.\r\n\r\n>If it's an ABI problem...\r\n\r\nI don't think it is an ABI problem since glibc is missing in Alpine, there is nothing to be compatible with (and based on [this comment ](https://github.com/netty/netty/issues/6841#issuecomment-307716433), other libc implementations are out of question).\r\n\r\nI think checking if glibc is available or not and/or fixing `Epoll.isAvailable()` would fix the issue (it seems it returns true even if glibc is missing).", "I put together a little test for this: https://github.com/jonatan-ivanov/netty-gh-11701\r\n\r\nI called `Epoll.isAvailable()` and `Epoll.ensureAvailability()` on different distros to see if they return false/fail in case glibc is missing. It seems `isAvailable` does not return false and `ensureAvailability` does not fail even if glibc is missing.\r\n\r\nImage | `isAvailable` | `ensureAvailability` | Comment\r\n------ | ------------------- | --------------------------- | -------------\r\nbellsoft_liberica-openjre-debian:17 | `true` | `OK` | works\r\nbellsoft_liberica-openjre-alpine:17 | `true` | `OK` | works since it contains glibc\r\n**bellsoft_liberica-openjre-alpine-musl:17** | `true` | `OK` | **does not work as expected**\r\nazul_zulu-openjdk:17 | `true` | `OK` | works as expected\r\n**azul_zulu-openjdk-alpine:17-jre** | `true` | `OK` | **does not work as expected**\r\n\r\n", "There's no way to detect the libc vendor from within a process, as far as I can see. And calling out to `lld`, or something like that, is quite awkward and can be a security liability. I think the best we can do for the moment is to document that only the Nio transport is supported on musl.\r\n\r\nWhat do you think, @normanmaurer ?", "I agree with @chrisvest here ", "I'm not very familiar with this domain but do you think is it at least possible to throw an exception instead of crashing a JVM if such a thing occur?\r\n\r\nAlso can you use the `__GLIBC__ ` from [`features.h`](https://sourceware.org/git/?p=glibc.git;a=blob;f=include/features.h;h=d974eabfafc24ffb9ff4cd066b650134df32048b;hb=refs/heads/master#l477) to detect if you are dealing with glibc or not or is this only possible compile time?\r\n", "@jonatan-ivanov That's only at compile time. At runtime you can feature detect by inspecting linked symbols, except you cant do that without calling into libc which in this case would cause a crash. Alternatively we shell out to a program to inspect libc, but that is also bad for multiple reasons, and we don't want to do that.", "I have updated the documentation on the native transports to note that musl is not officially supported: https://github.com/netty/netty/wiki/Native-transports\r\n\r\nI think that's the best we can do for now.", "musl generally tries to support glibc-linked binaries invoking POSIX and some BSD extensions. errors related to GNU-specific extensions (and glibc header bastardizations) should be caught at load time, as in #6841 \"__strndup: symbol not found\". 
that particular issue can be fixed by installing the gcompat package in alpine linux. segfaults are, as you say, caused by some ABI incompatibility.\r\n\r\ni briefly glanced through netty c code and found some suspicious items: https://github.com/netty/netty/blob/6b3ec62e6e4e19a85304aecd8cb5331a3fcc70be/transport-native-unix-common/src/main/c/netty_unix_errors.c#L41 is suspicious: application code should never check feature test macros, because it should be defining them itself. https://github.com/netty/netty/blob/51ebcbd9aba7eac854716e45f07765744f8dcdaf/transport-native-epoll/src/main/c/netty_epoll_native.c#L140 is suspicious: sysctl values should always be accessed using `read`/`write`, never stdio. some sysctls will not be read correctly if using stdio. neither of these should cause a segfault at run time though.", "@Hello71 The netty_unix_errors code is because we wish to use a thread-safe strerror variant, which is not immediately portable. (I'm not familiar with the other one you point out)\r\nIf you know of a way to improve it, we'd love to review a PR!", "Fix reverted because it negatively impacts musl-based systems that also use the glibc compatibility layer.", "@chrisvest Would you consider reopening this issue so that we can track another fix (if any)?", "reopened ", "fwiw if you want to do this in the dumbest possible way, you can check for `gnu_get_libc_version`. if your program is segfaulting and not just failing with symbol not found, then most likely there is some more serious issue with your program, not related to musl.", "@Hello71 The JVM itself might rely on signals and tie them to internal semantics, i.e. rare NullPointerExceptions sometimes rely on segfaults to optimise a non-null fast path. So I wouldn't feel very confident trying to trap signals in JNI library code." ]
[]
"2022-04-04T02:08:13Z"
[]
Throwing an exception in case glibc is missing instead of segfaulting the JVM
It seems Netty's native support depends on glibc (btw is this documented?). When glibc is missing (for example using vanilla Alpine), the JVM could end up with a crash (`SIGSEGV`), see details in this issue: https://github.com/micrometer-metrics/micrometer/issues/2776 It also seems that `Epoll.isAvailable()` returns `true` even if glibc is missing. ### Expected behavior Throwing an exception ### Actual behavior JVM crash ### Steps to reproduce Using Netty native with an environment that does not have glibc, e.g.: vanilla Alpine ### Minimal yet complete reproducer code (or URL to code) This seems to be a known issue, please let me know if you really need a reproducer. ### Netty version `4.1.68` (latest) ### JVM version (e.g. `java -version`) Latest 11: ```bash ❯ docker run --rm 'azul/zulu-openjdk-alpine:11-jre' 'sh' '-c' 'java --version' openjdk 11.0.12 2021-07-20 LTS OpenJDK Runtime Environment Zulu11.50+19-CA (build 11.0.12+7-LTS) OpenJDK 64-Bit Server VM Zulu11.50+19-CA (build 11.0.12+7-LTS, mixed mode) ``` Or also latest 17: ```bash ❯ docker run --rm 'bellsoft/liberica-openjdk-alpine-musl:17' 'sh' '-c' 'java --version' openjdk 17 2021-09-14 LTS OpenJDK Runtime Environment (build 17+35-LTS) OpenJDK 64-Bit Server VM (build 17+35-LTS, mixed mode) ``` I assume this is an issue in every JRE that is currently supported. ### OS version (e.g. `uname -a`) Both images above have the same output: ```bash ❯ docker run --rm 'azul/zulu-openjdk-alpine:11-jre' 'sh' '-c' 'uname -a' Linux 1ee8d1090f14 5.10.47-linuxkit #1 SMP Sat Jul 3 21:51:47 UTC 2021 x86_64 Linux ``` I assume this is an issue in every OS where glibc is missing.
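Until runtime detection improves, the practical guard is the one the wiki change in the discussion points to: prefer NIO unless the native transport is positively known to work. A minimal sketch of the usual availability check; note that the whole point of this issue is that on musl `Epoll.isAvailable()` may still return `true`, so on musl-based images the NIO branch should be forced regardless:

```java
import io.netty.channel.EventLoopGroup;
import io.netty.channel.ServerChannel;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public final class TransportChoice {
    public static void main(String[] args) {
        EventLoopGroup group;
        Class<? extends ServerChannel> channelType;
        if (Epoll.isAvailable()) {
            // On glibc systems this is safe; on musl it may wrongly pass.
            group = new EpollEventLoopGroup();
            channelType = EpollServerSocketChannel.class;
        } else {
            group = new NioEventLoopGroup();
            channelType = NioServerSocketChannel.class;
        }
        System.out.println("using " + channelType.getSimpleName());
        group.shutdownGracefully();
    }
}
```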
[ "transport-native-epoll/pom.xml" ]
[ "transport-native-epoll/pom.xml" ]
[]
diff --git a/transport-native-epoll/pom.xml b/transport-native-epoll/pom.xml index 1dec5ca0dbc..7707b7b100e 100644 --- a/transport-native-epoll/pom.xml +++ b/transport-native-epoll/pom.xml @@ -32,8 +32,9 @@ <unix.common.lib.dir>${project.build.directory}/unix-common-lib</unix.common.lib.dir> <unix.common.lib.unpacked.dir>${unix.common.lib.dir}/META-INF/native/lib</unix.common.lib.unpacked.dir> <unix.common.include.unpacked.dir>${unix.common.lib.dir}/META-INF/native/include</unix.common.include.unpacked.dir> - <jni.compiler.args.cflags>CFLAGS=-O3 -Werror -fno-omit-frame-pointer -Wunused-variable -fvisibility=hidden -I${unix.common.include.unpacked.dir}</jni.compiler.args.cflags> - <jni.compiler.args.ldflags>LDFLAGS=-L${unix.common.lib.unpacked.dir} -Wl,--no-as-needed -lrt -ldl -Wl,--whole-archive -l${unix.common.lib.name} -Wl,--no-whole-archive</jni.compiler.args.ldflags> + <jni.compiler.args.cflags>CFLAGS=-O2 -pipe -Werror -fno-omit-frame-pointer -Wunused-variable -fvisibility=hidden -D_FORTIFY_SOURCE=2 -ffunction-sections -fdata-sections -I${unix.common.include.unpacked.dir}</jni.compiler.args.cflags> + <jni.compiler.args.ldflags>LDFLAGS=-Wl,-z,relro -Wl,-z,now -Wl,--as-needed -Wl,--gc-sections -L${unix.common.lib.unpacked.dir}</jni.compiler.args.ldflags> + <jni.compiler.args.libs>LIBS=-Wl,--whole-archive -l${unix.common.lib.name} -Wl,--no-whole-archive -ldl</jni.compiler.args.libs> <nativeSourceDirectory>${project.basedir}/src/main/c</nativeSourceDirectory> <skipTests>true</skipTests> </properties> @@ -154,6 +155,7 @@ <platform>.</platform> <configureArgs> <arg>${jni.compiler.args.ldflags}</arg> + <arg>${jni.compiler.args.libs}</arg> <arg>${jni.compiler.args.cflags}</arg> <configureArg>--libdir=${project.build.directory}/native-build/target/lib</configureArg> </configureArgs> @@ -303,6 +305,7 @@ <platform>.</platform> <configureArgs> <arg>${jni.compiler.args.ldflags}</arg> + <arg>${jni.compiler.args.libs}</arg> <arg>${jni.compiler.args.cflags}</arg> <configureArg>--libdir=${project.build.directory}/native-build/target/lib</configureArg> <configureArg>--host=aarch64-linux-gnu</configureArg>
null
test
test
"2022-04-09T18:22:50"
"2021-09-22T02:15:46Z"
jonatan-ivanov
val
netty/netty/12269_12281
netty/netty
netty/netty/12269
netty/netty/12281
[ "keyword_pr_to_issue" ]
e267958b4c91c214bb6810189b1a9acb65122018
243893082feaa26cd89fc0dbc099f63b56610456
[]
[]
"2022-04-05T07:29:02Z"
[]
Allow explicitly choosing protocol family
### Expected behavior Netty should be able to bind to a specific protocol family only (IPv4, IPv6) if requested by the caller. ### Actual behavior Netty uses the `java.net.preferIPv4Stack` JVM property to determine whether to bind to IPv4 or IPv6 (possibly IPv6+IPv4, depending on the setting of the IPV6_V6ONLY socket option). ### Steps to reproduce ``` ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup) .channelFactory(configAdapter.getServerChannel(address)) .childHandler(channelInitializer).option(ChannelOption.SO_BACKLOG, Integer.MAX_VALUE) .childOption(ChannelOption.SO_KEEPALIVE, true); bindFuture = b.bind(address); ``` ### Minimal yet complete reproducer code (or URL to code) https://github.com/kvr000/netty-specific-family ### Netty version 4.1.75.Final ### JVM version (e.g. `java -version`) openjdk version "18-ea" 2022-03-15 OpenJDK Runtime Environment (build 18-ea+15-Ubuntu-4) OpenJDK 64-Bit Server VM (build 18-ea+15-Ubuntu-4, mixed mode, sharing) ### OS version (e.g. `uname -a`) Linux ratteburg 5.13.0-37-generic #42-Ubuntu SMP Tue Mar 15 14:34:06 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux ### Solution Add an `InternetProtocolFamily` parameter to the Netty `SocketChannel` classes, so they can construct the underlying socket with the correct family. The JDK supports specifying the family since JDK 15; for Epoll and KQueue, this support needs to be added. A suggestion is provided in the pull request and confirmed as working (so far without automated tests, though). Pull request: https://github.com/netty/netty/pull/12270
[ "transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollDatagramChannel.java", "transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollServerSocketChannel.java", "transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollSocketChannel.java", "transport-classes-epoll/src/main/java/io/netty5/channel/epoll/LinuxSocket.java", "transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/BsdSocket.java", "transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueDatagramChannel.java", "transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueSocketChannel.java", "transport-native-unix-common/src/main/java/io/netty5/channel/unix/Socket.java", "transport/src/main/java/io/netty5/channel/socket/nio/NioServerSocketChannel.java", "transport/src/main/java/io/netty5/channel/socket/nio/NioSocketChannel.java" ]
[ "transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollDatagramChannel.java", "transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollServerSocketChannel.java", "transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollSocketChannel.java", "transport-classes-epoll/src/main/java/io/netty5/channel/epoll/LinuxSocket.java", "transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/BsdSocket.java", "transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueDatagramChannel.java", "transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueSocketChannel.java", "transport-native-unix-common/src/main/java/io/netty5/channel/unix/Socket.java", "transport/src/main/java/io/netty5/channel/socket/nio/NioServerSocketChannel.java", "transport/src/main/java/io/netty5/channel/socket/nio/NioSocketChannel.java" ]
[]
diff --git a/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollDatagramChannel.java b/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollDatagramChannel.java index 70200462201..b702a2c8a10 100644 --- a/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollDatagramChannel.java +++ b/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollDatagramChannel.java @@ -98,9 +98,7 @@ public EpollDatagramChannel(EventLoop eventLoop) { * on the Operation Systems default which will be chosen. */ public EpollDatagramChannel(EventLoop eventLoop, InternetProtocolFamily family) { - this(eventLoop, family == null ? - newSocketDgram(Socket.isIPv6Preferred()) : - newSocketDgram(family == InternetProtocolFamily.IPv6), + this(eventLoop, newSocketDgram(family), false); } diff --git a/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollServerSocketChannel.java b/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollServerSocketChannel.java index a982c32a858..f22305f1fdb 100644 --- a/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollServerSocketChannel.java +++ b/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollServerSocketChannel.java @@ -18,6 +18,7 @@ import io.netty5.channel.Channel; import io.netty5.channel.EventLoop; import io.netty5.channel.EventLoopGroup; +import io.netty5.channel.socket.InternetProtocolFamily; import io.netty5.channel.socket.ServerSocketChannel; import java.io.IOException; @@ -42,7 +43,12 @@ public final class EpollServerSocketChannel extends AbstractEpollServerChannel i private volatile Collection<InetAddress> tcpMd5SigAddresses = Collections.emptyList(); public EpollServerSocketChannel(EventLoop eventLoop, EventLoopGroup childEventLoopGroup) { - super(eventLoop, childEventLoopGroup, newSocketStream(), false); + this(eventLoop, childEventLoopGroup, (InternetProtocolFamily) null); + } + + public EpollServerSocketChannel(EventLoop eventLoop, EventLoopGroup childEventLoopGroup, + InternetProtocolFamily protocolFamily) { + super(eventLoop, childEventLoopGroup, newSocketStream(protocolFamily), false); config = new EpollServerSocketChannelConfig(this); } diff --git a/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollSocketChannel.java b/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollSocketChannel.java index cc58f984afc..2c6b07260e6 100644 --- a/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollSocketChannel.java +++ b/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/EpollSocketChannel.java @@ -22,6 +22,7 @@ import io.netty5.channel.ChannelException; import io.netty5.channel.ChannelOutboundBuffer; import io.netty5.channel.EventLoop; +import io.netty5.channel.socket.InternetProtocolFamily; import io.netty5.channel.socket.ServerSocketChannel; import io.netty5.channel.socket.SocketChannel; import io.netty5.util.concurrent.GlobalEventExecutor; @@ -49,7 +50,11 @@ public final class EpollSocketChannel extends AbstractEpollStreamChannel impleme private volatile Collection<InetAddress> tcpMd5SigAddresses = Collections.emptyList(); public EpollSocketChannel(EventLoop eventLoop) { - super(eventLoop, newSocketStream(), false); + this(eventLoop, null); + } + + public EpollSocketChannel(EventLoop eventLoop, InternetProtocolFamily protocolFamily) { + super(eventLoop, newSocketStream(protocolFamily), false); config = new EpollSocketChannelConfig(this); } diff --git 
a/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/LinuxSocket.java b/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/LinuxSocket.java index ed74ec651f6..32916a72432 100644 --- a/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/LinuxSocket.java +++ b/transport-classes-epoll/src/main/java/io/netty5/channel/epoll/LinuxSocket.java @@ -328,6 +328,10 @@ public static LinuxSocket newSocketStream(boolean ipv6) { return new LinuxSocket(newSocketStream0(ipv6)); } + public static LinuxSocket newSocketStream(InternetProtocolFamily protocol) { + return new LinuxSocket(newSocketStream0(protocol)); + } + public static LinuxSocket newSocketStream() { return newSocketStream(isIPv6Preferred()); } @@ -336,6 +340,10 @@ public static LinuxSocket newSocketDgram(boolean ipv6) { return new LinuxSocket(newSocketDgram0(ipv6)); } + public static LinuxSocket newSocketDgram(InternetProtocolFamily family) { + return new LinuxSocket(newSocketDgram0(family)); + } + public static LinuxSocket newSocketDgram() { return newSocketDgram(isIPv6Preferred()); } diff --git a/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/BsdSocket.java b/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/BsdSocket.java index ba9563b34ae..70bfd22b011 100644 --- a/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/BsdSocket.java +++ b/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/BsdSocket.java @@ -16,6 +16,8 @@ package io.netty5.channel.kqueue; import io.netty5.channel.DefaultFileRegion; +import io.netty5.channel.socket.InternetProtocolFamily; + import io.netty5.channel.unix.IovArray; import io.netty5.channel.unix.PeerCredentials; import io.netty5.channel.unix.Socket; @@ -196,10 +198,18 @@ public static BsdSocket newSocketStream() { return new BsdSocket(newSocketStream0()); } + public static BsdSocket newSocketStream(InternetProtocolFamily protocol) { + return new BsdSocket(newSocketStream0(protocol)); + } + public static BsdSocket newSocketDgram() { return new BsdSocket(newSocketDgram0()); } + public static BsdSocket newSocketDgram(InternetProtocolFamily protocol) { + return new BsdSocket(newSocketDgram0(protocol)); + } + public static BsdSocket newSocketDomain() { return new BsdSocket(newSocketDomain0()); } diff --git a/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueDatagramChannel.java b/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueDatagramChannel.java index cde38c730f7..4c9a7657f3d 100644 --- a/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueDatagramChannel.java +++ b/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueDatagramChannel.java @@ -28,6 +28,7 @@ import io.netty5.channel.socket.DatagramChannel; import io.netty5.channel.socket.DatagramChannelConfig; import io.netty5.channel.socket.DatagramPacket; +import io.netty5.channel.socket.InternetProtocolFamily; import io.netty5.channel.unix.DatagramSocketAddress; import io.netty5.channel.unix.Errors; import io.netty5.channel.unix.IovArray; @@ -64,7 +65,11 @@ public final class KQueueDatagramChannel extends AbstractKQueueDatagramChannel i private final KQueueDatagramChannelConfig config; public KQueueDatagramChannel(EventLoop eventLoop) { - super(null, eventLoop, newSocketDgram(), false); + this(eventLoop, null); + } + + public KQueueDatagramChannel(EventLoop eventLoop, InternetProtocolFamily protocolFamily) { + super(null, eventLoop, newSocketDgram(protocolFamily), false); config = new 
KQueueDatagramChannelConfig(this); } diff --git a/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueSocketChannel.java b/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueSocketChannel.java index 0c1e6b589c5..2aa1f32a105 100644 --- a/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueSocketChannel.java +++ b/transport-classes-kqueue/src/main/java/io/netty5/channel/kqueue/KQueueSocketChannel.java @@ -19,6 +19,7 @@ import io.netty5.channel.Channel; import io.netty5.channel.ChannelOutboundBuffer; import io.netty5.channel.EventLoop; +import io.netty5.channel.socket.InternetProtocolFamily; import io.netty5.channel.socket.ServerSocketChannel; import io.netty5.channel.socket.SocketChannel; import io.netty5.channel.unix.IovArray; @@ -34,7 +35,11 @@ public final class KQueueSocketChannel extends AbstractKQueueStreamChannel imple private final KQueueSocketChannelConfig config; public KQueueSocketChannel(EventLoop eventLoop) { - super(null, eventLoop, BsdSocket.newSocketStream(), false); + this(eventLoop, null); + } + + public KQueueSocketChannel(EventLoop eventLoop, InternetProtocolFamily protocol) { + super(null, eventLoop, BsdSocket.newSocketStream(protocol), false); config = new KQueueSocketChannelConfig(this); } diff --git a/transport-native-unix-common/src/main/java/io/netty5/channel/unix/Socket.java b/transport-native-unix-common/src/main/java/io/netty5/channel/unix/Socket.java index 3325f206243..1253fd15446 100644 --- a/transport-native-unix-common/src/main/java/io/netty5/channel/unix/Socket.java +++ b/transport-native-unix-common/src/main/java/io/netty5/channel/unix/Socket.java @@ -16,6 +16,7 @@ package io.netty5.channel.unix; import io.netty5.channel.ChannelException; +import io.netty5.channel.socket.InternetProtocolFamily; import io.netty5.util.CharsetUtil; import io.netty5.util.NetUtil; @@ -499,6 +500,11 @@ public static boolean isIPv6Preferred() { return isIpv6Preferred; } + public static boolean shouldUseIpv6(InternetProtocolFamily family) { + return family == null ? 
isIPv6Preferred() : + family == InternetProtocolFamily.IPv6; + } + private static native boolean isIPv6Preferred0(boolean ipv4Preferred); private static native boolean isIPv6(int fd); @@ -534,6 +540,10 @@ protected static int newSocketStream0() { return newSocketStream0(isIPv6Preferred()); } + protected static int newSocketStream0(InternetProtocolFamily protocol) { + return newSocketStream0(shouldUseIpv6(protocol)); + } + protected static int newSocketStream0(boolean ipv6) { int res = newSocketStreamFd(ipv6); if (res < 0) { @@ -546,6 +556,10 @@ protected static int newSocketDgram0() { return newSocketDgram0(isIPv6Preferred()); } + protected static int newSocketDgram0(InternetProtocolFamily family) { + return newSocketDgram0(shouldUseIpv6(family)); + } + protected static int newSocketDgram0(boolean ipv6) { int res = newSocketDgramFd(ipv6); if (res < 0) { diff --git a/transport/src/main/java/io/netty5/channel/socket/nio/NioServerSocketChannel.java b/transport/src/main/java/io/netty5/channel/socket/nio/NioServerSocketChannel.java index 93d98274bfb..b9adc172308 100644 --- a/transport/src/main/java/io/netty5/channel/socket/nio/NioServerSocketChannel.java +++ b/transport/src/main/java/io/netty5/channel/socket/nio/NioServerSocketChannel.java @@ -23,6 +23,7 @@ import io.netty5.channel.ChannelOutboundBuffer; import io.netty5.channel.EventLoop; import io.netty5.channel.EventLoopGroup; +import io.netty5.channel.socket.InternetProtocolFamily; import io.netty5.util.internal.SocketUtils; import io.netty5.channel.nio.AbstractNioMessageChannel; import io.netty5.channel.socket.DefaultServerSocketChannelConfig; @@ -31,7 +32,10 @@ import io.netty5.util.internal.logging.InternalLoggerFactory; import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.net.InetSocketAddress; +import java.net.ProtocolFamily; import java.net.ServerSocket; import java.net.SocketAddress; import java.nio.channels.SelectionKey; @@ -53,12 +57,35 @@ public class NioServerSocketChannel extends AbstractNioMessageChannel private static final InternalLogger logger = InternalLoggerFactory.getInstance(NioServerSocketChannel.class); - private static ServerSocketChannel newSocket(SelectorProvider provider) { + private static final Method OPEN_SERVER_SOCKET_CHANNEL_WITH_FAMILY; + + static { + Method found = null; + try { + found = SelectorProvider.class.getMethod( + "openServerSocketChannel", ProtocolFamily.class); + } catch (Throwable e) { + logger.info("openServerSocketChannel(ProtocolFamily) not available, will use default method", e); + } + OPEN_SERVER_SOCKET_CHANNEL_WITH_FAMILY = found; + } + + private static ServerSocketChannel newSocket(SelectorProvider provider, InternetProtocolFamily family) { try { - // Use the {@link SelectorProvider} to open {@link SocketChannel} and so remove condition in - // {@link SelectorProvider#provider()} which is called by each ServerSocketChannel.open() otherwise. - // - // See <a href="https://github.com/netty/netty/issues/2308">#2308</a>. + /** + * Use the {@link SelectorProvider} to open {@link SocketChannel} and so remove condition in + * {@link SelectorProvider#provider()} which is called by each ServerSocketChannel.open() otherwise. + * + * See <a href="https://github.com/netty/netty/issues/2308">#2308</a>. 
+ */ + if (family != null && OPEN_SERVER_SOCKET_CHANNEL_WITH_FAMILY != null) { + try { + return (ServerSocketChannel) OPEN_SERVER_SOCKET_CHANNEL_WITH_FAMILY.invoke( + provider, ProtocolFamilyConverter.convert(family)); + } catch (InvocationTargetException | IllegalAccessException e) { + throw new IOException(e); + } + } return provider.openServerSocketChannel(); } catch (IOException e) { throw new ChannelException( @@ -73,14 +100,22 @@ private static ServerSocketChannel newSocket(SelectorProvider provider) { * Create a new instance */ public NioServerSocketChannel(EventLoop eventLoop, EventLoopGroup childEventLoopGroup) { - this(eventLoop, childEventLoopGroup, newSocket(DEFAULT_SELECTOR_PROVIDER)); + this(eventLoop, childEventLoopGroup, DEFAULT_SELECTOR_PROVIDER); } /** * Create a new instance using the given {@link SelectorProvider}. */ public NioServerSocketChannel(EventLoop eventLoop, EventLoopGroup childEventLoopGroup, SelectorProvider provider) { - this(eventLoop, childEventLoopGroup, newSocket(provider)); + this(eventLoop, childEventLoopGroup, provider, null); + } + + /** + * Create a new instance using the given {@link SelectorProvider} and protocol family (supported only since JDK 15). + */ + public NioServerSocketChannel(EventLoop eventLoop, EventLoopGroup childEventLoopGroup, + SelectorProvider provider, InternetProtocolFamily protocolFamily) { + this(eventLoop, childEventLoopGroup, newSocket(provider, protocolFamily)); } /** diff --git a/transport/src/main/java/io/netty5/channel/socket/nio/NioSocketChannel.java b/transport/src/main/java/io/netty5/channel/socket/nio/NioSocketChannel.java index a48254a28f4..c1f99967d74 100644 --- a/transport/src/main/java/io/netty5/channel/socket/nio/NioSocketChannel.java +++ b/transport/src/main/java/io/netty5/channel/socket/nio/NioSocketChannel.java @@ -26,6 +26,7 @@ import io.netty5.channel.RecvBufferAllocator; import io.netty5.channel.nio.AbstractNioByteChannel; import io.netty5.channel.socket.DefaultSocketChannelConfig; +import io.netty5.channel.socket.InternetProtocolFamily; import io.netty5.channel.socket.ServerSocketChannel; import io.netty5.channel.socket.SocketChannelConfig; import io.netty5.util.concurrent.Future; @@ -37,7 +38,10 @@ import io.netty5.util.internal.logging.InternalLoggerFactory; import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.net.InetSocketAddress; +import java.net.ProtocolFamily; import java.net.Socket; import java.net.SocketAddress; import java.nio.ByteBuffer; @@ -56,6 +60,19 @@ public class NioSocketChannel extends AbstractNioByteChannel implements io.netty private static final InternalLogger logger = InternalLoggerFactory.getInstance(NioSocketChannel.class); private static final SelectorProvider DEFAULT_SELECTOR_PROVIDER = SelectorProvider.provider(); + private static final Method OPEN_SOCKET_CHANNEL_WITH_FAMILY; + + static { + Method found = null; + try { + found = SelectorProvider.class.getMethod( + "openSocketChannel", ProtocolFamily.class); + } catch (Throwable e) { + logger.warn("openSocketChannel(ProtocolFamily) not available, will use default", e); + } + OPEN_SOCKET_CHANNEL_WITH_FAMILY = found; + } + private static SocketChannel newSocket(SelectorProvider provider) { try { // Use the {@link SelectorProvider} to open {@link SocketChannel} and so remove condition in @@ -68,6 +85,28 @@ private static SocketChannel newSocket(SelectorProvider provider) { } } + private static SocketChannel newSocket(SelectorProvider provider, 
InternetProtocolFamily family) { + try { + /** + * Use the {@link SelectorProvider} to open {@link SocketChannel} and so remove condition in + * {@link SelectorProvider#provider()} which is called by each SocketChannel.open() otherwise. + * + * See <a href="https://github.com/netty/netty/issues/2308">#2308</a>. + */ + if (family != null && OPEN_SOCKET_CHANNEL_WITH_FAMILY != null) { + try { + return (SocketChannel) OPEN_SOCKET_CHANNEL_WITH_FAMILY.invoke( + provider, ProtocolFamilyConverter.convert(family)); + } catch (InvocationTargetException | IllegalAccessException e) { + throw new IOException(e); + } + } + return provider.openSocketChannel(); + } catch (IOException e) { + throw new ChannelException("Failed to open a socket.", e); + } + } + private final SocketChannelConfig config; /** @@ -84,6 +123,13 @@ public NioSocketChannel(EventLoop eventLoop, SelectorProvider provider) { this(eventLoop, newSocket(provider)); } + /** + * Create a new instance using the given {@link SelectorProvider} and protocol family (supported only since JDK 15). + */ + public NioSocketChannel(EventLoop eventLoop, SelectorProvider provider, InternetProtocolFamily family) { + this(eventLoop, newSocket(provider, family)); + } + /** * Create a new instance using the given {@link SocketChannel}. */
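A minimal usage sketch for the constructors introduced in the patch above, assuming a netty 5 snapshot that contains them; `Ipv4ChannelFactory` and `newIpv4Channel` are illustrative names, and the event-loop wiring is omitted:

```java
import io.netty5.channel.EventLoop;
import io.netty5.channel.socket.InternetProtocolFamily;
import io.netty5.channel.socket.nio.NioSocketChannel;

import java.nio.channels.spi.SelectorProvider;

final class Ipv4ChannelFactory {
    // Forces an IPv4 socket regardless of the OS default preference.
    static NioSocketChannel newIpv4Channel(EventLoop loop) {
        // On JDK 15+ the patched newSocket(...) resolves
        // SelectorProvider.openSocketChannel(ProtocolFamily) reflectively;
        // on older JDKs the family is ignored and the provider default is used.
        return new NioSocketChannel(loop, SelectorProvider.provider(), InternetProtocolFamily.IPv4);
    }
}
```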
null
train
test
"2022-04-05T08:46:29"
"2022-04-03T01:03:59Z"
kvr000
val
netty/netty/11701_12313
netty/netty
netty/netty/11701
netty/netty/12313
[ "keyword_pr_to_issue" ]
f9e765e0ceddf30a5951e1f59ae6d9dece59819f
7be44fba673147ff881903bb994fbdbd112f19d5
[ "The binary compatibility baseline for our native code is the GLIBC version that's used on CentOS 6. The work-around is to use the Nio transport.\r\n\r\nIt's not clear from the error reports how this is really showing up. If it's an ABI problem, then I'm not sure what we can do to mitigate it. If we are calling glibc specific functions, then I wonder if we can somehow check what libc we are using at runtime.", ">It's not clear from the error reports how this is really showing up.\r\n\r\nI'm not sure what your question is. The error happens when Netty native is used without having glibc.\r\n\r\n>If it's an ABI problem...\r\n\r\nI don't think it is an ABI problem since glibc is missing in Alpine, there is nothing to be compatible with (and based on [this comment ](https://github.com/netty/netty/issues/6841#issuecomment-307716433), other libc implementations are out of question).\r\n\r\nI think checking if glibc is available or not and/or fixing `Epoll.isAvailable()` would fix the issue (it seems it returns true even if glibc is missing).", "I put together a little test for this: https://github.com/jonatan-ivanov/netty-gh-11701\r\n\r\nI called `Epoll.isAvailable()` and `Epoll.ensureAvailability()` on different distros to see if they return false/fail in case glibc is missing. It seems `isAvailable` does not return false and `ensureAvailability` does not fail even if glibc is missing.\r\n\r\nImage | `isAvailable` | `ensureAvailability` | Comment\r\n------ | ------------------- | --------------------------- | -------------\r\nbellsoft_liberica-openjre-debian:17 | `true` | `OK` | works\r\nbellsoft_liberica-openjre-alpine:17 | `true` | `OK` | works since it contains glibc\r\n**bellsoft_liberica-openjre-alpine-musl:17** | `true` | `OK` | **does not work as expected**\r\nazul_zulu-openjdk:17 | `true` | `OK` | works as expected\r\n**azul_zulu-openjdk-alpine:17-jre** | `true` | `OK` | **does not work as expected**\r\n\r\n", "There's no way to detect the libc vendor from within a process, as far as I can see. And calling out to `lld`, or something like that, is quite awkward and can be a security liability. I think the best we can do for the moment is to document that only the Nio transport is supported on musl.\r\n\r\nWhat do you think, @normanmaurer ?", "I agree with @chrisvest here ", "I'm not very familiar with this domain but do you think is it at least possible to throw an exception instead of crashing a JVM if such a thing occur?\r\n\r\nAlso can you use the `__GLIBC__ ` from [`features.h`](https://sourceware.org/git/?p=glibc.git;a=blob;f=include/features.h;h=d974eabfafc24ffb9ff4cd066b650134df32048b;hb=refs/heads/master#l477) to detect if you are dealing with glibc or not or is this only possible compile time?\r\n", "@jonatan-ivanov That's only at compile time. At runtime you can feature detect by inspecting linked symbols, except you cant do that without calling into libc which in this case would cause a crash. Alternatively we shell out to a program to inspect libc, but that is also bad for multiple reasons, and we don't want to do that.", "I have updated the documentation on the native transports to note that musl is not officially supported: https://github.com/netty/netty/wiki/Native-transports\r\n\r\nI think that's the best we can do for now.", "musl generally tries to support glibc-linked binaries invoking POSIX and some BSD extensions. errors related to GNU-specific extensions (and glibc header bastardizations) should be caught at load time, as in #6841 \"__strndup: symbol not found\". 
that particular issue can be fixed by installing gcompat package in alpine linux. segfaults are, as you say, caused by some ABI incompatibility.\r\n\r\ni briefly glanced through netty c code and found some suspicious items: https://github.com/netty/netty/blob/6b3ec62e6e4e19a85304aecd8cb5331a3fcc70be/transport-native-unix-common/src/main/c/netty_unix_errors.c#L41 is suspicious: application code should never check feature test macros, because it should be defining it themselves. https://github.com/netty/netty/blob/51ebcbd9aba7eac854716e45f07765744f8dcdaf/transport-native-epoll/src/main/c/netty_epoll_native.c#L140 is suspicious: sysctl values should always be accessed using `read`/`write`, never stdio. some sysctls will not be read correctly if using stdio. neither of these should cause segfault at run time though.", "@Hello71 The netty_unit_errors code is because we wish to use a thread-safe strerror variant, which is not immediately portable. (I'm not familiar with the other one you point out)\r\nIf you know of a way to improve it, we'd love to review a PR!", "Fix reverted because it negatively impacts musl-based systems that also use the glibc compatibility layer.", "@chrisvest Would you consider reopening this issue so that we can track another fix (if any)?", "reopened ", "fwiw if you want to do this in the dumbest possible way, you can check for `gnu_get_libc_version`. if your program is segfaulting and not just failing with symbol not found, then most likely there is some more serious issue with your program, not related to musl.", "@Hello71 The JVM itself might rely on signals and tie them to internal semantics, i.e. rare NullPointerExceptions sometimes rely on segfaults to optimise a non-null fast path. So I wouldn't feel very confident trying to trap signals in JNI library code.", "I think this would be fixed by https://github.com/netty/netty/pull/12272 ... " ]
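The work-around named at the top of these hints (fall back to the Nio transport) might look roughly like this; since `Epoll.isAvailable()` can report true on musl, the `preferNative` flag below is a hypothetical knob an application could wire to its deployment image type:

```java
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

final class TransportChoice {
    final EventLoopGroup group;
    final Class<? extends SocketChannel> channelType;

    TransportChoice(boolean preferNative) {
        if (preferNative && Epoll.isAvailable()) {
            group = new EpollEventLoopGroup();
            channelType = EpollSocketChannel.class;
        } else {
            // Safe default for musl-based images such as vanilla Alpine.
            group = new NioEventLoopGroup();
            channelType = NioSocketChannel.class;
        }
    }
}
```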
[]
"2022-04-19T15:28:46Z"
[]
Throwing an exception in case glibc is missing instead of segfaulting the JVM
It seems Netty's native support depends on glibc (btw is this documented?). When glibc is missing (for example using vanilla Alpine), the JVM could end up with a crash (`SIGSEGV`), see details in this issue: https://github.com/micrometer-metrics/micrometer/issues/2776 It also seems that `Epoll.isAvailable()` returns `true` even if glibc is missing. ### Expected behavior Throwing an exception ### Actual behavior JVM crash ### Steps to reproduce Using Netty native with an environment that does not have glibc, e.g.: vanilla Alpine ### Minimal yet complete reproducer code (or URL to code) This seems to be a known issue, please let me know if you really need a reproducer. ### Netty version `4.1.68` (latest) ### JVM version (e.g. `java -version`) Latest 11: ```bash ❯ docker run --rm 'azul/zulu-openjdk-alpine:11-jre' 'sh' '-c' 'java --version' openjdk 11.0.12 2021-07-20 LTS OpenJDK Runtime Environment Zulu11.50+19-CA (build 11.0.12+7-LTS) OpenJDK 64-Bit Server VM Zulu11.50+19-CA (build 11.0.12+7-LTS, mixed mode) ``` Or also latest 17: ```bash ❯ docker run --rm 'bellsoft/liberica-openjdk-alpine-musl:17' 'sh' '-c' 'java --version' openjdk 17 2021-09-14 LTS OpenJDK Runtime Environment (build 17+35-LTS) OpenJDK 64-Bit Server VM (build 17+35-LTS, mixed mode) ``` I assume this is an issue in every JRE that is currently supported. ### OS version (e.g. `uname -a`) Both images above have the same output: ```bash ❯ docker run --rm 'azul/zulu-openjdk-alpine:11-jre' 'sh' '-c' 'uname -a' Linux 1ee8d1090f14 5.10.47-linuxkit #1 SMP Sat Jul 3 21:51:47 UTC 2021 x86_64 Linux ``` I assume this is an issue in every OS where glibc is missing.
[ "transport-native-epoll/pom.xml" ]
[ "transport-native-epoll/pom.xml" ]
[]
diff --git a/transport-native-epoll/pom.xml b/transport-native-epoll/pom.xml index d2768a4d3d3..cad13b4b77b 100644 --- a/transport-native-epoll/pom.xml +++ b/transport-native-epoll/pom.xml @@ -34,8 +34,9 @@ <unix.common.lib.dir>${project.build.directory}/unix-common-lib</unix.common.lib.dir> <unix.common.lib.unpacked.dir>${unix.common.lib.dir}/META-INF/native/lib</unix.common.lib.unpacked.dir> <unix.common.include.unpacked.dir>${unix.common.lib.dir}/META-INF/native/include</unix.common.include.unpacked.dir> - <jni.compiler.args.cflags>CFLAGS=-O3 -Werror -fno-omit-frame-pointer -Wunused-variable -fvisibility=hidden -I${unix.common.include.unpacked.dir}</jni.compiler.args.cflags> - <jni.compiler.args.ldflags>LDFLAGS=-L${unix.common.lib.unpacked.dir} -Wl,--no-as-needed -lrt -ldl -Wl,--whole-archive -l${unix.common.lib.name} -Wl,--no-whole-archive</jni.compiler.args.ldflags> + <jni.compiler.args.cflags>CFLAGS=-O2 -pipe -Werror -fno-omit-frame-pointer -Wunused-variable -fvisibility=hidden -D_FORTIFY_SOURCE=2 -ffunction-sections -fdata-sections -I${unix.common.include.unpacked.dir}</jni.compiler.args.cflags> + <jni.compiler.args.ldflags>LDFLAGS=-Wl,-z,relro -Wl,-z,now -Wl,--as-needed -Wl,--gc-sections -L${unix.common.lib.unpacked.dir}</jni.compiler.args.ldflags> + <jni.compiler.args.libs>LIBS=-Wl,--whole-archive -l${unix.common.lib.name} -Wl,--no-whole-archive -ldl</jni.compiler.args.libs> <nativeSourceDirectory>${project.basedir}/src/main/c</nativeSourceDirectory> <skipTests>true</skipTests> <japicmp.skip>true</japicmp.skip> @@ -157,6 +158,7 @@ <platform>.</platform> <configureArgs> <arg>${jni.compiler.args.ldflags}</arg> + <arg>${jni.compiler.args.libs}</arg> <arg>${jni.compiler.args.cflags}</arg> <configureArg>--libdir=${project.build.directory}/native-build/target/lib</configureArg> </configureArgs> @@ -306,6 +308,7 @@ <platform>.</platform> <configureArgs> <arg>${jni.compiler.args.ldflags}</arg> + <arg>${jni.compiler.args.libs}</arg> <arg>${jni.compiler.args.cflags}</arg> <configureArg>--libdir=${project.build.directory}/native-build/target/lib</configureArg> <configureArg>--host=aarch64-linux-gnu</configureArg>
null
train
test
"2022-04-14T17:12:42"
"2021-09-22T02:15:46Z"
jonatan-ivanov
val
netty/netty/12351_12358
netty/netty
netty/netty/12351
netty/netty/12358
[ "keyword_pr_to_issue" ]
8e8e9cc7b4e05b5bfd81a2aeba23b4e550466494
eb005907caab01256c1d8baf6d96b06beae95be4
[ "The test passes if I comment out `ShadingIT.testShadingNativeTransport()`, but why? Why does `testShadingTcnative()` pass? I can see the shaded native libraries are all there for my architecture (osx_aarch_64):\r\n\r\n```\r\n❯ find target/classes-shaded/ | grep META-INF/native/\r\ntarget/classes-shaded/META-INF/native/libnetty_tcnative_linux_x86_64.so\r\ntarget/classes-shaded/META-INF/native/netty_tcnative_windows_x86_64.dll\r\ntarget/classes-shaded/META-INF/native/libshaded_netty_tcnative_osx_aarch_64.jnilib\r\ntarget/classes-shaded/META-INF/native/libnetty_tcnative_linux_aarch_64.so\r\ntarget/classes-shaded/META-INF/native/libshaded2_netty_transport_native_kqueue_aarch_64.jnilib\r\ntarget/classes-shaded/META-INF/native/libshaded2_netty_tcnative_osx_aarch_64.jnilib\r\ntarget/classes-shaded/META-INF/native/libshaded_netty_transport_native_kqueue_aarch_64.jnilib\r\ntarget/classes-shaded/META-INF/native/libnetty_tcnative_osx_x86_64.jnilib\r\n```", "~One major difference I found is that `lib{shaded,shaded2}_netty_transport_native_kqueue_aarch_64.jnilib` doesn't have `_osx_` before `aarch_64`. Could this be why?~ Looks like `...kqueue.Native.loadNativeLibrary()` doesn't add OS to the library name, so this is fine.", "I ran the above build commands on my Ubuntu ARM64 VM and saw the same failure:\r\n```\r\n[ERROR] Tests run: 2, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 0.417 s <<< FAILURE! - in io.netty.testsuite.shading.ShadingIT\r\n[ERROR] testShadingNativeTransport Time elapsed: 0.171 s <<< ERROR!\r\njava.lang.UnsatisfiedLinkError: failed to load the required native library\r\n\tat io.netty.testsuite.shading.ShadingIT.testShading0(ShadingIT.java:54)\r\n\tat io.netty.testsuite.shading.ShadingIT.testShadingNativeTransport(ShadingIT.java:37)\r\nCaused by: java.lang.NoClassDefFoundError: shaded2/io/netty/channel/epoll/NativeStaticallyReferencedJniMethods (wrong name: shaded2//io/netty/channel/epoll/NativeStaticallyReferencedJniMethods)\r\n\tat io.netty.testsuite.shading.ShadingIT.testShading0(ShadingIT.java:52)\r\n\tat io.netty.testsuite.shading.ShadingIT.testShadingNativeTransport(ShadingIT.java:37)\r\n```\r\nLet me change the subject of this issue because it seems to affect both macOS and Linux.", "Will check ", "Thanks for looking, @normanmaurer. It looks like there's a bug in `parsePackagePrefix()` - it appends an extra `/` at the end of the package prefix. Let me dig a little bit more and get back.", "OK. I have a fix. Let me clean it up and send out a PR tomorrow. I'll also implement the rule documented [here](https://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/design.html#resolving_native_method_names), so it handles a peculiar package name correctly, such as one with `_` or a non-ASCII character." ]
[]
"2022-05-04T09:39:11Z"
[ "ci" ]
`ShadingIT` fails on macOS and Linux
### Expected behavior The following commands should succeed: ``` $ ./mvnw install -DskipTests $ cd testsuite-shading $ ../mvnw clean integration-test failsafe:verify ``` ### Actual behavior The above command fails with the following error: ``` [ERROR] Tests run: 2, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 1.19 s <<< FAILURE! - in io.netty.testsuite.shading.ShadingIT [ERROR] testShadingNativeTransport Time elapsed: 0.374 s <<< ERROR! java.lang.UnsatisfiedLinkError: failed to load the required native library at io.netty.testsuite.shading.ShadingIT.testShading0(ShadingIT.java:54) at io.netty.testsuite.shading.ShadingIT.testShadingNativeTransport(ShadingIT.java:37) Caused by: java.lang.NoClassDefFoundError: shaded2/io/netty/channel/kqueue/KQueueStaticallyReferencedJniMethods (wrong name: shaded2//io/netty/channel/kqueue/KQueueStaticallyReferencedJniMethods) at io.netty.testsuite.shading.ShadingIT.testShading0(ShadingIT.java:52) at io.netty.testsuite.shading.ShadingIT.testShadingNativeTransport(ShadingIT.java:37) ``` <details> <summary>The complete test output:</summary> ``` 22:01:56.060 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using SLF4J as the default logging framework 22:01:56.062 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false 22:01:56.062 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 11 22:01:56.063 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available 22:01:56.063 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available 22:01:56.063 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.storeFence: available 22:01:56.063 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available 22:01:56.063 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled 22:01:56.064 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true 22:01:56.064 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$7 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @75e32c4c 22:01:56.065 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.<init>(long, int): unavailable 22:01:56.065 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available 22:01:56.071 [main] DEBUG io.netty.util.internal.PlatformDependent - maxDirectMemory: 8589934592 bytes (maybe) 22:01:56.071 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/90/dr7vn6w14vzdt65wm4tfswwc0000gp/T (java.io.tmpdir) 22:01:56.071 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) 22:01:56.072 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS 22:01:56.072 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes 22:01:56.072 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 22:01:56.073 [main] DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available 22:01:56.073 [main] DEBUG io.netty.util.internal.PlatformDependent - 
-Dio.netty.noPreferDirect: false 22:01:56.076 [main] DEBUG shaded2.io.netty.util.internal.logging.InternalLoggerFactory - Using SLF4J as the default logging framework 22:01:56.090 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false 22:01:56.090 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - Java version: 11 22:01:56.090 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available 22:01:56.090 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available 22:01:56.091 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.storeFence: available 22:01:56.091 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available 22:01:56.091 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled 22:01:56.091 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true 22:01:56.092 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class shaded2.io.netty.util.internal.PlatformDependent0$7 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @75e32c4c 22:01:56.092 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.<init>(long, int): unavailable 22:01:56.092 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available 22:01:56.092 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent - maxDirectMemory: 8589934592 bytes (maybe) 22:01:56.092 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/90/dr7vn6w14vzdt65wm4tfswwc0000gp/T (java.io.tmpdir) 22:01:56.092 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) 22:01:56.092 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent - Platform: MacOS 22:01:56.093 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes 22:01:56.093 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 22:01:56.093 [main] DEBUG shaded2.io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available 22:01:56.093 [main] DEBUG shaded2.io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false 22:01:56.095 [main] DEBUG shaded2.io.netty.util.internal.NativeLibraryLoader - -Dio.netty.native.workdir: /var/folders/90/dr7vn6w14vzdt65wm4tfswwc0000gp/T (io.netty.tmpdir) 22:01:56.095 [main] DEBUG shaded2.io.netty.util.internal.NativeLibraryLoader - -Dio.netty.native.deleteLibAfterLoading: true 22:01:56.095 [main] DEBUG shaded2.io.netty.util.internal.NativeLibraryLoader - -Dio.netty.native.tryPatchShadedId: true 22:01:56.095 [main] DEBUG shaded2.io.netty.util.internal.NativeLibraryLoader - -Dio.netty.native.detectNativeLibraryDuplicates: true 22:01:56.110 [main] DEBUG shaded2.io.netty.util.internal.NativeLibraryLoader - Execution of 'install_name_tool -id Zu9wWlVpYUcosy50wS6A1VvqOI5ddgPAT8eIVm /var/folders/90/dr7vn6w14vzdt65wm4tfswwc0000gp/T/libshaded2_netty_transport_native_kqueue_aarch_641290578488165430543.dylib' succeed: 0 22:01:56.149 [main] DEBUG 
shaded2.io.netty.util.internal.NativeLibraryLoader - Execution of 'codesign -s - /var/folders/90/dr7vn6w14vzdt65wm4tfswwc0000gp/T/libshaded2_netty_transport_native_kqueue_aarch_641290578488165430543.dylib' succeed: 0 [ERROR] Tests run: 1, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 0.392 s <<< FAILURE! - in io.netty.testsuite.shading.ShadingIT [ERROR] testShadingNativeTransport Time elapsed: 0.385 s <<< ERROR! java.lang.UnsatisfiedLinkError: failed to load the required native library at io.netty.testsuite.shading.ShadingIT.testShading0(ShadingIT.java:54) at io.netty.testsuite.shading.ShadingIT.testShadingNativeTransport(ShadingIT.java:37) Caused by: java.lang.NoClassDefFoundError: shaded2/io/netty/channel/kqueue/KQueueStaticallyReferencedJniMethods (wrong name: shaded2//io/netty/channel/kqueue/KQueueStaticallyReferencedJniMethods) at io.netty.testsuite.shading.ShadingIT.testShading0(ShadingIT.java:52) at io.netty.testsuite.shading.ShadingIT.testShadingNativeTransport(ShadingIT.java:37) ``` </details> I see the similar failure for epoll on Linux ARM64 as well: ``` ... Caused by: java.lang.NoClassDefFoundError: shaded2/io/netty/channel/epoll/NativeStaticallyReferencedJniMethods (wrong name: shaded2//io/netty/channel/epoll/NativeStaticallyReferencedJniMethods) at io.netty.testsuite.shading.ShadingIT.testShading0(ShadingIT.java:52) at io.netty.testsuite.shading.ShadingIT.testShadingNativeTransport(ShadingIT.java:37) ``` What's the current state of `testsuite-shading`? It is running as a part of the CI process? ### Netty version Branch `4.1` f901ea2e712d9d16e199aaeb7b622e36944eb8dd ### JVM version (e.g. `java -version`) macOS: ``` ❯ java -version openjdk version "11.0.14" 2022-01-18 LTS OpenJDK Runtime Environment Zulu11.54+23-CA (build 11.0.14+9-LTS) OpenJDK 64-Bit Server VM Zulu11.54+23-CA (build 11.0.14+9-LTS, mixed mode) ``` Linux: ``` ❯ java -version openjdk version "11.0.8" 2020-07-14 LTS OpenJDK Runtime Environment Zulu11.41+75-CA (build 11.0.8+10-LTS) OpenJDK 64-Bit Server VM Zulu11.41+75-CA (build 11.0.8+10-LTS, mixed mode) ``` ### OS version (e.g. `uname -a`) macOS: ``` ❯ uname -a Darwin ... 21.4.0 Darwin Kernel Version 21.4.0: Mon Feb 21 20:35:58 PST 2022; root:xnu-8020.101.4~2/RELEASE_ARM64_T6000 arm64 ``` Linux: ``` ❯ uname -a Linux ... 5.15.0-27-generic #28-Ubuntu SMP Thu Apr 14 12:56:31 UTC 2022 aarch64 aarch64 aarch64 GNU/Linux ```
[ "common/src/main/java/io/netty/util/internal/NativeLibraryLoader.java", "docker/docker-compose.yaml", "pom.xml" ]
[ "common/src/main/java/io/netty/util/internal/NativeLibraryLoader.java", "docker/docker-compose.yaml", "pom.xml" ]
[ "testsuite-shading/pom.xml" ]
diff --git a/common/src/main/java/io/netty/util/internal/NativeLibraryLoader.java b/common/src/main/java/io/netty/util/internal/NativeLibraryLoader.java index 795ac28006a..826cc38d9b9 100644 --- a/common/src/main/java/io/netty/util/internal/NativeLibraryLoader.java +++ b/common/src/main/java/io/netty/util/internal/NativeLibraryLoader.java @@ -117,11 +117,25 @@ public static void loadFirstAvailable(ClassLoader loader, String... names) { } /** - * The shading prefix added to this class's full name. + * Calculates the mangled shading prefix added to this class's full name. + * + * <p>This method mangles the package name as follows, so we can unmangle it back later: + * <ul> + * <li>{@code _} to {@code _1}</li> + * <li>{@code .} to {@code _}</li> + * </ul> + * + * <p>Note that we don't mangle non-ASCII characters here because it's extremely unlikely to have + * a non-ASCII character in a package name. For more information, see: + * <ul> + * <li><a href="https://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/design.html">JNI + * specification</a></li> + * <li>{@code parsePackagePrefix()} in {@code netty_jni_util.c}.</li> + * </ul> * * @throws UnsatisfiedLinkError if the shader used something other than a prefix */ - private static String calculatePackagePrefix() { + private static String calculateMangledPackagePrefix() { String maybeShaded = NativeLibraryLoader.class.getName(); // Use ! instead of . to avoid shading utilities from modifying the string String expected = "io!netty!util!internal!NativeLibraryLoader".replace('!', '.'); @@ -130,16 +144,17 @@ private static String calculatePackagePrefix() { "Could not find prefix added to %s to get %s. When shading, only adding a " + "package prefix is supported", expected, maybeShaded)); } - return maybeShaded.substring(0, maybeShaded.length() - expected.length()); + return maybeShaded.substring(0, maybeShaded.length() - expected.length()) + .replace("_", "_1") + .replace('.', '_'); } /** * Load the given library with the specified {@link ClassLoader} */ public static void load(String originalName, ClassLoader loader) { - // Adjust expected name to support shading of native libraries. - String packagePrefix = calculatePackagePrefix().replace('.', '_'); - String name = packagePrefix + originalName; + String mangledPackagePrefix = calculateMangledPackagePrefix(); + String name = mangledPackagePrefix + originalName; List<Throwable> suppressed = new ArrayList<Throwable>(); try { // first try to load from java.library.path @@ -189,7 +204,7 @@ public static void load(String originalName, ClassLoader loader) { } out.flush(); - if (shouldShadedLibraryIdBePatched(packagePrefix)) { + if (shouldShadedLibraryIdBePatched(mangledPackagePrefix)) { // Let's try to patch the id and re-sign it. 
This is a best-effort and might fail if a // SecurityManager is setup or the right executables are not installed :/ tryPatchShadedLibraryIdAndSign(tmpFile, originalName); diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index bf759fd3aff..ed134f51759 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -29,7 +29,11 @@ services: build: <<: *common - command: /bin/bash -cl "./mvnw -B -ntp clean install -Dio.netty.testsuite.badHost=netty.io -Dtcnative.classifier=linux-x86_64-fedora" + command: '/bin/bash -cl " + ./mvnw -B -ntp clean install -Dio.netty.testsuite.badHost=netty.io -Dtcnative.classifier=linux-x86_64-fedora && + cd testsuite-shading && + ../mvnw -B -ntp integration-test failsafe:verify -Dtcnative.classifier=linux-x86_64-fedora + "' deploy: <<: *common diff --git a/pom.xml b/pom.xml index 6453ed0545b..a27d57fbbcd 100644 --- a/pom.xml +++ b/pom.xml @@ -621,7 +621,7 @@ <dependency> <groupId>io.netty</groupId> <artifactId>netty-jni-util</artifactId> - <version>0.0.5.Final</version> + <version>0.0.6.Final</version> <classifier>sources</classifier> <optional>true</optional> </dependency>
diff --git a/testsuite-shading/pom.xml b/testsuite-shading/pom.xml index e3582786560..65a650263c7 100644 --- a/testsuite-shading/pom.xml +++ b/testsuite-shading/pom.xml @@ -34,7 +34,9 @@ <classesShadedDir>${project.build.directory}/classes-shaded</classesShadedDir> <classesShadedNativeDir>${classesShadedDir}/META-INF/native</classesShadedNativeDir> <shadingPrefix>shaded</shadingPrefix> - <shadingPrefix2>shaded2</shadingPrefix2> + <shadingPrefix2>shaded_2</shadingPrefix2> + <mangledShadingPrefix>shaded</mangledShadingPrefix> + <mangledShadingPrefix2>shaded_12</mangledShadingPrefix2> <skipShadingTestsuite>true</skipShadingTestsuite> <shadedPackagePrefix>io.netty.</shadedPackagePrefix> <japicmp.skip>true</japicmp.skip> @@ -126,12 +128,12 @@ <include name="shaded2.jar" /> </fileset> </unzip> - <copy file="${classesShadedNativeDir}/lib${nativeTransportLib}" tofile="${classesShadedNativeDir}/lib${shadingPrefix}_${nativeTransportLib}" /> - <copy file="${classesShadedNativeDir}/lib${nativeTransportLib}" tofile="${classesShadedNativeDir}/lib${shadingPrefix2}_${nativeTransportLib}" /> + <copy file="${classesShadedNativeDir}/lib${nativeTransportLib}" tofile="${classesShadedNativeDir}/lib${mangledShadingPrefix}_${nativeTransportLib}" /> + <copy file="${classesShadedNativeDir}/lib${nativeTransportLib}" tofile="${classesShadedNativeDir}/lib${mangledShadingPrefix2}_${nativeTransportLib}" /> <delete file="${classesShadedNativeDir}/lib${nativeTransportLib}" /> - <copy file="${classesShadedNativeDir}/lib${nativeTcnativeLib}" tofile="${classesShadedNativeDir}/lib${shadingPrefix}_${nativeTcnativeLib}" /> - <copy file="${classesShadedNativeDir}/lib${nativeTcnativeLib}" tofile="${classesShadedNativeDir}/lib${shadingPrefix2}_${nativeTcnativeLib}" /> + <copy file="${classesShadedNativeDir}/lib${nativeTcnativeLib}" tofile="${classesShadedNativeDir}/lib${mangledShadingPrefix}_${nativeTcnativeLib}" /> + <copy file="${classesShadedNativeDir}/lib${nativeTcnativeLib}" tofile="${classesShadedNativeDir}/lib${mangledShadingPrefix2}_${nativeTcnativeLib}" /> <delete file="${classesShadedNativeDir}/lib${nativeTcnativeLib}" /> <delete file="${project.build.directory}/shaded1.jar" />
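A worked example of the mangling rule implemented in `calculateMangledPackagePrefix()` above (`_` becomes `_1`, then `.` becomes `_`), matching the `shaded_2` to `shaded_12` renames in the test patch; `ManglingDemo` is an illustrative name, and note the double slash in the reported `wrong name: shaded2//...` error came from the separate `parsePackagePrefix()` fix shipped via the netty-jni-util 0.0.6 bump:

```java
final class ManglingDemo {
    static String mangle(String packagePrefix) {
        // Same two-step rewrite as calculateMangledPackagePrefix().
        return packagePrefix.replace("_", "_1").replace('.', '_');
    }

    public static void main(String[] args) {
        System.out.println(mangle("shaded."));   // shaded_
        System.out.println(mangle("shaded_2.")); // shaded_12_
        // Replacing only '.' would yield "shaded_2_", which cannot be
        // unmangled back to a unique package prefix on the native side.
    }
}
```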
train
test
"2022-05-04T08:53:08"
"2022-05-02T12:48:31Z"
trustin
val
netty/netty/12411_12414
netty/netty
netty/netty/12411
netty/netty/12414
[ "keyword_pr_to_issue" ]
b61d7d40f40e3a797e8a60cd567f849a9799c771
cb35e4b13485c674ea6118e9ecf99fba913a4804
[ "This is only during the class initialisation. If you want to get past an automated check with blockhound, you'll have to move the `NetUtil` initialisation forward, and out of any event loop.", "> This is only during the class initialisation. If you want to get past an automated check with blockhound, you'll have to move the `NetUtil` initialisation forward, and out of any event loop.\r\n\r\nHow could i do that? Can you please share code line?", "Read any of the public static fields in `NetUtil` before your event loop starts.", "> Read any of the public static fields in `NetUtil` before your event loop starts.\r\n\r\nI am sorry but i dont understand", "How are you running Blockhound, though? We have a [Blockhound integration](https://github.com/netty/netty/blob/4.1/common/src/main/java/io/netty/util/internal/Hidden.java#L39) that should tell it to allow those calls.\r\nThat should be [service loaded](https://github.com/netty/netty/blob/4.1/common/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration).", "> How are you running Blockhound, though? We have a [Blockhound integration](https://github.com/netty/netty/blob/4.1/common/src/main/java/io/netty/util/internal/Hidden.java#L39) that should tell it to allow those calls. That should be [service loaded](https://github.com/netty/netty/blob/4.1/common/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration).\r\n\r\nI guess there is a typo line 138 \r\nit says NetUil instead of NetUtil", "A-ha! Well spotted. I'll put up a PR soon with the fix." ]
[]
"2022-05-18T23:29:01Z"
[]
BlockHound detects blocking call with webflux webclient for simple http calling
I am on spring boot 2.6.7 Calling ``` webClient .get() .uri("/me", queryString) .exchangeToMono(clientResponse -> { if (clientResponse.statusCode().equals(HttpStatus.OK)) { return clientResponse.bodyToMono(FacebookAuthResponse.class); } return Mono.empty(); }) .filter(facebookAuthResponse -> StringUtils.hasLength(facebookAuthResponse.getId()) && StringUtils.hasLength(facebookAuthResponse.getName()) && StringUtils.hasLength(facebookAuthResponse.getEmail()) ) .switchIfEmpty(Mono.error(new Exception())) .onErrorResume((ex) -> {ex.printStackTrace(); return Mono.empty();}) ; ``` ``` Caused by: reactor.blockhound.BlockingOperationError: Blocking call! java.io.FileInputStream#readBytes at java.base/java.io.FileInputStream.readBytes(FileInputStream.java) at java.base/java.io.FileInputStream.read(FileInputStream.java:276) at java.base/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:270) at java.base/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:313) at java.base/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:188) at java.base/java.io.InputStreamReader.read(InputStreamReader.java:177) at java.base/java.io.BufferedReader.fill(BufferedReader.java:162) at java.base/java.io.BufferedReader.readLine(BufferedReader.java:329) at java.base/java.io.BufferedReader.readLine(BufferedReader.java:396) at io.netty.util.NetUtil$SoMaxConnAction.run(NetUtil.java:171) at io.netty.util.NetUtil$SoMaxConnAction.run(NetUtil.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:318) at io.netty.util.NetUtil.<clinit>(NetUtil.java:152) at reactor.netty.transport.AddressUtils.attemptParsingIpString(AddressUtils.java:238) at reactor.netty.transport.AddressUtils.createForIpString(AddressUtils.java:259) at reactor.netty.transport.AddressUtils.createInetSocketAddress(AddressUtils.java:56) at reactor.netty.transport.AddressUtils.createUnresolved(AddressUtils.java:86) at reactor.netty.http.client.UriEndpointFactory.lambda$createUriEndpoint$0(UriEndpointFactory.java:64) at reactor.netty.http.client.UriEndpoint.toExternalForm(UriEndpoint.java:63) at reactor.netty.http.client.HttpClientConnect$HttpClientHandler.<init>(HttpClientConnect.java:510) at reactor.netty.http.client.HttpClientConnect$MonoHttpConnect.subscribe(HttpClientConnect.java:208) at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64) at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52) at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64) at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:157) at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1816) at reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:249) at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2398) at reactor.core.publisher.MonoFlatMap$FlatMapInner.onSubscribe(MonoFlatMap.java:238) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55) at reactor.core.publisher.MonoDeferContextual.subscribe(MonoDeferContextual.java:55) at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:157) at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74) at reactor.core.publisher.FluxFilter$FilterSubscriber.onNext(FluxFilter.java:113) at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74) at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onNext(FluxOnErrorResume.java:79) at 
reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onNext(FluxOnErrorResume.java:79) at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1816) at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:151) at reactor.core.publisher.FluxContextWrite$ContextWriteSubscriber.onNext(FluxContextWrite.java:107) at reactor.core.publisher.FluxMapFuseable$MapFuseableConditionalSubscriber.onNext(FluxMapFuseable.java:295) at reactor.core.publisher.FluxFilterFuseable$FilterFuseableConditionalSubscriber.onNext(FluxFilterFuseable.java:337) at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1816) at reactor.core.publisher.MonoCollect$CollectSubscriber.onComplete(MonoCollect.java:159) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onComplete(FluxPeekFuseable.java:277) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onComplete(FluxPeekFuseable.java:277) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onComplete(FluxPeekFuseable.java:277) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onComplete(FluxPeekFuseable.java:277) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onComplete(FluxPeekFuseable.java:277) at reactor.core.publisher.FluxContextWrite$ContextWriteSubscriber.onComplete(FluxContextWrite.java:126) at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2400) at reactor.core.publisher.FluxContextWrite$ContextWriteSubscriber.request(FluxContextWrite.java:136) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.request(FluxPeekFuseable.java:144) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.request(FluxPeekFuseable.java:144) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.request(FluxPeekFuseable.java:144) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.request(FluxPeekFuseable.java:144) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.request(FluxPeekFuseable.java:144) at reactor.core.publisher.MonoCollect$CollectSubscriber.onSubscribe(MonoCollect.java:103) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onSubscribe(FluxPeekFuseable.java:178) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onSubscribe(FluxPeekFuseable.java:178) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onSubscribe(FluxPeekFuseable.java:178) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onSubscribe(FluxPeekFuseable.java:178) at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onSubscribe(FluxPeekFuseable.java:178) at reactor.core.publisher.FluxContextWrite$ContextWriteSubscriber.onSubscribe(FluxContextWrite.java:101) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.FluxFlatMap.trySubscribeScalarMap(FluxFlatMap.java:200) at reactor.core.publisher.MonoFlatMap.subscribeOrReturn(MonoFlatMap.java:53) at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:57) at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:157) at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74) at reactor.core.publisher.MonoNext$NextSubscriber.onNext(MonoNext.java:82) at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.innerNext(FluxConcatMap.java:282) at 
reactor.core.publisher.FluxConcatMap$ConcatMapInner.onNext(FluxConcatMap.java:863) at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:120) at reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:200) at reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:200) at reactor.core.publisher.MonoNext$NextSubscriber.onNext(MonoNext.java:82) at reactor.core.publisher.FluxConcatArray$ConcatArraySubscriber.onNext(FluxConcatArray.java:201) at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2398) at reactor.core.publisher.FluxConcatArray$ConcatArraySubscriber.onSubscribe(FluxConcatArray.java:193) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55) at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.FluxConcatArray$ConcatArraySubscriber.onComplete(FluxConcatArray.java:258) at reactor.core.publisher.FluxConcatArray.subscribe(FluxConcatArray.java:78) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:451) at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onSubscribe(FluxConcatMap.java:219) at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:165) at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:87) at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64) at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.subscribeNext(MonoIgnoreThen.java:263) at reactor.core.publisher.MonoIgnoreThen.subscribe(MonoIgnoreThen.java:51) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:82) at reactor.core.publisher.MonoNext$NextSubscriber.onComplete(MonoNext.java:102) at reactor.core.publisher.FluxFilterWhen$FluxFilterWhenSubscriber.drain(FluxFilterWhen.java:236) at reactor.core.publisher.FluxFilterWhen$FluxFilterWhenSubscriber.onComplete(FluxFilterWhen.java:153) at reactor.core.publisher.FluxIterable$IterableSubscription.slowPath(FluxIterable.java:294) at reactor.core.publisher.FluxIterable$IterableSubscription.request(FluxIterable.java:230) at reactor.core.publisher.FluxFilterWhen$FluxFilterWhenSubscriber.onSubscribe(FluxFilterWhen.java:200) at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:165) at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:87) at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64) at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.subscribeNext(MonoIgnoreThen.java:263) at reactor.core.publisher.MonoIgnoreThen.subscribe(MonoIgnoreThen.java:51) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.Mono.subscribeWith(Mono.java:4515) at reactor.core.publisher.Mono.subscribe(Mono.java:4371) at reactor.core.publisher.Mono.subscribe(Mono.java:4307) at org.springframework.test.web.reactive.server.HttpHandlerConnector.lambda$doConnect$2(HttpHandlerConnector.java:98) at org.springframework.mock.http.client.reactive.MockClientHttpRequest.lambda$null$2(MockClientHttpRequest.java:124) at 
reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:44) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.FluxConcatIterable$ConcatIterableSubscriber.onComplete(FluxConcatIterable.java:147) at reactor.core.publisher.FluxConcatIterable.subscribe(FluxConcatIterable.java:60) at reactor.core.publisher.MonoFromFluxOperator.subscribe(MonoFromFluxOperator.java:81) at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:157) at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74) at reactor.core.publisher.Operators$MonoInnerProducerBase.complete(Operators.java:2664) at reactor.core.publisher.MonoSingle$SingleSubscriber.onComplete(MonoSingle.java:180) at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onComplete(FluxMapFuseable.java:150) at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2400) at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.request(FluxMapFuseable.java:169) at reactor.core.publisher.MonoSingle$SingleSubscriber.doOnRequest(MonoSingle.java:103) at reactor.core.publisher.Operators$MonoInnerProducerBase.request(Operators.java:2731) at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.set(Operators.java:2194) at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onSubscribe(Operators.java:2068) at reactor.core.publisher.MonoSingle$SingleSubscriber.onSubscribe(MonoSingle.java:115) at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onSubscribe(FluxMapFuseable.java:96) at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.Mono.subscribeWith(Mono.java:4515) at reactor.core.publisher.Mono.subscribe(Mono.java:4371) at reactor.core.publisher.Mono.subscribe(Mono.java:4307) at org.springframework.test.web.reactive.server.HttpHandlerConnector.doConnect(HttpHandlerConnector.java:112) at org.springframework.test.web.reactive.server.HttpHandlerConnector.lambda$connect$0(HttpHandlerConnector.java:79) at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:44) at reactor.core.publisher.Mono.subscribe(Mono.java:4400) at reactor.core.publisher.MonoSubscribeOn$SubscribeOnSubscriber.run(MonoSubscribeOn.java:126) at reactor.core.scheduler.WorkerTask.call(WorkerTask.java:84) at reactor.core.scheduler.WorkerTask.call(WorkerTask.java:37) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base/java.lang.Thread.run(Thread.java:833) ```
[ "common/src/main/java/io/netty/util/internal/Hidden.java" ]
[ "common/src/main/java/io/netty/util/internal/Hidden.java" ]
[]
diff --git a/common/src/main/java/io/netty/util/internal/Hidden.java b/common/src/main/java/io/netty/util/internal/Hidden.java index c233acdb2c4..78c45f715db 100644 --- a/common/src/main/java/io/netty/util/internal/Hidden.java +++ b/common/src/main/java/io/netty/util/internal/Hidden.java @@ -135,7 +135,7 @@ public void applyTo(BlockHound.Builder builder) { "parse"); builder.allowBlockingCallsInside( - "io.netty.util.NetUil$SoMaxConnAction", + "io.netty.util.NetUtil$SoMaxConnAction", "run"); builder.nonBlockingThreadPredicate(new Function<Predicate<Thread>, Predicate<Thread>>() {
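Until the one-character fix above is released, an application could register the same allowance itself; this sketch uses only public BlockHound API and mirrors Netty's own `BlockHoundIntegration` (the `compareTo` override is kept for compatibility with older BlockHound versions):

```java
import reactor.blockhound.BlockHound;
import reactor.blockhound.integration.BlockHoundIntegration;

final class BlockHoundSetup {
    static void install() {
        BlockHound.install(new BlockHoundIntegration() {
            @Override
            public void applyTo(BlockHound.Builder builder) {
                // Correctly spelled class name, unlike the "NetUil" typo fixed above.
                builder.allowBlockingCallsInside("io.netty.util.NetUtil$SoMaxConnAction", "run");
            }

            @Override
            public int compareTo(BlockHoundIntegration o) {
                return 0;
            }
        });
    }
}
```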
null
train
test
"2022-05-17T12:24:48"
"2022-05-18T20:52:43Z"
ilyas2016
val
netty/netty/12425_12435
netty/netty
netty/netty/12425
netty/netty/12435
[ "keyword_pr_to_issue" ]
871dd2e5730a477738a5a9b9af19be35244137d0
56f7c50d19125dad36b97ed33d9797da941a7591
[ "What does blockhound log? We'll need the stack trace for that blocking call.", "```\r\nreactor.blockhound.BlockingOperationError: Blocking call! java.io.FileInputStream#readBytes\r\n\r\n0 = {StackTraceElement@17697} \"java.base/java.io.FileInputStream.readBytes(FileInputStream.java)\"\r\n1 = {StackTraceElement@17698} \"java.base/java.io.FileInputStream.read(FileInputStream.java:276)\"\r\n2 = {StackTraceElement@17699} \"java.base/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)\"\r\n3 = {StackTraceElement@17700} \"java.base/java.io.BufferedInputStream.read(BufferedInputStream.java:263)\"\r\n4 = {StackTraceElement@17701} \"java.base/sun.security.util.DerValue.<init>(DerValue.java:412)\"\r\n5 = {StackTraceElement@17702} \"java.base/sun.security.util.DerValue.<init>(DerValue.java:459)\"\r\n6 = {StackTraceElement@17703} \"java.base/sun.security.pkcs12.PKCS12KeyStore.engineLoad(PKCS12KeyStore.java:2012)\"\r\n7 = {StackTraceElement@17704} \"java.base/sun.security.util.KeyStoreDelegator.engineLoad(KeyStoreDelegator.java:221)\"\r\n8 = {StackTraceElement@17705} \"java.base/java.security.KeyStore.load(KeyStore.java:1473)\"\r\n9 = {StackTraceElement@17706} \"java.base/sun.security.ssl.TrustStoreManager$TrustAnchorManager.loadKeyStore(TrustStoreManager.java:390)\"\r\n10 = {StackTraceElement@17707} \"java.base/sun.security.ssl.TrustStoreManager$TrustAnchorManager.getTrustedCerts(TrustStoreManager.java:336)\"\r\n11 = {StackTraceElement@17708} \"java.base/sun.security.ssl.TrustStoreManager.getTrustedCerts(TrustStoreManager.java:57)\"\r\n12 = {StackTraceElement@17709} \"java.base/sun.security.ssl.TrustManagerFactoryImpl.engineInit(TrustManagerFactoryImpl.java:49)\"\r\n13 = {StackTraceElement@17710} \"java.base/javax.net.ssl.TrustManagerFactory.init(TrustManagerFactory.java:282)\"\r\n14 = {StackTraceElement@17711} \"java.base/sun.security.ssl.SSLContextImpl.engineInit(SSLContextImpl.java:94)\"\r\n15 = {StackTraceElement@17712} \"java.base/javax.net.ssl.SSLContext.init(SSLContext.java:314)\"\r\n16 = {StackTraceElement@17713} \"io.netty.handler.ssl.JdkSslContext.<clinit>(JdkSslContext.java:75)\"\r\n17 = {StackTraceElement@17714} \"io.netty.handler.ssl.SslContext.newClientContextInternal(SslContext.java:824)\"\r\n18 = {StackTraceElement@17715} \"io.netty.handler.ssl.SslContextBuilder.build(SslContextBuilder.java:611)\"\r\n19 = {StackTraceElement@17716} \"reactor.netty.tcp.SslProvider.<init>(SslProvider.java:350)\"\r\n20 = {StackTraceElement@17717} \"reactor.netty.tcp.SslProvider$Build.build(SslProvider.java:734)\"\r\n21 = {StackTraceElement@17718} \"dev.miku.r2dbc.mysql.client.SslBridgeHandler.buildProvider(SslBridgeHandler.java:161)\"\r\n22 = {StackTraceElement@17719} \"dev.miku.r2dbc.mysql.client.SslBridgeHandler.handleSslState(SslBridgeHandler.java:135)\"\r\n23 = {StackTraceElement@17720} \"dev.miku.r2dbc.mysql.client.SslBridgeHandler.userEventTriggered(SslBridgeHandler.java:86)\"\r\n24 = {StackTraceElement@17721} \"io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:346)\"\r\n25 = {StackTraceElement@17722} \"io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:332)\"\r\n26 = {StackTraceElement@17723} \"io.netty.channel.AbstractChannelHandlerContext.fireUserEventTriggered(AbstractChannelHandlerContext.java:324)\"\r\n27 = {StackTraceElement@17724} \"io.netty.channel.DefaultChannelPipeline$HeadContext.userEventTriggered(DefaultChannelPipeline.java:1428)\"\r\n28 = 
{StackTraceElement@17725} \"io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:346)\"\r\n29 = {StackTraceElement@17726} \"io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:332)\"\r\n30 = {StackTraceElement@17727} \"io.netty.channel.DefaultChannelPipeline.fireUserEventTriggered(DefaultChannelPipeline.java:913)\"\r\n31 = {StackTraceElement@17728} \"dev.miku.r2dbc.mysql.client.MessageDuplexCodec.write(MessageDuplexCodec.java:118)\"\r\n32 = {StackTraceElement@17729} \"io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:717)\"\r\n33 = {StackTraceElement@17730} \"io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:764)\"\r\n34 = {StackTraceElement@17731} \"io.netty.channel.AbstractChannelHandlerContext.write(AbstractChannelHandlerContext.java:790)\"\r\n35 = {StackTraceElement@17732} \"io.netty.channel.AbstractChannelHandlerContext.writeAndFlush(AbstractChannelHandlerContext.java:758)\"\r\n36 = {StackTraceElement@17733} \"io.netty.channel.AbstractChannelHandlerContext.writeAndFlush(AbstractChannelHandlerContext.java:808)\"\r\n37 = {StackTraceElement@17734} \"io.netty.channel.DefaultChannelPipeline.writeAndFlush(DefaultChannelPipeline.java:1025)\"\r\n38 = {StackTraceElement@17735} \"io.netty.channel.AbstractChannel.writeAndFlush(AbstractChannel.java:306)\"\r\n39 = {StackTraceElement@17736} \"reactor.netty.channel.ChannelOperations.lambda$sendObject$2(ChannelOperations.java:312)\"\r\n40 = {StackTraceElement@17737} \"reactor.netty.FutureMono$DeferredFutureMono.subscribe(FutureMono.java:100)\"\r\n41 = {StackTraceElement@17738} \"reactor.core.publisher.Mono.subscribe(Mono.java:4400)\"\r\n42 = {StackTraceElement@17739} \"reactor.netty.NettyOutbound.subscribe(NettyOutbound.java:336)\"\r\n43 = {StackTraceElement@17740} \"reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:451)\"\r\n44 = {StackTraceElement@17741} \"reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onNext(FluxConcatMap.java:251)\"\r\n45 = {StackTraceElement@17742} \"reactor.core.publisher.EmitterProcessor.drain(EmitterProcessor.java:491)\"\r\n46 = {StackTraceElement@17743} \"reactor.core.publisher.EmitterProcessor.tryEmitNext(EmitterProcessor.java:299)\"\r\n47 = {StackTraceElement@17744} \"reactor.core.publisher.InternalManySink.emitNext(InternalManySink.java:27)\"\r\n48 = {StackTraceElement@17745} \"reactor.core.publisher.EmitterProcessor.onNext(EmitterProcessor.java:265)\"\r\n49 = {StackTraceElement@17746} \"dev.miku.r2dbc.mysql.client.ReactorNettyClient.lambda$null$7(ReactorNettyClient.java:174)\"\r\n50 = {StackTraceElement@17747} \"reactor.core.publisher.LambdaSubscriber.onNext(LambdaSubscriber.java:160)\"\r\n51 = {StackTraceElement@17748} \"org.springframework.security.test.context.support.ReactorContextTestExecutionListener$DelegateTestExecutionListener$SecuritySubContext.onNext(ReactorContextTestExecutionListener.java:120)\"\r\n52 = {StackTraceElement@17749} \"reactor.core.publisher.EmitterProcessor.drain(EmitterProcessor.java:491)\"\r\n53 = {StackTraceElement@17750} \"reactor.core.publisher.EmitterProcessor.tryEmitNext(EmitterProcessor.java:299)\"\r\n54 = {StackTraceElement@17751} \"reactor.core.publisher.InternalManySink.emitNext(InternalManySink.java:27)\"\r\n55 = {StackTraceElement@17752} \"reactor.core.publisher.EmitterProcessor.onNext(EmitterProcessor.java:265)\"\r\n56 = 
{StackTraceElement@17753} \"dev.miku.r2dbc.mysql.InitHandler.accept(QueryFlow.java:605)\"\r\n57 = {StackTraceElement@17754} \"dev.miku.r2dbc.mysql.InitHandler.accept(QueryFlow.java:545)\"\r\n58 = {StackTraceElement@17755} \"reactor.core.publisher.FluxHandleFuseable$HandleFuseableSubscriber.onNext(FluxHandleFuseable.java:176)\"\r\n59 = {StackTraceElement@17756} \"reactor.core.publisher.FluxOnAssembly$OnAssemblySubscriber.onNext(FluxOnAssembly.java:539)\"\r\n60 = {StackTraceElement@17757} \"reactor.core.publisher.FluxPeekFuseable$PeekFuseableConditionalSubscriber.onNext(FluxPeekFuseable.java:503)\"\r\n61 = {StackTraceElement@17758} \"reactor.core.publisher.FluxOnAssembly$OnAssemblySubscriber.onNext(FluxOnAssembly.java:539)\"\r\n62 = {StackTraceElement@17759} \"reactor.core.publisher.MonoFlatMapMany$FlatMapManyInner.onNext(MonoFlatMapMany.java:250)\"\r\n63 = {StackTraceElement@17760} \"org.springframework.security.test.context.support.ReactorContextTestExecutionListener$DelegateTestExecutionListener$SecuritySubContext.onNext(ReactorContextTestExecutionListener.java:120)\"\r\n64 = {StackTraceElement@17761} \"reactor.core.publisher.FluxOnAssembly$OnAssemblySubscriber.onNext(FluxOnAssembly.java:539)\"\r\n65 = {StackTraceElement@17762} \"reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onNext(FluxPeekFuseable.java:210)\"\r\n66 = {StackTraceElement@17763} \"reactor.core.publisher.FluxOnAssembly$OnAssemblySubscriber.onNext(FluxOnAssembly.java:539)\"\r\n67 = {StackTraceElement@17764} \"reactor.core.publisher.FluxHandleFuseable$HandleFuseableSubscriber.onNext(FluxHandleFuseable.java:191)\"\r\n68 = {StackTraceElement@17765} \"reactor.core.publisher.FluxOnAssembly$OnAssemblySubscriber.onNext(FluxOnAssembly.java:539)\"\r\n69 = {StackTraceElement@17766} \"reactor.core.publisher.FluxPeekFuseable$PeekConditionalSubscriber.onNext(FluxPeekFuseable.java:854)\"\r\n70 = {StackTraceElement@17767} \"reactor.core.publisher.EmitterProcessor.drain(EmitterProcessor.java:491)\"\r\n71 = {StackTraceElement@17768} \"reactor.core.publisher.EmitterProcessor.tryEmitNext(EmitterProcessor.java:299)\"\r\n72 = {StackTraceElement@17769} \"reactor.core.publisher.InternalManySink.emitNext(InternalManySink.java:27)\"\r\n73 = {StackTraceElement@17770} \"reactor.core.publisher.EmitterProcessor.onNext(EmitterProcessor.java:265)\"\r\n74 = {StackTraceElement@17771} \"dev.miku.r2dbc.mysql.client.ReactorNettyClient$ResponseSink.next(ReactorNettyClient.java:340)\"\r\n75 = {StackTraceElement@17772} \"dev.miku.r2dbc.mysql.client.ReactorNettyClient.lambda$new$0(ReactorNettyClient.java:103)\"\r\n76 = {StackTraceElement@17773} \"reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:185)\"\r\n77 = {StackTraceElement@17774} \"reactor.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:279)\"\r\n78 = {StackTraceElement@17775} \"reactor.netty.channel.FluxReceive.onInboundNext(FluxReceive.java:388)\"\r\n79 = {StackTraceElement@17776} \"reactor.netty.channel.ChannelOperations.onInboundNext(ChannelOperations.java:404)\"\r\n80 = {StackTraceElement@17777} \"reactor.netty.channel.ChannelOperationsHandler.channelRead(ChannelOperationsHandler.java:93)\"\r\n81 = {StackTraceElement@17778} \"io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\"\r\n82 = {StackTraceElement@17779} \"io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\"\r\n83 = {StackTraceElement@17780} 
\"io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\"\r\n84 = {StackTraceElement@17781} \"dev.miku.r2dbc.mysql.client.MessageDuplexCodec.handleDecoded(MessageDuplexCodec.java:187)\"\r\n85 = {StackTraceElement@17782} \"dev.miku.r2dbc.mysql.client.MessageDuplexCodec.channelRead(MessageDuplexCodec.java:95)\"\r\n86 = {StackTraceElement@17783} \"io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\"\r\n87 = {StackTraceElement@17784} \"io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\"\r\n88 = {StackTraceElement@17785} \"io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\"\r\n89 = {StackTraceElement@17786} \"io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:327)\"\r\n90 = {StackTraceElement@17787} \"io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:299)\"\r\n91 = {StackTraceElement@17788} \"io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\"\r\n92 = {StackTraceElement@17789} \"io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\"\r\n93 = {StackTraceElement@17790} \"io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)\"\r\n94 = {StackTraceElement@17791} \"io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410)\"\r\n95 = {StackTraceElement@17792} \"io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)\"\r\n96 = {StackTraceElement@17793} \"io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)\"\r\n97 = {StackTraceElement@17794} \"io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919)\"\r\n98 = {StackTraceElement@17795} \"io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166)\"\r\n99 = {StackTraceElement@17796} \"io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:722)\"\r\n```" ]
[ "```suggestion\r\n List<String> ciphersNonTLSv13 = new ArrayList<String>(defaultCiphers);\r\n```", "```suggestion\r\n Set<String> suppertedCiphersNonTLSv13 = new LinkedHashSet<String>(supportedCiphers);\r\n```" ]
"2022-05-31T22:58:36Z"
[]
JDK SSL handler has a blocking call
I tried to integrate some reactive tests with miku-mysql and BlockHound (which uses netty-handler under the hood), and noticed that there is a blocking call when using the JDK SSL implementation (leading to a NoClassDefFoundError in the end). This happens with the Netty version embedded in spring-boot-webflux 2.6.7 (I also tried upgrading the netty-handler version, with no more luck). What info do you need? Best regards
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java" ]
[ "common/src/main/java/io/netty/util/internal/Hidden.java", "handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java" ]
[]
diff --git a/common/src/main/java/io/netty/util/internal/Hidden.java b/common/src/main/java/io/netty/util/internal/Hidden.java index 78c45f715db..5ebf8e780e2 100644 --- a/common/src/main/java/io/netty/util/internal/Hidden.java +++ b/common/src/main/java/io/netty/util/internal/Hidden.java @@ -108,6 +108,10 @@ public void applyTo(BlockHound.Builder builder) { "io.netty.handler.ssl.ReferenceCountedOpenSslClientContext$ExtendedTrustManagerVerifyCallback", "verify"); + builder.allowBlockingCallsInside( + "io.netty.handler.ssl.JdkSslContext$Defaults", + "init"); + // Let's whitelist SSLEngineImpl.unwrap(...) for now as it may fail otherwise for TLS 1.3. // See https://mail.openjdk.java.net/pipermail/security-dev/2020-August/022271.html builder.allowBlockingCallsInside( diff --git a/handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java b/handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java index a452924829a..516ef660ba9 100644 --- a/handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java +++ b/handler/src/main/java/io/netty/handler/ssl/JdkSslContext.java @@ -69,33 +69,54 @@ public class JdkSslContext extends SslContext { private static final Provider DEFAULT_PROVIDER; static { - SSLContext context; - try { - context = SSLContext.getInstance(PROTOCOL); - context.init(null, null, null); - } catch (Exception e) { - throw new Error("failed to initialize the default SSL context", e); + Defaults defaults = new Defaults(); + defaults.init(); + + DEFAULT_PROVIDER = defaults.defaultProvider; + DEFAULT_PROTOCOLS = defaults.defaultProtocols; + SUPPORTED_CIPHERS = defaults.supportedCiphers; + DEFAULT_CIPHERS = defaults.defaultCiphers; + DEFAULT_CIPHERS_NON_TLSV13 = defaults.defaultCiphersNonTLSv13; + SUPPORTED_CIPHERS_NON_TLSV13 = defaults.supportedCiphersNonTLSv13; + + if (logger.isDebugEnabled()) { + logger.debug("Default protocols (JDK): {} ", Arrays.asList(DEFAULT_PROTOCOLS)); + logger.debug("Default cipher suites (JDK): {}", DEFAULT_CIPHERS); } + } - DEFAULT_PROVIDER = context.getProvider(); + private static final class Defaults { + String[] defaultProtocols; + List<String> defaultCiphers; + List<String> defaultCiphersNonTLSv13; + Set<String> supportedCiphers; + Set<String> supportedCiphersNonTLSv13; + Provider defaultProvider; - SSLEngine engine = context.createSSLEngine(); - DEFAULT_PROTOCOLS = defaultProtocols(context, engine); + void init() { + SSLContext context; + try { + context = SSLContext.getInstance(PROTOCOL); + context.init(null, null, null); + } catch (Exception e) { + throw new Error("failed to initialize the default SSL context", e); + } - SUPPORTED_CIPHERS = Collections.unmodifiableSet(supportedCiphers(engine)); - DEFAULT_CIPHERS = Collections.unmodifiableList(defaultCiphers(engine, SUPPORTED_CIPHERS)); + defaultProvider = context.getProvider(); - List<String> ciphersNonTLSv13 = new ArrayList<String>(DEFAULT_CIPHERS); - ciphersNonTLSv13.removeAll(Arrays.asList(SslUtils.DEFAULT_TLSV13_CIPHER_SUITES)); - DEFAULT_CIPHERS_NON_TLSV13 = Collections.unmodifiableList(ciphersNonTLSv13); + SSLEngine engine = context.createSSLEngine(); + defaultProtocols = defaultProtocols(context, engine); - Set<String> suppertedCiphersNonTLSv13 = new LinkedHashSet<String>(SUPPORTED_CIPHERS); - suppertedCiphersNonTLSv13.removeAll(Arrays.asList(SslUtils.DEFAULT_TLSV13_CIPHER_SUITES)); - SUPPORTED_CIPHERS_NON_TLSV13 = Collections.unmodifiableSet(suppertedCiphersNonTLSv13); + supportedCiphers = Collections.unmodifiableSet(supportedCiphers(engine)); + defaultCiphers = 
Collections.unmodifiableList(defaultCiphers(engine, supportedCiphers)); - if (logger.isDebugEnabled()) { - logger.debug("Default protocols (JDK): {} ", Arrays.asList(DEFAULT_PROTOCOLS)); - logger.debug("Default cipher suites (JDK): {}", DEFAULT_CIPHERS); + List<String> ciphersNonTLSv13 = new ArrayList<String>(defaultCiphers); + ciphersNonTLSv13.removeAll(Arrays.asList(SslUtils.DEFAULT_TLSV13_CIPHER_SUITES)); + defaultCiphersNonTLSv13 = Collections.unmodifiableList(ciphersNonTLSv13); + + Set<String> suppertedCiphersNonTLSv13 = new LinkedHashSet<String>(supportedCiphers); + suppertedCiphersNonTLSv13.removeAll(Arrays.asList(SslUtils.DEFAULT_TLSV13_CIPHER_SUITES)); + supportedCiphersNonTLSv13 = Collections.unmodifiableSet(suppertedCiphersNonTLSv13); } }
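Editorial note on the design choice in this patch: the blocking trust-store load originally ran directly in `JdkSslContext`'s static initializer, which is hard to target by name, so the fix moves it into the named `Defaults.init()` method that BlockHound can allowlist as a concrete (class, method) pair. A generic sketch of that refactoring pattern, with purely hypothetical names:

```java
// Hypothetical sketch of the pattern: hoist blocking one-time setup out of
// the static initializer (<clinit>) into a named method, so that tools which
// match on (class, method) names can reference it.
final class Defaults {
    static final String VALUE = init();   // was: blocking work inline in <clinit>

    private static String init() {        // named, allowlistable target
        // Stand-in for the blocking call (the real code reads a key store file).
        return System.getProperty("example.value", "default");
    }
}
```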
null
val
test
"2022-05-27T08:22:49"
"2022-05-25T13:56:26Z"
Tcharl
val
netty/netty/10757_12442
netty/netty
netty/netty/10757
netty/netty/12442
[ "keyword_pr_to_issue" ]
56f7c50d19125dad36b97ed33d9797da941a7591
c42a207f7242b2bdd170cc463b49bf50c7334165
[ "Is there any reason that JUnit 5 cannot be added to Netty 4.1?\r\n\r\nConsidering test artifacts are not exported as part of the build process, it doesn't seem like it should pose a compatibility issue.", "The testsuite is exposed so we can consume it . That said I would not consider this API\n\n> Am 30.10.2020 um 21:16 schrieb Bennett Lynch <notifications@github.com>:\n> \n> \n> Is there any reason that JUnit 5 cannot be added to Netty 4.1?\n> \n> Considering test artifacts are not exported as part of the build process, it doesn't seem like it should pose a compatibility issue.\n> \n> —\n> You are receiving this because you are subscribed to this thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n", "JUnit 5 is now enabled in both the `4.1` branch and the `master` branch.", "- [x] `all` - no tests\r\n- [x] `bom` - no tests\r\n- [x] `buffer` - #11305\r\n- [x] `codec` - #11306\r\n- [x] `codec-dns` - #11307\r\n- [x] `codec-haproxy` - #11308\r\n- [x] `codec-http` - #11316\r\n- [x] `codec-http2` - #11422\r\n- [x] `codec-memcache` - #11310\r\n- [ ] `codec-mqtt`\r\n- [x] `codec-redis` - #11318\r\n- [x] `codec-smtp` - #11309\r\n- [x] `codec-socks` - #11314\r\n- [x] `codec-stomp` - #11312\r\n- [x] `codec-xml` - #11311\r\n- [x] `common` - #11319\r\n- [x] `dev-tools` - no tests\r\n- [x] `example` - no tests\r\n- [ ] `handler`\r\n- [x] `handler-proxy` - #11313\r\n- [x] `microbench` - #11443\r\n- [ ] `resolver`\r\n- [x] `resolver-dns` - #11326\r\n- [x] `resolver-dns-native-macos`\r\n- [x] `tarball` - no tests\r\n- [x] `testsuite` - #11320 \r\n- [x] `testsuite-autobahn` - no tests\r\n- [x] `testsuite-http2` - no tests\r\n- [x] `testsuite-native`\r\n- [x] `testsuite-native-image` - no tests\r\n- [x] `testsuite-native-image-client` - no tests\r\n- [x] `testsuite-native-image-client-runtime-init` - no tests\r\n- [ ] `testsuite-osgi` - https://github.com/ops4j/org.ops4j.pax.exam2/issues/951\r\n- [x] `testsuite-shading` - #11323\r\n- [x] `transport` - #11315\r\n- [x] `transport-blockhound-tests` - #11322\r\n- [x] `transport-native-epoll` - #11320 #11442\r\n- [x] `transport-native-kqueue` - #11320 \r\n- [x] `transport-native-unix-common` - #11321\r\n- [x] `transport-native-unix-common-tests` - #11320 \r\n- [ ] `transport-sctp` - #11325", "I hope that's a good start for now! I have pushed all my progress so far, and I will probably pick up on this again later today.", "The `handler` module is proving problematic to migrate - it heavily uses class-scoped parameterized tests." ]
[]
"2022-06-03T00:30:43Z"
[ "help wanted" ]
Migrating to JUnit 5
I propose that we start migrating the test suite for Netty 5 (the `master` branch) to JUnit 5. JUnit 5 is more expressive, extensible, and composable in many ways, and it's better able to run tests in parallel. It can also directly run JUnit 4 tests, using the vintage engine, which means we can do this in small steps; there's no need to migrate all tests all at once. Once the JUnit 5 infrastructure is there, tests or modules can be migrated one by one, or be left as is for the time being. Regardless, new tests that are only added to the `master` branch can immediately use the new JUnit 5 test APIs. Some benefits I anticipate are: 1. Some tests will become easier to write, because JUnit 5 is more capable at expressing nesting of tests, and parameterisations. 2. Builds should complete faster because JUnit 5 is better able to run tests in parallel. This is something that needs to be enabled explicitly, however, since it can cause latent test isolation failures to pop up. 3. JUnit 5 is extensible enough that something like `TimedOutTestsListener` won't need to live in the `netty-build` anymore. A drawback I can think of is that forward porting changes to a JUnit 4 test in Netty `4.1`, to a migrated JUnit 5 test in `master`, will be more challenging and add to the workload of doing changes in `4.1`. What do people think?
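Editorial note: for context, a typical mechanical migration of a single test from JUnit 4 to JUnit 5 looks like the sketch below (illustrative fragments, not taken from the Netty tree; `decode` is a hypothetical method under test):

```java
// JUnit 4
import org.junit.Before;
import org.junit.Test;

public class CodecTest {
    @Before
    public void setUp() { /* ... */ }

    @Test(expected = IllegalArgumentException.class)
    public void rejectsNegativeLength() { decode(-1); }
}

// JUnit 5 equivalent: @Before becomes @BeforeEach, and expected exceptions
// are asserted explicitly with assertThrows.
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertThrows;

class CodecJupiterTest {
    @BeforeEach
    void setUp() { /* ... */ }

    @Test
    void rejectsNegativeLength() {
        assertThrows(IllegalArgumentException.class, () -> decode(-1));
    }
}
```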
[ "microbench/pom.xml", "pom.xml", "transport-native-unix-common-tests/pom.xml", "transport-native-unix-common-tests/src/main/java/io/netty/channel/unix/tests/SocketTest.java" ]
[ "microbench/pom.xml", "pom.xml", "transport-native-unix-common-tests/pom.xml", "transport-native-unix-common-tests/src/main/java/io/netty/channel/unix/tests/SocketTest.java" ]
[ "codec/src/test/resources/io/netty/handler/codec/xml/sample-04.xml", "testsuite-native/pom.xml", "testsuite-shading/pom.xml", "testsuite/pom.xml", "testsuite/src/main/java/io/netty/testsuite/transport/socket/DatagramUnicastIPv6Test.java", "testsuite/src/main/java/io/netty/testsuite/util/TestUtils.java" ]
diff --git a/microbench/pom.xml b/microbench/pom.xml index 18aca81bcaa..32a26bfb089 100644 --- a/microbench/pom.xml +++ b/microbench/pom.xml @@ -185,16 +185,6 @@ <artifactId>junit-jupiter-engine</artifactId> <scope>compile</scope> </dependency> - <dependency> - <groupId>org.junit.vintage</groupId> - <artifactId>junit-vintage-engine</artifactId> - <scope>compile</scope> - </dependency> - <dependency> - <groupId>junit</groupId> - <artifactId>junit</artifactId> - <scope>compile</scope> - </dependency> <dependency> <groupId>org.openjdk.jmh</groupId> <artifactId>jmh-core</artifactId> diff --git a/pom.xml b/pom.xml index 512965f98a0..d2d22be0960 100644 --- a/pom.xml +++ b/pom.xml @@ -886,18 +886,6 @@ <version>${junit.version}</version> <scope>test</scope> </dependency> - <dependency> - <groupId>org.junit.vintage</groupId> - <artifactId>junit-vintage-engine</artifactId> - <version>${junit.version}</version> - <scope>test</scope> - </dependency> - <dependency> - <groupId>junit</groupId> - <artifactId>junit</artifactId> - <version>4.13.1</version> - <scope>test</scope> - </dependency> <dependency> <groupId>${project.groupId}</groupId> <artifactId>netty-build-common</artifactId> @@ -1030,16 +1018,6 @@ <artifactId>junit-jupiter-params</artifactId> <scope>test</scope> </dependency> - <dependency> - <groupId>org.junit.vintage</groupId> - <artifactId>junit-vintage-engine</artifactId> - <scope>test</scope> - </dependency> - <dependency> - <groupId>junit</groupId> - <artifactId>junit</artifactId> - <scope>test</scope> - </dependency> <dependency> <groupId>${project.groupId}</groupId> <artifactId>netty-build-common</artifactId> @@ -1148,6 +1126,12 @@ <classQualifiedName>io.netty.util.internal.InternalThreadLocalMap</classQualifiedName> <justification>Ignore cache padding.</justification> </item> + <item> + <ignore>true</ignore> + <code>java.method.removed</code> + <old>method java.lang.String io.netty.testsuite.util.TestUtils::testMethodName(org.junit.rules.TestName)</old> + <justification>This should be test-only, and we're removing support for JUnit 4.</justification> + </item> </differences> </revapi.differences> </analysisConfiguration> diff --git a/transport-native-unix-common-tests/pom.xml b/transport-native-unix-common-tests/pom.xml index 0f56941dbcc..5743bdc0f9f 100644 --- a/transport-native-unix-common-tests/pom.xml +++ b/transport-native-unix-common-tests/pom.xml @@ -54,15 +54,5 @@ <artifactId>junit-jupiter-engine</artifactId> <scope>compile</scope> </dependency> - <dependency> - <groupId>org.junit.vintage</groupId> - <artifactId>junit-vintage-engine</artifactId> - <scope>compile</scope> - </dependency> - <dependency> - <groupId>junit</groupId> - <artifactId>junit</artifactId> - <scope>compile</scope> - </dependency> </dependencies> </project> diff --git a/transport-native-unix-common-tests/src/main/java/io/netty/channel/unix/tests/SocketTest.java b/transport-native-unix-common-tests/src/main/java/io/netty/channel/unix/tests/SocketTest.java index 115ab353958..c33afc2759b 100644 --- a/transport-native-unix-common-tests/src/main/java/io/netty/channel/unix/tests/SocketTest.java +++ b/transport-native-unix-common-tests/src/main/java/io/netty/channel/unix/tests/SocketTest.java @@ -17,10 +17,10 @@ import io.netty.channel.unix.Buffer; import io.netty.channel.unix.Socket; -import org.junit.AssumptionViolatedException; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.opentest4j.TestAbortedException; import java.io.IOException; 
import java.nio.ByteBuffer; @@ -127,10 +127,10 @@ public void testRawOpt() throws IOException { } protected int level() { - throw new AssumptionViolatedException("Not supported"); + throw new TestAbortedException("Not supported"); } protected int optname() { - throw new AssumptionViolatedException("Not supported"); + throw new TestAbortedException("Not supported"); } }
diff --git a/codec/src/test/resources/io/netty/handler/codec/xml/sample-04.xml b/codec/src/test/resources/io/netty/handler/codec/xml/sample-04.xml index 8a75afccd6b..22b9d702b31 100644 --- a/codec/src/test/resources/io/netty/handler/codec/xml/sample-04.xml +++ b/codec/src/test/resources/io/netty/handler/codec/xml/sample-04.xml @@ -210,12 +210,6 @@ <dependencies> <!-- Testing frameworks and related dependencies --> - <dependency> - <groupId>junit</groupId> - <artifactId>junit</artifactId> - <version>4.12</version> - <scope>test</scope> - </dependency> <dependency> <groupId>org.easymock</groupId> <artifactId>easymock</artifactId> diff --git a/testsuite-native/pom.xml b/testsuite-native/pom.xml index 8f4c534ad58..afba034fb3d 100644 --- a/testsuite-native/pom.xml +++ b/testsuite-native/pom.xml @@ -45,14 +45,6 @@ <groupId>org.junit.jupiter</groupId> <artifactId>junit-jupiter-engine</artifactId> </dependency> - <dependency> - <groupId>org.junit.vintage</groupId> - <artifactId>junit-vintage-engine</artifactId> - </dependency> - <dependency> - <groupId>junit</groupId> - <artifactId>junit</artifactId> - </dependency> </dependencies> <profiles> <profile> diff --git a/testsuite-shading/pom.xml b/testsuite-shading/pom.xml index 535552985ab..49d0f00f1c3 100644 --- a/testsuite-shading/pom.xml +++ b/testsuite-shading/pom.xml @@ -200,14 +200,6 @@ <groupId>org.junit.jupiter</groupId> <artifactId>junit-jupiter-engine</artifactId> </dependency> - <dependency> - <groupId>org.junit.vintage</groupId> - <artifactId>junit-vintage-engine</artifactId> - </dependency> - <dependency> - <groupId>junit</groupId> - <artifactId>junit</artifactId> - </dependency> </dependencies> <profiles> <profile> diff --git a/testsuite/pom.xml b/testsuite/pom.xml index f6dc457b86c..6e7813c9103 100644 --- a/testsuite/pom.xml +++ b/testsuite/pom.xml @@ -94,16 +94,6 @@ <artifactId>junit-jupiter-params</artifactId> <scope>compile</scope> </dependency> - <dependency> - <groupId>org.junit.vintage</groupId> - <artifactId>junit-vintage-engine</artifactId> - <scope>compile</scope> - </dependency> - <dependency> - <groupId>junit</groupId> - <artifactId>junit</artifactId> - <scope>compile</scope> - </dependency> <dependency> <groupId>org.hamcrest</groupId> <artifactId>hamcrest-library</artifactId> diff --git a/testsuite/src/main/java/io/netty/testsuite/transport/socket/DatagramUnicastIPv6Test.java b/testsuite/src/main/java/io/netty/testsuite/transport/socket/DatagramUnicastIPv6Test.java index 2235747e91a..308669c6f5a 100644 --- a/testsuite/src/main/java/io/netty/testsuite/transport/socket/DatagramUnicastIPv6Test.java +++ b/testsuite/src/main/java/io/netty/testsuite/transport/socket/DatagramUnicastIPv6Test.java @@ -18,8 +18,8 @@ import io.netty.channel.socket.InternetProtocolFamily; import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.SuppressJava6Requirement; -import org.junit.AssumptionViolatedException; import org.junit.jupiter.api.BeforeAll; +import org.opentest4j.TestAbortedException; import java.io.IOException; import java.net.StandardProtocolFamily; @@ -38,7 +38,7 @@ public static void assumeIpv6Supported() { Channel channel = SelectorProvider.provider().openDatagramChannel(StandardProtocolFamily.INET6); channel.close(); } catch (UnsupportedOperationException e) { - throw new AssumptionViolatedException("IPv6 not supported", e); + throw new TestAbortedException("IPv6 not supported", e); } catch (IOException ignore) { // Ignore } diff --git a/testsuite/src/main/java/io/netty/testsuite/util/TestUtils.java 
b/testsuite/src/main/java/io/netty/testsuite/util/TestUtils.java index eeb37f2f9f5..f1192def7c5 100644 --- a/testsuite/src/main/java/io/netty/testsuite/util/TestUtils.java +++ b/testsuite/src/main/java/io/netty/testsuite/util/TestUtils.java @@ -21,7 +21,6 @@ import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; import org.junit.jupiter.api.TestInfo; -import org.junit.rules.TestName; import org.tukaani.xz.LZMA2Options; import org.tukaani.xz.XZOutputStream; @@ -120,17 +119,6 @@ public String apply(Method method) { return testMethodName; } - /** - * Returns the method name of the current test. - */ - public static String testMethodName(TestName testName) { - String testMethodName = testName.getMethodName(); - if (testMethodName.contains("[")) { - testMethodName = testMethodName.substring(0, testMethodName.indexOf('[')); - } - return testMethodName; - } - public static void dump(String filenamePrefix) throws IOException { ObjectUtil.checkNotNull(filenamePrefix, "filenamePrefix");
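Editorial note: both patches above swap JUnit 4's `org.junit.AssumptionViolatedException` for `org.opentest4j.TestAbortedException`, the exception type JUnit 5 uses to mark a test as aborted rather than failed. In new Jupiter code the same effect is usually expressed through the `Assumptions` API instead of throwing the exception directly; a small sketch (the probe condition is hypothetical):

```java
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;

class Ipv6Test {
    @Test
    void sendsOverIpv6() {
        // Preferred: let the Assumptions API throw TestAbortedException for us.
        Assumptions.assumeTrue(ipv6Supported(), "IPv6 not supported");
        // ... test body ...
    }

    private static boolean ipv6Supported() {
        // Hypothetical probe; the real check in the patch opens a datagram channel.
        return Boolean.getBoolean("test.ipv6");
    }
}
```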
train
test
"2022-06-01T18:17:12"
"2020-10-30T15:10:12Z"
chrisvest
val