code (stringlengths 227–324k) | indentifier (stringlengths 171–323k) | lang (stringclasses, 2 values) |
---|---|---|
published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ import org.testng.annotations.AfterTest; import org.testng.annotations.BeforeTest; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.IOException; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetSocketAddress; import java.net.InetAddress; import java.net.MulticastSocket; import java.net.SocketException; import java.nio.channels.DatagramChannel; import static org.testng.Assert.assertThrows; /* * @test * @bug 8236105 8240533 * @summary Check that DatagramSocket throws expected * Exception when sending a DatagramPacket with port 0 * @run testng/othervm SendPortZero */ public class SendPortZero { private InetAddress loopbackAddr, wildcardAddr; private DatagramSocket datagramSocket, datagramSocketAdaptor; private DatagramPacket loopbackZeroPkt, wildcardZeroPkt, wildcardValidPkt; private static final Class<SocketException> SE = SocketException.class; @BeforeTest public void setUp() throws IOException { datagramSocket = new DatagramSocket(); datagramSocketAdaptor = DatagramChannel.open().socket(); byte[] buf = "test".getBytes(); // Addresses loopbackAddr = InetAddress.getLoopbackAddress(); wildcardAddr = new InetSocketAddress(0).getAddress(); // Packets // loopback w/port 0 loopbackZeroPkt = new DatagramPacket(buf, 0, buf. <mask> <mask> <mask> ); loopbackZeroPkt.setAddress(loopbackAddr); loopbackZeroPkt.setPort(0); // wildcard w/port 0 wildcardZeroPkt = new DatagramPacket(buf, 0, buf. <mask> <mask> <mask> ); wildcardZeroPkt.setAddress(wildcardAddr); wildcardZeroPkt.setPort(0); // wildcard addr w/valid port // Not currently tested. See JDK-8236807 wildcardValidPkt = new DatagramPacket(buf, 0, buf. <mask> <mask> <mask> ); wildcardValidPkt.setAddress(wildcardAddr); wildcardValidPkt.setPort(datagramSocket.getLocalPort()); } @DataProvider(name = "data") public Object[][] variants() { return new Object[][]{ { datagramSocket, loopbackZeroPkt }, { datagramSocket, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocket, wildcardValidPkt }, { datagramSocketAdaptor, loopbackZeroPkt }, { datagramSocketAdaptor, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocketAdaptor, wildcardValidPkt }, }; } @Test(dataProvider = "data") public void testSend(DatagramSocket ds, DatagramPacket pkt) { assertThrows(SE, () -> ds.send(pkt)); } @AfterTest public void tearDown() { datagramSocket.close(); datagramSocketAdaptor.close(); } } | published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ import org.testng.annotations.AfterTest; import org.testng.annotations.BeforeTest; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.IOException; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetSocketAddress; import java.net.InetAddress; import java.net.MulticastSocket; import java.net.SocketException; import java.nio.channels.DatagramChannel; import static org.testng.Assert.assertThrows; /* * @test * @bug 8236105 8240533 * @summary Check that DatagramSocket throws expected * Exception when sending a DatagramPacket with port 0 * @run testng/othervm SendPortZero */ public class SendPortZero { private InetAddress loopbackAddr, wildcardAddr; private DatagramSocket datagramSocket, datagramSocketAdaptor; private DatagramPacket loopbackZeroPkt, wildcardZeroPkt, wildcardValidPkt; private static final Class<SocketException> SE = SocketException.class; @BeforeTest public void setUp() throws IOException { datagramSocket = new DatagramSocket(); datagramSocketAdaptor = DatagramChannel.open().socket(); byte[] buf = "test".getBytes(); // Addresses loopbackAddr = InetAddress.getLoopbackAddress(); wildcardAddr = new InetSocketAddress(0).getAddress(); // Packets // loopback w/port 0 loopbackZeroPkt = new DatagramPacket(buf, 0, buf. length ); loopbackZeroPkt.setAddress(loopbackAddr); loopbackZeroPkt.setPort(0); // wildcard w/port 0 wildcardZeroPkt = new DatagramPacket(buf, 0, buf. length ); wildcardZeroPkt.setAddress(wildcardAddr); wildcardZeroPkt.setPort(0); // wildcard addr w/valid port // Not currently tested. See JDK-8236807 wildcardValidPkt = new DatagramPacket(buf, 0, buf. length ); wildcardValidPkt.setAddress(wildcardAddr); wildcardValidPkt.setPort(datagramSocket.getLocalPort()); } @DataProvider(name = "data") public Object[][] variants() { return new Object[][]{ { datagramSocket, loopbackZeroPkt }, { datagramSocket, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocket, wildcardValidPkt }, { datagramSocketAdaptor, loopbackZeroPkt }, { datagramSocketAdaptor, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocketAdaptor, wildcardValidPkt }, }; } @Test(dataProvider = "data") public void testSend(DatagramSocket ds, DatagramPacket pkt) { assertThrows(SE, () -> ds.send(pkt)); } @AfterTest public void tearDown() { datagramSocket.close(); datagramSocketAdaptor.close(); } } | java |
hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ import org.testng.annotations.AfterTest; import org.testng.annotations.BeforeTest; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.IOException; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetSocketAddress; import java.net.InetAddress; import java.net.MulticastSocket; import java.net.SocketException; import java.nio.channels.DatagramChannel; import static org.testng.Assert.assertThrows; /* * @test * @bug 8236105 8240533 * @summary Check that DatagramSocket throws expected * Exception when sending a DatagramPacket with port 0 * @run testng/othervm SendPortZero */ public class SendPortZero { private InetAddress loopbackAddr, wildcardAddr; private DatagramSocket datagramSocket, datagramSocketAdaptor; private DatagramPacket loopbackZeroPkt, wildcardZeroPkt, wildcardValidPkt; private static final Class<SocketException> SE = SocketException.class; @BeforeTest public void setUp() throws IOException { datagramSocket = new DatagramSocket(); datagramSocketAdaptor = DatagramChannel.open().socket(); byte[] buf = "test".getBytes(); // Addresses loopbackAddr = InetAddress.getLoopbackAddress(); wildcardAddr = new InetSocketAddress(0).getAddress(); // Packets // loopback w/port 0 loopbackZeroPkt = new DatagramPacket(buf, 0, buf. <mask> <mask> <mask> ); loopbackZeroPkt.setAddress(loopbackAddr); loopbackZeroPkt.setPort(0); // wildcard w/port 0 wildcardZeroPkt = new DatagramPacket(buf, 0, buf. <mask> <mask> <mask> ); wildcardZeroPkt.setAddress(wildcardAddr); wildcardZeroPkt.setPort(0); // wildcard addr w/valid port // Not currently tested. See JDK-8236807 wildcardValidPkt = new DatagramPacket(buf, 0, buf. <mask> <mask> <mask> ); wildcardValidPkt.setAddress(wildcardAddr); wildcardValidPkt.setPort(datagramSocket.getLocalPort()); } @DataProvider(name = "data") public Object[][] variants() { return new Object[][]{ { datagramSocket, loopbackZeroPkt }, { datagramSocket, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocket, wildcardValidPkt }, { datagramSocketAdaptor, loopbackZeroPkt }, { datagramSocketAdaptor, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocketAdaptor, wildcardValidPkt }, }; } @Test(dataProvider = "data") public void testSend(DatagramSocket ds, DatagramPacket pkt) { assertThrows(SE, () -> ds.send(pkt)); } @AfterTest public void tearDown() { datagramSocket.close(); datagramSocketAdaptor.close(); } } | hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). 
* * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ import org.testng.annotations.AfterTest; import org.testng.annotations.BeforeTest; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.IOException; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetSocketAddress; import java.net.InetAddress; import java.net.MulticastSocket; import java.net.SocketException; import java.nio.channels.DatagramChannel; import static org.testng.Assert.assertThrows; /* * @test * @bug 8236105 8240533 * @summary Check that DatagramSocket throws expected * Exception when sending a DatagramPacket with port 0 * @run testng/othervm SendPortZero */ public class SendPortZero { private InetAddress loopbackAddr, wildcardAddr; private DatagramSocket datagramSocket, datagramSocketAdaptor; private DatagramPacket loopbackZeroPkt, wildcardZeroPkt, wildcardValidPkt; private static final Class<SocketException> SE = SocketException.class; @BeforeTest public void setUp() throws IOException { datagramSocket = new DatagramSocket(); datagramSocketAdaptor = DatagramChannel.open().socket(); byte[] buf = "test".getBytes(); // Addresses loopbackAddr = InetAddress.getLoopbackAddress(); wildcardAddr = new InetSocketAddress(0).getAddress(); // Packets // loopback w/port 0 loopbackZeroPkt = new DatagramPacket(buf, 0, buf. length ); loopbackZeroPkt.setAddress(loopbackAddr); loopbackZeroPkt.setPort(0); // wildcard w/port 0 wildcardZeroPkt = new DatagramPacket(buf, 0, buf. length ); wildcardZeroPkt.setAddress(wildcardAddr); wildcardZeroPkt.setPort(0); // wildcard addr w/valid port // Not currently tested. See JDK-8236807 wildcardValidPkt = new DatagramPacket(buf, 0, buf. length ); wildcardValidPkt.setAddress(wildcardAddr); wildcardValidPkt.setPort(datagramSocket.getLocalPort()); } @DataProvider(name = "data") public Object[][] variants() { return new Object[][]{ { datagramSocket, loopbackZeroPkt }, { datagramSocket, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocket, wildcardValidPkt }, { datagramSocketAdaptor, loopbackZeroPkt }, { datagramSocketAdaptor, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocketAdaptor, wildcardValidPkt }, }; } @Test(dataProvider = "data") public void testSend(DatagramSocket ds, DatagramPacket pkt) { assertThrows(SE, () -> ds.send(pkt)); } @AfterTest public void tearDown() { datagramSocket.close(); datagramSocketAdaptor.close(); } } | java |
FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ import org.testng.annotations.AfterTest; import org.testng.annotations.BeforeTest; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.IOException; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetSocketAddress; import java.net.InetAddress; import java.net.MulticastSocket; import java.net.SocketException; import java.nio.channels.DatagramChannel; import static org.testng.Assert.assertThrows; /* * @test * @bug 8236105 8240533 * @summary Check that DatagramSocket throws expected * Exception when sending a DatagramPacket with port 0 * @run testng/othervm SendPortZero */ public class SendPortZero { private InetAddress loopbackAddr, wildcardAddr; private DatagramSocket datagramSocket, datagramSocketAdaptor; private DatagramPacket loopbackZeroPkt, wildcardZeroPkt, wildcardValidPkt; private static final Class<SocketException> SE = SocketException.class; @BeforeTest public void setUp() throws IOException { datagramSocket = new DatagramSocket(); datagramSocketAdaptor = DatagramChannel.open().socket(); byte[] buf = "test".getBytes(); // Addresses loopbackAddr = InetAddress.getLoopbackAddress(); wildcardAddr = new InetSocketAddress(0).getAddress(); // Packets // loopback w/port 0 loopbackZeroPkt = new DatagramPacket(buf, 0, buf. <mask> <mask> <mask> ); loopbackZeroPkt.setAddress(loopbackAddr); loopbackZeroPkt.setPort(0); // wildcard w/port 0 wildcardZeroPkt = new DatagramPacket(buf, 0, buf. <mask> <mask> <mask> ); wildcardZeroPkt.setAddress(wildcardAddr); wildcardZeroPkt.setPort(0); // wildcard addr w/valid port // Not currently tested. See JDK-8236807 wildcardValidPkt = new DatagramPacket(buf, 0, buf. <mask> <mask> <mask> ); wildcardValidPkt.setAddress(wildcardAddr); wildcardValidPkt.setPort(datagramSocket.getLocalPort()); } @DataProvider(name = "data") public Object[][] variants() { return new Object[][]{ { datagramSocket, loopbackZeroPkt }, { datagramSocket, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocket, wildcardValidPkt }, { datagramSocketAdaptor, loopbackZeroPkt }, { datagramSocketAdaptor, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocketAdaptor, wildcardValidPkt }, }; } @Test(dataProvider = "data") public void testSend(DatagramSocket ds, DatagramPacket pkt) { assertThrows(SE, () -> ds.send(pkt)); } @AfterTest public void tearDown() { datagramSocket.close(); datagramSocketAdaptor.close(); } } | FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/ import org.testng.annotations.AfterTest; import org.testng.annotations.BeforeTest; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.IOException; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetSocketAddress; import java.net.InetAddress; import java.net.MulticastSocket; import java.net.SocketException; import java.nio.channels.DatagramChannel; import static org.testng.Assert.assertThrows; /* * @test * @bug 8236105 8240533 * @summary Check that DatagramSocket throws expected * Exception when sending a DatagramPacket with port 0 * @run testng/othervm SendPortZero */ public class SendPortZero { private InetAddress loopbackAddr, wildcardAddr; private DatagramSocket datagramSocket, datagramSocketAdaptor; private DatagramPacket loopbackZeroPkt, wildcardZeroPkt, wildcardValidPkt; private static final Class<SocketException> SE = SocketException.class; @BeforeTest public void setUp() throws IOException { datagramSocket = new DatagramSocket(); datagramSocketAdaptor = DatagramChannel.open().socket(); byte[] buf = "test".getBytes(); // Addresses loopbackAddr = InetAddress.getLoopbackAddress(); wildcardAddr = new InetSocketAddress(0).getAddress(); // Packets // loopback w/port 0 loopbackZeroPkt = new DatagramPacket(buf, 0, buf. length ); loopbackZeroPkt.setAddress(loopbackAddr); loopbackZeroPkt.setPort(0); // wildcard w/port 0 wildcardZeroPkt = new DatagramPacket(buf, 0, buf. length ); wildcardZeroPkt.setAddress(wildcardAddr); wildcardZeroPkt.setPort(0); // wildcard addr w/valid port // Not currently tested. See JDK-8236807 wildcardValidPkt = new DatagramPacket(buf, 0, buf. length ); wildcardValidPkt.setAddress(wildcardAddr); wildcardValidPkt.setPort(datagramSocket.getLocalPort()); } @DataProvider(name = "data") public Object[][] variants() { return new Object[][]{ { datagramSocket, loopbackZeroPkt }, { datagramSocket, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocket, wildcardValidPkt }, { datagramSocketAdaptor, loopbackZeroPkt }, { datagramSocketAdaptor, wildcardZeroPkt }, // Re-enable when JDK-8236807 fixed //{ datagramSocketAdaptor, wildcardValidPkt }, }; } @Test(dataProvider = "data") public void testSend(DatagramSocket ds, DatagramPacket pkt) { assertThrows(SE, () -> ds.send(pkt)); } @AfterTest public void tearDown() { datagramSocket.close(); datagramSocketAdaptor.close(); } } | java |
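The three rows above are overlapping windows over the same OpenJDK jtreg test, `SendPortZero`, and the masked token in each is `length` (the `buf.length` argument of the `DatagramPacket` constructor), as the unmasked column shows. The standalone sketch below condenses the behavior the test asserts; the class name and printed messages are illustrative additions, not part of the dataset or the JDK test.

```java
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketException;

// Illustrative sketch: on JDKs carrying the fixes for 8236105/8240533, sending a
// DatagramPacket whose destination port is 0 is rejected with a SocketException.
public class SendPortZeroSketch {
    public static void main(String[] args) throws IOException {
        byte[] buf = "test".getBytes();
        try (DatagramSocket ds = new DatagramSocket()) {
            DatagramPacket pkt = new DatagramPacket(buf, 0, buf.length); // buf.length is the masked span
            pkt.setAddress(InetAddress.getLoopbackAddress());
            pkt.setPort(0);  // port 0 is not a valid destination port
            ds.send(pkt);    // expected to throw on recent JDKs
            System.out.println("send() did not throw; this JDK does not enforce the check");
        } catch (SocketException expected) {
            System.out.println("Got expected SocketException: " + expected.getMessage());
        }
    }
}
```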
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. */ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean <mask> <mask> <mask> <mask> <mask> ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. <mask> <mask> <mask> <mask> <mask> = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. 
<mask> <mask> <mask> <mask> <mask> = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean <mask> <mask> <mask> <mask> <mask> () { return <mask> <mask> <mask> <mask> <mask> ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! <mask> <mask> <mask> <mask> <mask> ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. " + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the | /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. 
*/ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean isSubsequent ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. isSubsequent = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. isSubsequent = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean isSubsequent () { return isSubsequent ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! isSubsequent ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. " + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the | java |
* contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. */ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean <mask> <mask> <mask> <mask> <mask> ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. <mask> <mask> <mask> <mask> <mask> = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. <mask> <mask> <mask> <mask> <mask> = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean <mask> <mask> <mask> <mask> <mask> () { return <mask> <mask> <mask> <mask> <mask> ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! 
<mask> <mask> <mask> <mask> <mask> ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. " + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() | * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. */ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean isSubsequent ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. isSubsequent = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. isSubsequent = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean isSubsequent () { return isSubsequent ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! isSubsequent ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. 
" + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() | java |
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. */ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean <mask> <mask> <mask> <mask> <mask> ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. <mask> <mask> <mask> <mask> <mask> = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. <mask> <mask> <mask> <mask> <mask> = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean <mask> <mask> <mask> <mask> <mask> () { return <mask> <mask> <mask> <mask> <mask> ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! 
<mask> <mask> <mask> <mask> <mask> ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. " + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap | You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. */ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean isSubsequent ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. isSubsequent = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. isSubsequent = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean isSubsequent () { return isSubsequent ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! isSubsequent ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. 
" + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap | java |
in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. */ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean <mask> <mask> <mask> <mask> <mask> ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. <mask> <mask> <mask> <mask> <mask> = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. <mask> <mask> <mask> <mask> <mask> = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean <mask> <mask> <mask> <mask> <mask> () { return <mask> <mask> <mask> <mask> <mask> ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! <mask> <mask> <mask> <mask> <mask> ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. 
" + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. // Since, we are updating an element in this case, | in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. 
*/ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean isSubsequent ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. isSubsequent = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. isSubsequent = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean isSubsequent () { return isSubsequent ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! isSubsequent ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. " + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. 
*/ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. // Since, we are updating an element in this case, | java |
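The throttleResponse logic in the row above accepts a subsequent share-fetch request only when the cached session's epoch is exactly the next epoch after the one carried in the request metadata; anything else is answered with INVALID_SHARE_SESSION_EPOCH as a likely duplicate. Below is a minimal, hypothetical Java sketch of that handshake; nextEpoch here is a stand-in and does not reproduce ShareRequestMetadata's rules for reserved or wrapped epoch values.

public class EpochCheckSketch {

    // Hypothetical stand-in for ShareRequestMetadata.nextEpoch; the real method
    // has its own handling of reserved initial/final epochs.
    static int nextEpoch(int epoch) {
        return epoch == Integer.MAX_VALUE ? 1 : epoch + 1;
    }

    enum Result { NONE, INVALID_SHARE_SESSION_EPOCH }

    // Mirrors the check above: the cached session epoch must be exactly the
    // next epoch after the one in the request metadata.
    static Result validate(int sessionEpoch, int requestMetadataEpoch) {
        return sessionEpoch == nextEpoch(requestMetadataEpoch)
                ? Result.NONE
                : Result.INVALID_SHARE_SESSION_EPOCH;
    }

    public static void main(String[] args) {
        System.out.println(validate(6, 5)); // NONE: request advances the session by one step
        System.out.println(validate(6, 6)); // INVALID_SHARE_SESSION_EPOCH: possible duplicate request
    }
}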
distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. */ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean <mask> <mask> <mask> <mask> <mask> ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. <mask> <mask> <mask> <mask> <mask> = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. <mask> <mask> <mask> <mask> <mask> = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean <mask> <mask> <mask> <mask> <mask> () { return <mask> <mask> <mask> <mask> <mask> ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! <mask> <mask> <mask> <mask> <mask> ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. 
" + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. // Since, we are updating an element in this case, we need to perform | distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. 
*/ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean isSubsequent ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. isSubsequent = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. isSubsequent = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean isSubsequent () { return isSubsequent ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! isSubsequent ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. " + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. 
*/ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. // Since, we are updating an element in this case, we need to perform | java |
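PartitionIterator in the rows above is a look-ahead filter: hasNext() walks the underlying entry iterator until it finds a partition that must be included in the response, caches it in nextElement, and next() hands that cached element out exactly once. A generic sketch of the same pattern follows, with a plain Predicate standing in for maybeUpdateResponseData; it is an illustration of the iteration style, not the Kafka class itself.

import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Predicate;

public class FilteringIterator<T> implements Iterator<T> {

    private final Iterator<T> delegate;
    private final Predicate<T> mustRespond; // stand-in for cachedPart.maybeUpdateResponseData(...)
    private T nextElement;                  // look-ahead cache, mirrors the field above

    public FilteringIterator(Iterator<T> delegate, Predicate<T> mustRespond) {
        this.delegate = delegate;
        this.mustRespond = mustRespond;
    }

    @Override
    public boolean hasNext() {
        // Advance until an element that must be included is found and cached.
        while (nextElement == null && delegate.hasNext()) {
            T candidate = delegate.next();
            if (mustRespond.test(candidate)) {
                nextElement = candidate;
            }
            // The real PartitionIterator may additionally remove unselected entries here.
        }
        return nextElement != null;
    }

    @Override
    public T next() {
        if (!hasNext()) {
            throw new NoSuchElementException();
        }
        T element = nextElement;
        nextElement = null; // each cached element is handed out exactly once
        return element;
    }

    public static void main(String[] args) {
        Iterator<Integer> selected =
                new FilteringIterator<>(List.of(0, 3, 0, 7).iterator(), n -> n > 0);
        selected.forEachRemaining(System.out::println); // prints 3, then 7
    }
}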
specific language governing permissions and * limitations under the License. */ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. */ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean <mask> <mask> <mask> <mask> <mask> ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. <mask> <mask> <mask> <mask> <mask> = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. <mask> <mask> <mask> <mask> <mask> = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean <mask> <mask> <mask> <mask> <mask> () { return <mask> <mask> <mask> <mask> <mask> ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! <mask> <mask> <mask> <mask> <mask> ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. 
" + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. // Since, we are updating an element in this case, we need to perform a remove and then a mustAdd to maintain the correct order session.partitionMap().remove(cachedPart); session.partitionMap().mustAdd(cachedPart); } } else { if (updateShareContextAndRemoveUnselected) { iterator.remove(); } } } } return nextElement != null; | specific language governing permissions and * limitations under the License. 
*/ package org.apache.kafka.server.share.context; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.server.share.CachedSharePartition; import org.apache.kafka.server.share.ErroneousAndValidPartitionData; import org.apache.kafka.server.share.session.ShareSession; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; /** * The context for a share session fetch request. */ public class ShareSessionContext extends ShareFetchContext { private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class); private final ShareRequestMetadata reqMetadata; private final boolean isSubsequent ; private Map<TopicIdPartition, SharePartitionData> shareFetchData; private ShareSession session; /** * The share fetch context for the first request that starts a share session. * * @param reqMetadata The request metadata. * @param shareFetchData The share partition data from the share fetch request. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) { this.reqMetadata = reqMetadata; this.shareFetchData = shareFetchData; this. isSubsequent = false; } /** * The share fetch context for a subsequent request that utilizes an existing share session. * * @param reqMetadata The request metadata. * @param session The subsequent fetch request session. */ public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) { this.reqMetadata = reqMetadata; this.session = session; this. isSubsequent = true; } // Visible for testing public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() { return shareFetchData; } // Visible for testing public boolean isSubsequent () { return isSubsequent ; } // Visible for testing public ShareSession session() { return session; } @Override boolean isTraceEnabled() { return log.isTraceEnabled(); } @Override public ShareFetchResponse throttleResponse(int throttleTimeMs) { if (! isSubsequent ) { return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. 
" + "Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. // Since, we are updating an element in this case, we need to perform a remove and then a mustAdd to maintain the correct order session.partitionMap().remove(cachedPart); session.partitionMap().mustAdd(cachedPart); } } else { if (updateShareContextAndRemoveUnselected) { iterator.remove(); } } } } return nextElement != null; | java |
sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. // Since, we are updating an element in this case, we need to perform a remove and then a mustAdd to maintain the correct order session.partitionMap().remove(cachedPart); session.partitionMap().mustAdd(cachedPart); } } else { if (updateShareContextAndRemoveUnselected) { iterator.remove(); } } } } return nextElement != null; } @Override public Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> next() { if (!hasNext()) throw new NoSuchElementException(); Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = nextElement; nextElement = null; return element; } @Override public void remove() { throw new UnsupportedOperationException(); } } @Override public int responseSize(LinkedHashMap<TopicIdPartition, PartitionData> updates, short version) { if (! <mask> <mask> <mask> <mask> <mask> ) return ShareFetchResponse.sizeOf(version, updates.entrySet().iterator()); synchronized (session) { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); if (session.epoch != expectedEpoch) { return ShareFetchResponse.sizeOf(version, Collections.emptyIterator()); } // Pass the partition iterator which updates neither the share fetch context nor the partition map. return ShareFetchResponse.sizeOf(version, new PartitionIterator(updates.entrySet().iterator(), false)); } } @Override public ShareFetchResponse updateAndGenerateResponseData(String groupId, Uuid memberId, LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> updates) { if (! 
<mask> <mask> <mask> <mask> <mask> ) { return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } else { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, 0, Collections.emptyIterator(), Collections.emptyList())); } // Iterate over the update list using PartitionIterator. This will prune updates which don't need to be sent Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partitionIterator = new PartitionIterator( updates.entrySet().iterator(), true); while (partitionIterator.hasNext()) { partitionIterator.next(); } log.debug("Subsequent share session context with session key {} returning {}", session.key(), partitionsToLogString(updates.keySet())); return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } } @Override public ErroneousAndValidPartitionData getErroneousAndValidTopicIdPartitions() { if (! <mask> <mask> <mask> <mask> <mask> ) { return new ErroneousAndValidPartitionData(shareFetchData); } Map<TopicIdPartition, PartitionData> erroneous = new HashMap<>(); Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> valid = new HashMap<>(); // Take the session lock and iterate over all the cached partitions. synchronized (session) { session.partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); ShareFetchRequest.SharePartitionData reqData = cachedSharePartition.reqData(); if (topicIdPartition.topic() == null) { erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID)); } else { valid.put(topicIdPartition, reqData); } }); return new ErroneousAndValidPartitionData(erroneous, valid); } } } | sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs, Collections.emptyIterator(), Collections.emptyList())); } /** * Iterator that goes over the given partition map and selects partitions that need to be included in the response. * If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected * partitions and also remove unselected ones as they are encountered. 
*/ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. // Since, we are updating an element in this case, we need to perform a remove and then a mustAdd to maintain the correct order session.partitionMap().remove(cachedPart); session.partitionMap().mustAdd(cachedPart); } } else { if (updateShareContextAndRemoveUnselected) { iterator.remove(); } } } } return nextElement != null; } @Override public Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> next() { if (!hasNext()) throw new NoSuchElementException(); Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = nextElement; nextElement = null; return element; } @Override public void remove() { throw new UnsupportedOperationException(); } } @Override public int responseSize(LinkedHashMap<TopicIdPartition, PartitionData> updates, short version) { if (! isSubsequent ) return ShareFetchResponse.sizeOf(version, updates.entrySet().iterator()); synchronized (session) { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); if (session.epoch != expectedEpoch) { return ShareFetchResponse.sizeOf(version, Collections.emptyIterator()); } // Pass the partition iterator which updates neither the share fetch context nor the partition map. return ShareFetchResponse.sizeOf(version, new PartitionIterator(updates.entrySet().iterator(), false)); } } @Override public ShareFetchResponse updateAndGenerateResponseData(String groupId, Uuid memberId, LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> updates) { if (! isSubsequent ) { return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } else { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. 
Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, 0, Collections.emptyIterator(), Collections.emptyList())); } // Iterate over the update list using PartitionIterator. This will prune updates which don't need to be sent Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partitionIterator = new PartitionIterator( updates.entrySet().iterator(), true); while (partitionIterator.hasNext()) { partitionIterator.next(); } log.debug("Subsequent share session context with session key {} returning {}", session.key(), partitionsToLogString(updates.keySet())); return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } } @Override public ErroneousAndValidPartitionData getErroneousAndValidTopicIdPartitions() { if (! isSubsequent ) { return new ErroneousAndValidPartitionData(shareFetchData); } Map<TopicIdPartition, PartitionData> erroneous = new HashMap<>(); Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> valid = new HashMap<>(); // Take the session lock and iterate over all the cached partitions. synchronized (session) { session.partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); ShareFetchRequest.SharePartitionData reqData = cachedSharePartition.reqData(); if (topicIdPartition.topic() == null) { erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID)); } else { valid.put(topicIdPartition, reqData); } }); return new ErroneousAndValidPartitionData(erroneous, valid); } } } | java |
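In updateAndGenerateResponseData above, the PartitionIterator built with updateShareContextAndRemoveUnselected set to true is drained purely for its side effects: entries the client does not need to see again are removed from the updates map, and the response is then built from what remains. Below is a small stand-alone sketch of that prune-then-respond step, using plain strings and integers instead of TopicIdPartition and PartitionData.

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

public class PruneBeforeRespond {
    public static void main(String[] args) {
        Map<String, Integer> updates = new LinkedHashMap<>();
        updates.put("tp-0", 10);   // pretend this partition has new records
        updates.put("tp-1", 0);    // pretend this partition is unchanged

        Iterator<Map.Entry<String, Integer>> it = updates.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, Integer> entry = it.next();
            boolean mustRespond = entry.getValue() > 0; // stand-in for maybeUpdateResponseData
            if (!mustRespond) {
                it.remove(); // prune entries the client already knows about
            }
        }

        // The real code now builds ShareFetchResponse from the pruned map.
        System.out.println(updates); // {tp-0=10}
    }
}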
unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. // Since, we are updating an element in this case, we need to perform a remove and then a mustAdd to maintain the correct order session.partitionMap().remove(cachedPart); session.partitionMap().mustAdd(cachedPart); } } else { if (updateShareContextAndRemoveUnselected) { iterator.remove(); } } } } return nextElement != null; } @Override public Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> next() { if (!hasNext()) throw new NoSuchElementException(); Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = nextElement; nextElement = null; return element; } @Override public void remove() { throw new UnsupportedOperationException(); } } @Override public int responseSize(LinkedHashMap<TopicIdPartition, PartitionData> updates, short version) { if (! <mask> <mask> <mask> <mask> <mask> ) return ShareFetchResponse.sizeOf(version, updates.entrySet().iterator()); synchronized (session) { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); if (session.epoch != expectedEpoch) { return ShareFetchResponse.sizeOf(version, Collections.emptyIterator()); } // Pass the partition iterator which updates neither the share fetch context nor the partition map. return ShareFetchResponse.sizeOf(version, new PartitionIterator(updates.entrySet().iterator(), false)); } } @Override public ShareFetchResponse updateAndGenerateResponseData(String groupId, Uuid memberId, LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> updates) { if (! <mask> <mask> <mask> <mask> <mask> ) { return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } else { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. 
Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, 0, Collections.emptyIterator(), Collections.emptyList())); } // Iterate over the update list using PartitionIterator. This will prune updates which don't need to be sent Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partitionIterator = new PartitionIterator( updates.entrySet().iterator(), true); while (partitionIterator.hasNext()) { partitionIterator.next(); } log.debug("Subsequent share session context with session key {} returning {}", session.key(), partitionsToLogString(updates.keySet())); return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } } @Override public ErroneousAndValidPartitionData getErroneousAndValidTopicIdPartitions() { if (! <mask> <mask> <mask> <mask> <mask> ) { return new ErroneousAndValidPartitionData(shareFetchData); } Map<TopicIdPartition, PartitionData> erroneous = new HashMap<>(); Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> valid = new HashMap<>(); // Take the session lock and iterate over all the cached partitions. synchronized (session) { session.partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); ShareFetchRequest.SharePartitionData reqData = cachedSharePartition.reqData(); if (topicIdPartition.topic() == null) { erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID)); } else { valid.put(topicIdPartition, reqData); } }); return new ErroneousAndValidPartitionData(erroneous, valid); } } } | unselected ones as they are encountered. */ private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> { private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator; private final boolean updateShareContextAndRemoveUnselected; private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement; public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) { this.iterator = iterator; this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected; } @Override public boolean hasNext() { while ((nextElement == null) && iterator.hasNext()) { Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next(); TopicIdPartition topicPart = element.getKey(); ShareFetchResponseData.PartitionData respData = element.getValue(); synchronized (session) { CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart)); boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected); if (mustRespond) { nextElement = element; if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) { // Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements. 
// Since, we are updating an element in this case, we need to perform a remove and then a mustAdd to maintain the correct order session.partitionMap().remove(cachedPart); session.partitionMap().mustAdd(cachedPart); } } else { if (updateShareContextAndRemoveUnselected) { iterator.remove(); } } } } return nextElement != null; } @Override public Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> next() { if (!hasNext()) throw new NoSuchElementException(); Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = nextElement; nextElement = null; return element; } @Override public void remove() { throw new UnsupportedOperationException(); } } @Override public int responseSize(LinkedHashMap<TopicIdPartition, PartitionData> updates, short version) { if (! isSubsequent ) return ShareFetchResponse.sizeOf(version, updates.entrySet().iterator()); synchronized (session) { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); if (session.epoch != expectedEpoch) { return ShareFetchResponse.sizeOf(version, Collections.emptyIterator()); } // Pass the partition iterator which updates neither the share fetch context nor the partition map. return ShareFetchResponse.sizeOf(version, new PartitionIterator(updates.entrySet().iterator(), false)); } } @Override public ShareFetchResponse updateAndGenerateResponseData(String groupId, Uuid memberId, LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> updates) { if (! isSubsequent ) { return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } else { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, 0, Collections.emptyIterator(), Collections.emptyList())); } // Iterate over the update list using PartitionIterator. This will prune updates which don't need to be sent Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partitionIterator = new PartitionIterator( updates.entrySet().iterator(), true); while (partitionIterator.hasNext()) { partitionIterator.next(); } log.debug("Subsequent share session context with session key {} returning {}", session.key(), partitionsToLogString(updates.keySet())); return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } } @Override public ErroneousAndValidPartitionData getErroneousAndValidTopicIdPartitions() { if (! isSubsequent ) { return new ErroneousAndValidPartitionData(shareFetchData); } Map<TopicIdPartition, PartitionData> erroneous = new HashMap<>(); Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> valid = new HashMap<>(); // Take the session lock and iterate over all the cached partitions. 
synchronized (session) { session.partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); ShareFetchRequest.SharePartitionData reqData = cachedSharePartition.reqData(); if (topicIdPartition.topic() == null) { erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID)); } else { valid.put(topicIdPartition, reqData); } }); return new ErroneousAndValidPartitionData(erroneous, valid); } } } | java |
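getErroneousAndValidTopicIdPartitions above walks the cached partitions once under the session lock and routes each into either an erroneous map (the topic name could not be resolved from the topic id, reported as UNKNOWN_TOPIC_ID) or a valid map. The sketch below shows the same one-pass split with simplified types; the session lock and the real response objects are deliberately omitted.

import java.util.HashMap;
import java.util.Map;

public class ErroneousValidSplit {
    public static void main(String[] args) {
        // id -> resolved topic name (null when the topic id is unknown)
        Map<String, String> cached = new HashMap<>();
        cached.put("uuid-1", "orders");
        cached.put("uuid-2", null);

        Map<String, String> valid = new HashMap<>();
        Map<String, String> erroneous = new HashMap<>();
        cached.forEach((id, topic) -> {
            if (topic == null) {
                erroneous.put(id, "UNKNOWN_TOPIC_ID"); // stand-in for the error partition response
            } else {
                valid.put(id, topic);
            }
        });

        System.out.println("valid=" + valid + " erroneous=" + erroneous);
    }
}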
in this case, we need to perform a remove and then a mustAdd to maintain the correct order session.partitionMap().remove(cachedPart); session.partitionMap().mustAdd(cachedPart); } } else { if (updateShareContextAndRemoveUnselected) { iterator.remove(); } } } } return nextElement != null; } @Override public Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> next() { if (!hasNext()) throw new NoSuchElementException(); Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = nextElement; nextElement = null; return element; } @Override public void remove() { throw new UnsupportedOperationException(); } } @Override public int responseSize(LinkedHashMap<TopicIdPartition, PartitionData> updates, short version) { if (! <mask> <mask> <mask> <mask> <mask> ) return ShareFetchResponse.sizeOf(version, updates.entrySet().iterator()); synchronized (session) { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); if (session.epoch != expectedEpoch) { return ShareFetchResponse.sizeOf(version, Collections.emptyIterator()); } // Pass the partition iterator which updates neither the share fetch context nor the partition map. return ShareFetchResponse.sizeOf(version, new PartitionIterator(updates.entrySet().iterator(), false)); } } @Override public ShareFetchResponse updateAndGenerateResponseData(String groupId, Uuid memberId, LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> updates) { if (! <mask> <mask> <mask> <mask> <mask> ) { return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } else { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, 0, Collections.emptyIterator(), Collections.emptyList())); } // Iterate over the update list using PartitionIterator. This will prune updates which don't need to be sent Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partitionIterator = new PartitionIterator( updates.entrySet().iterator(), true); while (partitionIterator.hasNext()) { partitionIterator.next(); } log.debug("Subsequent share session context with session key {} returning {}", session.key(), partitionsToLogString(updates.keySet())); return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } } @Override public ErroneousAndValidPartitionData getErroneousAndValidTopicIdPartitions() { if (! <mask> <mask> <mask> <mask> <mask> ) { return new ErroneousAndValidPartitionData(shareFetchData); } Map<TopicIdPartition, PartitionData> erroneous = new HashMap<>(); Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> valid = new HashMap<>(); // Take the session lock and iterate over all the cached partitions. 
synchronized (session) { session.partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); ShareFetchRequest.SharePartitionData reqData = cachedSharePartition.reqData(); if (topicIdPartition.topic() == null) { erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID)); } else { valid.put(topicIdPartition, reqData); } }); return new ErroneousAndValidPartitionData(erroneous, valid); } } } | in this case, we need to perform a remove and then a mustAdd to maintain the correct order session.partitionMap().remove(cachedPart); session.partitionMap().mustAdd(cachedPart); } } else { if (updateShareContextAndRemoveUnselected) { iterator.remove(); } } } } return nextElement != null; } @Override public Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> next() { if (!hasNext()) throw new NoSuchElementException(); Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = nextElement; nextElement = null; return element; } @Override public void remove() { throw new UnsupportedOperationException(); } } @Override public int responseSize(LinkedHashMap<TopicIdPartition, PartitionData> updates, short version) { if (! isSubsequent ) return ShareFetchResponse.sizeOf(version, updates.entrySet().iterator()); synchronized (session) { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); if (session.epoch != expectedEpoch) { return ShareFetchResponse.sizeOf(version, Collections.emptyIterator()); } // Pass the partition iterator which updates neither the share fetch context nor the partition map. return ShareFetchResponse.sizeOf(version, new PartitionIterator(updates.entrySet().iterator(), false)); } } @Override public ShareFetchResponse updateAndGenerateResponseData(String groupId, Uuid memberId, LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> updates) { if (! isSubsequent ) { return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } else { int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch()); int sessionEpoch; synchronized (session) { sessionEpoch = session.epoch; } if (sessionEpoch != expectedEpoch) { log.debug("Subsequent share session {} expected epoch {}, but got {}. Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch); return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH, 0, Collections.emptyIterator(), Collections.emptyList())); } // Iterate over the update list using PartitionIterator. This will prune updates which don't need to be sent Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partitionIterator = new PartitionIterator( updates.entrySet().iterator(), true); while (partitionIterator.hasNext()) { partitionIterator.next(); } log.debug("Subsequent share session context with session key {} returning {}", session.key(), partitionsToLogString(updates.keySet())); return new ShareFetchResponse(ShareFetchResponse.toMessage( Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList())); } } @Override public ErroneousAndValidPartitionData getErroneousAndValidTopicIdPartitions() { if (! 
isSubsequent ) { return new ErroneousAndValidPartitionData(shareFetchData); } Map<TopicIdPartition, PartitionData> erroneous = new HashMap<>(); Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> valid = new HashMap<>(); // Take the session lock and iterate over all the cached partitions. synchronized (session) { session.partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); ShareFetchRequest.SharePartitionData reqData = cachedSharePartition.reqData(); if (topicIdPartition.topic() == null) { erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID)); } else { valid.put(topicIdPartition, reqData); } }); return new ErroneousAndValidPartitionData(erroneous, valid); } } } | java |
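The masked identifier in the rows above is the isSubsequent flag, which distinguishes the context created for the request that opens a share session from the one created for a follow-up request that reuses it; every public method branches on it. Below is a stripped-down, hypothetical sketch of that two-constructor shape (names and types here are illustrative, not Kafka's).

public class SessionContextSketch {

    private final boolean isSubsequent;
    private final Object session; // stand-in for ShareSession; null on the initial request

    // Initial request: no cached session yet.
    public SessionContextSketch() {
        this.session = null;
        this.isSubsequent = false;
    }

    // Subsequent request: reuse the cached session.
    public SessionContextSketch(Object session) {
        this.session = session;
        this.isSubsequent = true;
    }

    public String describe() {
        return isSubsequent
                ? "subsequent request on " + session
                : "initial request, new session";
    }

    public static void main(String[] args) {
        System.out.println(new SessionContextSketch().describe());
        System.out.println(new SessionContextSketch("session-key-42").describe());
    }
}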
file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.scheduler; import org.apache.flink.runtime.clusterframework.types.AllocationID; import org.apache.flink.runtime.clusterframework.types.ResourceProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfileTestingUtils; import org.apache.flink.runtime.executiongraph.ExecutionAttemptID; import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup; import org.apache.flink.runtime.jobmaster.LogicalSlot; import org.apache.flink.runtime.jobmaster.SlotRequestId; import org.apache.flink.runtime.jobmaster.TestingPayload; import org.apache.flink.runtime.jobmaster.slotpool.DummyPayload; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequest; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulk; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulkChecker; import org.apache.flink.runtime.scheduler.SharedSlotProfileRetriever.SharedSlotProfileRetrieverFactory; import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws | file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.flink.runtime.scheduler; import org.apache.flink.runtime.clusterframework.types.AllocationID; import org.apache.flink.runtime.clusterframework.types.ResourceProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfileTestingUtils; import org.apache.flink.runtime.executiongraph.ExecutionAttemptID; import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup; import org.apache.flink.runtime.jobmaster.LogicalSlot; import org.apache.flink.runtime.jobmaster.SlotRequestId; import org.apache.flink.runtime.jobmaster.TestingPayload; import org.apache.flink.runtime.jobmaster.slotpool.DummyPayload; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequest; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulk; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulkChecker; import org.apache.flink.runtime.scheduler.SharedSlotProfileRetriever.SharedSlotProfileRetrieverFactory; import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. 
newBuilder ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. newBuilder () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. 
newBuilder () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws | java |
(the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.scheduler; import org.apache.flink.runtime.clusterframework.types.AllocationID; import org.apache.flink.runtime.clusterframework.types.ResourceProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfileTestingUtils; import org.apache.flink.runtime.executiongraph.ExecutionAttemptID; import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup; import org.apache.flink.runtime.jobmaster.LogicalSlot; import org.apache.flink.runtime.jobmaster.SlotRequestId; import org.apache.flink.runtime.jobmaster.TestingPayload; import org.apache.flink.runtime.jobmaster.slotpool.DummyPayload; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequest; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulk; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulkChecker; import org.apache.flink.runtime.scheduler.SharedSlotProfileRetriever.SharedSlotProfileRetrieverFactory; import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, true, (context, assignment) -> assignment.getLogicalSlotFuture().get().releaseSlot(null)); } @Test void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception { // | (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.flink.runtime.scheduler; import org.apache.flink.runtime.clusterframework.types.AllocationID; import org.apache.flink.runtime.clusterframework.types.ResourceProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfileTestingUtils; import org.apache.flink.runtime.executiongraph.ExecutionAttemptID; import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup; import org.apache.flink.runtime.jobmaster.LogicalSlot; import org.apache.flink.runtime.jobmaster.SlotRequestId; import org.apache.flink.runtime.jobmaster.TestingPayload; import org.apache.flink.runtime.jobmaster.slotpool.DummyPayload; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequest; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulk; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulkChecker; import org.apache.flink.runtime.scheduler.SharedSlotProfileRetriever.SharedSlotProfileRetrieverFactory; import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. 
newBuilder ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. newBuilder () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. 
newBuilder () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, true, (context, assignment) -> assignment.getLogicalSlotFuture().get().releaseSlot(null)); } @Test void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception { // | java |
* http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.scheduler; import org.apache.flink.runtime.clusterframework.types.AllocationID; import org.apache.flink.runtime.clusterframework.types.ResourceProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfileTestingUtils; import org.apache.flink.runtime.executiongraph.ExecutionAttemptID; import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup; import org.apache.flink.runtime.jobmaster.LogicalSlot; import org.apache.flink.runtime.jobmaster.SlotRequestId; import org.apache.flink.runtime.jobmaster.TestingPayload; import org.apache.flink.runtime.jobmaster.slotpool.DummyPayload; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequest; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulk; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulkChecker; import org.apache.flink.runtime.scheduler.SharedSlotProfileRetriever.SharedSlotProfileRetrieverFactory; import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, true, (context, assignment) -> assignment.getLogicalSlotFuture().get().releaseSlot(null)); } @Test void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception { // physical slot request is not completed and does not complete logical requests testLogicalSlotRequestCancellationOrRelease( true, true, (context, assignment) -> { context.getAllocator().cancel(assignment.getExecutionAttemptId()); assertThatThrownBy( () -> { context.getAllocator() .cancel(assignment.getExecutionAttemptId()); | * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.flink.runtime.scheduler; import org.apache.flink.runtime.clusterframework.types.AllocationID; import org.apache.flink.runtime.clusterframework.types.ResourceProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfileTestingUtils; import org.apache.flink.runtime.executiongraph.ExecutionAttemptID; import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup; import org.apache.flink.runtime.jobmaster.LogicalSlot; import org.apache.flink.runtime.jobmaster.SlotRequestId; import org.apache.flink.runtime.jobmaster.TestingPayload; import org.apache.flink.runtime.jobmaster.slotpool.DummyPayload; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequest; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulk; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulkChecker; import org.apache.flink.runtime.scheduler.SharedSlotProfileRetriever.SharedSlotProfileRetrieverFactory; import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. 
newBuilder ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. newBuilder () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. 
newBuilder () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, true, (context, assignment) -> assignment.getLogicalSlotFuture().get().releaseSlot(null)); } @Test void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception { // physical slot request is not completed and does not complete logical requests testLogicalSlotRequestCancellationOrRelease( true, true, (context, assignment) -> { context.getAllocator().cancel(assignment.getExecutionAttemptId()); assertThatThrownBy( () -> { context.getAllocator() .cancel(assignment.getExecutionAttemptId()); | java |
OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.scheduler; import org.apache.flink.runtime.clusterframework.types.AllocationID; import org.apache.flink.runtime.clusterframework.types.ResourceProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfileTestingUtils; import org.apache.flink.runtime.executiongraph.ExecutionAttemptID; import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup; import org.apache.flink.runtime.jobmaster.LogicalSlot; import org.apache.flink.runtime.jobmaster.SlotRequestId; import org.apache.flink.runtime.jobmaster.TestingPayload; import org.apache.flink.runtime.jobmaster.slotpool.DummyPayload; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequest; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulk; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulkChecker; import org.apache.flink.runtime.scheduler.SharedSlotProfileRetriever.SharedSlotProfileRetrieverFactory; import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, true, (context, assignment) -> assignment.getLogicalSlotFuture().get().releaseSlot(null)); } @Test void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception { // physical slot request is not completed and does not complete logical requests testLogicalSlotRequestCancellationOrRelease( true, true, (context, assignment) -> { context.getAllocator().cancel(assignment.getExecutionAttemptId()); assertThatThrownBy( () -> { context.getAllocator() .cancel(assignment.getExecutionAttemptId()); assignment.getLogicalSlotFuture().get(); }) .as("The logical future must finish with the cancellation exception.") .hasCauseInstanceOf(CancellationException.class); }); } @Test void testCompletedLogicalSlotCancelationDoesNotCancelPhysicalSlotRequestAndDoesNotRemoveSharedSlot() throws Exception { // physical slot request is completed and completes logical requests | OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.flink.runtime.scheduler; import org.apache.flink.runtime.clusterframework.types.AllocationID; import org.apache.flink.runtime.clusterframework.types.ResourceProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfileTestingUtils; import org.apache.flink.runtime.executiongraph.ExecutionAttemptID; import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup; import org.apache.flink.runtime.jobmaster.LogicalSlot; import org.apache.flink.runtime.jobmaster.SlotRequestId; import org.apache.flink.runtime.jobmaster.TestingPayload; import org.apache.flink.runtime.jobmaster.slotpool.DummyPayload; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequest; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulk; import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulkChecker; import org.apache.flink.runtime.scheduler.SharedSlotProfileRetriever.SharedSlotProfileRetrieverFactory; import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. 
newBuilder ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. newBuilder () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. 
newBuilder () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, true, (context, assignment) -> assignment.getLogicalSlotFuture().get().releaseSlot(null)); } @Test void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception { // physical slot request is not completed and does not complete logical requests testLogicalSlotRequestCancellationOrRelease( true, true, (context, assignment) -> { context.getAllocator().cancel(assignment.getExecutionAttemptId()); assertThatThrownBy( () -> { context.getAllocator() .cancel(assignment.getExecutionAttemptId()); assignment.getLogicalSlotFuture().get(); }) .as("The logical future must finish with the cancellation exception.") .hasCauseInstanceOf(CancellationException.class); }); } @Test void testCompletedLogicalSlotCancelationDoesNotCancelPhysicalSlotRequestAndDoesNotRemoveSharedSlot() throws Exception { // physical slot request is completed and completes logical requests | java |
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. <mask> <mask> <mask> <mask> () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. 
<mask> <mask> <mask> <mask> () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, true, (context, assignment) -> assignment.getLogicalSlotFuture().get().releaseSlot(null)); } @Test void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception { // physical slot request is not completed and does not complete logical requests testLogicalSlotRequestCancellationOrRelease( true, true, (context, assignment) -> { context.getAllocator().cancel(assignment.getExecutionAttemptId()); assertThatThrownBy( () -> { context.getAllocator() .cancel(assignment.getExecutionAttemptId()); assignment.getLogicalSlotFuture().get(); }) .as("The logical future must finish with the cancellation exception.") .hasCauseInstanceOf(CancellationException.class); }); } @Test void testCompletedLogicalSlotCancelationDoesNotCancelPhysicalSlotRequestAndDoesNotRemoveSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, false, (context, assignment) -> { context.getAllocator().cancel(assignment.getExecutionAttemptId()); assignment.getLogicalSlotFuture().get(); }); } private static void testLogicalSlotRequestCancellationOrRelease( boolean completePhysicalSlotFutureManually, boolean cancelsPhysicalSlotRequestAndRemovesSharedSlot, BiConsumerWithException<AllocationContext, ExecutionSlotAssignment, Exception> cancelOrReleaseAction) throws Exception { AllocationContext.Builder allocationContextBuilder = AllocationContext. 
<mask> <mask> <mask> <mask> ().addGroup(EV1, EV2, EV3); if (completePhysicalSlotFutureManually) { allocationContextBuilder.withPhysicalSlotProvider( TestingPhysicalSlotProvider.createWithoutImmediatePhysicalSlotCreation()); } AllocationContext context = allocationContextBuilder.build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> assignments = context.allocateSlotsFor(EV1, EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); // cancel or release only | import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID; import org.apache.flink.util.FlinkException; import org.apache.flink.util.function.BiConsumerWithException; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId; import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link SlotSharingExecutionSlotAllocator}. */ class SlotSharingExecutionSlotAllocatorTest { private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L); private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5); private static final ExecutionVertexID EV1 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV2 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV3 = createRandomExecutionVertexId(); private static final ExecutionVertexID EV4 = createRandomExecutionVertexId(); @Test void testSlotProfileRequestAskedBulkAndGroup() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); ExecutionSlotSharingGroup executionSlotSharingGroup = context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1); context.allocateSlotsFor(EV1, EV2); List<Set<ExecutionVertexID>> askedBulks = context.getSlotProfileRetrieverFactory().getAskedBulks(); assertThat(askedBulks).hasSize(1); assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2); assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups()) .containsExactly(executionSlotSharingGroup); } @Test void testSlotRequestProfile() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2, EV3).build(); ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3); context.allocateSlotsFor(EV1, EV2); Optional<PhysicalSlotRequest> slotRequest = context.getSlotProvider().getRequests().values().stream().findFirst(); assertThat(slotRequest).isPresent(); assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile()) .isEqualTo(physicalsSlotResourceProfile); } @Test void testAllocatePhysicalSlotForNewSharedSlot() { AllocationContext context = AllocationContext. 
newBuilder ().addGroup(EV1, EV2).addGroup(EV3, EV4).build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV1, EV2, EV3, EV4); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testAllocateLogicalSlotFromAvailableSharedSlot() { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1, EV2).build(); context.allocateSlotsFor(EV1); Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments = context.allocateSlotsFor(EV2); Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values()); // execution 0 from the first allocateSlotsFor call and execution 1 from the second // allocateSlotsFor call // share a slot, therefore only one physical slot allocation should happen assertThat(assignIds).containsExactly(EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); } @Test void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture() throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. newBuilder ().addGroup(EV1).build(); ExecutionSlotAssignment assignment1 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); ExecutionSlotAssignment assignment2 = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1); assertThat(assignment1.getLogicalSlotFuture().get()) .isSameAs(assignment2.getLogicalSlotFuture().get()); } @Test void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() { AllocationContext context = AllocationContext. newBuilder () .addGroup(EV1) .withPhysicalSlotProvider( TestingPhysicalSlotProvider .createWithoutImmediatePhysicalSlotCreation()) .build(); CompletableFuture<LogicalSlot> logicalSlotFuture = getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1) .getLogicalSlotFuture(); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); assertThat(logicalSlotFuture).isNotDone(); context.getSlotProvider() .getResponses() .get(slotRequestId) .completeExceptionally(new Throwable()); assertThat(logicalSlotFuture).isCompletedExceptionally(); // next allocation allocates new shared slot context.allocateSlotsFor(EV1); assertThat(context.getSlotProvider().getRequests()).hasSize(2); } @Test void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(false); } @Test void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException { testSlotWillBeOccupiedIndefinitely(true); } private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely) throws ExecutionException, InterruptedException { AllocationContext context = AllocationContext. 
newBuilder () .addGroup(EV1) .setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely) .build(); context.allocateSlotsFor(EV1); PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail(); assertThat(slotRequest.willSlotBeOccupiedIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); TestingPhysicalSlot physicalSlot = context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get(); assertThat(physicalSlot.getPayload()).isNotNull(); assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely()) .isEqualTo(slotWillBeOccupiedIndefinitely); } @Test void testReturningLogicalSlotsRemovesSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, true, (context, assignment) -> assignment.getLogicalSlotFuture().get().releaseSlot(null)); } @Test void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception { // physical slot request is not completed and does not complete logical requests testLogicalSlotRequestCancellationOrRelease( true, true, (context, assignment) -> { context.getAllocator().cancel(assignment.getExecutionAttemptId()); assertThatThrownBy( () -> { context.getAllocator() .cancel(assignment.getExecutionAttemptId()); assignment.getLogicalSlotFuture().get(); }) .as("The logical future must finish with the cancellation exception.") .hasCauseInstanceOf(CancellationException.class); }); } @Test void testCompletedLogicalSlotCancelationDoesNotCancelPhysicalSlotRequestAndDoesNotRemoveSharedSlot() throws Exception { // physical slot request is completed and completes logical requests testLogicalSlotRequestCancellationOrRelease( false, false, (context, assignment) -> { context.getAllocator().cancel(assignment.getExecutionAttemptId()); assignment.getLogicalSlotFuture().get(); }); } private static void testLogicalSlotRequestCancellationOrRelease( boolean completePhysicalSlotFutureManually, boolean cancelsPhysicalSlotRequestAndRemovesSharedSlot, BiConsumerWithException<AllocationContext, ExecutionSlotAssignment, Exception> cancelOrReleaseAction) throws Exception { AllocationContext.Builder allocationContextBuilder = AllocationContext. newBuilder ().addGroup(EV1, EV2, EV3); if (completePhysicalSlotFutureManually) { allocationContextBuilder.withPhysicalSlotProvider( TestingPhysicalSlotProvider.createWithoutImmediatePhysicalSlotCreation()); } AllocationContext context = allocationContextBuilder.build(); Map<ExecutionAttemptID, ExecutionSlotAssignment> assignments = context.allocateSlotsFor(EV1, EV2); assertThat(context.getSlotProvider().getRequests()).hasSize(1); // cancel or release only | java |
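The rows above pair source code containing `<mask>` placeholders with an identifier-filled counterpart and a language tag. As a minimal sketch of how such records might be consumed, the snippet below loads and inspects them with the Python `datasets` library; this is an assumption about tooling (the preview does not say how the data is meant to be read), the repository id is a placeholder since the preview does not name the dataset, and the column names are not assumed — the code prints whatever keys the records actually have.

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub and
# read with the Python `datasets` library. The repository id below is a
# placeholder, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/masked-identifier-corpus", split="train")  # hypothetical repo id

# Each record is expected to pair code containing `<mask>` placeholders with
# the same code where the masked identifiers are filled in, plus a language
# tag (e.g. "java"), as in the preview rows above.
row = ds[0]
print(sorted(row.keys()))  # inspect the actual column names instead of assuming them

for name, value in row.items():
    text = value if isinstance(value, str) else str(value)
    print(f"{name}: {text[:120]}...")  # truncate long code fields for readability
```

Once the real repository id is known it can be dropped in directly; nothing else in the snippet depends on the dataset's exact schema.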