Dataset Preview
The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed because of a cast error
Error code: DatasetGenerationCastError Exception: DatasetGenerationCastError Message: An error occurred while generating the dataset All the data files must have the same columns, but at some point there are 1 new columns ({'num_mask_tokens'}) This happened while the csv dataset builder was generating data using hf://datasets/anshulsc/EIR_REID/csi_train.csv (at revision af9dfe30ec6f0693d9c9f6e8cd19c20f14b3d480) Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations) Traceback: Traceback (most recent call last): File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1870, in _prepare_split_single writer.write_table(table) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 622, in write_table pa_table = table_cast(pa_table, self._schema) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2292, in table_cast return cast_table_to_schema(table, schema) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2240, in cast_table_to_schema raise CastError( datasets.table.CastError: Couldn't cast code: string indentifier: string num_mask_tokens: int64 lang: string -- schema metadata -- pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, "' + 727 to {'code': Value(dtype='string', id=None), 'indentifier': Value(dtype='string', id=None), 'lang': Value(dtype='string', id=None)} because column names don't match During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1420, in compute_config_parquet_and_info_response parquet_operations = convert_to_parquet(builder) File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1052, in convert_to_parquet builder.download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare self._download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1872, in _prepare_split_single raise DatasetGenerationCastError.from_cast_error( datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset All the data files must have the same columns, but at some point there are 1 new columns ({'num_mask_tokens'}) This happened while the csv dataset builder was generating data using hf://datasets/anshulsc/EIR_REID/csi_train.csv (at revision af9dfe30ec6f0693d9c9f6e8cd19c20f14b3d480) Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
code
string | indentifier
string | lang
string |
---|---|---|
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.net.InetAddress;
import java.net.MulticastSocket;
import java.net.SocketException;
import java.nio.channels.DatagramChannel;
import static org.testng.Assert.assertThrows;
/*
* @test
* @bug 8236105 8240533
* @summary Check that DatagramSocket throws expected
* Exception when sending a DatagramPacket with port 0
* @run testng/othervm SendPortZero
*/
public class SendPortZero {
private InetAddress loopbackAddr, wildcardAddr;
private DatagramSocket datagramSocket, datagramSocketAdaptor;
private DatagramPacket loopbackZeroPkt, wildcardZeroPkt, wildcardValidPkt;
private static final Class<SocketException> SE = SocketException.class;
@BeforeTest
public void setUp() throws IOException {
datagramSocket = new DatagramSocket();
datagramSocketAdaptor = DatagramChannel.open().socket();
byte[] buf = "test".getBytes();
// Addresses
loopbackAddr = InetAddress.getLoopbackAddress();
wildcardAddr = new InetSocketAddress(0).getAddress();
// Packets
// loopback w/port 0
loopbackZeroPkt = new DatagramPacket(buf, 0, buf. [MASK] );
loopbackZeroPkt.setAddress(loopbackAddr);
loopbackZeroPkt.setPort(0);
// wildcard w/port 0
wildcardZeroPkt = new DatagramPacket(buf, 0, buf. [MASK] );
wildcardZeroPkt.setAddress(wildcardAddr);
wildcardZeroPkt.setPort(0);
// wildcard addr w/valid port
// Not currently tested. See JDK-8236807
wildcardValidPkt = new DatagramPacket(buf, 0, buf. [MASK] );
wildcardValidPkt.setAddress(wildcardAddr);
wildcardValidPkt.setPort(datagramSocket.getLocalPort());
}
@DataProvider(name = "data")
public Object[][] variants() {
return new Object[][]{
{ datagramSocket, loopbackZeroPkt },
{ datagramSocket, wildcardZeroPkt },
// Re-enable when JDK-8236807 fixed
//{ datagramSocket, wildcardValidPkt },
{ datagramSocketAdaptor, loopbackZeroPkt },
{ datagramSocketAdaptor, wildcardZeroPkt },
// Re-enable when JDK-8236807 fixed
//{ datagramSocketAdaptor, wildcardValidPkt },
};
}
@Test(dataProvider = "data")
public void testSend(DatagramSocket ds, DatagramPacket pkt) {
assertThrows(SE, () -> ds.send(pkt));
}
@AfterTest
public void tearDown() {
datagramSocket.close();
datagramSocketAdaptor.close();
}
}
| length | java |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.share.context;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.ShareFetchResponseData;
import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.ShareFetchRequest;
import org.apache.kafka.common.requests.ShareFetchRequest.SharePartitionData;
import org.apache.kafka.common.requests.ShareFetchResponse;
import org.apache.kafka.common.requests.ShareRequestMetadata;
import org.apache.kafka.server.share.CachedSharePartition;
import org.apache.kafka.server.share.ErroneousAndValidPartitionData;
import org.apache.kafka.server.share.session.ShareSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
/**
* The context for a share session fetch request.
*/
public class ShareSessionContext extends ShareFetchContext {
private static final Logger log = LoggerFactory.getLogger(ShareSessionContext.class);
private final ShareRequestMetadata reqMetadata;
private final boolean [MASK] ;
private Map<TopicIdPartition, SharePartitionData> shareFetchData;
private ShareSession session;
/**
* The share fetch context for the first request that starts a share session.
*
* @param reqMetadata The request metadata.
* @param shareFetchData The share partition data from the share fetch request.
*/
public ShareSessionContext(ShareRequestMetadata reqMetadata,
Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData) {
this.reqMetadata = reqMetadata;
this.shareFetchData = shareFetchData;
this. [MASK] = false;
}
/**
* The share fetch context for a subsequent request that utilizes an existing share session.
*
* @param reqMetadata The request metadata.
* @param session The subsequent fetch request session.
*/
public ShareSessionContext(ShareRequestMetadata reqMetadata, ShareSession session) {
this.reqMetadata = reqMetadata;
this.session = session;
this. [MASK] = true;
}
// Visible for testing
public Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData() {
return shareFetchData;
}
// Visible for testing
public boolean [MASK] () {
return [MASK] ;
}
// Visible for testing
public ShareSession session() {
return session;
}
@Override
boolean isTraceEnabled() {
return log.isTraceEnabled();
}
@Override
public ShareFetchResponse throttleResponse(int throttleTimeMs) {
if (! [MASK] ) {
return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs,
Collections.emptyIterator(), Collections.emptyList()));
}
int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch());
int sessionEpoch;
synchronized (session) {
sessionEpoch = session.epoch;
}
if (sessionEpoch != expectedEpoch) {
log.debug("Subsequent share session {} expected epoch {}, but got {}. " +
"Possible duplicate request.", session.key(), expectedEpoch, sessionEpoch);
return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH,
throttleTimeMs, Collections.emptyIterator(), Collections.emptyList()));
}
return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.NONE, throttleTimeMs,
Collections.emptyIterator(), Collections.emptyList()));
}
/**
* Iterator that goes over the given partition map and selects partitions that need to be included in the response.
* If updateShareContextAndRemoveUnselected is set to true, the share context will be updated for the selected
* partitions and also remove unselected ones as they are encountered.
*/
private class PartitionIterator implements Iterator<Entry<TopicIdPartition, PartitionData>> {
private final Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator;
private final boolean updateShareContextAndRemoveUnselected;
private Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> nextElement;
public PartitionIterator(Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> iterator, boolean updateShareContextAndRemoveUnselected) {
this.iterator = iterator;
this.updateShareContextAndRemoveUnselected = updateShareContextAndRemoveUnselected;
}
@Override
public boolean hasNext() {
while ((nextElement == null) && iterator.hasNext()) {
Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = iterator.next();
TopicIdPartition topicPart = element.getKey();
ShareFetchResponseData.PartitionData respData = element.getValue();
synchronized (session) {
CachedSharePartition cachedPart = session.partitionMap().find(new CachedSharePartition(topicPart));
boolean mustRespond = cachedPart.maybeUpdateResponseData(respData, updateShareContextAndRemoveUnselected);
if (mustRespond) {
nextElement = element;
if (updateShareContextAndRemoveUnselected && ShareFetchResponse.recordsSize(respData) > 0) {
// Session.partitionMap is of type ImplicitLinkedHashCollection<> which tracks the order of insertion of elements.
// Since, we are updating an element in this case, we need to perform a remove and then a mustAdd to maintain the correct order
session.partitionMap().remove(cachedPart);
session.partitionMap().mustAdd(cachedPart);
}
} else {
if (updateShareContextAndRemoveUnselected) {
iterator.remove();
}
}
}
}
return nextElement != null;
}
@Override
public Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> next() {
if (!hasNext()) throw new NoSuchElementException();
Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData> element = nextElement;
nextElement = null;
return element;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
@Override
public int responseSize(LinkedHashMap<TopicIdPartition, PartitionData> updates, short version) {
if (! [MASK] )
return ShareFetchResponse.sizeOf(version, updates.entrySet().iterator());
synchronized (session) {
int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch());
if (session.epoch != expectedEpoch) {
return ShareFetchResponse.sizeOf(version, Collections.emptyIterator());
}
// Pass the partition iterator which updates neither the share fetch context nor the partition map.
return ShareFetchResponse.sizeOf(version, new PartitionIterator(updates.entrySet().iterator(), false));
}
}
@Override
public ShareFetchResponse updateAndGenerateResponseData(String groupId, Uuid memberId,
LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> updates) {
if (! [MASK] ) {
return new ShareFetchResponse(ShareFetchResponse.toMessage(
Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList()));
} else {
int expectedEpoch = ShareRequestMetadata.nextEpoch(reqMetadata.epoch());
int sessionEpoch;
synchronized (session) {
sessionEpoch = session.epoch;
}
if (sessionEpoch != expectedEpoch) {
log.debug("Subsequent share session {} expected epoch {}, but got {}. Possible duplicate request.",
session.key(), expectedEpoch, sessionEpoch);
return new ShareFetchResponse(ShareFetchResponse.toMessage(Errors.INVALID_SHARE_SESSION_EPOCH,
0, Collections.emptyIterator(), Collections.emptyList()));
}
// Iterate over the update list using PartitionIterator. This will prune updates which don't need to be sent
Iterator<Map.Entry<TopicIdPartition, ShareFetchResponseData.PartitionData>> partitionIterator = new PartitionIterator(
updates.entrySet().iterator(), true);
while (partitionIterator.hasNext()) {
partitionIterator.next();
}
log.debug("Subsequent share session context with session key {} returning {}", session.key(),
partitionsToLogString(updates.keySet()));
return new ShareFetchResponse(ShareFetchResponse.toMessage(
Errors.NONE, 0, updates.entrySet().iterator(), Collections.emptyList()));
}
}
@Override
public ErroneousAndValidPartitionData getErroneousAndValidTopicIdPartitions() {
if (! [MASK] ) {
return new ErroneousAndValidPartitionData(shareFetchData);
}
Map<TopicIdPartition, PartitionData> erroneous = new HashMap<>();
Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> valid = new HashMap<>();
// Take the session lock and iterate over all the cached partitions.
synchronized (session) {
session.partitionMap().forEach(cachedSharePartition -> {
TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new
TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition()));
ShareFetchRequest.SharePartitionData reqData = cachedSharePartition.reqData();
if (topicIdPartition.topic() == null) {
erroneous.put(topicIdPartition, ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_ID));
} else {
valid.put(topicIdPartition, reqData);
}
});
return new ErroneousAndValidPartitionData(erroneous, valid);
}
}
}
| isSubsequent | java |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.scheduler;
import org.apache.flink.runtime.clusterframework.types.AllocationID;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.clusterframework.types.SlotProfile;
import org.apache.flink.runtime.clusterframework.types.SlotProfileTestingUtils;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
import org.apache.flink.runtime.jobmaster.LogicalSlot;
import org.apache.flink.runtime.jobmaster.SlotRequestId;
import org.apache.flink.runtime.jobmaster.TestingPayload;
import org.apache.flink.runtime.jobmaster.slotpool.DummyPayload;
import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequest;
import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulk;
import org.apache.flink.runtime.jobmaster.slotpool.PhysicalSlotRequestBulkChecker;
import org.apache.flink.runtime.scheduler.SharedSlotProfileRetriever.SharedSlotProfileRetrieverFactory;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;
import org.apache.flink.util.FlinkException;
import org.apache.flink.util.function.BiConsumerWithException;
import org.junit.jupiter.api.Test;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createExecutionAttemptId;
import static org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.createRandomExecutionVertexId;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/** Test suite for {@link SlotSharingExecutionSlotAllocator}. */
class SlotSharingExecutionSlotAllocatorTest {
private static final Duration ALLOCATION_TIMEOUT = Duration.ofMillis(100L);
private static final ResourceProfile RESOURCE_PROFILE = ResourceProfile.fromResources(3, 5);
private static final ExecutionVertexID EV1 = createRandomExecutionVertexId();
private static final ExecutionVertexID EV2 = createRandomExecutionVertexId();
private static final ExecutionVertexID EV3 = createRandomExecutionVertexId();
private static final ExecutionVertexID EV4 = createRandomExecutionVertexId();
@Test
void testSlotProfileRequestAskedBulkAndGroup() {
AllocationContext context = AllocationContext. [MASK] ().addGroup(EV1, EV2).build();
ExecutionSlotSharingGroup executionSlotSharingGroup =
context.getSlotSharingStrategy().getExecutionSlotSharingGroup(EV1);
context.allocateSlotsFor(EV1, EV2);
List<Set<ExecutionVertexID>> askedBulks =
context.getSlotProfileRetrieverFactory().getAskedBulks();
assertThat(askedBulks).hasSize(1);
assertThat(askedBulks.get(0)).containsExactlyInAnyOrder(EV1, EV2);
assertThat(context.getSlotProfileRetrieverFactory().getAskedGroups())
.containsExactly(executionSlotSharingGroup);
}
@Test
void testSlotRequestProfile() {
AllocationContext context = AllocationContext. [MASK] ().addGroup(EV1, EV2, EV3).build();
ResourceProfile physicalsSlotResourceProfile = RESOURCE_PROFILE.multiply(3);
context.allocateSlotsFor(EV1, EV2);
Optional<PhysicalSlotRequest> slotRequest =
context.getSlotProvider().getRequests().values().stream().findFirst();
assertThat(slotRequest).isPresent();
assertThat(slotRequest.get().getSlotProfile().getPhysicalSlotResourceProfile())
.isEqualTo(physicalsSlotResourceProfile);
}
@Test
void testAllocatePhysicalSlotForNewSharedSlot() {
AllocationContext context =
AllocationContext. [MASK] ().addGroup(EV1, EV2).addGroup(EV3, EV4).build();
Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments =
context.allocateSlotsFor(EV1, EV2, EV3, EV4);
Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values());
assertThat(assignIds).containsExactlyInAnyOrder(EV1, EV2, EV3, EV4);
assertThat(context.getSlotProvider().getRequests()).hasSize(2);
}
@Test
void testAllocateLogicalSlotFromAvailableSharedSlot() {
AllocationContext context = AllocationContext. [MASK] ().addGroup(EV1, EV2).build();
context.allocateSlotsFor(EV1);
Map<ExecutionAttemptID, ExecutionSlotAssignment> executionSlotAssignments =
context.allocateSlotsFor(EV2);
Collection<ExecutionVertexID> assignIds = getAssignIds(executionSlotAssignments.values());
// execution 0 from the first allocateSlotsFor call and execution 1 from the second
// allocateSlotsFor call
// share a slot, therefore only one physical slot allocation should happen
assertThat(assignIds).containsExactly(EV2);
assertThat(context.getSlotProvider().getRequests()).hasSize(1);
}
@Test
void testDuplicateAllocationDoesNotRecreateLogicalSlotFuture()
throws ExecutionException, InterruptedException {
AllocationContext context = AllocationContext. [MASK] ().addGroup(EV1).build();
ExecutionSlotAssignment assignment1 =
getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1);
ExecutionSlotAssignment assignment2 =
getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1);
assertThat(assignment1.getLogicalSlotFuture().get())
.isSameAs(assignment2.getLogicalSlotFuture().get());
}
@Test
void testFailedPhysicalSlotRequestFailsLogicalSlotFuturesAndRemovesSharedSlot() {
AllocationContext context =
AllocationContext. [MASK] ()
.addGroup(EV1)
.withPhysicalSlotProvider(
TestingPhysicalSlotProvider
.createWithoutImmediatePhysicalSlotCreation())
.build();
CompletableFuture<LogicalSlot> logicalSlotFuture =
getAssignmentByExecutionVertexId(context.allocateSlotsFor(EV1), EV1)
.getLogicalSlotFuture();
SlotRequestId slotRequestId =
context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId();
assertThat(logicalSlotFuture).isNotDone();
context.getSlotProvider()
.getResponses()
.get(slotRequestId)
.completeExceptionally(new Throwable());
assertThat(logicalSlotFuture).isCompletedExceptionally();
// next allocation allocates new shared slot
context.allocateSlotsFor(EV1);
assertThat(context.getSlotProvider().getRequests()).hasSize(2);
}
@Test
void testSlotWillBeOccupiedIndefinitelyFalse() throws ExecutionException, InterruptedException {
testSlotWillBeOccupiedIndefinitely(false);
}
@Test
void testSlotWillBeOccupiedIndefinitelyTrue() throws ExecutionException, InterruptedException {
testSlotWillBeOccupiedIndefinitely(true);
}
private static void testSlotWillBeOccupiedIndefinitely(boolean slotWillBeOccupiedIndefinitely)
throws ExecutionException, InterruptedException {
AllocationContext context =
AllocationContext. [MASK] ()
.addGroup(EV1)
.setSlotWillBeOccupiedIndefinitely(slotWillBeOccupiedIndefinitely)
.build();
context.allocateSlotsFor(EV1);
PhysicalSlotRequest slotRequest = context.getSlotProvider().getFirstRequestOrFail();
assertThat(slotRequest.willSlotBeOccupiedIndefinitely())
.isEqualTo(slotWillBeOccupiedIndefinitely);
TestingPhysicalSlot physicalSlot =
context.getSlotProvider().getResponses().get(slotRequest.getSlotRequestId()).get();
assertThat(physicalSlot.getPayload()).isNotNull();
assertThat(physicalSlot.getPayload().willOccupySlotIndefinitely())
.isEqualTo(slotWillBeOccupiedIndefinitely);
}
@Test
void testReturningLogicalSlotsRemovesSharedSlot() throws Exception {
// physical slot request is completed and completes logical requests
testLogicalSlotRequestCancellationOrRelease(
false,
true,
(context, assignment) -> assignment.getLogicalSlotFuture().get().releaseSlot(null));
}
@Test
void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception {
// physical slot request is not completed and does not complete logical requests
testLogicalSlotRequestCancellationOrRelease(
true,
true,
(context, assignment) -> {
context.getAllocator().cancel(assignment.getExecutionAttemptId());
assertThatThrownBy(
() -> {
context.getAllocator()
.cancel(assignment.getExecutionAttemptId());
assignment.getLogicalSlotFuture().get();
})
.as("The logical future must finish with the cancellation exception.")
.hasCauseInstanceOf(CancellationException.class);
});
}
@Test
void
testCompletedLogicalSlotCancelationDoesNotCancelPhysicalSlotRequestAndDoesNotRemoveSharedSlot()
throws Exception {
// physical slot request is completed and completes logical requests
testLogicalSlotRequestCancellationOrRelease(
false,
false,
(context, assignment) -> {
context.getAllocator().cancel(assignment.getExecutionAttemptId());
assignment.getLogicalSlotFuture().get();
});
}
private static void testLogicalSlotRequestCancellationOrRelease(
boolean completePhysicalSlotFutureManually,
boolean cancelsPhysicalSlotRequestAndRemovesSharedSlot,
BiConsumerWithException<AllocationContext, ExecutionSlotAssignment, Exception>
cancelOrReleaseAction)
throws Exception {
AllocationContext.Builder allocationContextBuilder =
AllocationContext. [MASK] ().addGroup(EV1, EV2, EV3);
if (completePhysicalSlotFutureManually) {
allocationContextBuilder.withPhysicalSlotProvider(
TestingPhysicalSlotProvider.createWithoutImmediatePhysicalSlotCreation());
}
AllocationContext context = allocationContextBuilder.build();
Map<ExecutionAttemptID, ExecutionSlotAssignment> assignments =
context.allocateSlotsFor(EV1, EV2);
assertThat(context.getSlotProvider().getRequests()).hasSize(1);
// cancel or release only one sharing logical slots
cancelOrReleaseAction.accept(context, getAssignmentByExecutionVertexId(assignments, EV1));
Map<ExecutionAttemptID, ExecutionSlotAssignment> assignmentsAfterOneCancellation =
context.allocateSlotsFor(EV1, EV2);
// there should be no more physical slot allocations, as the first logical slot reuses the
// previous shared slot
assertThat(context.getSlotProvider().getRequests()).hasSize(1);
// cancel or release all sharing logical slots
for (ExecutionSlotAssignment assignment : assignmentsAfterOneCancellation.values()) {
cancelOrReleaseAction.accept(context, assignment);
}
SlotRequestId slotRequestId =
context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId();
assertThat(context.getSlotProvider().getCancellations().containsKey(slotRequestId))
.isEqualTo(cancelsPhysicalSlotRequestAndRemovesSharedSlot);
context.allocateSlotsFor(EV3);
// there should be one more physical slot allocation if the first allocation should be
// removed with all logical slots
int expectedNumberOfRequests = cancelsPhysicalSlotRequestAndRemovesSharedSlot ? 2 : 1;
assertThat(context.getSlotProvider().getRequests()).hasSize(expectedNumberOfRequests);
}
@Test
void testPhysicalSlotReleaseLogicalSlots() throws ExecutionException, InterruptedException {
AllocationContext context = AllocationContext. [MASK] ().addGroup(EV1, EV2).build();
Map<ExecutionAttemptID, ExecutionSlotAssignment> assignments =
context.allocateSlotsFor(EV1, EV2);
List<TestingPayload> payloads =
assignments.values().stream()
.map(
assignment -> {
TestingPayload payload = new TestingPayload();
assignment
.getLogicalSlotFuture()
.thenAccept(
logicalSlot ->
logicalSlot.tryAssignPayload(payload));
return payload;
})
.collect(Collectors.toList());
SlotRequestId slotRequestId =
context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId();
TestingPhysicalSlot physicalSlot = context.getSlotProvider().getFirstResponseOrFail().get();
assertThat(payloads.stream().allMatch(payload -> payload.getTerminalStateFuture().isDone()))
.isFalse();
assertThat(physicalSlot.getPayload()).isNotNull();
physicalSlot.getPayload().release(new Throwable());
assertThat(payloads.stream().allMatch(payload -> payload.getTerminalStateFuture().isDone()))
.isTrue();
assertThat(context.getSlotProvider().getCancellations()).containsKey(slotRequestId);
context.allocateSlotsFor(EV1, EV2);
// there should be one more physical slot allocation, as the first allocation should be
// removed after releasing all logical slots
assertThat(context.getSlotProvider().getRequests()).hasSize(2);
}
@Test
void testSchedulePendingRequestBulkTimeoutCheck() {
TestingPhysicalSlotRequestBulkChecker bulkChecker =
new TestingPhysicalSlotRequestBulkChecker();
AllocationContext context = createBulkCheckerContextWithEv12GroupAndEv3Group(bulkChecker);
context.allocateSlotsFor(EV1, EV3);
PhysicalSlotRequestBulk bulk = bulkChecker.getBulk();
assertThat(bulk.getPendingRequests()).hasSize(2);
assertThat(bulk.getPendingRequests())
.containsExactlyInAnyOrder(RESOURCE_PROFILE.multiply(2), RESOURCE_PROFILE);
assertThat(bulk.getAllocationIdsOfFulfilledRequests()).isEmpty();
assertThat(bulkChecker.getTimeout()).isEqualTo(ALLOCATION_TIMEOUT);
}
@Test
void testRequestFulfilledInBulk() {
TestingPhysicalSlotRequestBulkChecker bulkChecker =
new TestingPhysicalSlotRequestBulkChecker();
AllocationContext context = createBulkCheckerContextWithEv12GroupAndEv3Group(bulkChecker);
context.allocateSlotsFor(EV1, EV3);
AllocationID allocationId = new AllocationID();
ResourceProfile pendingSlotResourceProfile =
fulfilOneOfTwoSlotRequestsAndGetPendingProfile(context, allocationId);
PhysicalSlotRequestBulk bulk = bulkChecker.getBulk();
assertThat(bulk.getPendingRequests()).hasSize(1);
assertThat(bulk.getPendingRequests()).containsExactly(pendingSlotResourceProfile);
assertThat(bulk.getAllocationIdsOfFulfilledRequests()).hasSize(1);
assertThat(bulk.getAllocationIdsOfFulfilledRequests()).containsExactly(allocationId);
}
@Test
void testRequestBulkCancel() {
TestingPhysicalSlotRequestBulkChecker bulkChecker =
new TestingPhysicalSlotRequestBulkChecker();
AllocationContext context = createBulkCheckerContextWithEv12GroupAndEv3Group(bulkChecker);
// allocate 2 physical slots for 2 groups
Map<ExecutionAttemptID, ExecutionSlotAssignment> assignments1 =
context.allocateSlotsFor(EV1, EV3);
fulfilOneOfTwoSlotRequestsAndGetPendingProfile(context, new AllocationID());
PhysicalSlotRequestBulk bulk1 = bulkChecker.getBulk();
Map<ExecutionAttemptID, ExecutionSlotAssignment> assignments2 =
context.allocateSlotsFor(EV2);
// cancelling of (EV1, EV3) releases assignments1 and only one physical slot for EV3
// the second physical slot is held by sharing EV2 from the next bulk
bulk1.cancel(new Throwable());
// return completed logical slot to clear shared slot and release physical slot
assertThat(assignments1).hasSize(2);
CompletableFuture<LogicalSlot> ev1slot =
getAssignmentByExecutionVertexId(assignments1, EV1).getLogicalSlotFuture();
boolean ev1failed = ev1slot.isCompletedExceptionally();
CompletableFuture<LogicalSlot> ev3slot =
getAssignmentByExecutionVertexId(assignments1, EV3).getLogicalSlotFuture();
boolean ev3failed = ev3slot.isCompletedExceptionally();
LogicalSlot slot = ev1failed ? ev3slot.join() : ev1slot.join();
releaseLogicalSlot(slot);
// EV3 needs again a physical slot, therefore there are 3 requests overall
context.allocateSlotsFor(EV1, EV3);
assertThat(context.getSlotProvider().getRequests()).hasSize(3);
// either EV1 or EV3 logical slot future is fulfilled before cancellation
assertThat(ev1failed).isNotEqualTo(ev3failed);
assertThat(assignments2).hasSize(1);
assertThat(getAssignmentByExecutionVertexId(assignments2, EV2).getLogicalSlotFuture())
.isNotCompletedExceptionally();
}
private static void releaseLogicalSlot(LogicalSlot slot) {
slot.tryAssignPayload(new DummyPayload(CompletableFuture.completedFuture(null)));
slot.releaseSlot(new Throwable());
}
@Test
void testBulkClearIfPhysicalSlotRequestFails() {
TestingPhysicalSlotRequestBulkChecker bulkChecker =
new TestingPhysicalSlotRequestBulkChecker();
AllocationContext context = createBulkCheckerContextWithEv12GroupAndEv3Group(bulkChecker);
context.allocateSlotsFor(EV1, EV3);
SlotRequestId slotRequestId =
context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId();
context.getSlotProvider()
.getResultForRequestId(slotRequestId)
.completeExceptionally(new Throwable());
PhysicalSlotRequestBulk bulk = bulkChecker.getBulk();
assertThat(bulk.getPendingRequests()).isEmpty();
}
@Test
void failLogicalSlotsIfPhysicalSlotIsFailed() {
final TestingPhysicalSlotRequestBulkChecker bulkChecker =
new TestingPhysicalSlotRequestBulkChecker();
AllocationContext context =
AllocationContext. [MASK] ()
.addGroup(EV1, EV2)
.withBulkChecker(bulkChecker)
.withPhysicalSlotProvider(
TestingPhysicalSlotProvider.createWithFailingPhysicalSlotCreation(
new FlinkException("test failure")))
.build();
final Map<ExecutionAttemptID, ExecutionSlotAssignment> allocatedSlots =
context.allocateSlotsFor(EV1, EV2);
for (ExecutionSlotAssignment allocatedSlot : allocatedSlots.values()) {
assertThat(allocatedSlot.getLogicalSlotFuture()).isCompletedExceptionally();
}
assertThat(bulkChecker.getBulk().getPendingRequests()).isEmpty();
final Set<SlotRequestId> requests = context.getSlotProvider().getRequests().keySet();
assertThat(context.getSlotProvider().getCancellations().keySet()).isEqualTo(requests);
}
@Test
void testSlotRequestProfileFromExecutionSlotSharingGroup() {
final ResourceProfile resourceProfile1 = ResourceProfile.fromResources(1, 10);
final ResourceProfile resourceProfile2 = ResourceProfile.fromResources(2, 20);
final AllocationContext context =
AllocationContext. [MASK] ()
.addGroupAndResource(resourceProfile1, EV1, EV3)
.addGroupAndResource(resourceProfile2, EV2, EV4)
.build();
context.allocateSlotsFor(EV1, EV2);
assertThat(context.getSlotProvider().getRequests()).hasSize(2);
assertThat(
context.getSlotProvider().getRequests().values().stream()
.map(PhysicalSlotRequest::getSlotProfile)
.map(SlotProfile::getPhysicalSlotResourceProfile)
.collect(Collectors.toList()))
.containsExactlyInAnyOrder(resourceProfile1, resourceProfile2);
}
@Test
void testSlotProviderBatchSlotRequestTimeoutCheckIsDisabled() {
final AllocationContext context = AllocationContext. [MASK] ().build();
assertThat(context.getSlotProvider().isBatchSlotRequestTimeoutCheckEnabled()).isFalse();
}
private static List<ExecutionVertexID> getAssignIds(
Collection<ExecutionSlotAssignment> assignments) {
return assignments.stream()
.map(ExecutionSlotAssignment::getExecutionAttemptId)
.map(ExecutionAttemptID::getExecutionVertexId)
.collect(Collectors.toList());
}
private static AllocationContext createBulkCheckerContextWithEv12GroupAndEv3Group(
PhysicalSlotRequestBulkChecker bulkChecker) {
return AllocationContext. [MASK] ()
.addGroup(EV1, EV2)
.addGroup(EV3)
.withBulkChecker(bulkChecker)
.withPhysicalSlotProvider(
TestingPhysicalSlotProvider.createWithoutImmediatePhysicalSlotCreation())
.build();
}
private static ResourceProfile fulfilOneOfTwoSlotRequestsAndGetPendingProfile(
AllocationContext context, AllocationID allocationId) {
Map<SlotRequestId, PhysicalSlotRequest> requests = context.getSlotProvider().getRequests();
List<SlotRequestId> slotRequestIds = new ArrayList<>(requests.keySet());
assertThat(slotRequestIds).hasSize(2);
SlotRequestId slotRequestId1 = slotRequestIds.get(0);
SlotRequestId slotRequestId2 = slotRequestIds.get(1);
context.getSlotProvider()
.getResultForRequestId(slotRequestId1)
.complete(TestingPhysicalSlot.builder().withAllocationID(allocationId).build());
return requests.get(slotRequestId2).getSlotProfile().getPhysicalSlotResourceProfile();
}
private static ExecutionSlotAssignment getAssignmentByExecutionVertexId(
Map<ExecutionAttemptID, ExecutionSlotAssignment> assignments,
ExecutionVertexID executionVertexId) {
return assignments.entrySet().stream()
.filter(entry -> entry.getKey().getExecutionVertexId().equals(executionVertexId))
.map(Map.Entry::getValue)
.collect(Collectors.toList())
.get(0);
}
private static class AllocationContext {
private final TestingPhysicalSlotProvider slotProvider;
private final TestingSlotSharingStrategy slotSharingStrategy;
private final SlotSharingExecutionSlotAllocator allocator;
private final TestingSharedSlotProfileRetrieverFactory slotProfileRetrieverFactory;
private AllocationContext(
TestingPhysicalSlotProvider slotProvider,
TestingSlotSharingStrategy slotSharingStrategy,
SlotSharingExecutionSlotAllocator allocator,
TestingSharedSlotProfileRetrieverFactory slotProfileRetrieverFactory) {
this.slotProvider = slotProvider;
this.slotSharingStrategy = slotSharingStrategy;
this.allocator = allocator;
this.slotProfileRetrieverFactory = slotProfileRetrieverFactory;
}
private SlotSharingExecutionSlotAllocator getAllocator() {
return allocator;
}
private Map<ExecutionAttemptID, ExecutionSlotAssignment> allocateSlotsFor(
ExecutionVertexID... ids) {
return allocator.allocateSlotsFor(
Arrays.stream(ids)
.map(
executionVertexId ->
createExecutionAttemptId(executionVertexId, 0))
.collect(Collectors.toList()));
}
private TestingSlotSharingStrategy getSlotSharingStrategy() {
return slotSharingStrategy;
}
private TestingPhysicalSlotProvider getSlotProvider() {
return slotProvider;
}
private TestingSharedSlotProfileRetrieverFactory getSlotProfileRetrieverFactory() {
return slotProfileRetrieverFactory;
}
private static Builder [MASK] () {
return new Builder();
}
private static class Builder {
private final Map<ExecutionVertexID[], ResourceProfile> groups = new HashMap<>();
private boolean slotWillBeOccupiedIndefinitely = false;
private PhysicalSlotRequestBulkChecker bulkChecker =
new TestingPhysicalSlotRequestBulkChecker();
private TestingPhysicalSlotProvider physicalSlotProvider =
TestingPhysicalSlotProvider.createWithInfiniteSlotCreation();
private Builder addGroup(ExecutionVertexID... group) {
groups.put(group, ResourceProfile.UNKNOWN);
return this;
}
private Builder addGroupAndResource(
ResourceProfile resourceProfile, ExecutionVertexID... group) {
groups.put(group, resourceProfile);
return this;
}
private Builder setSlotWillBeOccupiedIndefinitely(
boolean slotWillBeOccupiedIndefinitely) {
this.slotWillBeOccupiedIndefinitely = slotWillBeOccupiedIndefinitely;
return this;
}
private Builder withBulkChecker(PhysicalSlotRequestBulkChecker bulkChecker) {
this.bulkChecker = bulkChecker;
return this;
}
private Builder withPhysicalSlotProvider(
TestingPhysicalSlotProvider physicalSlotProvider) {
this.physicalSlotProvider = physicalSlotProvider;
return this;
}
private AllocationContext build() {
TestingSharedSlotProfileRetrieverFactory sharedSlotProfileRetrieverFactory =
new TestingSharedSlotProfileRetrieverFactory();
TestingSlotSharingStrategy slotSharingStrategy =
TestingSlotSharingStrategy.createWithGroupsAndResources(groups);
SlotSharingExecutionSlotAllocator allocator =
new SlotSharingExecutionSlotAllocator(
physicalSlotProvider,
slotWillBeOccupiedIndefinitely,
slotSharingStrategy,
sharedSlotProfileRetrieverFactory,
bulkChecker,
ALLOCATION_TIMEOUT,
executionVertexID -> RESOURCE_PROFILE);
return new AllocationContext(
physicalSlotProvider,
slotSharingStrategy,
allocator,
sharedSlotProfileRetrieverFactory);
}
}
}
private static class TestingSlotSharingStrategy implements SlotSharingStrategy {
private final Map<ExecutionVertexID, ExecutionSlotSharingGroup> executionSlotSharingGroups;
private TestingSlotSharingStrategy(
Map<ExecutionVertexID, ExecutionSlotSharingGroup> executionSlotSharingGroups) {
this.executionSlotSharingGroups = executionSlotSharingGroups;
}
@Override
public ExecutionSlotSharingGroup getExecutionSlotSharingGroup(
ExecutionVertexID executionVertexId) {
return executionSlotSharingGroups.get(executionVertexId);
}
@Override
public Set<ExecutionSlotSharingGroup> getExecutionSlotSharingGroups() {
return new HashSet<>(executionSlotSharingGroups.values());
}
private static TestingSlotSharingStrategy createWithGroupsAndResources(
Map<ExecutionVertexID[], ResourceProfile> groupAndResources) {
Map<ExecutionVertexID, ExecutionSlotSharingGroup> executionSlotSharingGroups =
new HashMap<>();
for (Map.Entry<ExecutionVertexID[], ResourceProfile> groupAndResource :
groupAndResources.entrySet()) {
SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
slotSharingGroup.setResourceProfile(groupAndResource.getValue());
ExecutionSlotSharingGroup executionSlotSharingGroup =
new ExecutionSlotSharingGroup(slotSharingGroup);
for (ExecutionVertexID executionVertexId : groupAndResource.getKey()) {
executionSlotSharingGroup.addVertex(executionVertexId);
executionSlotSharingGroups.put(executionVertexId, executionSlotSharingGroup);
}
}
return new TestingSlotSharingStrategy(executionSlotSharingGroups);
}
}
private static class TestingSharedSlotProfileRetrieverFactory
implements SharedSlotProfileRetrieverFactory {
private final List<Set<ExecutionVertexID>> askedBulks;
private final List<ExecutionSlotSharingGroup> askedGroups;
private TestingSharedSlotProfileRetrieverFactory() {
this.askedBulks = new ArrayList<>();
this.askedGroups = new ArrayList<>();
}
@Override
public SharedSlotProfileRetriever createFromBulk(Set<ExecutionVertexID> bulk) {
askedBulks.add(bulk);
return (group, resourceProfile) -> {
askedGroups.add(group);
return SlotProfileTestingUtils.noLocality(resourceProfile);
};
}
private List<Set<ExecutionVertexID>> getAskedBulks() {
return Collections.unmodifiableList(askedBulks);
}
private List<ExecutionSlotSharingGroup> getAskedGroups() {
return Collections.unmodifiableList(askedGroups);
}
}
}
| newBuilder | java |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.runtime.rest;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.errors. [MASK] ;
import org.apache.kafka.connect.health.ConnectClusterDetails;
import org.apache.kafka.connect.rest.ConnectRestExtension;
import org.apache.kafka.connect.rest.ConnectRestExtensionContext;
import org.apache.kafka.connect.runtime.Herder;
import org.apache.kafka.connect.runtime.health.ConnectClusterDetailsImpl;
import org.apache.kafka.connect.runtime.health.ConnectClusterStateImpl;
import org.apache.kafka.connect.runtime.rest.errors. [MASK] Mapper;
import org.apache.kafka.connect.runtime.rest.util.SSLUtils;
import com.fasterxml.jackson.jakarta.rs.json.JacksonJsonProvider;
import org.eclipse.jetty.ee10.servlet.FilterHolder;
import org.eclipse.jetty.ee10.servlet.ServletContextHandler;
import org.eclipse.jetty.ee10.servlet.ServletHolder;
import org.eclipse.jetty.ee10.servlets.HeaderFilter;
import org.eclipse.jetty.server.Connector;
import org.eclipse.jetty.server.CustomRequestLog;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.Slf4jRequestLogWriter;
import org.eclipse.jetty.server.handler.ContextHandlerCollection;
import org.eclipse.jetty.server.handler.CrossOriginHandler;
import org.eclipse.jetty.server.handler.StatisticsHandler;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.glassfish.hk2.utilities.Binder;
import org.glassfish.hk2.utilities.binding.AbstractBinder;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.server.ServerProperties;
import org.glassfish.jersey.servlet.ServletContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import jakarta.servlet.DispatcherType;
import jakarta.ws.rs.core.UriBuilder;
/**
* Embedded server for the REST API that provides the control plane for Kafka Connect workers.
*/
public abstract class RestServer {
// TODO: This should not be so long. However, due to potentially long rebalances that may have to wait a full
// session timeout to complete, during which we cannot serve some requests. Ideally we could reduce this, but
// we need to consider all possible scenarios this could fail. It might be ok to fail with a timeout in rare cases,
// but currently a worker simply leaving the group can take this long as well.
public static final long DEFAULT_REST_REQUEST_TIMEOUT_MS = TimeUnit.SECONDS.toMillis(90);
public static final long DEFAULT_HEALTH_CHECK_TIMEOUT_MS = TimeUnit.SECONDS.toMillis(10);
private static final Logger log = LoggerFactory.getLogger(RestServer.class);
// Used to distinguish between Admin connectors and regular REST API connectors when binding admin handlers
private static final String ADMIN_SERVER_CONNECTOR_NAME = "Admin";
private static final Pattern LISTENER_PATTERN = Pattern.compile("^(.*)://\\[?([0-9a-zA-Z\\-%._:]*)\\]?:(-?[0-9]+)");
private static final long GRACEFUL_SHUTDOWN_TIMEOUT_MS = 60 * 1000;
private static final String PROTOCOL_HTTP = "http";
private static final String PROTOCOL_HTTPS = "https";
protected final RestServerConfig config;
private final ContextHandlerCollection handlers;
private final Server jettyServer;
private final RequestTimeout requestTimeout;
private List<ConnectRestExtension> connectRestExtensions = Collections.emptyList();
/**
* Create a REST server for this herder using the specified configs.
*/
protected RestServer(RestServerConfig config) {
this.config = config;
List<String> listeners = config.listeners();
List<String> adminListeners = config.adminListeners();
jettyServer = new Server();
handlers = new ContextHandlerCollection();
requestTimeout = new RequestTimeout(DEFAULT_REST_REQUEST_TIMEOUT_MS, DEFAULT_HEALTH_CHECK_TIMEOUT_MS);
createConnectors(listeners, adminListeners);
}
/**
* Adds Jetty connector for each configured listener
*/
public final void createConnectors(List<String> listeners, List<String> adminListeners) {
List<Connector> connectors = new ArrayList<>();
for (String listener : listeners) {
Connector connector = createConnector(listener);
connectors.add(connector);
log.info("Added connector for {}", listener);
}
jettyServer.setConnectors(connectors.toArray(new Connector[0]));
if (adminListeners != null && !adminListeners.isEmpty()) {
for (String adminListener : adminListeners) {
Connector conn = createConnector(adminListener, true);
jettyServer.addConnector(conn);
log.info("Added admin connector for {}", adminListener);
}
}
}
/**
* Creates regular (non-admin) Jetty connector according to configuration
*/
public final Connector createConnector(String listener) {
return createConnector(listener, false);
}
/**
* Creates Jetty connector according to configuration
*/
public final Connector createConnector(String listener, boolean isAdmin) {
Matcher listenerMatcher = LISTENER_PATTERN.matcher(listener);
if (!listenerMatcher.matches())
throw new ConfigException("Listener doesn't have the right format (protocol://hostname:port).");
String protocol = listenerMatcher.group(1).toLowerCase(Locale.ENGLISH);
if (!PROTOCOL_HTTP.equals(protocol) && !PROTOCOL_HTTPS.equals(protocol))
throw new ConfigException(String.format("Listener protocol must be either \"%s\" or \"%s\".", PROTOCOL_HTTP, PROTOCOL_HTTPS));
String hostname = listenerMatcher.group(2);
int port = Integer.parseInt(listenerMatcher.group(3));
ServerConnector connector;
if (PROTOCOL_HTTPS.equals(protocol)) {
SslContextFactory.Server ssl;
if (isAdmin) {
ssl = SSLUtils.createServerSideSslContextFactory(config, RestServerConfig.ADMIN_LISTENERS_HTTPS_CONFIGS_PREFIX);
} else {
ssl = SSLUtils.createServerSideSslContextFactory(config);
}
connector = new ServerConnector(jettyServer, ssl);
if (!isAdmin) {
connector.setName(String.format("%s_%s%d", PROTOCOL_HTTPS, hostname, port));
}
} else {
connector = new ServerConnector(jettyServer);
if (!isAdmin) {
connector.setName(String.format("%s_%s%d", PROTOCOL_HTTP, hostname, port));
}
}
if (isAdmin) {
connector.setName(ADMIN_SERVER_CONNECTOR_NAME);
}
if (!hostname.isEmpty())
connector.setHost(hostname);
connector.setPort(port);
// TODO: do we need this?
connector.setIdleTimeout(requestTimeout.timeoutMs());
return connector;
}
public void initializeServer() {
log.info("Initializing REST server");
Slf4jRequestLogWriter slf4jRequestLogWriter = new Slf4jRequestLogWriter();
slf4jRequestLogWriter.setLoggerName(RestServer.class.getCanonicalName());
CustomRequestLog requestLog = new CustomRequestLog(slf4jRequestLogWriter, CustomRequestLog.EXTENDED_NCSA_FORMAT + " %{ms}T");
jettyServer.setRequestLog(requestLog);
/* Needed for graceful shutdown as per `setStopTimeout` documentation */
StatisticsHandler statsHandler = new StatisticsHandler();
statsHandler.setHandler(handlers);
jettyServer.setHandler(statsHandler);
jettyServer.setStopTimeout(GRACEFUL_SHUTDOWN_TIMEOUT_MS);
jettyServer.setStopAtShutdown(true);
try {
jettyServer.start();
} catch (Exception e) {
throw new [MASK] ("Unable to initialize REST server", e);
}
log.info("REST server listening at " + jettyServer.getURI() + ", advertising URL " + advertisedUrl());
URI adminUrl = adminUrl();
if (adminUrl != null)
log.info("REST admin endpoints at " + adminUrl);
}
protected final void initializeResources() {
log.info("Initializing REST resources");
ResourceConfig resourceConfig = newResourceConfig();
Collection<Class<?>> regularResources = regularResources();
regularResources.forEach(resourceConfig::register);
configureRegularResources(resourceConfig);
List<String> adminListeners = config.adminListeners();
ResourceConfig adminResourceConfig;
if (adminListeners != null && adminListeners.isEmpty()) {
log.info("Skipping adding admin resources");
// set up adminResource but add no handlers to it
adminResourceConfig = resourceConfig;
} else {
if (adminListeners == null) {
log.info("Adding admin resources to main listener");
adminResourceConfig = resourceConfig;
} else {
// TODO: we need to check if these listeners are same as 'listeners'
// TODO: the following code assumes that they are different
log.info("Adding admin resources to admin listener");
adminResourceConfig = newResourceConfig();
}
Collection<Class<?>> adminResources = adminResources();
adminResources.forEach(adminResourceConfig::register);
configureAdminResources(adminResourceConfig);
}
ServletContainer servletContainer = new ServletContainer(resourceConfig);
ServletHolder servletHolder = new ServletHolder(servletContainer);
List<Handler> contextHandlers = new ArrayList<>();
ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
context.setContextPath("/");
context.addServlet(servletHolder, "/*");
contextHandlers.add(context);
ServletContextHandler adminContext = null;
if (adminResourceConfig != resourceConfig) {
adminContext = new ServletContextHandler(ServletContextHandler.SESSIONS);
ServletHolder adminServletHolder = new ServletHolder(new ServletContainer(adminResourceConfig));
adminContext.setContextPath("/");
adminContext.addServlet(adminServletHolder, "/*");
adminContext.setVirtualHosts(List.of("@" + ADMIN_SERVER_CONNECTOR_NAME));
contextHandlers.add(adminContext);
}
String allowedOrigins = config.allowedOrigins();
if (!Utils.isBlank(allowedOrigins)) {
CrossOriginHandler crossOriginHandler = new CrossOriginHandler();
crossOriginHandler.setAllowedOriginPatterns(Set.of(allowedOrigins.split(",")));
String allowedMethods = config.allowedMethods();
if (!Utils.isBlank(allowedMethods)) {
crossOriginHandler.setAllowedMethods(Set.of(allowedMethods.split(",")));
}
// Setting to true matches the previously used CrossOriginFilter
crossOriginHandler.setDeliverPreflightRequests(true);
context.insertHandler(crossOriginHandler);
}
String headerConfig = config.responseHeaders();
if (!Utils.isBlank(headerConfig)) {
configureHttpResponseHeaderFilter(context, headerConfig);
}
handlers.setHandlers(contextHandlers.toArray(new Handler[0]));
try {
context.start();
} catch (Exception e) {
throw new [MASK] ("Unable to initialize REST resources", e);
}
if (adminResourceConfig != resourceConfig) {
try {
log.debug("Starting admin context");
adminContext.start();
} catch (Exception e) {
throw new [MASK] ("Unable to initialize Admin REST resources", e);
}
}
log.info("REST resources initialized; server is started and ready to handle requests");
}
private ResourceConfig newResourceConfig() {
ResourceConfig result = new ResourceConfig();
result.register(new JacksonJsonProvider());
result.register(requestTimeout.binder());
result.register( [MASK] Mapper.class);
result.property(ServerProperties.WADL_FEATURE_DISABLE, true);
return result;
}
/**
* @return the resources that should be registered with the
* standard (i.e., non-admin) listener for this server; may be empty, but not null
*/
protected abstract Collection<Class<?>> regularResources();
/**
* @return the resources that should be registered with the
* admin listener for this server; may be empty, but not null
*/
protected abstract Collection<Class<?>> adminResources();
/**
* Pluggable hook to customize the regular (i.e., non-admin) resources on this server
* after they have been instantiated and registered with the given {@link ResourceConfig}.
* This may be used to, for example, add REST extensions via {@link #registerRestExtensions(Herder, ResourceConfig)}.
* <p>
* <em>N.B.: Classes do <b>not</b> need to register the resources provided in {@link #regularResources()} with
* the {@link ResourceConfig} parameter in this method; they are automatically registered by the parent class.</em>
* @param resourceConfig the {@link ResourceConfig} that the server's regular listeners are registered with; never null
*/
protected void configureRegularResources(ResourceConfig resourceConfig) {
// No-op by default
}
/**
* Pluggable hook to customize the admin resources on this server after they have been instantiated and registered
* with the given {@link ResourceConfig}. This may be used to, for example, add REST extensions via
* {@link #registerRestExtensions(Herder, ResourceConfig)}.
* <p>
* <em>N.B.: Classes do <b>not</b> need to register the resources provided in {@link #adminResources()} with
* the {@link ResourceConfig} parameter in this method; they are automatically registered by the parent class.</em>
* @param adminResourceConfig the {@link ResourceConfig} that the server's admin listeners are registered with; never null
*/
protected void configureAdminResources(ResourceConfig adminResourceConfig) {
// No-op by default
}
public URI serverUrl() {
return jettyServer.getURI();
}
public void stop() {
log.info("Stopping REST server");
try {
if (handlers.isRunning()) {
for (Handler handler : handlers.getHandlers()) {
if (handler != null) {
Utils.closeQuietly(handler::stop, handler.toString());
}
}
}
for (ConnectRestExtension connectRestExtension : connectRestExtensions) {
try {
connectRestExtension.close();
} catch (IOException e) {
log.warn("Error while invoking close on " + connectRestExtension.getClass(), e);
}
}
jettyServer.stop();
jettyServer.join();
} catch (Exception e) {
throw new [MASK] ("Unable to stop REST server", e);
} finally {
try {
jettyServer.destroy();
} catch (Exception e) {
log.error("Unable to destroy REST server", e);
}
}
log.info("REST server stopped");
}
/**
* Get the URL to advertise to other workers and clients. This uses the default connector from the embedded Jetty
* server, unless overrides for advertised hostname and/or port are provided via configs. {@link #initializeServer()}
* must be invoked successfully before calling this method.
*/
public URI advertisedUrl() {
UriBuilder builder = UriBuilder.fromUri(jettyServer.getURI());
String advertisedSecurityProtocol = determineAdvertisedProtocol();
ServerConnector serverConnector = findConnector(advertisedSecurityProtocol);
builder.scheme(advertisedSecurityProtocol);
String advertisedHostname = config.advertisedHostName();
if (advertisedHostname != null && !advertisedHostname.isEmpty())
builder.host(advertisedHostname);
else if (serverConnector != null && serverConnector.getHost() != null && !serverConnector.getHost().isEmpty())
builder.host(serverConnector.getHost());
Integer advertisedPort = config.advertisedPort();
if (advertisedPort != null)
builder.port(advertisedPort);
else if (serverConnector != null && serverConnector.getPort() > 0)
builder.port(serverConnector.getPort());
else if (serverConnector != null && serverConnector.getLocalPort() > 0)
builder.port(serverConnector.getLocalPort());
log.info("Advertised URI: {}", builder.build());
return builder.build();
}
/**
* @return the admin url for this worker. Can be null if admin endpoints are disabled.
*/
public URI adminUrl() {
ServerConnector adminConnector = null;
for (Connector connector : jettyServer.getConnectors()) {
if (ADMIN_SERVER_CONNECTOR_NAME.equals(connector.getName()))
adminConnector = (ServerConnector) connector;
}
if (adminConnector == null) {
List<String> adminListeners = config.adminListeners();
if (adminListeners == null) {
return advertisedUrl();
} else if (adminListeners.isEmpty()) {
return null;
} else {
log.error("No admin connector found for listeners {}", adminListeners);
return null;
}
}
UriBuilder builder = UriBuilder.fromUri(jettyServer.getURI());
builder.port(adminConnector.getLocalPort());
return builder.build();
}
// For testing only
public void requestTimeout(long requestTimeoutMs) {
this.requestTimeout.timeoutMs(requestTimeoutMs);
}
// For testing only
public void healthCheckTimeout(long healthCheckTimeoutMs) {
this.requestTimeout.healthCheckTimeoutMs(healthCheckTimeoutMs);
}
String determineAdvertisedProtocol() {
String advertisedSecurityProtocol = config.advertisedListener();
if (advertisedSecurityProtocol == null) {
String listeners = config.rawListeners();
if (listeners == null)
return PROTOCOL_HTTP;
else
listeners = listeners.toLowerCase(Locale.ENGLISH);
if (listeners.contains(String.format("%s://", PROTOCOL_HTTP)))
return PROTOCOL_HTTP;
else if (listeners.contains(String.format("%s://", PROTOCOL_HTTPS)))
return PROTOCOL_HTTPS;
else
return PROTOCOL_HTTP;
} else {
return advertisedSecurityProtocol.toLowerCase(Locale.ENGLISH);
}
}
/**
* Locate a Jetty connector for the standard (non-admin) REST API that uses the given protocol.
* @param protocol the protocol for the connector (e.g., "http" or "https").
* @return a {@link ServerConnector} for the server that uses the requested protocol, or
* {@code null} if none exist.
*/
ServerConnector findConnector(String protocol) {
for (Connector connector : jettyServer.getConnectors()) {
String connectorName = connector.getName();
// We set the names for these connectors when instantiating them, beginning with the
// protocol for the connector and then an underscore ("_"). We rely on that format here
// when trying to locate a connector with the requested protocol; if the naming format
// for the connectors we create is ever changed, we'll need to adjust the logic here
// accordingly.
if (connectorName.startsWith(protocol + "_") && !ADMIN_SERVER_CONNECTOR_NAME.equals(connectorName))
return (ServerConnector) connector;
}
return null;
}
protected final void registerRestExtensions(Herder herder, ResourceConfig resourceConfig) {
connectRestExtensions = herder.plugins().newPlugins(
config.restExtensions(),
config, ConnectRestExtension.class);
long herderRequestTimeoutMs = DEFAULT_REST_REQUEST_TIMEOUT_MS;
Integer rebalanceTimeoutMs = config.rebalanceTimeoutMs();
if (rebalanceTimeoutMs != null) {
herderRequestTimeoutMs = Math.min(herderRequestTimeoutMs, rebalanceTimeoutMs.longValue());
}
ConnectClusterDetails connectClusterDetails = new ConnectClusterDetailsImpl(
herder.kafkaClusterId()
);
ConnectRestExtensionContext connectRestExtensionContext =
new ConnectRestExtensionContextImpl(
new ConnectRestConfigurable(resourceConfig),
new ConnectClusterStateImpl(herderRequestTimeoutMs, connectClusterDetails, herder)
);
for (ConnectRestExtension connectRestExtension : connectRestExtensions) {
connectRestExtension.register(connectRestExtensionContext);
}
}
/**
* Register header filter to ServletContextHandler.
* @param context The servlet context handler
*/
protected void configureHttpResponseHeaderFilter(ServletContextHandler context, String headerConfig) {
FilterHolder headerFilterHolder = new FilterHolder(HeaderFilter.class);
headerFilterHolder.setInitParameter("headerConfig", headerConfig);
context.addFilter(headerFilterHolder, "/*", EnumSet.of(DispatcherType.REQUEST));
}
private static class RequestTimeout implements RestRequestTimeout {
private final RequestBinder binder;
private volatile long timeoutMs;
private volatile long healthCheckTimeoutMs;
public RequestTimeout(long initialTimeoutMs, long initialHealthCheckTimeoutMs) {
this.timeoutMs = initialTimeoutMs;
this.healthCheckTimeoutMs = initialHealthCheckTimeoutMs;
this.binder = new RequestBinder();
}
@Override
public long timeoutMs() {
return timeoutMs;
}
@Override
public long healthCheckTimeoutMs() {
return healthCheckTimeoutMs;
}
public void timeoutMs(long timeoutMs) {
this.timeoutMs = timeoutMs;
}
public void healthCheckTimeoutMs(long healthCheckTimeoutMs) {
this.healthCheckTimeoutMs = healthCheckTimeoutMs;
}
public Binder binder() {
return binder;
}
private class RequestBinder extends AbstractBinder {
@Override
protected void configure() {
bind(RequestTimeout.this).to(RestRequestTimeout.class);
}
}
}
}
| ConnectException | java |
/*
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.logger;
import java.util. [MASK] ;
import java.util.Iterator;
import java.util.Map;
import java.util.ResourceBundle;
import java.util.ServiceLoader;
import java.util.function.BooleanSupplier;
import java.util.function.Function;
import java.util.function.Supplier;
import java.lang.System.LoggerFinder;
import java.lang.System.Logger;
import java.lang.ref.WeakReference;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import jdk.internal.misc.InnocuousThread;
import jdk.internal.misc.VM;
import sun.util.logging.PlatformLogger;
import jdk.internal.logger.LazyLoggers.LazyLoggerAccessor;
/**
* The BootstrapLogger class handles all the logic needed by Lazy Loggers
* to delay the creation of System.Logger instances until the VM is booted.
* By extension - it also contains the logic that will delay the creation
* of JUL Loggers until the LogManager is initialized by the application, in
* the common case where JUL is the default and there is no custom JUL
* configuration.
*
* A BootstrapLogger instance is both a Logger and a
* PlatformLogger.Bridge instance, which will put all Log messages in a queue
* until the VM is booted.
* Once the VM is booted, it obtain the real System.Logger instance from the
* LoggerFinder and flushes the message to the queue.
*
* There are a few caveat:
* - the queue may not be flush until the next message is logged after
* the VM is booted
* - while the BootstrapLogger is active, the default implementation
* for all convenience methods is used
* - PlatformLogger.setLevel calls are ignored
*
*
*/
public final class BootstrapLogger implements Logger, PlatformLogger.Bridge,
PlatformLogger.ConfigurableBridge {
// We use the BootstrapExecutors class to submit delayed messages
// to an independent InnocuousThread which will ensure that
// delayed log events will be clearly identified as messages that have
// been delayed during the boot sequence.
private static class BootstrapExecutors implements ThreadFactory {
// Maybe that should be made configurable with system properties.
static final long KEEP_EXECUTOR_ALIVE_SECONDS = 30;
// The BootstrapMessageLoggerTask is a Runnable which keeps
// a hard ref to the ExecutorService that owns it.
// This ensure that the ExecutorService is not gc'ed until the thread
// has stopped running.
private static class BootstrapMessageLoggerTask implements Runnable {
ExecutorService owner;
Runnable run;
public BootstrapMessageLoggerTask(ExecutorService owner, Runnable r) {
this.owner = owner;
this.run = r;
}
@Override
public void run() {
try {
run.run();
} finally {
owner = null; // allow the ExecutorService to be gced.
}
}
}
private static volatile WeakReference<ExecutorService> executorRef;
private static ExecutorService getExecutor() {
WeakReference<ExecutorService> ref = executorRef;
ExecutorService executor = ref == null ? null : ref.get();
if (executor != null) return executor;
synchronized (BootstrapExecutors.class) {
ref = executorRef;
executor = ref == null ? null : ref.get();
if (executor == null) {
executor = new ThreadPoolExecutor(0, 1,
KEEP_EXECUTOR_ALIVE_SECONDS, TimeUnit.SECONDS,
new LinkedBlockingQueue<>(), new BootstrapExecutors());
}
// The executor service will be elligible for gc
// KEEP_EXECUTOR_ALIVE_SECONDS seconds (30s)
// after the execution of its last pending task.
executorRef = new WeakReference<>(executor);
return executorRef.get();
}
}
@Override
public Thread newThread(Runnable r) {
ExecutorService owner = getExecutor();
Thread thread = InnocuousThread.newThread(new BootstrapMessageLoggerTask(owner, r));
thread.setName("BootstrapMessageLoggerTask-" + thread.getName());
thread.setDaemon(true);
return thread;
}
static void submit(Runnable r) {
getExecutor().execute(r);
}
// This is used by tests.
static void join(Runnable r) {
try {
getExecutor().submit(r).get();
} catch (InterruptedException | ExecutionException ex) {
// should not happen
throw new RuntimeException(ex);
}
}
// This is used by tests.
static void awaitPendingTasks() {
WeakReference<ExecutorService> ref = executorRef;
ExecutorService executor = ref == null ? null : ref.get();
if (ref == null) {
synchronized(BootstrapExecutors.class) {
ref = executorRef;
executor = ref == null ? null : ref.get();
}
}
if (executor != null) {
// since our executor uses a FIFO and has a single thread
// then awaiting the execution of its pending tasks can be done
// simply by registering a new task and waiting until it
// completes. This of course would not work if we were using
// several threads, but we don't.
join(()->{});
}
}
// This is used by tests.
static boolean isAlive() {
WeakReference<ExecutorService> ref = executorRef;
if (ref != null && !ref.refersTo(null)) return true;
synchronized (BootstrapExecutors.class) {
ref = executorRef;
return ref != null && !ref.refersTo(null);
}
}
// The pending log event queue. The first event is the head, and
// new events are added at the tail
static LogEvent head, tail;
static void enqueue(LogEvent event) {
if (event.next != null) return;
synchronized (BootstrapExecutors.class) {
if (event.next != null) return;
event.next = event;
if (tail == null) {
head = tail = event;
} else {
tail.next = event;
tail = event;
}
}
}
static void flush() {
LogEvent event;
// drain the whole queue
synchronized(BootstrapExecutors.class) {
event = head;
head = tail = null;
}
while(event != null) {
LogEvent.log(event);
synchronized(BootstrapExecutors.class) {
LogEvent prev = event;
event = (event.next == event ? null : event.next);
prev.next = null;
}
}
}
}
// The accessor in which this logger is temporarily set.
final LazyLoggerAccessor holder;
// tests whether the logger is invoked by the loading thread before
// the LoggerFinder is loaded; can be null;
final BooleanSupplier isLoadingThread;
// returns true if the logger is invoked by the loading thread before the
// LoggerFinder service is loaded
boolean isLoadingThread() {
return isLoadingThread != null && isLoadingThread.getAsBoolean();
}
BootstrapLogger(LazyLoggerAccessor holder, BooleanSupplier isLoadingThread) {
this.holder = holder;
this.isLoadingThread = isLoadingThread;
}
// Temporary data object storing log events
// It would be nice to use a Consumer<Logger> instead of a LogEvent.
// This way we could simply do things like:
// push((logger) -> logger.log(level, msg));
// Unfortunately, if we come to here it means we are in the bootsraping
// phase where using lambdas is not safe yet - so we have to use
// a data object instead...
//
static final class LogEvent {
// only one of these two levels should be non null
final Level level;
final PlatformLogger.Level platformLevel;
final BootstrapLogger bootstrap;
final ResourceBundle bundle;
final String msg;
final Throwable thrown;
final Object[] params;
final Supplier<String> msgSupplier;
final String sourceClass;
final String sourceMethod;
final long timeMillis;
final long nanoAdjustment;
// because logging a message may entail calling toString() on
// the parameters etc... we need to store the context of the
// caller who logged the message - so that we can reuse it when
// we finally log the message.
// The next event in the queue
LogEvent next;
@SuppressWarnings("removal")
private LogEvent(BootstrapLogger bootstrap, Level level,
ResourceBundle bundle, String msg,
Throwable thrown, Object[] params) {
this.timeMillis = System.currentTimeMillis();
this.nanoAdjustment = VM.getNanoTimeAdjustment(timeMillis);
this.level = level;
this.platformLevel = null;
this.bundle = bundle;
this.msg = msg;
this.msgSupplier = null;
this.thrown = thrown;
this.params = params;
this.sourceClass = null;
this.sourceMethod = null;
this.bootstrap = bootstrap;
}
@SuppressWarnings("removal")
private LogEvent(BootstrapLogger bootstrap, Level level,
Supplier<String> msgSupplier,
Throwable thrown, Object[] params) {
this.timeMillis = System.currentTimeMillis();
this.nanoAdjustment = VM.getNanoTimeAdjustment(timeMillis);
this.level = level;
this.platformLevel = null;
this.bundle = null;
this.msg = null;
this.msgSupplier = msgSupplier;
this.thrown = thrown;
this.params = params;
this.sourceClass = null;
this.sourceMethod = null;
this.bootstrap = bootstrap;
}
@SuppressWarnings("removal")
private LogEvent(BootstrapLogger bootstrap,
PlatformLogger.Level platformLevel,
String sourceClass, String sourceMethod,
ResourceBundle bundle, String msg,
Throwable thrown, Object[] params) {
this.timeMillis = System.currentTimeMillis();
this.nanoAdjustment = VM.getNanoTimeAdjustment(timeMillis);
this.level = null;
this.platformLevel = platformLevel;
this.bundle = bundle;
this.msg = msg;
this.msgSupplier = null;
this.thrown = thrown;
this.params = params;
this.sourceClass = sourceClass;
this.sourceMethod = sourceMethod;
this.bootstrap = bootstrap;
}
@SuppressWarnings("removal")
private LogEvent(BootstrapLogger bootstrap,
PlatformLogger.Level platformLevel,
String sourceClass, String sourceMethod,
Supplier<String> msgSupplier,
Throwable thrown, Object[] params) {
this.timeMillis = System.currentTimeMillis();
this.nanoAdjustment = VM.getNanoTimeAdjustment(timeMillis);
this.level = null;
this.platformLevel = platformLevel;
this.bundle = null;
this.msg = null;
this.msgSupplier = msgSupplier;
this.thrown = thrown;
this.params = params;
this.sourceClass = sourceClass;
this.sourceMethod = sourceMethod;
this.bootstrap = bootstrap;
}
// Log this message in the given logger. Do not call directly.
// Use LogEvent.log(LogEvent, logger) instead.
private void log(Logger logger) {
assert platformLevel == null && level != null;
//new Exception("logging delayed message").printStackTrace();
if (msgSupplier != null) {
if (thrown != null) {
logger.log(level, msgSupplier, thrown);
} else {
logger.log(level, msgSupplier);
}
} else {
// BootstrapLoggers are never localized so we can safely
// use the method that takes a ResourceBundle parameter
// even when that resource bundle is null.
if (thrown != null) {
logger.log(level, bundle, msg, thrown);
} else {
logger.log(level, bundle, msg, params);
}
}
}
// Log this message in the given logger. Do not call directly.
// Use LogEvent.doLog(LogEvent, logger) instead.
private void log(PlatformLogger.Bridge logger) {
assert platformLevel != null && level == null;
if (sourceClass == null) {
if (msgSupplier != null) {
if (thrown != null) {
logger.log(platformLevel, thrown, msgSupplier);
} else {
logger.log(platformLevel, msgSupplier);
}
} else {
// BootstrapLoggers are never localized so we can safely
// use the method that takes a ResourceBundle parameter
// even when that resource bundle is null.
if (thrown != null) {
logger.logrb(platformLevel, bundle, msg, thrown);
} else {
logger.logrb(platformLevel, bundle, msg, params);
}
}
} else {
if (msgSupplier != null) {
if (thrown != null) {
logger.logp(platformLevel, sourceClass, sourceMethod, thrown, msgSupplier);
} else {
logger.logp(platformLevel, sourceClass, sourceMethod, msgSupplier);
}
} else {
// BootstrapLoggers are never localized so we can safely
// use the method that takes a ResourceBundle parameter
// even when that resource bundle is null.
if (thrown != null) {
logger.logrb(platformLevel, sourceClass, sourceMethod, bundle, msg, thrown);
} else {
logger.logrb(platformLevel, sourceClass, sourceMethod, bundle, msg, params);
}
}
}
}
// non default methods from Logger interface
static LogEvent valueOf(BootstrapLogger bootstrap, Level level,
ResourceBundle bundle, String key, Throwable thrown) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), bundle, key,
thrown, null);
}
static LogEvent valueOf(BootstrapLogger bootstrap, Level level,
ResourceBundle bundle, String format, Object[] params) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), bundle, format,
null, params);
}
static LogEvent valueOf(BootstrapLogger bootstrap, Level level,
Supplier<String> msgSupplier, Throwable thrown) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level),
Objects.requireNonNull(msgSupplier), thrown, null);
}
static LogEvent valueOf(BootstrapLogger bootstrap, Level level,
Supplier<String> msgSupplier) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level),
Objects.requireNonNull(msgSupplier), null, null);
}
static void log(LogEvent log, Logger logger) {
// not sure we can actually use lambda here. We may need to create
// an anonymous class. Although if we reach here, then it means
// the VM is booted.
BootstrapExecutors.submit(() -> log.log(logger));
}
// non default methods from PlatformLogger.Bridge interface
static LogEvent valueOf(BootstrapLogger bootstrap,
PlatformLogger.Level level, String msg) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), null, null, null,
msg, null, null);
}
static LogEvent valueOf(BootstrapLogger bootstrap, PlatformLogger.Level level,
String msg, Throwable thrown) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), null, null, null, msg, thrown, null);
}
static LogEvent valueOf(BootstrapLogger bootstrap, PlatformLogger.Level level,
String msg, Object[] params) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), null, null, null, msg, null, params);
}
static LogEvent valueOf(BootstrapLogger bootstrap, PlatformLogger.Level level,
Supplier<String> msgSupplier) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), null, null, msgSupplier, null, null);
}
static LogEvent vaueOf(BootstrapLogger bootstrap, PlatformLogger.Level level,
Supplier<String> msgSupplier,
Throwable thrown) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), null, null,
msgSupplier, thrown, null);
}
static LogEvent valueOf(BootstrapLogger bootstrap, PlatformLogger.Level level,
String sourceClass, String sourceMethod,
ResourceBundle bundle, String msg, Object[] params) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), sourceClass,
sourceMethod, bundle, msg, null, params);
}
static LogEvent valueOf(BootstrapLogger bootstrap, PlatformLogger.Level level,
String sourceClass, String sourceMethod,
ResourceBundle bundle, String msg, Throwable thrown) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), sourceClass,
sourceMethod, bundle, msg, thrown, null);
}
static LogEvent valueOf(BootstrapLogger bootstrap, PlatformLogger.Level level,
String sourceClass, String sourceMethod,
Supplier<String> msgSupplier, Throwable thrown) {
return new LogEvent(Objects.requireNonNull(bootstrap),
Objects.requireNonNull(level), sourceClass,
sourceMethod, msgSupplier, thrown, null);
}
static void log(LogEvent log, PlatformLogger.Bridge logger) {
BootstrapExecutors.submit(() -> log.log(logger));
}
static void log(LogEvent event) {
event.bootstrap.flush(event);
}
}
// Push a log event at the end of the pending LogEvent queue.
void push(LogEvent log) {
BootstrapExecutors.enqueue(log);
// if the queue has been flushed just before we entered
// the synchronized block we need to flush it again.
checkBootstrapping();
}
// Flushes the queue of pending LogEvents to the logger.
void flush(LogEvent event) {
assert event.bootstrap == this;
if (event.platformLevel != null) {
PlatformLogger.Bridge concrete = holder.getConcretePlatformLogger(this);
LogEvent.log(event, concrete);
} else {
Logger concrete = holder.getConcreteLogger(this);
LogEvent.log(event, concrete);
}
}
/**
* The name of this logger. This is the name of the actual logger for which
* this logger acts as a temporary proxy.
* @return The logger name.
*/
@Override
public String getName() {
return holder.name;
}
/**
* Check whether the VM is still bootstrapping, and if not, arranges
* for this logger's holder to create the real logger and flush the
* pending event queue.
* @return true if the VM is still bootstrapping.
*/
boolean checkBootstrapping() {
if (isBooted() && !isLoadingThread()) {
BootstrapExecutors.flush();
holder.getConcreteLogger(this);
return false;
}
return true;
}
// ----------------------------------
// Methods from Logger
// ----------------------------------
@Override
public boolean isLoggable(Level level) {
if (checkBootstrapping()) {
return level.getSeverity() >= Level.INFO.getSeverity();
} else {
final Logger spi = holder.wrapped();
return spi.isLoggable(level);
}
}
@Override
public void log(Level level, ResourceBundle bundle, String key, Throwable thrown) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, bundle, key, thrown));
} else {
final Logger spi = holder.wrapped();
spi.log(level, bundle, key, thrown);
}
}
@Override
public void log(Level level, ResourceBundle bundle, String format, Object... params) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, bundle, format, params));
} else {
final Logger spi = holder.wrapped();
spi.log(level, bundle, format, params);
}
}
@Override
public void log(Level level, String msg, Throwable thrown) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, null, msg, thrown));
} else {
final Logger spi = holder.wrapped();
spi.log(level, msg, thrown);
}
}
@Override
public void log(Level level, String format, Object... params) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, null, format, params));
} else {
final Logger spi = holder.wrapped();
spi.log(level, format, params);
}
}
@Override
public void log(Level level, Supplier<String> msgSupplier) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, msgSupplier));
} else {
final Logger spi = holder.wrapped();
spi.log(level, msgSupplier);
}
}
@Override
public void log(Level level, Object obj) {
if (checkBootstrapping()) {
Logger.super.log(level, obj);
} else {
final Logger spi = holder.wrapped();
spi.log(level, obj);
}
}
@Override
public void log(Level level, String msg) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, null, msg, (Object[])null));
} else {
final Logger spi = holder.wrapped();
spi.log(level, msg);
}
}
@Override
public void log(Level level, Supplier<String> msgSupplier, Throwable thrown) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, msgSupplier, thrown));
} else {
final Logger spi = holder.wrapped();
spi.log(level, msgSupplier, thrown);
}
}
// ----------------------------------
// Methods from PlatformLogger.Bridge
// ----------------------------------
@Override
public boolean isLoggable(PlatformLogger.Level level) {
if (checkBootstrapping()) {
return level.intValue() >= PlatformLogger.Level.INFO.intValue();
} else {
final PlatformLogger.Bridge spi = holder.platform();
return spi.isLoggable(level);
}
}
@Override
public boolean isEnabled() {
if (checkBootstrapping()) {
return true;
} else {
final PlatformLogger.Bridge spi = holder.platform();
return spi.isEnabled();
}
}
@Override
public void log(PlatformLogger.Level level, String msg) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, msg));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.log(level, msg);
}
}
@Override
public void log(PlatformLogger.Level level, String msg, Throwable thrown) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, msg, thrown));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.log(level, msg, thrown);
}
}
@Override
public void log(PlatformLogger.Level level, String msg, Object... params) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, msg, params));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.log(level, msg, params);
}
}
@Override
public void log(PlatformLogger.Level level, Supplier<String> msgSupplier) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, msgSupplier));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.log(level, msgSupplier);
}
}
@Override
public void log(PlatformLogger.Level level, Throwable thrown,
Supplier<String> msgSupplier) {
if (checkBootstrapping()) {
push(LogEvent.vaueOf(this, level, msgSupplier, thrown));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.log(level, thrown, msgSupplier);
}
}
@Override
public void logp(PlatformLogger.Level level, String sourceClass,
String sourceMethod, String msg) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, sourceClass, sourceMethod, null,
msg, (Object[])null));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.logp(level, sourceClass, sourceMethod, msg);
}
}
@Override
public void logp(PlatformLogger.Level level, String sourceClass,
String sourceMethod, Supplier<String> msgSupplier) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, sourceClass, sourceMethod, msgSupplier, null));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.logp(level, sourceClass, sourceMethod, msgSupplier);
}
}
@Override
public void logp(PlatformLogger.Level level, String sourceClass,
String sourceMethod, String msg, Object... params) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, sourceClass, sourceMethod, null, msg, params));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.logp(level, sourceClass, sourceMethod, msg, params);
}
}
@Override
public void logp(PlatformLogger.Level level, String sourceClass,
String sourceMethod, String msg, Throwable thrown) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, sourceClass, sourceMethod, null, msg, thrown));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.logp(level, sourceClass, sourceMethod, msg, thrown);
}
}
@Override
public void logp(PlatformLogger.Level level, String sourceClass,
String sourceMethod, Throwable thrown, Supplier<String> msgSupplier) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, sourceClass, sourceMethod, msgSupplier, thrown));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.logp(level, sourceClass, sourceMethod, thrown, msgSupplier);
}
}
@Override
public void logrb(PlatformLogger.Level level, String sourceClass,
String sourceMethod, ResourceBundle bundle, String msg, Object... params) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, sourceClass, sourceMethod, bundle, msg, params));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.logrb(level, sourceClass, sourceMethod, bundle, msg, params);
}
}
@Override
public void logrb(PlatformLogger.Level level, String sourceClass,
String sourceMethod, ResourceBundle bundle, String msg, Throwable thrown) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, sourceClass, sourceMethod, bundle, msg, thrown));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.logrb(level, sourceClass, sourceMethod, bundle, msg, thrown);
}
}
@Override
public void logrb(PlatformLogger.Level level, ResourceBundle bundle,
String msg, Object... params) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, null, null, bundle, msg, params));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.logrb(level, bundle, msg, params);
}
}
@Override
public void logrb(PlatformLogger.Level level, ResourceBundle bundle, String msg, Throwable thrown) {
if (checkBootstrapping()) {
push(LogEvent.valueOf(this, level, null, null, bundle, msg, thrown));
} else {
final PlatformLogger.Bridge spi = holder.platform();
spi.logrb(level, bundle, msg, thrown);
}
}
@Override
public LoggerConfiguration getLoggerConfiguration() {
if (checkBootstrapping()) {
// This practically means that PlatformLogger.setLevel()
// calls will be ignored if the VM is still bootstrapping. We could
// attempt to fix that but is it worth it?
return PlatformLogger.ConfigurableBridge.super.getLoggerConfiguration();
} else {
final PlatformLogger.Bridge spi = holder.platform();
return PlatformLogger.ConfigurableBridge.getLoggerConfiguration(spi);
}
}
// This BooleanSupplier is a hook for tests - so that we can simulate
// what would happen before the VM is booted.
private static volatile BooleanSupplier isBooted;
public static boolean isBooted() {
if (isBooted != null) return isBooted.getAsBoolean();
else return VM.isBooted();
}
// A bit of magic. We try to find out the nature of the logging
// backend without actually loading it.
private static enum LoggingBackend {
// There is no LoggerFinder and JUL is not present
NONE(true),
// There is no LoggerFinder, but we have found a
// JdkLoggerFinder installed (which means JUL is present),
// and we haven't found any custom configuration for JUL.
// Until LogManager is initialized we can use a simple console
// logger.
JUL_DEFAULT(false),
// Same as above, except that we have found a custom configuration
// for JUL. We cannot use the simple console logger in this case.
JUL_WITH_CONFIG(true),
// We have found a custom LoggerFinder.
CUSTOM(true);
final boolean useLoggerFinder;
private LoggingBackend(boolean useLoggerFinder) {
this.useLoggerFinder = useLoggerFinder;
}
};
// The purpose of this class is to delay the initialization of
// the detectedBackend field until it is actually read.
// We do not want this field to get initialized if VM.isBooted() is false.
@SuppressWarnings("removal")
private static final class DetectBackend {
static final LoggingBackend detectedBackend = detectBackend();
static LoggingBackend detectBackend() {
final Iterator<LoggerFinder> iterator =
ServiceLoader.load(LoggerFinder.class, ClassLoader.getSystemClassLoader())
.iterator();
if (iterator.hasNext()) {
return LoggingBackend.CUSTOM; // Custom Logger Provider is registered
}
// No custom logger provider: we will be using the default
// backend.
final Iterator<DefaultLoggerFinder> iterator2 =
ServiceLoader.loadInstalled(DefaultLoggerFinder.class)
.iterator();
if (iterator2.hasNext()) {
// LoggingProviderImpl is registered. The default
// implementation is java.util.logging
String cname = System.getProperty("java.util.logging.config.class");
String fname = System.getProperty("java.util.logging.config.file");
return (cname != null || fname != null)
? LoggingBackend.JUL_WITH_CONFIG
: LoggingBackend.JUL_DEFAULT;
} else {
// SimpleConsoleLogger is used
return LoggingBackend.NONE;
}
}
}
// We will use a temporary SurrogateLogger if
// the logging backend is JUL, there is no custom config,
// and the LogManager has not been initialized yet.
private static boolean useSurrogateLoggers() {
// being paranoid: this should already have been checked
if (!isBooted()) return true;
return DetectBackend.detectedBackend == LoggingBackend.JUL_DEFAULT
&& !logManagerConfigured;
}
// We will use lazy loggers if:
// - the VM is not yet booted
// - the logging backend is a custom backend
// - the logging backend is JUL, there is no custom config,
// and the LogManager has not been initialized yet.
public static boolean useLazyLoggers() {
// Note: avoid triggering the initialization of the DetectBackend class
// while holding the BootstrapLogger class monitor
if (!BootstrapLogger.isBooted() ||
DetectBackend.detectedBackend == LoggingBackend.CUSTOM) {
return true;
}
synchronized (BootstrapLogger.class) {
return useSurrogateLoggers();
}
}
// Called by LazyLoggerAccessor. This method will determine whether
// to create a BootstrapLogger (if the VM is not yet booted),
// a SurrogateLogger (if JUL is the default backend and there
// is no custom JUL configuration and LogManager is not yet initialized),
// or a logger returned by the loaded LoggerFinder (all other cases).
static Logger getLogger(LazyLoggerAccessor accessor, BooleanSupplier isLoading) {
if (!BootstrapLogger.isBooted() || isLoading != null && isLoading.getAsBoolean()) {
return new BootstrapLogger(accessor, isLoading);
} else {
if (useSurrogateLoggers()) {
// JUL is the default backend, there is no custom configuration,
// LogManager has not been used.
synchronized(BootstrapLogger.class) {
if (useSurrogateLoggers()) {
return createSurrogateLogger(accessor);
}
}
}
// Already booted. Return the real logger.
return accessor.createLogger();
}
}
// trigger class initialization outside of holding lock
static void ensureBackendDetected() {
assert VM.isBooted() : "VM is not booted";
// triggers detection of the backend
var backend = DetectBackend.detectedBackend;
}
// If the backend is JUL, and there is no custom configuration, and
// nobody has attempted to call LogManager.getLogManager() yet, then
// we can temporarily substitute JUL Logger with SurrogateLoggers,
// which avoids the cost of actually loading up the LogManager...
// The RedirectedLoggers class has the logic to create such surrogate
// loggers, and to possibly replace them with real JUL loggers if
// someone calls LogManager.getLogManager().
static final class RedirectedLoggers implements
Function<LazyLoggerAccessor, SurrogateLogger> {
// all accesses must be synchronized on the outer BootstrapLogger.class
final Map<LazyLoggerAccessor, SurrogateLogger> redirectedLoggers =
new [MASK] <>();
// all accesses must be synchronized on the outer BootstrapLogger.class
// The redirectLoggers map will be cleared when LogManager is initialized.
boolean cleared;
@Override
// all accesses must be synchronized on the outer BootstrapLogger.class
public SurrogateLogger apply(LazyLoggerAccessor t) {
if (cleared) throw new IllegalStateException("LoggerFinder already initialized");
return SurrogateLogger.makeSurrogateLogger(t.getLoggerName());
}
// all accesses must be synchronized on the outer BootstrapLogger.class
SurrogateLogger get(LazyLoggerAccessor a) {
if (cleared) throw new IllegalStateException("LoggerFinder already initialized");
return redirectedLoggers.computeIfAbsent(a, this);
}
// all accesses must be synchronized on the outer BootstrapLogger.class
Map<LazyLoggerAccessor, SurrogateLogger> drainLoggersMap() {
if (redirectedLoggers.isEmpty()) return null;
if (cleared) throw new IllegalStateException("LoggerFinder already initialized");
final Map<LazyLoggerAccessor, SurrogateLogger> accessors = new [MASK] <>(redirectedLoggers);
redirectedLoggers.clear();
cleared = true;
return accessors;
}
static void replaceSurrogateLoggers(Map<LazyLoggerAccessor, SurrogateLogger> accessors) {
// When the backend is JUL we want to force the creation of
// JUL loggers here: some tests are expecting that the
// PlatformLogger will create JUL loggers as soon as the
// LogManager is initialized.
//
// If the backend is not JUL then we can delay the re-creation
// of the wrapped logger until they are next accessed.
//
final LoggingBackend detectedBackend = DetectBackend.detectedBackend;
final boolean lazy = detectedBackend != LoggingBackend.JUL_DEFAULT
&& detectedBackend != LoggingBackend.JUL_WITH_CONFIG;
for (Map.Entry<LazyLoggerAccessor, SurrogateLogger> a : accessors.entrySet()) {
a.getKey().release(a.getValue(), !lazy);
}
}
// all accesses must be synchronized on the outer BootstrapLogger.class
static final RedirectedLoggers INSTANCE = new RedirectedLoggers();
}
static synchronized Logger createSurrogateLogger(LazyLoggerAccessor a) {
// accesses to RedirectedLoggers is synchronized on BootstrapLogger.class
return RedirectedLoggers.INSTANCE.get(a);
}
private static volatile boolean logManagerConfigured;
private static synchronized Map<LazyLoggerAccessor, SurrogateLogger>
releaseSurrogateLoggers() {
// first check whether there's a chance that we have used
// surrogate loggers; Will be false if logManagerConfigured is already
// true.
final boolean releaseSurrogateLoggers = useSurrogateLoggers();
// then sets the flag that tells that the log manager is configured
logManagerConfigured = true;
// finally retrieves all surrogate loggers that should be replaced
// by real JUL loggers, and return them in the form of a redirected
// loggers map.
if (releaseSurrogateLoggers) {
// accesses to RedirectedLoggers is synchronized on BootstrapLogger.class
return RedirectedLoggers.INSTANCE.drainLoggersMap();
} else {
return null;
}
}
public static void redirectTemporaryLoggers() {
// This call is synchronized on BootstrapLogger.class.
final Map<LazyLoggerAccessor, SurrogateLogger> accessors =
releaseSurrogateLoggers();
// We will now reset the logger accessors, triggering the
// (possibly lazy) replacement of any temporary surrogate logger by the
// real logger returned from the loaded LoggerFinder.
if (accessors != null) {
RedirectedLoggers.replaceSurrogateLoggers(accessors);
}
BootstrapExecutors.flush();
}
// Hook for tests which need to wait until pending messages
// are processed.
static void awaitPendingTasks() {
BootstrapExecutors.awaitPendingTasks();
}
static boolean isAlive() {
return BootstrapExecutors.isAlive();
}
}
| HashMap | java |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.compute.operator;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.compute.Describable;
import org.elasticsearch.compute.aggregation.AggregatorMode;
import org.elasticsearch.compute.aggregation. [MASK] ;
import org.elasticsearch.compute.aggregation. [MASK] Function;
import org.elasticsearch.compute.aggregation.blockhash.BlockHash;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.IntBlock;
import org.elasticsearch.compute.data.IntVector;
import org.elasticsearch.compute.data.Page;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.function.Supplier;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.joining;
public class HashAggregationOperator implements Operator {
public record HashAggregationOperatorFactory(
List<BlockHash.GroupSpec> groups,
AggregatorMode aggregatorMode,
List< [MASK] .Factory> aggregators,
int maxPageSize,
AnalysisRegistry analysisRegistry
) implements OperatorFactory {
@Override
public Operator get(DriverContext driverContext) {
if (groups.stream().anyMatch(BlockHash.GroupSpec::isCategorize)) {
return new HashAggregationOperator(
aggregators,
() -> BlockHash.buildCategorizeBlockHash(
groups,
aggregatorMode,
driverContext.blockFactory(),
analysisRegistry,
maxPageSize
),
driverContext
);
}
return new HashAggregationOperator(
aggregators,
() -> BlockHash.build(groups, driverContext.blockFactory(), maxPageSize, false),
driverContext
);
}
@Override
public String describe() {
return "HashAggregationOperator[mode = "
+ "<not-needed>"
+ ", aggs = "
+ aggregators.stream().map(Describable::describe).collect(joining(", "))
+ "]";
}
}
private boolean finished;
private Page output;
private final BlockHash blockHash;
private final List< [MASK] > aggregators;
private final DriverContext driverContext;
/**
* Nanoseconds this operator has spent hashing grouping keys.
*/
private long hashNanos;
/**
* Nanoseconds this operator has spent running the aggregations.
*/
private long aggregationNanos;
/**
* Count of pages this operator has processed.
*/
private int pagesProcessed;
/**
* Count of rows this operator has received.
*/
private long rowsReceived;
/**
* Count of rows this operator has emitted.
*/
private long rowsEmitted;
@SuppressWarnings("this-escape")
public HashAggregationOperator(
List< [MASK] .Factory> aggregators,
Supplier<BlockHash> blockHash,
DriverContext driverContext
) {
this.aggregators = new ArrayList<>(aggregators.size());
this.driverContext = driverContext;
boolean success = false;
try {
this.blockHash = blockHash.get();
for ( [MASK] .Factory a : aggregators) {
this.aggregators.add(a.apply(driverContext));
}
success = true;
} finally {
if (success == false) {
close();
}
}
}
@Override
public boolean needsInput() {
return finished == false;
}
@Override
public void addInput(Page page) {
try {
[MASK] Function.AddInput[] prepared = new [MASK] Function.AddInput[aggregators.size()];
class AddInput implements [MASK] Function.AddInput {
long hashStart = System.nanoTime();
long aggStart;
@Override
public void add(int positionOffset, IntBlock groupIds) {
IntVector groupIdsVector = groupIds.asVector();
if (groupIdsVector != null) {
add(positionOffset, groupIdsVector);
} else {
startAggEndHash();
for ( [MASK] Function.AddInput p : prepared) {
p.add(positionOffset, groupIds);
}
end();
}
}
@Override
public void add(int positionOffset, IntVector groupIds) {
startAggEndHash();
for ( [MASK] Function.AddInput p : prepared) {
p.add(positionOffset, groupIds);
}
end();
}
private void startAggEndHash() {
aggStart = System.nanoTime();
hashNanos += aggStart - hashStart;
}
private void end() {
hashStart = System.nanoTime();
aggregationNanos += hashStart - aggStart;
}
@Override
public void close() {
Releasables.closeExpectNoException(prepared);
}
}
try (AddInput add = new AddInput()) {
checkState(needsInput(), "Operator is already finishing");
requireNonNull(page, "page is null");
for (int i = 0; i < prepared.length; i++) {
prepared[i] = aggregators.get(i).prepareProcessPage(blockHash, page);
}
blockHash.add(wrapPage(page), add);
hashNanos += System.nanoTime() - add.hashStart;
}
} finally {
page.releaseBlocks();
pagesProcessed++;
rowsReceived += page.getPositionCount();
}
}
@Override
public Page getOutput() {
Page p = output;
if (p != null) {
rowsEmitted += p.getPositionCount();
}
output = null;
return p;
}
@Override
public void finish() {
if (finished) {
return;
}
finished = true;
Block[] blocks = null;
IntVector selected = null;
boolean success = false;
try {
selected = blockHash.nonEmpty();
Block[] keys = blockHash.getKeys();
int[] aggBlockCounts = aggregators.stream().mapToInt( [MASK] ::evaluateBlockCount).toArray();
blocks = new Block[keys.length + Arrays.stream(aggBlockCounts).sum()];
System.arraycopy(keys, 0, blocks, 0, keys.length);
int offset = keys.length;
for (int i = 0; i < aggregators.size(); i++) {
var aggregator = aggregators.get(i);
aggregator.evaluate(blocks, offset, selected, driverContext);
offset += aggBlockCounts[i];
}
output = new Page(blocks);
success = true;
} finally {
// selected should always be closed
if (selected != null) {
selected.close();
}
if (success == false && blocks != null) {
Releasables.closeExpectNoException(blocks);
}
}
}
@Override
public boolean isFinished() {
return finished && output == null;
}
@Override
public void close() {
if (output != null) {
output.releaseBlocks();
}
Releasables.close(blockHash, () -> Releasables.close(aggregators));
}
@Override
public Operator.Status status() {
return new Status(hashNanos, aggregationNanos, pagesProcessed, rowsReceived, rowsEmitted);
}
protected static void checkState(boolean condition, String msg) {
if (condition == false) {
throw new IllegalArgumentException(msg);
}
}
protected Page wrapPage(Page page) {
return page;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(this.getClass().getSimpleName()).append("[");
sb.append("blockHash=").append(blockHash).append(", ");
sb.append("aggregators=").append(aggregators);
sb.append("]");
return sb.toString();
}
public static class Status implements Operator.Status {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Operator.Status.class,
"hashagg",
Status::new
);
/**
* Nanoseconds this operator has spent hashing grouping keys.
*/
private final long hashNanos;
/**
* Nanoseconds this operator has spent running the aggregations.
*/
private final long aggregationNanos;
/**
* Count of pages this operator has processed.
*/
private final int pagesProcessed;
/**
* Count of rows this operator has received.
*/
private final long rowsReceived;
/**
* Count of rows this operator has emitted.
*/
private final long rowsEmitted;
/**
* Build.
* @param hashNanos Nanoseconds this operator has spent hashing grouping keys.
* @param aggregationNanos Nanoseconds this operator has spent running the aggregations.
* @param pagesProcessed Count of pages this operator has processed.
* @param rowsReceived Count of rows this operator has received.
* @param rowsEmitted Count of rows this operator has emitted.
*/
public Status(long hashNanos, long aggregationNanos, int pagesProcessed, long rowsReceived, long rowsEmitted) {
this.hashNanos = hashNanos;
this.aggregationNanos = aggregationNanos;
this.pagesProcessed = pagesProcessed;
this.rowsReceived = rowsReceived;
this.rowsEmitted = rowsEmitted;
}
protected Status(StreamInput in) throws IOException {
hashNanos = in.readVLong();
aggregationNanos = in.readVLong();
pagesProcessed = in.readVInt();
if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) {
rowsReceived = in.readVLong();
rowsEmitted = in.readVLong();
} else {
rowsReceived = 0;
rowsEmitted = 0;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(hashNanos);
out.writeVLong(aggregationNanos);
out.writeVInt(pagesProcessed);
if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_ROWS_PROCESSED)) {
out.writeVLong(rowsReceived);
out.writeVLong(rowsEmitted);
}
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
/**
* Nanoseconds this operator has spent hashing grouping keys.
*/
public long hashNanos() {
return hashNanos;
}
/**
* Nanoseconds this operator has spent running the aggregations.
*/
public long aggregationNanos() {
return aggregationNanos;
}
/**
* Count of pages this operator has processed.
*/
public int pagesProcessed() {
return pagesProcessed;
}
/**
* Count of rows this operator has received.
*/
public long rowsReceived() {
return rowsReceived;
}
/**
* Count of rows this operator has emitted.
*/
public long rowsEmitted() {
return rowsEmitted;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("hash_nanos", hashNanos);
if (builder.humanReadable()) {
builder.field("hash_time", TimeValue.timeValueNanos(hashNanos));
}
builder.field("aggregation_nanos", aggregationNanos);
if (builder.humanReadable()) {
builder.field("aggregation_time", TimeValue.timeValueNanos(aggregationNanos));
}
builder.field("pages_processed", pagesProcessed);
builder.field("rows_received", rowsReceived);
builder.field("rows_emitted", rowsEmitted);
return builder.endObject();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Status status = (Status) o;
return hashNanos == status.hashNanos
&& aggregationNanos == status.aggregationNanos
&& pagesProcessed == status.pagesProcessed
&& rowsReceived == status.rowsReceived
&& rowsEmitted == status.rowsEmitted;
}
@Override
public int hashCode() {
return Objects.hash(hashNanos, aggregationNanos, pagesProcessed, rowsReceived, rowsEmitted);
}
@Override
public String toString() {
return Strings.toString(this);
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_8_14_0;
}
}
}
| GroupingAggregator | java |
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: DoRoguelikeDungeonCardGachaReq.proto
package emu.grasscutter.net.proto;
public final class DoRoguelikeDungeonCardGachaReqOuterClass {
private DoRoguelikeDungeonCardGachaReqOuterClass() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistryLite registry) {
}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(com.google.protobuf.ExtensionRegistryLite) registry);
}
public interface DoRoguelikeDungeonCardGachaReqOrBuilder extends
// @@protoc_insertion_point(interface_extends:DoRoguelikeDungeonCardGachaReq)
com.google.protobuf.MessageOrBuilder {
/**
* <code>uint32 dungeon_id = 14;</code>
* @return The dungeonId.
*/
int getDungeonId();
/**
* <code>uint32 cell_id = 3;</code>
* @return The cellId.
*/
int getCellId();
}
/**
* <pre>
* CmdId: 21709
* Obf: BCNAOIAJONN
* </pre>
*
* Protobuf type {@code DoRoguelikeDungeonCardGachaReq}
*/
public static final class DoRoguelikeDungeonCardGachaReq extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:DoRoguelikeDungeonCardGachaReq)
DoRoguelikeDungeonCardGachaReqOrBuilder {
private static final long serialVersionUID = 0L;
// Use DoRoguelikeDungeonCardGachaReq.newBuilder() to construct.
private DoRoguelikeDungeonCardGachaReq(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DoRoguelikeDungeonCardGachaReq() {
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new DoRoguelikeDungeonCardGachaReq();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DoRoguelikeDungeonCardGachaReq(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 24: {
cellId_ = input.readUInt32();
break;
}
case 112: {
dungeonId_ = input.readUInt32();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.internal_static_DoRoguelikeDungeonCardGachaReq_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.internal_static_DoRoguelikeDungeonCardGachaReq_fieldAccessorTable
.ensureFieldAccessorsInitialized(
emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq.class, emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq.Builder.class);
}
public static final int DUNGEON_ID_FIELD_NUMBER = 14;
private int dungeonId_;
/**
* <code>uint32 dungeon_id = 14;</code>
* @return The dungeonId.
*/
@java.lang.Override
public int getDungeonId() {
return dungeonId_;
}
public static final int CELL_ID_FIELD_NUMBER = 3;
private int cellId_;
/**
* <code>uint32 cell_id = 3;</code>
* @return The cellId.
*/
@java.lang.Override
public int getCellId() {
return cellId_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (cellId_ != 0) {
output.writeUInt32(3, cellId_);
}
if (dungeonId_ != 0) {
output.writeUInt32(14, dungeonId_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (cellId_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, cellId_);
}
if (dungeonId_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(14, dungeonId_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq)) {
return super.equals(obj);
}
emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq other = (emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq) obj;
if (getDungeonId()
!= other.getDungeonId()) return false;
if (getCellId()
!= other.getCellId()) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + DUNGEON_ID_FIELD_NUMBER;
hash = (53 * hash) + getDungeonId();
hash = (37 * hash) + CELL_ID_FIELD_NUMBER;
hash = (53 * hash) + getCellId();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
* CmdId: 21709
* Obf: BCNAOIAJONN
* </pre>
*
* Protobuf type {@code DoRoguelikeDungeonCardGachaReq}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:DoRoguelikeDungeonCardGachaReq)
emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReqOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.internal_static_DoRoguelikeDungeonCardGachaReq_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.internal_static_DoRoguelikeDungeonCardGachaReq_fieldAccessorTable
.ensureFieldAccessorsInitialized(
emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq.class, emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq.Builder.class);
}
// Construct using emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
dungeonId_ = 0;
cellId_ = 0;
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.internal_static_DoRoguelikeDungeonCardGachaReq_descriptor;
}
@java.lang.Override
public emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq getDefaultInstanceForType() {
return emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq.getDefaultInstance();
}
@java.lang.Override
public emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq build() {
emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq buildPartial() {
emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq result = new emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq(this);
result.dungeonId_ = dungeonId_;
result.cellId_ = cellId_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq) {
return mergeFrom((emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq other) {
if (other == emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq.getDefaultInstance()) return this;
if (other.getDungeonId() != 0) {
setDungeonId(other.getDungeonId());
}
if (other.getCellId() != 0) {
setCellId(other.getCellId());
}
this. [MASK] (other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int dungeonId_ ;
/**
* <code>uint32 dungeon_id = 14;</code>
* @return The dungeonId.
*/
@java.lang.Override
public int getDungeonId() {
return dungeonId_;
}
/**
* <code>uint32 dungeon_id = 14;</code>
* @param value The dungeonId to set.
* @return This builder for chaining.
*/
public Builder setDungeonId(int value) {
dungeonId_ = value;
onChanged();
return this;
}
/**
* <code>uint32 dungeon_id = 14;</code>
* @return This builder for chaining.
*/
public Builder clearDungeonId() {
dungeonId_ = 0;
onChanged();
return this;
}
private int cellId_ ;
/**
* <code>uint32 cell_id = 3;</code>
* @return The cellId.
*/
@java.lang.Override
public int getCellId() {
return cellId_;
}
/**
* <code>uint32 cell_id = 3;</code>
* @param value The cellId to set.
* @return This builder for chaining.
*/
public Builder setCellId(int value) {
cellId_ = value;
onChanged();
return this;
}
/**
* <code>uint32 cell_id = 3;</code>
* @return This builder for chaining.
*/
public Builder clearCellId() {
cellId_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder [MASK] (
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super. [MASK] (unknownFields);
}
// @@protoc_insertion_point(builder_scope:DoRoguelikeDungeonCardGachaReq)
}
// @@protoc_insertion_point(class_scope:DoRoguelikeDungeonCardGachaReq)
private static final emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq();
}
public static emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<DoRoguelikeDungeonCardGachaReq>
PARSER = new com.google.protobuf.AbstractParser<DoRoguelikeDungeonCardGachaReq>() {
@java.lang.Override
public DoRoguelikeDungeonCardGachaReq parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DoRoguelikeDungeonCardGachaReq(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<DoRoguelikeDungeonCardGachaReq> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<DoRoguelikeDungeonCardGachaReq> getParserForType() {
return PARSER;
}
@java.lang.Override
public emu.grasscutter.net.proto.DoRoguelikeDungeonCardGachaReqOuterClass.DoRoguelikeDungeonCardGachaReq getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_DoRoguelikeDungeonCardGachaReq_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_DoRoguelikeDungeonCardGachaReq_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n$DoRoguelikeDungeonCardGachaReq.proto\"E" +
"\n\036DoRoguelikeDungeonCardGachaReq\022\022\n\ndung" +
"eon_id\030\016 \001(\r\022\017\n\007cell_id\030\003 \001(\rB\033\n\031emu.gra" +
"sscutter.net.protob\006proto3"
};
descriptor = com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
});
internal_static_DoRoguelikeDungeonCardGachaReq_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_DoRoguelikeDungeonCardGachaReq_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_DoRoguelikeDungeonCardGachaReq_descriptor,
new java.lang.String[] { "DungeonId", "CellId", });
}
// @@protoc_insertion_point(outer_class_scope)
}
| mergeUnknownFields | java |
package ai.chat2db.server.test.domain.data.service;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import com.alibaba.fastjson2.JSON;
import ai.chat2db.server.domain.api.param.ConsoleConnectParam;
import ai.chat2db.server.domain.api.param.DlExecuteParam;
import ai.chat2db.server.domain.api.param.DropParam;
import ai.chat2db.server.domain.api.param.ShowCreateTableParam;
import ai.chat2db.server.domain.api.param.TablePageQueryParam;
import ai.chat2db.server.domain.api.param.TableQueryParam;
import ai.chat2db.server.domain.api.param.TableSelector;
import ai.chat2db.server.domain.api.param.datasource.DataSourcePreConnectParam;
import ai.chat2db.server.domain.api.service.ConsoleService;
import ai.chat2db.server.domain.api.service.DataSourceService;
import ai.chat2db.server.domain.api.service.DlTemplateService;
import ai.chat2db.server.domain.api.service.TableService;
import ai.chat2db.server.test.common.BaseTest;
import ai.chat2db.server.test.domain.data.service.dialect.DialectProperties;
import ai.chat2db.server.test.domain.data.utils.TestUtils;
import ai.chat2db.server.tools.base.wrapper.result.DataResult;
import ai.chat2db.server.tools.common.util.EasyCollectionUtils;
import ai.chat2db.spi.enums.CollationEnum;
import ai.chat2db.spi.enums.IndexTypeEnum;
import ai.chat2db.spi.model.Sql;
import ai.chat2db.spi.model.Table;
import ai.chat2db.spi.model.TableColumn;
import ai.chat2db.spi.model.TableIndex;
import ai.chat2db.spi.model.TableIndexColumn;
import com.google.common.collect.Lists;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
/**
* Data source testing
*
* @author Jiaju Zhuang
*/
@Slf4j
public class TableOperationsTest extends BaseTest {
/**
* Table Name
*/
public static final String TABLE_NAME = "data_ops_table_test_" + System.currentTimeMillis();
@Resource
private DataSourceService dataSourceService;
@Resource
private ConsoleService consoleService;
@Autowired
private List<DialectProperties> dialectPropertiesList;
@Resource
private DlTemplateService dlTemplateService;
@Resource
private TableService tableService;
//@Resource
//private SqlOperations sqlOperations;
@Test
@Order(1)
public void table() {
for (DialectProperties dialectProperties : dialectPropertiesList) {
String dbTypeEnum = dialectProperties.getDbType();
Long dataSourceId = TestUtils.nextLong();
Long consoleId = TestUtils.nextLong();
// Prepare context
putConnect(dialectProperties.getUrl(), dialectProperties.getUsername(), dialectProperties.getPassword(),
dialectProperties.getDbType(), dialectProperties.getDatabaseName(), dataSourceId, consoleId);
DataSourcePreConnectParam dataSourceCreateParam = new DataSourcePreConnectParam();
dataSourceCreateParam.setType(dbTypeEnum);
dataSourceCreateParam.setUrl(dialectProperties.getUrl());
dataSourceCreateParam.setUser(dialectProperties.getUsername());
dataSourceCreateParam.setPassword(dialectProperties.getPassword());
dataSourceService.preConnect(dataSourceCreateParam);
// Create a console
ConsoleConnectParam consoleCreateParam = new ConsoleConnectParam();
consoleCreateParam.setDataSourceId(dataSourceId);
consoleCreateParam.setConsoleId(consoleId);
consoleCreateParam.setDatabaseName(dialectProperties.getDatabaseName());
consoleService.createConsole(consoleCreateParam);
// Create table structure
DlExecuteParam templateQueryParam = new DlExecuteParam();
templateQueryParam.setConsoleId(consoleId);
templateQueryParam.setDataSourceId(dataSourceId);
templateQueryParam.setSql(dialectProperties.getCrateTableSql(TABLE_NAME));
dlTemplateService.execute(templateQueryParam);
// Query table creation statement
ShowCreateTableParam showCreateTableParam = ShowCreateTableParam.builder()
.dataSourceId(dataSourceId)
.databaseName(dialectProperties.getDatabaseName())
.tableName(dialectProperties.toCase(TABLE_NAME))
.build();
if (dialectProperties.getDbType() == "POSTGRESQL") {
showCreateTableParam.setSchemaName("public");
}
DataResult<String> createTable = tableService.showCreateTable(showCreateTableParam);
log.info("Table creation statement: {}", createTable.getData());
if (dialectProperties.getDbType() != "H2") {
Assertions.assertTrue(createTable.getData().contains(dialectProperties.toCase(TABLE_NAME)),
"Query table structure failed");
}
// Query table structure
TablePageQueryParam [MASK] = new TablePageQueryParam();
[MASK] .setDataSourceId(dataSourceId);
[MASK] .setDatabaseName(dialectProperties.getDatabaseName());
[MASK] .setTableName(dialectProperties.toCase(TABLE_NAME));
if (dialectProperties.getDbType() == "POSTGRESQL") {
[MASK] .setSchemaName("public");
}
List<Table> tableList = tableService.pageQuery( [MASK] , TableSelector.builder()
.columnList(Boolean.TRUE)
.indexList(Boolean.TRUE)
.build()).getData();
log.info("Analyzing data returns {}", JSON.toJSONString(tableList));
Assertions.assertNotEquals(0L, tableList.size(), "Query table structure failed");
Table table = tableList.get(0);
// Assertions.assertEquals(dialectProperties.toCase(TABLE_NAME), table.getName(), "Query table structure failed");
if (dialectProperties.getDbType() != "POSTGRESQL") {
Assertions.assertEquals("Test table", table.getComment(), "Query table structure failed");
}
TableQueryParam tableQueryParam = new TableQueryParam();
tableQueryParam.setTableName(table.getName());
tableQueryParam.setDataSourceId(dataSourceId);
tableQueryParam.setDatabaseName(dialectProperties.getDatabaseName());
if (dialectProperties.getDbType() == "POSTGRESQL") {
tableQueryParam.setSchemaName("public");
}
List<TableColumn> columnList = tableService.queryColumns(tableQueryParam);
Assertions.assertEquals(4L, columnList.size(), "Query table structure failed");
TableColumn id = columnList.get(0);
Assertions.assertEquals(dialectProperties.toCase("id"), id.getName(), "Query table structure failed");
Assertions.assertEquals("Primary key auto-increment", id.getComment(), "Query table structure failed");
Assertions.assertTrue(id.getAutoIncrement(), "Query table structure failed");
//Assertions.assertFalse(id.getNullable(), "Query table structure failed");
TableColumn string = columnList.get(3);
Assertions.assertEquals(dialectProperties.toCase("string"), string.getName(), "Query table structure failed");
//Assertions.assertTrue(string.getNullable(), "Query table structure failed");
Assertions.assertEquals("DATA", TestUtils.unWrapperDefaultValue(string.getDefaultValue()),
"Query table structure failed");
if (dialectProperties.getDbType() == "POSTGRESQL") {
[MASK] .setSchemaName("public");
}
List<TableIndex> tableIndexList = tableService.queryIndexes(tableQueryParam);
log.info("Analyzing data returns {}", JSON.toJSONString(tableIndexList));
Assertions.assertEquals(4L, tableIndexList.size(), "Query table structure failed");
Map<String, TableIndex> tableIndexMap = EasyCollectionUtils.toIdentityMap(tableIndexList,
TableIndex::getName);
TableIndex idxDate = tableIndexMap.get(dialectProperties.toCase(TABLE_NAME + "_idx_date"));
Assertions.assertEquals("date index", idxDate.getComment(), "Query table structure failed");
Assertions.assertEquals(IndexTypeEnum.NORMAL.getCode(), idxDate.getType(), "Query table structure failed");
Assertions.assertEquals(1L, idxDate.getColumnList().size(), "Query table structure failed");
Assertions.assertEquals(dialectProperties.toCase("date"), idxDate.getColumnList().get(0).getColumnName(),
"Query table structure failed");
Assertions.assertEquals(CollationEnum.DESC.getCode(), idxDate.getColumnList().get(0).getCollation(),
"Query table structure failed");
TableIndex ukNumber = tableIndexMap.get(dialectProperties.toCase(TABLE_NAME + "_uk_number"));
Assertions.assertEquals("unique index", ukNumber.getComment(), "Query table structure failed");
Assertions.assertEquals(IndexTypeEnum.UNIQUE.getCode(), ukNumber.getType(), "Query table structure failed");
TableIndex idxNumberString = tableIndexMap.get(dialectProperties.toCase(TABLE_NAME + "_idx_number_string"));
Assertions.assertEquals(2, idxNumberString.getColumnList().size(), "Query table structure failed");
// Delete table structure
DropParam dropParam = DropParam.builder()
.dataSourceId(dataSourceId)
.databaseName(dialectProperties.getDatabaseName())
.tableName(dialectProperties.toCase(TABLE_NAME))
.build();
tableService.drop(dropParam);
// Query table structure
[MASK] = new TablePageQueryParam();
[MASK] .setDataSourceId(dataSourceId);
[MASK] .setDatabaseName(dialectProperties.getDatabaseName());
[MASK] .setTableName(dialectProperties.toCase(TABLE_NAME));
tableList = tableService.pageQuery( [MASK] , TableSelector.builder()
.columnList(Boolean.TRUE)
.indexList(Boolean.TRUE)
.build()).getData();
log.info("After deleting the table, the data returns {}", JSON.toJSONString(tableList));
Assertions.assertEquals(0L, tableList.size(), "Query table structure failed");
// Test the table creation statement
testBuildSql(dialectProperties, dataSourceId, consoleId);
removeConnect();
}
}
private void testBuildSql(DialectProperties dialectProperties, Long dataSourceId, Long consoleId) {
if (dialectProperties.getDbType() != "MYSQL") {
log.error("Currently the test case only supports mysql");
return;
}
// Create new table
// CREATE TABLE `DATA_OPS_TEMPLATE_TEST_1673093980449`
// (
// `id` bigint PRIMARY KEY AUTO_INCREMENT NOT NULL COMMENT 'Primary key auto-increment',
// `date` datetime(3) not null COMMENT 'date',
// `number` bigint COMMENT 'long integer',
// `string` VARCHAR(100) default 'DATA' COMMENT 'name',
// index DATA_OPS_TEMPLATE_TEST_1673093980449_idx_date (date desc) comment 'date index',
// unique DATA_OPS_TEMPLATE_TEST_1673093980449_uk_number (number) comment 'unique index',
// index DATA_OPS_TEMPLATE_TEST_1673093980449_idx_number_string (number, date) comment 'Union index'
//) COMMENT ='Test table';
// * The case depends on the specific database:
//* Create table structure: Test table
// * Fields:
//* id Primary key auto-increment
//* date date is not null
// * number long integer
// * string String length 100 default value "DATA"
// *
//* Index (plus $tableName_ because some database indexes are globally unique):
//* $tableName_idx_date date index reverse order
// * $tableName_uk_number unique index
// * $tableName_idx_number_string Union index
String tableName = dialectProperties.toCase("data_ops_table_test_" + System.currentTimeMillis());
Table newTable = new Table();
newTable.setName(tableName);
newTable.setComment("Test table");
List<TableColumn> tableColumnList = new ArrayList<>();
newTable.setColumnList(tableColumnList);
//id
TableColumn idTableColumn = new TableColumn();
idTableColumn.setName("id");
idTableColumn.setAutoIncrement(Boolean.TRUE);
idTableColumn.setPrimaryKey(Boolean.TRUE);
//idTableColumn.setNullable(Boolean.FALSE);
idTableColumn.setComment("Primary key auto-increment");
idTableColumn.setColumnType("bigint");
tableColumnList.add(idTableColumn);
// date
TableColumn dateTableColumn = new TableColumn();
dateTableColumn.setName("date");
//dateTableColumn.setNullable(Boolean.FALSE);
dateTableColumn.setComment("date");
dateTableColumn.setColumnType("datetime(3)");
tableColumnList.add(dateTableColumn);
// number
TableColumn numberTableColumn = new TableColumn();
numberTableColumn.setName("number");
numberTableColumn.setComment("long integer");
numberTableColumn.setColumnType("bigint");
tableColumnList.add(numberTableColumn);
// string
TableColumn stringTableColumn = new TableColumn();
stringTableColumn.setName("string");
stringTableColumn.setComment("name");
stringTableColumn.setColumnType("varchar(100)");
stringTableColumn.setDefaultValue("DATA");
tableColumnList.add(stringTableColumn);
// index
List<TableIndex> tableIndexList = new ArrayList<>();
newTable.setIndexList(tableIndexList);
// index DATA_OPS_TEMPLATE_TEST_1673093980449_idx_date (date desc) comment 'date index',
tableIndexList.add(TableIndex.builder()
.name(tableName + "_idx_date")
.type(IndexTypeEnum.NORMAL.getCode())
.comment("date index")
.columnList(Lists.newArrayList(TableIndexColumn.builder()
.columnName("date")
.collation(CollationEnum.DESC.getCode())
.build()))
.build());
// unique DATA_OPS_TEMPLATE_TEST_1673093980449_uk_number (number) comment 'unique index',
tableIndexList.add(TableIndex.builder()
.name(tableName + "_uk_number")
.type(IndexTypeEnum.UNIQUE.getCode())
.comment("unique index")
.columnList(Lists.newArrayList(TableIndexColumn.builder()
.columnName("number")
.build()))
.build());
// index DATA_OPS_TEMPLATE_TEST_1673093980449_idx_number_string (number, date) comment 'Union index'
tableIndexList.add(TableIndex.builder()
.name(tableName + "_idx_number_string")
.type(IndexTypeEnum.NORMAL.getCode())
.comment("Union index")
.columnList(Lists.newArrayList(TableIndexColumn.builder()
.columnName("number")
.build(),
TableIndexColumn.builder()
.columnName("date")
.build()))
.build());
// build sql
List<Sql> buildTableSqlList = tableService.buildSql(null, newTable).getData();
log.info("The structural statement to create a table is:{}", JSON.toJSONString(buildTableSqlList));
for (Sql sql : buildTableSqlList) {
DlExecuteParam templateQueryParam = new DlExecuteParam();
templateQueryParam.setConsoleId(consoleId);
templateQueryParam.setDataSourceId(dataSourceId);
templateQueryParam.setSql(sql.getSql());
dlTemplateService.execute(templateQueryParam);
}
// Check table structure
checkTable(tableName, dialectProperties, dataSourceId);
// Go to the database to query the table structure
TableQueryParam [MASK] = new TableQueryParam();
[MASK] .setDataSourceId(dataSourceId);
[MASK] .setDatabaseName(dialectProperties.getDatabaseName());
[MASK] .setTableName(dialectProperties.toCase(tableName));
Table table = tableService.query( [MASK] , TableSelector.builder()
.columnList(Boolean.TRUE)
.indexList(Boolean.TRUE)
.build()).getData();
log.info("Analyzing data returns {}", JSON.toJSONString(table));
Assertions.assertNotNull(table, "Query table structure failed");
Table oldTable = table;
Assertions.assertEquals(dialectProperties.toCase(tableName), oldTable.getName(), "Query table structure failed");
Assertions.assertEquals("Test table", oldTable.getComment(), "Query table structure failed");
// Modify table structure
// build sql
log.info("oldTable:{}", JSON.toJSONString(oldTable));
log.info("newTable:{}", JSON.toJSONString(newTable));
buildTableSqlList = tableService.buildSql(oldTable, newTable).getData();
log.info("Modify the table structure: {}", JSON.toJSONString(buildTableSqlList));
Assertions.assertTrue(!buildTableSqlList.isEmpty(), "构建sql失败");
// Let’s query again. There will be 2 objects.
[MASK] = new TableQueryParam();
[MASK] .setDataSourceId(dataSourceId);
[MASK] .setDatabaseName(dialectProperties.getDatabaseName());
[MASK] .setTableName(dialectProperties.toCase(tableName));
newTable = tableService.query( [MASK] , TableSelector.builder()
.columnList(Boolean.TRUE)
.indexList(Boolean.TRUE)
.build()).getData();
// Modify fields
// Add a new field
newTable.getColumnList().add(TableColumn.builder()
.name("add_string")
.columnType("varchar(20)")
.comment("New string")
.build());
// Add a new index
newTable.getIndexList().add(TableIndex.builder()
.name(tableName + "_idx_string_new")
.type(IndexTypeEnum.NORMAL.getCode())
.comment("new string index")
.columnList(Lists.newArrayList(TableIndexColumn.builder()
.columnName("add_string")
.collation(CollationEnum.DESC.getCode())
.build()))
.build());
// Query table structure changes
log.info("oldTable:{}", JSON.toJSONString(oldTable));
log.info("newTable:{}", JSON.toJSONString(newTable));
buildTableSqlList = tableService.buildSql(oldTable, newTable).getData();
log.info("Modify the table structure: {}", JSON.toJSONString(buildTableSqlList));
// Delete table structure
dropTable(tableName, dialectProperties, dataSourceId);
}
private void dropTable(String tableName, DialectProperties dialectProperties, Long dataSourceId) {
// Delete table structure
DropParam dropParam = DropParam.builder()
.dataSourceId(dataSourceId)
.databaseName(dialectProperties.getDatabaseName())
.tableName(dialectProperties.toCase(tableName))
.build();
tableService.drop(dropParam);
// Query table structure
TablePageQueryParam [MASK] = new TablePageQueryParam();
[MASK] .setDataSourceId(dataSourceId);
[MASK] .setDatabaseName(dialectProperties.getDatabaseName());
[MASK] .setTableName(dialectProperties.toCase(tableName));
List<Table> tableList = tableService.pageQuery( [MASK] , TableSelector.builder()
.columnList(Boolean.TRUE)
.indexList(Boolean.TRUE)
.build()).getData();
log.info("After deleting the table, the data returns {}", JSON.toJSONString(tableList));
Assertions.assertEquals(0L, tableList.size(), "Query table structure failed");
}
private void checkTable(String tableName, DialectProperties dialectProperties, Long dataSourceId) {
// Query table structure
TablePageQueryParam [MASK] = new TablePageQueryParam();
[MASK] .setDataSourceId(dataSourceId);
[MASK] .setDatabaseName(dialectProperties.getDatabaseName());
[MASK] .setTableName(dialectProperties.toCase(tableName));
List<Table> tableList = tableService.pageQuery( [MASK] , TableSelector.builder()
.columnList(Boolean.TRUE)
.indexList(Boolean.TRUE)
.build()).getData();
log.info("Analyzing data returns {}", JSON.toJSONString(tableList));
Assertions.assertEquals(1L, tableList.size(), "Query table structure failed");
Table table = tableList.get(0);
Assertions.assertEquals(dialectProperties.toCase(tableName), table.getName(), "Query table structure failed");
Assertions.assertEquals("Test table", table.getComment(), "Query table structure failed");
TableQueryParam tableQueryParam = new TableQueryParam();
tableQueryParam.setTableName(table.getName());
tableQueryParam.setDataSourceId(dataSourceId);
tableQueryParam.setDatabaseName(dialectProperties.getDatabaseName());
List<TableColumn> columnList = tableService.queryColumns(tableQueryParam);
Assertions.assertEquals(4L, columnList.size(), "Query table structure failed");
TableColumn id = columnList.get(0);
Assertions.assertEquals(dialectProperties.toCase("id"), id.getName(), "Query table structure failed");
Assertions.assertEquals("Primary key auto-increment", id.getComment(), "Query table structure failed");
Assertions.assertTrue(id.getAutoIncrement(), "Query table structure failed");
//Assertions.assertFalse(id.getNullable(), "Query table structure failed");
Assertions.assertTrue(id.getPrimaryKey(), "Query table structure failed");
TableColumn string = columnList.get(3);
Assertions.assertEquals(dialectProperties.toCase("string"), string.getName(), "Query table structure failed");
//Assertions.assertTrue(string.getNullable(), "Query table structure failed");
Assertions.assertEquals("DATA", TestUtils.unWrapperDefaultValue(string.getDefaultValue()),
"Query table structure failed");
List<TableIndex> tableIndexList = tableService.queryIndexes(tableQueryParam);
Assertions.assertEquals(4L, tableIndexList.size(), "Query table structure failed");
Map<String, TableIndex> tableIndexMap = EasyCollectionUtils.toIdentityMap(tableIndexList,
TableIndex::getName);
TableIndex idxDate = tableIndexMap.get(dialectProperties.toCase(tableName + "_idx_date"));
Assertions.assertEquals("date index", idxDate.getComment(), "Query table structure failed");
Assertions.assertEquals(IndexTypeEnum.NORMAL.getCode(), idxDate.getType(), "Query table structure failed");
Assertions.assertEquals(1L, idxDate.getColumnList().size(), "Query table structure failed");
Assertions.assertEquals(dialectProperties.toCase("date"), idxDate.getColumnList().get(0).getColumnName(),
"Query table structure failed");
Assertions.assertEquals(CollationEnum.DESC.getCode(), idxDate.getColumnList().get(0).getCollation(),
"Query table structure failed");
TableIndex ukNumber = tableIndexMap.get(dialectProperties.toCase(tableName + "_uk_number"));
Assertions.assertEquals("unique index", ukNumber.getComment(), "Query table structure failed");
Assertions.assertEquals(IndexTypeEnum.UNIQUE.getCode(), ukNumber.getType(), "Query table structure failed");
TableIndex idxNumberString = tableIndexMap.get(dialectProperties.toCase(tableName + "_idx_number_string"));
Assertions.assertEquals(2, idxNumberString.getColumnList().size(), "Query table structure failed");
}
@Test
@Order(Integer.MAX_VALUE)
public void dropTable() {
for (DialectProperties dialectProperties : dialectPropertiesList) {
try {
String dbTypeEnum = dialectProperties.getDbType();
Long dataSourceId = TestUtils.nextLong();
Long consoleId = TestUtils.nextLong();
DataSourcePreConnectParam dataSourceCreateParam = new DataSourcePreConnectParam();
dataSourceCreateParam.setType(dbTypeEnum);
dataSourceCreateParam.setUrl(dialectProperties.getUrl());
dataSourceCreateParam.setUser(dialectProperties.getUsername());
dataSourceCreateParam.setPassword(dialectProperties.getPassword());
dataSourceService.preConnect(dataSourceCreateParam);
// Create a console
ConsoleConnectParam consoleCreateParam = new ConsoleConnectParam();
consoleCreateParam.setDataSourceId(dataSourceId);
consoleCreateParam.setConsoleId(consoleId);
consoleCreateParam.setDatabaseName(dialectProperties.getDatabaseName());
consoleService.createConsole(consoleCreateParam);
// Create table structure
DlExecuteParam templateQueryParam = new DlExecuteParam();
templateQueryParam.setConsoleId(consoleId);
templateQueryParam.setDataSourceId(dataSourceId);
templateQueryParam.setSql(dialectProperties.getDropTableSql(TABLE_NAME));
dlTemplateService.execute(templateQueryParam);
} catch (Exception e) {
log.warn("Failed to delete table structure.", e);
}
}
}
}
| tablePageQueryParam | java |
"/*\n * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.\n * DO NOT ALTE(...TRUNCATED) | tickBounds | java |
"/*\n * Copyright 2016 Red Hat, Inc. and/or its affiliates\n * and other contributors as indicated b(...TRUNCATED) | mtls_endpoints | java |
End of preview.
No dataset card yet
- Downloads last month
- 66