Dataset schema:
  code        string   length 10–749k
  repo_name   string   length 5–108
  path        string   length 7–333
  language    string   1 class
  license     string   15 classes
  size        int64    10–749k
package com.etiennelawlor.loop.network.models.response;

import android.os.Parcel;
import android.os.Parcelable;

import com.google.gson.annotations.SerializedName;

/**
 * Created by etiennelawlor on 5/23/15.
 */
public class Tag implements Parcelable {

    // region Fields
    @SerializedName("uri")
    private String uri;
    @SerializedName("name")
    private String name;
    @SerializedName("tag")
    private String tag;
    @SerializedName("canonical")
    private String canonical;
    // endregion

    // region Constructors
    public Tag() {
    }

    protected Tag(Parcel in) {
        this.uri = in.readString();
        this.name = in.readString();
        this.tag = in.readString();
        this.canonical = in.readString();
    }
    // endregion

    // region Getters
    public String getUri() {
        return uri;
    }

    public String getName() {
        return name;
    }

    public String getTag() {
        return tag;
    }

    public String getCanonical() {
        return canonical;
    }
    // endregion

    // region Setters
    public void setUri(String uri) {
        this.uri = uri;
    }

    public void setName(String name) {
        this.name = name;
    }

    public void setTag(String tag) {
        this.tag = tag;
    }

    public void setCanonical(String canonical) {
        this.canonical = canonical;
    }
    // endregion

    // region Parcelable Methods
    @Override
    public int describeContents() {
        return 0;
    }

    @Override
    public void writeToParcel(Parcel dest, int flags) {
        dest.writeString(this.uri);
        dest.writeString(this.name);
        dest.writeString(this.tag);
        dest.writeString(this.canonical);
    }
    // endregion

    public static final Parcelable.Creator<Tag> CREATOR = new Parcelable.Creator<Tag>() {
        @Override
        public Tag createFromParcel(Parcel source) {
            return new Tag(source);
        }

        @Override
        public Tag[] newArray(int size) {
            return new Tag[size];
        }
    };
}
repo_name: lawloretienne/Loop
path: app/src/main/java/com/etiennelawlor/loop/network/models/response/Tag.java
language: Java
license: apache-2.0
size: 2,066
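The Parcelable implementation above is symmetric: writeToParcel serializes the four strings in a fixed order, and the Parcel constructor reads them back in the same order. A minimal round-trip sketch; this assumes an Android runtime (or Robolectric) since Parcel is a framework class, and the field values are illustrative:

// Hedged sketch: round-tripping a Tag through a Parcel.
// Requires an Android runtime (or Robolectric); values are made up.
Tag tag = new Tag();
tag.setUri("/tags/android");
tag.setName("Android");

Parcel parcel = Parcel.obtain();
tag.writeToParcel(parcel, 0);
parcel.setDataPosition(0);            // rewind before reading back
Tag copy = Tag.CREATOR.createFromParcel(parcel);
parcel.recycle();

assert "Android".equals(copy.getName());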
package rvc.ann;

import java.lang.annotation.Retention;
import java.lang.annotation.Target;

import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;

/**
 * @author nurmuhammad
 */
@Retention(RUNTIME)
@Target(METHOD)
public @interface OPTIONS {

    String value() default Constants.NULL_VALUE;

    boolean absolutePath() default false;
}
repo_name: nurmuhammad/rvc
path: src/main/java/rvc/ann/OPTIONS.java
language: Java
license: apache-2.0
size: 399
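Since @OPTIONS is retained at runtime and targets methods, a route handler would carry it directly. A hypothetical usage sketch; the controller class, method name, and "/users" path are illustrative, not part of the rvc sources:

public class UserController {

    // Hypothetical handler method; "/users" is an illustrative route path.
    @OPTIONS("/users")
    public Object usersOptions() {
        return "OK";
    }
}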
package com.xiaojinzi.component.bean;

import javax.lang.model.element.Element;

/**
 * time : 2018/07/26
 *
 * @author : xiaojinzi
 */
public class RouterDegradeAnnoBean {

    /**
     * Priority.
     */
    private int priority;

    /**
     * A class that implements the RouterDegrade interface.
     */
    private Element rawType;

    public int getPriority() {
        return priority;
    }

    public void setPriority(int priority) {
        this.priority = priority;
    }

    public Element getRawType() {
        return rawType;
    }

    public void setRawType(Element rawType) {
        this.rawType = rawType;
    }
}
repo_name: xiaojinzi123/Component
path: ComponentCompiler/src/main/java/com/xiaojinzi/component/bean/RouterDegradeAnnoBean.java
language: Java
license: apache-2.0
size: 632
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.executiongraph; import org.apache.flink.annotation.VisibleForTesting; import org.apache.flink.api.common.Archiveable; import org.apache.flink.api.common.InputDependencyConstraint; import org.apache.flink.api.common.accumulators.Accumulator; import org.apache.flink.api.common.time.Time; import org.apache.flink.core.io.InputSplit; import org.apache.flink.runtime.JobException; import org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult; import org.apache.flink.runtime.checkpoint.CheckpointOptions; import org.apache.flink.runtime.checkpoint.CheckpointType; import org.apache.flink.runtime.checkpoint.JobManagerTaskRestore; import org.apache.flink.runtime.clusterframework.types.AllocationID; import org.apache.flink.runtime.clusterframework.types.ResourceID; import org.apache.flink.runtime.clusterframework.types.ResourceProfile; import org.apache.flink.runtime.clusterframework.types.SlotProfile; import org.apache.flink.runtime.concurrent.ComponentMainThreadExecutor; import org.apache.flink.runtime.concurrent.FutureUtils; import org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor; import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor; import org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory; import org.apache.flink.runtime.execution.ExecutionState; import org.apache.flink.runtime.instance.SlotSharingGroupId; import org.apache.flink.runtime.io.network.partition.PartitionTracker; import org.apache.flink.runtime.io.network.partition.ResultPartitionID; import org.apache.flink.runtime.jobgraph.IntermediateDataSetID; import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID; import org.apache.flink.runtime.jobmanager.scheduler.CoLocationConstraint; import org.apache.flink.runtime.jobmanager.scheduler.LocationPreferenceConstraint; import org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException; import org.apache.flink.runtime.jobmanager.scheduler.ScheduledUnit; import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup; import org.apache.flink.runtime.jobmanager.slots.TaskManagerGateway; import org.apache.flink.runtime.jobmaster.LogicalSlot; import org.apache.flink.runtime.jobmaster.SlotRequestId; import org.apache.flink.runtime.jobmaster.slotpool.SlotProvider; import org.apache.flink.runtime.messages.Acknowledge; import org.apache.flink.runtime.messages.StackTraceSampleResponse; import org.apache.flink.runtime.shuffle.PartitionDescriptor; import org.apache.flink.runtime.shuffle.ProducerDescriptor; import org.apache.flink.runtime.shuffle.ShuffleDescriptor; import org.apache.flink.runtime.state.KeyGroupRangeAssignment; import org.apache.flink.runtime.taskmanager.TaskManagerLocation; import 
org.apache.flink.util.ExceptionUtils; import org.apache.flink.util.FlinkException; import org.apache.flink.util.FlinkRuntimeException; import org.apache.flink.util.OptionalFailure; import org.apache.flink.util.function.ThrowingRunnable; import org.slf4j.Logger; import javax.annotation.Nonnull; import javax.annotation.Nullable; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.Executor; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import java.util.function.Function; import java.util.stream.Collectors; import static org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory.getConsumedPartitionShuffleDescriptor; import static org.apache.flink.runtime.execution.ExecutionState.CANCELED; import static org.apache.flink.runtime.execution.ExecutionState.CANCELING; import static org.apache.flink.runtime.execution.ExecutionState.CREATED; import static org.apache.flink.runtime.execution.ExecutionState.DEPLOYING; import static org.apache.flink.runtime.execution.ExecutionState.FAILED; import static org.apache.flink.runtime.execution.ExecutionState.FINISHED; import static org.apache.flink.runtime.execution.ExecutionState.RUNNING; import static org.apache.flink.runtime.execution.ExecutionState.SCHEDULED; import static org.apache.flink.util.Preconditions.checkNotNull; /** * A single execution of a vertex. While an {@link ExecutionVertex} can be executed multiple times * (for recovery, re-computation, re-configuration), this class tracks the state of a single execution * of that vertex and the resources. * * <h2>Lock free state transitions</h2> * * <p>In several points of the code, we need to deal with possible concurrent state changes and actions. * For example, while the call to deploy a task (send it to the TaskManager) happens, the task gets cancelled. * * <p>We could lock the entire portion of the code (decision to deploy, deploy, set state to running) such that * it is guaranteed that any "cancel command" will only pick up after deployment is done and that the "cancel * command" call will never overtake the deploying call. * * <p>This blocks the threads big time, because the remote calls may take long. Depending of their locking behavior, it * may even result in distributed deadlocks (unless carefully avoided). We therefore use atomic state updates and * occasional double-checking to ensure that the state after a completed call is as expected, and trigger correcting * actions if it is not. Many actions are also idempotent (like canceling). 
*/ public class Execution implements AccessExecution, Archiveable<ArchivedExecution>, LogicalSlot.Payload { private static final AtomicReferenceFieldUpdater<Execution, ExecutionState> STATE_UPDATER = AtomicReferenceFieldUpdater.newUpdater(Execution.class, ExecutionState.class, "state"); private static final AtomicReferenceFieldUpdater<Execution, LogicalSlot> ASSIGNED_SLOT_UPDATER = AtomicReferenceFieldUpdater.newUpdater( Execution.class, LogicalSlot.class, "assignedResource"); private static final Logger LOG = ExecutionGraph.LOG; private static final int NUM_CANCEL_CALL_TRIES = 3; private static final int NUM_STOP_CALL_TRIES = 3; // -------------------------------------------------------------------------------------------- /** The executor which is used to execute futures. */ private final Executor executor; /** The execution vertex whose task this execution executes. */ private final ExecutionVertex vertex; /** The unique ID marking the specific execution instant of the task. */ private final ExecutionAttemptID attemptId; /** Gets the global modification version of the execution graph when this execution was created. * This version is bumped in the ExecutionGraph whenever a global failover happens. It is used * to resolve conflicts between concurrent modification by global and local failover actions. */ private final long globalModVersion; /** The timestamps when state transitions occurred, indexed by {@link ExecutionState#ordinal()}. */ private final long[] stateTimestamps; private final int attemptNumber; private final Time rpcTimeout; private final Collection<PartitionInfo> partitionInfos; /** A future that completes once the Execution reaches a terminal ExecutionState. */ private final CompletableFuture<ExecutionState> terminalStateFuture; private final CompletableFuture<?> releaseFuture; private final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture; private volatile ExecutionState state = CREATED; private volatile LogicalSlot assignedResource; private volatile Throwable failureCause; // once assigned, never changes /** Information to restore the task on recovery, such as checkpoint id and task state snapshot. */ @Nullable private volatile JobManagerTaskRestore taskRestore; /** This field holds the allocation id once it was assigned successfully. */ @Nullable private volatile AllocationID assignedAllocationID; // ------------------------ Accumulators & Metrics ------------------------ /** Lock for updating the accumulators atomically. * Prevents final accumulators to be overwritten by partial accumulators on a late heartbeat. */ private final Object accumulatorLock = new Object(); /* Continuously updated map of user-defined accumulators */ private volatile Map<String, Accumulator<?, ?>> userAccumulators; private volatile IOMetrics ioMetrics; private Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions; // -------------------------------------------------------------------------------------------- /** * Creates a new Execution attempt. * * @param executor * The executor used to dispatch callbacks from futures and asynchronous RPC calls. * @param vertex * The execution vertex to which this Execution belongs * @param attemptNumber * The execution attempt number. * @param globalModVersion * The global modification version of the execution graph when this execution was created * @param startTimestamp * The timestamp that marks the creation of this Execution * @param rpcTimeout * The rpcTimeout for RPC calls like deploy/cancel/stop. 
*/ public Execution( Executor executor, ExecutionVertex vertex, int attemptNumber, long globalModVersion, long startTimestamp, Time rpcTimeout) { this.executor = checkNotNull(executor); this.vertex = checkNotNull(vertex); this.attemptId = new ExecutionAttemptID(); this.rpcTimeout = checkNotNull(rpcTimeout); this.globalModVersion = globalModVersion; this.attemptNumber = attemptNumber; this.stateTimestamps = new long[ExecutionState.values().length]; markTimestamp(CREATED, startTimestamp); this.partitionInfos = new ArrayList<>(16); this.producedPartitions = Collections.emptyMap(); this.terminalStateFuture = new CompletableFuture<>(); this.releaseFuture = new CompletableFuture<>(); this.taskManagerLocationFuture = new CompletableFuture<>(); this.assignedResource = null; } // -------------------------------------------------------------------------------------------- // Properties // -------------------------------------------------------------------------------------------- public ExecutionVertex getVertex() { return vertex; } @Override public ExecutionAttemptID getAttemptId() { return attemptId; } @Override public int getAttemptNumber() { return attemptNumber; } @Override public ExecutionState getState() { return state; } @Nullable public AllocationID getAssignedAllocationID() { return assignedAllocationID; } /** * Gets the global modification version of the execution graph when this execution was created. * * <p>This version is bumped in the ExecutionGraph whenever a global failover happens. It is used * to resolve conflicts between concurrent modification by global and local failover actions. */ public long getGlobalModVersion() { return globalModVersion; } public CompletableFuture<TaskManagerLocation> getTaskManagerLocationFuture() { return taskManagerLocationFuture; } public LogicalSlot getAssignedResource() { return assignedResource; } public Optional<ResultPartitionDeploymentDescriptor> getResultPartitionDeploymentDescriptor( IntermediateResultPartitionID id) { return Optional.ofNullable(producedPartitions.get(id)); } /** * Tries to assign the given slot to the execution. The assignment works only if the * Execution is in state SCHEDULED. Returns true, if the resource could be assigned. * * @param logicalSlot to assign to this execution * @return true if the slot could be assigned to the execution, otherwise false */ @VisibleForTesting boolean tryAssignResource(final LogicalSlot logicalSlot) { assertRunningInJobMasterMainThread(); checkNotNull(logicalSlot); // only allow to set the assigned resource in state SCHEDULED or CREATED // note: we also accept resource assignment when being in state CREATED for testing purposes if (state == SCHEDULED || state == CREATED) { if (ASSIGNED_SLOT_UPDATER.compareAndSet(this, null, logicalSlot)) { if (logicalSlot.tryAssignPayload(this)) { // check for concurrent modification (e.g. 
cancelling call) if ((state == SCHEDULED || state == CREATED) && !taskManagerLocationFuture.isDone()) { taskManagerLocationFuture.complete(logicalSlot.getTaskManagerLocation()); assignedAllocationID = logicalSlot.getAllocationId(); return true; } else { // free assigned resource and return false ASSIGNED_SLOT_UPDATER.set(this, null); return false; } } else { ASSIGNED_SLOT_UPDATER.set(this, null); return false; } } else { // the slot already has another slot assigned return false; } } else { // do not allow resource assignment if we are not in state SCHEDULED return false; } } public InputSplit getNextInputSplit() { final LogicalSlot slot = this.getAssignedResource(); final String host = slot != null ? slot.getTaskManagerLocation().getHostname() : null; return this.vertex.getNextInputSplit(host); } @Override public TaskManagerLocation getAssignedResourceLocation() { // returns non-null only when a location is already assigned final LogicalSlot currentAssignedResource = assignedResource; return currentAssignedResource != null ? currentAssignedResource.getTaskManagerLocation() : null; } public Throwable getFailureCause() { return failureCause; } @Override public String getFailureCauseAsString() { return ExceptionUtils.stringifyException(getFailureCause()); } @Override public long[] getStateTimestamps() { return stateTimestamps; } @Override public long getStateTimestamp(ExecutionState state) { return this.stateTimestamps[state.ordinal()]; } public boolean isFinished() { return state.isTerminal(); } @Nullable public JobManagerTaskRestore getTaskRestore() { return taskRestore; } /** * Sets the initial state for the execution. The serialized state is then shipped via the * {@link TaskDeploymentDescriptor} to the TaskManagers. * * @param taskRestore information to restore the state */ public void setInitialState(@Nullable JobManagerTaskRestore taskRestore) { this.taskRestore = taskRestore; } /** * Gets a future that completes once the task execution reaches a terminal state. * The future will be completed with specific state that the execution reached. * This future is always completed from the job master's main thread. * * @return A future which is completed once the execution reaches a terminal state */ @Override public CompletableFuture<ExecutionState> getTerminalStateFuture() { return terminalStateFuture; } /** * Gets the release future which is completed once the execution reaches a terminal * state and the assigned resource has been released. * This future is always completed from the job master's main thread. * * @return A future which is completed once the assigned resource has been released */ public CompletableFuture<?> getReleaseFuture() { return releaseFuture; } // -------------------------------------------------------------------------------------------- // Actions // -------------------------------------------------------------------------------------------- public CompletableFuture<Void> scheduleForExecution() { final ExecutionGraph executionGraph = getVertex().getExecutionGraph(); final SlotProvider resourceProvider = executionGraph.getSlotProvider(); final boolean allowQueued = executionGraph.isQueuedSchedulingAllowed(); return scheduleForExecution( resourceProvider, allowQueued, LocationPreferenceConstraint.ANY, Collections.emptySet()); } /** * NOTE: This method only throws exceptions if it is in an illegal state to be scheduled, or if the tasks needs * to be scheduled immediately and no resource is available. 
If the task is accepted by the schedule, any * error sets the vertex state to failed and triggers the recovery logic. * * @param slotProvider The slot provider to use to allocate slot for this execution attempt. * @param queued Flag to indicate whether the scheduler may queue this task if it cannot * immediately deploy it. * @param locationPreferenceConstraint constraint for the location preferences * @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph. * Can be empty if the allocation ids are not required for scheduling. * @return Future which is completed once the Execution has been deployed */ public CompletableFuture<Void> scheduleForExecution( SlotProvider slotProvider, boolean queued, LocationPreferenceConstraint locationPreferenceConstraint, @Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds) { assertRunningInJobMasterMainThread(); final ExecutionGraph executionGraph = vertex.getExecutionGraph(); final Time allocationTimeout = executionGraph.getAllocationTimeout(); try { final CompletableFuture<Execution> allocationFuture = allocateResourcesForExecution( slotProvider, queued, locationPreferenceConstraint, allPreviousExecutionGraphAllocationIds, allocationTimeout); final CompletableFuture<Void> deploymentFuture; if (allocationFuture.isDone() || queued) { deploymentFuture = allocationFuture.thenRun(ThrowingRunnable.unchecked(this::deploy)); } else { deploymentFuture = FutureUtils.completedExceptionally( new IllegalArgumentException("The slot allocation future has not been completed yet.")); } deploymentFuture.whenComplete( (Void ignored, Throwable failure) -> { if (failure != null) { final Throwable stripCompletionException = ExceptionUtils.stripCompletionException(failure); final Throwable schedulingFailureCause; if (stripCompletionException instanceof TimeoutException) { schedulingFailureCause = new NoResourceAvailableException( "Could not allocate enough slots within timeout of " + allocationTimeout + " to run the job. " + "Please make sure that the cluster has enough resources."); } else { schedulingFailureCause = stripCompletionException; } markFailed(schedulingFailureCause); } }); return deploymentFuture; } catch (IllegalExecutionStateException e) { return FutureUtils.completedExceptionally(e); } } /** * Allocates resources for the execution. * * <p>Allocates following resources: * <ol> * <li>slot obtained from the slot provider</li> * <li>registers produced partitions with the {@link org.apache.flink.runtime.shuffle.ShuffleMaster}</li> * </ol> * * @param slotProvider to obtain a new slot from * @param queued if the allocation can be queued * @param locationPreferenceConstraint constraint for the location preferences * @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph. * Can be empty if the allocation ids are not required for scheduling. * @param allocationTimeout rpcTimeout for allocating a new slot * @return Future which is completed with this execution once the slot has been assigned * or with an exception if an error occurred. 
*/ CompletableFuture<Execution> allocateResourcesForExecution( SlotProvider slotProvider, boolean queued, LocationPreferenceConstraint locationPreferenceConstraint, @Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds, Time allocationTimeout) { return allocateAndAssignSlotForExecution( slotProvider, queued, locationPreferenceConstraint, allPreviousExecutionGraphAllocationIds, allocationTimeout) .thenCompose(slot -> registerProducedPartitions(slot.getTaskManagerLocation())); } /** * Allocates and assigns a slot obtained from the slot provider to the execution. * * @param slotProvider to obtain a new slot from * @param queued if the allocation can be queued * @param locationPreferenceConstraint constraint for the location preferences * @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph. * Can be empty if the allocation ids are not required for scheduling. * @param allocationTimeout rpcTimeout for allocating a new slot * @return Future which is completed with the allocated slot once it has been assigned * or with an exception if an error occurred. */ private CompletableFuture<LogicalSlot> allocateAndAssignSlotForExecution( SlotProvider slotProvider, boolean queued, LocationPreferenceConstraint locationPreferenceConstraint, @Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds, Time allocationTimeout) { checkNotNull(slotProvider); assertRunningInJobMasterMainThread(); final SlotSharingGroup sharingGroup = vertex.getJobVertex().getSlotSharingGroup(); final CoLocationConstraint locationConstraint = vertex.getLocationConstraint(); // sanity check if (locationConstraint != null && sharingGroup == null) { throw new IllegalStateException( "Trying to schedule with co-location constraint but without slot sharing allowed."); } // this method only works if the execution is in the state 'CREATED' if (transitionState(CREATED, SCHEDULED)) { final SlotSharingGroupId slotSharingGroupId = sharingGroup != null ? sharingGroup.getSlotSharingGroupId() : null; ScheduledUnit toSchedule = locationConstraint == null ? new ScheduledUnit(this, slotSharingGroupId) : new ScheduledUnit(this, slotSharingGroupId, locationConstraint); // try to extract previous allocation ids, if applicable, so that we can reschedule to the same slot ExecutionVertex executionVertex = getVertex(); AllocationID lastAllocation = executionVertex.getLatestPriorAllocation(); Collection<AllocationID> previousAllocationIDs = lastAllocation != null ? 
Collections.singletonList(lastAllocation) : Collections.emptyList(); // calculate the preferred locations final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture = calculatePreferredLocations(locationPreferenceConstraint); final SlotRequestId slotRequestId = new SlotRequestId(); final CompletableFuture<LogicalSlot> logicalSlotFuture = preferredLocationsFuture.thenCompose( (Collection<TaskManagerLocation> preferredLocations) -> slotProvider.allocateSlot( slotRequestId, toSchedule, new SlotProfile( ResourceProfile.UNKNOWN, preferredLocations, previousAllocationIDs, allPreviousExecutionGraphAllocationIds), queued, allocationTimeout)); // register call back to cancel slot request in case that the execution gets canceled releaseFuture.whenComplete( (Object ignored, Throwable throwable) -> { if (logicalSlotFuture.cancel(false)) { slotProvider.cancelSlotRequest( slotRequestId, slotSharingGroupId, new FlinkException("Execution " + this + " was released.")); } }); // This forces calls to the slot pool back into the main thread, for normal and exceptional completion return logicalSlotFuture.handle( (LogicalSlot logicalSlot, Throwable failure) -> { if (failure != null) { throw new CompletionException(failure); } if (tryAssignResource(logicalSlot)) { return logicalSlot; } else { // release the slot logicalSlot.releaseSlot(new FlinkException("Could not assign logical slot to execution " + this + '.')); throw new CompletionException( new FlinkException( "Could not assign slot " + logicalSlot + " to execution " + this + " because it has already been assigned ")); } }); } else { // call race, already deployed, or already done throw new IllegalExecutionStateException(this, CREATED, state); } } @VisibleForTesting CompletableFuture<Execution> registerProducedPartitions(TaskManagerLocation location) { assertRunningInJobMasterMainThread(); return FutureUtils.thenApplyAsyncIfNotDone( registerProducedPartitions(vertex, location, attemptId), vertex.getExecutionGraph().getJobMasterMainThreadExecutor(), producedPartitionsCache -> { producedPartitions = producedPartitionsCache; startTrackingPartitions(location.getResourceID(), producedPartitionsCache.values()); return this; }); } @VisibleForTesting static CompletableFuture<Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor>> registerProducedPartitions( ExecutionVertex vertex, TaskManagerLocation location, ExecutionAttemptID attemptId) { ProducerDescriptor producerDescriptor = ProducerDescriptor.create(location, attemptId); boolean lazyScheduling = vertex.getExecutionGraph().getScheduleMode().allowLazyDeployment(); Collection<IntermediateResultPartition> partitions = vertex.getProducedPartitions().values(); Collection<CompletableFuture<ResultPartitionDeploymentDescriptor>> partitionRegistrations = new ArrayList<>(partitions.size()); for (IntermediateResultPartition partition : partitions) { PartitionDescriptor partitionDescriptor = PartitionDescriptor.from(partition); int maxParallelism = getPartitionMaxParallelism(partition); CompletableFuture<? 
extends ShuffleDescriptor> shuffleDescriptorFuture = vertex .getExecutionGraph() .getShuffleMaster() .registerPartitionWithProducer(partitionDescriptor, producerDescriptor); final boolean releasePartitionOnConsumption = vertex.getExecutionGraph().isForcePartitionReleaseOnConsumption() || !partitionDescriptor.getPartitionType().isBlocking(); CompletableFuture<ResultPartitionDeploymentDescriptor> partitionRegistration = shuffleDescriptorFuture .thenApply(shuffleDescriptor -> new ResultPartitionDeploymentDescriptor( partitionDescriptor, shuffleDescriptor, maxParallelism, lazyScheduling, releasePartitionOnConsumption ? ShuffleDescriptor.ReleaseType.AUTO : ShuffleDescriptor.ReleaseType.MANUAL)); partitionRegistrations.add(partitionRegistration); } return FutureUtils.combineAll(partitionRegistrations).thenApply(rpdds -> { Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions = new LinkedHashMap<>(partitions.size()); rpdds.forEach(rpdd -> producedPartitions.put(rpdd.getPartitionId(), rpdd)); return producedPartitions; }); } private static int getPartitionMaxParallelism(IntermediateResultPartition partition) { // TODO consumers.isEmpty() only exists for test, currently there has to be exactly one consumer in real jobs! final List<List<ExecutionEdge>> consumers = partition.getConsumers(); int maxParallelism = KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM; if (!consumers.isEmpty()) { List<ExecutionEdge> consumer = consumers.get(0); ExecutionJobVertex consumerVertex = consumer.get(0).getTarget().getJobVertex(); maxParallelism = consumerVertex.getMaxParallelism(); } return maxParallelism; } /** * Deploys the execution to the previously assigned resource. * * @throws JobException if the execution cannot be deployed to the assigned resource */ public void deploy() throws JobException { assertRunningInJobMasterMainThread(); final LogicalSlot slot = assignedResource; checkNotNull(slot, "In order to deploy the execution we first have to assign a resource via tryAssignResource."); // Check if the TaskManager died in the meantime // This only speeds up the response to TaskManagers failing concurrently to deployments. // The more general check is the rpcTimeout of the deployment call if (!slot.isAlive()) { throw new JobException("Target slot (TaskManager) for deployment is no longer alive."); } // make sure exactly one deployment call happens from the correct state // note: the transition from CREATED to DEPLOYING is for testing purposes only ExecutionState previous = this.state; if (previous == SCHEDULED || previous == CREATED) { if (!transitionState(previous, DEPLOYING)) { // race condition, someone else beat us to the deploying call. // this should actually not happen and indicates a race somewhere else throw new IllegalStateException("Cannot deploy task: Concurrent deployment call race."); } } else { // vertex may have been cancelled, or it was already scheduled throw new IllegalStateException("The vertex must be in CREATED or SCHEDULED state to be deployed. Found state " + previous); } if (this != slot.getPayload()) { throw new IllegalStateException( String.format("The execution %s has not been assigned to the assigned slot.", this)); } try { // race double check, did we fail/cancel and do we need to release the slot? 
if (this.state != DEPLOYING) { slot.releaseSlot(new FlinkException("Actual state of execution " + this + " (" + state + ") does not match expected state DEPLOYING.")); return; } if (LOG.isInfoEnabled()) { LOG.info(String.format("Deploying %s (attempt #%d) to %s", vertex.getTaskNameWithSubtaskIndex(), attemptNumber, getAssignedResourceLocation())); } final TaskDeploymentDescriptor deployment = TaskDeploymentDescriptorFactory .fromExecutionVertex(vertex, attemptNumber) .createDeploymentDescriptor( slot.getAllocationId(), slot.getPhysicalSlotNumber(), taskRestore, producedPartitions.values()); // null taskRestore to let it be GC'ed taskRestore = null; final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway(); final ComponentMainThreadExecutor jobMasterMainThreadExecutor = vertex.getExecutionGraph().getJobMasterMainThreadExecutor(); // We run the submission in the future executor so that the serialization of large TDDs does not block // the main thread and sync back to the main thread once submission is completed. CompletableFuture.supplyAsync(() -> taskManagerGateway.submitTask(deployment, rpcTimeout), executor) .thenCompose(Function.identity()) .whenCompleteAsync( (ack, failure) -> { // only respond to the failure case if (failure != null) { if (failure instanceof TimeoutException) { String taskname = vertex.getTaskNameWithSubtaskIndex() + " (" + attemptId + ')'; markFailed(new Exception( "Cannot deploy task " + taskname + " - TaskManager (" + getAssignedResourceLocation() + ") not responding after a rpcTimeout of " + rpcTimeout, failure)); } else { markFailed(failure); } } }, jobMasterMainThreadExecutor); } catch (Throwable t) { markFailed(t); ExceptionUtils.rethrow(t); } } public void cancel() { // depending on the previous state, we go directly to cancelled (no cancel call necessary) // -- or to canceling (cancel call needs to be sent to the task manager) // because of several possibly previous states, we need to again loop until we make a // successful atomic state transition assertRunningInJobMasterMainThread(); while (true) { ExecutionState current = this.state; if (current == CANCELING || current == CANCELED) { // already taken care of, no need to cancel again return; } // these two are the common cases where we need to send a cancel call else if (current == RUNNING || current == DEPLOYING) { // try to transition to canceling, if successful, send the cancel call if (startCancelling(NUM_CANCEL_CALL_TRIES)) { return; } // else: fall through the loop } else if (current == FINISHED || current == FAILED) { // nothing to do any more. finished/failed before it could be cancelled. 
// in any case, the task is removed from the TaskManager already return; } else if (current == CREATED || current == SCHEDULED) { // from here, we can directly switch to cancelled, because no task has been deployed if (cancelAtomically()) { return; } // else: fall through the loop } else { throw new IllegalStateException(current.name()); } } } public CompletableFuture<?> suspend() { switch(state) { case RUNNING: case DEPLOYING: case CREATED: case SCHEDULED: if (!cancelAtomically()) { throw new IllegalStateException( String.format("Could not directly go to %s from %s.", CANCELED.name(), state.name())); } break; case CANCELING: completeCancelling(); break; case FINISHED: case FAILED: case CANCELED: break; default: throw new IllegalStateException(state.name()); } return releaseFuture; } private void scheduleConsumer(ExecutionVertex consumerVertex) { try { final ExecutionGraph executionGraph = consumerVertex.getExecutionGraph(); consumerVertex.scheduleForExecution( executionGraph.getSlotProvider(), executionGraph.isQueuedSchedulingAllowed(), LocationPreferenceConstraint.ANY, // there must be at least one known location Collections.emptySet()); } catch (Throwable t) { consumerVertex.fail(new IllegalStateException("Could not schedule consumer " + "vertex " + consumerVertex, t)); } } void scheduleOrUpdateConsumers(List<List<ExecutionEdge>> allConsumers) { assertRunningInJobMasterMainThread(); final int numConsumers = allConsumers.size(); if (numConsumers > 1) { fail(new IllegalStateException("Currently, only a single consumer group per partition is supported.")); } else if (numConsumers == 0) { return; } for (ExecutionEdge edge : allConsumers.get(0)) { final ExecutionVertex consumerVertex = edge.getTarget(); final Execution consumer = consumerVertex.getCurrentExecutionAttempt(); final ExecutionState consumerState = consumer.getState(); // ---------------------------------------------------------------- // Consumer is created => try to schedule it and the partition info // is known during deployment // ---------------------------------------------------------------- if (consumerState == CREATED) { // Schedule the consumer vertex if its inputs constraint is satisfied, otherwise skip the scheduling. // A shortcut of input constraint check is added for InputDependencyConstraint.ANY since // at least one of the consumer vertex's inputs is consumable here. This is to avoid the // O(N) complexity introduced by input constraint check for InputDependencyConstraint.ANY, // as we do not want the default scheduling performance to be affected. 
if (consumerVertex.getInputDependencyConstraint() == InputDependencyConstraint.ANY || consumerVertex.checkInputDependencyConstraints()) { scheduleConsumer(consumerVertex); } } // ---------------------------------------------------------------- // Consumer is running => send update message now // Consumer is deploying => cache the partition info which would be // sent after switching to running // ---------------------------------------------------------------- else if (consumerState == DEPLOYING || consumerState == RUNNING) { final PartitionInfo partitionInfo = createPartitionInfo(edge); if (consumerState == DEPLOYING) { consumerVertex.cachePartitionInfo(partitionInfo); } else { consumer.sendUpdatePartitionInfoRpcCall(Collections.singleton(partitionInfo)); } } } } private static PartitionInfo createPartitionInfo(ExecutionEdge executionEdge) { IntermediateDataSetID intermediateDataSetID = executionEdge.getSource().getIntermediateResult().getId(); ShuffleDescriptor shuffleDescriptor = getConsumedPartitionShuffleDescriptor(executionEdge, false); return new PartitionInfo(intermediateDataSetID, shuffleDescriptor); } /** * This method fails the vertex due to an external condition. The task will move to state FAILED. * If the task was in state RUNNING or DEPLOYING before, it will send a cancel call to the TaskManager. * * @param t The exception that caused the task to fail. */ @Override public void fail(Throwable t) { processFail(t, false); } /** * Request a stack trace sample from the task of this execution. * * @param sampleId of the stack trace sample * @param numSamples the sample should contain * @param delayBetweenSamples to wait * @param maxStackTraceDepth of the samples * @param timeout until the request times out * @return Future stack trace sample response */ public CompletableFuture<StackTraceSampleResponse> requestStackTraceSample( int sampleId, int numSamples, Time delayBetweenSamples, int maxStackTraceDepth, Time timeout) { final LogicalSlot slot = assignedResource; if (slot != null) { final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway(); return taskManagerGateway.requestStackTraceSample( attemptId, sampleId, numSamples, delayBetweenSamples, maxStackTraceDepth, timeout); } else { return FutureUtils.completedExceptionally(new Exception("The execution has no slot assigned.")); } } /** * Notify the task of this execution about a completed checkpoint. * * @param checkpointId of the completed checkpoint * @param timestamp of the completed checkpoint */ public void notifyCheckpointComplete(long checkpointId, long timestamp) { final LogicalSlot slot = assignedResource; if (slot != null) { final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway(); taskManagerGateway.notifyCheckpointComplete(attemptId, getVertex().getJobId(), checkpointId, timestamp); } else { LOG.debug("The execution has no slot assigned. This indicates that the execution is " + "no longer running."); } } /** * Trigger a new checkpoint on the task of this execution. * * @param checkpointId of th checkpoint to trigger * @param timestamp of the checkpoint to trigger * @param checkpointOptions of the checkpoint to trigger */ public void triggerCheckpoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions) { triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, false); } /** * Trigger a new checkpoint on the task of this execution. 
* * @param checkpointId of th checkpoint to trigger * @param timestamp of the checkpoint to trigger * @param checkpointOptions of the checkpoint to trigger * @param advanceToEndOfEventTime Flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline * to fire any registered event-time timers */ public void triggerSynchronousSavepoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) { triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime); } private void triggerCheckpointHelper(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) { final CheckpointType checkpointType = checkpointOptions.getCheckpointType(); if (advanceToEndOfEventTime && !(checkpointType.isSynchronous() && checkpointType.isSavepoint())) { throw new IllegalArgumentException("Only synchronous savepoints are allowed to advance the watermark to MAX."); } final LogicalSlot slot = assignedResource; if (slot != null) { final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway(); taskManagerGateway.triggerCheckpoint(attemptId, getVertex().getJobId(), checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime); } else { LOG.debug("The execution has no slot assigned. This indicates that the execution is no longer running."); } } // -------------------------------------------------------------------------------------------- // Callbacks // -------------------------------------------------------------------------------------------- /** * This method marks the task as failed, but will make no attempt to remove task execution from the task manager. * It is intended for cases where the task is known not to be running, or then the TaskManager reports failure * (in which case it has already removed the task). * * @param t The exception that caused the task to fail. */ void markFailed(Throwable t) { processFail(t, true); } void markFailed(Throwable t, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) { processFail(t, true, userAccumulators, metrics); } @VisibleForTesting void markFinished() { markFinished(null, null); } void markFinished(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) { assertRunningInJobMasterMainThread(); // this call usually comes during RUNNING, but may also come while still in deploying (very fast tasks!) while (true) { ExecutionState current = this.state; if (current == RUNNING || current == DEPLOYING) { if (transitionState(current, FINISHED)) { try { for (IntermediateResultPartition finishedPartition : getVertex().finishAllBlockingPartitions()) { IntermediateResultPartition[] allPartitions = finishedPartition .getIntermediateResult().getPartitions(); for (IntermediateResultPartition partition : allPartitions) { scheduleOrUpdateConsumers(partition.getConsumers()); } } updateAccumulatorsAndMetrics(userAccumulators, metrics); releaseAssignedResource(null); vertex.getExecutionGraph().deregisterExecution(this); } finally { vertex.executionFinished(this); } return; } } else if (current == CANCELING) { // we sent a cancel call, and the task manager finished before it arrived. 
We // will never get a CANCELED call back from the job manager completeCancelling(userAccumulators, metrics); return; } else if (current == CANCELED || current == FAILED) { if (LOG.isDebugEnabled()) { LOG.debug("Task FINISHED, but concurrently went to state " + state); } return; } else { // this should not happen, we need to fail this markFailed(new Exception("Vertex received FINISHED message while being in state " + state)); return; } } } private boolean cancelAtomically() { if (startCancelling(0)) { completeCancelling(); return true; } else { return false; } } private boolean startCancelling(int numberCancelRetries) { if (transitionState(state, CANCELING)) { taskManagerLocationFuture.cancel(false); sendCancelRpcCall(numberCancelRetries); return true; } else { return false; } } void completeCancelling() { completeCancelling(null, null); } void completeCancelling(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) { // the taskmanagers can themselves cancel tasks without an external trigger, if they find that the // network stack is canceled (for example by a failing / canceling receiver or sender // this is an artifact of the old network runtime, but for now we need to support task transitions // from running directly to canceled while (true) { ExecutionState current = this.state; if (current == CANCELED) { return; } else if (current == CANCELING || current == RUNNING || current == DEPLOYING) { updateAccumulatorsAndMetrics(userAccumulators, metrics); if (transitionState(current, CANCELED)) { finishCancellation(); return; } // else fall through the loop } else { // failing in the meantime may happen and is no problem. // anything else is a serious problem !!! if (current != FAILED) { String message = String.format("Asynchronous race: Found %s in state %s after successful cancel call.", vertex.getTaskNameWithSubtaskIndex(), state); LOG.error(message); vertex.getExecutionGraph().failGlobal(new Exception(message)); } return; } } } private void finishCancellation() { releaseAssignedResource(new FlinkException("Execution " + this + " was cancelled.")); vertex.getExecutionGraph().deregisterExecution(this); // release partitions on TM in case the Task finished while we where already CANCELING stopTrackingAndReleasePartitions(); } void cachePartitionInfo(PartitionInfo partitionInfo) { partitionInfos.add(partitionInfo); } private void sendPartitionInfos() { if (!partitionInfos.isEmpty()) { sendUpdatePartitionInfoRpcCall(new ArrayList<>(partitionInfos)); partitionInfos.clear(); } } // -------------------------------------------------------------------------------------------- // Internal Actions // -------------------------------------------------------------------------------------------- private boolean processFail(Throwable t, boolean isCallback) { return processFail(t, isCallback, null, null); } private boolean processFail(Throwable t, boolean isCallback, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) { // damn, we failed. This means only that we keep our books and notify our parent JobExecutionVertex // the actual computation on the task manager is cleaned up by the TaskManager that noticed the failure // we may need to loop multiple times (in the presence of concurrent calls) in order to // atomically switch to failed assertRunningInJobMasterMainThread(); while (true) { ExecutionState current = this.state; if (current == FAILED) { // already failed. 
It is enough to remember once that we failed (its sad enough) return false; } if (current == CANCELED || current == FINISHED) { // we are already aborting or are already aborted or we are already finished if (LOG.isDebugEnabled()) { LOG.debug("Ignoring transition of vertex {} to {} while being {}.", getVertexWithAttempt(), FAILED, current); } return false; } if (current == CANCELING) { completeCancelling(userAccumulators, metrics); return false; } if (transitionState(current, FAILED, t)) { // success (in a manner of speaking) this.failureCause = t; updateAccumulatorsAndMetrics(userAccumulators, metrics); releaseAssignedResource(t); vertex.getExecutionGraph().deregisterExecution(this); stopTrackingAndReleasePartitions(); if (!isCallback && (current == RUNNING || current == DEPLOYING)) { if (LOG.isDebugEnabled()) { LOG.debug("Sending out cancel request, to remove task execution from TaskManager."); } try { if (assignedResource != null) { sendCancelRpcCall(NUM_CANCEL_CALL_TRIES); } } catch (Throwable tt) { // no reason this should ever happen, but log it to be safe LOG.error("Error triggering cancel call while marking task {} as failed.", getVertex().getTaskNameWithSubtaskIndex(), tt); } } // leave the loop return true; } } } boolean switchToRunning() { if (transitionState(DEPLOYING, RUNNING)) { sendPartitionInfos(); return true; } else { // something happened while the call was in progress. // it can mean: // - canceling, while deployment was in progress. state is now canceling, or canceled, if the response overtook // - finishing (execution and finished call overtook the deployment answer, which is possible and happens for fast tasks) // - failed (execution, failure, and failure message overtook the deployment answer) ExecutionState currentState = this.state; if (currentState == FINISHED || currentState == CANCELED) { // do nothing, the task was really fast (nice) // or it was canceled really fast } else if (currentState == CANCELING || currentState == FAILED) { if (LOG.isDebugEnabled()) { // this log statement is guarded because the 'getVertexWithAttempt()' method // performs string concatenations LOG.debug("Concurrent canceling/failing of {} while deployment was in progress.", getVertexWithAttempt()); } sendCancelRpcCall(NUM_CANCEL_CALL_TRIES); } else { String message = String.format("Concurrent unexpected state transition of task %s to %s while deployment was in progress.", getVertexWithAttempt(), currentState); if (LOG.isDebugEnabled()) { LOG.debug(message); } // undo the deployment sendCancelRpcCall(NUM_CANCEL_CALL_TRIES); // record the failure markFailed(new Exception(message)); } return false; } } /** * This method sends a CancelTask message to the instance of the assigned slot. * * <p>The sending is tried up to NUM_CANCEL_CALL_TRIES times. 
*/ private void sendCancelRpcCall(int numberRetries) { final LogicalSlot slot = assignedResource; if (slot != null) { final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway(); final ComponentMainThreadExecutor jobMasterMainThreadExecutor = getVertex().getExecutionGraph().getJobMasterMainThreadExecutor(); CompletableFuture<Acknowledge> cancelResultFuture = FutureUtils.retry( () -> taskManagerGateway.cancelTask(attemptId, rpcTimeout), numberRetries, jobMasterMainThreadExecutor); cancelResultFuture.whenComplete( (ack, failure) -> { if (failure != null) { fail(new Exception("Task could not be canceled.", failure)); } }); } } private void startTrackingPartitions(final ResourceID taskExecutorId, final Collection<ResultPartitionDeploymentDescriptor> partitions) { PartitionTracker partitionTracker = vertex.getExecutionGraph().getPartitionTracker(); for (ResultPartitionDeploymentDescriptor partition : partitions) { partitionTracker.startTrackingPartition( taskExecutorId, partition); } } void stopTrackingAndReleasePartitions() { LOG.info("Discarding the results produced by task execution {}.", attemptId); if (producedPartitions != null && producedPartitions.size() > 0) { final PartitionTracker partitionTracker = getVertex().getExecutionGraph().getPartitionTracker(); final List<ResultPartitionID> producedPartitionIds = producedPartitions.values().stream() .map(ResultPartitionDeploymentDescriptor::getShuffleDescriptor) .map(ShuffleDescriptor::getResultPartitionID) .collect(Collectors.toList()); partitionTracker.stopTrackingAndReleasePartitions(producedPartitionIds); } } /** * Update the partition infos on the assigned resource. * * @param partitionInfos for the remote task */ private void sendUpdatePartitionInfoRpcCall( final Iterable<PartitionInfo> partitionInfos) { final LogicalSlot slot = assignedResource; if (slot != null) { final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway(); final TaskManagerLocation taskManagerLocation = slot.getTaskManagerLocation(); CompletableFuture<Acknowledge> updatePartitionsResultFuture = taskManagerGateway.updatePartitions(attemptId, partitionInfos, rpcTimeout); updatePartitionsResultFuture.whenCompleteAsync( (ack, failure) -> { // fail if there was a failure if (failure != null) { fail(new IllegalStateException("Update task on TaskManager " + taskManagerLocation + " failed due to:", failure)); } }, getVertex().getExecutionGraph().getJobMasterMainThreadExecutor()); } } /** * Releases the assigned resource and completes the release future * once the assigned resource has been successfully released. 
* * @param cause for the resource release, null if none */ private void releaseAssignedResource(@Nullable Throwable cause) { assertRunningInJobMasterMainThread(); final LogicalSlot slot = assignedResource; if (slot != null) { ComponentMainThreadExecutor jobMasterMainThreadExecutor = getVertex().getExecutionGraph().getJobMasterMainThreadExecutor(); slot.releaseSlot(cause) .whenComplete((Object ignored, Throwable throwable) -> { jobMasterMainThreadExecutor.assertRunningInMainThread(); if (throwable != null) { releaseFuture.completeExceptionally(throwable); } else { releaseFuture.complete(null); } }); } else { // no assigned resource --> we can directly complete the release future releaseFuture.complete(null); } } // -------------------------------------------------------------------------------------------- // Miscellaneous // -------------------------------------------------------------------------------------------- /** * Calculates the preferred locations based on the location preference constraint. * * @param locationPreferenceConstraint constraint for the location preference * @return Future containing the collection of preferred locations. This might not be completed if not all inputs * have been a resource assigned. */ @VisibleForTesting public CompletableFuture<Collection<TaskManagerLocation>> calculatePreferredLocations(LocationPreferenceConstraint locationPreferenceConstraint) { final Collection<CompletableFuture<TaskManagerLocation>> preferredLocationFutures = getVertex().getPreferredLocations(); final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture; switch(locationPreferenceConstraint) { case ALL: preferredLocationsFuture = FutureUtils.combineAll(preferredLocationFutures); break; case ANY: final ArrayList<TaskManagerLocation> completedTaskManagerLocations = new ArrayList<>(preferredLocationFutures.size()); for (CompletableFuture<TaskManagerLocation> preferredLocationFuture : preferredLocationFutures) { if (preferredLocationFuture.isDone() && !preferredLocationFuture.isCompletedExceptionally()) { final TaskManagerLocation taskManagerLocation = preferredLocationFuture.getNow(null); if (taskManagerLocation == null) { throw new FlinkRuntimeException("TaskManagerLocationFuture was completed with null. 
This indicates a programming bug."); } completedTaskManagerLocations.add(taskManagerLocation); } } preferredLocationsFuture = CompletableFuture.completedFuture(completedTaskManagerLocations); break; default: throw new RuntimeException("Unknown LocationPreferenceConstraint " + locationPreferenceConstraint + '.'); } return preferredLocationsFuture; } private boolean transitionState(ExecutionState currentState, ExecutionState targetState) { return transitionState(currentState, targetState, null); } private boolean transitionState(ExecutionState currentState, ExecutionState targetState, Throwable error) { // sanity check if (currentState.isTerminal()) { throw new IllegalStateException("Cannot leave terminal state " + currentState + " to transition to " + targetState + '.'); } if (STATE_UPDATER.compareAndSet(this, currentState, targetState)) { markTimestamp(targetState); if (error == null) { LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState); } else { LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState, error); } if (targetState.isTerminal()) { // complete the terminal state future terminalStateFuture.complete(targetState); } // make sure that the state transition completes normally. // potential errors (in listeners may not affect the main logic) try { vertex.notifyStateTransition(this, targetState, error); } catch (Throwable t) { LOG.error("Error while notifying execution graph of execution state transition.", t); } return true; } else { return false; } } private void markTimestamp(ExecutionState state) { markTimestamp(state, System.currentTimeMillis()); } private void markTimestamp(ExecutionState state, long timestamp) { this.stateTimestamps[state.ordinal()] = timestamp; } public String getVertexWithAttempt() { return vertex.getTaskNameWithSubtaskIndex() + " - execution #" + attemptNumber; } // ------------------------------------------------------------------------ // Accumulators // ------------------------------------------------------------------------ /** * Update accumulators (discarded when the Execution has already been terminated). * @param userAccumulators the user accumulators */ public void setAccumulators(Map<String, Accumulator<?, ?>> userAccumulators) { synchronized (accumulatorLock) { if (!state.isTerminal()) { this.userAccumulators = userAccumulators; } } } public Map<String, Accumulator<?, ?>> getUserAccumulators() { return userAccumulators; } @Override public StringifiedAccumulatorResult[] getUserAccumulatorsStringified() { Map<String, OptionalFailure<Accumulator<?, ?>>> accumulators = userAccumulators == null ? 
// ------------------------------------------------------------------------
//  Accumulators
// ------------------------------------------------------------------------

/**
 * Update accumulators (discarded when the Execution has already been terminated).
 * @param userAccumulators the user accumulators
 */
public void setAccumulators(Map<String, Accumulator<?, ?>> userAccumulators) {
	synchronized (accumulatorLock) {
		if (!state.isTerminal()) {
			this.userAccumulators = userAccumulators;
		}
	}
}

public Map<String, Accumulator<?, ?>> getUserAccumulators() {
	return userAccumulators;
}

@Override
public StringifiedAccumulatorResult[] getUserAccumulatorsStringified() {
	Map<String, OptionalFailure<Accumulator<?, ?>>> accumulators =
		userAccumulators == null ?
			null :
			userAccumulators.entrySet()
				.stream()
				.collect(Collectors.toMap(Map.Entry::getKey, entry -> OptionalFailure.of(entry.getValue())));
	return StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulators);
}

@Override
public int getParallelSubtaskIndex() {
	return getVertex().getParallelSubtaskIndex();
}

@Override
public IOMetrics getIOMetrics() {
	return ioMetrics;
}

private void updateAccumulatorsAndMetrics(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
	if (userAccumulators != null) {
		synchronized (accumulatorLock) {
			this.userAccumulators = userAccumulators;
		}
	}
	if (metrics != null) {
		this.ioMetrics = metrics;
	}
}

// ------------------------------------------------------------------------
//  Standard utilities
// ------------------------------------------------------------------------

@Override
public String toString() {
	final LogicalSlot slot = assignedResource;

	return String.format("Attempt #%d (%s) @ %s - [%s]", attemptNumber, vertex.getTaskNameWithSubtaskIndex(),
			(slot == null ? "(unassigned)" : slot), state);
}

@Override
public ArchivedExecution archive() {
	return new ArchivedExecution(this);
}

private void assertRunningInJobMasterMainThread() {
	vertex.getExecutionGraph().assertRunningInJobMasterMainThread();
}
}
shaoxuan-wang/flink
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
Java
apache-2.0
59232
package water;

import java.io.*;
import java.lang.reflect.Array;
import java.net.*;
import java.nio.*;
import java.nio.channels.*;
import java.util.ArrayList;
import java.util.Random;

import water.network.SocketChannelUtils;
import water.util.Log;
import water.util.StringUtils;
import water.util.TwoDimTable;

/** A ByteBuffer backed mixed Input/Output streaming class, using Iced serialization.
 *
 *  Reads/writes empty/fill the ByteBuffer as needed.  When it is empty/full
 *  we go to the ByteChannel for more/less.  Because DirectByteBuffers are
 *  expensive to make, we keep a few pooled.
 *
 *  When talking to a remote H2O node, switches between UDP and TCP transport
 *  protocols depending on the message size.  The TypeMap is not included, and
 *  is assumed to exist on the remote H2O node.
 *
 *  Supports direct NIO FileChannel read/write to disk, used during user-mode
 *  swapping.  The TypeMap is not included on write, and is assumed to be the
 *  current map on read.
 *
 *  Supports read/write from a byte[] - and this defeats the purpose of a
 *  Streaming protocol, but is frequently handy for small structures.  The
 *  TypeMap is not included, and is assumed to be the current map on read.
 *
 *  Supports read/write from a standard Stream, which by default assumes it is
 *  NOT going in and out of the same Cloud, so the TypeMap IS included.  The
 *  serialized object can only be read back into the same minor version of H2O.
 *
 *  @author <a href="mailto:cliffc@h2o.ai"></a>
 */
public final class AutoBuffer {
  // Maximum size of an array we allow to allocate (the value is designed
  // to mimic the behavior of OpenJDK libraries)
  private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

  // The direct ByteBuffer for schlorping data about.
  // Set to null to indicate the AutoBuffer is closed.
  ByteBuffer _bb;
  public String sourceName = "???";

  public boolean isClosed() { return _bb == null ; }

  // The ByteChannel for moving data in or out.  Could be a SocketChannel (for
  // a TCP connection) or a FileChannel (spill-to-disk) or a DatagramChannel
  // (for a UDP connection).  Null on closed AutoBuffers.  Null on initial
  // remote-writing AutoBuffers which are still deciding UDP vs TCP.  Not-null
  // for open AutoBuffers doing file i/o or reading any TCP/UDP or having
  // written at least one buffer to TCP/UDP.
  private Channel _chan;

  // A Stream for moving data in.  Null unless this AutoBuffer is
  // stream-based, in which case the _chan field is null.  This path supports
  // persistence: reading and writing objects from different H2O cluster
  // instances (but exactly the same H2O revision).  The only required
  // similarity is same-classes-same-fields; changes here will probably
  // silently crash.  If the fields are named the same but the semantics
  // differ, then again the behavior is probably a silent crash.
  private InputStream _is;
  private short[] _typeMap; // Mapping from input stream map to current map, or null

  // If we need a SocketChannel, raise the priority so we get the I/O over
  // with.  Do not want to have some TCP socket open, blocking the TCP channel
  // and then have the thread stalled out.  If we raise the priority - be sure
  // to lower it again.  Note this is for TCP channels ONLY, and only because
  // we are blocking another Node with I/O.
  private int _oldPrior = -1;

  // Where to send or receive data via TCP or UDP (choice made as we discover
  // how big the message is); used to lazily create a Channel.  If NULL, then
  // _chan should be a pre-existing Channel, such as a FileChannel.
  final H2ONode _h2o;
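  // Editor's note (illustrative sketch, not part of the original source): the
  // UDP-vs-TCP choice described in the class comment is driven purely by
  // message size -- anything that still fits a single UDP datagram goes out
  // via UDP, bigger payloads fall back to a TCP stream.  A standalone
  // predicate with the same shape, mirroring the MTU constant defined below
  // (the method name is hypothetical):
  private static boolean demoFitsInOneDatagram(int messageBytes) {
    return messageBytes < 1500-8/*UDP packet header size*/;
  }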
  // TRUE for read-mode.  FALSE for write-mode.  Can be flipped for rapid turnaround.
  private boolean _read;

  // TRUE if this AutoBuffer has never advanced past the first "page" of data.
  // The UDP-flavor, port# and task fields are only valid until we read over
  // them when flipping the ByteBuffer to the next chunk of data.  Used in
  // asserts all over the place.
  private boolean _firstPage;

  // Total size written out from 'new' to 'close'.  Only updated when actually
  // reading or writing data, or after close().  For profiling only.
  int _size;
  //int _zeros, _arys;
  // More profiling: start->close msec, plus nano's spent in blocking I/O
  // calls.  The difference between (close-start) and i/o msec is the time the
  // i/o thread spends doing other stuff (e.g. allocating Java objects or
  // (de)serializing).
  long _time_start_ms, _time_close_ms, _time_io_ns;
  // I/O persistence flavor: Value.ICE, NFS, HDFS, S3, TCP.  Used to record I/O time.
  final byte _persist;

  // The assumed max UDP packetsize
  static final int MTU = 1500-8/*UDP packet header size*/;

  // Enable this to test random TCP fails on open or write
  static final Random RANDOM_TCP_DROP = null; //new Random();

  static final java.nio.charset.Charset UTF_8 = java.nio.charset.Charset.forName("UTF-8");

  /** Incoming UDP request.  Make a read-mode AutoBuffer from the open Channel,
   *  figure the originating H2ONode from the first few bytes read. */
  AutoBuffer( DatagramChannel sock ) throws IOException {
    _chan = null;
    _bb = BBP_SML.make();       // Get a small / UDP-sized ByteBuffer
    _read = true;               // Reading by default
    _firstPage = true;
    // Read a packet; can get H2ONode from 'sad'?
    Inet4Address addr = null;
    SocketAddress sad = sock.receive(_bb);
    if( sad instanceof InetSocketAddress ) {
      InetAddress address = ((InetSocketAddress) sad).getAddress();
      if( address instanceof Inet4Address ) {
        addr = (Inet4Address) address;
      }
    }
    _size = _bb.position();
    _bb.flip();                 // Set limit=amount read, and position==0

    if( addr == null ) throw new RuntimeException("Unhandled socket type: " + sad);
    // Read Inet from socket, port from the stream, figure out H2ONode
    _h2o = H2ONode.intern(addr, getPort());
    _firstPage = true;
    assert _h2o != null;
    _persist = 0;               // No persistence
  }

  /** Incoming TCP request.  Make a read-mode AutoBuffer from the open Channel,
   *  figure the originating H2ONode from the first few bytes read.
   *
   *  remoteAddress set to null means that the communication is originating
   *  from a non-h2o node; a non-null value represents the case where the
   *  communication is coming from an h2o node.
   */
  AutoBuffer( ByteChannel sock, InetAddress remoteAddress ) throws IOException {
    _chan = sock;
    raisePriority();            // Make TCP priority high
    _bb = BBP_BIG.make();       // Get a big / TCP-sized ByteBuffer
    _bb.flip();
    _read = true;               // Reading by default
    _firstPage = true;
    // Read Inet from socket, port from the stream, figure out H2ONode
    if( remoteAddress!=null ) {
      _h2o = H2ONode.intern(remoteAddress, getPort());
    } else {
      // In case the communication originates from a non-h2o node, we set the
      // _h2o field to null.  It is done for 2 reasons:
      //  - H2ONode.intern creates a new thread, and if there are a lot of
      //    connections from a non-h2o environment, it could end up with a
      //    too-many-open-files exception.
      //  - H2ONode.intern also reads the port (getPort()) and additional
      //    information which we do not send in communication originating
      //    from non-h2o nodes.
      _h2o = null;
    }
    _firstPage = true;          // Yes, must reset this.
    _time_start_ms = System.currentTimeMillis();
    _persist = Value.TCP;
  }
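  // Editor's note (illustrative sketch, not part of the original source):
  // both read-side constructors above lean on the standard NIO fill-then-flip
  // idiom -- fill the buffer, then flip() so that the limit marks the bytes
  // read and the position rewinds to 0 for consumers.  In miniature:
  private static ByteBuffer demoFillThenFlip(byte[] received) {
    ByteBuffer bb = ByteBuffer.allocate(received.length).order(ByteOrder.nativeOrder());
    bb.put(received);   // fill: position advances past the copied bytes
    bb.flip();          // limit = bytes written, position = 0, ready to read
    return bb;
  }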
  /** Make an AutoBuffer to write to an H2ONode.  Requests for a full buffer
   *  will open a TCP socket and roll through writing to the target.  Smaller
   *  requests will send via UDP.  Small requests get ordered by priority, so
   *  that e.g. NACK and ACKACK messages have priority over most anything else.
   *  This helps in UDP floods to shut down flooding senders. */
  private byte _msg_priority;
  AutoBuffer( H2ONode h2o, byte priority ) {
    // If UDP goes via UDP, we write into a DBB up front - because we plan on
    // sending it out via a Datagram socket send call.  If UDP goes via batched
    // TCP, we write into a HBB up front, because this will be copied again
    // into a large outgoing buffer.
    _bb = H2O.ARGS.useUDP          // Actually use UDP?
      ? BBP_SML.make()             // Make DirectByteBuffers to start with
      : ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder());
    _chan = null;                  // Channel made lazily only if we write a lot
    _h2o = h2o;
    _read = false;                 // Writing by default
    _firstPage = true;             // Filling first page
    assert _h2o != null;
    _time_start_ms = System.currentTimeMillis();
    _persist = Value.TCP;
    _msg_priority = priority;
  }

  /** Spill-to/from-disk request. */
  public AutoBuffer( FileChannel fc, boolean read, byte persist ) {
    _bb = BBP_BIG.make();          // Get a big / TCP-sized ByteBuffer
    _chan = fc;                    // Write to read/write
    _h2o = null;                   // File Channels never have an _h2o
    _read = read;                  // Mostly assert reading vs writing
    if( read ) _bb.flip();
    _time_start_ms = System.currentTimeMillis();
    _persist = persist;            // One of Value.ICE, NFS, S3, HDFS
  }

  /** Read from UDP multicast.  Same as the byte[]-read variant, except there is an H2O. */
  AutoBuffer( DatagramPacket pack ) {
    _size = pack.getLength();
    _bb = ByteBuffer.wrap(pack.getData(), 0, pack.getLength()).order(ByteOrder.nativeOrder());
    _bb.position(0);
    _read = true;
    _firstPage = true;
    _chan = null;
    _h2o = H2ONode.intern(pack.getAddress(), getPort());
    _persist = 0;                  // No persistence
  }

  /** Read from a UDP_TCP buffer; could be in the middle of a large buffer */
  AutoBuffer( H2ONode h2o, byte[] buf, int off, int len ) {
    assert buf != null : "null fed to ByteBuffer.wrap";
    _h2o = h2o;
    _bb = ByteBuffer.wrap(buf,off,len).order(ByteOrder.nativeOrder());
    _chan = null;
    _read = true;
    _firstPage = true;
    _persist = 0;                  // No persistence
    _size = len;
  }

  /** Read from a fixed byte[]; should not be closed. */
  public AutoBuffer( byte[] buf ) { this(null,buf,0, buf.length); }

  /** Write to an ever-expanding byte[].  Instead of calling {@link #close()},
   *  call {@link #buf()} to retrieve the final byte[]. */
  public AutoBuffer( ) {
    _bb = ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder());
    _chan = null;
    _h2o = null;
    _read = false;
    _firstPage = true;
    _persist = 0;                  // No persistence
  }

  /** Write to a known sized byte[].  Instead of calling close(), call
   *  {@link #bufClose()} to retrieve the final byte[]. */
  public AutoBuffer( int len ) {
    _bb = ByteBuffer.wrap(MemoryManager.malloc1(len)).order(ByteOrder.nativeOrder());
    _chan = null;
    _h2o = null;
    _read = false;
    _firstPage = true;
    _persist = 0;                  // No persistence
  }
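  // Editor's note (illustrative sketch, not part of the original source): a
  // minimal round trip through the byte[]-backed constructors above, using
  // only methods defined in this class; the method name is hypothetical.
  private static void demoRoundTrip() {
    AutoBuffer wr = new AutoBuffer();       // write mode, ever-expanding byte[]
    wr.put4(42).put8(123456789L);           // primitives, native byte order
    byte[] wire = wr.buf();                 // copy of exactly the bytes written
    AutoBuffer rd = new AutoBuffer(wire);   // read mode over the same bytes
    assert rd.get4() == 42;
    assert rd.get8() == 123456789L;
  }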
  /** Write to a persistent Stream, including all TypeMap info to allow later
   *  reloading (by the same exact rev of H2O). */
  public AutoBuffer( OutputStream os, boolean persist ) {
    _bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder());
    _read = false;
    _chan = Channels.newChannel(os);
    _h2o = null;
    _firstPage = true;
    _persist = 0;

    if( persist ) put1(0x1C).put1(0xED).putStr(H2O.ABV.projectVersion()).putAStr(TypeMap.CLAZZES);
    else put1(0);
  }

  /** Read from a persistent Stream (including all TypeMap info) into the same
   *  exact rev of H2O. */
  public AutoBuffer( InputStream is ) {
    _chan = null;
    _h2o = null;
    _firstPage = true;
    _persist = 0;

    _read = true;
    _bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder());
    _bb.flip();
    _is = is;
    int b = get1U();
    if( b==0 ) return;          // No persistence info
    int magic = get1U();
    if( b!=0x1C || magic != 0xED ) throw new IllegalArgumentException("Missing magic number 0x1CED at stream start");
    String version = getStr();
    if( !version.equals(H2O.ABV.projectVersion()) )
      throw new IllegalArgumentException("Found version "+version+", but running version "+H2O.ABV.projectVersion());
    String[] typeMap = getAStr();
    _typeMap = new short[typeMap.length];
    for( int i=0; i<typeMap.length; i++ )
      _typeMap[i] = (short)(typeMap[i]==null ? 0 : TypeMap.onIce(typeMap[i]));
  }

  @Override public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("[AB ").append(_read ? "read " : "write ");
    sb.append(_firstPage?"first ":"2nd ").append(_h2o);
    sb.append(" ").append(Value.nameOfPersist(_persist));
    if( _bb != null ) sb.append(" 0 <= ").append(_bb.position()).append(" <= ").append(_bb.limit());
    if( _bb != null ) sb.append(" <= ").append(_bb.capacity());
    return sb.append("]").toString();
  }

  // Fetch a DBB from an object pool... they are fairly expensive to make
  // because a native call is required to get the backing memory.  I've
  // included BB count tracking code to help track leaks.  As of 12/17/2012 the
  // leaks are under control, but figure this may happen again so keeping these
  // counters around.
  //
  // We use 2 pool sizes: lots of small UDP packet-sized buffers and fewer
  // larger TCP-sized buffers.
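  // Editor's note (illustrative sketch, not part of the original source): the
  // essential free-list pattern behind the BBPool below, minus the statistics
  // and ratio-based trimming -- take from the list if possible, otherwise
  // allocateDirect(); on release, clear() and push back.  (The real pool
  // synchronizes; this miniature is single-threaded.  Names are hypothetical.)
  private static final java.util.ArrayDeque<ByteBuffer> DEMO_POOL = new java.util.ArrayDeque<>();
  private static ByteBuffer demoTake(int size) {
    ByteBuffer bb = DEMO_POOL.poll();       // reuse a pooled buffer if we have one
    return bb != null ? bb : ByteBuffer.allocateDirect(size).order(ByteOrder.nativeOrder());
  }
  private static void demoRelease(ByteBuffer bb) {
    bb.clear();                             // reset position/limit before reuse
    DEMO_POOL.push(bb);                     // unbounded here; the real pool trims by hit ratio
  }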
private static final boolean DEBUG = Boolean.getBoolean("h2o.find-ByteBuffer-leaks"); private static long HWM=0; static class BBPool { long _made, _cached, _freed; long _numer, _denom, _goal=4*H2O.NUMCPUS, _lastGoal; final ArrayList<ByteBuffer> _bbs = new ArrayList<>(); final int _size; // Big or small size of ByteBuffers BBPool( int sz) { _size=sz; } private ByteBuffer stats( ByteBuffer bb ) { if( !DEBUG ) return bb; if( ((_made+_cached)&255)!=255 ) return bb; // Filter printing to 1 in 256 long now = System.currentTimeMillis(); if( now < HWM ) return bb; HWM = now+1000; water.util.SB sb = new water.util.SB(); sb.p("BB").p(this==BBP_BIG?1:0).p(" made=").p(_made).p(" -freed=").p(_freed).p(", cache hit=").p(_cached).p(" ratio=").p(_numer/_denom).p(", goal=").p(_goal).p(" cache size=").p(_bbs.size()).nl(); for( int i=0; i<H2O.MAX_PRIORITY; i++ ) { int x = H2O.getWrkQueueSize(i); if( x > 0 ) sb.p('Q').p(i).p('=').p(x).p(' '); } Log.warn(sb.nl().toString()); return bb; } ByteBuffer make() { while( true ) { // Repeat loop for DBB OutOfMemory errors ByteBuffer bb=null; synchronized(_bbs) { int sz = _bbs.size(); if( sz > 0 ) { bb = _bbs.remove(sz-1); _cached++; _numer++; } } if( bb != null ) return stats(bb); // Cache empty; go get one from C/Native memory try { bb = ByteBuffer.allocateDirect(_size).order(ByteOrder.nativeOrder()); synchronized(this) { _made++; _denom++; _goal = Math.max(_goal,_made-_freed); _lastGoal=System.nanoTime(); } // Goal was too low, raise it return stats(bb); } catch( OutOfMemoryError oome ) { // java.lang.OutOfMemoryError: Direct buffer memory if( !"Direct buffer memory".equals(oome.getMessage()) ) throw oome; System.out.println("OOM DBB - Sleeping & retrying"); try { Thread.sleep(100); } catch( InterruptedException ignore ) { } } } } void free(ByteBuffer bb) { // Heuristic: keep the ratio of BB's made to cache-hits at a fixed level. // Free to GC if ratio is high, free to internal cache if low. long ratio = _numer/(_denom+1); synchronized(_bbs) { if( ratio < 100 || _bbs.size() < _goal ) { // low hit/miss ratio or below goal bb.clear(); // Clear-before-add _bbs.add(bb); } else _freed++; // Toss the extras (above goal & ratio) long now = System.nanoTime(); if( now-_lastGoal > 1000000000L ) { // Once/sec, drop goal by 10% _lastGoal = now; if( ratio > 110 ) // If ratio is really high, lower goal _goal=Math.max(4*H2O.NUMCPUS,(long)(_goal*0.99)); // Once/sec, lower numer/denom... means more recent activity outweighs really old stuff long denom = (long) (0.99 * _denom); // Proposed reduction if( denom > 10 ) { // Keep a little precision _numer = (long) (0.99 * _numer); // Keep ratio between made & cached the same _denom = denom; // ... by lowering both by 10% } } } } static int FREE( ByteBuffer bb ) { if(bb.isDirect()) (bb.capacity()==BBP_BIG._size ? BBP_BIG : BBP_SML).free(bb); return 0; // Flow coding } } static BBPool BBP_SML = new BBPool( 2*1024); // Bytebuffer "common small size", for UDP static BBPool BBP_BIG = new BBPool(64*1024); // Bytebuffer "common big size", for TCP public static int TCP_BUF_SIZ = BBP_BIG._size; private int bbFree() { if(_bb != null && _bb.isDirect()) BBPool.FREE(_bb); _bb = null; return 0; // Flow-coding } // You thought TCP was a reliable protocol, right? WRONG! Fails 100% of the // time under heavy network load. Connection-reset-by-peer & connection // timeouts abound, even after a socket open and after a 1st successful // ByteBuffer write. 
It *appears* that the reader is unaware that a writer // was told "go ahead and write" by the TCP stack, so all these fails are // only on the writer-side. public static class AutoBufferException extends RuntimeException { public final IOException _ioe; AutoBufferException( IOException ioe ) { _ioe = ioe; } } // For reads, just assert all was read and close and release resources. // (release ByteBuffer back to the common pool). For writes, force any final // bytes out. If the write is to an H2ONode and is short, send via UDP. // AutoBuffer close calls order; i.e. a reader close() will block until the // writer does a close(). public final int close() { //if( _size > 2048 ) System.out.println("Z="+_zeros+" / "+_size+", A="+_arys); if( isClosed() ) return 0; // Already closed assert _h2o != null || _chan != null || _is != null; // Byte-array backed should not be closed try { if( _chan == null ) { // No channel? if( _read ) { if( _is != null ) _is.close(); return 0; } else { // Write // For small-packet write, send via UDP. Since nothing is sent until // now, this close() call trivially orders - since the reader will not // even start (much less close()) until this packet is sent. if( _bb.position() < MTU) return udpSend(); // oops - Big Write, switch to TCP and finish out there } } // Force AutoBuffer 'close' calls to order; i.e. block readers until // writers do a 'close' - by writing 1 more byte in the close-call which // the reader will have to wait for. if( hasTCP()) { // TCP connection? try { if( _read ) { // Reader? int x = get1U(); // Read 1 more byte assert x == 0xab : "AB.close instead of 0xab sentinel got "+x+", "+this; assert _chan != null; // chan set by incoming reader, since we KNOW it is a TCP // Write the reader-handshake-byte. SocketChannelUtils.underlyingSocketChannel(_chan).socket().getOutputStream().write(0xcd); // do not close actually reader socket; recycle it in TCPReader thread } else { // Writer? put1(0xab); // Write one-more byte ; might set _chan from null to not-null sendPartial(); // Finish partial writes; might set _chan from null to not-null assert _chan != null; // _chan is set not-null now! // Read the writer-handshake-byte. int x = SocketChannelUtils.underlyingSocketChannel(_chan).socket().getInputStream().read(); // either TCP con was dropped or other side closed connection without reading/confirming (e.g. task was cancelled). if( x == -1 ) throw new IOException("Other side closed connection before handshake byte read"); assert x == 0xcd : "Handshake; writer expected a 0xcd from reader but got "+x; } } catch( IOException ioe ) { try { _chan.close(); } catch( IOException ignore ) {} // Silently close _chan = null; // No channel now, since i/o error throw ioe; // Rethrow after close } finally { if( !_read ) _h2o.freeTCPSocket((ByteChannel) _chan); // Recycle writable TCP channel restorePriority(); // And if we raised priority, lower it back } } else { // FileChannel if( !_read ) sendPartial(); // Finish partial file-system writes _chan.close(); _chan = null; // Closed file channel } } catch( IOException e ) { // Dunno how to handle so crash-n-burn throw new AutoBufferException(e); } finally { bbFree(); _time_close_ms = System.currentTimeMillis(); // TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections assert isClosed(); } return 0; } // Need a sock for a big read or write operation. // See if we got one already, else open a new socket. 
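  // --- Editor's note (illustrative sketch, not part of the original source) ---
  // close() above orders shutdown with a two-byte handshake: the writer appends
  // the 0xab sentinel and blocks until the reader answers 0xcd; the reader
  // consumes 0xab and then writes 0xcd back.  The writer side, in miniature
  // over plain streams (names are hypothetical):
  private static void demoWriterHandshake(InputStream in, OutputStream out) throws IOException {
    out.write(0xab);                  // sentinel: "message complete"
    int ack = in.read();              // block until the reader confirms
    if( ack != 0xcd ) throw new IOException("expected 0xcd handshake byte, got "+ack);
  }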
private void tcpOpen() throws IOException { assert _firstPage && _bb.limit() >= 1+2+4; // At least something written assert _chan == null; // assert _bb.position()==0; _chan = _h2o.getTCPSocket(); raisePriority(); } // Just close the channel here without reading anything. Without the task // object at hand we do not know what (how many bytes) should we read from // the channel. And since the other side will try to read confirmation from // us before closing the channel, we can not read till the end. So we just // close the channel and let the other side to deal with it and figure out // the task has been cancelled (still sending ack ack back). void drainClose() { if( isClosed() ) return; // Already closed final Channel chan = _chan; // Read before closing assert _h2o != null || chan != null; // Byte-array backed should not be closed if( chan != null ) { // Channel assumed sick from prior IOException try { chan.close(); } catch( IOException ignore ) {} // Silently close _chan = null; // No channel now! if( !_read && SocketChannelUtils.isSocketChannel(chan)) _h2o.freeTCPSocket((ByteChannel) chan); // Recycle writable TCP channel } restorePriority(); // And if we raised priority, lower it back bbFree(); _time_close_ms = System.currentTimeMillis(); // TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections assert isClosed(); } // True if we opened a TCP channel, or will open one to close-and-send boolean hasTCP() { assert !isClosed(); return SocketChannelUtils.isSocketChannel(_chan) || (_h2o!=null && _bb.position() >= MTU); } // Size in bytes sent, after a close() int size() { return _size; } //int zeros() { return _zeros; } public int position () { return _bb.position(); } public AutoBuffer position(int p) {_bb.position(p); return this;} /** Skip over some bytes in the byte buffer. Caller is responsible for not * reading off end of the bytebuffer; generally this is easy for * array-backed autobuffers and difficult for i/o-backed bytebuffers. */ public void skip(int skip) { _bb.position(_bb.position()+skip); } // Return byte[] from a writable AutoBuffer public final byte[] buf() { assert _h2o==null && _chan==null && !_read && !_bb.isDirect(); return MemoryManager.arrayCopyOfRange(_bb.array(), _bb.arrayOffset(), _bb.position()); } public final byte[] bufClose() { byte[] res = _bb.array(); bbFree(); return res; } // For TCP sockets ONLY, raise the thread priority. We assume we are // blocking other Nodes with our network I/O, so try to get the I/O // over with. private void raisePriority() { if(_oldPrior == -1){ assert SocketChannelUtils.isSocketChannel(_chan); _oldPrior = Thread.currentThread().getPriority(); Thread.currentThread().setPriority(Thread.MAX_PRIORITY-1); } } private void restorePriority() { if( _oldPrior == -1 ) return; Thread.currentThread().setPriority(_oldPrior); _oldPrior = -1; } // Send via UDP socket. Unlike eg TCP sockets, we only need one for sending // so we keep a global one. Also, we do not close it when done, and we do // not connect it up-front to a target - but send the entire packet right now. 
private int udpSend() throws IOException { assert _chan == null; TimeLine.record_send(this,false); _size = _bb.position(); assert _size < AutoBuffer.BBP_SML._size; _bb.flip(); // Flip for sending if( _h2o==H2O.SELF ) { // SELF-send is the multi-cast signal water.init.NetworkInit.multicast(_bb, _msg_priority); } else { // Else single-cast send if(H2O.ARGS.useUDP) // Send via UDP directly water.init.NetworkInit.CLOUD_DGRAM.send(_bb, _h2o._key); else // Send via bulk TCP _h2o.sendMessage(_bb, _msg_priority); } return 0; // Flow-coding } // Flip to write-mode AutoBuffer clearForWriting(byte priority) { assert _read; _read = false; _msg_priority = priority; _bb.clear(); _firstPage = true; return this; } // Flip to read-mode public AutoBuffer flipForReading() { assert !_read; _read = true; _bb.flip(); _firstPage = true; return this; } /** Ensure the buffer has space for sz more bytes */ private ByteBuffer getSp( int sz ) { return sz > _bb.remaining() ? getImpl(sz) : _bb; } /** Ensure buffer has at least sz bytes in it. * - Also, set position just past this limit for future reading. */ private ByteBuffer getSz(int sz) { assert _firstPage : "getSz() is only valid for early UDP bytes"; if( sz > _bb.limit() ) getImpl(sz); _bb.position(sz); return _bb; } private ByteBuffer getImpl( int sz ) { assert _read : "Reading from a buffer in write mode"; _bb.compact(); // Move remaining unread bytes to start of buffer; prep for reading // Its got to fit or we asked for too much assert _bb.position()+sz <= _bb.capacity() : "("+_bb.position()+"+"+sz+" <= "+_bb.capacity()+")"; long ns = System.nanoTime(); while( _bb.position() < sz ) { // Read until we got enuf try { int res = readAnInt(); // Read more // Readers are supposed to be strongly typed and read the exact expected bytes. // However, if a TCP connection fails mid-read we'll get a short-read. // This is indistinguishable from a mis-alignment between the writer and reader! if( res <= 0 ) throw new AutoBufferException(new EOFException("Reading "+sz+" bytes, AB="+this)); if( _is != null ) _bb.position(_bb.position()+res); // Advance BB for Streams manually _size += res; // What we read } catch( IOException e ) { // Dunno how to handle so crash-n-burn // Linux/Ubuntu message for a reset-channel if( e.getMessage().equals("An existing connection was forcibly closed by the remote host") ) throw new AutoBufferException(e); // Windows message for a reset-channel if( e.getMessage().equals("An established connection was aborted by the software in your host machine") ) throw new AutoBufferException(e); throw Log.throwErr(e); } } _time_io_ns += (System.nanoTime()-ns); _bb.flip(); // Prep for handing out bytes //for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++; _firstPage = false; // First page of data is gone gone gone return _bb; } private int readAnInt() throws IOException { if (_is == null) return ((ReadableByteChannel) _chan).read(_bb); final byte[] array = _bb.array(); final int position = _bb.position(); final int remaining = _bb.remaining(); try { return _is.read(array, position, remaining); } catch (IOException ioe) { throw new IOException("Failed reading " + remaining + " bytes into buffer[" + array.length + "] at " + position + " from " + sourceName + " " + _is, ioe); } } /** Put as needed to keep from overflowing the ByteBuffer. 
*/ private ByteBuffer putSp( int sz ) { assert !_read; if (sz > _bb.remaining()) { if ((_h2o == null && _chan == null) || (_bb.hasArray() && _bb.capacity() < BBP_BIG._size)) expandByteBuffer(sz); else sendPartial(); assert sz <= _bb.remaining(); } return _bb; } // Do something with partial results, because the ByteBuffer is full. // If we are doing I/O, ship the bytes we have now and flip the ByteBuffer. private ByteBuffer sendPartial() { // Doing I/O with the full ByteBuffer - ship partial results _size += _bb.position(); if( _chan == null ) TimeLine.record_send(this, true); _bb.flip(); // Prep for writing. try { if( _chan == null ) tcpOpen(); // This is a big operation. Open a TCP socket as-needed. //for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++; long ns = System.nanoTime(); while( _bb.hasRemaining() ) { ((WritableByteChannel) _chan).write(_bb); if( RANDOM_TCP_DROP != null && SocketChannelUtils.isSocketChannel(_chan) && RANDOM_TCP_DROP.nextInt(100) == 0 ) throw new IOException("Random TCP Write Fail"); } _time_io_ns += (System.nanoTime()-ns); } catch( IOException e ) { // Some kind of TCP fail? // Change to an unchecked exception (so we don't have to annotate every // frick'n put1/put2/put4/read/write call). Retry & recovery happens at // a higher level. AutoBuffers are used for many things including e.g. // disk i/o & UDP writes; this exception only happens on a failed TCP // write - and we don't want to make the other AutoBuffer users have to // declare (and then ignore) this exception. throw new AutoBufferException(e); } _firstPage = false; _bb.clear(); return _bb; } // Called when the byte buffer doesn't have enough room // If buffer is array backed, and the needed room is small, // increase the size of the backing array, // otherwise dump into a large direct buffer private ByteBuffer expandByteBuffer(int sizeHint) { final long needed = (long) sizeHint - _bb.remaining() + _bb.capacity(); // Max needed is 2G if ((_h2o==null && _chan == null) || (_bb.hasArray() && needed < MTU)) { if (needed > MAX_ARRAY_SIZE) { throw new IllegalArgumentException("Cannot allocate more than 2GB array: sizeHint="+sizeHint+", " + "needed="+needed + ", bb.remaining()=" + _bb.remaining() + ", bb.capacity()="+_bb.capacity()); } byte[] ary = _bb.array(); // just get twice what is currently needed but not more then max array size (2G) // Be careful not to overflow because of integer math! 
int newLen = (int) Math.min(1L << (water.util.MathUtils.log2(needed)+1), MAX_ARRAY_SIZE); int oldpos = _bb.position(); _bb = ByteBuffer.wrap(MemoryManager.arrayCopyOfRange(ary,0,newLen),oldpos,newLen-oldpos) .order(ByteOrder.nativeOrder()); } else if (_bb.capacity() != BBP_BIG._size) { //avoid expanding existing BBP items int oldPos = _bb.position(); _bb.flip(); _bb = BBP_BIG.make().put(_bb); _bb.position(oldPos); } return _bb; } @SuppressWarnings("unused") public String getStr(int off, int len) { return new String(_bb.array(), _bb.arrayOffset()+off, len, UTF_8); } // ----------------------------------------------- // Utility functions to get various Java primitives @SuppressWarnings("unused") public boolean getZ() { return get1()!=0; } @SuppressWarnings("unused") public byte get1 () { return getSp(1).get (); } @SuppressWarnings("unused") public int get1U() { return get1() & 0xFF; } @SuppressWarnings("unused") public char get2 () { return getSp(2).getChar (); } @SuppressWarnings("unused") public short get2s () { return getSp(2).getShort (); } @SuppressWarnings("unused") public int get3 () { getSp(3); return get1U() | get1U() << 8 | get1U() << 16; } @SuppressWarnings("unused") public int get4 () { return getSp(4).getInt (); } @SuppressWarnings("unused") public float get4f() { return getSp(4).getFloat (); } @SuppressWarnings("unused") public long get8 () { return getSp(8).getLong (); } @SuppressWarnings("unused") public double get8d() { return getSp(8).getDouble(); } int get1U(int off) { return _bb.get (off)&0xFF; } int get4 (int off) { return _bb.getInt (off); } long get8 (int off) { return _bb.getLong(off); } @SuppressWarnings("unused") public AutoBuffer putZ (boolean b){ return put1(b?1:0); } @SuppressWarnings("unused") public AutoBuffer put1 ( int b) { assert b >= -128 && b <= 255 : ""+b+" is not a byte"; putSp(1).put((byte)b); return this; } @SuppressWarnings("unused") public AutoBuffer put2 ( char c) { putSp(2).putChar (c); return this; } @SuppressWarnings("unused") public AutoBuffer put2 ( short s) { putSp(2).putShort (s); return this; } @SuppressWarnings("unused") public AutoBuffer put2s ( short s) { return put2(s); } @SuppressWarnings("unused") public AutoBuffer put3( int x ) { assert (-1<<24) <= x && x < (1<<24); return put1((x)&0xFF).put1((x >> 8)&0xFF).put1(x >> 16); } @SuppressWarnings("unused") public AutoBuffer put4 ( int i) { putSp(4).putInt (i); return this; } @SuppressWarnings("unused") public AutoBuffer put4f( float f) { putSp(4).putFloat (f); return this; } @SuppressWarnings("unused") public AutoBuffer put8 ( long l) { putSp(8).putLong (l); return this; } @SuppressWarnings("unused") public AutoBuffer put8d(double d) { putSp(8).putDouble(d); return this; } public AutoBuffer put(Freezable f) { if( f == null ) return putInt(TypeMap.NULL); assert f.frozenType() > 0 : "No TypeMap for "+f.getClass().getName(); putInt(f.frozenType()); return f.write(this); } public <T extends Freezable> T get() { int id = getInt(); if( id == TypeMap.NULL ) return null; if( _is!=null ) id = _typeMap[id]; return (T)TypeMap.newFreezable(id).read(this); } public <T extends Freezable> T get(Class<T> tc) { int id = getInt(); if( id == TypeMap.NULL ) return null; if( _is!=null ) id = _typeMap[id]; assert tc.isInstance(TypeMap.theFreezable(id)):tc.getName() + " != " + TypeMap.theFreezable(id).getClass().getName() + ", id = " + id; return (T)TypeMap.newFreezable(id).read(this); } // Write Key's target IFF the Key is not null; target can be null. 
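  // --- Editor's note (illustrative sketch, not part of the original source) ---
  // putInt/getInt further below implement the variable-length integer wire
  // format: x in [-1,252] is sent as the single byte x+1; short-range values
  // as marker byte 255 plus two bytes; everything else as marker byte 254
  // plus four bytes.  Wire size only (the method name is hypothetical):
  private static int demoCompressedIntSize(int x) {
    if( 0 <= x+1 && x+1 <= 253 ) return 1;                          // 1-byte fast path, incl. -1
    if( Short.MIN_VALUE <= x && x <= Short.MAX_VALUE ) return 1+2;  // marker + short
    return 1+4;                                                     // marker + int
  }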
public AutoBuffer putKey(Key k) { if( k==null ) return this; // Key is null ==> write nothing Keyed kd = DKV.getGet(k); put(kd); return kd == null ? this : kd.writeAll_impl(this); } public Keyed getKey(Key k, Futures fs) { return k==null ? null : getKey(fs); // Key is null ==> read nothing } public Keyed getKey(Futures fs) { Keyed kd = get(Keyed.class); if( kd == null ) return null; DKV.put(kd,fs); return kd.readAll_impl(this,fs); } // Put a (compressed) integer. Specifically values in the range -1 to ~250 // will take 1 byte, values near a Short will take 1+2 bytes, values near an // Int will take 1+4 bytes, and bigger values 1+8 bytes. This compression is // optimized for small integers (including -1 which is often used as a "array // is null" flag when passing the array length). public AutoBuffer putInt(int x) { if( 0 <= (x+1)&& (x+1) <= 253 ) return put1(x+1); if( Short.MIN_VALUE <= x && x <= Short.MAX_VALUE ) return put1(255).put2((short)x); return put1(254).put4(x); } // Get a (compressed) integer. See above for the compression strategy and reasoning. int getInt( ) { int x = get1U(); if( x <= 253 ) return x-1; if( x==255 ) return (short)get2(); assert x==254; return get4(); } // Put a zero-compressed array. Compression is: // If null : putInt(-1) // Else // putInt(# of leading nulls) // putInt(# of non-nulls) // If # of non-nulls is > 0, putInt( # of trailing nulls) long putZA( Object[] A ) { if( A==null ) { putInt(-1); return 0; } int x=0; for( ; x<A.length; x++ ) if( A[x ]!=null ) break; int y=A.length; for( ; y>x; y-- ) if( A[y-1]!=null ) break; putInt(x); // Leading zeros to skip putInt(y-x); // Mixed non-zero guts in middle if( y > x ) // If any trailing nulls putInt(A.length-y); // Trailing zeros return ((long)x<<32)|(y-x); // Return both leading zeros, and middle non-zeros } // Get the lengths of a zero-compressed array. // Returns -1 if null. // Returns a long of (leading zeros | middle non-zeros). // If there are non-zeros, caller has to read the trailing zero-length. long getZA( ) { int x=getInt(); // Length of leading zeros if( x == -1 ) return -1; // or a null int nz=getInt(); // Non-zero in the middle return ((long)x<<32)|(long)nz; // Return both ints } // TODO: untested. . . @SuppressWarnings("unused") public AutoBuffer putAEnum(Enum[] enums) { //_arys++; long xy = putZA(enums); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putEnum(enums[i]); return this; } @SuppressWarnings("unused") public <E extends Enum> E[] getAEnum(E[] values) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 
0 : getInt(); // Trailing nulls E[] ts = (E[]) Array.newInstance(values.getClass().getComponentType(), x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getEnum(values); return ts; } @SuppressWarnings("unused") public AutoBuffer putA(Freezable[] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) put(fs[i]); return this; } public AutoBuffer putAA(Freezable[][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA(fs[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAAA(Freezable[][][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAA(fs[i]); return this; } public <T extends Freezable> T[] getA(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls T[] ts = (T[]) Array.newInstance(tc, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = get(tc); return ts; } public <T extends Freezable> T[][] getAA(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls Class<T[]> tcA = (Class<T[]>) Array.newInstance(tc, 0).getClass(); T[][] ts = (T[][]) Array.newInstance(tcA, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getA(tc); return ts; } @SuppressWarnings("unused") public <T extends Freezable> T[][][] getAAA(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls Class<T[] > tcA = (Class<T[] >) Array.newInstance(tc , 0).getClass(); Class<T[][]> tcAA = (Class<T[][]>) Array.newInstance(tcA, 0).getClass(); T[][][] ts = (T[][][]) Array.newInstance(tcAA, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getAA(tc); return ts; } public AutoBuffer putAStr(String[] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putStr(fs[i]); return this; } public String[] getAStr() { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls String[] ts = new String[x+y+z]; for( int i = x; i < x+y; ++i ) ts[i] = getStr(); return ts; } @SuppressWarnings("unused") public AutoBuffer putAAStr(String[][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAStr(fs[i]); return this; } @SuppressWarnings("unused") public String[][] getAAStr() { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls String[][] ts = new String[x+y+z][]; for( int i = x; i < x+y; ++i ) ts[i] = getAStr(); return ts; } // Read the smaller of _bb.remaining() and len into buf. // Return bytes read, which could be zero. int read( byte[] buf, int off, int len ) { int sz = Math.min(_bb.remaining(),len); _bb.get(buf,off,sz); return sz; } // ----------------------------------------------- // Utility functions to handle common UDP packet tasks. 
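  // --- Editor's note (illustrative, not part of the original source) ---
  // The helpers below decode a fixed UDP packet header laid out as:
  //   byte  0     : control/opcode byte
  //   bytes 1-2   : sender's H2O port (char)
  //   bytes 3-6   : task number (int)
  //   byte  7     : flag byte
  // The same offsets as named constants (hypothetical; the source uses the
  // literal arithmetic 1, 1+2, 1+2+4 instead):
  private static final int OFF_CTRL = 0, OFF_PORT = 1, OFF_TASK = 1+2, OFF_FLAG = 1+2+4;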
// Get the 1st control byte int getCtrl( ) { return getSz(1).get(0)&0xFF; } // Get the port in next 2 bytes int getPort( ) { return getSz(1+2).getChar(1); } // Get the task# in the next 4 bytes int getTask( ) { return getSz(1+2+4).getInt(1+2); } // Get the flag in the next 1 byte int getFlag( ) { return getSz(1+2+4+1).get(1+2+4); } // Set the ctrl, port, task. Ready to write more bytes afterwards AutoBuffer putUdp (UDP.udp type) { assert _bb.position() == 0; putSp(_bb.position()+1+2); _bb.put ((byte)type.ordinal()); _bb.putChar((char)H2O.H2O_PORT ); // Outgoing port is always the sender's (me) port return this; } AutoBuffer putTask(UDP.udp type, int tasknum) { return putUdp(type).put4(tasknum); } AutoBuffer putTask(int ctrl, int tasknum) { assert _bb.position() == 0; putSp(_bb.position()+1+2+4); _bb.put((byte)ctrl).putChar((char)H2O.H2O_PORT).putInt(tasknum); return this; } // ----------------------------------------------- // Utility functions to read & write arrays public boolean[] getAZ() { int len = getInt(); if (len == -1) return null; boolean[] r = new boolean[len]; for (int i=0;i<len;++i) r[i] = getZ(); return r; } public byte[] getA1( ) { //_arys++; int len = getInt(); return len == -1 ? null : getA1(len); } public byte[] getA1( int len ) { byte[] buf = MemoryManager.malloc1(len); int sofar = 0; while( sofar < len ) { int more = Math.min(_bb.remaining(), len - sofar); _bb.get(buf, sofar, more); sofar += more; if( sofar < len ) getSp(Math.min(_bb.capacity(), len-sofar)); } return buf; } public short[] getA2( ) { //_arys++; int len = getInt(); if( len == -1 ) return null; short[] buf = MemoryManager.malloc2(len); int sofar = 0; while( sofar < buf.length ) { ShortBuffer as = _bb.asShortBuffer(); int more = Math.min(as.remaining(), len - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*2); if( sofar < len ) getSp(Math.min(_bb.capacity()-1, (len-sofar)*2)); } return buf; } public int[] getA4( ) { //_arys++; int len = getInt(); if( len == -1 ) return null; int[] buf = MemoryManager.malloc4(len); int sofar = 0; while( sofar < buf.length ) { IntBuffer as = _bb.asIntBuffer(); int more = Math.min(as.remaining(), len - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*4); if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4)); } return buf; } public float[] getA4f( ) { //_arys++; int len = getInt(); if( len == -1 ) return null; float[] buf = MemoryManager.malloc4f(len); int sofar = 0; while( sofar < buf.length ) { FloatBuffer as = _bb.asFloatBuffer(); int more = Math.min(as.remaining(), len - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*4); if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4)); } return buf; } public long[] getA8( ) { //_arys++; // Get the lengths of lead & trailing zero sections, and the non-zero // middle section. int x = getInt(); if( x == -1 ) return null; int y = getInt(); // Non-zero in the middle int z = y==0 ? 
0 : getInt();// Trailing zeros long[] buf = MemoryManager.malloc8(x+y+z); switch( get1U() ) { // 1,2,4 or 8 for how the middle section is passed case 1: for( int i=x; i<x+y; i++ ) buf[i] = get1U(); return buf; case 2: for( int i=x; i<x+y; i++ ) buf[i] = (short)get2(); return buf; case 4: for( int i=x; i<x+y; i++ ) buf[i] = get4(); return buf; case 8: break; default: throw H2O.fail(); } int sofar = x; while( sofar < x+y ) { LongBuffer as = _bb.asLongBuffer(); int more = Math.min(as.remaining(), x+y - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*8); if( sofar < x+y ) getSp(Math.min(_bb.capacity()-7, (x+y-sofar)*8)); } return buf; } public double[] getA8d( ) { //_arys++; int len = getInt(); if( len == -1 ) return null; double[] buf = MemoryManager.malloc8d(len); int sofar = 0; while( sofar < len ) { DoubleBuffer as = _bb.asDoubleBuffer(); int more = Math.min(as.remaining(), len - sofar); as.get(buf, sofar, more); sofar += more; _bb.position(_bb.position() + as.position()*8); if( sofar < len ) getSp(Math.min(_bb.capacity()-7, (len-sofar)*8)); } return buf; } @SuppressWarnings("unused") public byte[][] getAA1( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls byte[][] ary = new byte[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA1(); return ary; } @SuppressWarnings("unused") public short[][] getAA2( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls short[][] ary = new short[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA2(); return ary; } public int[][] getAA4( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls int[][] ary = new int[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA4(); return ary; } @SuppressWarnings("unused") public float[][] getAA4f( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls float[][] ary = new float[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA4f(); return ary; } public long[][] getAA8( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls long[][] ary = new long[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA8(); return ary; } @SuppressWarnings("unused") public double[][] getAA8d( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls double[][] ary = new double[x+y+z][]; for( int i=x; i<x+y; i++ ) ary[i] = getA8d(); return ary; } @SuppressWarnings("unused") public int[][][] getAAA4( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 
0 : getInt(); // Trailing nulls int[][][] ary = new int[x+y+z][][]; for( int i=x; i<x+y; i++ ) ary[i] = getAA4(); return ary; } @SuppressWarnings("unused") public long[][][] getAAA8( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls long[][][] ary = new long[x+y+z][][]; for( int i=x; i<x+y; i++ ) ary[i] = getAA8(); return ary; } public double[][][] getAAA8d( ) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls double[][][] ary = new double[x+y+z][][]; for( int i=x; i<x+y; i++ ) ary[i] = getAA8d(); return ary; } public String getStr( ) { int len = getInt(); return len == -1 ? null : new String(getA1(len), UTF_8); } public <E extends Enum> E getEnum(E[] values ) { int idx = get1(); return idx == -1 ? null : values[idx]; } public AutoBuffer putAZ( boolean[] ary ) { if( ary == null ) return putInt(-1); putInt(ary.length); for (boolean anAry : ary) putZ(anAry); return this; } public AutoBuffer putA1( byte[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); return putA1(ary,ary.length); } public AutoBuffer putA1( byte[] ary, int length ) { return putA1(ary,0,length); } public AutoBuffer putA1( byte[] ary, int sofar, int length ) { if (length - sofar > _bb.remaining()) expandByteBuffer(length-sofar); while( sofar < length ) { int len = Math.min(length - sofar, _bb.remaining()); _bb.put(ary, sofar, len); sofar += len; if( sofar < length ) sendPartial(); } return this; } AutoBuffer putA2( short[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); if (ary.length*2 > _bb.remaining()) expandByteBuffer(ary.length*2); int sofar = 0; while( sofar < ary.length ) { ShortBuffer sb = _bb.asShortBuffer(); int len = Math.min(ary.length - sofar, sb.remaining()); sb.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + sb.position()*2); if( sofar < ary.length ) sendPartial(); } return this; } public AutoBuffer putA4( int[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); // Note: based on Brandon commit this should improve performance during parse (7d950d622ee3037555ecbab0e39404f8f0917652) if (ary.length*4 > _bb.remaining()) { expandByteBuffer(ary.length*4); // Try to expand BB buffer to fit input array } int sofar = 0; while( sofar < ary.length ) { IntBuffer ib = _bb.asIntBuffer(); int len = Math.min(ary.length - sofar, ib.remaining()); ib.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + ib.position()*4); if( sofar < ary.length ) sendPartial(); } return this; } public AutoBuffer putA8( long[] ary ) { //_arys++; if( ary == null ) return putInt(-1); // Trim leading & trailing zeros. Pass along the length of leading & // trailing zero sections, and the non-zero section in the middle. int x=0; for( ; x<ary.length; x++ ) if( ary[x ]!=0 ) break; int y=ary.length; for( ; y>x; y-- ) if( ary[y-1]!=0 ) break; int nzlen = y-x; putInt(x); putInt(nzlen); if( nzlen > 0 ) // If any trailing nulls putInt(ary.length-y); // Trailing zeros // Size trim the NZ section: pass as bytes or shorts if possible. 
long min=Long.MAX_VALUE, max=Long.MIN_VALUE; for( int i=x; i<y; i++ ) { if( ary[i]<min ) min=ary[i]; if( ary[i]>max ) max=ary[i]; } if( 0 <= min && max < 256 ) { // Ship as unsigned bytes put1(1); for( int i=x; i<y; i++ ) put1((int)ary[i]); return this; } if( Short.MIN_VALUE <= min && max < Short.MAX_VALUE ) { // Ship as shorts put1(2); for( int i=x; i<y; i++ ) put2((short)ary[i]); return this; } if( Integer.MIN_VALUE <= min && max < Integer.MAX_VALUE ) { // Ship as ints put1(4); for( int i=x; i<y; i++ ) put4((int)ary[i]); return this; } put1(8); // Ship as full longs int sofar = x; if ((y-sofar)*8 > _bb.remaining()) expandByteBuffer(ary.length*8); while( sofar < y ) { LongBuffer lb = _bb.asLongBuffer(); int len = Math.min(y - sofar, lb.remaining()); lb.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + lb.position() * 8); if( sofar < y ) sendPartial(); } return this; } public AutoBuffer putA4f( float[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); if (ary.length*4 > _bb.remaining()) expandByteBuffer(ary.length*4); int sofar = 0; while( sofar < ary.length ) { FloatBuffer fb = _bb.asFloatBuffer(); int len = Math.min(ary.length - sofar, fb.remaining()); fb.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + fb.position()*4); if( sofar < ary.length ) sendPartial(); } return this; } public AutoBuffer putA8d( double[] ary ) { //_arys++; if( ary == null ) return putInt(-1); putInt(ary.length); if (ary.length*8 > _bb.remaining()) expandByteBuffer(ary.length*8); int sofar = 0; while( sofar < ary.length ) { DoubleBuffer db = _bb.asDoubleBuffer(); int len = Math.min(ary.length - sofar, db.remaining()); db.put(ary, sofar, len); sofar += len; _bb.position(_bb.position() + db.position()*8); if( sofar < ary.length ) sendPartial(); } return this; } public AutoBuffer putAA1( byte[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA1(ary[i]); return this; } @SuppressWarnings("unused") AutoBuffer putAA2( short[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA2(ary[i]); return this; } public AutoBuffer putAA4( int[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA4(ary[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAA4f( float[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA4f(ary[i]); return this; } public AutoBuffer putAA8( long[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA8(ary[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAA8d( double[][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putA8d(ary[i]); return this; } public AutoBuffer putAAA4( int[][][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAA4(ary[i]); return this; } public AutoBuffer putAAA8( long[][][] ary ) { //_arys++; long xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAA8(ary[i]); return this; } public AutoBuffer putAAA8d( double[][][] ary ) { //_arys++; long 
xy = putZA(ary); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAA8d(ary[i]); return this; } // Put a String as bytes (not chars!) public AutoBuffer putStr( String s ) { if( s==null ) return putInt(-1); return putA1(StringUtils.bytesOf(s)); } @SuppressWarnings("unused") public AutoBuffer putEnum( Enum x ) { return put1(x==null ? -1 : x.ordinal()); } public static byte[] javaSerializeWritePojo(Object o) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); ObjectOutputStream out = null; try { out = new ObjectOutputStream(bos); out.writeObject(o); out.close(); return bos.toByteArray(); } catch (IOException e) { throw Log.throwErr(e); } } public static Object javaSerializeReadPojo(byte [] bytes) { try { final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes)); Object o = ois.readObject(); return o; } catch (IOException e) { String className = nameOfClass(bytes); throw Log.throwErr(new RuntimeException("Failed to deserialize " + className, e)); } catch (ClassNotFoundException e) { throw Log.throwErr(e); } } static String nameOfClass(byte[] bytes) { if (bytes == null) return "(null)"; if (bytes.length < 11) return "(no name)"; int nameSize = Math.min(40, Math.max(3, bytes[7])); return new String(bytes, 8, Math.min(nameSize, bytes.length - 8)); } // ========================================================================== // Java Serializable objects // Note: These are heck-a-lot more expensive than their Freezable equivalents. @SuppressWarnings("unused") public AutoBuffer putSer( Object obj ) { if (obj == null) return putA1(null); return putA1(javaSerializeWritePojo(obj)); } @SuppressWarnings("unused") public AutoBuffer putASer(Object[] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putSer(fs[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAASer(Object[][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putASer(fs[i]); return this; } @SuppressWarnings("unused") public AutoBuffer putAAASer(Object[][][] fs) { //_arys++; long xy = putZA(fs); if( xy == -1 ) return this; int x=(int)(xy>>32); int y=(int)xy; for( int i=x; i<x+y; i++ ) putAASer(fs[i]); return this; } @SuppressWarnings("unused") public Object getSer() { byte[] ba = getA1(); return ba == null ? null : javaSerializeReadPojo(ba); } @SuppressWarnings("unused") public <T> T getSer(Class<T> tc) { return (T)getSer(); } @SuppressWarnings("unused") public <T> T[] getASer(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls T[] ts = (T[]) Array.newInstance(tc, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getSer(tc); return ts; } @SuppressWarnings("unused") public <T> T[][] getAASer(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 0 : getInt(); // Trailing nulls T[][] ts = (T[][]) Array.newInstance(tc, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getASer(tc); return ts; } @SuppressWarnings("unused") public <T> T[][][] getAAASer(Class<T> tc) { //_arys++; long xy = getZA(); if( xy == -1 ) return null; int x=(int)(xy>>32); // Leading nulls int y=(int)xy; // Middle non-zeros int z = y==0 ? 
0 : getInt(); // Trailing nulls T[][][] ts = (T[][][]) Array.newInstance(tc, x+y+z); for( int i = x; i < x+y; ++i ) ts[i] = getAASer(tc); return ts; } // ========================================================================== // JSON AutoBuffer printers public AutoBuffer putJNULL( ) { return put1('n').put1('u').put1('l').put1('l'); } // Escaped JSON string private AutoBuffer putJStr( String s ) { byte[] b = StringUtils.bytesOf(s); int off=0; for( int i=0; i<b.length; i++ ) { if( b[i] == '\\' || b[i] == '"') { // Double up backslashes, escape quotes putA1(b,off,i); // Everything so far (no backslashes) put1('\\'); // The extra backslash off=i; // Advance the "so far" variable } // Handle remaining special cases in JSON // if( b[i] == '/' ) { putA1(b,off,i); put1('\\'); put1('/'); off=i+1; continue;} if( b[i] == '\b' ) { putA1(b,off,i); put1('\\'); put1('b'); off=i+1; continue;} if( b[i] == '\f' ) { putA1(b,off,i); put1('\\'); put1('f'); off=i+1; continue;} if( b[i] == '\n' ) { putA1(b,off,i); put1('\\'); put1('n'); off=i+1; continue;} if( b[i] == '\r' ) { putA1(b,off,i); put1('\\'); put1('r'); off=i+1; continue;} if( b[i] == '\t' ) { putA1(b,off,i); put1('\\'); put1('t'); off=i+1; continue;} // ASCII Control characters if( b[i] == 127 ) { putA1(b,off,i); put1('\\'); put1('u'); put1('0'); put1('0'); put1('7'); put1('f'); off=i+1; continue;} if( b[i] >= 0 && b[i] < 32 ) { String hexStr = Integer.toHexString(b[i]); putA1(b, off, i); put1('\\'); put1('u'); for (int j = 0; j < 4 - hexStr.length(); j++) put1('0'); for (int j = 0; j < hexStr.length(); j++) put1(hexStr.charAt(hexStr.length()-j-1)); off=i+1; } } return putA1(b,off,b.length); } public AutoBuffer putJSONStrUnquoted ( String s ) { return s==null ? putJNULL() : putJStr(s); } public AutoBuffer putJSONStrUnquoted ( String name, String s ) { return s==null ? putJSONStr(name).put1(':').putJNULL() : putJSONStr(name).put1(':').putJStr(s); } public AutoBuffer putJSONName( String s ) { return put1('"').putJStr(s).put1('"'); } public AutoBuffer putJSONStr ( String s ) { return s==null ? 
putJNULL() : putJSONName(s); } public AutoBuffer putJSONAStr(String[] ss) { if( ss == null ) return putJNULL(); put1('['); for( int i=0; i<ss.length; i++ ) { if( i>0 ) put1(','); putJSONStr(ss[i]); } return put1(']'); } private AutoBuffer putJSONAAStr( String[][] sss) { if( sss == null ) return putJNULL(); put1('['); for( int i=0; i<sss.length; i++ ) { if( i>0 ) put1(','); putJSONAStr(sss[i]); } return put1(']'); } @SuppressWarnings("unused") public AutoBuffer putJSONStr (String name, String s ) { return putJSONStr(name).put1(':').putJSONStr(s); } @SuppressWarnings("unused") public AutoBuffer putJSONAStr (String name, String[] ss ) { return putJSONStr(name).put1(':').putJSONAStr(ss); } @SuppressWarnings("unused") public AutoBuffer putJSONAAStr(String name, String[][]sss) { return putJSONStr(name).put1(':').putJSONAAStr(sss); } @SuppressWarnings("unused") public AutoBuffer putJSONSer (String name, Object o ) { return putJSONStr(name).put1(':').putJNULL(); } @SuppressWarnings("unused") public AutoBuffer putJSONASer (String name, Object[] oo ) { return putJSONStr(name).put1(':').putJNULL(); } @SuppressWarnings("unused") public AutoBuffer putJSONAASer (String name, Object[][] ooo ) { return putJSONStr(name).put1(':').putJNULL(); } @SuppressWarnings("unused") public AutoBuffer putJSONAAASer(String name, Object[][][] oooo) { return putJSONStr(name).put1(':').putJNULL(); } public AutoBuffer putJSONAZ( String name, boolean[] f) { return putJSONStr(name).put1(':').putJSONAZ(f); } public AutoBuffer putJSON(Freezable ice) { return ice == null ? putJNULL() : ice.writeJSON(this); } public AutoBuffer putJSONA( Freezable fs[] ) { if( fs == null ) return putJNULL(); put1('['); for( int i=0; i<fs.length; i++ ) { if( i>0 ) put1(','); putJSON(fs[i]); } return put1(']'); } public AutoBuffer putJSONAA( Freezable fs[][]) { if( fs == null ) return putJNULL(); put1('['); for( int i=0; i<fs.length; i++ ) { if( i>0 ) put1(','); putJSONA(fs[i]); } return put1(']'); } public AutoBuffer putJSONAAA( Freezable fs[][][]) { if( fs == null ) return putJNULL(); put1('['); for( int i=0; i<fs.length; i++ ) { if( i>0 ) put1(','); putJSONAA(fs[i]); } return put1(']'); } @SuppressWarnings("unused") public AutoBuffer putJSON ( String name, Freezable f ) { return putJSONStr(name).put1(':').putJSON (f); } public AutoBuffer putJSONA ( String name, Freezable f[] ) { return putJSONStr(name).put1(':').putJSONA (f); } @SuppressWarnings("unused") public AutoBuffer putJSONAA( String name, Freezable f[][]){ return putJSONStr(name).put1(':').putJSONAA(f); } @SuppressWarnings("unused") public AutoBuffer putJSONAAA( String name, Freezable f[][][]){ return putJSONStr(name).put1(':').putJSONAAA(f); } @SuppressWarnings("unused") public AutoBuffer putJSONZ( String name, boolean value ) { return putJSONStr(name).put1(':').putJStr("" + value); } private AutoBuffer putJSONAZ(boolean [] b) { if (b == null) return putJNULL(); put1('['); for( int i = 0; i < b.length; ++i) { if (i > 0) put1(','); putJStr(""+b[i]); } return put1(']'); } // Most simple integers private AutoBuffer putJInt( int i ) { byte b[] = StringUtils.toBytes(i); return putA1(b,b.length); } public AutoBuffer putJSON1( byte b ) { return putJInt(b); } public AutoBuffer putJSONA1( byte ary[] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSON1(ary[i]); } return put1(']'); } private AutoBuffer putJSONAA1(byte ary[][]) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); 
putJSONA1(ary[i]); } return put1(']'); } @SuppressWarnings("unused") public AutoBuffer putJSON1 (String name, byte b ) { return putJSONStr(name).put1(':').putJSON1(b); } @SuppressWarnings("unused") public AutoBuffer putJSONA1 (String name, byte b[] ) { return putJSONStr(name).put1(':').putJSONA1(b); } @SuppressWarnings("unused") public AutoBuffer putJSONAA1(String name, byte b[][]) { return putJSONStr(name).put1(':').putJSONAA1(b); } public AutoBuffer putJSONAEnum(String name, Enum[] enums) { return putJSONStr(name).put1(':').putJSONAEnum(enums); } public AutoBuffer putJSONAEnum( Enum[] enums ) { if( enums == null ) return putJNULL(); put1('['); for( int i=0; i<enums.length; i++ ) { if( i>0 ) put1(','); putJSONEnum(enums[i]); } return put1(']'); } AutoBuffer putJSON2( char c ) { return putJSON4(c); } AutoBuffer putJSON2( String name, char c ) { return putJSONStr(name).put1(':').putJSON2(c); } AutoBuffer putJSON2( short c ) { return putJSON4(c); } AutoBuffer putJSON2( String name, short c ) { return putJSONStr(name).put1(':').putJSON2(c); } public AutoBuffer putJSONA2( String name, short ary[] ) { return putJSONStr(name).put1(':').putJSONA2(ary); } AutoBuffer putJSONA2( short ary[] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSON2(ary[i]); } return put1(']'); } AutoBuffer putJSON8 ( long l ) { return putJStr(Long.toString(l)); } AutoBuffer putJSONA8( long ary[] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSON8(ary[i]); } return put1(']'); } AutoBuffer putJSONAA8( long ary[][] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSONA8(ary[i]); } return put1(']'); } AutoBuffer putJSONAAA8( long ary[][][] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSONAA8(ary[i]); } return put1(']'); } AutoBuffer putJSONEnum( Enum e ) { return e==null ? 
putJNULL() : put1('"').putJStr(e.toString()).put1('"'); } public AutoBuffer putJSON8 ( String name, long l ) { return putJSONStr(name).put1(':').putJSON8(l); } public AutoBuffer putJSONEnum( String name, Enum e ) { return putJSONStr(name).put1(':').putJSONEnum(e); } public AutoBuffer putJSONA8( String name, long ary[] ) { return putJSONStr(name).put1(':').putJSONA8(ary); } public AutoBuffer putJSONAA8( String name, long ary[][] ) { return putJSONStr(name).put1(':').putJSONAA8(ary); } public AutoBuffer putJSONAAA8( String name, long ary[][][] ) { return putJSONStr(name).put1(':').putJSONAAA8(ary); } public AutoBuffer putJSON4(int i) { return putJStr(Integer.toString(i)); } AutoBuffer putJSONA4( int[] a) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSON4(a[i]); } return put1(']'); } AutoBuffer putJSONAA4( int[][] a ) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSONA4(a[i]); } return put1(']'); } AutoBuffer putJSONAAA4( int[][][] a ) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSONAA4(a[i]); } return put1(']'); } public AutoBuffer putJSON4 ( String name, int i ) { return putJSONStr(name).put1(':').putJSON4(i); } public AutoBuffer putJSONA4( String name, int[] a) { return putJSONStr(name).put1(':').putJSONA4(a); } public AutoBuffer putJSONAA4( String name, int[][] a ) { return putJSONStr(name).put1(':').putJSONAA4(a); } public AutoBuffer putJSONAAA4( String name, int[][][] a ) { return putJSONStr(name).put1(':').putJSONAAA4(a); } AutoBuffer putJSON4f ( float f ) { return f==Float.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(f==Float.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Float.isNaN(f)?putJSONStr(JSON_NAN):putJStr(Float .toString(f)))); } public AutoBuffer putJSON4f ( String name, float f ) { return putJSONStr(name).put1(':').putJSON4f(f); } AutoBuffer putJSONA4f( float[] a ) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSON4f(a[i]); } return put1(']'); } public AutoBuffer putJSONA4f(String name, float[] a) { putJSONStr(name).put1(':'); return putJSONA4f(a); } AutoBuffer putJSONAA4f(String name, float[][] a) { putJSONStr(name).put1(':'); if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSONA4f(a[i]); } return put1(']'); } AutoBuffer putJSON8d( double d ) { if (TwoDimTable.isEmpty(d)) return putJNULL(); return d==Double.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(d==Double.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Double.isNaN(d)?putJSONStr(JSON_NAN):putJStr(Double.toString(d)))); } public AutoBuffer putJSON8d( String name, double d ) { return putJSONStr(name).put1(':').putJSON8d(d); } public AutoBuffer putJSONA8d( String name, double[] a ) { return putJSONStr(name).put1(':').putJSONA8d(a); } public AutoBuffer putJSONAA8d( String name, double[][] a) { return putJSONStr(name).put1(':').putJSONAA8d(a); } public AutoBuffer putJSONAAA8d( String name, double[][][] a) { return putJSONStr(name).put1(':').putJSONAAA8d(a); } public AutoBuffer putJSONA8d( double[] a ) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSON8d(a[i]); } return put1(']'); } public AutoBuffer putJSONAA8d( double[][] a ) { if( a == null ) return putJNULL(); put1('['); for( int i=0; i<a.length; i++ ) { if( i>0 ) put1(','); putJSONA8d(a[i]); } return put1(']'); } AutoBuffer 
putJSONAAA8d( double ary[][][] ) { if( ary == null ) return putJNULL(); put1('['); for( int i=0; i<ary.length; i++ ) { if( i>0 ) put1(','); putJSONAA8d(ary[i]); } return put1(']'); } static final String JSON_NAN = "NaN"; static final String JSON_POS_INF = "Infinity"; static final String JSON_NEG_INF = "-Infinity"; }
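The escaping rules inside putJStr above are easier to read in isolation. The class below is a standalone re-implementation written purely for illustration (the names JsonEscapeSketch and escape are not part of AutoBuffer, and it walks chars instead of the UTF-8 bytes the real method uses): backslashes and quotes are escaped, the common control characters get their short forms, and the remaining ASCII control characters, including DEL (127), become four-digit \u escapes.

// Standalone illustration of the putJStr escaping rules; all names are hypothetical.
public class JsonEscapeSketch {
  static String escape(String s) {
    StringBuilder sb = new StringBuilder(s.length() + 8);
    for (char c : s.toCharArray()) {
      switch (c) {
        case '\\': sb.append("\\\\"); break; // double up backslashes
        case '"':  sb.append("\\\""); break; // escape quotes
        case '\b': sb.append("\\b");  break;
        case '\f': sb.append("\\f");  break;
        case '\n': sb.append("\\n");  break;
        case '\r': sb.append("\\r");  break;
        case '\t': sb.append("\\t");  break;
        default:
          if (c < 32 || c == 127) {          // remaining ASCII control chars
            sb.append(String.format("\\u%04x", (int) c));
          } else {
            sb.append(c);
          }
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    // prints: tab:\there \"quoted\" back\\slash
    System.out.println(escape("tab:\there \"quoted\" back\\slash"));
  }
}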
mathemage/h2o-3
h2o-core/src/main/java/water/AutoBuffer.java
Java
apache-2.0
74,621
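Since the AutoBuffer record above falls back to plain JDK serialization for POJOs, here is a minimal standalone round trip equivalent to its javaSerializeWritePojo/javaSerializeReadPojo pair. The class name is hypothetical and only java.io is used; try-with-resources is a deliberate deviation from the original so the streams are closed even when writing fails.

import java.io.*;

// Minimal JDK-serialization round trip; PojoRoundTrip is an illustrative name only.
public class PojoRoundTrip {
  static byte[] write(Object o) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
      out.writeObject(o); // o must implement Serializable
    }
    return bos.toByteArray();
  }

  static Object read(byte[] bytes) throws IOException, ClassNotFoundException {
    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
      return in.readObject();
    }
  }

  public static void main(String[] args) throws Exception {
    System.out.println(read(write("hello"))); // prints: hello
  }
}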
/* * Copyright (c) 2010-2013 Evolveum * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.evolveum.midpoint.model.impl.lens; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; import javax.xml.namespace.QName; import com.evolveum.midpoint.prism.*; import com.evolveum.midpoint.schema.DeltaConvertor; import com.evolveum.midpoint.schema.result.OperationResult; import com.evolveum.midpoint.util.exception.*; import com.evolveum.midpoint.xml.ns._public.model.model_context_3.LensProjectionContextType; import org.apache.commons.lang.StringUtils; import org.jvnet.jaxb2_commons.lang.Validate; import com.evolveum.midpoint.common.crypto.CryptoUtil; import com.evolveum.midpoint.common.refinery.RefinedObjectClassDefinition; import com.evolveum.midpoint.common.refinery.RefinedResourceSchema; import com.evolveum.midpoint.common.refinery.ResourceShadowDiscriminator; import com.evolveum.midpoint.model.api.context.ModelProjectionContext; import com.evolveum.midpoint.model.api.context.SynchronizationPolicyDecision; import com.evolveum.midpoint.prism.delta.ChangeType; import com.evolveum.midpoint.prism.delta.DeltaSetTriple; import com.evolveum.midpoint.prism.delta.ObjectDelta; import com.evolveum.midpoint.prism.delta.PrismValueDeltaSetTriple; import com.evolveum.midpoint.prism.delta.ReferenceDelta; import com.evolveum.midpoint.prism.path.ItemPath; import com.evolveum.midpoint.schema.processor.ResourceAttribute; import com.evolveum.midpoint.schema.processor.ResourceSchema; import com.evolveum.midpoint.schema.util.MiscSchemaUtil; import com.evolveum.midpoint.schema.util.ShadowUtil; import com.evolveum.midpoint.schema.util.ResourceTypeUtil; import com.evolveum.midpoint.schema.util.SchemaDebugUtil; import com.evolveum.midpoint.util.Cloner; import com.evolveum.midpoint.util.DebugUtil; import com.evolveum.midpoint.xml.ns._public.common.common_3.AssignmentPolicyEnforcementType; import com.evolveum.midpoint.xml.ns._public.common.common_3.FocusType; import com.evolveum.midpoint.xml.ns._public.common.common_3.LayerType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ObjectType; import com.evolveum.midpoint.xml.ns._public.common.common_3.OperationResultStatusType; import com.evolveum.midpoint.xml.ns._public.common.common_3.OperationResultType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ProjectionPolicyType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceObjectTypeDefinitionType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceObjectTypeDependencyType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowAssociationType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowDiscriminatorType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowKindType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowType; import 
com.evolveum.midpoint.xml.ns._public.common.common_3.SynchronizationSituationType; import com.evolveum.midpoint.xml.ns._public.common.common_3.ValuePolicyType; /** * @author semancik * */ public class LensProjectionContext extends LensElementContext<ShadowType> implements ModelProjectionContext { private ObjectDelta<ShadowType> syncDelta; /** * If set to true: absolute state of this projection was detected by the synchronization. * This is mostly for debugging and visibility. It is not used by projection logic. */ private boolean syncAbsoluteTrigger = false; /** * The wave in which this resource should be processed. Initial value of -1 means "undetermined". */ private int wave = -1; /** * Indicates that the wave computation is still in progress. */ private transient boolean waveIncomplete = false; /** * Definition of account type. */ private ResourceShadowDiscriminator resourceShadowDiscriminator; private boolean fullShadow = false; /** * True if the account is "legal" (assigned to the user). It may be false for accounts that are either * found to be illegal by live sync, were unassigned from user, etc. * If set to null the situation is not yet known. Null is a typical value when the context is constructed. */ private boolean isAssigned; /** * True if the account should be part of the synchronization. E.g. outbound expression should be applied to it. */ private boolean isActive; /** * True if there is a valid assignment for this projection and/or the policy allows such project to exist. */ private Boolean isLegal = null; private Boolean isLegalOld = null; private boolean isExists; /** * Decision regarding the account. It indicated what the engine has DECIDED TO DO with the context. * If set to null no decision was made yet. Null is also a typical value when the context is created. */ private SynchronizationPolicyDecision synchronizationPolicyDecision; /** * True if we want to reconcile account in this context. */ private boolean doReconciliation; /** * Synchronization situation as it was originally detected by the synchronization code (SynchronizationService). * This is mostly for debug purposes. Projector and clockwork do not need to care about this. * The synchronization intent is used instead. */ private SynchronizationSituationType synchronizationSituationDetected = null; /** * Synchronization situation which was the result of synchronization reaction (projector and clockwork run). * This is mostly for debug purposes. Projector and clockwork do not care about this (except for setting it). * The synchronization decision is used instead. */ private SynchronizationSituationType synchronizationSituationResolved = null; /** * Delta set triple for accounts. Specifies which accounts should be added, removed or stay as they are. * It tells almost nothing about attributes directly although the information about attributes are inside * each account construction (in a form of ValueConstruction that contains attribute delta triples). * * Intermediary computation result. It is stored to allow re-computing of account constructions during * iterative computations. 
*/ private transient PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> constructionDeltaSetTriple; private transient Construction outboundConstruction; private transient Collection<ResourceObjectTypeDependencyType> dependencies = null; private transient Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> squeezedAttributes; private transient Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> squeezedAssociations; private ValuePolicyType accountPasswordPolicy; /** * Resource that hosts this projection. */ transient private ResourceType resource; LensProjectionContext(LensContext<? extends ObjectType> lensContext, ResourceShadowDiscriminator resourceAccountType) { super(ShadowType.class, lensContext); this.resourceShadowDiscriminator = resourceAccountType; this.isAssigned = false; } public ObjectDelta<ShadowType> getSyncDelta() { return syncDelta; } public void setSyncDelta(ObjectDelta<ShadowType> syncDelta) { this.syncDelta = syncDelta; } public boolean isSyncAbsoluteTrigger() { return syncAbsoluteTrigger; } public void setSyncAbsoluteTrigger(boolean syncAbsoluteTrigger) { this.syncAbsoluteTrigger = syncAbsoluteTrigger; } public int getWave() { return wave; } public void setWave(int wave) { this.wave = wave; } public boolean isWaveIncomplete() { return waveIncomplete; } public void setWaveIncomplete(boolean waveIncomplete) { this.waveIncomplete = waveIncomplete; } public boolean isDoReconciliation() { return doReconciliation; } public void setDoReconciliation(boolean doReconciliation) { this.doReconciliation = doReconciliation; } public ResourceShadowDiscriminator getResourceShadowDiscriminator() { return resourceShadowDiscriminator; } public void setResourceShadowDiscriminator(ResourceShadowDiscriminator resourceShadowDiscriminator) { this.resourceShadowDiscriminator = resourceShadowDiscriminator; } public boolean compareResourceShadowDiscriminator(ResourceShadowDiscriminator rsd, boolean compareOrder) { Validate.notNull(rsd.getResourceOid()); if (resourceShadowDiscriminator == null) { // This may be valid case e.g. 
in case of broken contexts or if a context is just loading return false; } if (!rsd.getResourceOid().equals(resourceShadowDiscriminator.getResourceOid())) { return false; } if (!rsd.getKind().equals(resourceShadowDiscriminator.getKind())) { return false; } if (rsd.isThombstone() != resourceShadowDiscriminator.isThombstone()) { return false; } if (rsd.getIntent() == null) { try { if (!getRefinedAccountDefinition().isDefaultInAKind()) { return false; } } catch (SchemaException e) { throw new SystemException("Internal error: "+e.getMessage(), e); } } else if (!rsd.getIntent().equals(resourceShadowDiscriminator.getIntent())) { return false; } if (compareOrder && rsd.getOrder() != resourceShadowDiscriminator.getOrder()) { return false; } return true; } public boolean isThombstone() { if (resourceShadowDiscriminator == null) { return false; } return resourceShadowDiscriminator.isThombstone(); } public void addAccountSyncDelta(ObjectDelta<ShadowType> delta) throws SchemaException { if (syncDelta == null) { syncDelta = delta; } else { syncDelta.merge(delta); } } public boolean isAdd() { if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD) { return true; } else if (synchronizationPolicyDecision != null){ return false; } return super.isAdd(); } public boolean isModify() { if (synchronizationPolicyDecision == SynchronizationPolicyDecision.KEEP) { return true; } else if (synchronizationPolicyDecision != null){ return false; } return super.isModify(); } public boolean isDelete() { if (synchronizationPolicyDecision == SynchronizationPolicyDecision.DELETE) { return true; } else if (synchronizationPolicyDecision != null){ return false; } if (syncDelta != null && syncDelta.isDelete()) { return true; } return super.isDelete(); } public ResourceType getResource() { return resource; } public void setResource(ResourceType resource) { this.resource = resource; } public boolean isAssigned() { return isAssigned; } public void setAssigned(boolean isAssigned) { this.isAssigned = isAssigned; } public boolean isActive() { return isActive; } public void setActive(boolean isActive) { this.isActive = isActive; } public Boolean isLegal() { return isLegal; } public void setLegal(Boolean isLegal) { this.isLegal = isLegal; } public Boolean isLegalOld() { return isLegalOld; } public void setLegalOld(Boolean isLegalOld) { this.isLegalOld = isLegalOld; } public boolean isExists() { return isExists; } public void setExists(boolean exists) { this.isExists = exists; } public SynchronizationPolicyDecision getSynchronizationPolicyDecision() { return synchronizationPolicyDecision; } public void setSynchronizationPolicyDecision(SynchronizationPolicyDecision policyDecision) { this.synchronizationPolicyDecision = policyDecision; } public SynchronizationSituationType getSynchronizationSituationDetected() { return synchronizationSituationDetected; } public void setSynchronizationSituationDetected( SynchronizationSituationType synchronizationSituationDetected) { this.synchronizationSituationDetected = synchronizationSituationDetected; } public SynchronizationSituationType getSynchronizationSituationResolved() { return synchronizationSituationResolved; } public void setSynchronizationSituationResolved( SynchronizationSituationType synchronizationSituationResolved) { this.synchronizationSituationResolved = synchronizationSituationResolved; } public boolean isFullShadow() { return fullShadow; } /** * Returns true if full shadow is available, either loaded or in a create delta. 
*/ public boolean hasFullShadow() { if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD) { return true; } return isFullShadow(); } public void setFullShadow(boolean fullShadow) { this.fullShadow = fullShadow; } public ShadowKindType getKind() { ResourceShadowDiscriminator discr = getResourceShadowDiscriminator(); if (discr != null) { return discr.getKind(); } if (getObjectOld()!=null) { return getObjectOld().asObjectable().getKind(); } if (getObjectCurrent()!=null) { return getObjectCurrent().asObjectable().getKind(); } if (getObjectNew()!=null) { return getObjectNew().asObjectable().getKind(); } return ShadowKindType.ACCOUNT; } public PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> getConstructionDeltaSetTriple() { return constructionDeltaSetTriple; } public void setConstructionDeltaSetTriple( PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> constructionDeltaSetTriple) { this.constructionDeltaSetTriple = constructionDeltaSetTriple; } public Construction getOutboundConstruction() { return outboundConstruction; } public void setOutboundConstruction(Construction outboundConstruction) { this.outboundConstruction = outboundConstruction; } public Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> getSqueezedAttributes() { return squeezedAttributes; } public void setSqueezedAttributes(Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> squeezedAttributes) { this.squeezedAttributes = squeezedAttributes; } public Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> getSqueezedAssociations() { return squeezedAssociations; } public void setSqueezedAssociations( Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> squeezedAssociations) { this.squeezedAssociations = squeezedAssociations; } public ResourceObjectTypeDefinitionType getResourceObjectTypeDefinitionType() { if (synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN) { return null; } ResourceObjectTypeDefinitionType def = ResourceTypeUtil.getResourceObjectTypeDefinitionType( resource, getResourceShadowDiscriminator().getKind(), resourceShadowDiscriminator.getIntent()); return def; } private ResourceSchema getResourceSchema() throws SchemaException { return RefinedResourceSchema.getResourceSchema(resource, getNotNullPrismContext()); } public RefinedResourceSchema getRefinedResourceSchema() throws SchemaException { if (resource == null) { return null; } return RefinedResourceSchema.getRefinedSchema(resource, LayerType.MODEL, getNotNullPrismContext()); } public RefinedObjectClassDefinition getRefinedAccountDefinition() throws SchemaException { RefinedResourceSchema refinedSchema = getRefinedResourceSchema(); if (refinedSchema == null) { return null; } return refinedSchema.getRefinedDefinition(getResourceShadowDiscriminator().getKind(), getResourceShadowDiscriminator().getIntent()); } public Collection<ResourceObjectTypeDependencyType> getDependencies() { if (dependencies == null) { ResourceObjectTypeDefinitionType resourceAccountTypeDefinitionType = getResourceObjectTypeDefinitionType(); if (resourceAccountTypeDefinitionType == null) { // No dependencies. But we cannot set null as that means "unknown". So let's set empty collection instead. 
dependencies = new ArrayList<ResourceObjectTypeDependencyType>(); } else { dependencies = resourceAccountTypeDefinitionType.getDependency(); } } return dependencies; } public ValuePolicyType getAccountPasswordPolicy() { return accountPasswordPolicy; } public void setAccountPasswordPolicy(ValuePolicyType accountPasswordPolicy) { this.accountPasswordPolicy = accountPasswordPolicy; } public ValuePolicyType getEffectivePasswordPolicy() { if (accountPasswordPolicy != null) { return accountPasswordPolicy; } if (getLensContext().getFocusContext().getOrgPasswordPolicy() != null){ return getLensContext().getFocusContext().getOrgPasswordPolicy(); } return getLensContext().getGlobalPasswordPolicy(); } public AssignmentPolicyEnforcementType getAssignmentPolicyEnforcementType() { // TODO: per-resource assignment enforcement ResourceType resource = getResource(); ProjectionPolicyType globalAccountSynchronizationSettings = null; if (resource != null){ globalAccountSynchronizationSettings = resource.getProjection(); } if (globalAccountSynchronizationSettings == null) { globalAccountSynchronizationSettings = getLensContext().getAccountSynchronizationSettings(); } AssignmentPolicyEnforcementType globalAssignmentPolicyEnforcement = MiscSchemaUtil.getAssignmentPolicyEnforcementType(globalAccountSynchronizationSettings); return globalAssignmentPolicyEnforcement; } public boolean isLegalize(){ ResourceType resource = getResource(); ProjectionPolicyType globalAccountSynchronizationSettings = null; if (resource != null){ globalAccountSynchronizationSettings = resource.getProjection(); } if (globalAccountSynchronizationSettings == null) { globalAccountSynchronizationSettings = getLensContext().getAccountSynchronizationSettings(); } if (globalAccountSynchronizationSettings == null){ return false; } if (globalAccountSynchronizationSettings.isLegalize() == null){ return false; } return globalAccountSynchronizationSettings.isLegalize(); } /** * Recomputes the new state of account (accountNew). It is computed by applying deltas to the old state (accountOld). * Assuming that oldAccount is already set (or is null if it does not exist) */ public void recompute() throws SchemaException { ObjectDelta<ShadowType> accDelta = getDelta(); PrismObject<ShadowType> base = getObjectCurrent(); if (base == null) { base = getObjectOld(); } ObjectDelta<ShadowType> syncDelta = getSyncDelta(); if (base == null && syncDelta != null && ChangeType.ADD.equals(syncDelta.getChangeType())) { PrismObject<ShadowType> objectToAdd = syncDelta.getObjectToAdd(); if (objectToAdd != null) { PrismObjectDefinition<ShadowType> objectDefinition = objectToAdd.getDefinition(); // TODO: remove constructor, use some factory method instead base = new PrismObject<ShadowType>(objectToAdd.getElementName(), objectDefinition, getNotNullPrismContext()); base = syncDelta.computeChangedObject(base); } } if (accDelta == null) { // No change setObjectNew(base); return; } if (base == null && accDelta.isModify()) { RefinedObjectClassDefinition rAccountDef = getRefinedAccountDefinition(); if (rAccountDef != null) { base = (PrismObject<ShadowType>) rAccountDef.createBlankShadow(); } } setObjectNew(accDelta.computeChangedObject(base)); } public void clearIntermediateResults() { constructionDeltaSetTriple = null; outboundConstruction = null; squeezedAttributes = null; } /** * Distribute the resource that's in the context into all the prism objects (old, new) and deltas. * The resourceRef will not just contain the OID but also full resource object. 
This may optimize handling * of the objects in upper layers (e.g. GUI). */ public void distributeResource() { ResourceType resourceType = getResource(); if (resourceType == null) { return; } PrismObject<ResourceType> resource = resourceType.asPrismObject(); distributeResourceObject(getObjectOld(), resource); distributeResourceObject(getObjectCurrent(), resource); distributeResourceObject(getObjectNew(), resource); distributeResourceDelta(getPrimaryDelta(), resource); distributeResourceDelta(getSecondaryDelta(), resource); } private void distributeResourceObject(PrismObject<ShadowType> object, PrismObject<ResourceType> resource) { if (object == null) { return; } PrismReference resourceRef = object.findReference(ShadowType.F_RESOURCE_REF); if (resourceRef != null) { distributeResourceValues(resourceRef.getValues(), resource); } } private void distributeResourceValue(PrismReferenceValue resourceRefVal, PrismObject<ResourceType> resource) { if (resourceRefVal != null) { resourceRefVal.setObject(resource); } } private void distributeResourceDelta(ObjectDelta<ShadowType> delta, PrismObject<ResourceType> resource) { if (delta == null) { return; } if (delta.isAdd()) { distributeResourceObject(delta.getObjectToAdd(), resource); } else if (delta.isModify()) { ReferenceDelta referenceDelta = delta.findReferenceModification(ShadowType.F_RESOURCE_REF); if (referenceDelta != null) { distributeResourceValues(referenceDelta.getValuesToAdd(), resource); distributeResourceValues(referenceDelta.getValuesToDelete(), resource); distributeResourceValues(referenceDelta.getValuesToReplace(), resource); } } // Nothing to do for DELETE delta } private void distributeResourceValues(Collection<PrismReferenceValue> values, PrismObject<ResourceType> resource) { if (values == null) { return; } for(PrismReferenceValue pval: values) { distributeResourceValue(pval, resource); } } /** * Returns delta suitable for execution. The primary and secondary deltas may not make complete sense all by themselves. * E.g. they may both be MODIFY deltas even in case that the account should be created. The deltas begin to make sense * only if combined with sync decision. This method provides the deltas all combined and ready for execution. 
*/ public ObjectDelta<ShadowType> getExecutableDelta() throws SchemaException { SynchronizationPolicyDecision policyDecision = getSynchronizationPolicyDecision(); ObjectDelta<ShadowType> origDelta = getDelta(); if (policyDecision == SynchronizationPolicyDecision.ADD) { if (origDelta == null || origDelta.isModify()) { // We need to convert modify delta to ADD ObjectDelta<ShadowType> addDelta = new ObjectDelta<ShadowType>(getObjectTypeClass(), ChangeType.ADD, getPrismContext()); RefinedObjectClassDefinition rAccount = getRefinedAccountDefinition(); if (rAccount == null) { throw new IllegalStateException("Definition for account type " + getResourceShadowDiscriminator() + " not found in the context, but it should be there"); } PrismObject<ShadowType> newAccount = (PrismObject<ShadowType>) rAccount.createBlankShadow(); addDelta.setObjectToAdd(newAccount); if (origDelta != null) { addDelta.merge(origDelta); } return addDelta; } } else if (policyDecision == SynchronizationPolicyDecision.KEEP) { // Any delta is OK } else if (policyDecision == SynchronizationPolicyDecision.DELETE) { ObjectDelta<ShadowType> deleteDelta = new ObjectDelta<ShadowType>(getObjectTypeClass(), ChangeType.DELETE, getPrismContext()); String oid = getOid(); if (oid == null) { throw new IllegalStateException( "Internal error: account context OID is null during attempt to create delete secondary delta; context=" +this); } deleteDelta.setOid(oid); return deleteDelta; } else { // This is either UNLINK or null, both are in fact the same as KEEP // Any delta is OK } return origDelta; } public void checkConsistence() { checkConsistence(null, true, false); } public void checkConsistence(String contextDesc, boolean fresh, boolean force) { if (synchronizationPolicyDecision == SynchronizationPolicyDecision.IGNORE) { // No not check these. they may be quite wild. return; } super.checkConsistence(contextDesc); if (synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN) { return; } if (fresh && !force) { if (resource == null) { throw new IllegalStateException("Null resource in "+this + (contextDesc == null ? "" : " in " +contextDesc)); } if (resourceShadowDiscriminator == null) { throw new IllegalStateException("Null resource account type in "+this + (contextDesc == null ? "" : " in " +contextDesc)); } } if (syncDelta != null) { try { syncDelta.checkConsistence(true, true, true); } catch (IllegalArgumentException e) { throw new IllegalArgumentException(e.getMessage()+"; in "+getElementDesc()+" sync delta in "+this + (contextDesc == null ? "" : " in " +contextDesc), e); } catch (IllegalStateException e) { throw new IllegalStateException(e.getMessage()+"; in "+getElementDesc()+" sync delta in "+this + (contextDesc == null ? 
"" : " in " +contextDesc), e); } } } protected boolean isRequireSecondardyDeltaOid() { if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD || synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN || synchronizationPolicyDecision == SynchronizationPolicyDecision.IGNORE) { return false; } if (getResourceShadowDiscriminator() != null && getResourceShadowDiscriminator().getOrder() > 0) { // These may not have the OID yet return false; } return super.isRequireSecondardyDeltaOid(); } @Override public void cleanup() { super.cleanup(); synchronizationPolicyDecision = null; // isLegal = null; // isLegalOld = null; isAssigned = false; isActive = false; } @Override public void normalize() { super.normalize(); if (syncDelta != null) { syncDelta.normalize(); } } @Override public void reset() { super.reset(); wave = -1; fullShadow = false; isAssigned = false; isActive = false; synchronizationPolicyDecision = null; constructionDeltaSetTriple = null; outboundConstruction = null; dependencies = null; squeezedAttributes = null; accountPasswordPolicy = null; } @Override public void adopt(PrismContext prismContext) throws SchemaException { super.adopt(prismContext); if (syncDelta != null) { prismContext.adopt(syncDelta); } } @Override public LensProjectionContext clone(LensContext<? extends ObjectType> lensContext) { LensProjectionContext clone = new LensProjectionContext(lensContext, resourceShadowDiscriminator); copyValues(clone, lensContext); return clone; } protected void copyValues(LensProjectionContext clone, LensContext<? extends ObjectType> lensContext) { super.copyValues(clone, lensContext); // do NOT clone transient values such as accountConstructionDeltaSetTriple // these are not meant to be cloned and they are also not directly clonnable clone.dependencies = this.dependencies; clone.doReconciliation = this.doReconciliation; clone.fullShadow = this.fullShadow; clone.isAssigned = this.isAssigned; clone.outboundConstruction = this.outboundConstruction; clone.synchronizationPolicyDecision = this.synchronizationPolicyDecision; clone.resource = this.resource; clone.resourceShadowDiscriminator = this.resourceShadowDiscriminator; clone.squeezedAttributes = cloneSqueezedAttributes(); if (this.syncDelta != null) { clone.syncDelta = this.syncDelta.clone(); } clone.wave = this.wave; } private Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> cloneSqueezedAttributes() { if (squeezedAttributes == null) { return null; } Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> clonedMap = new HashMap<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>>(); Cloner<ItemValueWithOrigin<PrismPropertyValue<?>>> cloner = new Cloner<ItemValueWithOrigin<PrismPropertyValue<?>>>() { @Override public ItemValueWithOrigin<PrismPropertyValue<?>> clone(ItemValueWithOrigin<PrismPropertyValue<?>> original) { return original.clone(); } }; for (Entry<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> entry: squeezedAttributes.entrySet()) { clonedMap.put(entry.getKey(), entry.getValue().clone(cloner)); } return clonedMap; } /** * Returns true if the projection has any value for specified attribute. 
*/ public boolean hasValueForAttribute(QName attributeName) throws SchemaException { ItemPath attrPath = new ItemPath(ShadowType.F_ATTRIBUTES, attributeName); if (getObjectNew() != null) { PrismProperty<?> attrNew = getObjectNew().findProperty(attrPath); if (attrNew != null && !attrNew.isEmpty()) { return true; } } return false; } private boolean hasValueForAttribute(QName attributeName, Collection<PrismPropertyValue<Construction>> acPpvSet) { if (acPpvSet == null) { return false; } for (PrismPropertyValue<Construction> acPpv: acPpvSet) { Construction ac = acPpv.getValue(); if (ac.hasValueForAttribute(attributeName)) { return true; } } return false; } public AccountOperation getOperation() { if (isAdd()) { return AccountOperation.ADD; } if (isDelete()) { return AccountOperation.DELETE; } return AccountOperation.MODIFY; } @Override public void checkEncrypted() { super.checkEncrypted(); if (syncDelta != null) { CryptoUtil.checkEncrypted(syncDelta); } } public String getHumanReadableName() { StringBuilder sb = new StringBuilder(); sb.append("account("); String humanReadableAccountIdentifier = getHumanReadableIdentifier(); if (StringUtils.isEmpty(humanReadableAccountIdentifier)) { sb.append("no ID"); } else { sb.append("ID "); sb.append(humanReadableAccountIdentifier); } ResourceShadowDiscriminator discr = getResourceShadowDiscriminator(); if (discr != null) { sb.append(", type '"); sb.append(discr.getIntent()); sb.append("', "); if (discr.getOrder() != 0) { sb.append("order ").append(discr.getOrder()).append(", "); } } else { sb.append(" (no discriminator) "); } sb.append(getResource()); sb.append(")"); return sb.toString(); } private String getHumanReadableIdentifier() { PrismObject<ShadowType> object = getObjectNew(); if (object == null) { object = getObjectOld(); } if (object == null) { object = getObjectCurrent(); } if (object == null) { return null; } if (object.canRepresent(ShadowType.class)) { PrismObject<ShadowType> shadow = (PrismObject<ShadowType>)object; Collection<ResourceAttribute<?>> identifiers = ShadowUtil.getIdentifiers(shadow); if (identifiers == null) { return null; } StringBuilder sb = new StringBuilder(); Iterator<ResourceAttribute<?>> iterator = identifiers.iterator(); while (iterator.hasNext()) { ResourceAttribute<?> id = iterator.next(); sb.append(id.toHumanReadableString()); if (iterator.hasNext()) { sb.append(","); } } return sb.toString(); } else { return object.toString(); } } @Override public String debugDump() { return debugDump(0); } @Override public String debugDump(int indent) { return debugDump(indent, true); } public String debugDump(int indent, boolean showTriples) { StringBuilder sb = new StringBuilder(); SchemaDebugUtil.indentDebugDump(sb, indent); sb.append("PROJECTION "); sb.append(getObjectTypeClass() == null ? 
"null" : getObjectTypeClass().getSimpleName()); sb.append(" "); sb.append(getResourceShadowDiscriminator()); if (resource != null) { sb.append(" : "); sb.append(resource.getName().getOrig()); } sb.append("\n"); SchemaDebugUtil.indentDebugDump(sb, indent + 1); sb.append("OID: ").append(getOid()); sb.append(", wave ").append(wave); if (fullShadow) { sb.append(", full"); } else { sb.append(", shadow"); } sb.append(", exists=").append(isExists); sb.append(", assigned=").append(isAssigned); sb.append(", active=").append(isActive); sb.append(", legal=").append(isLegalOld).append("->").append(isLegal); sb.append(", recon=").append(doReconciliation); sb.append(", syncIntent=").append(getSynchronizationIntent()); sb.append(", decision=").append(synchronizationPolicyDecision); if (!isFresh()) { sb.append(", NOT FRESH"); } if (resourceShadowDiscriminator != null && resourceShadowDiscriminator.isThombstone()) { sb.append(", THOMBSTONE"); } if (syncAbsoluteTrigger) { sb.append(", SYNC TRIGGER"); } if (getIteration() != 0) { sb.append(", iteration=").append(getIteration()).append(" (").append(getIterationToken()).append(")"); } sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("old"), getObjectOld(), indent + 1); sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("current"), getObjectCurrent(), indent + 1); sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("new"), getObjectNew(), indent + 1); sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("primary delta"), getPrimaryDelta(), indent + 1); sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("secondary delta"), getSecondaryDelta(), indent + 1); sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("sync delta"), getSyncDelta(), indent + 1); sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("executed deltas"), getExecutedDeltas(), indent+1); if (showTriples) { sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("constructionDeltaSetTriple"), constructionDeltaSetTriple, indent + 1); sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("outbound account construction"), outboundConstruction, indent + 1); sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("squeezed attributes"), squeezedAttributes, indent + 1); sb.append("\n"); DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("squeezed associations"), squeezedAssociations, indent + 1); // This is just a debug thing // sb.append("\n"); // DebugUtil.indentDebugDump(sb, indent); // sb.append("ACCOUNT dependencies\n"); // sb.append(DebugUtil.debugDump(dependencies, indent + 1)); } return sb.toString(); } @Override protected String getElementDefaultDesc() { return "projection"; } @Override public String toString() { return "LensProjectionContext(" + (getObjectTypeClass() == null ? "null" : getObjectTypeClass().getSimpleName()) + ":" + getOid() + ( resource == null ? "" : " on " + resource ) + ")"; } /** * Return a human readable name of the projection object suitable for logs. 
*/ public String toHumanReadableString() { if (resourceShadowDiscriminator == null) { return "(null" + resource + ")"; } if (resource != null) { return "("+getKindValue(resourceShadowDiscriminator.getKind()) + " ("+resourceShadowDiscriminator.getIntent()+") on " + resource + ")"; } else { return "("+getKindValue(resourceShadowDiscriminator.getKind()) + " ("+resourceShadowDiscriminator.getIntent()+") on " + resourceShadowDiscriminator.getResourceOid() + ")"; } } public String getHumanReadableKind() { if (resourceShadowDiscriminator == null) { return "resource object"; } return getKindValue(resourceShadowDiscriminator.getKind()); } private String getKindValue(ShadowKindType kind) { if (kind == null) { return "null"; } return kind.value(); } @Override protected String getElementDesc() { if (resourceShadowDiscriminator == null) { return "shadow"; } return getKindValue(resourceShadowDiscriminator.getKind()); } public void addToPrismContainer(PrismContainer<LensProjectionContextType> lensProjectionContextTypeContainer) throws SchemaException { LensProjectionContextType lensProjectionContextType = lensProjectionContextTypeContainer.createNewValue().asContainerable(); super.storeIntoLensElementContextType(lensProjectionContextType); lensProjectionContextType.setSyncDelta(syncDelta != null ? DeltaConvertor.toObjectDeltaType(syncDelta) : null); lensProjectionContextType.setWave(wave); lensProjectionContextType.setResourceShadowDiscriminator(resourceShadowDiscriminator != null ? resourceShadowDiscriminator.toResourceShadowDiscriminatorType() : null); lensProjectionContextType.setFullShadow(fullShadow); lensProjectionContextType.setIsAssigned(isAssigned); lensProjectionContextType.setIsActive(isActive); lensProjectionContextType.setIsLegal(isLegal); lensProjectionContextType.setIsLegalOld(isLegalOld); lensProjectionContextType.setIsExists(isExists); lensProjectionContextType.setSynchronizationPolicyDecision(synchronizationPolicyDecision != null ? 
synchronizationPolicyDecision.toSynchronizationPolicyDecisionType() : null); lensProjectionContextType.setDoReconciliation(doReconciliation); lensProjectionContextType.setSynchronizationSituationDetected(synchronizationSituationDetected); lensProjectionContextType.setSynchronizationSituationResolved(synchronizationSituationResolved); lensProjectionContextType.setAccountPasswordPolicy(accountPasswordPolicy); lensProjectionContextType.setSyncAbsoluteTrigger(syncAbsoluteTrigger); } public static LensProjectionContext fromLensProjectionContextType(LensProjectionContextType projectionContextType, LensContext lensContext, OperationResult result) throws SchemaException, ConfigurationException, ObjectNotFoundException, CommunicationException { String objectTypeClassString = projectionContextType.getObjectTypeClass(); if (StringUtils.isEmpty(objectTypeClassString)) { throw new SystemException("Object type class is undefined in LensProjectionContextType"); } ResourceShadowDiscriminator resourceShadowDiscriminator = ResourceShadowDiscriminator.fromResourceShadowDiscriminatorType(projectionContextType.getResourceShadowDiscriminator()); LensProjectionContext projectionContext = new LensProjectionContext(lensContext, resourceShadowDiscriminator); projectionContext.retrieveFromLensElementContextType(projectionContextType, result); if (projectionContextType.getSyncDelta() != null) { projectionContext.syncDelta = DeltaConvertor.createObjectDelta(projectionContextType.getSyncDelta(), lensContext.getPrismContext()); } else { projectionContext.syncDelta = null; } projectionContext.wave = projectionContextType.getWave() != null ? projectionContextType.getWave() : 0; projectionContext.fullShadow = projectionContextType.isFullShadow() != null ? projectionContextType.isFullShadow() : false; projectionContext.isAssigned = projectionContextType.isIsAssigned() != null ? projectionContextType.isIsAssigned() : false; projectionContext.isActive = projectionContextType.isIsActive() != null ? projectionContextType.isIsActive() : false; projectionContext.isLegal = projectionContextType.isIsLegal(); projectionContext.isExists = projectionContextType.isIsExists() != null ? projectionContextType.isIsExists() : false; projectionContext.synchronizationPolicyDecision = SynchronizationPolicyDecision.fromSynchronizationPolicyDecisionType(projectionContextType.getSynchronizationPolicyDecision()); projectionContext.doReconciliation = projectionContextType.isDoReconciliation() != null ? projectionContextType.isDoReconciliation() : false; projectionContext.synchronizationSituationDetected = projectionContextType.getSynchronizationSituationDetected(); projectionContext.synchronizationSituationResolved = projectionContextType.getSynchronizationSituationResolved(); projectionContext.accountPasswordPolicy = projectionContextType.getAccountPasswordPolicy(); projectionContext.syncAbsoluteTrigger = projectionContextType.isSyncAbsoluteTrigger(); return projectionContext; } // determines whether full shadow is present, based on operation result got from provisioning public void determineFullShadowFlag(OperationResultType fetchResult) { if (fetchResult != null && (fetchResult.getStatus() == OperationResultStatusType.PARTIAL_ERROR || fetchResult.getStatus() == OperationResultStatusType.FATAL_ERROR)) { // todo what about other kinds of status? [e.g. in-progress] setFullShadow(false); } else { setFullShadow(true); } } }
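The recompute() contract documented above reduces to: the "new" object is the base object (preferring "current", falling back to "old") with the pending delta applied. The sketch below uses hypothetical stand-in types to show just that shape; midpoint's real PrismObject/ObjectDelta machinery, including the blank-shadow synthesis for ADD decisions, is deliberately left out.

import java.util.function.UnaryOperator;

// Conceptual sketch of recompute(); every type here is a stand-in, not midpoint API.
public class RecomputeSketch {
  static <T> T recompute(T current, T old, UnaryOperator<T> delta) {
    T base = (current != null) ? current : old; // prefer the freshest known state
    if (delta == null) {
      return base;                              // no change: new == base
    }
    return delta.apply(base);                   // new = base + delta
  }

  public static void main(String[] args) {
    // "current" is unknown, so the delta is applied on top of the old state.
    System.out.println(recompute(null, "shadow-v1", s -> s.replace("v1", "v2"))); // shadow-v2
  }
}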
sabriarabacioglu/engerek
model/model-impl/src/main/java/com/evolveum/midpoint/model/impl/lens/LensProjectionContext.java
Java
apache-2.0
42,728
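The getExecutableDelta() logic in the record above is essentially a three-way decision table over the synchronization policy decision: ADD upgrades a missing or MODIFY delta to a full ADD, DELETE always builds a fresh delete delta keyed by OID, and everything else (KEEP, UNLINK, or no decision) passes the original delta through. The sketch below reduces that table to strings and an enum purely for illustration; every name is hypothetical, and the real method constructs genuine ObjectDelta instances.

// Reduced decision table for getExecutableDelta(); all names are illustrative only.
public class ExecutableDeltaSketch {
  enum Decision { ADD, KEEP, DELETE, UNLINK }

  static String executableDelta(Decision d, String origDelta) {
    switch (d) {
      case ADD:    // a missing or MODIFY delta is upgraded to a full ADD
        return (origDelta == null || origDelta.startsWith("MODIFY"))
            ? "ADD " + (origDelta == null ? "(blank shadow)" : "merged with " + origDelta)
            : origDelta;
      case DELETE: // always a freshly built DELETE delta keyed by the context OID
        return "DELETE";
      default:     // KEEP, UNLINK, or no decision: any delta is acceptable as-is
        return origDelta;
    }
  }

  public static void main(String[] args) {
    System.out.println(executableDelta(Decision.ADD, "MODIFY attrs")); // ADD merged with MODIFY attrs
    System.out.println(executableDelta(Decision.DELETE, null));        // DELETE
  }
}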
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.beam.sdk.io.kafka; import static org.apache.beam.sdk.metrics.MetricResultsMatchers.attemptedMetricsResult; import static org.apache.beam.sdk.transforms.display.DisplayDataMatchers.hasDisplayItem; import static org.hamcrest.Matchers.hasItem; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import javax.annotation.Nullable; import org.apache.beam.sdk.Pipeline.PipelineExecutionException; import org.apache.beam.sdk.PipelineResult; import org.apache.beam.sdk.coders.BigEndianIntegerCoder; import org.apache.beam.sdk.coders.BigEndianLongCoder; import org.apache.beam.sdk.coders.CoderRegistry; import org.apache.beam.sdk.coders.InstantCoder; import org.apache.beam.sdk.coders.StringUtf8Coder; import org.apache.beam.sdk.coders.VarLongCoder; import org.apache.beam.sdk.io.Read; import org.apache.beam.sdk.io.UnboundedSource; import org.apache.beam.sdk.io.UnboundedSource.UnboundedReader; import org.apache.beam.sdk.io.kafka.serialization.InstantDeserializer; import org.apache.beam.sdk.metrics.GaugeResult; import org.apache.beam.sdk.metrics.MetricName; import org.apache.beam.sdk.metrics.MetricNameFilter; import org.apache.beam.sdk.metrics.MetricQueryResults; import org.apache.beam.sdk.metrics.MetricResult; import org.apache.beam.sdk.metrics.MetricsFilter; import org.apache.beam.sdk.metrics.SinkMetrics; import org.apache.beam.sdk.metrics.SourceMetrics; import org.apache.beam.sdk.options.PipelineOptionsFactory; import org.apache.beam.sdk.testing.PAssert; import org.apache.beam.sdk.testing.TestPipeline; import org.apache.beam.sdk.transforms.Count; import org.apache.beam.sdk.transforms.Distinct; import org.apache.beam.sdk.transforms.DoFn; import org.apache.beam.sdk.transforms.Flatten; import org.apache.beam.sdk.transforms.Max; import 
org.apache.beam.sdk.transforms.Min; import org.apache.beam.sdk.transforms.ParDo; import org.apache.beam.sdk.transforms.SerializableFunction; import org.apache.beam.sdk.transforms.Values; import org.apache.beam.sdk.transforms.display.DisplayData; import org.apache.beam.sdk.util.CoderUtils; import org.apache.beam.sdk.values.KV; import org.apache.beam.sdk.values.PCollection; import org.apache.beam.sdk.values.PCollectionList; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.MockConsumer; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.producer.MockProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.ByteArrayDeserializer; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.IntegerDeserializer; import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.LongDeserializer; import org.apache.kafka.common.serialization.LongSerializer; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.utils.Utils; import org.hamcrest.collection.IsIterableContainingInAnyOrder; import org.hamcrest.collection.IsIterableWithSize; import org.joda.time.Instant; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Tests of {@link KafkaIO}. * Run with 'mvn test -Dkafka.clients.version=0.10.1.1', * or 'mvn test -Dkafka.clients.version=0.9.0.1' for either Kafka client version. */ @RunWith(JUnit4.class) public class KafkaIOTest { private static final Logger LOG = LoggerFactory.getLogger(KafkaIOTest.class); /* * The tests below borrow code and structure from CountingSourceTest. In addition verifies * the reader interleaves the records from multiple partitions. * * Other tests to consider : * - test KafkaRecordCoder */ @Rule public final transient TestPipeline p = TestPipeline.create(); @Rule public ExpectedException thrown = ExpectedException.none(); // Update mock consumer with records distributed among the given topics, each with given number // of partitions. Records are assigned in round-robin order among the partitions. 
private static MockConsumer<byte[], byte[]> mkMockConsumer( List<String> topics, int partitionsPerTopic, int numElements, OffsetResetStrategy offsetResetStrategy) { final List<TopicPartition> partitions = new ArrayList<>(); final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>(); Map<String, List<PartitionInfo>> partitionMap = new HashMap<>(); for (String topic : topics) { List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic); for (int i = 0; i < partitionsPerTopic; i++) { TopicPartition tp = new TopicPartition(topic, i); partitions.add(tp); partIds.add(new PartitionInfo(topic, i, null, null, null)); records.put(tp, new ArrayList<ConsumerRecord<byte[], byte[]>>()); } partitionMap.put(topic, partIds); } int numPartitions = partitions.size(); final long[] offsets = new long[numPartitions]; for (int i = 0; i < numElements; i++) { int pIdx = i % numPartitions; TopicPartition tp = partitions.get(pIdx); records.get(tp).add( new ConsumerRecord<>( tp.topic(), tp.partition(), offsets[pIdx]++, ByteBuffer.wrap(new byte[4]).putInt(i).array(), // key is 4 byte record id ByteBuffer.wrap(new byte[8]).putLong(i).array())); // value is 8 byte record id } // This is updated when reader assigns partitions. final AtomicReference<List<TopicPartition>> assignedPartitions = new AtomicReference<>(Collections.<TopicPartition>emptyList()); final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(offsetResetStrategy) { // override assign() in order to set offset limits & to save assigned partitions. //remove keyword '@Override' here, it can work with Kafka client 0.9 and 0.10 as: //1. SpEL can find this function, either input is List or Collection; //2. List extends Collection, so super.assign() could find either assign(List) // or assign(Collection). public void assign(final List<TopicPartition> assigned) { super.assign(assigned); assignedPartitions.set(ImmutableList.copyOf(assigned)); for (TopicPartition tp : assigned) { updateBeginningOffsets(ImmutableMap.of(tp, 0L)); updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size())); } } // Override offsetsForTimes() in order to look up the offsets by timestamp. // Remove keyword '@Override' here, Kafka client 0.10.1.0 previous versions does not have // this method. // Should return Map<TopicPartition, OffsetAndTimestamp>, but 0.10.1.0 previous versions // does not have the OffsetAndTimestamp class. So return a raw type and use reflection // here. @SuppressWarnings("unchecked") public Map offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) { HashMap<TopicPartition, Object> result = new HashMap<>(); try { Class<?> cls = Class.forName("org.apache.kafka.clients.consumer.OffsetAndTimestamp"); // OffsetAndTimestamp(long offset, long timestamp) Constructor constructor = cls.getDeclaredConstructor(long.class, long.class); // In test scope, timestamp == offset. 
for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) { long maxOffset = offsets[partitions.indexOf(entry.getKey())]; Long offset = entry.getValue(); if (offset >= maxOffset) { offset = null; } result.put( entry.getKey(), constructor.newInstance(entry.getValue(), offset)); } return result; } catch (ClassNotFoundException | IllegalAccessException | InstantiationException | NoSuchMethodException | InvocationTargetException e) { throw new RuntimeException(e); } } }; for (String topic : topics) { consumer.updatePartitions(topic, partitionMap.get(topic)); } // MockConsumer does not maintain any relationship between partition seek position and the // records added. e.g. if we add 10 records to a partition and then seek to the end of the // partition, MockConsumer is still going to return the 10 records in the next poll. It is // our responsibility to make sure the currently enqueued records sync with partition offsets. // The following task will be called inside each invocation to MockConsumer.poll(). // We enqueue only the records with the offset >= partition's current position. Runnable recordEnqueueTask = new Runnable() { @Override public void run() { // add all the records with offset >= current partition position. for (TopicPartition tp : assignedPartitions.get()) { long curPos = consumer.position(tp); for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) { if (r.offset() >= curPos) { consumer.addRecord(r); } } } consumer.schedulePollTask(this); } }; consumer.schedulePollTask(recordEnqueueTask); return consumer; } private static class ConsumerFactoryFn implements SerializableFunction<Map<String, Object>, Consumer<byte[], byte[]>> { private final List<String> topics; private final int partitionsPerTopic; private final int numElements; private final OffsetResetStrategy offsetResetStrategy; public ConsumerFactoryFn(List<String> topics, int partitionsPerTopic, int numElements, OffsetResetStrategy offsetResetStrategy) { this.topics = topics; this.partitionsPerTopic = partitionsPerTopic; this.numElements = numElements; this.offsetResetStrategy = offsetResetStrategy; } @Override public Consumer<byte[], byte[]> apply(Map<String, Object> config) { return mkMockConsumer(topics, partitionsPerTopic, numElements, offsetResetStrategy); } } private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform( int numElements, @Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) { return mkKafkaReadTransform(numElements, numElements, timestampFn); } /** * Creates a consumer with two topics, with 10 partitions each. * numElements are (round-robin) assigned to all the 20 partitions.
*/ private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform( int numElements, int maxNumRecords, @Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) { List<String> topics = ImmutableList.of("topic_a", "topic_b"); KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read() .withBootstrapServers("myServer1:9092,myServer2:9092") .withTopics(topics) .withConsumerFactoryFn(new ConsumerFactoryFn( topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 20 partitions .withKeyDeserializer(IntegerDeserializer.class) .withValueDeserializer(LongDeserializer.class) .withMaxNumRecords(maxNumRecords); if (timestampFn != null) { return reader.withTimestampFn(timestampFn); } else { return reader; } } private static class AssertMultipleOf implements SerializableFunction<Iterable<Long>, Void> { private final int num; public AssertMultipleOf(int num) { this.num = num; } @Override public Void apply(Iterable<Long> values) { for (Long v : values) { assertEquals(0, v % num); } return null; } } public static void addCountingAsserts(PCollection<Long> input, long numElements) { // Count == numElements // Unique count == numElements // Min == 0 // Max == numElements-1 addCountingAsserts(input, numElements, numElements, 0L, numElements - 1); } public static void addCountingAsserts( PCollection<Long> input, long count, long uniqueCount, long min, long max) { PAssert .thatSingleton(input.apply("Count", Count.<Long>globally())) .isEqualTo(count); PAssert .thatSingleton(input.apply(Distinct.<Long>create()) .apply("UniqueCount", Count.<Long>globally())) .isEqualTo(uniqueCount); PAssert .thatSingleton(input.apply("Min", Min.<Long>globally())) .isEqualTo(min); PAssert .thatSingleton(input.apply("Max", Max.<Long>globally())) .isEqualTo(max); } @Test public void testUnboundedSource() { int numElements = 1000; PCollection<Long> input = p .apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()) .withoutMetadata()) .apply(Values.<Long>create()); addCountingAsserts(input, numElements); p.run(); } @Test public void testUnreachableKafkaBrokers() { // Expect an exception when the Kafka brokers are not reachable on the workers. // We specify partitions explicitly so that splitting does not involve server interaction. // Set request timeout to 10ms so that test does not take long. thrown.expect(Exception.class); thrown.expectMessage("Reader-0: Timeout while initializing partition 'test-0'"); int numElements = 1000; PCollection<Long> input = p .apply(KafkaIO.<Integer, Long>read() .withBootstrapServers("8.8.8.8:9092") // Google public DNS ip. 
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 0))) .withKeyDeserializer(IntegerDeserializer.class) .withValueDeserializer(LongDeserializer.class) .updateConsumerProperties(ImmutableMap.<String, Object>of( ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 10, ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 5, ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 8, ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 8)) .withMaxNumRecords(10) .withoutMetadata()) .apply(Values.<Long>create()); addCountingAsserts(input, numElements); p.run(); } @Test public void testUnboundedSourceWithSingleTopic() { // same as testUnboundedSource, but with single topic int numElements = 1000; String topic = "my_topic"; KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read() .withBootstrapServers("none") .withTopic("my_topic") .withConsumerFactoryFn(new ConsumerFactoryFn( ImmutableList.of(topic), 10, numElements, OffsetResetStrategy.EARLIEST)) .withMaxNumRecords(numElements) .withKeyDeserializer(IntegerDeserializer.class) .withValueDeserializer(LongDeserializer.class); PCollection<Long> input = p .apply(reader.withoutMetadata()) .apply(Values.<Long>create()); addCountingAsserts(input, numElements); p.run(); } @Test public void testUnboundedSourceWithExplicitPartitions() { int numElements = 1000; List<String> topics = ImmutableList.of("test"); KafkaIO.Read<byte[], Long> reader = KafkaIO.<byte[], Long>read() .withBootstrapServers("none") .withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5))) .withConsumerFactoryFn(new ConsumerFactoryFn( topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 10 partitions .withKeyDeserializer(ByteArrayDeserializer.class) .withValueDeserializer(LongDeserializer.class) .withMaxNumRecords(numElements / 10); PCollection<Long> input = p .apply(reader.withoutMetadata()) .apply(Values.<Long>create()); // assert that every element is a multiple of 5. PAssert .that(input) .satisfies(new AssertMultipleOf(5)); PAssert .thatSingleton(input.apply(Count.<Long>globally())) .isEqualTo(numElements / 10L); p.run(); } private static class ElementValueDiff extends DoFn<Long, Long> { @ProcessElement public void processElement(ProcessContext c) throws Exception { c.output(c.element() - c.timestamp().getMillis()); } } @Test public void testUnboundedSourceTimestamps() { int numElements = 1000; PCollection<Long> input = p .apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata()) .apply(Values.<Long>create()); addCountingAsserts(input, numElements); PCollection<Long> diffs = input .apply("TimestampDiff", ParDo.of(new ElementValueDiff())) .apply("DistinctTimestamps", Distinct.<Long>create()); // This assert also confirms that diffs only has one unique value. PAssert.thatSingleton(diffs).isEqualTo(0L); p.run(); } private static class RemoveKafkaMetadata<K, V> extends DoFn<KafkaRecord<K, V>, KV<K, V>> { @ProcessElement public void processElement(ProcessContext ctx) throws Exception { ctx.output(ctx.element().getKV()); } } @Test public void testUnboundedSourceSplits() throws Exception { int numElements = 1000; int numSplits = 10; // Coders must be specified explicitly here due to the way the transform // is used in the test. UnboundedSource<KafkaRecord<Integer, Long>, ?> initial = mkKafkaReadTransform(numElements, null) .withKeyDeserializerAndCoder(IntegerDeserializer.class, BigEndianIntegerCoder.of()) .withValueDeserializerAndCoder(LongDeserializer.class, BigEndianLongCoder.of()) .makeSource(); List<? 
extends UnboundedSource<KafkaRecord<Integer, Long>, ?>> splits = initial.split(numSplits, p.getOptions()); assertEquals("Expected exact splitting", numSplits, splits.size()); long elementsPerSplit = numElements / numSplits; assertEquals("Expected even splits", numElements, elementsPerSplit * numSplits); PCollectionList<Long> pcollections = PCollectionList.empty(p); for (int i = 0; i < splits.size(); ++i) { pcollections = pcollections.and( p.apply("split" + i, Read.from(splits.get(i)).withMaxNumRecords(elementsPerSplit)) .apply("Remove Metadata " + i, ParDo.of(new RemoveKafkaMetadata<Integer, Long>())) .apply("collection " + i, Values.<Long>create())); } PCollection<Long> input = pcollections.apply(Flatten.<Long>pCollections()); addCountingAsserts(input, numElements); p.run(); } /** * A timestamp function that uses the given value as the timestamp. */ private static class ValueAsTimestampFn implements SerializableFunction<KV<Integer, Long>, Instant> { @Override public Instant apply(KV<Integer, Long> input) { return new Instant(input.getValue()); } } // Kafka records are read in a separate thread inside the reader. As a result advance() might not // read any records even from the mock consumer, especially for the first record. // This is a helper method to loop until we read a record. private static void advanceOnce(UnboundedReader<?> reader, boolean isStarted) throws IOException { if (!isStarted && reader.start()) { return; } while (!reader.advance()) { // Very rarely will there be more than one attempt. // In case of a bug we might end up looping forever, and the test will fail with a timeout. // Avoid hard CPU spinning in case of a test failure. try { Thread.sleep(1); } catch (InterruptedException e) { throw new RuntimeException(e); } } } @Test public void testUnboundedSourceCheckpointMark() throws Exception { int numElements = 85; // 85 to make sure some partitions have more records than others. // create a single split: UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source = mkKafkaReadTransform(numElements, new ValueAsTimestampFn()) .makeSource() .split(1, PipelineOptionsFactory.create()) .get(0); UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null); final int numToSkip = 20; // one from each partition. // advance numToSkip elements for (int i = 0; i < numToSkip; ++i) { advanceOnce(reader, i > 0); } // Confirm that we get the expected element in sequence before checkpointing. assertEquals(numToSkip - 1, (long) reader.getCurrent().getKV().getValue()); assertEquals(numToSkip - 1, reader.getCurrentTimestamp().getMillis()); // Checkpoint and restart, and confirm that the source continues correctly. KafkaCheckpointMark mark = CoderUtils.clone( source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark()); reader = source.createReader(null, mark); // Confirm that we get the next elements in sequence. // This also confirms that the reader interleaves records from all the partitions. for (int i = numToSkip; i < numElements; i++) { advanceOnce(reader, i > numToSkip); assertEquals(i, (long) reader.getCurrent().getKV().getValue()); assertEquals(i, reader.getCurrentTimestamp().getMillis()); } } @Test public void testUnboundedSourceCheckpointMarkWithEmptyPartitions() throws Exception { // Similar to testUnboundedSourceCheckpointMark(), but verifies that the source resumes // properly from empty partitions, without missing messages added since the checkpoint.
// Initialize the consumer with fewer elements than the number of partitions so that some are empty. int initialNumElements = 5; UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source = mkKafkaReadTransform(initialNumElements, new ValueAsTimestampFn()) .makeSource() .split(1, PipelineOptionsFactory.create()) .get(0); UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null); for (int l = 0; l < initialNumElements; ++l) { advanceOnce(reader, l > 0); } // Checkpoint and restart, and confirm that the source continues correctly. KafkaCheckpointMark mark = CoderUtils.clone( source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark()); // Create another source with a MockConsumer with OffsetResetStrategy.LATEST. This ensures that // the reader needs to explicitly seek to the first offset for partitions that were empty. int numElements = 100; // all the 20 partitions will have elements List<String> topics = ImmutableList.of("topic_a", "topic_b"); source = KafkaIO.<Integer, Long>read() .withBootstrapServers("none") .withTopics(topics) .withConsumerFactoryFn(new ConsumerFactoryFn( topics, 10, numElements, OffsetResetStrategy.LATEST)) .withKeyDeserializer(IntegerDeserializer.class) .withValueDeserializer(LongDeserializer.class) .withMaxNumRecords(numElements) .withTimestampFn(new ValueAsTimestampFn()) .makeSource() .split(1, PipelineOptionsFactory.create()) .get(0); reader = source.createReader(null, mark); // Verify in any order. As the partitions are unevenly read, the returned records are not in a // simple order. Note that testUnboundedSourceCheckpointMark() verifies round-robin order. List<Long> expected = new ArrayList<>(); List<Long> actual = new ArrayList<>(); for (long i = initialNumElements; i < numElements; i++) { advanceOnce(reader, i > initialNumElements); expected.add(i); actual.add(reader.getCurrent().getKV().getValue()); } assertThat(actual, IsIterableContainingInAnyOrder.containsInAnyOrder(expected.toArray())); } @Test public void testUnboundedSourceMetrics() { int numElements = 1000; String readStep = "readFromKafka"; p.apply(readStep, mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata()); PipelineResult result = p.run(); String splitId = "0"; MetricName elementsRead = SourceMetrics.elementsRead().getName(); MetricName elementsReadBySplit = SourceMetrics.elementsReadBySplit(splitId).getName(); MetricName bytesRead = SourceMetrics.bytesRead().getName(); MetricName bytesReadBySplit = SourceMetrics.bytesReadBySplit(splitId).getName(); MetricName backlogElementsOfSplit = SourceMetrics.backlogElementsOfSplit(splitId).getName(); MetricName backlogBytesOfSplit = SourceMetrics.backlogBytesOfSplit(splitId).getName(); MetricQueryResults metrics = result.metrics().queryMetrics( MetricsFilter.builder().build()); Iterable<MetricResult<Long>> counters = metrics.counters(); assertThat(counters, hasItem(attemptedMetricsResult( elementsRead.namespace(), elementsRead.name(), readStep, 1000L))); assertThat(counters, hasItem(attemptedMetricsResult( elementsReadBySplit.namespace(), elementsReadBySplit.name(), readStep, 1000L))); assertThat(counters, hasItem(attemptedMetricsResult( bytesRead.namespace(), bytesRead.name(), readStep, 12000L))); assertThat(counters, hasItem(attemptedMetricsResult( bytesReadBySplit.namespace(), bytesReadBySplit.name(), readStep, 12000L))); MetricQueryResults backlogElementsMetrics = result.metrics().queryMetrics( MetricsFilter.builder() .addNameFilter( MetricNameFilter.named(
backlogElementsOfSplit.namespace(), backlogElementsOfSplit.name())) .build()); // since gauge values may be inconsistent in some environments, assert only on their existence. assertThat(backlogElementsMetrics.gauges(), IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1)); MetricQueryResults backlogBytesMetrics = result.metrics().queryMetrics( MetricsFilter.builder() .addNameFilter( MetricNameFilter.named( backlogBytesOfSplit.namespace(), backlogBytesOfSplit.name())) .build()); // since gauge values may be inconsistent in some environments, assert only on their existence. assertThat(backlogBytesMetrics.gauges(), IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1)); } @Test public void testSink() throws Exception { // Simply read from the kafka source and write to the kafka sink. Then verify the records // are correctly published to the mock kafka producer. int numElements = 1000; try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) { ProducerSendCompletionThread completionThread = new ProducerSendCompletionThread(producerWrapper.mockProducer).start(); String topic = "test"; p .apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()) .withoutMetadata()) .apply(KafkaIO.<Integer, Long>write() .withBootstrapServers("none") .withTopic(topic) .withKeySerializer(IntegerSerializer.class) .withValueSerializer(LongSerializer.class) .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))); p.run(); completionThread.shutdown(); verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false); } } @Test public void testValuesSink() throws Exception { // Similar to testSink(), but uses the values() interface. int numElements = 1000; try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) { ProducerSendCompletionThread completionThread = new ProducerSendCompletionThread(producerWrapper.mockProducer).start(); String topic = "test"; p .apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()) .withoutMetadata()) .apply(Values.<Long>create()) // there are no keys .apply(KafkaIO.<Integer, Long>write() .withBootstrapServers("none") .withTopic(topic) .withValueSerializer(LongSerializer.class) .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)) .values()); p.run(); completionThread.shutdown(); verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, true); } } @Test public void testEOSink() { // testSink() with EOS enabled. // This does not actually inject retries in a stage to test exactly-once-semantics. // It mainly exercises the code in the normal flow without retries. // Ideally we should test the EOS Sink by triggering replays of messages between stages. // It is not feasible to test such retries with the direct runner. When DoFnTester supports // state, we can test the KafkaEOWriter DoFn directly to ensure it handles retries correctly.
if (!ProducerSpEL.supportsTransactions()) { LOG.warn("testEOSink() is disabled as Kafka client version does not support transactions."); return; } int numElements = 1000; try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) { ProducerSendCompletionThread completionThread = new ProducerSendCompletionThread(producerWrapper.mockProducer).start(); String topic = "test"; p .apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()) .withoutMetadata()) .apply(KafkaIO.<Integer, Long>write() .withBootstrapServers("none") .withTopic(topic) .withKeySerializer(IntegerSerializer.class) .withValueSerializer(LongSerializer.class) .withEOS(1, "test") .withConsumerFactoryFn(new ConsumerFactoryFn( Lists.newArrayList(topic), 10, 10, OffsetResetStrategy.EARLIEST)) .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))); p.run(); completionThread.shutdown(); verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false); } } @Test public void testSinkWithSendErrors() throws Throwable { // Similar to testSink(), except that up to 10 of the send calls to the producer will fail // asynchronously. // TODO: Ideally we want the pipeline to run to completion by retrying bundles that fail. // We limit the number of errors injected to 10 below. This would reflect a real streaming // pipeline. But I am not sure how to achieve that. For now expect an exception: thrown.expect(InjectedErrorException.class); thrown.expectMessage("Injected Error #1"); int numElements = 1000; try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) { ProducerSendCompletionThread completionThreadWithErrors = new ProducerSendCompletionThread(producerWrapper.mockProducer, 10, 100).start(); String topic = "test"; p .apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()) .withoutMetadata()) .apply(KafkaIO.<Integer, Long>write() .withBootstrapServers("none") .withTopic(topic) .withKeySerializer(IntegerSerializer.class) .withValueSerializer(LongSerializer.class) .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))); try { p.run(); } catch (PipelineExecutionException e) { // throwing the inner exception helps assert that the first exception is thrown from the Sink throw e.getCause().getCause(); } finally { completionThreadWithErrors.shutdown(); } } } @Test public void testUnboundedSourceStartReadTime() { assumeTrue(new ConsumerSpEL().hasOffsetsForTimes()); int numElements = 1000; // In this MockConsumer, each element's timestamp and offset are equal, and there are 20 // partitions, so setting this startTime reads half of the elements. int startTime = numElements / 20 / 2; int maxNumRecords = numElements / 2; PCollection<Long> input = p .apply(mkKafkaReadTransform(numElements, maxNumRecords, new ValueAsTimestampFn()) .withStartReadTime(new Instant(startTime)) .withoutMetadata()) .apply(Values.<Long>create()); addCountingAsserts(input, maxNumRecords, maxNumRecords, maxNumRecords, numElements - 1); p.run(); } @Rule public ExpectedException noMessagesException = ExpectedException.none(); @Test public void testUnboundedSourceStartReadTimeException() { assumeTrue(new ConsumerSpEL().hasOffsetsForTimes()); noMessagesException.expect(RuntimeException.class); int numElements = 1000; // In this MockConsumer, each element's timestamp and offset are equal, and there are 20 // partitions, so setting this startTime cannot read any element.
int startTime = numElements / 20; p.apply(mkKafkaReadTransform(numElements, numElements, new ValueAsTimestampFn()) .withStartReadTime(new Instant(startTime)) .withoutMetadata()) .apply(Values.<Long>create()); p.run(); } @Test public void testSourceDisplayData() { KafkaIO.Read<Integer, Long> read = mkKafkaReadTransform(10, null); DisplayData displayData = DisplayData.from(read); assertThat(displayData, hasDisplayItem("topics", "topic_a,topic_b")); assertThat(displayData, hasDisplayItem("enable.auto.commit", false)); assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092")); assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest")); assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288)); } @Test public void testSourceWithExplicitPartitionsDisplayData() { KafkaIO.Read<byte[], Long> read = KafkaIO.<byte[], Long>read() .withBootstrapServers("myServer1:9092,myServer2:9092") .withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5), new TopicPartition("test", 6))) .withConsumerFactoryFn(new ConsumerFactoryFn( Lists.newArrayList("test"), 10, 10, OffsetResetStrategy.EARLIEST)) // 10 partitions .withKeyDeserializer(ByteArrayDeserializer.class) .withValueDeserializer(LongDeserializer.class); DisplayData displayData = DisplayData.from(read); assertThat(displayData, hasDisplayItem("topicPartitions", "test-5,test-6")); assertThat(displayData, hasDisplayItem("enable.auto.commit", false)); assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092")); assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest")); assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288)); } @Test public void testSinkDisplayData() { try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) { KafkaIO.Write<Integer, Long> write = KafkaIO.<Integer, Long>write() .withBootstrapServers("myServerA:9092,myServerB:9092") .withTopic("myTopic") .withValueSerializer(LongSerializer.class) .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)); DisplayData displayData = DisplayData.from(write); assertThat(displayData, hasDisplayItem("topic", "myTopic")); assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServerA:9092,myServerB:9092")); assertThat(displayData, hasDisplayItem("retries", 3)); } } // interface for testing coder inference private interface DummyInterface<T> { } // interface for testing coder inference private interface DummyNonparametricInterface { } // class for testing coder inference private static class DeserializerWithInterfaces implements DummyInterface<String>, DummyNonparametricInterface, Deserializer<Long> { @Override public void configure(Map<String, ?> configs, boolean isKey) { } @Override public Long deserialize(String topic, byte[] bytes) { return 0L; } @Override public void close() { } } // class for which a coder cannot be inferred private static class NonInferableObject { } // class for testing coder inference private static class NonInferableObjectDeserializer implements Deserializer<NonInferableObject> { @Override public void configure(Map<String, ?> configs, boolean isKey) { } @Override public NonInferableObject deserialize(String topic, byte[] bytes) { return new NonInferableObject(); } @Override public void close() { } } @Test public void testInferKeyCoder() { CoderRegistry registry = CoderRegistry.createDefault(); assertTrue(KafkaIO.inferCoder(registry, LongDeserializer.class).getValueCoder() instanceof VarLongCoder);
assertTrue(KafkaIO.inferCoder(registry, StringDeserializer.class).getValueCoder() instanceof StringUtf8Coder); assertTrue(KafkaIO.inferCoder(registry, InstantDeserializer.class).getValueCoder() instanceof InstantCoder); assertTrue(KafkaIO.inferCoder(registry, DeserializerWithInterfaces.class).getValueCoder() instanceof VarLongCoder); } @Rule public ExpectedException cannotInferException = ExpectedException.none(); @Test public void testInferKeyCoderFailure() throws Exception { cannotInferException.expect(RuntimeException.class); CoderRegistry registry = CoderRegistry.createDefault(); KafkaIO.inferCoder(registry, NonInferableObjectDeserializer.class); } @Test public void testSinkMetrics() throws Exception { // Simply read from the kafka source and write to the kafka sink. Then verify the metrics are // reported. int numElements = 1000; try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) { ProducerSendCompletionThread completionThread = new ProducerSendCompletionThread(producerWrapper.mockProducer).start(); String topic = "test"; p .apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()) .withoutMetadata()) .apply("writeToKafka", KafkaIO.<Integer, Long>write() .withBootstrapServers("none") .withTopic(topic) .withKeySerializer(IntegerSerializer.class) .withValueSerializer(LongSerializer.class) .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))); PipelineResult result = p.run(); MetricName elementsWritten = SinkMetrics.elementsWritten().getName(); MetricQueryResults metrics = result.metrics().queryMetrics( MetricsFilter.builder() .addNameFilter(MetricNameFilter.inNamespace(elementsWritten.namespace())) .build()); assertThat(metrics.counters(), hasItem( attemptedMetricsResult( elementsWritten.namespace(), elementsWritten.name(), "writeToKafka", 1000L))); completionThread.shutdown(); } } private static void verifyProducerRecords(MockProducer<Integer, Long> mockProducer, String topic, int numElements, boolean keyIsAbsent) { // verify that appropriate messages are written to kafka List<ProducerRecord<Integer, Long>> sent = mockProducer.history(); // sort by values Collections.sort(sent, new Comparator<ProducerRecord<Integer, Long>>() { @Override public int compare(ProducerRecord<Integer, Long> o1, ProducerRecord<Integer, Long> o2) { return Long.compare(o1.value(), o2.value()); } }); for (int i = 0; i < numElements; i++) { ProducerRecord<Integer, Long> record = sent.get(i); assertEquals(topic, record.topic()); if (keyIsAbsent) { assertNull(record.key()); } else { assertEquals(i, record.key().intValue()); } assertEquals(i, record.value().longValue()); } } /** * This is a wrapper over MockProducer. It also places the mock producer in the global * MOCK_PRODUCER_MAP. The map is needed so that the producer returned by ProducerFactoryFn during * the pipeline run can be used in verification after the test. We also override the * {@code flush()} method in MockProducer so that the test can control the behavior of the * {@code send()} method (e.g. to inject errors). */ private static class MockProducerWrapper implements AutoCloseable { final String producerKey; final MockProducer<Integer, Long> mockProducer; // MockProducer has a "closed" method starting with version 0.11.
private static Method closedMethod; static { try { closedMethod = MockProducer.class.getMethod("closed"); } catch (NoSuchMethodException e) { closedMethod = null; } } MockProducerWrapper() { producerKey = String.valueOf(ThreadLocalRandom.current().nextLong()); mockProducer = new MockProducer<Integer, Long>( false, // disable synchronous completion of send. see ProducerSendCompletionThread below. new IntegerSerializer(), new LongSerializer()) { // Override flush() so that it does not complete all the waiting sends, giving // ProducerSendCompletionThread a chance to inject errors. @Override public void flush() { while (completeNext()) { // there are some uncompleted records. let the completion thread handle them. try { Thread.sleep(10); } catch (InterruptedException e) { // ok to retry. } } } }; // Add the producer to the global map so that the producer factory function can access it. assertNull(MOCK_PRODUCER_MAP.putIfAbsent(producerKey, mockProducer)); } public void close() { MOCK_PRODUCER_MAP.remove(producerKey); try { if (closedMethod == null || !((Boolean) closedMethod.invoke(mockProducer))) { mockProducer.close(); } } catch (Exception e) { // Not expected. throw new RuntimeException(e); } } } private static final ConcurrentMap<String, MockProducer<Integer, Long>> MOCK_PRODUCER_MAP = new ConcurrentHashMap<>(); private static class ProducerFactoryFn implements SerializableFunction<Map<String, Object>, Producer<Integer, Long>> { final String producerKey; ProducerFactoryFn(String producerKey) { this.producerKey = producerKey; } @SuppressWarnings("unchecked") @Override public Producer<Integer, Long> apply(Map<String, Object> config) { // Make sure the config is correctly set up for serializers. // There may not be a key serializer if we're interested only in values. if (config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) != null) { Utils.newInstance( ((Class<?>) config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) .asSubclass(Serializer.class) ).configure(config, true); } Utils.newInstance( ((Class<?>) config.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) .asSubclass(Serializer.class) ).configure(config, false); // Returning the same producer for each instance in a pipeline seems to work fine currently. // If DirectRunner creates multiple DoFn instances for sinks, we might need to handle // it appropriately. I.e. allow multiple producers for each producerKey and concatenate // all the messages written to each producer for verification after the pipeline finishes. return MOCK_PRODUCER_MAP.get(producerKey); } } private static class InjectedErrorException extends RuntimeException { InjectedErrorException(String message) { super(message); } } /** * We start MockProducer with auto-completion disabled. That implies a record is not marked sent * until #completeNext() is called on it. This class starts a thread to asynchronously 'complete' * the sends. During completion, we can also make those requests fail. This error injection * is used in one of the tests.
*/ private static class ProducerSendCompletionThread { private final MockProducer<Integer, Long> mockProducer; private final int maxErrors; private final int errorFrequency; private final AtomicBoolean done = new AtomicBoolean(false); private final ExecutorService injectorThread; private int numCompletions = 0; ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer) { // complete everything successfully this(mockProducer, 0, 0); } ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer, int maxErrors, int errorFrequency) { this.mockProducer = mockProducer; this.maxErrors = maxErrors; this.errorFrequency = errorFrequency; injectorThread = Executors.newSingleThreadExecutor(); } ProducerSendCompletionThread start() { injectorThread.submit(new Runnable() { @Override public void run() { int errorsInjected = 0; while (!done.get()) { boolean successful; if (errorsInjected < maxErrors && ((numCompletions + 1) % errorFrequency) == 0) { successful = mockProducer.errorNext( new InjectedErrorException("Injected Error #" + (errorsInjected + 1))); if (successful) { errorsInjected++; } } else { successful = mockProducer.completeNext(); } if (successful) { numCompletions++; } else { // wait a bit since there are no unsent records try { Thread.sleep(1); } catch (InterruptedException e) { // ok to retry. } } } } }); return this; } void shutdown() { done.set(true); injectorThread.shutdown(); try { assertTrue(injectorThread.awaitTermination(10, TimeUnit.SECONDS)); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } }
wangyum/beam
sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOTest.java
Java
apache-2.0
49,154
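A minimal, self-contained sketch of the cross-version reflection technique that mkMockConsumer() in the KafkaIOTest record above relies on: constructing a class that may not exist at compile time (OffsetAndTimestamp only appeared in Kafka client 0.10.1.0). The class name and constructor signature follow the Kafka client; the wrapper class and its fallback behavior are illustrative assumptions, not part of the record above.

import java.lang.reflect.Constructor;

public class OptionalClassReflectionSketch {
  // Build an OffsetAndTimestamp reflectively so the calling code also compiles
  // against client versions where the class is absent.
  public static Object newOffsetAndTimestamp(long offset, long timestamp) {
    try {
      Class<?> cls = Class.forName("org.apache.kafka.clients.consumer.OffsetAndTimestamp");
      // OffsetAndTimestamp(long offset, long timestamp)
      Constructor<?> ctor = cls.getDeclaredConstructor(long.class, long.class);
      return ctor.newInstance(offset, timestamp);
    } catch (ClassNotFoundException e) {
      return null; // pre-0.10.1.0 client on the classpath: the class simply is not there
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(e);
    }
  }

  public static void main(String[] args) {
    Object result = newOffsetAndTimestamp(42L, 42L);
    System.out.println(result == null ? "OffsetAndTimestamp absent on this classpath" : result);
  }
}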
/* * Copyright 2017 Exorath * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.exorath.plugin.game.cakewars.rewards; import com.exorath.plugin.game.cakewars.Main; import com.exorath.service.currency.api.CurrencyServiceAPI; import com.exorath.victoryHandler.rewards.CurrencyReward; import net.md_5.bungee.api.ChatColor; /** * Created by toonsev on 5/31/2017. */ public class KillsReward extends CurrencyReward{ public static final int CRUMBS_PER_KILL = 2; private int kills; public KillsReward(CurrencyServiceAPI currencyServiceAPI) { super(null, currencyServiceAPI, Main.CRUMBS_CURRENCY, 0); setCurrencyColor(ChatColor.GOLD); setCurrencyName("Crumbs"); } public void addKill(){ kills++; setAmount(kills*CRUMBS_PER_KILL); setReason("Killing " + kills + " Players"); } }
Exorath/CakeWarsGamePlugin
src/main/java/com/exorath/plugin/game/cakewars/rewards/KillsReward.java
Java
apache-2.0
1,398
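The KillsReward record above recomputes the reward amount from the kill counter on every kill (kills * CRUMBS_PER_KILL) instead of incrementing a running total. A tiny standalone sketch of that pattern; the class below is illustrative and not part of the plugin:

public class KillsRewardSketch {
  static final int CRUMBS_PER_KILL = 2;
  private int kills;
  private int amount;

  void addKill() {
    kills++;
    // recompute from the counter; the derived value can never drift out of sync
    amount = kills * CRUMBS_PER_KILL;
  }

  public static void main(String[] args) {
    KillsRewardSketch reward = new KillsRewardSketch();
    for (int i = 0; i < 3; i++) {
      reward.addKill();
    }
    System.out.println(reward.amount); // 3 kills * 2 crumbs = 6
  }
}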
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.irc; import java.util.ArrayList; import java.util.Dictionary; import java.util.Hashtable; import java.util.List; import org.junit.Before; import org.junit.Test; import org.schwering.irc.lib.IRCConnection; import org.schwering.irc.lib.IRCEventAdapter; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class IrcEndpointTest { private IrcComponent component; private IrcConfiguration configuration; private IRCConnection connection; private IrcEndpoint endpoint; @Before public void doSetup() { component = mock(IrcComponent.class); configuration = mock(IrcConfiguration.class); connection = mock(IRCConnection.class); List<String> channels = new ArrayList<String>(); Dictionary<String, String> keys = new Hashtable<String, String>(); channels.add("chan1"); channels.add("chan2"); keys.put("chan1", ""); keys.put("chan2", "chan2key"); when(configuration.getChannels()).thenReturn(channels); when(configuration.getKey("chan1")).thenReturn(""); when(configuration.getKey("chan2")).thenReturn("chan2key"); when(component.getIRCConnection(configuration)).thenReturn(connection); endpoint = new IrcEndpoint("foo", component, configuration); } @Test public void doJoinChannelTestNoKey() throws Exception { endpoint.joinChannel("chan1"); verify(connection).doJoin("chan1"); } @Test public void doJoinChannelTestKey() throws Exception { endpoint.joinChannel("chan2"); verify(connection).doJoin("chan2", "chan2key"); } @Test public void doJoinChannels() throws Exception { endpoint.joinChannels(); verify(connection).doJoin("chan1"); verify(connection).doJoin("chan2", "chan2key"); } @Test public void doHandleIrcErrorNickInUse() throws Exception { when(connection.getNick()).thenReturn("nick"); endpoint.handleIrcError(IRCEventAdapter.ERR_NICKNAMEINUSE, "foo"); verify(connection).doNick("nick-"); when(connection.getNick()).thenReturn("nick---"); // confirm doNick was not called verify(connection, never()).doNick("foo"); } }
everttigchelaar/camel-svn
components/camel-irc/src/test/java/org/apache/camel/component/irc/IrcEndpointTest.java
Java
apache-2.0
3,292
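A compact, runnable sketch of the join-with-optional-key behavior that IrcEndpointTest above pins down: a channel whose key is empty joins via the one-argument doJoin, a keyed channel via the two-argument overload. The Connection interface below is an illustrative stand-in for IRCConnection.

import java.util.LinkedHashMap;
import java.util.Map;

public class IrcJoinSketch {
  interface Connection {
    void doJoin(String channel);
    void doJoin(String channel, String key);
  }

  static void joinChannels(Map<String, String> channelKeys, Connection conn) {
    for (Map.Entry<String, String> e : channelKeys.entrySet()) {
      String key = e.getValue();
      if (key == null || key.isEmpty()) {
        conn.doJoin(e.getKey());      // no key configured for this channel
      } else {
        conn.doJoin(e.getKey(), key); // keyed channel
      }
    }
  }

  public static void main(String[] args) {
    Map<String, String> channels = new LinkedHashMap<>();
    channels.put("chan1", "");
    channels.put("chan2", "chan2key");
    joinChannels(channels, new Connection() {
      @Override public void doJoin(String channel) { System.out.println("JOIN " + channel); }
      @Override public void doJoin(String channel, String key) {
        System.out.println("JOIN " + channel + " " + key);
      }
    });
  }
}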
package com.zxinsight.classifier.ruleengine.admin; import java.rmi.RemoteException; import java.util.Map; import javax.rules.admin.LocalRuleExecutionSetProvider; import javax.rules.admin.RuleAdministrator; import javax.rules.admin.RuleExecutionSet; import javax.rules.admin.RuleExecutionSetDeregistrationException; import javax.rules.admin.RuleExecutionSetProvider; import javax.rules.admin.RuleExecutionSetRegisterException; @SuppressWarnings("rawtypes") public class RuleAdministratorImpl implements RuleAdministrator { @Override public void deregisterRuleExecutionSet(String bindUri, Map properties) throws RuleExecutionSetDeregistrationException, RemoteException { RuleExecutionSetRepository repository = RuleExecutionSetRepository .getInstance(); if (repository.getRuleExecutionSet(bindUri) == null) { throw new RuleExecutionSetDeregistrationException( "no execution set bound to: " + bindUri); } repository.unregisterRuleExecutionSet(bindUri); } @Override public LocalRuleExecutionSetProvider getLocalRuleExecutionSetProvider( Map properties) throws RemoteException { return new LocalRuleExecutionSetProviderImple(); } @Override public RuleExecutionSetProvider getRuleExecutionSetProvider(Map properties) throws RemoteException { return new RuleExecutionSetProviderImpl(); } @Override public void registerRuleExecutionSet(String bindUri, RuleExecutionSet ruleExecutionSet, Map properties) throws RuleExecutionSetRegisterException, RemoteException { RuleExecutionSetRepository repository = RuleExecutionSetRepository .getInstance(); repository.registerRuleExecutionSet(bindUri, ruleExecutionSet); } }
kevin-ww/commentClassifier
src/main/java/com/zxinsight/classifier/ruleengine/admin/RuleAdministratorImpl.java
Java
apache-2.0
1,738
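A self-contained sketch of the register/deregister contract RuleAdministratorImpl above enforces: deregistering a URI that is not bound fails loudly rather than silently succeeding. The in-memory repository below is an illustrative stand-in for RuleExecutionSetRepository, with an unchecked exception in place of RuleExecutionSetDeregistrationException.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class BindingRepositorySketch {
  private final Map<String, Object> bindings = new ConcurrentHashMap<>();

  void register(String bindUri, Object executionSet) {
    bindings.put(bindUri, executionSet);
  }

  void deregister(String bindUri) {
    // mirror the guard in deregisterRuleExecutionSet(): an unknown URI is an error
    if (!bindings.containsKey(bindUri)) {
      throw new IllegalStateException("no execution set bound to: " + bindUri);
    }
    bindings.remove(bindUri);
  }

  public static void main(String[] args) {
    BindingRepositorySketch repo = new BindingRepositorySketch();
    repo.register("rules://discounts", new Object());
    repo.deregister("rules://discounts");
    try {
      repo.deregister("rules://discounts"); // second call: nothing is bound any more
    } catch (IllegalStateException expected) {
      System.out.println(expected.getMessage());
    }
  }
}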
package trendli.me.makhana.common.entities; import java.util.Arrays; import java.util.Collections; import java.util.List; public enum ActionType { MOVE( "Moving", "newTile" ), FABRICATING( "Fabricating" ); private final String verb; private final List< String > dataKeys; private ActionType( String verb, String... dataKeys ) { this.verb = verb; if ( dataKeys != null ) { this.dataKeys = Arrays.asList( dataKeys ); } else { this.dataKeys = Collections.emptyList( ); } } /** * @return the dataKeys */ public List< String > getDataKeys( ) { return dataKeys; } /** * @return the verb */ public String getVerb( ) { return verb; } }
elliottmb/makhana
common/src/main/java/trendli/me/makhana/common/entities/ActionType.java
Java
apache-2.0
806
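The ActionType enum above normalizes its trailing varargs to an immutable empty list when nothing (or an explicit null array) is passed. A minimal demonstration of that null-safe varargs pattern:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class VarargsNormalizationSketch {
  static List<String> normalize(String... values) {
    // an omitted varargs argument arrives as an empty array; an explicit null arrives as null
    return values == null ? Collections.<String>emptyList() : Arrays.asList(values);
  }

  public static void main(String[] args) {
    System.out.println(normalize("newTile"));       // [newTile]
    System.out.println(normalize());                // []
    System.out.println(normalize((String[]) null)); // []
  }
}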
/* * Copyright 2014-2015 Nikos Grammatikos * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://raw.githubusercontent.com/nikosgram13/OglofusProtection/master/LICENSE * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package me.nikosgram.oglofus.protection; import com.google.common.base.Optional; import com.sk89q.intake.argument.ArgumentException; import com.sk89q.intake.argument.ArgumentParseException; import com.sk89q.intake.argument.CommandArgs; import com.sk89q.intake.parametric.ProvisionException; import me.nikosgram.oglofus.protection.api.ActionResponse; import me.nikosgram.oglofus.protection.api.CommandExecutor; import me.nikosgram.oglofus.protection.api.entity.User; import me.nikosgram.oglofus.protection.api.message.MessageType; import me.nikosgram.oglofus.protection.api.region.ProtectionRank; import me.nikosgram.oglofus.protection.api.region.ProtectionRegion; import me.nikosgram.oglofus.protection.api.region.ProtectionStaff; import org.apache.commons.lang3.ClassUtils; import org.spongepowered.api.entity.player.Player; import org.spongepowered.api.service.user.UserStorage; import org.spongepowered.api.util.command.CommandSource; import javax.annotation.Nullable; import java.lang.annotation.Annotation; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.UUID; public class OglofusProtectionStaff implements ProtectionStaff { private final List<User> staff = new ArrayList<User>(); private final Map<UUID, ProtectionRank> ranks = new HashMap<UUID, ProtectionRank>(); private final User owner; private final ProtectionRegion region; private final OglofusSponge sponge; protected OglofusProtectionStaff(ProtectionRegion region, OglofusSponge sponge) { this.region = region; this.sponge = sponge; owner = sponge.getUserManager().getUser(UUID.fromString(sponge.connector.getString( "oglofus_regions", "uuid", region.getUuid().toString(), "owner" ).get())).get(); Map<String, String> staff = sponge.connector.getStringMap( "oglofus_regions", "uuid", region.getUuid().toString(), new String[]{"player", "rank"} ); for (String uid : staff.keySet()) { UUID uuid = UUID.fromString(uid); this.staff.add(sponge.getUserManager().getUser(uuid).get()); ranks.put(uuid, ProtectionRank.valueOf(staff.get(uid))); } } @Override public UUID getOwnerUuid() { return owner.getUuid(); } @Override public User getOwner() { return owner; } @Override @SuppressWarnings("unchecked") public <T> Optional<T> getOwnerAs(Class<T> tClass) { if (ClassUtils.isAssignable(tClass, Player.class)) { return (Optional<T>) sponge.server.getPlayer(owner.getUuid()); } else if (ClassUtils.isAssignable(tClass, User.class)) { UserStorage storage; if ((storage = sponge.game.getServiceManager().provide(UserStorage.class).orNull()) != null) { return (Optional<T>) storage.get(owner.getUuid()).orNull(); } } return Optional.absent(); } @Override @SuppressWarnings("unchecked") public <T> Collection<T> getOfficersAs(Class<T> tClass) { List<T> returned = new ArrayList<T>(); if (ClassUtils.isAssignable(tClass, Player.class)) { for (UUID uuid : 
getOfficersUuid()) { Player player; if ((player = sponge.server.getPlayer(uuid).orNull()) != null) { returned.add((T) player); } } } return returned; } @Override public Collection<UUID> getOfficersUuid() { List<UUID> returned = new ArrayList<UUID>(); for (User user : getOfficers()) { returned.add(user.getUuid()); } return returned; } @Override public Collection<User> getOfficers() { List<User> returned = new ArrayList<User>(); for (User user : this) { if (ranks.get(user.getUuid()).equals(ProtectionRank.Officer)) { returned.add(user); } } return returned; } @Override @SuppressWarnings("unchecked") public <T> Collection<T> getMembersAs(Class<T> tClass) { List<T> returned = new ArrayList<T>(); if (ClassUtils.isAssignable(tClass, Player.class)) { for (UUID uuid : getMembersUuid()) { Player player; if ((player = sponge.server.getPlayer(uuid).orNull()) != null) { returned.add((T) player); } } } return returned; } @Override public Collection<UUID> getMembersUuid() { List<UUID> returned = new ArrayList<UUID>(); for (User user : getMembers()) { returned.add(user.getUuid()); } return returned; } @Override public Collection<User> getMembers() { List<User> returned = new ArrayList<User>(); for (User user : this) { if (ranks.get(user.getUuid()).equals(ProtectionRank.Member)) { returned.add(user); } } return returned; } @Override @SuppressWarnings("unchecked") public <T> Collection<T> getStaffAs(Class<T> tClass) { List<T> returned = new ArrayList<T>(); if (ClassUtils.isAssignable(tClass, Player.class)) { for (User user : this) { Player player; if ((player = sponge.server.getPlayer(user.getUuid()).orNull()) != null) { returned.add((T) player); } } } return returned; } @Override public Collection<UUID> getStaffUuid() { Collection<UUID> returned = new ArrayList<UUID>(); for (User user : this) { returned.add(user.getUuid()); } return returned; } @Override public boolean isOwner(UUID target) { return owner.getUuid().equals(target); } @Override public boolean isOwner(User target) { return owner.getUuid().equals(target.getUuid()); } @Override public boolean isOfficer(UUID target) { return ranks.containsKey(target) && ranks.get(target).equals(ProtectionRank.Officer); } @Override public boolean isOfficer(User target) { return ranks.containsKey(target.getUuid()) && ranks.get(target.getUuid()).equals(ProtectionRank.Officer); } @Override public boolean isMember(UUID target) { return ranks.containsKey(target) && ranks.get(target).equals(ProtectionRank.Member); } @Override public boolean isMember(User target) { return ranks.containsKey(target.getUuid()) && ranks.get(target.getUuid()).equals(ProtectionRank.Member); } @Override public boolean isStaff(UUID target) { return ranks.containsKey(target); } @Override public boolean isStaff(User target) { return ranks.containsKey(target.getUuid()); } @Override public boolean hasOwnerAccess(UUID target) { return isOwner(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.owner"); } @Override public boolean hasOwnerAccess(User target) { return isOwner(target) || target.hasPermission("oglofus.protection.bypass.owner"); } @Override public boolean hasOfficerAccess(UUID target) { return isOfficer(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.officer"); } @Override public boolean hasOfficerAccess(User target) { return isOfficer(target) || target.hasPermission("oglofus.protection.bypass.officer"); } @Override public boolean hasMemberAccess(UUID target) { return isMember(target) || 
sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.officer"); } @Override public boolean hasMemberAccess(User target) { return isMember(target) || target.hasPermission("oglofus.protection.bypass.member"); } @Override public ProtectionRank getRank(UUID target) { return ranks.containsKey(target) ? ranks.get(target) : ProtectionRank.None; } @Override public ProtectionRank getRank(User target) { return ranks.containsKey(target.getUuid()) ? ranks.get(target.getUuid()) : ProtectionRank.None; } @Override public void broadcast(String message) { broadcast(MessageType.CHAT, message); } @Override public void broadcast(String message, ProtectionRank rank) { broadcast(MessageType.CHAT, message, rank); } @Override public void broadcast(MessageType type, String message) { for (User user : this) { user.sendMessage(type, message); } } @Override public void broadcast(MessageType type, String message, ProtectionRank rank) { switch (rank) { case Member: for (User user : getMembers()) { user.sendMessage(type, message); } break; case Officer: for (User user : getOfficers()) { user.sendMessage(type, message); } break; case Owner: owner.sendMessage(type, message); break; } } @Override public void broadcastRaw(Object message) { for (User user : this) { user.sendMessage(message); } } @Override public void broadcastRaw(Object message, ProtectionRank rank) { switch (rank) { case Member: for (User user : getMembers()) { user.sendMessage(message); } break; case Officer: for (User user : getOfficers()) { user.sendMessage(message); } break; case Owner: owner.sendMessage(message); break; } } @Override public void broadcastRaw(MessageType type, Object message) { throw new UnsupportedOperationException("Not supported yet."); } @Override public void broadcastRaw(MessageType type, Object message, ProtectionRank rank) { throw new UnsupportedOperationException("Not supported yet."); } @Override public ActionResponse reFlag() { //TODO: make it. return null; } @Override public ActionResponse invite(Object sender, UUID target) { return sponge.getUserManager().invite(sender, target, region); } @Override public ActionResponse invite(CommandExecutor sender, UUID target) { return null; } @Override public ActionResponse invite(Object sender, User target) { return null; } @Override public ActionResponse invite(CommandExecutor sender, User target) { return null; } @Override public ActionResponse invite(UUID target) { return sponge.getUserManager().invite(target, region); } @Override public ActionResponse invite(User target) { return null; } @Override public ActionResponse kick(Object sender, UUID target) { if (sender instanceof CommandSource) { if (sender instanceof Player) { if (region.getProtectionStaff().hasOwnerAccess(((Player) sender).getUniqueId())) { //TODO: call the handler PlayerKickHandler. return kick(target); } return ActionResponse.Failure.setMessage("access"); } if (((CommandSource) sender).hasPermission("oglofus.protection.bypass")) { return kick(target); } return ActionResponse.Failure.setMessage("access"); } return ActionResponse.Failure.setMessage("object"); } @Override public ActionResponse kick(CommandExecutor sender, UUID target) { return null; } @Override public ActionResponse kick(Object sender, User target) { return null; } @Override public ActionResponse kick(CommandExecutor sender, User target) { return null; } @Override public ActionResponse kick(UUID target) { //TODO: call the handler PlayerKickHandler. 
return null; } @Override public ActionResponse kick(User target) { return null; } @Override public ActionResponse promote(Object sender, UUID target) { return null; } @Override public ActionResponse promote(CommandExecutor sender, UUID target) { return null; } @Override public ActionResponse promote(Object sender, User target) { return null; } @Override public ActionResponse promote(CommandExecutor sender, User target) { return null; } @Override public ActionResponse promote(UUID target) { return null; } @Override public ActionResponse promote(User target) { return null; } @Override public ActionResponse demote(Object sender, UUID target) { return null; } @Override public ActionResponse demote(CommandExecutor sender, UUID target) { return null; } @Override public ActionResponse demote(Object sender, User target) { return null; } @Override public ActionResponse demote(CommandExecutor sender, User target) { return null; } @Override public ActionResponse demote(UUID target) { return null; } @Override public ActionResponse demote(User target) { return null; } @Override public ActionResponse changeRank(Object sender, UUID target, ProtectionRank rank) { return null; } @Override public ActionResponse changeRank(CommandExecutor sender, UUID target, ProtectionRank rank) { return null; } @Override public ActionResponse changeRank(Object sender, User target, ProtectionRank rank) { return null; } @Override public ActionResponse changeRank(CommandExecutor sender, User target, ProtectionRank rank) { return null; } @Override public ActionResponse changeRank(UUID target, ProtectionRank rank) { return null; } @Override public ActionResponse changeRank(User target, ProtectionRank rank) { return null; } @Override public Iterator<User> iterator() { return staff.iterator(); } @Override public boolean isProvided() { return false; } @Nullable @Override public User get(CommandArgs arguments, List<? extends Annotation> modifiers) throws ArgumentException, ProvisionException { String name = arguments.next(); Optional<User> user = sponge.getUserManager().getUser(name); if (user.isPresent() && isStaff(user.get())) { return user.get(); } else { throw new ArgumentParseException(String.format("I can't find the Staff with name '%s'.", name)); } } @Override public List<String> getSuggestions(String prefix) { List<String> returned = new ArrayList<String>(); for (User user : this) { if (user.getName().startsWith(prefix)) { returned.add(user.getName()); } } return returned; } }
Oglofus/OglofusProtection
sponge/src/main/java/me/nikosgram/oglofus/protection/OglofusProtectionStaff.java
Java
apache-2.0
16,214
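Most access checks in OglofusProtectionStaff above follow one pattern: a concrete rank check first, then a server-wide bypass permission as fallback (e.g. hasOfficerAccess is isOfficer or "oglofus.protection.bypass.officer"). A distilled, runnable sketch of that pattern, with illustrative in-memory sets standing in for the user and permission services:

import java.util.HashSet;
import java.util.Set;
import java.util.UUID;

public class RankAccessSketch {
  private final Set<UUID> officers = new HashSet<>();
  private final Set<UUID> bypassers = new HashSet<>();

  boolean hasOfficerAccess(UUID target) {
    // real rank first, then the server-wide bypass permission
    return officers.contains(target) || bypassers.contains(target);
  }

  public static void main(String[] args) {
    RankAccessSketch staff = new RankAccessSketch();
    UUID admin = UUID.randomUUID();
    staff.bypassers.add(admin); // e.g. holds "oglofus.protection.bypass.officer"
    System.out.println(staff.hasOfficerAccess(admin)); // true, despite not being an officer
  }
}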
package commons; import org.makagiga.commons.ConfigFile; import org.makagiga.test.AbstractEnumTest; import org.makagiga.test.Test; import org.makagiga.test.TestMethod; import org.makagiga.test.Tester; @Test(className = ConfigFile.Format.class) public final class TestConfigFile_Format extends AbstractEnumTest<ConfigFile.Format> { // public public TestConfigFile_Format() { super( ConfigFile.Format.values(), ConfigFile.Format.DESKTOP, ConfigFile.Format.INI ); } @Test public void test_commons() { for (final ConfigFile.Format i : ConfigFile.Format.values()) { assertIllegalArgumentException(new Tester.Code() { public void run() throws Throwable { i.validateGroup(null); } } ); assertIllegalArgumentException(new Tester.Code() { public void run() throws Throwable { i.validateGroup(""); } } ); assertIllegalArgumentException(new Tester.Code() { public void run() throws Throwable { i.validateKey(null); } } ); assertIllegalArgumentException(new Tester.Code() { public void run() throws Throwable { i.validateKey(""); } } ); } final String LONG_VALUE = "AZaz09-"; final String SHORT_VALUE = "X"; // DESKTOP ConfigFile.Format f = ConfigFile.Format.DESKTOP; assertIllegalArgumentException(new Tester.Code() { public void run() throws Throwable { ConfigFile.Format.DESKTOP.validateGroup("["); } } ); assertIllegalArgumentException(new Tester.Code() { public void run() throws Throwable { ConfigFile.Format.DESKTOP.validateGroup("]"); } } ); assert f.validateGroup(SHORT_VALUE) == SHORT_VALUE; assert f.validateGroup(LONG_VALUE) == LONG_VALUE; assertIllegalArgumentException(new Tester.Code() { public void run() throws Throwable { ConfigFile.Format.DESKTOP.validateKey("="); } } ); assert f.validateKey(SHORT_VALUE) == SHORT_VALUE; assert f.validateKey(LONG_VALUE) == LONG_VALUE; f.validateGroup(" "); f.validateGroup("Foo Bar"); // INI f = ConfigFile.Format.INI; assert f.validateGroup(SHORT_VALUE) == SHORT_VALUE; assert f.validateGroup(LONG_VALUE) == LONG_VALUE; assert f.validateKey(SHORT_VALUE) == SHORT_VALUE; assert f.validateKey(LONG_VALUE) == LONG_VALUE; } @Test( methods = @TestMethod(name = "equals", parameters = "String, String") ) public void test_equals() { ConfigFile.Format f; f = ConfigFile.Format.DESKTOP; assert f.equals("foo", "foo"); assert !f.equals("foo", "FOO"); f = ConfigFile.Format.INI; assert f.equals("foo", "foo"); assert f.equals("foo", "FOO"); } @Test( methods = @TestMethod(name = "escape", parameters = "String") ) public void test_escape() { assertNull(ConfigFile.Format.escape(null)); assertEmpty(ConfigFile.Format.escape("")); assertEquals("\\tFoo\\sBar\\r\\nBaz\\\\", ConfigFile.Format.escape("\tFoo Bar\r\nBaz\\")); } @Test( methods = @TestMethod(name = "unescape", parameters = "String") ) public void test_unescape() { assertNull(ConfigFile.Format.unescape(null)); assertEmpty(ConfigFile.Format.unescape("")); assertEquals("Foo Bar", ConfigFile.Format.unescape("Foo Bar")); assertEquals("\tFoo Bar\r\nBaz\\", ConfigFile.Format.unescape("\\tFoo\\sBar\\r\\nBaz\\\\")); assertEquals("\n\n \\\\", ConfigFile.Format.unescape("\\n\\n\\s\\s\\\\\\\\")); } @Test( methods = @TestMethod(name = "getComment") ) public void test_getComment() { assert ConfigFile.Format.DESKTOP.getComment().equals("#"); assert ConfigFile.Format.INI.getComment().equals(";"); } @Test( methods = @TestMethod(name = "getEOL") ) public void test_getEOL() { assert ConfigFile.Format.DESKTOP.getEOL().equals("\n"); assert ConfigFile.Format.INI.getEOL().equals("\r\n"); } @Test( methods = @TestMethod(name = "getSuffix") ) public void test_getSuffix() 
{ assert ConfigFile.Format.DESKTOP.getSuffix().equals(".desktop"); assert ConfigFile.Format.INI.getSuffix().equals(".ini"); } @Test( methods = @TestMethod(name = "isCaseSensitive") ) public void test_isCaseSensitive() { assert ConfigFile.Format.DESKTOP.isCaseSensitive(); assert !ConfigFile.Format.INI.isCaseSensitive(); } }
stuffer2325/Makagiga
test/src/commons/TestConfigFile_Format.java
Java
apache-2.0
4,186
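A small usage sketch of the API these tests pin down. This is an illustration only: it assumes the Makagiga commons classes are on the classpath, and every behavior shown is taken from the assertions above rather than from ConfigFile's source.

import org.makagiga.commons.ConfigFile;

public class ConfigFileFormatDemo {
    public static void main(String[] args) {
        // Per the tests: DESKTOP uses "#" comments, "\n" EOLs, ".desktop" files
        // and is case-sensitive; INI uses ";", "\r\n", ".ini" and is not.
        for (ConfigFile.Format f : ConfigFile.Format.values()) {
            System.out.println(f + ": comment=" + f.getComment() + " suffix=" + f.getSuffix());
        }
        // escape/unescape round-trip: tab -> \t, space -> \s, backslash -> \\
        String escaped = ConfigFile.Format.escape("\tFoo Bar\r\nBaz\\");
        System.out.println(escaped); // prints: \tFoo\sBar\r\nBaz\\
        System.out.println("\tFoo Bar\r\nBaz\\".equals(ConfigFile.Format.unescape(escaped))); // true
    }
}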
package org.apache.rave.portal.service.impl;

import org.apache.rave.model.ExcercicesHasTrainingPlan;
import org.apache.rave.model.Serie;
import org.apache.rave.model.TrainingPlan;
import org.apache.rave.portal.repository.ExcercicesHasTrainingPlanRepository;
import org.apache.rave.portal.repository.SerieRepository;
import org.apache.rave.portal.repository.TrainingPlanRepository;
import org.apache.rave.portal.service.TrainingPlanService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.util.Collection;

/**
 * Created by fhernandez on 23/09/14.
 */
@Service
public class DefaultTrainingPlanService implements TrainingPlanService {

    private final Logger logger = LoggerFactory.getLogger(DefaultTrainingPlanService.class);

    private final TrainingPlanRepository trainingPlanRepository;
    private final ExcercicesHasTrainingPlanRepository exercisesHasTrainingPlanRepository;
    private final SerieRepository serieRepository;

    @Autowired
    public DefaultTrainingPlanService(TrainingPlanRepository trainingPlanRepository,
                                      ExcercicesHasTrainingPlanRepository exercisesHasTrainingPlanRepository,
                                      SerieRepository serieRepository) {
        this.trainingPlanRepository = trainingPlanRepository;
        this.exercisesHasTrainingPlanRepository = exercisesHasTrainingPlanRepository;
        this.serieRepository = serieRepository;
    }

    @Override
    @Transactional
    public TrainingPlan getById(Long trainingPlanId) {
        TrainingPlan trainingPlan = trainingPlanRepository.getById(trainingPlanId);
        if (trainingPlan != null) {
            // touch the lazy collection so it is initialized before the session closes
            trainingPlan.getExercisesHasTrainingplans().size();
        }
        return trainingPlan;
    }

    @Transactional
    public TrainingPlan save(TrainingPlan newPlan) {
        Collection<ExcercicesHasTrainingPlan> exerciseList = newPlan.getExercisesHasTrainingplans();
        try {
            // persist the plan first so its generated id can be used as a foreign key
            if (newPlan.getEntityId() == null) {
                newPlan = trainingPlanRepository.save(newPlan);
            }
            for (ExcercicesHasTrainingPlan exerciseHasTraining : exerciseList) {
                Serie serie = serieRepository.save(exerciseHasTraining.getSerie());
                exerciseHasTraining.setSerie(serie);
                exerciseHasTraining.setSerieId(serie.getEntityId());
                exerciseHasTraining.setTrainingplanId(newPlan.getEntityId());
                exerciseHasTraining.setTrainingPlan(newPlan);
            }
            exercisesHasTrainingPlanRepository.saveList(exerciseList);
        } catch (Exception e) {
            // pass the throwable itself so the stack trace is logged
            logger.error("Exception saving plan", e);
        }
        return newPlan;
    }

    public Collection<TrainingPlan> getByTrainerID(Long trainerId) {
        return trainingPlanRepository.getByTrainerID(trainerId);
    }
}
lletsica/my_test_repo
rave-components/rave-core/src/main/java/org/apache/rave/portal/service/impl/DefaultTrainingPlanService.java
Java
apache-2.0
2,990
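The detail worth noting in save() above is the persistence order: the plan is stored first so a generated entityId exists, each Serie is stored next, and only then are the join rows given both foreign keys before the single batch saveList(). A condensed restatement (method names exactly as declared above; the already-persisted check and error handling are elided):

TrainingPlan plan = trainingPlanRepository.save(newPlan);        // 1. parent gets its id
for (ExcercicesHasTrainingPlan link : plan.getExercisesHasTrainingplans()) {
    Serie serie = serieRepository.save(link.getSerie());         // 2. child gets its id
    link.setSerieId(serie.getEntityId());                        // 3. wire both foreign keys
    link.setTrainingplanId(plan.getEntityId());
}
exercisesHasTrainingPlanRepository.saveList(plan.getExercisesHasTrainingplans()); // 4. batch insert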
/* =========================================================================== Copyright 2002-2010 Martin Dvorak Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =========================================================================== */ package com.mindcognition.mindraider.ui.swing.trash; import java.awt.BorderLayout; import java.awt.GridLayout; import java.awt.Toolkit; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.awt.event.FocusEvent; import java.util.HashMap; import javax.swing.JButton; import javax.swing.JOptionPane; import javax.swing.JPanel; import javax.swing.JScrollPane; import javax.swing.JToolBar; import javax.swing.JTree; import javax.swing.event.TreeExpansionEvent; import javax.swing.event.TreeExpansionListener; import javax.swing.event.TreeModelEvent; import javax.swing.event.TreeModelListener; import javax.swing.event.TreeSelectionEvent; import javax.swing.event.TreeSelectionListener; import javax.swing.event.TreeWillExpandListener; import javax.swing.tree.DefaultMutableTreeNode; import javax.swing.tree.DefaultTreeModel; import javax.swing.tree.ExpandVetoException; import javax.swing.tree.MutableTreeNode; import javax.swing.tree.TreePath; import javax.swing.tree.TreeSelectionModel; import org.apache.commons.lang.ArrayUtils; import org.apache.log4j.Logger; import com.emental.mindraider.core.MindRaider; import com.emental.mindraider.core.rest.Metadata; import com.emental.mindraider.core.rest.ResourceDescriptor; import com.emental.mindraider.core.rest.resource.FolderResource; import com.emental.mindraider.core.rest.resource.OutlineResource; import com.emental.mindraider.ui.dialogs.ProgressDialogJFrame; import com.emental.mindraider.ui.gfx.IconsRegistry; import com.mindcognition.mindraider.application.model.label.LabelCustodianListener; import com.mindcognition.mindraider.l10n.Messages; import com.mindcognition.mindraider.ui.swing.dialogs.RestoreNotebookJDialog; import com.mindcognition.mindraider.ui.swing.explorer.ExplorerJPanel; import com.mindcognition.mindraider.utils.SwingWorker; public class TrashJPanel extends JPanel implements TreeWillExpandListener, TreeExpansionListener, LabelCustodianListener { private static final Logger logger = Logger.getLogger(TrashJPanel.class); public static final int LEVEL_ROOT = 0; public static final int LEVEL_FOLDERS = 1; public static final int LEVEL_NOTEBOOKS = 2; /* * UI components */ protected DefaultMutableTreeNode rootNode; protected DefaultTreeModel treeModel; protected final JTree tree; protected JButton undoButton, emptyButton, deleteButton; private Toolkit toolkit = Toolkit.getDefaultToolkit(); /* * model */ private HashMap treeNodeToResourceUriMap; /* * singleton */ private static TrashJPanel singleton; public static TrashJPanel getInstance() { if (singleton == null) { singleton = new TrashJPanel(); } return singleton; } private ResourceDescriptor[] discardedNotebooksDescriptors; /** * Constructor. 
*/ private TrashJPanel() { treeNodeToResourceUriMap = new HashMap(); rootNode = new DefaultMutableTreeNode(Messages.getString("TrashJPanel.notebookArchive")); treeModel = new DefaultTreeModel(rootNode); treeModel.addTreeModelListener(new MyTreeModelListener()); tree = new JTree(treeModel); tree.setEditable(false); tree.getSelectionModel().setSelectionMode( TreeSelectionModel.SINGLE_TREE_SELECTION); tree.addTreeExpansionListener(this); tree.addTreeWillExpandListener(this); tree.setShowsRootHandles(true); tree.putClientProperty("JTree.lineStyle", "Angled"); // tree rendered // TODO implement own renderer in order to tooltips tree.setCellRenderer(new TrashTreeCellRenderer(IconsRegistry .getImageIcon("trashFull.png"), IconsRegistry .getImageIcon("explorerNotebookIcon.png"))); setLayout(new BorderLayout()); // control panel JToolBar tp = new JToolBar(); tp.setLayout(new GridLayout(1, 6)); undoButton = new JButton("", IconsRegistry .getImageIcon("trashUndo.png")); undoButton.setEnabled(false); undoButton.setToolTipText("Restore Outline"); undoButton.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree .getLastSelectedPathComponent(); if (node == null) { return; } new RestoreNotebookJDialog( (String)treeNodeToResourceUriMap.get(node), "Restore Outline", "Restore", true); } }); tp.add(undoButton); deleteButton = new JButton("", IconsRegistry .getImageIcon("explorerDeleteSmall.png")); deleteButton.setToolTipText("Delete Outline"); deleteButton.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree .getLastSelectedPathComponent(); if (node == null) { return; } int result = JOptionPane.showConfirmDialog( MindRaider.mainJFrame, "Do you really want to DELETE this Outline?", "Delete Outline", JOptionPane.YES_NO_OPTION); if (result == JOptionPane.YES_OPTION) { MindRaider.labelCustodian .deleteOutline((String) treeNodeToResourceUriMap .get(node)); refresh(); ExplorerJPanel.getInstance().refresh(); } } }); tp.add(deleteButton); emptyButton = new JButton("", IconsRegistry .getImageIcon("trashEmpty.png")); emptyButton.setToolTipText(Messages.getString("TrashJPanel.emptyArchive")); emptyButton.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { int result = JOptionPane .showConfirmDialog( MindRaider.mainJFrame, "Do you really want to DELETE all discarded Outlines?", "Empty Trash", JOptionPane.YES_NO_OPTION); if (result == JOptionPane.YES_OPTION) { final SwingWorker worker = new SwingWorker() { public Object construct() { ProgressDialogJFrame progressDialogJFrame = new ProgressDialogJFrame( "Empty Trash", "<html><br>&nbsp;&nbsp;<b>Deleting:</b>&nbsp;&nbsp;</html>"); try { ResourceDescriptor[] resourceDescriptors = MindRaider.labelCustodian .getDiscardedOutlineDescriptors(); if (resourceDescriptors != null) { for (int i = 0; i < resourceDescriptors.length; i++) { MindRaider.labelCustodian .deleteOutline(resourceDescriptors[i] .getUri()); } refresh(); } } finally { if (progressDialogJFrame != null) { progressDialogJFrame.dispose(); } } return null; } }; worker.start(); } } }); tp.add(emptyButton); add(tp, BorderLayout.NORTH); // add the tree JScrollPane scrollPane = new JScrollPane(tree); add(scrollPane); // build the whole tree buildTree(); // click handler tree.addTreeSelectionListener(new TreeSelectionListener() { public void valueChanged(TreeSelectionEvent e) { DefaultMutableTreeNode node = 
(DefaultMutableTreeNode) tree.getLastSelectedPathComponent();
                if (node == null) {
                    return;
                }
                logger.debug("Tree selection path: " + node.getPath()[node.getLevel()]);
                enableDisableToolbarButtons(node.getLevel());
            }
        });
    }

    /**
     * Build tree. This method is called on startup and tree refresh in order
     * to reload disc content. Adding/removing of particular nodes during the
     * program run is performed on individual nodes.
     */
    void buildTree() {
        discardedNotebooksDescriptors = MindRaider.labelCustodian.getDiscardedOutlineDescriptors();
        if (!ArrayUtils.isEmpty(discardedNotebooksDescriptors)) {
            for (int i = 0; i < discardedNotebooksDescriptors.length; i++) {
                addDiscardedNotebookNode(
                        discardedNotebooksDescriptors[i].getLabel(),
                        discardedNotebooksDescriptors[i].getUri());
            }
            // now expand all rows
            for (int i = 0; i < tree.getRowCount(); i++) {
                tree.expandRow(i);
            }
        }
        tree.setSelectionRow(0);
        enableDisableToolbarButtons(0);
    }

    /**
     * Add discarded notebook node.
     *
     * @param label
     *            notebook label.
     * @param uri
     *            notebook URI.
     * @return the node.
     */
    public DefaultMutableTreeNode addDiscardedNotebookNode(String label, String uri) {
        DefaultMutableTreeNode parent = null;
        Object child = label;
        DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
        // store node to map to be able to get URI from node object
        treeNodeToResourceUriMap.put(childNode, uri);
        if (parent == null) {
            parent = rootNode;
        }
        treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
        return childNode;
    }

    /**
     * Call this method in order to update the tree.
     */
    public void refresh() {
        clear();
        buildTree();
    }

    /**
     * Move notebook up in the folder.
     *
     * @param folderUri
     *            folder URI.
     * @param notebookUri
     *            notebook URI.
     */
    protected boolean moveNotebookUp(String folderUri, String notebookUri) {
        logger.debug(" moveNotebookUp: " + folderUri + " " + notebookUri);
        if (folderUri != null && notebookUri != null) {
            try {
                // move the notebook within its folder
                boolean result = MindRaider.labelCustodian.moveNotebookUp(folderUri, notebookUri);
                // TODO PERFORMANCE move it just in the tree instead of refresh
                refresh();
                return result;
            } catch (Exception e1) {
                logger.error("moveNotebookUp(String, String)", e1);
                JOptionPane.showMessageDialog(TrashJPanel.this,
                        "Unable to move outline up: " + e1.getMessage(),
                        "Outline Manipulation Error",
                        JOptionPane.ERROR_MESSAGE);
                return false;
            }
        }
        logger.debug("Outline won't be moved - URIs are null!");
        return false;
    }

    /**
     * Move notebook down in the folder.
     *
     * @param folderUri
     *            folder URI.
     * @param notebookUri
     *            notebook URI.
     */
    protected boolean moveNotebookDown(String folderUri, String notebookUri) {
        logger.debug(" moveNotebookDown: " + folderUri + " " + notebookUri);
        if (folderUri != null && notebookUri != null) {
            try {
                boolean result = MindRaider.labelCustodian.moveNotebookDown(folderUri, notebookUri);
                // TODO PERFORMANCE move it just in the tree instead of refresh
                refresh();
                return result;
            } catch (Exception e1) {
                logger.error("moveNotebookDown(String, String)", e1);
                JOptionPane.showMessageDialog(TrashJPanel.this,
                        "Unable to move outline down: " + e1.getMessage(),
                        "Outline Manipulation Error",
                        JOptionPane.ERROR_MESSAGE);
                return false;
            }
        }
        logger.debug("Outline won't be moved - URIs are null!");
        return false;
    }

    /**
     * Add notebook node to folder node (on new notebook creation).
     *
     * @param notebookUri
     *            newly created notebook URI.
     */
    public void addNotebookToFolder(String notebookUri) {
        logger.debug(" URI of created notebook is: " + notebookUri);
        if (notebookUri != null) {
            // add notebook to selected folder
            TreePath treePath = tree.getSelectionPath();
            String folderUri = (String) treeNodeToResourceUriMap.get(treePath.getLastPathComponent());
            logger.debug("Enclosing folder URI is: " + folderUri);
            if (folderUri != null) {
                try {
                    // add notebook to folder
                    MindRaider.labelCustodian.addOutline(folderUri, notebookUri);
                    // now add it in the tree
                    OutlineResource notebookResource = MindRaider.outlineCustodian.getActiveOutlineResource();
                    addNotebookNode((DefaultMutableTreeNode) treePath.getLastPathComponent(),
                            notebookResource.resource.getMetadata().getUri().toASCIIString(),
                            notebookResource.getLabel());
                } catch (Exception e1) {
                    logger.error("addNotebookToFolder(String)", e1);
                    JOptionPane.showMessageDialog(TrashJPanel.this,
                            "Unable to add Outline to folder: " + e1.getMessage(),
                            "Outline Creation Error",
                            JOptionPane.ERROR_MESSAGE);
                    return;
                }
            }
        } else {
            logger.debug("Outline won't be added to folder - its URI is null!");
        }
    }

    /**
     * Remove all nodes except the root node.
     */
    public void clear() {
        rootNode.removeAllChildren();
        treeModel.reload();
        treeNodeToResourceUriMap.clear();
    }

    /**
     * Remove the currently selected node.
     */
    public void removeCurrentNode() {
        TreePath currentSelection = tree.getSelectionPath();
        if (currentSelection != null) {
            DefaultMutableTreeNode currentNode = (DefaultMutableTreeNode) (currentSelection.getLastPathComponent());
            MutableTreeNode parent = (MutableTreeNode) (currentNode.getParent());
            if (parent != null) {
                treeModel.removeNodeFromParent(currentNode);
                return;
            }
        }
        // Either there was no selection, or the root was selected.
        toolkit.beep();
    }

    /**
     * Add child to the currently selected node.
     */
    public DefaultMutableTreeNode addObject(Object child) {
        DefaultMutableTreeNode parentNode = null;
        TreePath parentPath = tree.getSelectionPath();
        if (parentPath == null) {
            parentNode = rootNode;
        } else {
            parentNode = (DefaultMutableTreeNode) (parentPath.getLastPathComponent());
        }
        return addObject(parentNode, child, true);
    }

    public DefaultMutableTreeNode addObject(DefaultMutableTreeNode parent, Object child) {
        return addObject(parent, child, false);
    }

    /**
     * Add folder node.
     *
     * @param uri
     *            folder URI.
     * @return the node.
     */
    public DefaultMutableTreeNode addFolderNode(String uri) {
        DefaultMutableTreeNode parent = null;
        // get label from URI
        FolderResource resource = new FolderResource(MindRaider.labelCustodian.get(uri));
        Object child = resource.getLabel();
        DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
        // store node to map to be able to get URI from node object
        treeNodeToResourceUriMap.put(childNode, uri);
        if (parent == null) {
            parent = rootNode;
        }
        treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
        return childNode;
    }

    /**
     * Add notebook node.
     *
     * @param parent
     *            folder node.
     * @param uri
     *            notebook URI.
     * @param label
     *            notebook label.
     * @return the node.
     */
    public DefaultMutableTreeNode addNotebookNode(DefaultMutableTreeNode parent, String uri, String label) {
        Object child = label;
        DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
        // store node to map to be able to get URI from node object
        treeNodeToResourceUriMap.put(childNode, uri);
        if (parent == null) {
            parent = rootNode;
        }
        treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
        return childNode;
    }

    /**
     * Add a child object to a parent object.
     *
     * @param parent
     *            the parent object.
     * @param child
     *            the child object.
     * @param shouldBeVisible
     *            if <code>true</code> the object should be visible.
     * @return Returns a <code>DefaultMutableTreeNode</code>
     */
    public DefaultMutableTreeNode addObject(DefaultMutableTreeNode parent, Object child, boolean shouldBeVisible) {
        DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
        if (parent == null) {
            parent = rootNode;
        }
        treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
        // Make sure the user can see the lovely new node.
        if (shouldBeVisible) {
            tree.scrollPathToVisible(new TreePath(childNode.getPath()));
        }
        return childNode;
    }

    /**
     * Custom MyTreeModelListener class.
     */
    class MyTreeModelListener implements TreeModelListener {

        /**
         * Logger for this class.
         */
        private final Logger logger = Logger.getLogger(MyTreeModelListener.class);

        /**
         * @see javax.swing.event.TreeModelListener#treeNodesChanged(javax.swing.event.TreeModelEvent)
         */
        public void treeNodesChanged(TreeModelEvent e) {
            DefaultMutableTreeNode node;
            node = (DefaultMutableTreeNode) (e.getTreePath().getLastPathComponent());
            /*
             * If the event lists children, then the changed node is the child
             * of the node we've already gotten. Otherwise, the changed node
             * and the specified node are the same.
             */
            try {
                int index = e.getChildIndices()[0];
                node = (DefaultMutableTreeNode) (node.getChildAt(index));
            } catch (NullPointerException exc) {
                // no children listed - keep the node itself
            }
            logger.debug("The user has finished editing the node.");
            logger.debug("New value: " + node.getUserObject());
        }

        public void treeNodesInserted(TreeModelEvent e) {
        }

        public void treeNodesRemoved(TreeModelEvent e) {
        }

        public void treeStructureChanged(TreeModelEvent e) {
        }
    }

    public void treeCollapsed(TreeExpansionEvent e) {
        logger.debug("Tree collapsed event..." + e.getPath());
    }

    /**
     * @see javax.swing.event.TreeExpansionListener#treeExpanded(javax.swing.event.TreeExpansionEvent)
     */
    public void treeExpanded(TreeExpansionEvent e) {
        logger.debug("Tree expanded event..."
+ e.getPath()); } /** * @see javax.swing.event.TreeWillExpandListener#treeWillCollapse(javax.swing.event.TreeExpansionEvent) */ public void treeWillCollapse(TreeExpansionEvent e) throws ExpandVetoException { logger.debug("Tree will collapse " + e.getPath()); } /** * @see javax.swing.event.TreeWillExpandListener#treeWillExpand(javax.swing.event.TreeExpansionEvent) */ public void treeWillExpand(TreeExpansionEvent e) throws ExpandVetoException { logger.debug("Tree will expand " + e.getPath()); /* * DefaultMutableTreeNode node = (DefaultMutableTreeNode) * tree.getLastSelectedPathComponent(); if (node == null) { return; } * logger.debug(""+node.getPath()[node.getLevel()]); // buttons * disabling switch(node.getLevel()) { case LEVEL_FOLDERS: // disconnect * childrens from the node Enumeration enumeration=node.children(); // * delete nodes itself while (enumeration.hasMoreElements()) { Object * object=enumeration.nextElement(); * treeNodeToResourceUriMap.remove(object); * treeModel.removeNodeFromParent((MutableTreeNode)object); } // get * folder URI logger.debug("Expanding folder: * "+treeNodeToResourceUriMap.get(node)); FolderResource folder =new * FolderResource(MindRaider.folderCustodian.get((String)treeNodeToResourceUriMap.get(node))); * String[] notebookUris=folder.getNotebookUris(); if (notebookUris != * null) { for (int i= 0; i < notebookUris.length; i++) { * NotebookResource notebook=new * NotebookResource(MindRider.notebookCustodian.get(notebookUris[i])); * addNotebookNode(node,notebook.resource.metadata.uri.toASCIIString(),notebook.getLabel()); } } } */ } /** * @see com.emental.LabelCustodianListener.folder.FolderCustodianListener#folderCreated() */ public void labelCreated(FolderResource folder) { Metadata meta = folder.getResource().getMetadata(); logger.debug("Folder created: " + meta.getUri().toASCIIString()); // handle creation of the folder addFolderNode(meta.getUri().toASCIIString()); } /** * @see java.awt.event.FocusListener#focusGained(java.awt.event.FocusEvent) */ public void focusGained(FocusEvent arg0) { // TODO Auto-generated method stub } /** * Change status in the toolbar buttons. * * @param level * The level could be <code>LEVEL_ROOT</code> or * <code>LEVEL_FOLDERS</code> */ protected void enableDisableToolbarButtons(int level) { // buttons disabling switch (level) { case LEVEL_ROOT: undoButton.setEnabled(false); deleteButton.setEnabled(false); emptyButton.setEnabled(true); break; case LEVEL_FOLDERS: undoButton.setEnabled(true); deleteButton.setEnabled(true); emptyButton.setEnabled(true); break; } } private static final long serialVersionUID = 5028293540089775890L; }
dvorka/mindraider
mr7/src/main/java/com/mindcognition/mindraider/ui/swing/trash/TrashJPanel.java
Java
apache-2.0
24,481
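TrashJPanel's "empty trash" handler runs the deletion through MindRaider's own SwingWorker (construct()/start()). For comparison only, a minimal sketch of the same off-EDT pattern with the JDK's javax.swing.SwingWorker (available since Java 6); this is an alternative, not what the class above uses:

import javax.swing.SwingWorker;

public class EmptyTrashWorkerSketch {
    public static void main(String[] args) {
        SwingWorker<Void, Void> worker = new SwingWorker<Void, Void>() {
            @Override
            protected Void doInBackground() {
                // long-running work (e.g. deleting discarded outlines) runs off the EDT
                return null;
            }

            @Override
            protected void done() {
                // UI cleanup (e.g. disposing a progress dialog) runs back on the EDT
            }
        };
        worker.execute();
    }
}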
package com.fpliu.newton.ui.list;

import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.GridView;

/**
 * @author 792793182@qq.com 2017-06-30.
 */
public interface IGrid<T, V extends GridView> extends ICommon<T> {

    V getGridView();

    void setItemAdapter(ItemAdapter<T> itemAdapter);

    ItemAdapter<T> getItemAdapter();

    void setOnItemClickListener(AdapterView.OnItemClickListener listener);

    int getItemViewTypeCount();

    int getItemViewType(int position);

    View getItemView(int position, View convertView, ViewGroup parent);

    void notifyDataSetChanged();

    void setNumColumns(int numColumns);
}
leleliu008/Android-List
library/src/main/java/com/fpliu/newton/ui/list/IGrid.java
Java
apache-2.0
688
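A consumer-side sketch of the IGrid contract. The grid instance is hypothetical, and an Android runtime plus some implementation from this library are assumed; only methods declared in the interface above are called:

import android.view.View;
import android.widget.AdapterView;
import android.widget.GridView;

import com.fpliu.newton.ui.list.IGrid;

public class GridUsageSketch {
    void configure(IGrid<String, GridView> grid) {
        grid.setNumColumns(3); // lay the items out in three columns
        grid.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
                // react to a tap on the item at `position`
            }
        });
        grid.notifyDataSetChanged(); // refresh after the backing data changes
    }
}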
/*******************************************************************************
 * Copyright (c) 2012-2013 University of Stuttgart.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * and the Apache License 2.0 which both accompany this distribution,
 * and are available at http://www.eclipse.org/legal/epl-v10.html
 * and http://www.apache.org/licenses/LICENSE-2.0
 *
 * Contributors:
 *    Oliver Kopp - initial API and implementation
 *******************************************************************************/
/**
 * This package contains the REST resources.
 *
 * Mostly, they produce Viewables, where a JSP and the current resource are
 * passed. As the JSP itself handles plain Java objects and not Responses, the
 * resources also have methods returning POJOs. This might be ugly design, but
 * it was quick to implement.
 *
 * The package structure is mirrored in src/main/webapp/jsp to ease finding the
 * JSPs belonging to a resource.
 *
 * The resources are <em>not</em> in line with the resource model of the TOSCA
 * container. Especially, we do not employ HATEOAS here.
 */
package org.eclipse.winery.repository.resources;
YannicSowoidnich/winery
org.eclipse.winery.repository/src/main/java/org/eclipse/winery/repository/resources/package-info.java
Java
apache-2.0
1,234
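A sketch of the Viewable pattern the package documentation describes. The use of Jersey 1.x's com.sun.jersey.api.view.Viewable is an assumption (the doc names no framework), and the resource class and JSP path are illustrative, not taken from Winery:

import javax.ws.rs.GET;
import javax.ws.rs.core.Response;

import com.sun.jersey.api.view.Viewable;

public class ExampleResource {

    @GET
    public Response getHtml() {
        // pass the JSP template together with the current resource as its model
        return Response.ok(new Viewable("/jsp/example.jsp", this)).build();
    }
}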
/**
 * Package: MAG - VistA Imaging
 * WARNING: Per VHA Directive 2004-038, this routine should not be modified.
 * Date Created: Jul 10, 2012
 * Site Name: Washington OI Field Office, Silver Spring, MD
 * Developer: VHAISWWERFEJ
 * Description:
 *
 *        ;; +--------------------------------------------------------------------+
 *        ;; Property of the US Government.
 *        ;; No permission to copy or redistribute this software is given.
 *        ;; Use of unreleased versions of this software requires the user
 *        ;; to execute a written test agreement with the VistA Imaging
 *        ;; Development Office of the Department of Veterans Affairs,
 *        ;; telephone (301) 734-0100.
 *        ;;
 *        ;; The Food and Drug Administration classifies this software as
 *        ;; a Class II medical device. As such, it may not be changed
 *        ;; in any way. Modifications to this software may result in an
 *        ;; adulterated medical device under 21CFR820, the use of which
 *        ;; is considered to be a violation of US Federal Statutes.
 *        ;; +--------------------------------------------------------------------+
 */
package gov.va.med.imaging.pathology.rest.translator;

import java.util.Date;

import org.junit.Test;

import static org.junit.Assert.*;

/**
 * @author VHAISWWERFEJ
 */
public class PathologyRestTranslatorTest {

    @Test
    public void testDateTranslation() {
        try {
            Date date = PathologyRestTranslator.translateDate("201207101435");
            // the translator must actually produce a date, not merely avoid throwing
            assertNotNull(date);
            System.out.println("Date: " + date);
        } catch (Exception ex) {
            ex.printStackTrace();
            fail(ex.getMessage());
        }
    }
}
VHAINNOVATIONS/Telepathology
Source/Java/PathologyWebApp/main/test/java/gov/va/med/imaging/pathology/rest/translator/PathologyRestTranslatorTest.java
Java
apache-2.0
1,621
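The test above feeds "201207101435" to the translator, which looks like a yyyyMMddHHmm timestamp. A plain-JDK sketch of that parse (the pattern is inferred from the test input, not read from PathologyRestTranslator's source):

import java.text.SimpleDateFormat;
import java.util.Date;

public class DateParseSketch {
    public static void main(String[] args) throws Exception {
        Date date = new SimpleDateFormat("yyyyMMddHHmm").parse("201207101435");
        System.out.println(date); // July 10, 2012, 14:35 in the local time zone
    }
}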
package io.quarkus.grpc.examples.hello; import static io.restassured.RestAssured.get; import static org.assertj.core.api.Assertions.assertThat; import org.junit.jupiter.api.Test; import io.quarkus.test.junit.QuarkusTest; @QuarkusTest class HelloWorldMutualTlsEndpointTest { @Test public void testHelloWorldServiceUsingBlockingStub() { String response = get("/hello/blocking/neo").asString(); assertThat(response).isEqualTo("Hello neo"); } @Test public void testHelloWorldServiceUsingMutinyStub() { String response = get("/hello/mutiny/neo-mutiny").asString(); assertThat(response).isEqualTo("Hello neo-mutiny"); } }
quarkusio/quarkus
integration-tests/grpc-mutual-auth/src/test/java/io/quarkus/grpc/examples/hello/HelloWorldMutualTlsEndpointTest.java
Java
apache-2.0
679
/** * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * under the License. */ package org.apache.hadoop.hbase.filter; import static org.junit.Assert.assertEquals; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; import com.google.common.collect.Lists; /** */ @Category(MediumTests.class) public class TestFuzzyRowAndColumnRangeFilter { private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final Log LOG = LogFactory.getLog(this.getClass()); /** * @throws java.lang.Exception */ @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); } /** * @throws java.lang.Exception */ @AfterClass public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } /** * @throws java.lang.Exception */ @Before public void setUp() throws Exception { // Nothing to do. } /** * @throws java.lang.Exception */ @After public void tearDown() throws Exception { // Nothing to do. 
    }

    @Test
    public void test() throws Exception {
        String cf = "f";
        String table = "TestFuzzyAndColumnRangeFilterClient";
        HTable ht = TEST_UTIL.createTable(Bytes.toBytes(table), Bytes.toBytes(cf), Integer.MAX_VALUE);

        // 10 byte row key - (2 bytes 4 bytes 4 bytes)
        // 4 byte qualifier
        // 4 byte value
        for (int i1 = 0; i1 < 2; i1++) {
            for (int i2 = 0; i2 < 5; i2++) {
                byte[] rk = new byte[10];
                ByteBuffer buf = ByteBuffer.wrap(rk);
                buf.clear();
                buf.putShort((short) 2);
                buf.putInt(i1);
                buf.putInt(i2);
                for (int c = 0; c < 5; c++) {
                    byte[] cq = new byte[4];
                    Bytes.putBytes(cq, 0, Bytes.toBytes(c), 0, 4);
                    Put p = new Put(rk);
                    p.setDurability(Durability.SKIP_WAL);
                    p.add(cf.getBytes(), cq, Bytes.toBytes(c));
                    ht.put(p);
                    LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: " + Bytes.toStringBinary(cq));
                }
            }
        }
        TEST_UTIL.flush();

        // test passes
        runTest(ht, 0, 10);
        // test fails
        runTest(ht, 1, 8);
    }

    private void runTest(HTable hTable, int cqStart, int expectedSize) throws IOException {
        // fuzzy key matching rows of the form [0, 2, ?, ?, ?, ?, 0, 0, 0, 1]
        byte[] fuzzyKey = new byte[10];
        ByteBuffer buf = ByteBuffer.wrap(fuzzyKey);
        buf.clear();
        buf.putShort((short) 2);
        for (int i = 0; i < 4; i++) {
            buf.put((byte) 63); // '?' placeholder bytes; masked out as fuzzy below
        }
        buf.putInt(1);
        byte[] mask = new byte[] {0, 0, 1, 1, 1, 1, 0, 0, 0, 0};
        Pair<byte[], byte[]> pair = new Pair<byte[], byte[]>(fuzzyKey, mask);
        FuzzyRowFilter fuzzyRowFilter = new FuzzyRowFilter(Lists.newArrayList(pair));
        ColumnRangeFilter columnRangeFilter = new ColumnRangeFilter(Bytes.toBytes(cqStart), true, Bytes.toBytes(4), true);

        // regular test
        runScanner(hTable, expectedSize, fuzzyRowFilter, columnRangeFilter);
        // reverse filter order test
        runScanner(hTable, expectedSize, columnRangeFilter, fuzzyRowFilter);
    }

    private void runScanner(HTable hTable, int expectedSize, Filter... filters) throws IOException {
        String cf = "f";
        Scan scan = new Scan();
        scan.addFamily(cf.getBytes());
        FilterList filterList = new FilterList(filters);
        scan.setFilter(filterList);

        ResultScanner scanner = hTable.getScanner(scan);
        List<Cell> results = new ArrayList<Cell>();
        Result result;
        long timeBeforeScan = System.currentTimeMillis();
        while ((result = scanner.next()) != null) {
            for (Cell kv : result.listCells()) {
                LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: " + Bytes.toStringBinary(CellUtil.cloneQualifier(kv)));
                results.add(kv);
            }
        }
        long scanTime = System.currentTimeMillis() - timeBeforeScan;
        scanner.close();
        LOG.info("scan time = " + scanTime + "ms");
        LOG.info("found " + results.size() + " results");
        assertEquals(expectedSize, results.size());
    }
}
intel-hadoop/hbase-rhino
hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java
Java
apache-2.0
5,447
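For reference, the mask convention runTest() relies on: in each (fuzzy key, mask) pair handed to FuzzyRowFilter, a 0 mask byte means the key byte at that position must match exactly, while a 1 marks the position as a wildcard. An isolated sketch using the same imports as the test above:

// matches every 10-byte row of the form [0, 2, ?, ?, ?, ?, 0, 0, 0, 1]
byte[] fuzzyKey = new byte[] {0, 2, 0, 0, 0, 0, 0, 0, 0, 1};
byte[] mask     = new byte[] {0, 0, 1, 1, 1, 1, 0, 0, 0, 0};
FuzzyRowFilter filter = new FuzzyRowFilter(
        Lists.newArrayList(new Pair<byte[], byte[]>(fuzzyKey, mask)));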
package pl.matisoft.soy.config; import com.google.template.soy.jssrc.SoyJsSrcOptions; import com.google.template.soy.tofu.SoyTofuOptions; import org.springframework.beans.factory.annotation.Value; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.web.context.support.ServletContextResource; import org.springframework.web.servlet.ViewResolver; import pl.matisoft.soy.ContentNegotiator; import pl.matisoft.soy.DefaultContentNegotiator; import pl.matisoft.soy.SoyTemplateViewResolver; import pl.matisoft.soy.bundle.DefaultSoyMsgBundleResolver; import pl.matisoft.soy.bundle.SoyMsgBundleResolver; import pl.matisoft.soy.compile.DefaultTofuCompiler; import pl.matisoft.soy.compile.TofuCompiler; import pl.matisoft.soy.data.DefaultToSoyDataConverter; import pl.matisoft.soy.data.ToSoyDataConverter; import pl.matisoft.soy.data.adjust.ModelAdjuster; import pl.matisoft.soy.data.adjust.SpringModelAdjuster; import pl.matisoft.soy.global.compile.CompileTimeGlobalModelResolver; import pl.matisoft.soy.global.compile.EmptyCompileTimeGlobalModelResolver; import pl.matisoft.soy.global.runtime.EmptyGlobalRuntimeModelResolver; import pl.matisoft.soy.global.runtime.GlobalRuntimeModelResolver; import pl.matisoft.soy.holder.CompiledTemplatesHolder; import pl.matisoft.soy.holder.DefaultCompiledTemplatesHolder; import pl.matisoft.soy.locale.LocaleProvider; import pl.matisoft.soy.locale.SpringLocaleProvider; import pl.matisoft.soy.render.DefaultTemplateRenderer; import pl.matisoft.soy.render.TemplateRenderer; import pl.matisoft.soy.template.DefaultTemplateFilesResolver; import pl.matisoft.soy.template.TemplateFilesResolver; import javax.inject.Inject; import javax.servlet.ServletContext; /** * Created with IntelliJ IDEA. 
* User: mati * Date: 12/11/2013 * Time: 19:55 */ @Configuration public class SpringSoyViewBaseConfig { @Value("${soy.hot.reload.mode:false}") private boolean hotReloadMode; @Value("${soy.templates.resolve.recursively:true}") private boolean recursive; @Value("${soy.templates.file.extension:soy}") private String fileExtension; @Value("${soy.templates.directory:/WEB-INF/templates}") private String templatesPath; @Value("${soy.i18n.xliff.path:xliffs/messages}") private String messagesPath; @Value("${soy.encoding:utf-8}") private String encoding; @Value("${soy.i18n.fallback.to.english:true}") private boolean fallbackToEnglish; @Value("${soy.preCompile.templates:false}") private boolean preCompileTemplates; @Value("${soy.indexView:index}") private String indexView; @Value("${soy.logical.prefix:soy:}") private String logicalPrefix; @Value("${soy.resolver.order:2147483647}") private int order; @Inject private ServletContext servletContext; @Bean public LocaleProvider soyLocaleProvider() { return new SpringLocaleProvider(); } @Bean public DefaultTemplateFilesResolver soyTemplateFilesResolver() throws Exception { final DefaultTemplateFilesResolver defaultTemplateFilesResolver = new DefaultTemplateFilesResolver(); defaultTemplateFilesResolver.setHotReloadMode(hotReloadMode); defaultTemplateFilesResolver.setRecursive(recursive); defaultTemplateFilesResolver.setFilesExtension(fileExtension); defaultTemplateFilesResolver.setTemplatesLocation(new ServletContextResource(servletContext, templatesPath)); return defaultTemplateFilesResolver; } @Bean public CompileTimeGlobalModelResolver soyCompileTimeGlobalModelResolver() { return new EmptyCompileTimeGlobalModelResolver(); } @Bean public ToSoyDataConverter soyToSoyDataConverter() { return new DefaultToSoyDataConverter(); } @Bean public SoyJsSrcOptions soyJsSourceOptions() { return new SoyJsSrcOptions(); } @Bean public SoyTofuOptions soyTofuOptions() { final SoyTofuOptions soyTofuOptions = new SoyTofuOptions(); soyTofuOptions.setUseCaching(!hotReloadMode); return soyTofuOptions; } @Bean public TofuCompiler soyTofuCompiler(final CompileTimeGlobalModelResolver compileTimeGlobalModelResolver, final SoyJsSrcOptions soyJsSrcOptions, final SoyTofuOptions soyTofuOptions) { final DefaultTofuCompiler defaultTofuCompiler = new DefaultTofuCompiler(); defaultTofuCompiler.setHotReloadMode(hotReloadMode); defaultTofuCompiler.setCompileTimeGlobalModelResolver(compileTimeGlobalModelResolver); defaultTofuCompiler.setSoyJsSrcOptions(soyJsSrcOptions); defaultTofuCompiler.setSoyTofuOptions(soyTofuOptions); return defaultTofuCompiler; } @Bean public SoyMsgBundleResolver soyMsgBundleResolver() { final DefaultSoyMsgBundleResolver defaultSoyMsgBundleResolver = new DefaultSoyMsgBundleResolver(); defaultSoyMsgBundleResolver.setHotReloadMode(hotReloadMode); defaultSoyMsgBundleResolver.setMessagesPath(messagesPath); defaultSoyMsgBundleResolver.setFallbackToEnglish(fallbackToEnglish); return defaultSoyMsgBundleResolver; } @Bean public CompiledTemplatesHolder soyTemplatesHolder(final TemplateFilesResolver templateFilesResolver, final TofuCompiler tofuCompiler) throws Exception { final DefaultCompiledTemplatesHolder defaultCompiledTemplatesHolder = new DefaultCompiledTemplatesHolder(); defaultCompiledTemplatesHolder.setHotReloadMode(hotReloadMode); defaultCompiledTemplatesHolder.setPreCompileTemplates(preCompileTemplates); defaultCompiledTemplatesHolder.setTemplatesFileResolver(templateFilesResolver); defaultCompiledTemplatesHolder.setTofuCompiler(tofuCompiler); return 
defaultCompiledTemplatesHolder; } @Bean public TemplateRenderer soyTemplateRenderer(final ToSoyDataConverter toSoyDataConverter) { final DefaultTemplateRenderer defaultTemplateRenderer = new DefaultTemplateRenderer(); defaultTemplateRenderer.setHotReloadMode(hotReloadMode); defaultTemplateRenderer.setToSoyDataConverter(toSoyDataConverter); return defaultTemplateRenderer; } @Bean public ModelAdjuster soySpringModelAdjuster() { return new SpringModelAdjuster(); } @Bean public GlobalRuntimeModelResolver soyGlobalRuntimeModelResolver() { return new EmptyGlobalRuntimeModelResolver(); } @Bean public ContentNegotiator contentNegotiator() { return new DefaultContentNegotiator(); } @Bean public ViewResolver soyViewResolver(final CompiledTemplatesHolder compiledTemplatesHolder, final ModelAdjuster modelAdjuster, final TemplateRenderer templateRenderer, final LocaleProvider localeProvider, final GlobalRuntimeModelResolver globalRuntimeModelResolver, final ContentNegotiator contentNegotiator, final SoyMsgBundleResolver msgBundleResolver) throws Exception { final SoyTemplateViewResolver soyTemplateViewResolver = new SoyTemplateViewResolver(); soyTemplateViewResolver.setSoyMsgBundleResolver(msgBundleResolver); soyTemplateViewResolver.setCompiledTemplatesHolder(compiledTemplatesHolder); soyTemplateViewResolver.setEncoding(encoding); soyTemplateViewResolver.setGlobalRuntimeModelResolver(globalRuntimeModelResolver); soyTemplateViewResolver.setHotReloadMode(hotReloadMode); soyTemplateViewResolver.setIndexView(indexView); soyTemplateViewResolver.setLocaleProvider(localeProvider); soyTemplateViewResolver.setModelAdjuster(modelAdjuster); soyTemplateViewResolver.setTemplateRenderer(templateRenderer); soyTemplateViewResolver.setPrefix(logicalPrefix); soyTemplateViewResolver.setOrder(order); soyTemplateViewResolver.setRedirectContextRelative(true); soyTemplateViewResolver.setRedirectHttp10Compatible(true); soyTemplateViewResolver.setContentNegotiator(contentNegotiator); return soyTemplateViewResolver; } }
matiwinnetou/spring-soy-view
spring-soy-view/src/main/java/pl/matisoft/soy/config/SpringSoyViewBaseConfig.java
Java
apache-2.0
8,365
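Every knob in SpringSoyViewBaseConfig is an externalized Spring property whose default is baked into the @Value expression. A sketch of an application.properties overriding a few of them (keys and defaults are verbatim from the annotations above; the chosen values are illustrative):

# default: false
soy.hot.reload.mode=true
# default: /WEB-INF/templates
soy.templates.directory=/WEB-INF/views/soy
# default: soy
soy.templates.file.extension=soy
# default: false
soy.preCompile.templates=true
# default: 2147483647 (Integer.MAX_VALUE)
soy.resolver.order=100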
package userstoreauth.servlets; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import userstoreauth.model.UserVer2; import userstoreauth.service.UserStoreMb; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.IOException; import java.sql.Timestamp; import java.time.LocalDateTime; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; class EditUserTest { @BeforeEach void setUp() { UserStoreMb us = new UserStoreMb(); us.deleteAll(); } @Test void editUser() throws ServletException, IOException { EditUser editUser = new EditUser(); UserStoreMb us = new UserStoreMb(); HttpServletRequest request = mock(HttpServletRequest.class); HttpServletResponse response = mock(HttpServletResponse.class); when(request.getParameter("login")).thenReturn("login"); when(request.getParameter("password")).thenReturn("password0"); when(request.getParameter("name")).thenReturn("name0"); when(request.getParameter("email")).thenReturn("email0"); when(request.getParameter("role")).thenReturn("admin"); when(request.getParameter("country")).thenReturn("Россия"); when(request.getParameter("city")).thenReturn("Москва"); UserVer2 user = new UserVer2("login", "password", "name", "email", "Россия", "Москва", Timestamp.valueOf(LocalDateTime.now()), "user"); us.addUser(user); assertEquals(user, us.getByLogin("login")); editUser.doPost(request, response); user.setPassword("password0"); user.setName("name0"); user.setEmail("email0"); user.setRole("admin"); assertEquals(user, us.getByLogin("login")); } }
HeTyDeHer/ZapovA
chapter_009/src/test/java/userstoreauth/servlets/EditUserTest.java
Java
apache-2.0
1,929
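The test above drives the servlet directly through Mockito mocks. A natural follow-up, sketched here and not part of the original test, is to verify the servlet actually consumed the stubbed parameters; these lines would sit inside editUser() right after editUser.doPost(request, response):

import static org.mockito.Mockito.verify;

verify(request).getParameter("login");    // the servlet read the login...
verify(request).getParameter("password"); // ...and the new password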
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.execution.testframework.sm.runner; import com.intellij.execution.testframework.sm.SMTestRunnerConnectionUtil; import com.intellij.execution.testframework.sm.runner.events.*; import com.intellij.openapi.application.Application; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.project.Project; import com.intellij.openapi.util.Key; import com.intellij.util.containers.ContainerUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.jetbrains.annotations.TestOnly; import java.util.*; /** * This class fires events to SMTRunnerEventsListener in event dispatch thread. * * @author: Roman Chernyatchik */ public class GeneralToSMTRunnerEventsConvertor extends GeneralTestEventsProcessor { private final Map<String, SMTestProxy> myRunningTestsFullNameToProxy = ContainerUtil.newConcurrentMap(); private final TestSuiteStack mySuitesStack; private final Map<String, List<SMTestProxy>> myCurrentChildren = new HashMap<>(); private boolean myIsTestingFinished; public GeneralToSMTRunnerEventsConvertor(Project project, @NotNull SMTestProxy.SMRootTestProxy testsRootNode, @NotNull String testFrameworkName) { super(project, testFrameworkName, testsRootNode); mySuitesStack = new TestSuiteStack(testFrameworkName); } @Override protected SMTestProxy createProxy(String testName, String locationHint, String metaInfo, String id, String parentNodeId) { SMTestProxy proxy = super.createProxy(testName, locationHint, metaInfo, id, parentNodeId); SMTestProxy currentSuite = getCurrentSuite(); currentSuite.addChild(proxy); return proxy; } @Override protected SMTestProxy createSuite(String suiteName, String locationHint, String metaInfo, String id, String parentNodeId) { SMTestProxy newSuite = super.createSuite(suiteName, locationHint, metaInfo, id, parentNodeId); final SMTestProxy parentSuite = getCurrentSuite(); parentSuite.addChild(newSuite); mySuitesStack.pushSuite(newSuite); return newSuite; } @Override public void onSuiteTreeEnded(String suiteName) { myBuildTreeRunnables.add(() -> mySuitesStack.popSuite(suiteName)); super.onSuiteTreeEnded(suiteName); } @Override public void onStartTesting() { //fire mySuitesStack.pushSuite(myTestsRootProxy); myTestsRootProxy.setStarted(); //fire fireOnTestingStarted(myTestsRootProxy); } @Override public void onTestsReporterAttached() { fireOnTestsReporterAttached(myTestsRootProxy); } @Override public void onFinishTesting() { fireOnBeforeTestingFinished(myTestsRootProxy); // has been already invoked! // We don't know whether process was destroyed by user // or it finished after all tests have been run // Lets assume, if at finish all suites except root suite are passed // then all is ok otherwise process was terminated by user if (myIsTestingFinished) { // has been already invoked! 
return; } myIsTestingFinished = true; // We don't know whether process was destroyed by user // or it finished after all tests have been run // Lets assume, if at finish all suites except root suite are passed // then all is ok otherwise process was terminated by user if (!isTreeComplete(myRunningTestsFullNameToProxy.keySet(), myTestsRootProxy)) { myTestsRootProxy.setTerminated(); myRunningTestsFullNameToProxy.clear(); } mySuitesStack.clear(); myTestsRootProxy.setFinished(); myCurrentChildren.clear(); //fire events fireOnTestingFinished(myTestsRootProxy); super.onFinishTesting(); } @Override public void setPrinterProvider(@NotNull TestProxyPrinterProvider printerProvider) { } @Override public void onTestStarted(@NotNull final TestStartedEvent testStartedEvent) { //Duplicated event // creates test // adds to running tests map //Progress started //fire events final String testName = testStartedEvent.getName(); final String locationUrl = testStartedEvent.getLocationUrl(); final boolean isConfig = testStartedEvent.isConfig(); final String fullName = getFullTestName(testName); if (myRunningTestsFullNameToProxy.containsKey(fullName)) { //Duplicated event logProblem("Test [" + fullName + "] has been already started"); if (SMTestRunnerConnectionUtil.isInDebugMode()) { return; } } SMTestProxy parentSuite = getCurrentSuite(); SMTestProxy testProxy = findChild(parentSuite, locationUrl != null ? locationUrl : fullName, false); if (testProxy == null) { // creates test testProxy = new SMTestProxy(testName, false, locationUrl, testStartedEvent.getMetainfo(), false); testProxy.setConfig(isConfig); if (myTreeBuildBeforeStart) testProxy.setTreeBuildBeforeStart(); if (myLocator != null) { testProxy.setLocator(myLocator); } parentSuite.addChild(testProxy); } // adds to running tests map myRunningTestsFullNameToProxy.put(fullName, testProxy); //Progress started testProxy.setStarted(); //fire events fireOnTestStarted(testProxy); } @Override public void onSuiteStarted(@NotNull final TestSuiteStartedEvent suiteStartedEvent) { //new suite //Progress started //fire event final String suiteName = suiteStartedEvent.getName(); final String locationUrl = suiteStartedEvent.getLocationUrl(); SMTestProxy parentSuite = getCurrentSuite(); SMTestProxy newSuite = findChild(parentSuite, locationUrl != null ? locationUrl : suiteName, true); if (newSuite == null) { //new suite newSuite = new SMTestProxy(suiteName, true, locationUrl, suiteStartedEvent.getMetainfo(), parentSuite.isPreservePresentableName()); if (myTreeBuildBeforeStart) { newSuite.setTreeBuildBeforeStart(); } if (myLocator != null) { newSuite.setLocator(myLocator); } parentSuite.addChild(newSuite); } initCurrentChildren(newSuite, true); mySuitesStack.pushSuite(newSuite); //Progress started newSuite.setSuiteStarted(); //fire event fireOnSuiteStarted(newSuite); } private void initCurrentChildren(SMTestProxy newSuite, boolean preferSuite) { if (myTreeBuildBeforeStart) { for (SMTestProxy proxy : newSuite.getChildren()) { if (!proxy.isFinal() || preferSuite && proxy.isSuite()) { String url = proxy.getLocationUrl(); if (url != null) { myCurrentChildren.computeIfAbsent(url, l -> new ArrayList<>()).add(proxy); } myCurrentChildren.computeIfAbsent(proxy.getName(), l -> new ArrayList<>()).add(proxy); } } } } private SMTestProxy findChild(SMTestProxy parentSuite, String fullName, boolean preferSuite) { if (myTreeBuildBeforeStart) { Set<SMTestProxy> acceptedProxies = new LinkedHashSet<>(); Collection<? 
extends SMTestProxy> children = myCurrentChildren.get(fullName); if (children == null) { initCurrentChildren(parentSuite, preferSuite); children = myCurrentChildren.get(fullName); } if (children != null) { //null if child started second time for (SMTestProxy proxy : children) { if (!proxy.isFinal() || preferSuite && proxy.isSuite()) { acceptedProxies.add(proxy); } } if (!acceptedProxies.isEmpty()) { return acceptedProxies.stream() .filter(proxy -> proxy.isSuite() == preferSuite && proxy.getParent() == parentSuite) .findFirst() .orElse(acceptedProxies.iterator().next()); } } } return null; } @Override public void onTestFinished(@NotNull final TestFinishedEvent testFinishedEvent) { final String testName = testFinishedEvent.getName(); final Long duration = testFinishedEvent.getDuration(); final String fullTestName = getFullTestName(testName); final SMTestProxy testProxy = getProxyByFullTestName(fullTestName); if (testProxy == null) { logProblem("Test wasn't started! TestFinished event: name = {" + testName + "}. " + cannotFindFullTestNameMsg(fullTestName)); return; } testProxy.setDuration(duration != null ? duration : 0); testProxy.setFrameworkOutputFile(testFinishedEvent.getOutputFile()); testProxy.setFinished(); myRunningTestsFullNameToProxy.remove(fullTestName); clearCurrentChildren(fullTestName, testProxy); //fire events fireOnTestFinished(testProxy); } private void clearCurrentChildren(String fullTestName, SMTestProxy testProxy) { myCurrentChildren.remove(fullTestName); String url = testProxy.getLocationUrl(); if (url != null) { myCurrentChildren.remove(url); } } @Override public void onSuiteFinished(@NotNull final TestSuiteFinishedEvent suiteFinishedEvent) { //fire events final String suiteName = suiteFinishedEvent.getName(); final SMTestProxy mySuite = mySuitesStack.popSuite(suiteName); if (mySuite != null) { mySuite.setFinished(); myCurrentChildren.remove(suiteName); String locationUrl = mySuite.getLocationUrl(); if (locationUrl != null) { myCurrentChildren.remove(locationUrl); } //fire events fireOnSuiteFinished(mySuite); } } @Override public void onUncapturedOutput(@NotNull final String text, final Key outputType) { final SMTestProxy currentProxy = findCurrentTestOrSuite(); currentProxy.addOutput(text, outputType); } @Override public void onError(@NotNull final String localizedMessage, @Nullable final String stackTrace, final boolean isCritical) { final SMTestProxy currentProxy = findCurrentTestOrSuite(); currentProxy.addError(localizedMessage, stackTrace, isCritical); } @Override public void onTestFailure(@NotNull final TestFailedEvent testFailedEvent) { // if hasn't been already reported // 1. report // 2. add failure // fire event final String testName = testFailedEvent.getName(); if (testName == null) { logProblem("No test name specified in " + testFailedEvent); return; } final String localizedMessage = testFailedEvent.getLocalizedFailureMessage(); final String stackTrace = testFailedEvent.getStacktrace(); final boolean isTestError = testFailedEvent.isTestError(); final String comparisionFailureActualText = testFailedEvent.getComparisonFailureActualText(); final String comparisionFailureExpectedText = testFailedEvent.getComparisonFailureExpectedText(); final boolean inDebugMode = SMTestRunnerConnectionUtil.isInDebugMode(); final String fullTestName = getFullTestName(testName); SMTestProxy testProxy = getProxyByFullTestName(fullTestName); if (testProxy == null) { logProblem("Test wasn't started! 
TestFailure event: name = {" + testName + "}" + ", message = {" + localizedMessage + "}" + ", stackTrace = {" + stackTrace + "}. " + cannotFindFullTestNameMsg(fullTestName)); if (inDebugMode) { return; } else { // if hasn't been already reported // 1. report onTestStarted(new TestStartedEvent(testName, null)); // 2. add failure testProxy = getProxyByFullTestName(fullTestName); } } if (testProxy == null) { return; } if (comparisionFailureActualText != null && comparisionFailureExpectedText != null) { testProxy.setTestComparisonFailed(localizedMessage, stackTrace, comparisionFailureActualText, comparisionFailureExpectedText, testFailedEvent); } else if (comparisionFailureActualText == null && comparisionFailureExpectedText == null) { testProxy.setTestFailed(localizedMessage, stackTrace, isTestError); } else { testProxy.setTestFailed(localizedMessage, stackTrace, isTestError); logProblem("Comparison failure actual and expected texts should be both null or not null.\n" + "Expected:\n" + comparisionFailureExpectedText + "\n" + "Actual:\n" + comparisionFailureActualText); } // fire event fireOnTestFailed(testProxy); } @Override public void onTestIgnored(@NotNull final TestIgnoredEvent testIgnoredEvent) { // try to fix // 1. report test opened // 2. report failure // fire event final String testName = testIgnoredEvent.getName(); if (testName == null) { logProblem("TestIgnored event: no name"); } String ignoreComment = testIgnoredEvent.getIgnoreComment(); final String stackTrace = testIgnoredEvent.getStacktrace(); final String fullTestName = getFullTestName(testName); SMTestProxy testProxy = getProxyByFullTestName(fullTestName); if (testProxy == null) { final boolean debugMode = SMTestRunnerConnectionUtil.isInDebugMode(); logProblem("Test wasn't started! " + "TestIgnored event: name = {" + testName + "}, " + "message = {" + ignoreComment + "}. " + cannotFindFullTestNameMsg(fullTestName)); if (debugMode) { return; } else { // try to fix // 1. report test opened onTestStarted(new TestStartedEvent(testName, null)); // 2. report failure testProxy = getProxyByFullTestName(fullTestName); } } if (testProxy == null) { return; } testProxy.setTestIgnored(ignoreComment, stackTrace); // fire event fireOnTestIgnored(testProxy); } @Override public void onTestOutput(@NotNull final TestOutputEvent testOutputEvent) { final String testName = testOutputEvent.getName(); final String text = testOutputEvent.getText(); final Key outputType = testOutputEvent.getOutputType(); final String fullTestName = getFullTestName(testName); final SMTestProxy testProxy = getProxyByFullTestName(fullTestName); if (testProxy == null) { logProblem("Test wasn't started! TestOutput event: name = {" + testName + "}, " + "outputType = " + outputType + ", " + "text = {" + text + "}. " + cannotFindFullTestNameMsg(fullTestName)); return; } testProxy.addOutput(text, outputType); } @Override public void onTestsCountInSuite(final int count) { fireOnTestsCountInSuite(count); } @NotNull protected final SMTestProxy getCurrentSuite() { final SMTestProxy currentSuite = mySuitesStack.getCurrentSuite(); if (currentSuite != null) { return currentSuite; } // current suite shouldn't be null otherwise test runner isn't correct // or may be we are in debug mode logProblem("Current suite is undefined. 
Root suite will be used."); return myTestsRootProxy; } protected String getFullTestName(final String testName) { // Test name should be unique return testName; } protected int getRunningTestsQuantity() { return myRunningTestsFullNameToProxy.size(); } @Nullable protected SMTestProxy getProxyByFullTestName(final String fullTestName) { return myRunningTestsFullNameToProxy.get(fullTestName); } @TestOnly protected void clearInternalSuitesStack() { mySuitesStack.clear(); } private String cannotFindFullTestNameMsg(String fullTestName) { return "Cant find running test for [" + fullTestName + "]. Current running tests: {" + dumpRunningTestsNames() + "}"; } private StringBuilder dumpRunningTestsNames() { final Set<String> names = myRunningTestsFullNameToProxy.keySet(); final StringBuilder namesDump = new StringBuilder(); for (String name : names) { namesDump.append('[').append(name).append(']').append(','); } return namesDump; } /* * Remove listeners, etc */ @Override public void dispose() { super.dispose(); if (!myRunningTestsFullNameToProxy.isEmpty()) { final Application application = ApplicationManager.getApplication(); if (!application.isHeadlessEnvironment() && !application.isUnitTestMode()) { logProblem("Not all events were processed! " + dumpRunningTestsNames()); } } myRunningTestsFullNameToProxy.clear(); mySuitesStack.clear(); } private SMTestProxy findCurrentTestOrSuite() { //if we can locate test - we will send output to it, otherwise to current test suite SMTestProxy currentProxy = null; Iterator<SMTestProxy> iterator = myRunningTestsFullNameToProxy.values().iterator(); if (iterator.hasNext()) { //current test currentProxy = iterator.next(); if (iterator.hasNext()) { //if there are multiple tests running call put output to the suite currentProxy = null; } } if (currentProxy == null) { //current suite // // ProcessHandler can fire output available event before processStarted event final SMTestProxy currentSuite = mySuitesStack.getCurrentSuite(); currentProxy = currentSuite != null ? currentSuite : myTestsRootProxy; } return currentProxy; } }
msebire/intellij-community
platform/smRunner/src/com/intellij/execution/testframework/sm/runner/GeneralToSMTRunnerEventsConvertor.java
Java
apache-2.0
17,626
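GeneralToSMTRunnerEventsConvertor assumes a strict event order. A comment-only outline of the expected call sequence (constructing the Project and the root proxy is IntelliJ-internal and elided here; each note restates what the corresponding method above does):

// convertor.onStartTesting();                    // pushes the root proxy, marks it started
// convertor.onSuiteStarted(suiteStartedEvent);   // pushes a suite onto mySuitesStack
// convertor.onTestStarted(new TestStartedEvent("testFoo", null));
//                                                // registers the test in the running map
// convertor.onTestFinished(testFinishedEvent);   // removes it from the running map
// convertor.onSuiteFinished(suiteFinishedEvent); // pops the suite from the stack
// convertor.onFinishTesting();                   // marks the root finished, fires listeners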
package br.copacabana;

import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;

import javax.cache.Cache;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.springframework.web.servlet.ModelAndView;

import br.com.copacabana.cb.entities.Address;
import br.com.copacabana.cb.entities.Client;
import br.com.copacabana.cb.entities.MealOrder;
import br.com.copacabana.cb.entities.OrderedPlate;
import br.com.copacabana.cb.entities.Plate;
import br.com.copacabana.cb.entities.Restaurant;
import br.com.copacabana.cb.entities.TurnType;
import br.com.copacabana.cb.entities.WorkingHours.DayOfWeek;
import br.copacabana.order.paypal.PayPalProperties.PayPalConfKeys;
import br.copacabana.spring.AddressManager;
import br.copacabana.spring.ClientManager;
import br.copacabana.spring.ConfigurationManager;
import br.copacabana.spring.PlateManager;
import br.copacabana.spring.RestaurantManager;
import br.copacabana.usecase.control.UserActionManager;
import br.copacabana.util.TimeController;

import com.google.appengine.api.datastore.Key;
import com.google.appengine.api.datastore.KeyFactory;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;

/**
 * @author Rafael Coutinho
 */
public class PlaceOrderController extends JsonViewController {

    private String formView;
    private String successView;

    @Override
    protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response) throws Exception {
        Map<String, Object> model = new HashMap<String, Object>();
        model.put("mode", "view");
        try {
            Cache cache = CacheController.getCache();
            if (cache.get(PayPalConfKeys.pppFixedRate.name()) == null) {
                ConfigurationManager cm = new ConfigurationManager();
                cache.put(PayPalConfKeys.pppFixedRate.name(), cm.getConfigurationValue(PayPalConfKeys.pppFixedRate.name()));
                cache.put(PayPalConfKeys.pppPercentageValue.name(), cm.getConfigurationValue(PayPalConfKeys.pppPercentageValue.name()));
            }
            if (!Authentication.isUserLoggedIn(request.getSession())) {
                String orderData = request.getParameter("orderData");
                request.getSession().setAttribute("orderData", orderData);
                model.put("forwardUrl", "/continueOrder.jsp");
                UserActionManager.startOrderNotLogged(orderData, request.getSession().getId());
                return new ModelAndView(getFormView(), model);
            } else {
                String orderData = "";
                JsonObject user = Authentication.getLoggedUser(request.getSession());
                String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
                if (request.getParameter("orderData") == null) {
                    orderData = (String) request.getSession().getAttribute("orderData");
                } else {
                    orderData = request.getParameter("orderData");
                }
                log.log(Level.INFO, "OrderJSon: {0}", orderData);
                JsonParser pa = new JsonParser();
                JsonObject orderDataJson = (JsonObject) pa.parse(orderData);
                ClientManager cman = new ClientManager();
                Client c = cman.find(KeyFactory.stringToKey(loggedUserId), Client.class);
                MealOrder mo = getMealOrder(c, orderDataJson);
                request.getSession().setAttribute("clientPhone", "");
                DateSerializer dateSerializer = new DateSerializer(request);
                DateDeSerializer dateDeSerializer = new DateDeSerializer(request);
                GsonBuilder gsonBuilder = GsonBuilderFactory.getInstance(); // new
                // GsonBuilder().setPrettyPrinting().serializeNulls().excludeFieldsWithoutExposeAnnotation();
                gsonBuilder.registerTypeAdapter(Date.class, dateSerializer);
                gsonBuilder.registerTypeAdapter(Date.class, dateDeSerializer);
                gsonBuilder.registerTypeAdapter(Key.class, new KeyDeSerializer());
                gsonBuilder.registerTypeAdapter(Key.class, new KeySerializer());
                Gson gson = gsonBuilder.create();
                model.putAll(updateModelData(mo, c, gson));
                String json = gson.toJson(mo); // Or use new
                json = GsonBuilderFactory.escapeString(json);
                request.getSession().setAttribute("orderData", json);
                UserActionManager.startOrder(json, loggedUserId, request.getSession().getId());
                return new ModelAndView(getSuccessView(), model);
            }
        } catch (Exception e) {
            log.log(Level.SEVERE, "Failed to place order.");
            try {
                String orderData = "";
                log.log(Level.SEVERE, "Checking logged user.");
                JsonObject user = Authentication.getLoggedUser(request.getSession());
                if (user == null) {
                    log.log(Level.SEVERE, "user is not logged in.");
                }
                String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
                log.log(Level.SEVERE, "logged user id {0}", loggedUserId);
                if (request.getParameter("orderData") == null) {
                    log.log(Level.SEVERE, "Order is not in request, checking session");
                    orderData = (String) request.getSession().getAttribute("orderData");
                } else {
                    log.log(Level.SEVERE, "Order is in request");
                    orderData = request.getParameter("orderData");
                }
                if (orderData == null) {
                    log.log(Level.SEVERE, "Order was null!");
                }
                log.log(Level.SEVERE, "Order data is: " + orderData);
                log.log(Level.SEVERE, "Exception was {0}.", e);
                log.log(Level.SEVERE, "Error was {0}.", e.getMessage());
                UserActionManager.registerMajorError(request, e, loggedUserId, request.getSession().getId(), "placing order");
            } catch (Exception ex) {
                log.log(Level.SEVERE, "Failed during logging of error {0}.", e);
                UserActionManager.registerMajorError(request, e, "placing order 2");
            }
            throw e;
        }
    }

    public static Map<String, Object> updateModelData(MealOrder mo, Client c, Gson gson) {
        Map<String, Object> model = new HashMap<String, Object>();
        RestaurantManager rman = new RestaurantManager();
        Restaurant r = rman.getRestaurant(mo.getRestaurant());
        Boolean b = r.getOnlyForRetrieval();
        if (b != null && true == b) {
            model.put("onlyForRetrieval", Boolean.TRUE);
        } else {
            model.put("onlyForRetrieval", Boolean.FALSE);
        }
        model.put("restaurantAddressKey", KeyFactory.keyToString(r.getAddress()));
        model.put("clientCpf", c.getCpf());
        model.put("level", c.getLevel().ordinal());
        JsonObject json = new JsonObject();
        ConfigurationManager cm = new ConfigurationManager();
        String hasSpecificLogic = cm.getConfigurationValue("hasSpecificLogic");
        model.put("noTakeAwayOrders", "false");
        if (hasSpecificLogic != null && hasSpecificLogic.endsWith("true")) {
            json = getSteakHouseSpecificData(mo, c, gson);
            getMakisSpecificLogic(mo, c, gson, json);
            getPapagaiosSpecificLogic(mo, c, gson, json);
            getPizzadoroSpecificLogic(mo, c, gson, json);
            if (noTakeAwayOrders(mo) == true) {
                model.put("noTakeAwayOrders", "true");
            }
        }
        model.put("hasSpecificLogic", json.toString());
        if (json.get("javascript") != null && json.get("javascript").getAsString().length() > 0) {
            model.put("hasSpecificLogicJavascript", json.get("javascript").getAsString());
        }
        Address restAddress = new AddressManager().getAddress(r.getAddress());
        model.put("restaurantAddress", gson.toJson(restAddress));
        return model;
    }

    private static boolean noTakeAwayOrders(MealOrder mo) {
        ConfigurationManager cm = new ConfigurationManager();
        String ids = cm.getConfigurationValue("no.takeaway.ids");
        String restId = KeyFactory.keyToString(mo.getRestaurant());
        if (ids.contains(restId)) {
            return true;
        }
        return false;
    }

    private static void getPapagaiosSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
        ConfigurationManager cm = new ConfigurationManager();
        String idStr = cm.getConfigurationValue("papagaios.id");
        if (idStr != null && idStr.length() > 0) {
            Key k = KeyFactory.stringToKey(idStr);
            if (k.equals(mo.getRestaurant())) {
                json.add("javascript", new JsonPrimitive("/scripts/custom/papagaios.js"));
            }
        }
    }

    private static void getPizzadoroSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
        ConfigurationManager cm = new ConfigurationManager();
        String idStr = cm.getConfigurationValue("pizzadoro.id");
        if (idStr != null && idStr.length() > 0) {
            Key k = KeyFactory.stringToKey(idStr);
            if (k.equals(mo.getRestaurant())) {
                json.add("javascript", new JsonPrimitive("/scripts/custom/pizzadoro.js"));
            }
        }
    }

    private static void getMakisSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
        try {
            ConfigurationManager cm = new ConfigurationManager();
            PlateManager pm = new PlateManager();
            String makisIdStr = cm.getConfigurationValue("makis.Id");
            if (makisIdStr != null && makisIdStr.length() > 0) {
                Key makis = KeyFactory.stringToKey(makisIdStr);
                if (makis != null && makis.equals(mo.getRestaurant())) {
                    String packageId = cm.getConfigurationValue("makis.package.id");
                    if (packageId != null && packageId.length() > 0) {
                        json.add("makisPackageCostId", new JsonPrimitive(packageId));
                        json.add("makisMsg", new JsonPrimitive(cm.getConfigurationValue("makis.msg")));
                        boolean isIncluded = false;
                        Key packageKey = KeyFactory.stringToKey(packageId);
                        for (Iterator<OrderedPlate> iterator = mo.getPlates().iterator(); iterator.hasNext();) {
                            OrderedPlate plate = (OrderedPlate) iterator.next();
                            if (Boolean.FALSE.equals(plate.getIsFraction()) && plate.getPlate().equals(packageKey)) {
                                isIncluded = true;
                                break;
                            }
                        }
                        if (isIncluded == false) {
                            Plate packagePlate = pm.get(packageKey);
                            OrderedPlate oplate = new OrderedPlate();
                            oplate.setName(packagePlate.getName());
                            oplate.setPrice(packagePlate.getPrice());
                            oplate.setPriceInCents(packagePlate.getPriceInCents());
                            oplate.setQty(1);
                            oplate.setPlate(packageKey);
                            mo.getPlates().add(oplate);
                        }
                    }
                }
            }
        } catch (Exception e) {
            log.log(Level.SEVERE, "failed to add makis specific logic", e);
        }
    }

    private static JsonObject getSteakHouseSpecificData(MealOrder mo, Client c, Gson gson) {
        JsonObject json = new JsonObject();
        json.add("freeDelivery", new JsonPrimitive("false"));
        try {
            ConfigurationManager cm = new ConfigurationManager();
            String steakIdStr = cm.getConfigurationValue("steakHouse.Id");
            if (steakIdStr != null && steakIdStr.length() > 0) {
                Key steak = KeyFactory.stringToKey(steakIdStr);
                if (steak.equals(mo.getRestaurant())) {
                    if (!TimeController.getDayOfWeek().equals(DayOfWeek.SATURDAY) && !TimeController.getDayOfWeek().equals(DayOfWeek.SUNDAY)) {
                        if (TimeController.getCurrentTurn().equals(TurnType.LUNCH)) {
                            String foodCatsStr = cm.getConfigurationValue("steakHouse.FoodCats");
                            if (foodCatsStr != null && foodCatsStr.length() > 0) {
                                String[] foodCatsArray = foodCatsStr.split("\\|");
                                Set<Key> foodCats = new HashSet<Key>();
                                for (int i = 0; i < foodCatsArray.length; i++) {
                                    if (foodCatsArray[i].length() > 0) {
                                        foodCats.add(KeyFactory.stringToKey(foodCatsArray[i]));
                                    }
                                }
                                List<OrderedPlate> plates = mo.getPlates();
                                PlateManager pm = new PlateManager();
                                for (Iterator iterator = plates.iterator(); iterator.hasNext();) {
                                    OrderedPlate orderedPlate = (OrderedPlate) iterator.next();
                                    Plate p = null;
                                    if (Boolean.TRUE.equals(orderedPlate.getIsFraction())) {
                                        p = pm.getPlate(orderedPlate.getFractionPlates().iterator().next());
                                    } else {
                                        p = pm.getPlate(orderedPlate.getPlate());
                                    }
                                    if (!foodCats.contains(p.getFoodCategory())) {
                                        json.add("freeDelivery", new JsonPrimitive("false"));
                                        return json;
                                    }
                                }
                                json.add("freeDelivery", new JsonPrimitive("true"));
                                json.add("msg", new JsonPrimitive(cm.getConfigurationValue("steakHouse.msg")));
                            }
                        }
                    }
                }
            }
        } catch (Exception e) {
            log.log(Level.SEVERE, "Could not set up things for SteakHouse", e);
        }
        return json;
    }

    public MealOrder getMealOrder(Client c, JsonObject sessionOderData) {
        MealOrder mo = new MealOrder();
        mo.setClient(c);
        if (c.getContact() != null) {
            mo.setClientPhone(c.getContact().getPhone());
        }
        mo.setAddress(getAddress(sessionOderData, c));
        mo.setObservation(getObservation(sessionOderData));
        mo.setRestaurant(getRestKey(sessionOderData));
        mo.setPlates(getPlates(sessionOderData));
        return mo;
    }

    private Key getAddress(JsonObject sessionOderData, Client c) {
        try {
            if (sessionOderData.get("address") == null) {
                if (c.getMainAddress() != null) {
                    return c.getMainAddress();
                } else {
                    return null;
                }
            } else {
                if (sessionOderData.get("address") != null && !sessionOderData.get("address").isJsonNull()) {
                    return KeyFactory.stringToKey(sessionOderData.get("address").getAsString());
                } else {
                    return null;
                }
            }
        } catch (Exception e) {
            log.log(Level.SEVERE, "the session address was {0}", sessionOderData.get("address"));
            log.log(Level.SEVERE, "Error while fetching the client's address or the session address", e);
            return null;
        }
    }

    public List<OrderedPlate> getPlates(JsonObject sessionOderData) {
        List<OrderedPlate> orderedPlates = new ArrayList<OrderedPlate>();
        JsonArray array = sessionOderData.get("plates").getAsJsonArray();
        for (int i = 0; i < array.size(); i++) {
            JsonObject pjson = array.get(i).getAsJsonObject();
            orderedPlates.add(getOrdered(pjson));
        }
        return orderedPlates;
    }

    private OrderedPlate getOrdered(JsonObject pjson) {
        OrderedPlate oplate = new OrderedPlate();
        oplate.setName(pjson.get("name").getAsString());
        oplate.setPrice(pjson.get("price").getAsDouble());
        oplate.setPriceInCents(Double.valueOf(pjson.get("price").getAsDouble() * 100.0).intValue());
        oplate.setQty(pjson.get("qty").getAsInt());
        if (pjson.get("isFraction").getAsBoolean() == true) {
            oplate.setIsFraction(Boolean.TRUE);
            Set<Key> fractionPlates = new HashSet<Key>();
            JsonArray fractionKeys = pjson.get("fractionKeys").getAsJsonArray();
            for (int i = 0; i < fractionKeys.size(); i++) {
                Key fractionKey = KeyFactory.stringToKey(fractionKeys.get(i).getAsString());
                fractionPlates.add(fractionKey);
            }
            oplate.setFractionPlates(fractionPlates);
            return oplate;
        } else {
            String pkey = "";
            if (pjson.get("plate").isJsonObject()) {
                pkey = pjson.get("plate").getAsJsonObject().get("id").getAsString();
            } else {
                pkey = pjson.get("plate").getAsString();
            }
            oplate.setPlate(KeyFactory.stringToKey(pkey));
            return oplate;
        }
    }

    public Key getRestKey(JsonObject sessionOderData) {
        String restKey;
        if (sessionOderData.get("restaurant") != null) {
            if (sessionOderData.get("restaurant").isJsonObject()) {
                restKey = sessionOderData.get("restaurant").getAsJsonObject().get("id").getAsString();
            } else {
                restKey = sessionOderData.get("restaurant").getAsString();
            }
        } else {
            restKey = sessionOderData.get("plates").getAsJsonArray().get(0).getAsJsonObject().get("plate").getAsJsonObject().get("value").getAsJsonObject().get("restaurant").getAsString();
        }
        return KeyFactory.stringToKey(restKey);
    }

    public String getObservation(JsonObject sessionOderData) {
        return sessionOderData.get("observation").getAsString();
    }

    public String getFormView() {
        return formView;
    }

    public void setFormView(String formView) {
        this.formView = formView;
    }

    public String getSuccessView() {
        return successView;
    }

    public void setSuccessView(String successView) {
        this.successView = successView;
    }
}
rafaelcoutinho/comendobemdelivery
src/br/copacabana/PlaceOrderController.java
Java
apache-2.0
16,224
package hska.iwi.eShopMaster.model.businessLogic.manager.impl;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import hska.iwi.eShopMaster.model.businessLogic.manager.CategoryManager;
import hska.iwi.eShopMaster.model.businessLogic.manager.entity.Category;
import hska.iwi.eShopMaster.model.businessLogic.manager.entity.User;

import java.util.List;

import javax.ws.rs.core.MediaType;

import org.apache.log4j.Logger;

public class CategoryManagerImpl implements CategoryManager {

    private final static String BASIS_URL_CATEGORY = "http://localhost:8081/api/catalog/category/";

    private final Logger logger = Logger.getLogger(CategoryManagerImpl.class);
    private final ObjectMapper parser = new ObjectMapper();
    private final User currentUser;

    public CategoryManagerImpl(User currentUser) {
        this.currentUser = currentUser;
    }

    @Override
    public List<Category> getCategories() {
        List<Category> categories = null;
        try {
            Client client = Client.create();
            WebResource webResource = client.resource(BASIS_URL_CATEGORY);
            ClientResponse response = webResource.accept(MediaType.APPLICATION_JSON_TYPE)
                    .get(ClientResponse.class);
            categories = parser.readValue(response.getEntity(String.class), List.class);
        } catch (Exception ex) {
            logger.error(ex);
        }
        return categories;
    }

    @Override
    public Category getCategory(int id) {
        Category category = null;
        try {
            Client client = Client.create();
            WebResource webResource = client.resource(BASIS_URL_CATEGORY).path(String.valueOf(id));
            ClientResponse response = webResource.accept(MediaType.APPLICATION_JSON_TYPE)
                    .get(ClientResponse.class);
            category = parser.readValue(response.getEntity(String.class), Category.class);
        } catch (Exception ex) {
            logger.error(ex);
        }
        return category;
    }

    @Override
    public void addCategory(String name) {
        Category category = new Category(name);
        try {
            Client client = Client.create();
            WebResource webResource = client.resource(BASIS_URL_CATEGORY);
            webResource.type(MediaType.APPLICATION_JSON_TYPE)
                    .accept(MediaType.APPLICATION_JSON_TYPE)
                    .header("usr", currentUser.getUsername())
                    .header("pass", currentUser.getPassword())
                    .post(ClientResponse.class, parser.writeValueAsString(category));
        } catch (Exception ex) {
            logger.error(ex);
        }
    }

    @Override
    public void delCategoryById(int id) {
        try {
            Client client = Client.create();
            WebResource webResource = client.resource(BASIS_URL_CATEGORY).path(String.valueOf(id));
            webResource.accept(MediaType.APPLICATION_JSON_TYPE)
                    .header("usr", currentUser.getUsername())
                    .header("pass", currentUser.getPassword())
                    .delete();
        } catch (Exception ex) {
            logger.error(ex);
        }
    }
}
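
// Editor's sketch (not part of the original file): readValue(..., List.class) in
// getCategories() deserializes to a List of LinkedHashMaps, not of Category objects.
// A Jackson TypeReference keeps the element type; the class and method names below
// are hypothetical, added only for illustration.
class CategoryListParsingSketch {
    static List<Category> parse(ObjectMapper mapper, String json) throws java.io.IOException {
        // TypeReference captures the generic type so Jackson binds each element to Category
        return mapper.readValue(json, new com.fasterxml.jackson.core.type.TypeReference<List<Category>>() {});
    }
}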
Am3o/eShop
WebShopStart/src/main/java/hska/iwi/eShopMaster/model/businessLogic/manager/impl/CategoryManagerImpl.java
Java
apache-2.0
3,084
/**
 * Created by txs on 2016/10/17.
 */
public class Student {

    String name;
    int grade;

    @Override
    public String toString() {
        String temp = "";
        temp += "name: " + name + "\n";
        temp += "grade: " + grade + "\n";
        return temp;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        boolean r = false;
        if (obj instanceof Student) {
            Student temp = (Student) obj;
            if (this.name.equals(temp.name) && this.grade == temp.grade) r = true;
        }
        return r;
    }
}
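
// Editor's note (sketch, not in the original source): Student overrides equals()
// without hashCode(), so equal students can land in different hash buckets of a
// HashMap/HashSet. A minimal matching hashCode() would be:
//
//     @Override
//     public int hashCode() {
//         return 31 * name.hashCode() + grade;
//     }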
txs72/BUPTJava
slides/06/overrding/Student.java
Java
apache-2.0
625
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.vcs;

import com.intellij.execution.ui.ConsoleView;
import com.intellij.execution.ui.ConsoleViewContentType;
import com.intellij.util.containers.ContainerUtil;
import consulo.util.lang.Pair;
import consulo.util.lang.StringUtil;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.List;

public final class VcsConsoleLine {
  private final List<Pair<String, ConsoleViewContentType>> myChunks;

  private VcsConsoleLine(@Nonnull List<Pair<String, ConsoleViewContentType>> chunks) {
    myChunks = chunks;
  }

  public void print(@Nonnull ConsoleView console) {
    ConsoleViewContentType lastType = ConsoleViewContentType.NORMAL_OUTPUT;
    for (Pair<String, ConsoleViewContentType> chunk : myChunks) {
      console.print(chunk.first, chunk.second);
      lastType = chunk.second;
    }
    console.print("\n", lastType);
  }

  @Nullable
  public static VcsConsoleLine create(@Nullable String message, @Nonnull ConsoleViewContentType contentType) {
    return create(Collections.singletonList(Pair.create(message, contentType)));
  }

  @Nullable
  public static VcsConsoleLine create(@Nonnull List<Pair<String, ConsoleViewContentType>> lineChunks) {
    List<Pair<String, ConsoleViewContentType>> chunks = ContainerUtil.filter(lineChunks, it -> !StringUtil.isEmptyOrSpaces(it.first));
    if (chunks.isEmpty()) return null;
    return new VcsConsoleLine(chunks);
  }
}
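
// Usage sketch (editor's addition, not part of the original file): builds a
// two-chunk line and prints it to a console. The ConsoleView instance is assumed
// to be supplied by the surrounding tool-window code; the class name is hypothetical.
class VcsConsoleLineUsageSketch {
  static void printPushResult(ConsoleView console) {
    // empty/whitespace chunks are filtered out by create(); it may return null
    VcsConsoleLine line = VcsConsoleLine.create(java.util.Arrays.asList(
        Pair.create("git push ", ConsoleViewContentType.NORMAL_OUTPUT),
        Pair.create("rejected", ConsoleViewContentType.ERROR_OUTPUT)));
    if (line != null) {
      line.print(console); // appends "\n" using the last chunk's content type
    }
  }
}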
consulo/consulo
modules/base/vcs-api/src/main/java/com/intellij/openapi/vcs/VcsConsoleLine.java
Java
apache-2.0
1,604
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.brixcms.web.nodepage;

import org.apache.wicket.IRequestTarget;
import org.apache.wicket.Page;
import org.apache.wicket.PageParameters;
import org.apache.wicket.RequestCycle;
import org.apache.wicket.model.IModel;
import org.apache.wicket.request.target.component.IPageRequestTarget;
import org.apache.wicket.util.lang.Objects;
import org.apache.wicket.util.string.StringValue;
import org.brixcms.exception.BrixException;
import org.brixcms.jcr.wrapper.BrixNode;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

public class BrixPageParameters implements Serializable {
// ------------------------------ FIELDS ------------------------------

    private static final long serialVersionUID = 1L;

    private List<String> indexedParameters = null;

    private List<QueryStringParameter> queryStringParameters = null;

// -------------------------- STATIC METHODS --------------------------

    public static boolean equals(BrixPageParameters p1, BrixPageParameters p2) {
        if (Objects.equal(p1, p2)) {
            return true;
        }
        if (p1 == null && p2.getIndexedParamsCount() == 0 && p2.getQueryParamKeys().isEmpty()) {
            return true;
        }
        if (p2 == null && p1.getIndexedParamsCount() == 0 && p1.getQueryParamKeys().isEmpty()) {
            return true;
        }
        return false;
    }

    public int getIndexedParamsCount() {
        return indexedParameters != null ? indexedParameters.size() : 0;
    }

    public static BrixPageParameters getCurrent() {
        IRequestTarget target = RequestCycle.get().getRequestTarget();
        // this is required for getting current page parameters from page constructor
        // (the actual page instance is not constructed yet)
        if (target instanceof PageParametersRequestTarget) {
            return ((PageParametersRequestTarget) target).getPageParameters();
        } else {
            return getCurrentPage().getBrixPageParameters();
        }
    }

// --------------------------- CONSTRUCTORS ---------------------------

    public BrixPageParameters() {
    }

    public BrixPageParameters(PageParameters params) {
        if (params != null) {
            for (String name : params.keySet()) {
                addQueryParam(name, params.get(name));
            }
        }
    }

    public void addQueryParam(String name, Object value) {
        addQueryParam(name, value, -1);
    }

    public BrixPageParameters(BrixPageParameters copy) {
        if (copy == null) {
            throw new IllegalArgumentException("Copy argument may not be null.");
        }
        if (copy.indexedParameters != null)
            this.indexedParameters = new ArrayList<String>(copy.indexedParameters);
        if (copy.queryStringParameters != null)
            this.queryStringParameters = new ArrayList<QueryStringParameter>(copy.queryStringParameters);
    }

// ------------------------ CANONICAL METHODS ------------------------

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj instanceof BrixPageParameters == false) {
            return false;
        }
        BrixPageParameters rhs = (BrixPageParameters) obj;
        if (!Objects.equal(indexedParameters, rhs.indexedParameters)) {
            return false;
        }
        if (queryStringParameters == null || rhs.queryStringParameters == null) {
            return rhs.queryStringParameters == queryStringParameters;
        }
        if (queryStringParameters.size() != rhs.queryStringParameters.size()) {
            return false;
        }
        for (String key : getQueryParamKeys()) {
            List<StringValue> values1 = getQueryParams(key);
            Set<String> v1 = new TreeSet<String>();
            List<StringValue> values2 = rhs.getQueryParams(key);
            Set<String> v2 = new TreeSet<String>();
            for (StringValue sv : values1) {
                v1.add(sv.toString());
            }
            for (StringValue sv : values2) {
                v2.add(sv.toString());
            }
            if (v1.equals(v2) == false) {
                return false;
            }
        }
        return true;
    }

    public Set<String> getQueryParamKeys() {
        if (queryStringParameters == null || queryStringParameters.isEmpty()) {
            return Collections.emptySet();
        }
        Set<String> set = new TreeSet<String>();
        for (QueryStringParameter entry : queryStringParameters) {
            set.add(entry.key);
        }
        return Collections.unmodifiableSet(set);
    }

    public List<StringValue> getQueryParams(String name) {
        if (name == null) {
            throw new IllegalArgumentException("Parameter name may not be null.");
        }
        if (queryStringParameters != null) {
            List<StringValue> result = new ArrayList<StringValue>();
            for (QueryStringParameter entry : queryStringParameters) {
                if (entry.key.equals(name)) {
                    result.add(StringValue.valueOf(entry.value));
                }
            }
            return Collections.unmodifiableList(result);
        } else {
            return Collections.emptyList();
        }
    }

// -------------------------- OTHER METHODS --------------------------

    public void addQueryParam(String name, Object value, int index) {
        if (name == null) {
            throw new IllegalArgumentException("Parameter name may not be null.");
        }
        if (value == null) {
            throw new IllegalArgumentException("Parameter value may not be null.");
        }
        if (queryStringParameters == null)
            queryStringParameters = new ArrayList<QueryStringParameter>(1);
        QueryStringParameter entry = new QueryStringParameter(name, value.toString());
        if (index == -1)
            queryStringParameters.add(entry);
        else
            queryStringParameters.add(index, entry);
    }

    void assign(BrixPageParameters other) {
        if (this != other) {
            this.indexedParameters = other.indexedParameters;
            this.queryStringParameters = other.queryStringParameters;
        }
    }

    public void clearIndexedParams() {
        this.indexedParameters = null;
    }

    public void clearQueryParams() {
        this.queryStringParameters = null;
    }

    public StringValue getIndexedParam(int index) {
        if (indexedParameters != null) {
            if (index >= 0 && index < indexedParameters.size()) {
                String value = indexedParameters.get(index);
                return StringValue.valueOf(value);
            }
        }
        return StringValue.valueOf((String) null);
    }

    public StringValue getQueryParam(String name) {
        if (name == null) {
            throw new IllegalArgumentException("Parameter name may not be null.");
        }
        if (queryStringParameters != null) {
            for (QueryStringParameter entry : queryStringParameters) {
                if (entry.key.equals(name)) {
                    return StringValue.valueOf(entry.value);
                }
            }
        }
        return StringValue.valueOf((String) null);
    }

    public List<QueryStringParameter> getQueryStringParams() {
        if (queryStringParameters == null) {
            return Collections.emptyList();
        } else {
            return Collections.unmodifiableList(new ArrayList<QueryStringParameter>(queryStringParameters));
        }
    }

    public void removeIndexedParam(int index) {
        if (indexedParameters != null) {
            if (index >= 0 && index < indexedParameters.size()) {
                indexedParameters.remove(index);
            }
        }
    }

    public void setIndexedParam(int index, Object object) {
        if (indexedParameters == null)
            indexedParameters = new ArrayList<String>(index);
        for (int i = indexedParameters.size(); i <= index; ++i) {
            indexedParameters.add(null);
        }
        String value = object != null ? object.toString() : null;
        indexedParameters.set(index, value);
    }

    public void setQueryParam(String name, Object value) {
        setQueryParam(name, value, -1);
    }

    public void setQueryParam(String name, Object value, int index) {
        removeQueryParam(name);
        if (value != null) {
            addQueryParam(name, value, index);
        }
    }

    public void removeQueryParam(String name) {
        if (name == null) {
            throw new IllegalArgumentException("Parameter name may not be null.");
        }
        if (queryStringParameters != null) {
            for (Iterator<QueryStringParameter> i = queryStringParameters.iterator(); i.hasNext();) {
                QueryStringParameter e = i.next();
                if (e.key.equals(name)) {
                    i.remove();
                }
            }
        }
    }

    public String toCallbackURL() {
        return urlFor(getCurrentPage());
    }

    /**
     * Constructs a url to the specified page appending these page parameters
     *
     * @param page
     * @return url
     */
    public String urlFor(BrixNodeWebPage page) {
        IRequestTarget target = new BrixNodeRequestTarget(page, this);
        return RequestCycle.get().urlFor(target).toString();
    }

    static BrixNodeWebPage getCurrentPage() {
        IRequestTarget target = RequestCycle.get().getRequestTarget();
        BrixNodeWebPage page = null;
        if (target != null && target instanceof IPageRequestTarget) {
            Page p = ((IPageRequestTarget) target).getPage();
            if (p instanceof BrixNodeWebPage) {
                page = (BrixNodeWebPage) p;
            }
        }
        if (page == null) {
            throw new BrixException("Couldn't obtain the BrixNodeWebPage instance from RequestTarget.");
        }
        return page;
    }

    /**
     * Constructs a url to the specified node appending these page parameters
     *
     * @param node
     * @return url
     */
    public String urlFor(IModel<BrixNode> node) {
        IRequestTarget target = new BrixNodeRequestTarget(node, this);
        return RequestCycle.get().urlFor(target).toString();
    }

// -------------------------- INNER CLASSES --------------------------

    public static class QueryStringParameter implements Serializable {
        private static final long serialVersionUID = 1L;

        private final String key;
        private final String value;

        public QueryStringParameter(String key, String value) {
            this.key = key;
            this.value = value;
        }

        public String getKey() {
            return key;
        }

        public String getValue() {
            return value;
        }
    }
}
kbachl/brix-cms-backup
brix-core/src/main/java/org/brixcms/web/nodepage/BrixPageParameters.java
Java
apache-2.0
11,587
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.test.recovery;

import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.client.program.ProgramInvocationException;
import org.apache.flink.configuration.AkkaOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.JobManagerOptions;
import org.apache.flink.runtime.akka.AkkaUtils;
import org.apache.flink.runtime.client.JobStatusMessage;
import org.apache.flink.runtime.highavailability.HighAvailabilityServices;
import org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils;
import org.apache.flink.runtime.jobmanager.JobManager;
import org.apache.flink.runtime.jobmanager.MemoryArchivist;
import org.apache.flink.runtime.messages.JobManagerMessages;
import org.apache.flink.runtime.metrics.NoOpMetricRegistry;
import org.apache.flink.runtime.testingUtils.TestingUtils;
import org.apache.flink.runtime.testutils.CommonTestUtils;
import org.apache.flink.util.NetUtils;
import org.apache.flink.util.TestLogger;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.pattern.Patterns;
import akka.util.Timeout;
import org.junit.Test;

import java.io.File;
import java.io.StringWriter;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import scala.Option;
import scala.Some;
import scala.Tuple2;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;

import static org.apache.flink.runtime.testutils.CommonTestUtils.getCurrentClasspath;
import static org.apache.flink.runtime.testutils.CommonTestUtils.getJavaCommandPath;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * This test makes sure that jobs are canceled properly in cases where
 * the task manager went down and did not respond to cancel messages.
 */
@SuppressWarnings("serial")
public class ProcessFailureCancelingITCase extends TestLogger {

	@Test
	public void testCancelingOnProcessFailure() throws Exception {
		final StringWriter processOutput = new StringWriter();

		ActorSystem jmActorSystem = null;
		Process taskManagerProcess = null;
		HighAvailabilityServices highAvailabilityServices = null;

		try {
			// check that we run this test only if the java command
			// is available on this machine
			String javaCommand = getJavaCommandPath();
			if (javaCommand == null) {
				System.out.println("---- Skipping Process Failure test : Could not find java executable ----");
				return;
			}

			// create a logging file for the process
			File tempLogFile = File.createTempFile(getClass().getSimpleName() + "-", "-log4j.properties");
			tempLogFile.deleteOnExit();
			CommonTestUtils.printLog4jDebugConfig(tempLogFile);

			// find a free port to start the JobManager
			final int jobManagerPort = NetUtils.getAvailablePort();

			// start a JobManager
			Tuple2<String, Object> localAddress = new Tuple2<String, Object>("localhost", jobManagerPort);

			Configuration jmConfig = new Configuration();
			jmConfig.setString(AkkaOptions.WATCH_HEARTBEAT_INTERVAL, "5 s");
			jmConfig.setString(AkkaOptions.WATCH_HEARTBEAT_PAUSE, "2000 s");
			jmConfig.setInteger(AkkaOptions.WATCH_THRESHOLD, 10);
			jmConfig.setString(AkkaOptions.ASK_TIMEOUT, "100 s");
			jmConfig.setString(JobManagerOptions.ADDRESS, localAddress._1());
			jmConfig.setInteger(JobManagerOptions.PORT, jobManagerPort);

			highAvailabilityServices = HighAvailabilityServicesUtils.createHighAvailabilityServices(
				jmConfig,
				TestingUtils.defaultExecutor(),
				HighAvailabilityServicesUtils.AddressResolution.NO_ADDRESS_RESOLUTION);

			jmActorSystem = AkkaUtils.createActorSystem(jmConfig, new Some<>(localAddress));
			ActorRef jmActor = JobManager.startJobManagerActors(
				jmConfig,
				jmActorSystem,
				TestingUtils.defaultExecutor(),
				TestingUtils.defaultExecutor(),
				highAvailabilityServices,
				new NoOpMetricRegistry(),
				Option.empty(),
				JobManager.class,
				MemoryArchivist.class)._1();

			// the TaskManager java command
			String[] command = new String[] {
				javaCommand,
				"-Dlog.level=DEBUG",
				"-Dlog4j.configuration=file:" + tempLogFile.getAbsolutePath(),
				"-Xms80m", "-Xmx80m",
				"-classpath", getCurrentClasspath(),
				AbstractTaskManagerProcessFailureRecoveryTest.TaskManagerProcessEntryPoint.class.getName(),
				String.valueOf(jobManagerPort)
			};

			// start the TaskManager process
			taskManagerProcess = new ProcessBuilder(command).start();
			new CommonTestUtils.PipeForwarder(taskManagerProcess.getErrorStream(), processOutput);

			// we wait for the JobManager to have the TaskManager available
			// since some of the CI environments are very hostile, we need to give this a lot of time (2 minutes)
			waitUntilNumTaskManagersAreRegistered(jmActor, 1, 120000);

			final Throwable[] errorRef = new Throwable[1];

			// start the test program, which infinitely blocks
			Runnable programRunner = new Runnable() {
				@Override
				public void run() {
					try {
						ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", jobManagerPort);
						env.setParallelism(2);
						env.setRestartStrategy(RestartStrategies.noRestart());
						env.getConfig().disableSysoutLogging();

						env.generateSequence(0, Long.MAX_VALUE)
							.map(new MapFunction<Long, Long>() {
								@Override
								public Long map(Long value) throws Exception {
									synchronized (this) {
										wait();
									}
									return 0L;
								}
							})
							.output(new DiscardingOutputFormat<Long>());

						env.execute();
					} catch (Throwable t) {
						errorRef[0] = t;
					}
				}
			};

			Thread programThread = new Thread(programRunner);

			// kill the TaskManager
			taskManagerProcess.destroy();
			taskManagerProcess = null;

			// immediately submit the job. this should hit the case
			// where the JobManager still thinks it has the TaskManager and tries to send it tasks
			programThread.start();

			// try to cancel the job
			cancelRunningJob(jmActor);

			// we should see a failure within reasonable time (10s is the ask timeout).
			// since the CI environment is often slow, we conservatively give it up to 2 minutes,
			// to fail, which is much lower than the failure time given by the heartbeats ( > 2000s)
			programThread.join(120000);

			assertFalse("The program did not cancel in time (2 minutes)", programThread.isAlive());

			Throwable error = errorRef[0];
			assertNotNull("The program did not fail properly", error);

			assertTrue(error instanceof ProgramInvocationException);
			// all seems well :-)
		} catch (Exception e) {
			printProcessLog("TaskManager", processOutput.toString());
			throw e;
		} catch (Error e) {
			printProcessLog("TaskManager 1", processOutput.toString());
			throw e;
		} finally {
			if (taskManagerProcess != null) {
				taskManagerProcess.destroy();
			}
			if (jmActorSystem != null) {
				jmActorSystem.shutdown();
			}
			if (highAvailabilityServices != null) {
				highAvailabilityServices.closeAndCleanupAllData();
			}
		}
	}

	private void cancelRunningJob(ActorRef jobManager) throws Exception {
		final FiniteDuration askTimeout = new FiniteDuration(10, TimeUnit.SECONDS);

		// try at most for 30 seconds
		final long deadline = System.currentTimeMillis() + 30000;
		JobID jobId = null;

		do {
			Future<Object> response = Patterns.ask(jobManager,
				JobManagerMessages.getRequestRunningJobsStatus(), new Timeout(askTimeout));

			Object result;
			try {
				result = Await.result(response, askTimeout);
			} catch (Exception e) {
				throw new Exception("Could not retrieve running jobs from the JobManager.", e);
			}

			if (result instanceof JobManagerMessages.RunningJobsStatus) {
				List<JobStatusMessage> jobs = ((JobManagerMessages.RunningJobsStatus) result).getStatusMessages();
				if (jobs.size() == 1) {
					jobId = jobs.get(0).getJobId();
					break;
				}
			}
		} while (System.currentTimeMillis() < deadline);

		if (jobId == null) {
			// we never found it running, must have failed already
			return;
		}

		// tell the JobManager to cancel the job
		jobManager.tell(
			new JobManagerMessages.LeaderSessionMessage(
				HighAvailabilityServices.DEFAULT_LEADER_ID,
				new JobManagerMessages.CancelJob(jobId)),
			ActorRef.noSender());
	}

	private void waitUntilNumTaskManagersAreRegistered(ActorRef jobManager, int numExpected, long maxDelay) throws Exception {
		final long deadline = System.currentTimeMillis() + maxDelay;
		while (true) {
			long remaining = deadline - System.currentTimeMillis();
			if (remaining <= 0) {
				fail("The TaskManagers did not register within the expected time (" + maxDelay + "msecs)");
			}

			FiniteDuration timeout = new FiniteDuration(remaining, TimeUnit.MILLISECONDS);

			try {
				Future<?> result = Patterns.ask(jobManager,
					JobManagerMessages.getRequestNumberRegisteredTaskManager(),
					new Timeout(timeout));
				Integer numTMs = (Integer) Await.result(result, timeout);
				if (numTMs == numExpected) {
					break;
				}
			} catch (TimeoutException e) {
				// ignore and retry
			} catch (ClassCastException e) {
				fail("Wrong response: " + e.getMessage());
			}
		}
	}

	private void printProcessLog(String processName, String log) {
		if (log == null || log.length() == 0) {
			return;
		}

		System.out.println("-----------------------------------------");
		System.out.println(" BEGIN SPAWNED PROCESS LOG FOR " + processName);
		System.out.println("-----------------------------------------");
		System.out.println(log);
		System.out.println("-----------------------------------------");
		System.out.println(" END SPAWNED PROCESS LOG");
		System.out.println("-----------------------------------------");
	}
}
zimmermatt/flink
flink-tests/src/test/java/org/apache/flink/test/recovery/ProcessFailureCancelingITCase.java
Java
apache-2.0
10,977
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2011.09.09 at 01:22:27 PM CEST
//

package test;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.XmlValue;

/**
 * <p>Java class for anonymous complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType>
 *   &lt;complexContent>
 *     &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *       &lt;attribute name="content-type" type="{http://www.w3.org/2001/XMLSchema}anySimpleType" />
 *       &lt;attribute name="seq" type="{http://www.w3.org/2001/XMLSchema}anySimpleType" />
 *     &lt;/restriction>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 *
 *
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "content"
})
@XmlRootElement(name = "fpage")
public class Fpage {

    @XmlValue
    protected String content;
    @XmlAttribute(name = "content-type")
    @XmlSchemaType(name = "anySimpleType")
    protected String contentType;
    @XmlAttribute
    @XmlSchemaType(name = "anySimpleType")
    protected String seq;

    /**
     * Gets the value of the content property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getContent() {
        return content;
    }

    /**
     * Sets the value of the content property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setContent(String value) {
        this.content = value;
    }

    /**
     * Gets the value of the contentType property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getContentType() {
        return contentType;
    }

    /**
     * Sets the value of the contentType property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setContentType(String value) {
        this.contentType = value;
    }

    /**
     * Gets the value of the seq property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getSeq() {
        return seq;
    }

    /**
     * Sets the value of the seq property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setSeq(String value) {
        this.seq = value;
    }
}
BlueBrain/bluima
modules/bluima_xml/src/test/Fpage.java
Java
apache-2.0
3,031
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.oozie.action.hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.oozie.action.ActionExecutorException;
import org.apache.oozie.util.XLog;
import org.jdom.Element;
import org.jdom.Namespace;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;

public abstract class ScriptLanguageActionExecutor extends JavaActionExecutor {

    public ScriptLanguageActionExecutor(String type) {
        super(type);
    }

    @Override
    public List<Class<?>> getLauncherClasses() {
        return null;
    }

    protected boolean shouldAddScriptToCache() {
        return true;
    }

    @Override
    protected Configuration setupLauncherConf(Configuration conf, Element actionXml, Path appPath, Context context)
            throws ActionExecutorException {
        super.setupLauncherConf(conf, actionXml, appPath, context);
        if (shouldAddScriptToCache()) {
            addScriptToCache(conf, actionXml, appPath, context);
        }
        return conf;
    }

    protected void addScriptToCache(Configuration conf, Element actionXml, Path appPath, Context context)
            throws ActionExecutorException {
        Namespace ns = actionXml.getNamespace();
        String script = actionXml.getChild("script", ns).getTextTrim();
        String name = new Path(script).getName();
        String scriptContent = context.getProtoActionConf().get(this.getScriptName());

        Path scriptFile = null;
        if (scriptContent != null) {
            // Create script on filesystem if this is
            // an http submission job;
            FSDataOutputStream dos = null;
            try {
                Path actionPath = context.getActionDir();
                scriptFile = new Path(actionPath, script);
                FileSystem fs = context.getAppFileSystem();
                dos = fs.create(scriptFile);
                dos.write(scriptContent.getBytes(StandardCharsets.UTF_8));
                addToCache(conf, actionPath, script + "#" + name, false);
            } catch (Exception ex) {
                throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "FAILED_OPERATION",
                        XLog.format("Not able to write script file {0} on hdfs", scriptFile), ex);
            } finally {
                try {
                    if (dos != null) {
                        dos.close();
                    }
                } catch (IOException ex) {
                    XLog.getLog(getClass()).error("Error: " + ex.getMessage());
                }
            }
        } else {
            addToCache(conf, appPath, script + "#" + name, false);
        }
    }

    protected abstract String getScriptName();
}
cbaenziger/oozie
core/src/main/java/org/apache/oozie/action/hadoop/ScriptLanguageActionExecutor.java
Java
apache-2.0
3,702
/*
 * Copyright 2000-2009 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.lang.ant.config.execution;

import com.intellij.execution.filters.Filter;
import com.intellij.execution.filters.OpenFileHyperlinkInfo;
import com.intellij.execution.filters.TextConsoleBuilder;
import com.intellij.execution.filters.TextConsoleBuilderFactory;
import com.intellij.execution.process.ProcessHandler;
import com.intellij.execution.process.ProcessOutputTypes;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VirtualFile;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import javax.swing.*;
import java.io.File;
import java.io.OutputStream;

public final class PlainTextView implements AntOutputView {
  private final ConsoleView myConsole;
  private final Project myProject;
  private String myCommandLine;
  private final LightProcessHandler myProcessHandler = new LightProcessHandler();

  public PlainTextView(Project project) {
    myProject = project;
    TextConsoleBuilder builder = TextConsoleBuilderFactory.getInstance().createBuilder(project);
    builder.addFilter(new AntMessageFilter());
    builder.addFilter(new JUnitFilter());
    myConsole = builder.getConsole();
    myConsole.attachToProcess(myProcessHandler);
  }

  public void dispose() {
    Disposer.dispose(myConsole);
  }

  @Override
  public String getId() {
    return "_text_view_";
  }

  @Override
  public JComponent getComponent() {
    return myConsole.getComponent();
  }

  @Override
  @Nullable
  public Object addMessage(AntMessage message) {
    print(message.getText() + "\n", ProcessOutputTypes.STDOUT);
    return null;
  }

  private void print(String text, Key type) {
    myProcessHandler.notifyTextAvailable(text, type);
  }

  public void addMessages(AntMessage[] messages) {
    for (AntMessage message : messages) {
      addMessage(message);
    }
  }

  @Override
  public void addJavacMessage(AntMessage message, String url) {
    if (message.getLine() > 0) {
      String msg = TreeView.printMessage(message, url);
      print(msg, ProcessOutputTypes.STDOUT);
    }
    print(message.getText(), ProcessOutputTypes.STDOUT);
  }

  @Override
  public void addException(AntMessage exception, boolean showFullTrace) {
    String text = exception.getText();
    if (!showFullTrace) {
      int index = text.indexOf("\r\n");
      if (index != -1) {
        text = text.substring(0, index) + "\n";
      }
    }
    print(text, ProcessOutputTypes.STDOUT);
  }

  public void clearAllMessages() {
    myConsole.clear();
  }

  @Override
  public void startBuild(AntMessage message) {
    print(myCommandLine + "\n", ProcessOutputTypes.SYSTEM);
    addMessage(message);
  }

  @Override
  public void buildFailed(AntMessage message) {
    print(myCommandLine + "\n", ProcessOutputTypes.SYSTEM);
    addMessage(message);
  }

  @Override
  public void startTarget(AntMessage message) {
    addMessage(message);
  }

  @Override
  public void startTask(AntMessage message) {
    addMessage(message);
  }

  @Override
  public void finishBuild(String messageText) {
    print("\n" + messageText + "\n", ProcessOutputTypes.SYSTEM);
  }

  @Override
  public void finishTarget() {
  }

  @Override
  public void finishTask() {
  }

  @Override
  @Nullable
  public Object getData(@NotNull String dataId) {
    return null;
  }

  public void setBuildCommandLine(String commandLine) {
    myCommandLine = commandLine;
  }

  private final class JUnitFilter implements Filter {
    @Override
    @Nullable
    public Result applyFilter(String line, int entireLength) {
      HyperlinkUtil.PlaceInfo placeInfo = HyperlinkUtil.parseJUnitMessage(myProject, line);
      if (placeInfo == null) {
        return null;
      }

      int textStartOffset = entireLength - line.length();
      int highlightStartOffset = textStartOffset + placeInfo.getLinkStartIndex();
      int highlightEndOffset = textStartOffset + placeInfo.getLinkEndIndex() + 1;
      OpenFileHyperlinkInfo info = new OpenFileHyperlinkInfo(myProject, placeInfo.getFile(), placeInfo.getLine(), placeInfo.getColumn());
      return new Result(highlightStartOffset, highlightEndOffset, info);
    }
  }

  private final class AntMessageFilter implements Filter {
    @Override
    public Result applyFilter(String line, int entireLength) {
      int afterLineNumberIndex = line.indexOf(": "); // end of file_name_and_line_number sequence
      if (afterLineNumberIndex == -1) {
        return null;
      }

      String fileAndLineNumber = line.substring(0, afterLineNumberIndex);
      int index = fileAndLineNumber.lastIndexOf(':');
      if (index == -1) {
        return null;
      }

      final String fileName = fileAndLineNumber.substring(0, index);
      String lineNumberStr = fileAndLineNumber.substring(index + 1).trim();
      int lineNumber;
      try {
        lineNumber = Integer.parseInt(lineNumberStr);
      }
      catch (NumberFormatException e) {
        return null;
      }

      final VirtualFile file = LocalFileSystem.getInstance().findFileByPath(fileName.replace(File.separatorChar, '/'));
      if (file == null) {
        return null;
      }

      int textStartOffset = entireLength - line.length();
      int highlightEndOffset = textStartOffset + afterLineNumberIndex;
      OpenFileHyperlinkInfo info = new OpenFileHyperlinkInfo(myProject, file, lineNumber - 1);
      return new Result(textStartOffset, highlightEndOffset, info);
    }
  }

  private static class LightProcessHandler extends ProcessHandler {
    @Override
    protected void destroyProcessImpl() {
      throw new UnsupportedOperationException();
    }

    @Override
    protected void detachProcessImpl() {
      throw new UnsupportedOperationException();
    }

    @Override
    public boolean detachIsDefault() {
      return false;
    }

    @Override
    @Nullable
    public OutputStream getProcessInput() {
      return null;
    }
  }
}
mdanielwork/intellij-community
plugins/ant/src/com/intellij/lang/ant/config/execution/PlainTextView.java
Java
apache-2.0
6,686
/*
 * Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.orientechnologies.orient.core.command.script;

import java.util.Map;
import java.util.Map.Entry;

import javax.script.Bindings;
import javax.script.Invocable;
import javax.script.ScriptContext;
import javax.script.ScriptEngine;
import javax.script.ScriptException;

import com.orientechnologies.orient.core.Orient;
import com.orientechnologies.orient.core.command.OCommandExecutorAbstract;
import com.orientechnologies.orient.core.command.OCommandRequest;
import com.orientechnologies.orient.core.db.record.ODatabaseRecordTx;
import com.orientechnologies.orient.core.metadata.function.OFunction;

/**
 * Executes Script Commands.
 *
 * @see OCommandScript
 * @author Luca Garulli
 *
 */
public class OCommandExecutorFunction extends OCommandExecutorAbstract {

  protected OCommandFunction request;

  public OCommandExecutorFunction() {
  }

  @SuppressWarnings("unchecked")
  public OCommandExecutorFunction parse(final OCommandRequest iRequest) {
    request = (OCommandFunction) iRequest;
    return this;
  }

  public Object execute(final Map<Object, Object> iArgs) {
    return executeInContext(null, iArgs);
  }

  public Object executeInContext(final Map<String, Object> iContext, final Map<Object, Object> iArgs) {
    parserText = request.getText();

    final ODatabaseRecordTx db = (ODatabaseRecordTx) getDatabase();
    final OFunction f = db.getMetadata().getFunctionLibrary().getFunction(parserText);

    final OScriptManager scriptManager = Orient.instance().getScriptManager();
    final ScriptEngine scriptEngine = scriptManager.getEngine(f.getLanguage());
    final Bindings binding = scriptManager.bind(scriptEngine, db, iContext, iArgs);

    try {
      scriptEngine.setBindings(binding, ScriptContext.ENGINE_SCOPE);

      // COMPILE FUNCTION LIBRARY
      scriptEngine.eval(scriptManager.getLibrary(db, f.getLanguage()));

      if (scriptEngine instanceof Invocable) {
        // INVOKE AS FUNCTION. PARAMS ARE PASSED BY POSITION
        final Invocable invocableEngine = (Invocable) scriptEngine;
        Object[] args = null;
        if (iArgs != null) {
          args = new Object[iArgs.size()];
          int i = 0;
          for (Entry<Object, Object> arg : iArgs.entrySet())
            args[i++] = arg.getValue();
        }
        return invocableEngine.invokeFunction(parserText, args);
      } else {
        // INVOKE THE CODE SNIPPET
        return scriptEngine.eval(invokeFunction(f, iArgs.values().toArray()), binding);
      }
    } catch (ScriptException e) {
      throw new OCommandScriptException("Error on execution of the script", request.getText(), e.getColumnNumber(), e);
    } catch (NoSuchMethodException e) {
      throw new OCommandScriptException("Error on execution of the script", request.getText(), 0, e);
    } finally {
      scriptManager.unbind(binding);
    }
  }

  public boolean isIdempotent() {
    return false;
  }

  @Override
  protected void throwSyntaxErrorException(String iText) {
    throw new OCommandScriptException("Error on execution of the script: " + iText, request.getText(), 0);
  }

  protected String invokeFunction(final OFunction f, Object[] iArgs) {
    final StringBuilder code = new StringBuilder();
    code.append(f.getName());
    code.append('(');
    int i = 0;
    for (Object a : iArgs) {
      if (i++ > 0)
        code.append(',');
      code.append(a);
    }
    code.append(");");
    return code.toString();
  }
}
redox/OrientDB
core/src/main/java/com/orientechnologies/orient/core/command/script/OCommandExecutorFunction.java
Java
apache-2.0
4,208
import java.util.Scanner;

/**
 * @author Oleg Cherednik
 * @since 13.07.2018
 */
public class Solution {

    static int palindromeIndex(String s) {
        for (int i = 0, j = s.length() - 1; i < j; i++, j--) {
            if (s.charAt(i) == s.charAt(j))
                continue;

            for (int k = i, m = j - 1; k < m; k++, m--)
                if (s.charAt(k) != s.charAt(m))
                    return i;

            return j;
        }

        return -1;
    }

    private static final Scanner scanner = new Scanner(System.in);

    public static void main(String[] args) {
        int q = scanner.nextInt();
        scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");

        for (int qItr = 0; qItr < q; qItr++) {
            String s = scanner.nextLine();

            int result = palindromeIndex(s);

            System.out.println(String.valueOf(result));
        }

        scanner.close();
    }
}
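
// Worked example (editor's addition, derived from the code above): on the first
// mismatch at (i, j) the inner loop tests whether s with s[j] dropped, i.e.
// s[i..j-1], is a palindrome; if so it returns j, otherwise it returns i.
//
//   palindromeIndex("aaab") -> first mismatch at (0, 3): 'a' vs 'b';
//                              "aaa" is a palindrome, so return j = 3.
//   palindromeIndex("baa")  -> first mismatch at (0, 2): 'b' vs 'a';
//                              "ba" is not a palindrome, so return i = 0.
//   palindromeIndex("aba")  -> no mismatch, return -1 (already a palindrome).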
oleg-cherednik/hackerrank
Algorithms/Strings/Palindrome Index/Solution.java
Java
apache-2.0
921
package de.newsarea.homecockpit.connector.facade.registration.util;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.net.URL;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;

public final class ClassLoaderHelper {

    private static Logger log = LoggerFactory.getLogger(ClassLoaderHelper.class);

    private ClassLoaderHelper() { }

    public static Constructor<?> determineFirstConstructor(Class<?> clazz) {
        try {
            for (Constructor<?> constructor : clazz.getConstructors()) {
                return constructor;
            }
        } catch (SecurityException e) {
            log.error(e.getMessage(), e);
        }
        return null;
    }

    public static Constructor<?> determineConstructorByArgumentTypes(Class<?> clazz, Class<?>[] argumentTypes) {
        try {
            for (Constructor<?> constructor : clazz.getConstructors()) {
                if (isAssignableFrom(constructor, argumentTypes)) {
                    return constructor;
                }
            }
        } catch (SecurityException e) {
            log.error(e.getMessage(), e);
        }
        return null;
    }

    private static boolean isAssignableFrom(Constructor<?> constructor, Class<?>[] argumentTypes) {
        Class<?>[] constructorArgTypes = constructor.getParameterTypes();
        if (constructorArgTypes.length != argumentTypes.length) {
            return false;
        }
        // ~
        for (int i = 0; i < argumentTypes.length; i++) {
            if (!argumentTypes[i].isAssignableFrom(constructorArgTypes[i])) {
                return false;
            }
        }
        return true;
    }

    public static List<Class<?>> determineClasses(String packageName) throws ClassNotFoundException, IOException {
        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
        assert classLoader != null;
        String path = packageName.replace('.', '/');
        Enumeration<URL> resources = classLoader.getResources(path);
        List<File> dirs = new ArrayList<>();
        while (resources.hasMoreElements()) {
            URL resource = resources.nextElement();
            dirs.add(new File(resource.getFile().replaceAll("%20", " ")));
        }
        ArrayList<Class<?>> classes = new ArrayList<>();
        for (File directory : dirs) {
            classes.addAll(findClasses(directory, packageName));
        }
        return classes;
    }

    public static List<Class<?>> findClasses(File directory, String packageName) throws ClassNotFoundException {
        List<Class<?>> classes = new ArrayList<>();
        if (!directory.exists()) {
            return classes;
        }
        File[] files = directory.listFiles();
        for (File file : files) {
            if (file.isDirectory()) {
                assert !file.getName().contains(".");
                classes.addAll(findClasses(file, packageName + "." + file.getName()));
            } else if (file.getName().endsWith(".class")) {
                classes.add(Class.forName(packageName + '.' + file.getName().substring(0, file.getName().length() - 6)));
            }
        }
        return classes;
    }

    public static Method determineSetterMethod(Class<?> clazz, String name) {
        for (Method method : clazz.getMethods()) {
            if (method.getName().equalsIgnoreCase("set" + name)) {
                return method;
            }
        }
        return null;
    }
}
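
// Usage sketch (editor's addition; the scanned package and argument types are
// hypothetical, chosen only for illustration): lists every class in a package
// whose public constructor accepts (String, int).
class ClassLoaderHelperUsageSketch {
    public static void main(String[] args) throws Exception {
        for (Class<?> clazz : ClassLoaderHelper.determineClasses("de.newsarea.homecockpit.connector")) {
            Constructor<?> ctor = ClassLoaderHelper.determineConstructorByArgumentTypes(
                    clazz, new Class<?>[] { String.class, int.class });
            if (ctor != null) {
                // determineConstructorByArgumentTypes returns null when no constructor matches
                System.out.println(clazz.getName() + " <- " + ctor);
            }
        }
    }
}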
RBernhardt/homecockpit-connectors
connectors-facade/src/main/java/de/newsarea/homecockpit/connector/facade/registration/util/ClassLoaderHelper.java
Java
apache-2.0
3,378
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @author Upendra Jariya
 * @sponsor Douglas Johnson
 * @version 1.0
 * @since 2014-11-10
 */
package tools.datasync.utils;

import org.apache.commons.codec.digest.DigestUtils;
import org.apache.log4j.Logger;

import tools.datasync.api.utils.HashGenerator;

public class Md5HashGenerator implements HashGenerator {

    private static Md5HashGenerator instance = null;
    private static Logger LOG = Logger.getLogger(Md5HashGenerator.class.getName());

    private Md5HashGenerator() {
    }

    public static synchronized Md5HashGenerator getInstance() {
        if (instance == null) {
            instance = new Md5HashGenerator();
        }
        return instance;
    }

    public String generate(String data) {
        try {
            byte[] digest = DigestUtils.md5(data);
            return (DigestUtils.md5Hex(digest));
        } catch (Exception e) {
            LOG.warn("Error while generating checksum on value [" + data + "]", e);
            return null;
        }
    }

    public boolean validate(String data, String hash) {
        String newHash = generate(data);
        return newHash.equals(hash);
    }
}
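
// Usage sketch (editor's addition; the sample value is hypothetical): round-trips
// a value through generate()/validate(). Note that generate() hex-encodes the MD5
// of the MD5 digest (a double hash), so hashes from plain MD5 tools will not match
// unless they apply the same scheme.
class Md5HashGeneratorUsageSketch {
    public static void main(String[] args) {
        Md5HashGenerator hasher = Md5HashGenerator.getInstance();
        String hash = hasher.generate("row-1|2014-11-10|42");
        // validate() simply re-generates the hash and compares the two strings
        System.out.println(hash + " valid=" + hasher.validate("row-1|2014-11-10|42", hash));
    }
}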
datasynctools/sync-tools-prototype
data-sync-tools-core/src/main/java/tools/datasync/utils/Md5HashGenerator.java
Java
apache-2.0
1,858
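A quick sketch of how the generator might be used to checksum a record during sync; the payload string is made up:

HashGenerator hasher = Md5HashGenerator.getInstance();
String hash = hasher.generate("row-42|first_name=Ada");              // hex checksum, null on failure
boolean unchanged = hasher.validate("row-42|first_name=Ada", hash);  // true while data matches

Note that validate() throws a NullPointerException when generate() returns null for the data, so callers may want to guard for that case.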
package com.github.database.rider.core.script;

import org.assertj.core.api.SoftAssertions;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

import javax.script.ScriptException;

import static org.assertj.core.api.Java6Assertions.assertThat;

public class ScriptEngineManagerWrapperTest {

    @Rule
    public ExpectedException exceptionRule = ExpectedException.none();

    private ScriptEngineManagerWrapper scriptEngineManager = ScriptEngineManagerWrapper.getInstance();

    private SoftAssertions softly = new SoftAssertions();

    @Before
    public void init() {
        softly = new SoftAssertions();
    }

    @Test
    public void shouldGetJsScriptResult() throws ScriptException {
        Object scriptResult = ScriptEngineManagerWrapper.getInstance().getScriptResult("js: 1+1");
        assertThat(scriptResult).isEqualTo(2);
    }

    @Test
    public void shouldGetGroovyScriptResult() throws ScriptException {
        Object scriptResult = scriptEngineManager.getScriptResult("groovy: 1+1");
        assertThat(scriptResult).isEqualTo(2);
    }

    @Test
    public void shouldNotGetScriptResultFromUnknownEngine() throws ScriptException {
        exceptionRule.expect(RuntimeException.class);
        exceptionRule.expectMessage("Could not find script engine by name 'kotlin'");
        scriptEngineManager.getScriptResult("kotlin: 1+1");
    }

    @Test
    public void shouldAssertValueGreaterThanZero() throws ScriptException {
        String script = "js:(value > 0)";
        softly.assertThat(scriptEngineManager.getScriptAssert(script, 2)).as("js script with value=2").isTrue();
        softly.assertThat(scriptEngineManager.getScriptAssert(script, 0)).as("js script with value=0").isFalse();
        softly.assertThat(scriptEngineManager.getScriptAssert(script, -1)).as("js script with value=-1").isFalse();

        script = "groovy:(value > 0)";
        softly.assertThat(scriptEngineManager.getScriptAssert(script, 2)).as("groovy script with value=2").isTrue();
        softly.assertThat(scriptEngineManager.getScriptAssert(script, 0)).as("groovy script with value=0").isFalse();
        softly.assertThat(scriptEngineManager.getScriptAssert(script, -1)).as("groovy script with value=-1").isFalse();
        softly.assertAll();
    }

    @Test
    public void shouldAssertNullValue() throws ScriptException {
        SoftAssertions soft = new SoftAssertions();
        String script = "js:(value == null)";
        soft.assertThat(scriptEngineManager.getScriptAssert(script, null)).as("js script with null value").isTrue();
        soft.assertThat(scriptEngineManager.getScriptAssert(script, 1)).as("js script with non-null value").isFalse();

        script = "groovy:(value == null)";
        soft.assertThat(scriptEngineManager.getScriptAssert(script, null)).as("groovy script with null value").isTrue();
        soft.assertThat(scriptEngineManager.getScriptAssert(script, 1)).as("groovy script with non-null value").isFalse();
        soft.assertAll();
    }

    @Test
    public void shouldAssertContainsValue() throws ScriptException {
        SoftAssertions soft = new SoftAssertions();
        String script = "js:(value.contains('dbunit'))";
        soft.assertThat(scriptEngineManager.getScriptAssert(script, "dbunit rules")).as("js script with 'dbunit rules' value").isTrue();
        soft.assertThat(scriptEngineManager.getScriptAssert(script, "database rider rules")).as("js script 'database rider' value").isFalse();

        script = "groovy:(value.contains('dbunit'))";
        soft.assertThat(scriptEngineManager.getScriptAssert(script, "dbunit rules")).as("groovy script with 'dbunit rules' value").isTrue();
        soft.assertThat(scriptEngineManager.getScriptAssert(script, "database rider rules")).as("groovy script 'database rider' value").isFalse();
        soft.assertAll();
    }

    @Test
    public void shouldNotAssertInvalidScript() throws ScriptException {
        exceptionRule.expect(ScriptException.class);
        exceptionRule.expectMessage("value.includes is not a function");
        String script = "js:(value.includes('dbunit'))";
        scriptEngineManager.getScriptAssert(script, "dbunit rules");
    }
}
database-rider/database-rider
rider-core/src/test/java/com/github/database/rider/core/script/ScriptEngineManagerWrapperTest.java
Java
apache-2.0
4,246
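Based on the API exercised by the test above, a hedged sketch of using the wrapper outside of tests; the `js:`/`groovy:` prefix selects the engine:

ScriptEngineManagerWrapper scripts = ScriptEngineManagerWrapper.getInstance();
Object sum = scripts.getScriptResult("groovy: 2 * 21");                        // 42
boolean inRange = scripts.getScriptAssert("js:(value >= 0 && value < 10)", 5); // true

Both calls can throw javax.script.ScriptException, so callers must handle or declare it.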
/*
 * Copyright 2018 Google LLC.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.cloud.tools.jib.api;

import java.util.Objects;

/** Holds credentials (username and password). */
public class Credential {

  // If the username is set to <token>, the secret would be a refresh token.
  // https://github.com/docker/cli/blob/master/docs/reference/commandline/login.md#credential-helper-protocol
  public static final String OAUTH2_TOKEN_USER_NAME = "<token>";

  /**
   * Gets a {@link Credential} configured with a username and password.
   *
   * @param username the username
   * @param password the password
   * @return a new {@link Credential}
   */
  public static Credential from(String username, String password) {
    return new Credential(username, password);
  }

  private final String username;
  private final String password;

  private Credential(String username, String password) {
    this.username = username;
    this.password = password;
  }

  /**
   * Gets the username.
   *
   * @return the username
   */
  public String getUsername() {
    return username;
  }

  /**
   * Gets the password.
   *
   * @return the password
   */
  public String getPassword() {
    return password;
  }

  /**
   * Check whether this credential is an OAuth 2.0 refresh token.
   *
   * @return true if this credential is an OAuth 2.0 refresh token.
   */
  public boolean isOAuth2RefreshToken() {
    return OAUTH2_TOKEN_USER_NAME.equals(username);
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof Credential)) {
      return false;
    }
    Credential otherCredential = (Credential) other;
    return username.equals(otherCredential.username) && password.equals(otherCredential.password);
  }

  @Override
  public int hashCode() {
    return Objects.hash(username, password);
  }

  @Override
  public String toString() {
    return username + ":" + password;
  }
}
GoogleContainerTools/jib
jib-core/src/main/java/com/google/cloud/tools/jib/api/Credential.java
Java
apache-2.0
2,498
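Usage sketch for the Credential class; the values are made up:

Credential basic = Credential.from("ci-bot", "s3cret");
Credential oauth = Credential.from(Credential.OAUTH2_TOKEN_USER_NAME, "refresh-token-value");
boolean useTokenFlow = oauth.isOAuth2RefreshToken(); // true: the secret is a refresh token

Note that toString() prints the password in clear text, so instances should not be logged.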
/*
 * Copyright (C) 2016 Mkhytar Mkhoian
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.justplay1.shoppist.interactor.units;

import com.justplay1.shoppist.executor.PostExecutionThread;
import com.justplay1.shoppist.executor.ThreadExecutor;
import com.justplay1.shoppist.models.UnitModel;
import com.justplay1.shoppist.repository.UnitsRepository;

import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

import java.util.Collections;
import java.util.List;

import static com.justplay1.shoppist.ModelUtil.createFakeUnitModel;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;

public class UpdateUnitsTest {

    private UpdateUnits useCase;

    @Mock
    private ThreadExecutor mockThreadExecutor;
    @Mock
    private PostExecutionThread mockPostExecutionThread;
    @Mock
    private UnitsRepository mockUnitsRepository;

    private List<UnitModel> models;

    @Before
    public void setUp() {
        MockitoAnnotations.initMocks(this);
        useCase = new UpdateUnits(mockUnitsRepository, mockThreadExecutor, mockPostExecutionThread);
        models = Collections.singletonList(createFakeUnitModel());
        useCase.init(models);
    }

    @Test
    public void updateUnitsUseCase_HappyCase() {
        useCase.buildUseCaseObservable().subscribe();

        verify(mockUnitsRepository).update(models);
        verifyNoMoreInteractions(mockUnitsRepository);
        verifyZeroInteractions(mockThreadExecutor);
        verifyZeroInteractions(mockPostExecutionThread);
    }
}
justplay1/Shoppist
domain/src/test/java/com/justplay1/shoppist/interactor/units/UpdateUnitsTest.java
Java
apache-2.0
2,190
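From the interactions the test verifies, production wiring would look roughly like this sketch; the dependency instances and the changedUnits list are assumed to come from the app's injection setup:

UpdateUnits updateUnits = new UpdateUnits(unitsRepository, threadExecutor, postExecutionThread);
updateUnits.init(changedUnits);                   // List<UnitModel> to persist
updateUnits.buildUseCaseObservable().subscribe(); // triggers unitsRepository.update(changedUnits)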
package fi.rivermouth.talous.auth;

import java.util.ArrayList;
import java.util.List;

import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.authority.SimpleGrantedAuthority;

import fi.rivermouth.talous.domain.User;

public class UserAuthenticationManager implements AuthenticationManager {

    @Override
    public Authentication authenticate(Authentication authentication) {
        List<GrantedAuthority> grantedAuths = new ArrayList<GrantedAuthority>();
        grantedAuths.add(new SimpleGrantedAuthority(User.ROLE));
        return new UsernamePasswordAuthenticationToken(authentication.getName(), authentication.getCredentials(), grantedAuths);
    }
}
Rivermouth/Rivermouth-Talous
src/main/java/fi/rivermouth/talous/auth/UserAuthenticationManager.java
Java
apache-2.0
906
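A small sketch of what this manager does with an incoming token; the credentials are placeholders:

AuthenticationManager manager = new UserAuthenticationManager();
Authentication request = new UsernamePasswordAuthenticationToken("alice", "secret");
Authentication result = manager.authenticate(request); // authenticated token carrying User.ROLE

As written, the manager grants User.ROLE without checking the credentials at all, so any actual verification has to happen elsewhere in the security filter chain.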
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.rest.action.admin.indices;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.cluster.metadata.DataStreamAlias;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestBuilderListener;

import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.HEAD;

/**
 * The REST handler for get alias and head alias APIs.
 */
public class RestGetAliasesAction extends BaseRestHandler {

    @Override
    public List<Route> routes() {
        return List.of(
            new Route(GET, "/_alias"),
            new Route(GET, "/_aliases"),
            new Route(GET, "/_alias/{name}"),
            new Route(HEAD, "/_alias/{name}"),
            new Route(GET, "/{index}/_alias"),
            new Route(HEAD, "/{index}/_alias"),
            new Route(GET, "/{index}/_alias/{name}"),
            new Route(HEAD, "/{index}/_alias/{name}"));
    }

    @Override
    public String getName() {
        return "get_aliases_action";
    }

    static RestResponse buildRestResponse(boolean aliasesExplicitlyRequested, String[] requestedAliases,
                                          ImmutableOpenMap<String, List<AliasMetadata>> responseAliasMap,
                                          Map<String, List<DataStreamAlias>> dataStreamAliases,
                                          XContentBuilder builder) throws Exception {
        final Set<String> indicesToDisplay = new HashSet<>();
        final Set<String> returnedAliasNames = new HashSet<>();
        for (final ObjectObjectCursor<String, List<AliasMetadata>> cursor : responseAliasMap) {
            for (final AliasMetadata aliasMetadata : cursor.value) {
                if (aliasesExplicitlyRequested) {
                    // only display indices that have aliases
                    indicesToDisplay.add(cursor.key);
                }
                returnedAliasNames.add(aliasMetadata.alias());
            }
        }
        // compute explicitly requested aliases that are not returned in the result
        final SortedSet<String> missingAliases = new TreeSet<>();
        // first wildcard index, leading "-" as an alias name after this index means
        // that it is an exclusion
        int firstWildcardIndex = requestedAliases.length;
        for (int i = 0; i < requestedAliases.length; i++) {
            if (Regex.isSimpleMatchPattern(requestedAliases[i])) {
                firstWildcardIndex = i;
                break;
            }
        }
        for (int i = 0; i < requestedAliases.length; i++) {
            if (Metadata.ALL.equals(requestedAliases[i]) || Regex.isSimpleMatchPattern(requestedAliases[i])
                    || (i > firstWildcardIndex && requestedAliases[i].charAt(0) == '-')) {
                // only explicitly requested aliases will be called out as missing (404)
                continue;
            }
            // check if aliases[i] is subsequently excluded
            int j = Math.max(i + 1, firstWildcardIndex);
            for (; j < requestedAliases.length; j++) {
                if (requestedAliases[j].charAt(0) == '-') {
                    // this is an exclude pattern
                    if (Regex.simpleMatch(requestedAliases[j].substring(1), requestedAliases[i])
                            || Metadata.ALL.equals(requestedAliases[j].substring(1))) {
                        // aliases[i] is excluded by aliases[j]
                        break;
                    }
                }
            }
            if (j == requestedAliases.length) {
                // explicitly requested aliases[i] is not excluded by any subsequent "-" wildcard in expression
                if (false == returnedAliasNames.contains(requestedAliases[i])) {
                    // aliases[i] is not in the result set
                    missingAliases.add(requestedAliases[i]);
                }
            }
        }
        final RestStatus status;
        builder.startObject();
        {
            if (missingAliases.isEmpty()) {
                status = RestStatus.OK;
            } else {
                status = RestStatus.NOT_FOUND;
                final String message;
                if (missingAliases.size() == 1) {
                    message = String.format(Locale.ROOT, "alias [%s] missing",
                        Strings.collectionToCommaDelimitedString(missingAliases));
                } else {
                    message = String.format(Locale.ROOT, "aliases [%s] missing",
                        Strings.collectionToCommaDelimitedString(missingAliases));
                }
                builder.field("error", message);
                builder.field("status", status.getStatus());
            }
            for (final var entry : responseAliasMap) {
                if (aliasesExplicitlyRequested == false || (aliasesExplicitlyRequested && indicesToDisplay.contains(entry.key))) {
                    builder.startObject(entry.key);
                    {
                        builder.startObject("aliases");
                        {
                            for (final AliasMetadata alias : entry.value) {
                                AliasMetadata.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS);
                            }
                        }
                        builder.endObject();
                    }
                    builder.endObject();
                }
            }
            for (var entry : dataStreamAliases.entrySet()) {
                builder.startObject(entry.getKey());
                {
                    builder.startObject("aliases");
                    {
                        for (DataStreamAlias alias : entry.getValue()) {
                            builder.startObject(alias.getName());
                            builder.endObject();
                        }
                    }
                    builder.endObject();
                }
                builder.endObject();
            }
        }
        builder.endObject();
        return new BytesRestResponse(status, builder);
    }

    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        // The TransportGetAliasesAction was improved to do the same post processing as is happening here.
        // We can't remove this logic yet to support mixed clusters. We should be able to remove this logic here
        // when 8.0 becomes the new version in the master branch.
        final boolean namesProvided = request.hasParam("name");
        final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name");
        final GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases);
        final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
        getAliasesRequest.indices(indices);
        getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions()));
        getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local()));
        // we may want to move this logic to TransportGetAliasesAction but it is based on the original provided aliases, which will
        // not always be available there (they may get replaced so retrieving request.aliases is not quite the same).
        return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestBuilderListener<GetAliasesResponse>(channel) {
            @Override
            public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder builder) throws Exception {
                return buildRestResponse(namesProvided, aliases, response.getAliases(), response.getDataStreamAliases(), builder);
            }
        });
    }
}
robin13/elasticsearch
server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java
Java
apache-2.0
9,110
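A standalone sketch of the "-" exclusion rule implemented above, simplified to the core idea: an explicitly requested alias is dropped from the 404 check when a later "-" pattern matches it (the real code additionally starts the scan at Math.max(i + 1, firstWildcardIndex) and special-cases Metadata.ALL). simpleMatch here is a naive stand-in for org.elasticsearch.common.regex.Regex.simpleMatch and only handles "*":

static boolean isExcluded(String alias, String[] requested, int firstWildcardIndex) {
    for (int j = firstWildcardIndex; j < requested.length; j++) {
        if (requested[j].startsWith("-") && simpleMatch(requested[j].substring(1), alias)) {
            return true; // a subsequent "-" pattern cancels the request for this alias
        }
    }
    return false;
}

static boolean simpleMatch(String pattern, String value) {
    // naive "*" wildcard translation; the real implementation does not use java.util.regex
    return value.matches(java.util.regex.Pattern.quote(pattern).replace("*", "\\E.*\\Q"));
}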
package com.ejlchina.searcher.implement;

import com.ejlchina.searcher.*;
import com.ejlchina.searcher.bean.InheritType;

import java.lang.reflect.Field;
import java.util.*;
import java.lang.reflect.Modifier;
import java.util.concurrent.ConcurrentHashMap;

/***
 * Default bean metadata resolver.
 * @author Troy.Zhou @ 2021-10-30
 * @since v3.0.0
 */
public class DefaultMetaResolver implements MetaResolver {

    private final Map<Class<?>, BeanMeta<?>> cache = new ConcurrentHashMap<>();

    private SnippetResolver snippetResolver = new DefaultSnippetResolver();

    private DbMapping dbMapping;

    public DefaultMetaResolver() {
        this(new DefaultDbMapping());
    }

    public DefaultMetaResolver(DbMapping dbMapping) {
        this.dbMapping = dbMapping;
    }

    @Override
    public <T> BeanMeta<T> resolve(Class<T> beanClass) {
        @SuppressWarnings("unchecked")
        BeanMeta<T> beanMeta = (BeanMeta<T>) cache.get(beanClass);
        if (beanMeta != null) {
            return beanMeta;
        }
        synchronized (cache) {
            beanMeta = resolveMetadata(beanClass);
            cache.put(beanClass, beanMeta);
            return beanMeta;
        }
    }

    protected <T> BeanMeta<T> resolveMetadata(Class<T> beanClass) {
        DbMapping.Table table = dbMapping.table(beanClass);
        if (table == null) {
            throw new SearchException("The class [" + beanClass.getName()
                    + "] can not be searched, because it can not be resolved by " + dbMapping.getClass());
        }
        BeanMeta<T> beanMeta = new BeanMeta<>(beanClass, table.getDataSource(),
                snippetResolver.resolve(table.getTables()),
                snippetResolver.resolve(table.getJoinCond()),
                snippetResolver.resolve(table.getGroupBy()),
                table.isDistinct());
        // resolve the fields
        Field[] fields = getBeanFields(beanClass);
        for (int index = 0; index < fields.length; index++) {
            Field field = fields[index];
            if (Modifier.isStatic(field.getModifiers())) {
                continue;
            }
            DbMapping.Column column = dbMapping.column(beanClass, fields[index]);
            if (column == null) {
                continue;
            }
            field.setAccessible(true);
            SqlSnippet snippet = snippetResolver.resolve(column.getFieldSql());
            // Note: Oracle column aliases must not start with an underscore
            FieldMeta fieldMeta = new FieldMeta(beanMeta, field, snippet, "c_" + index, column.isConditional(), column.getOnlyOn());
            beanMeta.addFieldMeta(field.getName(), fieldMeta);
        }
        if (beanMeta.getFieldCount() == 0) {
            throw new SearchException("[" + beanClass.getName() + "] is not a valid SearchBean, because there is no field mapping to database.");
        }
        return beanMeta;
    }

    protected Field[] getBeanFields(Class<?> beanClass) {
        InheritType iType = dbMapping.inheritType(beanClass);
        List<Field> fieldList = new ArrayList<>();
        Set<String> fieldNames = new HashSet<>();
        while (beanClass != Object.class) {
            for (Field field : beanClass.getDeclaredFields()) {
                String name = field.getName();
                int modifiers = field.getModifiers();
                if (field.isSynthetic() || Modifier.isStatic(modifiers)
                        || Modifier.isTransient(modifiers) || fieldNames.contains(name)) {
                    continue;
                }
                fieldList.add(field);
                fieldNames.add(name);
            }
            if (iType != InheritType.FIELD && iType != InheritType.ALL) {
                break;
            }
            beanClass = beanClass.getSuperclass();
        }
        return fieldList.toArray(new Field[0]);
    }

    public SnippetResolver getSnippetResolver() {
        return snippetResolver;
    }

    public void setSnippetResolver(SnippetResolver snippetResolver) {
        this.snippetResolver = Objects.requireNonNull(snippetResolver);
    }

    public DbMapping getDbMapping() {
        return dbMapping;
    }

    public void setDbMapping(DbMapping dbMapping) {
        this.dbMapping = Objects.requireNonNull(dbMapping);
    }
}
ejlchina/bean-searcher
bean-searcher/src/main/java/com/ejlchina/searcher/implement/DefaultMetaResolver.java
Java
apache-2.0
4,320
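A short sketch of resolving metadata once and reading it back; User is a placeholder search-bean class, and the default DbMapping is assumed:

MetaResolver resolver = new DefaultMetaResolver();
BeanMeta<User> meta = resolver.resolve(User.class);  // cached after the first call
System.out.println(meta.getFieldCount() + " searchable fields");

Repeated resolve() calls for the same class return the cached BeanMeta, so the reflection cost is paid once per bean type.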
/*
 * Copyright 2017-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.lettuce.core.cluster.api.async;

import java.util.List;
import java.util.Set;

import io.lettuce.core.GeoAddArgs;
import io.lettuce.core.GeoArgs;
import io.lettuce.core.GeoCoordinates;
import io.lettuce.core.GeoRadiusStoreArgs;
import io.lettuce.core.GeoSearch;
import io.lettuce.core.GeoValue;
import io.lettuce.core.GeoWithin;
import io.lettuce.core.Value;

/**
 * Asynchronous executed commands on a node selection for the Geo-API.
 *
 * @author Mark Paluch
 * @since 4.0
 * @generated by io.lettuce.apigenerator.CreateAsyncNodeSelectionClusterApi
 */
public interface NodeSelectionGeoAsyncCommands<K, V> {

    /**
     * Single geo add.
     *
     * @param key the key of the geo set.
     * @param longitude the longitude coordinate according to WGS84.
     * @param latitude the latitude coordinate according to WGS84.
     * @param member the member to add.
     * @return Long integer-reply the number of elements that were added to the set.
     */
    AsyncExecutions<Long> geoadd(K key, double longitude, double latitude, V member);

    /**
     * Single geo add.
     *
     * @param key the key of the geo set.
     * @param longitude the longitude coordinate according to WGS84.
     * @param latitude the latitude coordinate according to WGS84.
     * @param member the member to add.
     * @param args additional arguments.
     * @return Long integer-reply the number of elements that were added to the set.
     * @since 6.1
     */
    AsyncExecutions<Long> geoadd(K key, double longitude, double latitude, V member, GeoAddArgs args);

    /**
     * Multi geo add.
     *
     * @param key the key of the geo set.
     * @param lngLatMember triplets of double longitude, double latitude and V member.
     * @return Long integer-reply the number of elements that were added to the set.
     */
    AsyncExecutions<Long> geoadd(K key, Object... lngLatMember);

    /**
     * Multi geo add.
     *
     * @param key the key of the geo set.
     * @param values {@link io.lettuce.core.GeoValue} values to add.
     * @return Long integer-reply the number of elements that were added to the set.
     * @since 6.1
     */
    AsyncExecutions<Long> geoadd(K key, GeoValue<V>... values);

    /**
     * Multi geo add.
     *
     * @param key the key of the geo set.
     * @param args additional arguments.
     * @param lngLatMember triplets of double longitude, double latitude and V member.
     * @return Long integer-reply the number of elements that were added to the set.
     * @since 6.1
     */
    AsyncExecutions<Long> geoadd(K key, GeoAddArgs args, Object... lngLatMember);

    /**
     * Multi geo add.
     *
     * @param key the key of the geo set.
     * @param args additional arguments.
     * @param values {@link io.lettuce.core.GeoValue} values to add.
     * @return Long integer-reply the number of elements that were added to the set.
     * @since 6.1
     */
    AsyncExecutions<Long> geoadd(K key, GeoAddArgs args, GeoValue<V>... values);

    /**
     * Retrieve distance between points {@code from} and {@code to}. If one or more elements are missing {@code null} is
     * returned. Defaults to meters, otherwise according to {@code unit}.
     *
     * @param key the key of the geo set.
     * @param from from member.
     * @param to to member.
     * @param unit distance unit.
     * @return distance between points {@code from} and {@code to}. If one or more elements are missing {@code null} is
     *         returned.
     */
    AsyncExecutions<Double> geodist(K key, V from, V to, GeoArgs.Unit unit);

    /**
     * Retrieve Geohash strings representing the position of one or more elements in a sorted set value representing a
     * geospatial index.
     *
     * @param key the key of the geo set.
     * @param members the members.
     * @return bulk reply Geohash strings in the order of {@code members}. Returns {@code null} if a member is not found.
     */
    AsyncExecutions<List<Value<String>>> geohash(K key, V... members);

    /**
     * Get geo coordinates for the {@code members}.
     *
     * @param key the key of the geo set.
     * @param members the members.
     * @return a list of {@link GeoCoordinates}s representing the x,y position of each element specified in the arguments. For
     *         missing elements {@code null} is returned.
     */
    AsyncExecutions<List<GeoCoordinates>> geopos(K key, V... members);

    /**
     * Retrieve members selected by distance with the center of {@code longitude} and {@code latitude}.
     *
     * @param key the key of the geo set.
     * @param longitude the longitude coordinate according to WGS84.
     * @param latitude the latitude coordinate according to WGS84.
     * @param distance radius distance.
     * @param unit distance unit.
     * @return bulk reply.
     */
    AsyncExecutions<Set<V>> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit);

    /**
     * Retrieve members selected by distance with the center of {@code longitude} and {@code latitude}.
     *
     * @param key the key of the geo set.
     * @param longitude the longitude coordinate according to WGS84.
     * @param latitude the latitude coordinate according to WGS84.
     * @param distance radius distance.
     * @param unit distance unit.
     * @param geoArgs args to control the result.
     * @return nested multi-bulk reply. The {@link GeoWithin} contains only fields which were requested by {@link GeoArgs}.
     */
    AsyncExecutions<List<GeoWithin<V>>> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit,
            GeoArgs geoArgs);

    /**
     * Perform a {@link #georadius(Object, double, double, double, GeoArgs.Unit, GeoArgs)} query and store the results in a
     * sorted set.
     *
     * @param key the key of the geo set.
     * @param longitude the longitude coordinate according to WGS84.
     * @param latitude the latitude coordinate according to WGS84.
     * @param distance radius distance.
     * @param unit distance unit.
     * @param geoRadiusStoreArgs args to store either the resulting elements with their distance or the resulting elements with
     *        their locations a sorted set.
     * @return Long integer-reply the number of elements in the result.
     */
    AsyncExecutions<Long> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit,
            GeoRadiusStoreArgs<K> geoRadiusStoreArgs);

    /**
     * Retrieve members selected by distance with the center of {@code member}. The member itself is always contained in the
     * results.
     *
     * @param key the key of the geo set.
     * @param member reference member.
     * @param distance radius distance.
     * @param unit distance unit.
     * @return set of members.
     */
    AsyncExecutions<Set<V>> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit);

    /**
     * Retrieve members selected by distance with the center of {@code member}. The member itself is always contained in the
     * results.
     *
     * @param key the key of the geo set.
     * @param member reference member.
     * @param distance radius distance.
     * @param unit distance unit.
     * @param geoArgs args to control the result.
     * @return nested multi-bulk reply. The {@link GeoWithin} contains only fields which were requested by {@link GeoArgs}.
     */
    AsyncExecutions<List<GeoWithin<V>>> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit, GeoArgs geoArgs);

    /**
     * Perform a {@link #georadiusbymember(Object, Object, double, GeoArgs.Unit, GeoArgs)} query and store the results in a
     * sorted set.
     *
     * @param key the key of the geo set.
     * @param member reference member.
     * @param distance radius distance.
     * @param unit distance unit.
     * @param geoRadiusStoreArgs args to store either the resulting elements with their distance or the resulting elements with
     *        their locations a sorted set.
     * @return Long integer-reply the number of elements in the result.
     */
    AsyncExecutions<Long> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit,
            GeoRadiusStoreArgs<K> geoRadiusStoreArgs);

    /**
     * Retrieve members selected by distance with the center of {@code reference} and the search {@code predicate}.
     * Use {@link GeoSearch} to create reference and predicate objects.
     *
     * @param key the key of the geo set.
     * @param reference the reference member or longitude/latitude coordinates.
     * @param predicate the bounding box or radius to search in.
     * @return bulk reply.
     * @since 6.1
     */
    AsyncExecutions<Set<V>> geosearch(K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate);

    /**
     * Retrieve members selected by distance with the center of {@code reference} and the search {@code predicate}.
     * Use {@link GeoSearch} to create reference and predicate objects.
     *
     * @param key the key of the geo set.
     * @param reference the reference member or longitude/latitude coordinates.
     * @param predicate the bounding box or radius to search in.
     * @param geoArgs args to control the result.
     * @return nested multi-bulk reply. The {@link GeoWithin} contains only fields which were requested by {@link GeoArgs}.
     * @since 6.1
     */
    AsyncExecutions<List<GeoWithin<V>>> geosearch(K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate,
            GeoArgs geoArgs);

    /**
     * Perform a {@link #geosearch(Object, GeoSearch.GeoRef, GeoSearch.GeoPredicate, GeoArgs)} query and store the results in a
     * sorted set.
     *
     * @param destination the destination where to store results.
     * @param key the key of the geo set.
     * @param reference the reference member or longitude/latitude coordinates.
     * @param predicate the bounding box or radius to search in.
     * @param geoArgs args to control the result.
     * @param storeDist stores the items in a sorted set populated with their distance from the center of the circle or box,
     *        as a floating-point number, in the same unit specified for that shape.
     * @return Long integer-reply the number of elements in the result.
     * @since 6.1
     */
    AsyncExecutions<Long> geosearchstore(K destination, K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate,
            GeoArgs geoArgs, boolean storeDist);

}
lettuce-io/lettuce-core
src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionGeoAsyncCommands.java
Java
apache-2.0
11,078
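A hedged sketch of invoking these commands against all master nodes. Obtaining the cluster connection and the masters().commands() chain are assumptions based on Lettuce's cluster node-selection API rather than anything shown in this interface:

NodeSelectionGeoAsyncCommands<String, String> geo = connection.async().masters().commands();
AsyncExecutions<Long> added = geo.geoadd("geo:offices", 8.6638775, 49.5282537, "Weinheim");
// 'added' holds one asynchronous Long result per selected node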
/**
 * Copyright (C) 2015 The Gravitee team (http://gravitee.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.gravitee.gateway.services.sync.cache;

import com.hazelcast.core.HazelcastInstance;

import org.springframework.beans.factory.annotation.Autowired;

import java.util.Map;

/**
 * @author David BRASSELY (david.brassely at graviteesource.com)
 * @author GraviteeSource Team
 */
public final class CacheManager {

    @Autowired
    private HazelcastInstance hzInstance;

    public <K, V> Map<K, V> getCache(String name) {
        return hzInstance.getMap(name);
    }
}
gravitee-io/gateway
gravitee-gateway-services/gravitee-gateway-services-sync/src/main/java/io/gravitee/gateway/services/sync/cache/CacheManager.java
Java
apache-2.0
1,110
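Usage sketch; the cache name and value type are made up:

Map<String, Object> apiCache = cacheManager.getCache("apis"); // backed by a Hazelcast IMap
apiCache.put("api-1", apiDefinition);
Object cached = apiCache.get("api-1");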
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.codecommit.model;

import javax.annotation.Generated;

/**
 * <p>
 * The number of approvals required for the approval rule exceeds the maximum number allowed.
 * </p>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class MaximumNumberOfApprovalsExceededException extends com.amazonaws.services.codecommit.model.AWSCodeCommitException {
    private static final long serialVersionUID = 1L;

    /**
     * Constructs a new MaximumNumberOfApprovalsExceededException with the specified error message.
     *
     * @param message
     *        Describes the error encountered.
     */
    public MaximumNumberOfApprovalsExceededException(String message) {
        super(message);
    }
}
aws/aws-sdk-java
aws-java-sdk-codecommit/src/main/java/com/amazonaws/services/codecommit/model/MaximumNumberOfApprovalsExceededException.java
Java
apache-2.0
1,318
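A hedged sketch of handling this exception; the request and call site are hypothetical and only illustrate where the error surfaces:

try {
    codeCommitClient.updatePullRequestApprovalRuleContent(request); // hypothetical call site
} catch (MaximumNumberOfApprovalsExceededException e) {
    // the rule would exceed the allowed number of approvals - reduce it and retry
}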
package org.myrobotlab.framework;

import static org.myrobotlab.framework.StatusLevel.DEBUG;
import static org.myrobotlab.framework.StatusLevel.ERROR;
import static org.myrobotlab.framework.StatusLevel.INFO;
import static org.myrobotlab.framework.StatusLevel.SUCCESS;
import static org.myrobotlab.framework.StatusLevel.WARN;

import java.io.IOException;
import java.io.PrintWriter;
import java.io.Serializable;
import java.io.StringWriter;
import java.util.Objects;

import org.myrobotlab.codec.CodecUtils;
import org.myrobotlab.logging.Level;
import org.myrobotlab.logging.LoggerFactory;
import org.myrobotlab.logging.LoggingFactory;
import org.slf4j.Logger;

/**
 * Goal is to have a very simple Pojo with only a few (native Java helper
 * methods) WARNING !!! - this class used to extend Exception or Throwable - but
 * the gson serializer would stack overflow with self reference issue
 *
 * TODO - allow radix tree searches for "keys" ???
 *
 */
public class Status implements Serializable { // extends Exception {

    private static final long serialVersionUID = 1L;

    public final static Logger log = LoggerFactory.getLogger(Status.class);

    public String name; // service name ???

    /**
     * FIXME - should probably be an enum now that serialization mostly works now
     * with enums [debug|info|warn|error|success] - yes the last part is different
     * than "logging" but could still be a status...
     */
    public String level;

    /**
     * The key is the non changing part and good identifier of what went on... For
     * Exceptions I would recommend the Exception.class.getSimpleName() for the
     * key, whilst the "detail" is for "changing" detail. This becomes important
     * when Stati are aggregated - and humans are interested in "high" counts of
     * specific Status while the details are not important unless diagnosing one.
     *
     * Violating Servo limits is a good example - "key" can be "Outside servo
     * limits". The key can contain spaces and punctuation - the important part is
     * that it is STATIC.
     *
     * "details" contain dynamic specifics - for example: "key":"Outside servo
     * limits", "detail":"servo01 moveTo(75) limit is greater than 100"
     */
    public String key;

    /**
     * Dynamic or verbose explanation of the status. e.g. "detail":"servo01
     * moveTo(75) limit is greater than 100" or complete stack trace from an
     * exception
     */
    public String detail;

    /**
     * optional source of status
     */
    public Object source;

    // --- static creation of typed Status objects ----
    public static Status debug(String format, Object... args) {
        Status status = new Status(String.format(format, args));
        status.level = DEBUG;
        return status;
    }

    public static Status error(Exception e) {
        Status s = new Status(e);
        s.level = ERROR;
        return s;
    }

    public static Status error(String msg) {
        Status s = new Status(msg);
        s.level = ERROR;
        return s;
    }

    public static Status error(String format, Object... args) {
        Status status = new Status(String.format(format, args));
        status.level = ERROR;
        return status;
    }

    public static Status warn(String msg) {
        Status s = new Status(msg);
        s.level = WARN; // fixed: was ERROR (copy/paste slip) - warn() must produce a WARN status
        return s;
    }

    public static Status warn(String format, Object... args) {
        Status status = new Status(String.format(format, args));
        status.level = WARN;
        return status;
    }

    public static Status info(String msg) {
        Status s = new Status(msg);
        s.level = INFO;
        return s;
    }

    public static Status info(String format, Object... args) {
        String formattedInfo = String.format(format, args);
        Status status = new Status(formattedInfo);
        status.level = INFO;
        return status;
    }

    public final static String stackToString(final Throwable e) {
        StringWriter sw;
        try {
            sw = new StringWriter();
            PrintWriter pw = new PrintWriter(sw);
            e.printStackTrace(pw);
        } catch (Exception e2) {
            return "bad stackToString";
        }
        return "------\r\n" + sw.toString() + "------\r\n";
    }

    public Status(Exception e) {
        this.level = ERROR;
        StringWriter sw;
        try {
            sw = new StringWriter();
            PrintWriter pw = new PrintWriter(sw);
            e.printStackTrace(pw);
            detail = sw.toString();
        } catch (Exception e2) {
        }
        this.key = String.format("%s - %s", e.getClass().getSimpleName(), e.getMessage());
    }

    public Status(Status s) {
        if (s == null) {
            return;
        }
        this.name = s.name;
        this.level = s.level;
        this.key = s.key;
        this.detail = s.detail;
    }

    /**
     * for minimal amount of information error is assumed, and info is detail of
     * an ERROR
     *
     * @param detail
     *          d
     */
    public Status(String detail) {
        this.level = ERROR;
        this.detail = detail;
    }

    public Status(String name, String level, String key, String detail) {
        this.name = name;
        this.level = level;
        this.key = key;
        this.detail = detail;
    }

    public boolean isDebug() {
        return DEBUG.equals(level);
    }

    public boolean isError() {
        return ERROR.equals(level);
    }

    public boolean isInfo() {
        return INFO.equals(level);
    }

    public boolean isWarn() {
        return WARN.equals(level);
    }

    @Override
    public String toString() {
        StringBuffer sb = new StringBuffer();
        if (name != null) {
            sb.append(name);
            sb.append(" ");
        }
        if (level != null) {
            sb.append(level);
            sb.append(" ");
        }
        if (key != null) {
            sb.append(key);
            sb.append(" ");
        }
        if (detail != null) {
            sb.append(detail);
        }
        return sb.toString();
    }

    static public final Status newInstance(String name, String level, String key, String detail) {
        Status s = new Status(name, level, key, detail);
        return s;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this)
            return true;
        if (!(o instanceof Status)) {
            return false;
        }
        Status status = (Status) o;
        return Objects.equals(name, status.name) && Objects.equals(level, status.level)
                && Objects.equals(key, status.key) && Objects.equals(detail, status.detail);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, level, key, detail);
    }

    public static void main(String[] args) throws IOException, InterruptedException {
        LoggingFactory.init(Level.INFO);
        Status test = new Status("i am pessimistic");
        // Status subTest = new Status("i am sub pessimistic");
        // test.add(subTest);
        String json = CodecUtils.toJson(test);
        Status z = CodecUtils.fromJson(json, Status.class);
        log.info(json);
        log.info(z.toString());
    }

    public static Status success() {
        Status s = new Status(SUCCESS);
        s.level = SUCCESS;
        return s;
    }

    public boolean isSuccess() {
        return SUCCESS.equals(level);
    }

    public static Status success(String detail) {
        Status s = new Status(SUCCESS);
        s.level = SUCCESS;
        s.detail = detail;
        return s;
    }
}
MyRobotLab/myrobotlab
src/main/java/org/myrobotlab/framework/Status.java
Java
apache-2.0
7,254
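Typical construction and inspection, using only the factory methods defined above:

Status status = Status.error("Outside servo limits");
if (status.isError()) {
    log.error(status.toString());
}
Status ok = Status.success("servo01 calibrated"); // level SUCCESS with detail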
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.tez.dag.api;

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.tez.common.TezCommonUtils;
import org.apache.tez.dag.api.records.DAGProtos.TezEntityDescriptorProto;
import org.junit.Assert;
import org.junit.Test;

public class TestDagTypeConverters {

    @Test(timeout = 5000)
    public void testTezEntityDescriptorSerialization() throws IOException {
        UserPayload payload = UserPayload.create(ByteBuffer.wrap(new String("Foobar").getBytes()), 100);
        String historytext = "Bar123";
        EntityDescriptor entityDescriptor = InputDescriptor.create("inputClazz").setUserPayload(payload)
            .setHistoryText(historytext);
        TezEntityDescriptorProto proto = DagTypeConverters.convertToDAGPlan(entityDescriptor);

        Assert.assertEquals(payload.getVersion(), proto.getTezUserPayload().getVersion());
        Assert.assertArrayEquals(payload.deepCopyAsArray(), proto.getTezUserPayload().getUserPayload().toByteArray());
        Assert.assertTrue(proto.hasHistoryText());
        Assert.assertNotEquals(historytext, proto.getHistoryText());
        Assert.assertEquals(historytext, new String(
            TezCommonUtils.decompressByteStringToByteArray(proto.getHistoryText())));

        // Ensure that the history text is not deserialized
        InputDescriptor inputDescriptor = DagTypeConverters.convertInputDescriptorFromDAGPlan(proto);
        Assert.assertNull(inputDescriptor.getHistoryText());
    }
}
Altiscale/tez
tez-api/src/test/java/org/apache/tez/dag/api/TestDagTypeConverters.java
Java
apache-2.0
2,256
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.component.jetty;

import java.util.Map;

import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.impl.JndiRegistry;
import org.junit.Test;

/**
 * @version
 */
public class HttpFilterCamelHeadersTest extends BaseJettyTest {

    @Test
    public void testFilterCamelHeaders() throws Exception {
        Exchange out = template.send("http://localhost:{{port}}/test/filter", new Processor() {
            public void process(Exchange exchange) throws Exception {
                exchange.getIn().setBody("Claus");
                exchange.getIn().setHeader("bar", 123);
            }
        });

        assertNotNull(out);
        assertEquals("Hi Claus", out.getOut().getBody(String.class));

        // there should be no internal Camel headers
        // except for the response code
        Map<String, Object> headers = out.getOut().getHeaders();
        for (String key : headers.keySet()) {
            if (!key.equalsIgnoreCase(Exchange.HTTP_RESPONSE_CODE)) {
                assertTrue("Should not contain any Camel internal headers", !key.toLowerCase().startsWith("camel"));
            } else {
                assertEquals(200, headers.get(Exchange.HTTP_RESPONSE_CODE));
            }
        }
    }

    @Override
    protected JndiRegistry createRegistry() throws Exception {
        JndiRegistry jndi = super.createRegistry();
        jndi.bind("foo", new MyFooBean());
        return jndi;
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                from("jetty:http://localhost:{{port}}/test/filter").beanRef("foo");
            }
        };
    }

    public static class MyFooBean {

        public String hello(String name) {
            return "Hi " + name;
        }
    }
}
everttigchelaar/camel-svn
components/camel-jetty/src/test/java/org/apache/camel/component/jetty/HttpFilterCamelHeadersTest.java
Java
apache-2.0
2,847
package org.lightadmin.core.view.preparer;

import org.apache.tiles.AttributeContext;
import org.apache.tiles.context.TilesRequestContext;
import org.lightadmin.core.config.domain.DomainTypeAdministrationConfiguration;

public class FormViewPreparer extends ConfigurationAwareViewPreparer {

    @Override
    protected void execute(final TilesRequestContext tilesContext, final AttributeContext attributeContext,
                           final DomainTypeAdministrationConfiguration configuration) {
        super.execute(tilesContext, attributeContext, configuration);

        addAttribute(attributeContext, "fields", configuration.getFormViewFragment().getFields());
    }
}
pramoth/light-admin
lightadmin-core/src/main/java/org/lightadmin/core/view/preparer/FormViewPreparer.java
Java
apache-2.0
652
/*
 * Licensed to The Apereo Foundation under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 *
 * The Apereo Foundation licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.unitime.timetable.solver.exam.ui;

import java.io.PrintWriter;
import java.io.Serializable;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.Vector;

import javax.servlet.jsp.JspWriter;

import org.cpsolver.exam.model.Exam;
import org.cpsolver.exam.model.ExamDistributionConstraint;
import org.cpsolver.exam.model.ExamInstructor;
import org.cpsolver.exam.model.ExamPlacement;
import org.cpsolver.exam.model.ExamRoom;
import org.cpsolver.exam.model.ExamRoomPlacement;
import org.cpsolver.exam.model.ExamStudent;
import org.cpsolver.ifs.extension.AssignedValue;
import org.cpsolver.ifs.extension.ConflictStatistics;
import org.cpsolver.ifs.model.Constraint;
import org.dom4j.Element;
import org.unitime.timetable.model.PreferenceLevel;
import org.unitime.timetable.solver.ui.TimetableInfo;
import org.unitime.timetable.webutil.timegrid.ExamGridTable;

/**
 * @author Tomas Muller
 */
public class ExamConflictStatisticsInfo implements TimetableInfo, Serializable {
    private static final long serialVersionUID = 7L;
    public static int sVersion = 7; // to be able to do some changes in the future
    public static final int sConstraintTypeRoom = 1;
    public static final int sConstraintTypeInstructor = 2;
    public static final int sConstraintTypeGroup = 3;
    public static final int sConstraintTypeStudent = 4;
    private Hashtable iVariables = new Hashtable();

    public Collection getCBS() { return iVariables.values(); }
    public CBSVariable getCBS(Long classId) { return (CBSVariable)iVariables.get(classId); }

    public void load(ConflictStatistics cbs) {
        load(cbs, null);
    }

    public ExamConflictStatisticsInfo getConflictStatisticsSubInfo(Vector variables) {
        ExamConflictStatisticsInfo ret = new ExamConflictStatisticsInfo();
        for (Enumeration e=variables.elements();e.hasMoreElements();) {
            Exam exam = (Exam)e.nextElement();
            CBSVariable var = (CBSVariable)iVariables.get(exam.getId());
            if (var!=null)
                ret.iVariables.put(exam.getId(),var);
        }
        return ret;
    }

    public void merge(ExamConflictStatisticsInfo info) {
        if (info!=null) iVariables.putAll(info.iVariables);
    }

    public void load(ConflictStatistics cbs, Long examId) {
        iVariables.clear();
        for (Iterator i1=cbs.getNoGoods().entrySet().iterator();i1.hasNext();) {
            Map.Entry entry = (Map.Entry)i1.next();
            AssignedValue assignment = (AssignedValue)entry.getKey();
            ExamPlacement placement = (ExamPlacement)assignment.getValue();
            Exam exam = (Exam)placement.variable();
            if (examId!=null && !examId.equals(exam.getId())) continue;
            CBSVariable var = (CBSVariable)iVariables.get(exam.getId());
            if (var==null) {
                String pref = PreferenceLevel.sNeutral; //SolverGridModel.hardConflicts2pref(exam,null);
                var = new CBSVariable(exam.getId(),exam.getName(),pref);
                iVariables.put(exam.getId(),var);
            }
            Vector roomIds = new Vector();
            Vector roomNames = new Vector();
            Vector roomPrefs = new Vector();
            for (Iterator i=new TreeSet(placement.getRoomPlacements()).iterator();i.hasNext();) {
                ExamRoomPlacement room = (ExamRoomPlacement)i.next();
                roomIds.add(room.getId());
                roomNames.add(room.getName());
                roomPrefs.add(exam.getRoomPlacements().size()==placement.getRoomPlacements().size()?PreferenceLevel.sIntLevelRequired:room.getPenalty(placement.getPeriod()));
            }
            CBSValue val = new CBSValue(var,
                    placement.getPeriod().getId(),
                    placement.getPeriod().getDayStr()+" "+placement.getPeriod().getTimeStr(),
                    (exam.getPeriodPlacements().size()==1?PreferenceLevel.sIntLevelRequired:placement.getPeriodPlacement().getPenalty()),
                    roomIds,
                    roomNames,
                    roomPrefs);
            var.values().add(val);
            List noGoods = (List)entry.getValue();
            Hashtable constr2assignments = new Hashtable();
            for (Iterator e2=noGoods.iterator();e2.hasNext();) {
                AssignedValue noGood = (AssignedValue)e2.next();
                if (noGood.getConstraint()==null) continue;
                Vector aaa = (Vector)constr2assignments.get(noGood.getConstraint());
                if (aaa == null) {
                    aaa = new Vector();
                    constr2assignments.put(noGood.getConstraint(), aaa);
                }
                aaa.addElement(noGood);
            }
            for (Iterator i2=constr2assignments.entrySet().iterator();i2.hasNext();) {
                Map.Entry entry2 = (Map.Entry)i2.next();
                Constraint constraint = (Constraint)entry2.getKey();
                Vector noGoodsThisConstraint = (Vector)entry2.getValue();
                CBSConstraint con = null;
                if (constraint instanceof ExamRoom) {
                    con = new CBSConstraint(val, sConstraintTypeRoom, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired);
                } else if (constraint instanceof ExamInstructor) {
                    con = new CBSConstraint(val, sConstraintTypeInstructor, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired);
                } else if (constraint instanceof ExamStudent) {
                    con = new CBSConstraint(val, sConstraintTypeStudent, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired);
                } else if (constraint instanceof ExamDistributionConstraint) {
                    con = new CBSConstraint(val, sConstraintTypeGroup, constraint.getId(), ((ExamDistributionConstraint)constraint).getTypeString(), (constraint.isHard()?PreferenceLevel.sRequired:PreferenceLevel.int2prolog(((ExamDistributionConstraint)constraint).getWeight())));
                } else {
                    con = new CBSConstraint(val, -1, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired);
                }
                val.constraints().add(con);
                for (Enumeration e3=noGoodsThisConstraint.elements();e3.hasMoreElements();) {
                    AssignedValue ass = (AssignedValue)e3.nextElement();
                    ExamPlacement p = (ExamPlacement)ass.getValue();
                    Exam x = (Exam)p.variable();
                    String pr = PreferenceLevel.sNeutral; //SolverGridModel.hardConflicts2pref(x,p);
                    Vector aroomIds = new Vector();
                    Vector aroomNames = new Vector();
                    Vector aroomPrefs = new Vector();
                    for (Iterator i=new TreeSet(p.getRoomPlacements()).iterator();i.hasNext();) {
                        ExamRoomPlacement room = (ExamRoomPlacement)i.next();
                        aroomIds.add(room.getId());
                        aroomNames.add(room.getName());
                        aroomPrefs.add(x.getRoomPlacements().size()==p.getRoomPlacements().size()?PreferenceLevel.sIntLevelRequired:room.getPenalty(p.getPeriod()));
                    }
                    CBSAssignment a = new CBSAssignment(con,
                            x.getId(),
                            x.getName(),
                            pr,
                            p.getPeriod().getId(),
                            p.getPeriod().getDayStr()+" "+p.getPeriod().getTimeStr(),
                            (x.getPeriodPlacements().size()==1?PreferenceLevel.sIntLevelRequired:p.getPeriodPlacement().getPenalty()),
                            aroomIds,
                            aroomNames,
                            aroomPrefs);
                    con.assignments().add(a);
                    a.incCounter((int)ass.getCounter(0));
                }
            }
        }
    }

    public void load(Element root) {
        int version = Integer.parseInt(root.attributeValue("version"));
        if (version==sVersion) {
            iVariables.clear();
            for (Iterator i1=root.elementIterator("var");i1.hasNext();) {
                CBSVariable var = new CBSVariable((Element)i1.next());
                iVariables.put(Long.valueOf(var.getId()),var);
            }
        }
    }

    public void save(Element root) {
        root.addAttribute("version", String.valueOf(sVersion));
        for (Iterator i1=iVariables.values().iterator();i1.hasNext();) {
            ((CBSVariable)i1.next()).save(root.addElement("var"));
        }
    }

    public static interface Counter {
        public int getCounter();
        public void incCounter(int value);
    }

    public static class CBSVariable implements Counter, Comparable, Serializable {
        private static final long serialVersionUID = 1L;
        int iCounter = 0;
        long iExamId;
        String iName;
        HashSet iValues = new HashSet();
        CBSConstraint iConstraint = null;
        String iPref = null;

        CBSVariable(long examId, String name, String pref) {
            iExamId = examId;
            iName = name;
            iPref = pref;
        }

        CBSVariable(CBSConstraint constraint, long classId, String examId, String pref) {
            iConstraint = constraint;
            iExamId = classId;
            iName = examId;
            iPref = pref;
        }

        CBSVariable(Element element) {
            iExamId = Long.parseLong(element.attributeValue("exam"));
            iName = element.attributeValue("name");
            iPref = element.attributeValue("pref");
            for (Iterator i=element.elementIterator("val");i.hasNext();)
                iValues.add(new CBSValue(this,(Element)i.next()));
        }

        public long getId() { return iExamId; }
        public int getCounter() { return iCounter; }
        public String getName() { return iName; }
        public String getPref() { return iPref; }
        public void incCounter(int value) {
            iCounter+=value;
            if (iConstraint!=null) iConstraint.incCounter(value);
        }
        public Set values() { return iValues; }
        public int hashCode() { return (Long.valueOf(iExamId)).hashCode(); }
        public boolean equals(Object o) {
            if (o==null || !(o instanceof CBSVariable)) return false;
            return ((CBSVariable)o).getId()==getId();
        }
        public int compareTo(Object o) {
            if (o==null || !(o instanceof CBSVariable)) return -1;
            int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSVariable)o).getCounter()));
            if (ret!=0) return ret;
            return toString().compareTo(o.toString());
        }
        public String toString() { return iName; }
        public void save(Element element) {
            element.addAttribute("exam",String.valueOf(iExamId));
            element.addAttribute("name", iName);
            if (iPref!=null) element.addAttribute("pref", iPref);
            for (Iterator i=iValues.iterator();i.hasNext();)
                ((CBSValue)i.next()).save(element.addElement("val"));
        }
    }

    public static class CBSValue implements Counter, Comparable, Serializable {
        private static final long serialVersionUID = 1L;
        int iCounter = 0;
        Long iPeriodId;
        String iPeriodName;
        int iPeriodPref;
        Vector iRoomIds;
        String iInstructorName = null;
        Vector iRoomNames;
        Vector iRoomPrefs;
        CBSVariable iVariable = null;
        HashSet iConstraints = new HashSet();
        HashSet iAssignments = new HashSet();
        int iLength;

        CBSValue(CBSVariable var, Long periodId, String periodName, int periodPref, Vector roomIds, Vector roomNames, Vector roomPrefs) {
            iVariable = var;
            iRoomIds = roomIds;
            iRoomNames = roomNames;
            iRoomPrefs = roomPrefs;
            iPeriodId = periodId;
            iPeriodName = periodName;
            iPeriodPref = periodPref;
        }

        CBSValue(CBSVariable var, Element element) {
            iVariable = var;
            iPeriodId = Long.valueOf(element.attributeValue("period"));
            iPeriodName = element.attributeValue("name");
            iPeriodPref = Integer.parseInt(element.attributeValue("pref"));
            iRoomIds = new Vector();
            iRoomNames = new Vector();
            iRoomPrefs = new Vector();
            for (Iterator i=element.elementIterator("room");i.hasNext();) {
                Element r = (Element)i.next();
                iRoomIds.addElement(Integer.valueOf(r.attributeValue("id")));
                iRoomNames.addElement(r.attributeValue("name"));
                iRoomPrefs.addElement(Integer.valueOf(r.attributeValue("pref")));
            }
            for (Iterator i=element.elementIterator("cons");i.hasNext();)
                iConstraints.add(new CBSConstraint(this,(Element)i.next()));
        }

        public CBSVariable variable() { return iVariable; }
        public Long getPeriodId() { return iPeriodId; }
        public String getPeriodName() { return iPeriodName; }
        public int getPeriodPref() { return iPeriodPref; }
        public Vector getRoomNames() { return iRoomNames; }
        public Vector getRoomPrefs() { return iRoomPrefs; }
        public String toString() { return iPeriodName+" "+iRoomNames; }
        public int getCounter() { return iCounter; }
        public void incCounter(int value) {
            iCounter+=value;
            if (iVariable!=null) iVariable.incCounter(value);
        }
        public Vector getRoomIds() { return iRoomIds; }
        public Set constraints() { return iConstraints; }
        public Set assignments() { return iAssignments; }
        public int hashCode() { return combine(iPeriodId.hashCode(), (iRoomIds==null?0:iRoomIds.hashCode())); }
        public boolean equals(Object o) {
            if (o==null || !(o instanceof CBSValue)) return false;
            CBSValue v = (CBSValue)o;
            return v.getRoomIds().equals(getRoomIds()) && v.getPeriodId().equals(getPeriodId());
        }
        public int compareTo(Object o) {
            if (o==null || !(o instanceof CBSValue)) return -1;
            int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSValue)o).getCounter()));
            if (ret!=0) return ret;
            return toString().compareTo(o.toString());
        }
        public void save(Element element) {
            element.addAttribute("period",String.valueOf(iPeriodId));
            element.addAttribute("pref",String.valueOf(iPeriodPref));
            element.addAttribute("name", iPeriodName);
            for (int i=0;i<iRoomIds.size();i++) {
                Element r = element.addElement("room");
                r.addAttribute("id",iRoomIds.elementAt(i).toString());
                r.addAttribute("name",iRoomNames.elementAt(i).toString());
                r.addAttribute("pref",iRoomPrefs.elementAt(i).toString());
            }
            for (Iterator i=iConstraints.iterator();i.hasNext();)
                ((CBSConstraint)i.next()).save(element.addElement("cons"));
        }
    }

    public static class CBSConstraint implements Counter, Comparable, Serializable {
        private static final long serialVersionUID = 1L;
        CBSValue iValue;
        int iCounter = 0;
        long iId;
        String iName = null;
        int iType;
        HashSet iAssignments = new HashSet();
        HashSet iVariables = new HashSet();
        String iPref;

        CBSConstraint(int type, long id, String name, String pref) {
            iId = id;
            iType = type;
            iName = name;
            iPref = pref;
        }

        CBSConstraint(CBSValue value, int type, long id, String name, String pref) {
            iId = id;
            iType = type;
            iValue = value;
            iName = name;
            iPref = pref;
        }

        CBSConstraint(CBSValue value, Element element) {
            iValue = value;
            iId = Integer.parseInt(element.attributeValue("id"));
            iType = Integer.parseInt(element.attributeValue("type"));
            iName = element.attributeValue("name");
            iPref = element.attributeValue("pref");
            for (Iterator i=element.elementIterator("nogood");i.hasNext();)
                iAssignments.add(new CBSAssignment(this,(Element)i.next()));
        }

        public long getId() { return iId; }
        public int getType() { return iType; }
        public String getName() { return iName; }
        public CBSValue value() { return iValue; }
        public Set variables() { return iVariables; }
        public Set assignments() { return iAssignments; }
        public String getPref() { return iPref; }
        public int getCounter() { return iCounter; }
        public void incCounter(int value) {
            iCounter+=value;
            if (iValue!=null) iValue.incCounter(value);
        }
        public int hashCode() { return combine((int)iId,iType); }
        public boolean equals(Object o) {
            if (o==null || !(o instanceof CBSConstraint)) return false;
            CBSConstraint c = (CBSConstraint)o;
            return c.getId()==getId() && c.getType()==getType();
        }
        public int compareTo(Object o) {
            if (o==null || !(o instanceof CBSConstraint)) return -1;
            int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSConstraint)o).getCounter()));
            if (ret!=0) return ret;
            return toString().compareTo(o.toString());
        }
        public void save(Element element) {
            element.addAttribute("id",String.valueOf(iId));
            element.addAttribute("type",String.valueOf(iType));
            if (iName!=null) element.addAttribute("name", iName);
            if (iPref!=null) element.addAttribute("pref", iPref);
            for (Iterator i=iAssignments.iterator();i.hasNext();)
                ((CBSAssignment)i.next()).save(element.addElement("nogood"));
        }
    }

    public static class CBSAssignment implements Counter, Comparable, Serializable {
        private static final long serialVersionUID = 1L;
        CBSConstraint iConstraint;
        Long iExamId;
        String iExamName;
        String iExamPref;
        Long iPeriodId;
        String iPeriodName;
        int iPeriodPref;
        int iCounter = 0;
        Vector iRoomIds;
        Vector iRoomPrefs;
        Vector iRoomNames;

        CBSAssignment(CBSConstraint constraint, Long examId, String examName, String examPref, Long periodId, String periodName, int periodPref, Vector roomIds, Vector roomNames, Vector roomPrefs) {
            iExamId = examId;
            iExamName = examName;
            iExamPref = examPref;
            iPeriodId = periodId;
            iPeriodName = periodName;
            iPeriodPref = periodPref;
            iRoomIds = roomIds;
            iRoomNames = roomNames;
            iRoomPrefs = roomPrefs;
            iConstraint = constraint;
        }

        CBSAssignment(CBSConstraint constraint, Element element) {
            iConstraint = constraint;
            iExamId = Long.valueOf(element.attributeValue("exam"));
            iExamName = element.attributeValue("name");
            iExamPref = element.attributeValue("pref");
            iRoomIds = new Vector();
            iRoomNames = new Vector();
            iRoomPrefs = new Vector();
            for (Iterator i=element.elementIterator("room");i.hasNext();) {
                Element r = (Element)i.next();
                iRoomIds.addElement(Integer.valueOf(r.attributeValue("id")));
                iRoomNames.addElement(r.attributeValue("name"));
                iRoomPrefs.addElement(Integer.valueOf(r.attributeValue("pref")));
            }
            iPeriodId = Long.valueOf(element.attributeValue("period"));
            iPeriodName = element.attributeValue("periodName");
            iPeriodPref = Integer.parseInt(element.attributeValue("periodPref"));
            incCounter(Integer.parseInt(element.attributeValue("cnt")));
        }

        public Long getId() { return iExamId; }
        public CBSConstraint getConstraint() { return iConstraint; }
        public String getName() { return iExamName; }
        public String getPref() { return iExamPref; }
        public Long getPeriodId() { return iPeriodId; }
        public String getPeriodName() { return iPeriodName; }
        public int getPeriodPref() { return iPeriodPref; }
        public String toString() { return iExamName+" "+iPeriodName+" "+iRoomNames; }
        public Vector getRoomNames() { return iRoomNames; }
        public Vector getRoomIds() { return iRoomIds; }
        public Vector getRoomPrefs() { return iRoomPrefs; }
        public int hashCode() { return combine(iExamId.hashCode(),combine(iRoomIds.hashCode(),iPeriodId.hashCode())); }
        public int getCounter() { return iCounter; }
        public void incCounter(int value) {
            iCounter+=value;
            if (iConstraint!=null) iConstraint.incCounter(value);
        }
        public
boolean equals(Object o) { if (o==null || !(o instanceof CBSAssignment)) return false; CBSAssignment a = (CBSAssignment)o; return a.getId().equals(getId()) && a.getRoomIds().equals(getRoomIds()) && a.getPeriodId().equals(getPeriodId()); } public int compareTo(Object o) { if (o==null || !(o instanceof CBSAssignment)) return -1; int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSAssignment)o).getCounter())); if (ret!=0) return ret; return toString().compareTo(o.toString()); } public void save(Element element) { element.addAttribute("exam",String.valueOf(iExamId)); element.addAttribute("name",iExamName); element.addAttribute("pref",iExamPref); for (int i=0;i<iRoomIds.size();i++) { Element r = element.addElement("room"); r.addAttribute("id",iRoomIds.elementAt(i).toString()); r.addAttribute("name",iRoomNames.elementAt(i).toString()); r.addAttribute("pref",iRoomPrefs.elementAt(i).toString()); } element.addAttribute("period", String.valueOf(iPeriodId)); element.addAttribute("periodName", iPeriodName); element.addAttribute("periodPref", String.valueOf(iPeriodPref)); element.addAttribute("cnt", String.valueOf(iCounter)); } } private static int combine(int a, int b) { int ret = 0; for (int i=0;i<15;i++) ret = ret | ((a & (1<<i))<<i) | ((b & (1<<i))<<(i+1)); return ret; } //--------- toHtml ------------------------------------------------- private static String IMG_BASE = "images/"; private static String IMG_EXPAND = IMG_BASE+"expand_node_btn.gif"; private static String IMG_COLLAPSE = IMG_BASE+"collapse_node_btn.gif"; private static String IMG_LEAF = IMG_BASE+"end_node_btn.gif"; public static int TYPE_VARIABLE_BASED = 0; public static int TYPE_CONSTRAINT_BASED = 1; private void menu_item(PrintWriter out, String id, String name, String description, String page, boolean isCollapsed) { out.println("<div style=\"margin-left:5px;\">"); out.println("<A style=\"border:0;background:0\" id=\"__idMenu"+id+"\" href=\"javascript:toggle('"+id+"')\" name=\""+name+"\">"); out.println("<img id=\"__idMenuImg"+id+"\" border=\"0\" src=\""+(isCollapsed ? IMG_EXPAND : IMG_COLLAPSE)+"\" align=\"absmiddle\"></A>"); out.println("&nbsp;<A class='noFancyLinks' target=\"__idContentFrame\" "+(page == null ? "" : page+" onmouseover=\"this.style.cursor='hand';this.style.cursor='pointer';\" ")+"title=\""+(description == null ? "" : description)+"\" >"+ name+(description == null?"":" <font color='gray'>[" + description + "]</font>")+"</A><br>"); out.println("</div>"); out.println("<div ID=\"__idMenuDiv"+id+"\" style=\"display:"+(isCollapsed ? "none" : "block")+";position:relative;margin-left:18px;\">"); } private void leaf_item(PrintWriter out, String name, String description, String page) { out.println("<div style=\"margin-left:5px;\">"); out.println("<img border=\"0\" src=\""+IMG_LEAF+"\" align=\"absmiddle\">"); out.println("&nbsp;<A class='noFancyLinks' target=\"__idContentFrame\" "+(page == null ? "" : page + " onmouseover=\"this.style.cursor='hand';this.style.cursor='pointer';\" ")+"title=\""+(description == null ? "" : description)+"\" >"+name+(description == null ? 
"" : " <font color='gray'>[" + description + "]</font>")+"</A><br>"); out.println("</div>"); } private void end_item(PrintWriter out) { out.println("</div>"); } private void unassignedVariableMenuItem(PrintWriter out, String menuId, CBSVariable variable, boolean clickable) { String name = "<font color='"+PreferenceLevel.prolog2color(variable.getPref())+"'>"+ variable.getName()+ "</font>"; String description = null; String onClick = null; if (clickable) onClick = "onclick=\"(parent ? parent : window).showGwtDialog('Examination Assignment', 'examInfo.do?examId="+variable.getId()+"&op=Reset','900','90%');\""; menu_item(out, menuId, variable.getCounter() + "&times; " + name, description, onClick, true); } private void unassignmentMenuItem(PrintWriter out, String menuId, CBSValue value, boolean clickable) { String name = "<font color='"+PreferenceLevel.int2color(value.getPeriodPref())+"'>"+ value.getPeriodName()+ "</font> "; String roomLink = ""; for (int i=0;i<value.getRoomIds().size();i++) { name += (i>0?", ":"")+"<font color='"+PreferenceLevel.int2color(((Integer)value.getRoomPrefs().elementAt(i)).intValue())+"'>"+ value.getRoomNames().elementAt(i)+"</font>"; roomLink += (i>0?":":"")+value.getRoomIds().elementAt(i); } String description = null; String onClick = null; if (clickable) onClick = "onclick=\"(parent ? parent : window).showGwtDialog('Examination Assignment', 'examInfo.do?examId="+value.variable().getId()+"&period="+value.getPeriodId()+"&room="+roomLink+"&op=Try&reset=1','900','90%');\""; menu_item(out, menuId, value.getCounter() + "&times; " + name, description, onClick, true); } private void constraintMenuItem(PrintWriter out, String menuId, CBSConstraint constraint, boolean clickable) { String name = "<font color='"+PreferenceLevel.prolog2color(constraint.getPref())+"'>"; String link = null; switch (constraint.getType()) { case sConstraintTypeGroup : name += "Distribution "+constraint.getName(); break; case sConstraintTypeInstructor : name += "Instructor "+constraint.getName(); if (clickable) link = "examGrid.do?filter="+constraint.getName()+"&resource="+ExamGridTable.sResourceInstructor+"&op=Cbs"; break; case sConstraintTypeRoom : name += "Room "+constraint.getName(); if (clickable) link = "examGrid.do?filter="+constraint.getName()+"&resource="+ExamGridTable.sResourceRoom+"&op=Cbs"; break; case sConstraintTypeStudent : name += "Student "+constraint.getName(); break; default : name += (constraint.getName()==null?"Unknown":constraint.getName()); } name += "</font>"; String description = null; String onClick = null; if (link!=null) onClick = "href=\""+link+"\""; menu_item(out, menuId, constraint.getCounter() + "&times; " + name, description, onClick, true); } private void assignmentLeafItem(PrintWriter out, CBSAssignment assignment, boolean clickable) { String name = "<font color='"+PreferenceLevel.prolog2color(assignment.getPref())+"'>"+ assignment.getName()+ "</font> &larr; "+ "<font color='"+PreferenceLevel.int2color(assignment.getPeriodPref())+"'>"+ assignment.getPeriodName()+ "</font> "; String roomLink = ""; for (int i=0;i<assignment.getRoomIds().size();i++) { name += (i>0?", ":"")+"<font color='"+PreferenceLevel.int2color(((Integer)assignment.getRoomPrefs().elementAt(i)).intValue())+"'>"+ assignment.getRoomNames().elementAt(i)+"</font>"; roomLink += (i>0?":":"")+assignment.getRoomIds().elementAt(i); } String onClick = null; if (clickable) onClick = "onclick=\"(parent ? 
parent : window).showGwtDialog('Examination Assignment', 'examInfo.do?examId="+assignment.getId()+"&period="+assignment.getPeriodId()+"&room="+roomLink+"&op=Try&reset=1','900','90%');\""; leaf_item(out, assignment.getCounter()+"&times; "+name, null, onClick); } public static void printHtmlHeader(JspWriter jsp) { PrintWriter out = new PrintWriter(jsp); printHtmlHeader(out, false); } public static void printHtmlHeader(PrintWriter out, boolean style) { if (style) { out.println("<style type=\"text/css\">"); out.println("<!--"); out.println("A:link { color: blue; text-decoration: none; border:0; background:0; }"); out.println("A:visited { color: blue; text-decoration: none; border:0; background:0; }"); out.println("A:active { color: blue; text-decoration: none; border:0; background:0; }"); out.println("A:hover { color: blue; text-decoration: none; border:0; background:0; }"); out.println(".TextBody { background-color: white; color:black; font-size: 12px; }"); out.println(".WelcomeHead { color: black; margin-top: 0px; margin-left: 0px; font-weight: bold; text-align: right; font-size: 30px; font-family: Comic Sans MS}"); out.println("-->"); out.println("</style>"); out.println(); } out.println("<script language=\"javascript\" type=\"text/javascript\">"); out.println("function toggle(item) {"); out.println(" obj=document.getElementById(\"__idMenuDiv\"+item);"); out.println(" visible=(obj.style.display!=\"none\");"); out.println(" img=document.getElementById(\"__idMenuImg\" + item);"); out.println(" menu=document.getElementById(\"__idMenu\" + item);"); out.println(" if (visible) {obj.style.display=\"none\";img.src=\""+IMG_EXPAND+"\";}"); out.println(" else {obj.style.display=\"block\";img.src=\""+IMG_COLLAPSE+"\";}"); out.println("}"); out.println("</script>"); out.flush(); } private Vector filter(Collection counters, double limit) { Vector cnt = new Vector(counters); Collections.sort(cnt); int total = 0; for (Enumeration e=cnt.elements();e.hasMoreElements();) total += ((Counter)e.nextElement()).getCounter(); int totalLimit = (int)Math.ceil(limit*total); int current = 0; Vector ret = new Vector(); for (Enumeration e=cnt.elements();e.hasMoreElements();) { Counter c = (Counter)e.nextElement(); ret.addElement(c); current += c.getCounter(); if (current>=totalLimit) break; } return ret; } /** Print conflict-based statistics in HTML format */ public void printHtml(JspWriter jsp, double limit, int type, boolean clickable) { printHtml(jsp, null, new double[] {limit,limit,limit,limit}, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(PrintWriter out, double limit, int type, boolean clickable) { printHtml(out, null, new double[] {limit,limit,limit,limit}, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(JspWriter jsp, double[] limit, int type, boolean clickable) { printHtml(jsp, null, limit, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(PrintWriter out, double[] limit, int type, boolean clickable) { printHtml(out, null, limit, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(JspWriter jsp, Long classId, double limit, int type, boolean clickable) { printHtml(jsp, classId, new double[] {limit,limit,limit,limit}, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(PrintWriter out, Long classId, double limit, int type, boolean clickable) { printHtml(out, classId, new double[] 
{limit,limit,limit,limit}, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(JspWriter jsp, Long classId, double[] limit, int type, boolean clickable) { PrintWriter out = new PrintWriter(jsp); printHtml(out, classId, limit, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(PrintWriter out, Long classId, double[] limit, int type, boolean clickable) { if (type == TYPE_VARIABLE_BASED) { Vector vars = filter(iVariables.values(), limit[0]); if (classId!=null) { CBSVariable var = (CBSVariable)iVariables.get(classId); vars.clear(); if (var!=null) vars.add(var); } for (Enumeration e1 = vars.elements(); e1.hasMoreElements();) { CBSVariable variable = (CBSVariable)e1.nextElement(); String m1 = String.valueOf(variable.getId()); if (classId==null) unassignedVariableMenuItem(out,m1,variable, clickable); Vector vals = filter(variable.values(), limit[1]); int id = 0; for (Enumeration e2 = vals.elements();e2.hasMoreElements();) { CBSValue value = (CBSValue)e2.nextElement(); String m2 = m1+"."+(id++); unassignmentMenuItem(out,m2,value, clickable); Vector constraints =filter(value.constraints(),limit[2]); for (Enumeration e3 = constraints.elements(); e3.hasMoreElements();) { CBSConstraint constraint = (CBSConstraint)e3.nextElement(); String m3 = m2 + constraint.getType()+"."+constraint.getId(); constraintMenuItem(out,m3,constraint, clickable); Vector assignments = filter(constraint.assignments(),limit[3]); for (Enumeration e4 = assignments.elements();e4.hasMoreElements();) { CBSAssignment assignment = (CBSAssignment)e4.nextElement(); assignmentLeafItem(out, assignment, clickable); } end_item(out); } end_item(out); } end_item(out); } } else if (type == TYPE_CONSTRAINT_BASED) { Hashtable constraints = new Hashtable(); for (Enumeration e1 = iVariables.elements(); e1.hasMoreElements();) { CBSVariable variable = (CBSVariable)e1.nextElement(); if (classId!=null && classId.longValue()!=variable.getId()) continue; for (Iterator e2=variable.values().iterator();e2.hasNext();) { CBSValue value = (CBSValue)e2.next(); for (Iterator e3=value.constraints().iterator();e3.hasNext();) { CBSConstraint constraint = (CBSConstraint)e3.next(); CBSConstraint xConstraint = (CBSConstraint)constraints.get(constraint.getType()+"."+constraint.getId()); if (xConstraint==null) { xConstraint = new CBSConstraint(constraint.getType(),constraint.getId(),constraint.getName(),constraint.getPref()); constraints.put(constraint.getType()+"."+constraint.getId(),xConstraint); } CBSVariable xVariable = null; for (Iterator i=xConstraint.variables().iterator();i.hasNext();) { CBSVariable v = (CBSVariable)i.next(); if (v.getId()==variable.getId()) { xVariable = v; break; } } if (xVariable==null) { xVariable = new CBSVariable(xConstraint,variable.getId(),variable.getName(),variable.getPref()); xConstraint.variables().add(xVariable); } CBSValue xValue = new CBSValue(xVariable, value.getPeriodId(), value.getPeriodName(), value.getPeriodPref(), value.getRoomIds(), value.getRoomNames(), value.getRoomPrefs()); xVariable.values().add(xValue); for (Iterator e4=constraint.assignments().iterator();e4.hasNext();) { CBSAssignment assignment = (CBSAssignment)e4.next(); xValue.assignments().add(assignment); xValue.incCounter(assignment.getCounter()); } } } } Vector consts = filter(constraints.values(), limit[0]); for (Enumeration e1 = consts.elements(); e1.hasMoreElements();) { CBSConstraint constraint = (CBSConstraint)e1.nextElement(); String m1 = 
constraint.getType()+"."+constraint.getId(); constraintMenuItem(out,m1,constraint, clickable); Vector variables = filter(constraint.variables(), limit[1]); Collections.sort(variables); for (Enumeration e2 = variables.elements(); e2.hasMoreElements();) { CBSVariable variable = (CBSVariable)e2.nextElement(); String m2 = m1+"."+variable.getId(); if (classId==null) unassignedVariableMenuItem(out,m2,variable, clickable); Vector vals = filter(variable.values(), limit[2]); int id = 0; for (Enumeration e3 = vals.elements();e3.hasMoreElements();) { CBSValue value = (CBSValue)e3.nextElement(); String m3 = m2+"."+(id++); unassignmentMenuItem(out,m3,value, clickable); Vector assignments = filter(value.assignments(), limit[3]); for (Enumeration e4 = assignments.elements();e4.hasMoreElements();) { CBSAssignment assignment = (CBSAssignment)e4.nextElement(); assignmentLeafItem(out, assignment, clickable); } end_item(out); } if (classId==null) end_item(out); } end_item(out); } } out.flush(); } public boolean saveToFile() { return true; } }
UniTime/unitime
JavaSource/org/unitime/timetable/solver/exam/ui/ExamConflictStatisticsInfo.java
Java
apache-2.0
36,624
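The combine(int a, int b) helper in the file above is the hash shared by all the CBS classes: it interleaves the low 15 bits of both arguments so that a (periodId, roomIds) or (id, type) pair folds into a single int. A minimal standalone sketch of that behavior (the class name CombineDemo is invented for illustration and is not part of the file):

// Standalone copy of the combine() helper, for illustration only.
public class CombineDemo {
    static int combine(int a, int b) {
        int ret = 0;
        for (int i = 0; i < 15; i++)
            ret = ret | ((a & (1 << i)) << i) | ((b & (1 << i)) << (i + 1));
        return ret;
    }

    public static void main(String[] args) {
        // bit i of a lands at position 2*i, bit i of b at position 2*i+1:
        System.out.println(Integer.toBinaryString(combine(0b101, 0b011))); // prints 11011
    }
}

Note that only the low 15 bits of each argument survive, so ids above 2^15 can collide; that is acceptable for a hashCode() but would not be for an identity key.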
package no.nb.nna.veidemann.chrome.client.ws;

import no.nb.nna.veidemann.chrome.client.ws.GetBrowserVersionCmd.Response;

public class GetBrowserVersionCmd extends Command<Response> {

    public GetBrowserVersionCmd(Cdp client) {
        super(client, "Browser", "getVersion", Response.class);
    }

    public static class Response {
        private String protocolVersion;
        private String product;
        private String revision;
        private String userAgent;
        private String jsVersion;

        /**
         * Protocol version.
         */
        public String protocolVersion() {
            return protocolVersion;
        }

        /**
         * Product name.
         */
        public String product() {
            return product;
        }

        /**
         * Product revision.
         */
        public String revision() {
            return revision;
        }

        /**
         * User-Agent.
         */
        public String userAgent() {
            return userAgent;
        }

        /**
         * V8 version.
         */
        public String jsVersion() {
            return jsVersion;
        }

        public String toString() {
            return "Version{protocolVersion=" + protocolVersion
                    + ", product=" + product
                    + ", revision=" + revision
                    + ", userAgent=" + userAgent
                    + ", jsVersion=" + jsVersion + "}";
        }
    }
}
nlnwa/broprox
veidemann-chrome-client/src/main/java/no/nb/nna/veidemann/chrome/client/ws/GetBrowserVersionCmd.java
Java
apache-2.0
1,385
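The file above shows this client's pattern for wrapping a Chrome DevTools Protocol call: a Command<T> subclass names the domain and method, and a nested Response class mirrors the result fields. A hedged sketch of a sibling command in the same style (CloseBrowserCmd is an invented name; it assumes the same Command and Cdp base types, and relies on the DevTools method Browser.close, which returns an empty result):

package no.nb.nna.veidemann.chrome.client.ws;

public class CloseBrowserCmd extends Command<CloseBrowserCmd.Response> {

    public CloseBrowserCmd(Cdp client) {
        // "Browser.close" in the DevTools protocol carries no result fields
        super(client, "Browser", "close", Response.class);
    }

    public static class Response {
        // intentionally empty: Browser.close returns nothing
    }
}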
package fr.javatronic.blog.massive.annotation1;

import fr.javatronic.blog.processor.Annotation_001;

@Annotation_001
public class Class_914 {
}
lesaint/experimenting-annotation-processing
experimenting-rounds/massive-count-of-annotated-classes/src/main/java/fr/javatronic/blog/massive/annotation1/Class_914.java
Java
apache-2.0
145
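Class_914 is one of the mass-generated annotated classes this repo uses to stress-test annotation processing. For context, a minimal JSR-269 processor that would consume these classes could look like the sketch below (CountingProcessor is an invented name; only the standard javax.annotation.processing API is used):

package fr.javatronic.blog.processor;

import java.util.Set;
import javax.annotation.processing.AbstractProcessor;
import javax.annotation.processing.RoundEnvironment;
import javax.annotation.processing.SupportedAnnotationTypes;
import javax.annotation.processing.SupportedSourceVersion;
import javax.lang.model.SourceVersion;
import javax.lang.model.element.TypeElement;
import javax.tools.Diagnostic;

@SupportedAnnotationTypes("fr.javatronic.blog.processor.Annotation_001")
@SupportedSourceVersion(SourceVersion.RELEASE_8)
public class CountingProcessor extends AbstractProcessor {
    private int count = 0;

    @Override
    public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
        // tally every element carrying the annotation across all rounds
        for (TypeElement annotation : annotations) {
            count += roundEnv.getElementsAnnotatedWith(annotation).size();
        }
        if (roundEnv.processingOver()) {
            processingEnv.getMessager().printMessage(Diagnostic.Kind.NOTE,
                    "annotated classes seen: " + count);
        }
        return false; // do not claim the annotation
    }
}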
package org.wikipedia.concurrency; // Copied from Android 4.4.2_r2 source // so we can use executeOnExecutor :P // // https://android.googlesource.com/platform/frameworks/base/+/android-4.4.2_r2/core/java/android/os/AsyncTask.java /* * Copyright (C) 2008 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import android.os.Handler; import android.os.Message; import android.os.Process; import android.support.annotation.NonNull; import java.util.ArrayDeque; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.FutureTask; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; /** * <p>AsyncTask enables proper and easy use of the UI thread. This class allows to * perform background operations and publish results on the UI thread without * having to manipulate threads and/or handlers.</p> * * <p>AsyncTask is designed to be a helper class around {@link Thread} and {@link Handler} * and does not constitute a generic threading framework. AsyncTasks should ideally be * used for short operations (a few seconds at the most.) If you need to keep threads * running for long periods of time, it is highly recommended you use the various APIs * provided by the <code>java.util.concurrent</code> pacakge such as {@link Executor}, * {@link ThreadPoolExecutor} and {@link FutureTask}.</p> * * <p>An asynchronous task is defined by a computation that runs on a background thread and * whose result is published on the UI thread. An asynchronous task is defined by 3 generic * types, called <code>Params</code>, <code>Progress</code> and <code>Result</code>, * and 4 steps, called <code>onPreExecute</code>, <code>doInBackground</code>, * <code>onProgressUpdate</code> and <code>onPostExecute</code>.</p> * * <div class="special reference"> * <h3>Developer Guides</h3> * <p>For more information about using tasks and threads, read the * <a href="{@docRoot}guide/topics/fundamentals/processes-and-threads.html">Processes and * Threads</a> developer guide.</p> * </div> * * <h2>Usage</h2> * <p>AsyncTask must be subclassed to be used. The subclass will override at least * one method ({@link #doInBackground}), and most often will override a * second one ({@link #onPostExecute}.)</p> * * <p>Here is an example of subclassing:</p> * <pre class="prettyprint"> * private class DownloadFilesTask extends AsyncTask&lt;URL, Integer, Long&gt; { * protected Long doInBackground(URL... 
urls) { * int count = urls.length; * long totalSize = 0; * for (int i = 0; i < count; i++) { * totalSize += Downloader.downloadFile(urls[i]); * publishProgress((int) ((i / (float) count) * 100)); * // Escape early if cancel() is called * if (isCancelled()) break; * } * return totalSize; * } * * protected void onProgressUpdate(Integer... progress) { * setProgressPercent(progress[0]); * } * * protected void onPostExecute(Long result) { * showDialog("Downloaded " + result + " bytes"); * } * } * </pre> * * <p>Once created, a task is executed very simply:</p> * <pre class="prettyprint"> * new DownloadFilesTask().execute(url1, url2, url3); * </pre> * * <h2>AsyncTask's generic types</h2> * <p>The three types used by an asynchronous task are the following:</p> * <ol> * <li><code>Params</code>, the type of the parameters sent to the task upon * execution.</li> * <li><code>Progress</code>, the type of the progress units published during * the background computation.</li> * <li><code>Result</code>, the type of the result of the background * computation.</li> * </ol> * <p>Not all types are always used by an asynchronous task. To mark a type as unused, * simply use the type {@link Void}:</p> * <pre> * private class MyTask extends AsyncTask&lt;Void, Void, Void&gt; { ... } * </pre> * * <h2>The 4 steps</h2> * <p>When an asynchronous task is executed, the task goes through 4 steps:</p> * <ol> * <li>{@link #onPreExecute()}, invoked on the UI thread before the task * is executed. This step is normally used to setup the task, for instance by * showing a progress bar in the user interface.</li> * <li>{@link #doInBackground}, invoked on the background thread * immediately after {@link #onPreExecute()} finishes executing. This step is used * to perform background computation that can take a long time. The parameters * of the asynchronous task are passed to this step. The result of the computation must * be returned by this step and will be passed back to the last step. This step * can also use {@link #publishProgress} to publish one or more units * of progress. These values are published on the UI thread, in the * {@link #onProgressUpdate} step.</li> * <li>{@link #onProgressUpdate}, invoked on the UI thread after a * call to {@link #publishProgress}. The timing of the execution is * undefined. This method is used to display any form of progress in the user * interface while the background computation is still executing. For instance, * it can be used to animate a progress bar or show logs in a text field.</li> * <li>{@link #onPostExecute}, invoked on the UI thread after the background * computation finishes. The result of the background computation is passed to * this step as a parameter.</li> * </ol> * * <h2>Cancelling a task</h2> * <p>A task can be cancelled at any time by invoking {@link #cancel(boolean)}. Invoking * this method will cause subsequent calls to {@link #isCancelled()} to return true. * After invoking this method, {@link #onCancelled(Object)}, instead of * {@link #onPostExecute(Object)} will be invoked after {@link #doInBackground(Object[])} * returns. To ensure that a task is cancelled as quickly as possible, you should always * check the return value of {@link #isCancelled()} periodically from * {@link #doInBackground(Object[])}, if possible (inside a loop for instance.)</p> * * <h2>Threading rules</h2> * <p>There are a few threading rules that must be followed for this class to * work properly:</p> * <ul> * <li>The AsyncTask class must be loaded on the UI thread. 
This is done * automatically as of {@link android.os.Build.VERSION_CODES#JELLY_BEAN}.</li> * <li>The task instance must be created on the UI thread.</li> * <li>{@link #execute} must be invoked on the UI thread.</li> * <li>Do not call {@link #onPreExecute()}, {@link #onPostExecute}, * {@link #doInBackground}, {@link #onProgressUpdate} manually.</li> * <li>The task can be executed only once (an exception will be thrown if * a second execution is attempted.)</li> * </ul> * * <h2>Memory observability</h2> * <p>AsyncTask guarantees that all callback calls are synchronized in such a way that the following * operations are safe without explicit synchronizations.</p> * <ul> * <li>Set member fields in the constructor or {@link #onPreExecute}, and refer to them * in {@link #doInBackground}. * <li>Set member fields in {@link #doInBackground}, and refer to them in * {@link #onProgressUpdate} and {@link #onPostExecute}. * </ul> * * <h2>Order of execution</h2> * <p>When first introduced, AsyncTasks were executed serially on a single background * thread. Starting with {@link android.os.Build.VERSION_CODES#DONUT}, this was changed * to a pool of threads allowing multiple tasks to operate in parallel. Starting with * {@link android.os.Build.VERSION_CODES#HONEYCOMB}, tasks are executed on a single * thread to avoid common application errors caused by parallel execution.</p> * <p>If you truly want parallel execution, you can invoke * {@link #executeOnExecutor(java.util.concurrent.Executor, Object[])} with * {@link #THREAD_POOL_EXECUTOR}.</p> */ public abstract class AsyncTask<Params, Progress, Result> { private static final String LOG_TAG = "AsyncTask"; private static final int CPU_COUNT = Runtime.getRuntime().availableProcessors(); private static final int CORE_POOL_SIZE = CPU_COUNT + 1; private static final int MAXIMUM_POOL_SIZE = CPU_COUNT * 2 + 1; private static final int KEEP_ALIVE = 1; private static final ThreadFactory sThreadFactory = new ThreadFactory() { private final AtomicInteger mCount = new AtomicInteger(1); public Thread newThread(@NonNull Runnable r) { return new Thread(r, "AsyncTask #" + mCount.getAndIncrement()); } }; private static final BlockingQueue<Runnable> sPoolWorkQueue = new LinkedBlockingQueue<>(128); /** * An {@link Executor} that can be used to execute tasks in parallel. */ public static final Executor THREAD_POOL_EXECUTOR = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE, TimeUnit.SECONDS, sPoolWorkQueue, sThreadFactory); /** * An {@link Executor} that executes tasks one at a time in serial * order. This serialization is global to a particular process. 
*/ public static final Executor SERIAL_EXECUTOR = new SerialExecutor(); private static final int MESSAGE_POST_RESULT = 0x1; private static final int MESSAGE_POST_PROGRESS = 0x2; private static final InternalHandler sHandler = new InternalHandler(); private static volatile Executor sDefaultExecutor = SERIAL_EXECUTOR; private final WorkerRunnable<Params, Result> mWorker; private final FutureTask<Result> mFuture; private volatile Status mStatus = Status.PENDING; private final AtomicBoolean mCancelled = new AtomicBoolean(); private final AtomicBoolean mTaskInvoked = new AtomicBoolean(); private static class SerialExecutor implements Executor { final ArrayDeque<Runnable> mTasks = new ArrayDeque<>(); Runnable mActive; public synchronized void execute(@NonNull final Runnable r) { mTasks.offer(new Runnable() { public void run() { try { r.run(); } finally { scheduleNext(); } } }); if (mActive == null) { scheduleNext(); } } protected synchronized void scheduleNext() { if ((mActive = mTasks.poll()) != null) { THREAD_POOL_EXECUTOR.execute(mActive); } } } /** * Indicates the current status of the task. Each status will be set only once * during the lifetime of a task. */ public enum Status { /** * Indicates that the task has not been executed yet. */ PENDING, /** * Indicates that the task is running. */ RUNNING, /** * Indicates that {@link AsyncTask#onPostExecute} has finished. */ FINISHED, } /** @hide Used to force static handler to be created. */ public static void init() { sHandler.getLooper(); } /** @hide */ public static void setDefaultExecutor(Executor exec) { sDefaultExecutor = exec; } /** * Creates a new asynchronous task. This constructor must be invoked on the UI thread. */ public AsyncTask() { mWorker = new WorkerRunnable<Params, Result>() { public Result call() throws Exception { mTaskInvoked.set(true); Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND); //noinspection unchecked return postResult(doInBackground(mParams)); } }; mFuture = new FutureTask<Result>(mWorker) { @Override protected void done() { try { postResultIfNotInvoked(get()); } catch (InterruptedException e) { android.util.Log.w(LOG_TAG, e); } catch (ExecutionException e) { throw new RuntimeException("An error occured while executing doInBackground()", e.getCause()); } catch (CancellationException e) { postResultIfNotInvoked(null); } } }; } private void postResultIfNotInvoked(Result result) { final boolean wasTaskInvoked = mTaskInvoked.get(); if (!wasTaskInvoked) { postResult(result); } } private Result postResult(Result result) { @SuppressWarnings("unchecked") Message message = sHandler.obtainMessage(MESSAGE_POST_RESULT, new AsyncTaskResult<>(this, result)); message.sendToTarget(); return result; } /** * Returns the current status of this task. * * @return The current status. */ public final Status getStatus() { return mStatus; } /** * Override this method to perform a computation on a background thread. The * specified parameters are the parameters passed to {@link #execute} * by the caller of this task. * * This method can call {@link #publishProgress} to publish updates * on the UI thread. * * @param params The parameters of the task. * * @return A result, defined by the subclass of this task. * * @see #onPreExecute() * @see #onPostExecute * @see #publishProgress */ protected abstract Result doInBackground(Params... params); /** * Runs on the UI thread before {@link #doInBackground}. 
* * @see #onPostExecute * @see #doInBackground */ protected void onPreExecute() { } /** * <p>Runs on the UI thread after {@link #doInBackground}. The * specified result is the value returned by {@link #doInBackground}.</p> * * <p>This method won't be invoked if the task was cancelled.</p> * * @param result The result of the operation computed by {@link #doInBackground}. * * @see #onPreExecute * @see #doInBackground * @see #onCancelled(Object) */ @SuppressWarnings({"UnusedDeclaration"}) protected void onPostExecute(Result result) { } /** * Runs on the UI thread after {@link #publishProgress} is invoked. * The specified values are the values passed to {@link #publishProgress}. * * @param values The values indicating progress. * * @see #publishProgress * @see #doInBackground */ @SuppressWarnings({"UnusedDeclaration"}) protected void onProgressUpdate(Progress... values) { } /** * <p>Runs on the UI thread after {@link #cancel(boolean)} is invoked and * {@link #doInBackground(Object[])} has finished.</p> * * <p>The default implementation simply invokes {@link #onCancelled()} and * ignores the result. If you write your own implementation, do not call * <code>super.onCancelled(result)</code>.</p> * * @param result The result, if any, computed in * {@link #doInBackground(Object[])}, can be null * * @see #cancel(boolean) * @see #isCancelled() */ @SuppressWarnings({"UnusedParameters"}) protected void onCancelled(Result result) { onCancelled(); } /** * <p>Applications should preferably override {@link #onCancelled(Object)}. * This method is invoked by the default implementation of * {@link #onCancelled(Object)}.</p> * * <p>Runs on the UI thread after {@link #cancel(boolean)} is invoked and * {@link #doInBackground(Object[])} has finished.</p> * * @see #onCancelled(Object) * @see #cancel(boolean) * @see #isCancelled() */ protected void onCancelled() { } /** * Returns <tt>true</tt> if this task was cancelled before it completed * normally. If you are calling {@link #cancel(boolean)} on the task, * the value returned by this method should be checked periodically from * {@link #doInBackground(Object[])} to end the task as soon as possible. * * @return <tt>true</tt> if task was cancelled before it completed * * @see #cancel(boolean) */ public final boolean isCancelled() { return mCancelled.get(); } /** * <p>Attempts to cancel execution of this task. This attempt will * fail if the task has already completed, already been cancelled, * or could not be cancelled for some other reason. If successful, * and this task has not started when <tt>cancel</tt> is called, * this task should never run. If the task has already started, * then the <tt>mayInterruptIfRunning</tt> parameter determines * whether the thread executing this task should be interrupted in * an attempt to stop the task.</p> * * <p>Calling this method will result in {@link #onCancelled(Object)} being * invoked on the UI thread after {@link #doInBackground(Object[])} * returns. Calling this method guarantees that {@link #onPostExecute(Object)} * is never invoked. After invoking this method, you should check the * value returned by {@link #isCancelled()} periodically from * {@link #doInBackground(Object[])} to finish the task as early as * possible.</p> * * @param mayInterruptIfRunning <tt>true</tt> if the thread executing this * task should be interrupted; otherwise, in-progress tasks are allowed * to complete. 
* * @return <tt>false</tt> if the task could not be cancelled, * typically because it has already completed normally; * <tt>true</tt> otherwise * * @see #isCancelled() * @see #onCancelled(Object) */ public final boolean cancel(boolean mayInterruptIfRunning) { mCancelled.set(true); return mFuture.cancel(mayInterruptIfRunning); } /** * Waits if necessary for the computation to complete, and then * retrieves its result. * * @return The computed result. * * @throws CancellationException If the computation was cancelled. * @throws ExecutionException If the computation threw an exception. * @throws InterruptedException If the current thread was interrupted * while waiting. */ public final Result get() throws InterruptedException, ExecutionException { return mFuture.get(); } /** * Waits if necessary for at most the given time for the computation * to complete, and then retrieves its result. * * @param timeout Time to wait before cancelling the operation. * @param unit The time unit for the timeout. * * @return The computed result. * * @throws CancellationException If the computation was cancelled. * @throws ExecutionException If the computation threw an exception. * @throws InterruptedException If the current thread was interrupted * while waiting. * @throws TimeoutException If the wait timed out. */ public final Result get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { return mFuture.get(timeout, unit); } /** * Executes the task with the specified parameters. The task returns * itself (this) so that the caller can keep a reference to it. * * <p>Note: this function schedules the task on a queue for a single background * thread or pool of threads depending on the platform version. When first * introduced, AsyncTasks were executed serially on a single background thread. * Starting with {@link android.os.Build.VERSION_CODES#DONUT}, this was changed * to a pool of threads allowing multiple tasks to operate in parallel. Starting * {@link android.os.Build.VERSION_CODES#HONEYCOMB}, tasks are back to being * executed on a single thread to avoid common application errors caused * by parallel execution. If you truly want parallel execution, you can use * the {@link #executeOnExecutor} version of this method * with {@link #THREAD_POOL_EXECUTOR}; however, see commentary there for warnings * on its use. * * <p>This method must be invoked on the UI thread. * * @param params The parameters of the task. * * @return This instance of AsyncTask. * * @throws IllegalStateException If {@link #getStatus()} returns either * {@link AsyncTask.Status#RUNNING} or {@link AsyncTask.Status#FINISHED}. * * @see #executeOnExecutor(java.util.concurrent.Executor, Object[]) * @see #execute(Runnable) */ public final AsyncTask<Params, Progress, Result> execute(Params... params) { return executeOnExecutor(sDefaultExecutor, params); } /** * Executes the task with the specified parameters. The task returns * itself (this) so that the caller can keep a reference to it. * * <p>This method is typically used with {@link #THREAD_POOL_EXECUTOR} to * allow multiple tasks to run in parallel on a pool of threads managed by * AsyncTask, however you can also use your own {@link Executor} for custom * behavior. * * <p><em>Warning:</em> Allowing multiple tasks to run in parallel from * a thread pool is generally <em>not</em> what one wants, because the order * of their operation is not defined. 
For example, if these tasks are used * to modify any state in common (such as writing a file due to a button click), * there are no guarantees on the order of the modifications. * Without careful work it is possible in rare cases for the newer version * of the data to be over-written by an older one, leading to obscure data * loss and stability issues. Such changes are best * executed in serial; to guarantee such work is serialized regardless of * platform version you can use this function with {@link #SERIAL_EXECUTOR}. * * <p>This method must be invoked on the UI thread. * * @param exec The executor to use. {@link #THREAD_POOL_EXECUTOR} is available as a * convenient process-wide thread pool for tasks that are loosely coupled. * @param params The parameters of the task. * * @return This instance of AsyncTask. * * @throws IllegalStateException If {@link #getStatus()} returns either * {@link AsyncTask.Status#RUNNING} or {@link AsyncTask.Status#FINISHED}. * * @see #execute(Object[]) */ public final AsyncTask<Params, Progress, Result> executeOnExecutor(Executor exec, Params... params) { if (mStatus != Status.PENDING) { switch (mStatus) { case RUNNING: throw new IllegalStateException("Cannot execute task:" + " the task is already running."); case FINISHED: throw new IllegalStateException("Cannot execute task:" + " the task has already been executed " + "(a task can be executed only once)"); } } mStatus = Status.RUNNING; onPreExecute(); mWorker.mParams = params; exec.execute(mFuture); return this; } /** * Convenience version of {@link #execute(Object...)} for use with * a simple Runnable object. See {@link #execute(Object[])} for more * information on the order of execution. * * @see #execute(Object[]) * @see #executeOnExecutor(java.util.concurrent.Executor, Object[]) */ public static void execute(Runnable runnable) { sDefaultExecutor.execute(runnable); } /** * This method can be invoked from {@link #doInBackground} to * publish updates on the UI thread while the background computation is * still running. Each call to this method will trigger the execution of * {@link #onProgressUpdate} on the UI thread. * * {@link #onProgressUpdate} will note be called if the task has been * canceled. * * @param values The progress values to update the UI with. * * @see #onProgressUpdate * @see #doInBackground */ protected final void publishProgress(Progress... values) { if (!isCancelled()) { sHandler.obtainMessage(MESSAGE_POST_PROGRESS, new AsyncTaskResult<>(this, values)).sendToTarget(); } } private void finish(Result result) { if (isCancelled()) { onCancelled(result); } else { onPostExecute(result); } mStatus = Status.FINISHED; } private static class InternalHandler extends Handler { @SuppressWarnings({"unchecked", "RawUseOfParameterizedType"}) @Override public void handleMessage(Message msg) { AsyncTaskResult result = (AsyncTaskResult) msg.obj; switch (msg.what) { case MESSAGE_POST_RESULT: // There is only one result result.mTask.finish(result.mData[0]); break; case MESSAGE_POST_PROGRESS: result.mTask.onProgressUpdate(result.mData); break; } } } private static abstract class WorkerRunnable<Params, Result> implements Callable<Result> { Params[] mParams; } @SuppressWarnings({"RawUseOfParameterizedType"}) private static class AsyncTaskResult<Data> { final AsyncTask mTask; final Data[] mData; AsyncTaskResult(AsyncTask task, Data... data) { mTask = task; mData = data; } } }
reproio/apps-android-wikipedia
wikipedia/src/main/java/org/wikipedia/concurrency/AsyncTask.java
Java
apache-2.0
26,978
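Per its header comment, this AsyncTask copy exists so callers can use executeOnExecutor() on older platforms. A small sketch of that call pattern (FetchTask and the URL strings are illustrative placeholders, not taken from the repo):

class FetchTask extends AsyncTask<String, Void, Integer> {
    @Override
    protected Integer doInBackground(String... urls) {
        return urls.length; // placeholder for real background work
    }

    @Override
    protected void onPostExecute(Integer result) {
        // runs on the UI thread after doInBackground() returns
    }
}

// On the UI thread: run two instances truly in parallel, instead of
// queueing them on the serial default executor.
new FetchTask().executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR, "https://a.example", "https://b.example");
new FetchTask().executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR, "https://c.example");

As the class's own Javadoc warns, parallel execution gives no ordering guarantees between tasks, so tasks that touch shared state should stay on SERIAL_EXECUTOR.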
package cn.edu.hhu.reg.vo;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;

@Entity
@Table(name = "doctor_login")
public class DoctorLogin {

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(length = 16)
    private Integer id;

    /**
     * Doctor id.
     */
    @Column(name = "doctor_id", length = 16)
    private Integer doctorId;

    /**
     * Doctor login name.
     */
    @Column(name = "login_name", length = 50)
    private String loginName;

    /**
     * Doctor login password.
     */
    @Column(name = "password", length = 50)
    private String password;

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public Integer getDoctorId() {
        return doctorId;
    }

    public void setDoctorId(Integer doctorId) {
        this.doctorId = doctorId;
    }

    public String getLoginName() {
        return loginName;
    }

    public void setLoginName(String loginName) {
        this.loginName = loginName;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    public DoctorLogin() {
    }
}
pqpo/registration_api
src/cn/edu/hhu/reg/vo/DoctorLogin.java
Java
apache-2.0
1,216
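DoctorLogin above is a plain JPA entity with an IDENTITY-generated key. A hedged usage sketch with the standard javax.persistence API (the persistence-unit name "reg" and the demo class are assumptions, not taken from the project):

import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;

public class DoctorLoginDemo {
    public static void main(String[] args) {
        // "reg" is an assumed persistence-unit name
        EntityManagerFactory emf = Persistence.createEntityManagerFactory("reg");
        EntityManager em = emf.createEntityManager();
        em.getTransaction().begin();

        DoctorLogin login = new DoctorLogin();
        login.setDoctorId(1);
        login.setLoginName("dr.zhang");
        login.setPassword("secret"); // production code should store a hash, not plain text
        em.persist(login);           // id is assigned by the database (IDENTITY) at flush/commit

        em.getTransaction().commit();
        em.close();
        emf.close();
    }
}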
/*
 * Copyright 2018 Aleksander Jagiełło
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package pl.themolka.arcade.team;

import org.bukkit.ChatColor;
import pl.themolka.arcade.command.CommandException;
import pl.themolka.arcade.command.CommandUtils;
import pl.themolka.arcade.command.Sender;
import pl.themolka.arcade.game.GamePlayer;
import pl.themolka.arcade.match.Observers;
import pl.themolka.arcade.parser.Context;
import pl.themolka.arcade.util.Color;

import java.util.ArrayList;
import java.util.Collection;

public class TeamCommands {
    private final TeamsGame game;

    public TeamCommands(TeamsGame game) {
        this.game = game;
    }

    //
    // Commands
    //

    public void clearCommand(Sender sender, String teamId) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot clear observers.");
        }

        Observers observers = this.game.getMatch().getObservers();

        int result = 0;
        for (GamePlayer player : new ArrayList<>(team.getOnlineMembers())) {
            observers.joinForce(player);
            result++;
        }

        if (result > 0) {
            sender.sendSuccess(team.getName() + " has been cleared (" + result + " players) and moved to " + observers.getName() + ".");
        } else {
            sender.sendError("No players to clear.");
        }
    }

    public void forceCommand(Sender sender, String username, String teamId) {
        GamePlayer player = this.fetchPlayer(username);
        Team team = this.fetchTeam(teamId);

        if (team.contains(player)) {
            throw new CommandException(player.getUsername() + " is already a member of " + team.getName() + ".");
        }

        team.joinForce(player);
        sender.sendSuccess(player.getUsername() + " has been moved to " + team.getName() + ".");
    }

    public void friendlyCommand(Sender sender, String teamId, boolean friendly) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot edit observers.");
        }

        if (friendly == team.isFriendlyFire()) {
            if (friendly) {
                throw new CommandException(team.getName() + " is already in friendly-fire.");
            } else {
                throw new CommandException(team.getName() + " is already not in friendly-fire.");
            }
        }

        Team oldState = new Team(team);
        team.setFriendlyFire(friendly);
        this.callEditEvent(team, oldState, TeamEditEvent.Reason.FRIENDLY_FIRE);

        if (friendly) {
            sender.sendSuccess(oldState.getName() + " is now in friendly-fire.");
        } else {
            sender.sendSuccess(oldState.getName() + " is now not in friendly-fire.");
        }
    }

    public void infoCommand(Sender sender) {
        Collection<Team> teams = this.game.getTeams();
        CommandUtils.sendTitleMessage(sender, "Teams", Integer.toString(teams.size()));

        for (Team team : teams) {
            sender.send(String.format("%s - %s/%s - %s minimal to play and %s overfill",
                    team.getPrettyName() + ChatColor.GRAY,
                    ChatColor.GOLD.toString() + team.getOnlineMembers().size() + ChatColor.GRAY,
                    Integer.toString(team.getSlots()),
                    ChatColor.GREEN.toString() + team.getMinPlayers() + ChatColor.GRAY,
                    ChatColor.RED.toString() + team.getMaxPlayers() + ChatColor.GRAY));
        }
    }

    public void kickCommand(Sender sender, String username) {
        GamePlayer player = this.fetchPlayer(username);
        Team team = this.game.getTeam(player);

        if (team.isObservers()) {
            throw new CommandException("Cannot kick from observers.");
        }

        team.leaveForce(player);
        team.getMatch().getObservers().joinForce(player);
        sender.sendSuccess(player.getUsername() + " has been kicked from " + team.getName() + ".");
    }

    public void minCommand(Sender sender, String teamId, int min) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot edit observers.");
        } else if (min < 0) {
            throw new CommandException("Number cannot be negative.");
        }

        Team oldState = new Team(team);
        team.setMinPlayers(min);
        this.callEditEvent(team, oldState, TeamEditEvent.Reason.MIN_PLAYERS);
        sender.sendSuccess(oldState.getName() + " has been edited.");
    }

    public void overfillCommand(Sender sender, String teamId, int overfill) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot edit observers.");
        }

        // set to unlimited if zero or negative
        int max = Integer.MAX_VALUE;
        if (overfill > 0) {
            max = overfill;
        }

        Team oldState = new Team(team);
        team.setMaxPlayers(max);
        if (max > team.getSlots()) {
            team.setSlots(max); // slots
        }

        this.callEditEvent(team, oldState, TeamEditEvent.Reason.MAX_PLAYERS);
        sender.sendSuccess(oldState.getName() + " has been edited.");
    }

    public void paintCommand(Sender sender, String teamId, String paint) {
        Team team = this.fetchTeam(teamId);
        ChatColor color = Color.parseChat(new Context(this.game.getPlugin()), paint);

        if (color == null) {
            StringBuilder colors = new StringBuilder();
            for (int i = 0; i < ChatColor.values().length; i++) {
                ChatColor value = ChatColor.values()[i];
                if (i != 0) {
                    colors.append(", ");
                }

                ChatColor result = ChatColor.RED;
                if (!value.equals(ChatColor.MAGIC)) {
                    result = value;
                }

                colors.append(result).append(value.name().toLowerCase().replace("_", "-"))
                        .append(ChatColor.RESET).append(ChatColor.RED);
            }

            throw new CommandException("Available colors: " + colors.toString() + ".");
        }

        Team oldState = new Team(team);
        team.setChatColor(color);
        this.callEditEvent(team, oldState, TeamEditEvent.Reason.PAINT);
        sender.sendSuccess(oldState.getName() + " has been painted from "
                + oldState.getChatColor().name().toLowerCase().replace("_", "-") + " to "
                + team.getChatColor().name().toLowerCase().replace("_", "-") + ".");
    }

    public void renameCommand(Sender sender, String teamId, String name) {
        Team team = this.fetchTeam(teamId);

        if (name == null) {
            throw new CommandException("New name not given.");
        } else if (name.length() > Team.NAME_MAX_LENGTH) {
            throw new CommandException("Name too long (greater than " + Team.NAME_MAX_LENGTH + " characters).");
        } else if (team.getName().equals(name)) {
            throw new CommandException("Already named '" + team.getName() + "'.");
        }

        Team oldState = new Team(team);
        team.setName(name);
        this.callEditEvent(team, oldState, TeamEditEvent.Reason.RENAME);
        sender.sendSuccess(oldState.getName() + " has been renamed to " + team.getName() + ".");
    }

    public void slotsCommand(Sender sender, String teamId, int slots) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot edit observers.");
        }

        // set to unlimited if zero or negative
        int max = Integer.MAX_VALUE;
        if (slots > 0) {
            max = slots;
        }

        Team oldState = new Team(team);
        team.setSlots(max);
        if (max > team.getMaxPlayers()) {
            team.setMaxPlayers(max); // overfill
        }

        this.callEditEvent(team, oldState, TeamEditEvent.Reason.SLOTS);
        sender.sendSuccess(oldState.getName() + " has been edited.");
    }

    //
    // Command Utilities
    //

    private void callEditEvent(Team newState, Team oldState, TeamEditEvent.Reason reason) {
        this.game.getPlugin().getEventBus().publish(new TeamEditEvent(
                this.game.getPlugin(), newState, oldState, reason));
    }

    private GamePlayer fetchPlayer(String player) {
        if (player != null && !player.isEmpty()) {
            GamePlayer result = this.game.getGame().findPlayer(player);
            if (result != null) {
                return result;
            }
        }

        throw new CommandException("Player not found.");
    }

    private Team fetchTeam(String team) {
        if (team != null && !team.isEmpty()) {
            Team result = this.game.findTeamById(team);
            if (result != null) {
                return result;
            }
        }

        throw new CommandException("Team not found.");
    }
}
ShootGame/Arcade2
src/main/java/pl/themolka/arcade/team/TeamCommands.java
Java
apache-2.0
9,514
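The fetchPlayer/fetchTeam helpers above signal failure by throwing CommandException, which is unchecked here (it is thrown without a throws clause). A hedged sketch of how a command front-end might drive this class; the dispatcher method is invented, and only the throw/catch contract comes from the file:

// Illustrative caller, not part of the plugin:
void dispatchRename(TeamCommands commands, Sender sender, String teamId, String newName) {
    try {
        commands.renameCommand(sender, teamId, newName);
    } catch (CommandException e) {
        sender.sendError(e.getMessage()); // e.g. "Team not found."
    }
}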
package problems;

import java.util.Arrays;
import java.util.PriorityQueue;

/**
 * Leetcode: Super Ugly Number
 * Created by alan on 2/24/2016.
 */
public class SuperUglyNumber {

    class Node implements Comparable<Node> {
        int val;
        final int prime_index;

        public Node(int value, int prime_idx) {
            this.val = value;
            this.prime_index = prime_idx;
        }

        public int compareTo(Node a) {
            return this.val - a.val;
        }
    }

    public int[] nthSuperUglyNumber(int n, int[] primes) {
        int[] nums = new int[n];
        nums[0] = 1;
        // index[j] points at the next position in nums to be multiplied by primes[j]
        int[] index = new int[primes.length];
        // min-heap of the current smallest candidate per prime
        PriorityQueue<Node> pq = new PriorityQueue<>();
        for (int i = 0; i < primes.length; i++)
            pq.add(new Node(primes[i], i));

        for (int i = 1; i < n; i++) {
            Node node = pq.poll();
            // skip duplicates: advance this prime's pointer until a fresh value appears
            while (node.val == nums[i - 1]) {
                node.val = nums[++index[node.prime_index]] * primes[node.prime_index];
                pq.add(node);
                node = pq.poll();
            }
            nums[i] = node.val;
            node.val = nums[++index[node.prime_index]] * primes[node.prime_index];
            pq.add(node);
        }
        return nums;
    }

    public static void main(String[] args) {
        SuperUglyNumber sn = new SuperUglyNumber();
        int[] primes = {2, 7, 13, 19};
        System.out.println(Arrays.toString(primes));
        System.out.println(Arrays.toString(sn.nthSuperUglyNumber(12, primes)));
    }
}
alyiwang/LeetCode
src/problems/SuperUglyNumber.java
Java
apache-2.0
1,548
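For comparison with the heap-based version above, here is a sketch of the classic O(n*k) dynamic-programming formulation of the same problem (not from the repo; the class name is invented):

public class SuperUglyNumberDp {
    public static int[] sequence(int n, int[] primes) {
        int k = primes.length;
        int[] nums = new int[n];
        int[] idx = new int[k]; // idx[j]: next position in nums to multiply by primes[j]
        nums[0] = 1;
        for (int i = 1; i < n; i++) {
            int best = Integer.MAX_VALUE;
            for (int j = 0; j < k; j++)
                best = Math.min(best, nums[idx[j]] * primes[j]);
            nums[i] = best;
            // advance every pointer that produced the minimum, so duplicates are skipped
            for (int j = 0; j < k; j++)
                if (nums[idx[j]] * primes[j] == best) idx[j]++;
        }
        return nums;
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(sequence(12, new int[]{2, 7, 13, 19})));
    }
}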
package yuku.alkitab.base.util;

import android.app.Activity;
import android.app.Dialog;
import android.content.Intent;
import android.database.Cursor;
import android.database.DatabaseUtils;
import android.database.sqlite.SQLiteDatabase;
import android.os.AsyncTask;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.util.Xml;
import com.afollestad.materialdialogs.MaterialDialog;
import gnu.trove.list.TIntList;
import gnu.trove.list.array.TIntArrayList;
import gnu.trove.map.hash.TIntLongHashMap;
import gnu.trove.map.hash.TIntObjectHashMap;
import gnu.trove.map.hash.TObjectIntHashMap;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.ext.DefaultHandler2;
import yuku.alkitab.base.App;
import yuku.alkitab.base.IsiActivity;
import yuku.alkitab.base.S;
import yuku.alkitab.base.storage.Db;
import yuku.alkitab.base.storage.InternalDb;
import yuku.alkitab.debug.R;
import yuku.alkitab.model.Label;
import yuku.alkitab.model.Marker;
import yuku.alkitab.model.Marker_Label;

import java.io.InputStream;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static yuku.alkitab.base.util.Literals.ToStringArray;

// Imported from v3. Used for once-only migration from v3 to v4.
public class BookmarkImporter {
    static final String TAG = BookmarkImporter.class.getSimpleName();

    // constants
    static class Bookmark2_Label {
        // DO NOT CHANGE CONSTANT VALUES!
        public static final String XMLTAG_Bookmark2_Label = "Bukmak2_Label";
        public static final String XMLATTR_bookmark2_relId = "bukmak2_relId";
        public static final String XMLATTR_label_relId = "label_relId";
    }

    // constants
    static class BackupManager {
        public static final String XMLTAG_Bukmak2 = "Bukmak2";
        private static final String XMLATTR_ari = "ari";
        private static final String XMLATTR_kind = "jenis";
        private static final String XMLATTR_caption = "tulisan";
        private static final String XMLATTR_addTime = "waktuTambah";
        private static final String XMLATTR_modifyTime = "waktuUbah";
        private static final String XMLATTR_relId = "relId";
        private static final String XMLVAL_bookmark = "bukmak";
        private static final String XMLVAL_note = "catatan";
        private static final String XMLVAL_highlight = "stabilo";

        public static final String XMLTAG_Label = "Label";
        private static final String XMLATTR_title = "judul";
        private static final String XMLATTR_bgColor = "warnaLatar";

        @Nullable
        public static Marker markerFromAttributes(Attributes attributes) {
            int ari = Integer.parseInt(attributes.getValue("", XMLATTR_ari));
            String kind_s = attributes.getValue("", XMLATTR_kind);
            Marker.Kind kind = kind_s.equals(XMLVAL_bookmark) ? Marker.Kind.bookmark
                    : kind_s.equals(XMLVAL_note) ? Marker.Kind.note
                    : kind_s.equals(XMLVAL_highlight) ? Marker.Kind.highlight
                    : null;
            String caption = unescapeHighUnicode(attributes.getValue("", XMLATTR_caption));
            Date addTime = Sqlitil.toDate(Integer.parseInt(attributes.getValue("", XMLATTR_addTime)));
            Date modifyTime = Sqlitil.toDate(Integer.parseInt(attributes.getValue("", XMLATTR_modifyTime)));

            if (kind == null) { // invalid
                return null;
            }

            return Marker.createNewMarker(ari, kind, caption, 1, addTime, modifyTime);
        }

        public static int getRelId(Attributes attributes) {
            String s = attributes.getValue("", XMLATTR_relId);
            return s == null ? 0 : Integer.parseInt(s);
        }

        public static Label labelFromAttributes(Attributes attributes) {
            String title = unescapeHighUnicode(attributes.getValue("", XMLATTR_title));
            String bgColor = attributes.getValue("", XMLATTR_bgColor);

            return Label.createNewLabel(title, 0, bgColor);
        }

        static ThreadLocal<Matcher> highUnicodeMatcher = new ThreadLocal<Matcher>() {
            @Override
            protected Matcher initialValue() {
                return Pattern.compile("\\[\\[~U([0-9A-Fa-f]{6})~\\]\\]").matcher("");
            }
        };

        public static String unescapeHighUnicode(String input) {
            if (input == null) return null;

            final Matcher m = highUnicodeMatcher.get();
            m.reset(input);

            StringBuffer res = new StringBuffer();
            while (m.find()) {
                String s = m.group(1);
                final int cp = Integer.parseInt(s, 16);
                m.appendReplacement(res, new String(new int[]{cp}, 0, 1));
            }
            m.appendTail(res);
            return res.toString();
        }
    }

    public static void importBookmarks(final Activity activity, @NonNull final InputStream fis, final boolean finishActivityAfterwards, final Runnable runWhenDone) {
        final MaterialDialog pd = new MaterialDialog.Builder(activity)
                .content(R.string.mengimpor_titiktiga)
                .cancelable(false)
                .progress(true, 0)
                .show();

        new AsyncTask<Boolean, Integer, Object>() {
            int count_bookmark = 0;
            int count_label = 0;

            @Override
            protected Object doInBackground(Boolean... params) {
                final List<Marker> markers = new ArrayList<>();
                final TObjectIntHashMap<Marker> markerToRelIdMap = new TObjectIntHashMap<>();
                final List<Label> labels = new ArrayList<>();
                final TObjectIntHashMap<Label> labelToRelIdMap = new TObjectIntHashMap<>();
                final TIntLongHashMap labelRelIdToAbsIdMap = new TIntLongHashMap();
                final TIntObjectHashMap<TIntList> markerRelIdToLabelRelIdsMap = new TIntObjectHashMap<>();

                try {
                    Xml.parse(fis, Xml.Encoding.UTF_8, new DefaultHandler2() {
                        @Override
                        public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
                            switch (localName) {
                                case BackupManager.XMLTAG_Bukmak2:
                                    final Marker marker = BackupManager.markerFromAttributes(attributes);
                                    if (marker != null) {
                                        markers.add(marker);
                                        final int bookmark2_relId = BackupManager.getRelId(attributes);
                                        markerToRelIdMap.put(marker, bookmark2_relId);
                                        count_bookmark++;
                                    }
                                    break;
                                case BackupManager.XMLTAG_Label: {
                                    final Label label = BackupManager.labelFromAttributes(attributes);
                                    int label_relId = BackupManager.getRelId(attributes);
                                    labels.add(label);
                                    labelToRelIdMap.put(label, label_relId);
                                    count_label++;
                                    break;
                                }
                                case Bookmark2_Label.XMLTAG_Bookmark2_Label: {
                                    final int bookmark2_relId = Integer.parseInt(attributes.getValue("", Bookmark2_Label.XMLATTR_bookmark2_relId));
                                    final int label_relId = Integer.parseInt(attributes.getValue("", Bookmark2_Label.XMLATTR_label_relId));

                                    TIntList labelRelIds = markerRelIdToLabelRelIdsMap.get(bookmark2_relId);
                                    if (labelRelIds == null) {
                                        labelRelIds = new TIntArrayList();
                                        markerRelIdToLabelRelIdsMap.put(bookmark2_relId, labelRelIds);
                                    }
                                    labelRelIds.add(label_relId);
                                    break;
                                }
                            }
                        }
                    });

                    fis.close();
                } catch (Exception e) {
                    return e;
                }

                { // create the labels that are needed, and map each relId to the label's actual id
                    final HashMap<String, Label> judulMap = new HashMap<>();
                    final List<Label> xlabelLama = S.getDb().listAllLabels();

                    for (Label labelLama : xlabelLama) {
                        judulMap.put(labelLama.title, labelLama);
                    }

                    for (Label label : labels) {
                        // check whether a label with exactly the same title already exists
                        Label labelLama = judulMap.get(label.title);
                        final int labelRelId = labelToRelIdMap.get(label);

                        if (labelLama != null) {
                            // removed from v3: updating the color of the existing label
                            labelRelIdToAbsIdMap.put(labelRelId, labelLama._id);
                            AppLog.d(TAG, "label (lama) r->a : " + labelRelId + "->" + labelLama._id);
                        } else {
                            // does not exist yet, a new one must be created
                            Label labelBaru = S.getDb().insertLabel(label.title, label.backgroundColor);
                            labelRelIdToAbsIdMap.put(labelRelId, labelBaru._id);
                            AppLog.d(TAG, "label (baru) r->a : " + labelRelId + "->" + labelBaru._id);
                        }
                    }
                }

                importBookmarks(markers, markerToRelIdMap, labelRelIdToAbsIdMap, markerRelIdToLabelRelIdsMap);

                return null;
            }

            @Override
            protected void onPostExecute(@NonNull Object result) {
                pd.dismiss();

                if (result instanceof Exception) {
                    AppLog.e(TAG, "Error when importing markers", (Throwable) result);
                    new MaterialDialog.Builder(activity)
                            .content(activity.getString(R.string.terjadi_kesalahan_ketika_mengimpor_pesan, ((Exception) result).getMessage()))
                            .positiveText(R.string.ok)
                            .show();
                } else {
                    final Dialog dialog = new MaterialDialog.Builder(activity)
                            .content(activity.getString(R.string.impor_berhasil_angka_diproses, count_bookmark, count_label))
                            .positiveText(R.string.ok)
                            .show();

                    if (finishActivityAfterwards) {
                        dialog.setOnDismissListener(dialog1 -> activity.finish());
                    }
                }

                if (runWhenDone != null) runWhenDone.run();
            }
        }.execute();
    }

    public static void importBookmarks(List<Marker> markers, TObjectIntHashMap<Marker> markerToRelIdMap, TIntLongHashMap labelRelIdToAbsIdMap, TIntObjectHashMap<TIntList> markerRelIdToLabelRelIdsMap) {
        SQLiteDatabase db = S.getDb().getWritableDatabase();
        db.beginTransaction();
        try {
            final TIntObjectHashMap<Marker> markerRelIdToMarker = new TIntObjectHashMap<>();

            { // write new markers (if not available yet)
                for (int i = 0; i < markers.size(); i++) {
                    Marker marker = markers.get(i);
                    final int marker_relId = markerToRelIdMap.get(marker);

                    // migrate: look for existing marker with same kind, ari, and content
                    try (Cursor cursor = db.query(
                            Db.TABLE_Marker,
                            null,
                            Db.Marker.ari + "=? and " + Db.Marker.kind + "=? and " + Db.Marker.caption + "=?",
                            ToStringArray(marker.ari, marker.kind.code, marker.caption),
                            null, null, null
                    )) {
                        if (cursor.moveToNext()) {
                            marker = InternalDb.markerFromCursor(cursor);
                            markers.set(i, marker);
                        } else {
                            InternalDb.insertMarker(db, marker);
                        }

                        // map it
                        markerRelIdToMarker.put(marker_relId, marker);
                    }
                }
            }

            { // now handle the marker-label assignments
                for (final int marker_relId : markerRelIdToLabelRelIdsMap.keys()) {
                    final TIntList label_relIds = markerRelIdToLabelRelIdsMap.get(marker_relId);

                    final Marker marker = markerRelIdToMarker.get(marker_relId);

                    if (marker != null) {
                        // existing labels > 0: ignore
                        // existing labels == 0: insert
                        final int existing_label_count = (int) DatabaseUtils.queryNumEntries(db, Db.TABLE_Marker_Label, Db.Marker_Label.marker_gid + "=?", ToStringArray(marker.gid));

                        if (existing_label_count == 0) {
                            for (int label_relId : label_relIds.toArray()) {
                                final long label_id = labelRelIdToAbsIdMap.get(label_relId);
                                if (label_id > 0) {
                                    final Label label = S.getDb().getLabelById(label_id);
                                    final Marker_Label marker_label = Marker_Label.createNewMarker_Label(marker.gid, label.gid);
                                    InternalDb.insertMarker_LabelIfNotExists(db, marker_label);
                                } else {
                                    AppLog.w(TAG, "label_id is invalid!: " + label_id);
                                }
                            }
                        }
                    } else {
                        AppLog.w(TAG, "wrong marker_relId: " + marker_relId);
                    }
                }
            }

            db.setTransactionSuccessful();
        } finally {
            db.endTransaction();
        }

        App.getLbm().sendBroadcast(new Intent(IsiActivity.ACTION_ATTRIBUTE_MAP_CHANGED));
    }
}
infojulio/androidbible
Alkitab/src/main/java/yuku/alkitab/base/util/BookmarkImporter.java
Java
apache-2.0
11,461
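The v3 backup format handled above escapes characters outside the basic range as [[~Uxxxxxx~]] with six hex digits, which unescapeHighUnicode reverses. A minimal standalone sketch of that decoding step; the class name and sample string are illustrative, not part of the app:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Standalone sketch of the "[[~Uxxxxxx~]]" escape scheme: each escaped
// code point is six hex digits, decoded back into the character(s) it
// represents (two chars for a surrogate pair).
public class HighUnicodeDemo {
    private static final Pattern HIGH_UNICODE =
            Pattern.compile("\\[\\[~U([0-9A-Fa-f]{6})~\\]\\]");

    static String unescape(String input) {
        Matcher m = HIGH_UNICODE.matcher(input);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
            int cp = Integer.parseInt(m.group(1), 16);
            // the decoded string never contains '\' or '$', so it is safe
            // to pass to appendReplacement unquoted
            m.appendReplacement(out, new String(new int[]{cp}, 0, 1));
        }
        m.appendTail(out);
        return out.toString();
    }

    public static void main(String[] args) {
        // U+1F600 lies outside the BMP, hence the six-digit escape.
        System.out.println(unescape("note [[~U01F600~]] end"));
    }
}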
/**
 * Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
 *
 * Please see distribution for license.
 */
package com.opengamma.core.region;

import java.util.Set;

import org.joda.beans.impl.flexi.FlexiBean;
import org.threeten.bp.ZoneId;

import com.opengamma.id.ExternalBundleIdentifiable;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.id.UniqueId;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.util.PublicAPI;
import com.opengamma.util.i18n.Country;
import com.opengamma.util.money.Currency;

/**
 * A region of the world.
 * <p>
 * Many aspects of business, algorithms and contracts are specific to a region.
 * The region may be of any size, from a municipality to a super-national group.
 * <p>
 * This interface is read-only. Implementations may be mutable.
 */
@PublicAPI
public interface Region extends UniqueIdentifiable, ExternalBundleIdentifiable {

  /**
   * Gets the unique identifier of the region.
   * <p>
   * This specifies a single version-correction of the region.
   *
   * @return the unique identifier for this region, not null within the engine
   */
  @Override
  UniqueId getUniqueId();

  /**
   * Gets the external identifier bundle that defines the region.
   * <p>
   * Each external system has one or more identifiers by which they refer to the region.
   * Some of these may be unique within that system, while others may be more descriptive.
   * This bundle stores the set of these external identifiers.
   * <p>
   * This will include the country, currency and time-zone.
   *
   * @return the bundle defining the region, not null
   */
  @Override  // override for Javadoc
  ExternalIdBundle getExternalIdBundle();

  /**
   * Gets the classification of the region.
   *
   * @return the classification of region, such as SUPER_NATIONAL or INDEPENDENT_STATE, not null
   */
  RegionClassification getClassification();

  /**
   * Gets the unique identifiers of the regions that this region is a member of.
   * For example, a country might be a member of the World, UN, European Union and NATO.
   *
   * @return the parent unique identifiers, null if this is the root entry
   */
  Set<UniqueId> getParentRegionIds();

  /**
   * Gets the country.
   *
   * @return the country, null if not applicable
   */
  Country getCountry();

  /**
   * Gets the currency.
   *
   * @return the currency, null if not applicable
   */
  Currency getCurrency();

  /**
   * Gets the time-zone. For larger regions, there can be multiple time-zones,
   * so this is only reliable for municipalities.
   *
   * @return the time-zone, null if not applicable
   */
  ZoneId getTimeZone();

  /**
   * Gets the short descriptive name of the region.
   *
   * @return the name of the region, not null
   */
  String getName();

  /**
   * Gets the full descriptive name of the region.
   *
   * @return the full name of the region, not null
   */
  String getFullName();

  /**
   * Gets the extensible data store for additional information.
   * Applications may store additional region based information here.
   *
   * @return the additional data, not null
   */
  FlexiBean getData();

}
McLeodMoores/starling
projects/core/src/main/java/com/opengamma/core/region/Region.java
Java
apache-2.0
3,168
/* $Id$
 * $URL: https://dev.almende.com/svn/abms/coala-common/src/main/java/com/almende/coala/time/NanoInstant.java $
 *
 * Part of the EU project Adapt4EE, see http://www.adapt4ee.eu/
 *
 * @license
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 *
 * Copyright (c) 2010-2013 Almende B.V.
 */
package io.coala.time;

/**
 * {@link NanoInstant} has the nano-second as base time unit
 *
 * @date $Date: 2014-06-03 14:26:09 +0200 (Tue, 03 Jun 2014) $
 * @version $Revision: 296 $
 * @author <a href="mailto:Rick@almende.org">Rick</a>
 */
public class NanoInstant extends AbstractInstant<NanoInstant> {

    /** */
    private static final long serialVersionUID = 1L;

    /** */
    // private static final Logger LOG = LogUtil.getLogger(NanoInstant.class);

    /** */
    // private static final TimeUnit BASE_UNIT = TimeUnit.NANOS;

    /** */
    public static final NanoInstant ZERO = new NanoInstant(null, 0);

    /**
     * {@link NanoInstant} constructor
     *
     * @param value
     */
    public NanoInstant(final ClockID clockID, final Number value) {
        super(clockID, value, TimeUnit.NANOS);
    }

    // /**
    //  * {@link NanoInstant} constructor
    //  *
    //  * @param value
    //  */
    // public NanoInstant(final ClockID clockID, final Number value,
    //         final TimeUnit unit)
    // {
    //     super(clockID, value, unit);
    // }
    //
    // /** @see Instant#getBaseUnit() */
    // @Override
    // public TimeUnit getBaseUnit()
    // {
    //     return BASE_UNIT;
    // }

    /** @see Instant#toUnit(TimeUnit) */
    @Override
    public NanoInstant toUnit(final TimeUnit unit) {
        throw new RuntimeException("Can't convert NanoInstant to another TimeUnit");
    }

    /** @see Instant#plus(Number) */
    @Override
    public NanoInstant plus(final Number value) {
        return new NanoInstant(getClockID(), getValue().doubleValue() + value.doubleValue());
    }
}
krevelen/coala
coala-core/src/main/java/io/coala/time/NanoInstant.java
Java
apache-2.0
2,299
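A minimal usage sketch for the instant type above, assuming the coala-core classes (AbstractInstant, ClockID, TimeUnit) are on the classpath; it only exercises members visible in the source: ZERO, plus(Number) and getValue().

// Hypothetical demo class; instants are immutable, so plus() returns a
// fresh value and leaves the receiver unchanged.
public class NanoInstantDemo {
    public static void main(String[] args) {
        NanoInstant t0 = NanoInstant.ZERO;
        NanoInstant t1 = t0.plus(2_500);  // 2500 ns later, as a new instant
        System.out.println(t0.getValue() + " -> " + t1.getValue());  // 0 -> 2500.0
    }
}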
package cat.ereza.customactivityoncrash.activity;

import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;

import cat.ereza.customactivityoncrash.CustomActivityOnCrash;

/**
 * Created by zhy on 15/8/4.
 */
public class ClearStack extends Activity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);

        Intent intent = getIntent().getParcelableExtra(CustomActivityOnCrash.KEY_CURRENT_INTENT);
        startActivity(intent);
        finish();
        Runtime.getRuntime().exit(0);
    }
}
hongyangAndroid/CustomActivityOnCrash
library/src/main/java/cat/ereza/customactivityoncrash/activity/ClearStack.java
Java
apache-2.0
596
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.cxf.tools.common; import javax.xml.namespace.QName; public final class ToolConstants { //public static final String TOOLSPECS_BASE = "/org/apache/cxf/tools/common/toolspec/toolspecs/"; public static final String TOOLSPECS_BASE = "/org/apache/cxf/tools/"; public static final String SCHEMA_URI = "http://www.w3.org/2001/XMLSchema"; public static final String XML_NAMESPACE_URI = "http://www.w3.org/XML/1998/namespace"; public static final String WSDL_NAMESPACE_URI = "http://schemas.xmlsoap.org/wsdl/"; public static final String WSA_NAMESPACE_URI = "http://www.w3.org/2005/08/addressing"; /** * Tools permit caller to pass in additional bean definitions. */ public static final String CFG_BEAN_CONFIG = "beans"; public static final String DEFAULT_TEMP_DIR = "gen_tmp"; public static final String CFG_OUTPUTDIR = "outputdir"; public static final String CFG_OUTPUTFILE = "outputfile"; public static final String CFG_WSDLURL = "wsdlurl"; public static final String CFG_WSDLLOCATION = "wsdlLocation"; public static final String CFG_WSDLLIST = "wsdlList"; public static final String CFG_NAMESPACE = "namespace"; public static final String CFG_VERBOSE = "verbose"; public static final String CFG_PORT = "port"; public static final String CFG_BINDING = "binding"; public static final String CFG_AUTORESOLVE = "autoNameResolution"; public static final String CFG_WEBSERVICE = "webservice"; public static final String CFG_SERVER = "server"; public static final String CFG_CLIENT = "client"; public static final String CFG_ALL = "all"; public static final String CFG_IMPL = "impl"; public static final String CFG_PACKAGENAME = "packagename"; public static final String CFG_JSPACKAGEPREFIX = "jspackageprefix"; public static final String CFG_NINCLUDE = "ninclude"; public static final String CFG_NEXCLUDE = "nexclude"; public static final String CFG_CMD_ARG = "args"; public static final String CFG_INSTALL_DIR = "install.dir"; public static final String CFG_PLATFORM_VERSION = "platform.version"; public static final String CFG_COMPILE = "compile"; public static final String CFG_CLASSDIR = "classdir"; public static final String CFG_EXTRA_SOAPHEADER = "exsoapheader"; public static final String CFG_DEFAULT_NS = "defaultns"; public static final String CFG_DEFAULT_EX = "defaultex"; public static final String CFG_NO_TYPES = "notypes"; public static final String CFG_XJC_ARGS = "xjc"; public static final String CFG_CATALOG = "catalog"; public static final String CFG_BAREMETHODS = "bareMethods"; public static final String CFG_ASYNCMETHODS = "asyncMethods"; public static final String CFG_MIMEMETHODS = "mimeMethods"; public static final String CFG_DEFAULT_VALUES = "defaultValues"; public static final String CFG_JAVASCRIPT_UTILS = "javascriptUtils"; public 
static final String CFG_VALIDATE_WSDL = "validate"; public static final String CFG_CREATE_XSD_IMPORTS = "createxsdimports"; /** * Front-end selection command-line option to java2ws. */ public static final String CFG_FRONTEND = "frontend"; public static final String CFG_DATABINDING = "databinding"; public static final String DEFAULT_ADDRESS = "http://localhost:9090"; // WSDL2Java Constants public static final String CFG_TYPES = "types"; public static final String CFG_INTERFACE = "interface"; public static final String CFG_NIGNOREEXCLUDE = "nignoreexclude"; public static final String CFG_ANT = "ant"; public static final String CFG_LIB_REF = "library.references"; public static final String CFG_ANT_PROP = "ant.prop"; public static final String CFG_NO_ADDRESS_BINDING = "noAddressBinding"; public static final String CFG_ALLOW_ELEMENT_REFS = "allowElementReferences"; public static final String CFG_RESERVE_NAME = "reserveClass"; public static final String CFG_FAULT_SERIAL_VERSION_UID = "faultSerialVersionUID"; public static final String CFG_EXCEPTION_SUPER = "exceptionSuper"; public static final String CFG_MARK_GENERATED = "mark-generated"; //Internal Flag to generate public static final String CFG_IMPL_CLASS = "implClass"; public static final String CFG_GEN_CLIENT = "genClient"; public static final String CFG_GEN_SERVER = "genServer"; public static final String CFG_GEN_IMPL = "genImpl"; public static final String CFG_GEN_TYPES = "genTypes"; public static final String CFG_GEN_SEI = "genSEI"; public static final String CFG_GEN_ANT = "genAnt"; public static final String CFG_GEN_SERVICE = "genService"; public static final String CFG_GEN_OVERWRITE = "overwrite"; public static final String CFG_GEN_FAULT = "genFault"; public static final String CFG_GEN_NEW_ONLY = "newonly"; // Java2WSDL Constants public static final String CFG_CLASSPATH = "classpath"; public static final String CFG_TNS = "tns"; public static final String CFG_SERVICENAME = "servicename"; public static final String CFG_SCHEMANS = "schemans"; public static final String CFG_USETYPES = "usetypes"; public static final String CFG_CLASSNAME = "classname"; public static final String CFG_PORTTYPE = "porttype"; public static final String CFG_SOURCEDIR = "sourcedir"; public static final String CFG_WSDL = "wsdl"; public static final String CFG_WRAPPERBEAN = "wrapperbean"; // WSDL2Service Constants public static final String CFG_ADDRESS = "address"; public static final String CFG_TRANSPORT = "transport"; public static final String CFG_SERVICE = "service"; public static final String CFG_BINDING_ATTR = "attrbinding"; public static final String CFG_SOAP12 = "soap12"; // WSDL2Soap Constants public static final String CFG_STYLE = "style"; public static final String CFG_USE = "use"; // XSD2WSDL Constants public static final String CFG_XSDURL = "xsdurl"; public static final String CFG_NAME = "name"; // WsdlValidator public static final String CFG_DEEP = "deep"; public static final String CFG_SCHEMA_DIR = "schemaDir"; public static final String CFG_SCHEMA_URL = "schemaURL"; public static final String CXF_SCHEMA_DIR = "cxf_schema_dir"; public static final String CXF_SCHEMAS_DIR_INJAR = "schemas/wsdl/"; public static final String CFG_SUPPRESS_WARNINGS = "suppressWarnings"; // WSDL2Java Processor Constants public static final String SEI_GENERATOR = "sei.generator"; public static final String FAULT_GENERATOR = "fault.generator"; public static final String TYPE_GENERATOR = "type.generator"; public static final String IMPL_GENERATOR = "impl.generator"; public 
static final String SVR_GENERATOR = "svr.generator"; public static final String CLT_GENERATOR = "clt.generator"; public static final String SERVICE_GENERATOR = "service.generator"; public static final String ANT_GENERATOR = "ant.generator"; public static final String HANDLER_GENERATOR = "handler.generator"; // Binding namespace public static final String NS_JAXWS_BINDINGS = "http://java.sun.com/xml/ns/jaxws"; public static final String NS_JAXB_BINDINGS = "http://java.sun.com/xml/ns/jaxb"; public static final QName JAXWS_BINDINGS = new QName(NS_JAXWS_BINDINGS, "bindings"); public static final QName JAXB_BINDINGS = new QName(NS_JAXB_BINDINGS, "bindings"); public static final String JAXWS_BINDINGS_WSDL_LOCATION = "wsdlLocation"; public static final String JAXWS_BINDING_NODE = "node"; public static final String JAXWS_BINDING_VERSION = "version"; public static final String ASYNC_METHOD_SUFFIX = "Async"; public static final String HANDLER_CHAINS_URI = "http://java.sun.com/xml/ns/javaee"; public static final String HANDLER_CHAIN = "handler-chain"; public static final String HANDLER_CHAINS = "handler-chains"; //public static final String RAW_JAXB_MODEL = "rawjaxbmodel"; // JMS address public static final String NS_JMS_ADDRESS = "http://cxf.apache.org/transports/jms"; public static final QName JMS_ADDRESS = new QName(NS_JMS_ADDRESS, "address"); public static final String JMS_ADDR_DEST_STYLE = "destinationStyle"; public static final String JMS_ADDR_JNDI_URL = "jndiProviderURL"; public static final String JMS_ADDR_JNDI_FAC = "jndiConnectionFactoryName"; public static final String JMS_ADDR_JNDI_DEST = "jndiDestinationName"; public static final String JMS_ADDR_MSG_TYPE = "messageType"; public static final String JMS_ADDR_INIT_CTX = "initialContextFactory"; public static final String JMS_ADDR_SUBSCRIBER_NAME = "durableSubscriberName"; public static final String JMS_ADDR_MSGID_TO_CORRID = "useMessageIDAsCorrelationID"; // XML Binding public static final String XMLBINDING_ROOTNODE = "rootNode"; public static final String XMLBINDING_HTTP_LOCATION = "location"; public static final String NS_XML_FORMAT = "http://cxf.apache.org/bindings/xformat"; public static final String XML_FORMAT_PREFIX = "xformat"; public static final String NS_XML_HTTP = "http://schemas.xmlsoap.org/wsdl/http/"; public static final String XML_HTTP_PREFIX = "http"; public static final QName XML_HTTP_ADDRESS = new QName(NS_XML_HTTP, "address"); public static final QName XML_FORMAT = new QName(NS_XML_FORMAT, "body"); public static final QName XML_BINDING_FORMAT = new QName(NS_XML_FORMAT, "binding"); public static final String XML_SCHEMA_COLLECTION = "xmlSchemaCollection"; public static final String PORTTYPE_MAP = "portTypeMap"; public static final String SCHEMA_TARGET_NAMESPACES = "schemaTargetNameSpaces"; public static final String WSDL_DEFINITION = "wsdlDefinition"; public static final String IMPORTED_DEFINITION = "importedDefinition"; public static final String IMPORTED_PORTTYPE = "importedPortType"; public static final String IMPORTED_SERVICE = "importedService"; public static final String BINDING_GENERATOR = "BindingGenerator"; // Tools framework public static final String FRONTEND_PLUGIN = "frontend"; public static final String DATABINDING_PLUGIN = "databinding"; public static final String RUNTIME_DATABINDING_CLASS = "databinding-class"; public static final String CFG_WSDL_VERSION = "wsdlversion"; // Suppress the code generation, in this case you can just get the generated code model public static final String CFG_SUPPRESS_GEN = 
"suppress"; public static final String DEFAULT_PACKAGE_NAME = "defaultnamespace"; //For java2ws tool public static final String SERVICE_LIST = "serviceList"; public static final String GEN_FROM_SEI = "genFromSEI"; public static final String JAXWS_FRONTEND = "jaxws"; public static final String SIMPLE_FRONTEND = "simple"; public static final String JAXB_DATABINDING = "jaxb"; public static final String AEGIS_DATABINDING = "aegis"; //For Simple FrontEnd public static final String SEI_CLASS = "seiClass"; public static final String IMPL_CLASS = "implClass"; public static final String SERVICE_NAME = "serviceName"; public static final String PORT_NAME = "portName"; public static final String DEFAULT_DATA_BINDING_NAME = "jaxb"; public static final String DATABIND_BEAN_NAME_SUFFIX = "DatabindingBean"; public static final String CLIENT_CLASS = "clientClass"; public static final String SERVER_CLASS = "serverClass"; public static final String CFG_JSPREFIXMAP = "javascriptPrefixMap"; private ToolConstants() { //utility class } }
zzsoszz/webservice_gzdx
opensource_cxf/org/apache/cxf/tools/common/ToolConstants.java
Java
apache-2.0
12,534
/** * Copyright Pravega Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.pravega.client.connection.impl; import io.netty.bootstrap.ServerBootstrap; import io.netty.buffer.Unpooled; import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPipeline; import io.netty.channel.EventLoopGroup; import io.netty.channel.epoll.EpollEventLoopGroup; import io.netty.channel.epoll.EpollServerSocketChannel; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.handler.codec.LengthFieldBasedFrameDecoder; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; import io.netty.handler.ssl.SslHandler; import io.pravega.client.ClientConfig; import io.pravega.shared.protocol.netty.CommandDecoder; import io.pravega.shared.protocol.netty.CommandEncoder; import io.pravega.shared.protocol.netty.ConnectionFailedException; import io.pravega.shared.protocol.netty.FailingReplyProcessor; import io.pravega.shared.protocol.netty.PravegaNodeUri; import io.pravega.shared.protocol.netty.WireCommands; import io.pravega.test.common.AssertExtensions; import io.pravega.test.common.SecurityConfigDefaults; import io.pravega.test.common.TestUtils; import java.io.File; import java.net.URI; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.function.Function; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLException; import javax.net.ssl.SSLParameters; import lombok.Cleanup; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; import static io.pravega.shared.metrics.MetricNotifier.NO_OP_METRIC_NOTIFIER; import static io.pravega.shared.protocol.netty.WireCommands.MAX_WIRECOMMAND_SIZE; import static io.pravega.test.common.AssertExtensions.assertThrows; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; public class ConnectionPoolingTest { @Rule public Timeout globalTimeout = Timeout.seconds(1000); boolean ssl = false; private Channel serverChannel; private int port; private final String seg = "Segment-0"; private final long offset = 1234L; private final int length = 1024; private final String data = "data"; private final Function<Long, WireCommands.ReadSegment> readRequestGenerator = id -> new WireCommands.ReadSegment(seg, offset, length, "", id); private final Function<Long, WireCommands.SegmentRead> readResponseGenerator = id -> new WireCommands.SegmentRead(seg, offset, true, false, 
Unpooled.wrappedBuffer(data.getBytes(StandardCharsets.UTF_8)), id); private class EchoServerHandler extends ChannelInboundHandlerAdapter { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { cause.printStackTrace(); ctx.close(); } @Override public void channelRead(ChannelHandlerContext ctx, Object message) { if (message instanceof WireCommands.Hello) { ctx.write(message); ctx.flush(); } else if (message instanceof WireCommands.ReadSegment) { WireCommands.ReadSegment msg = (WireCommands.ReadSegment) message; ctx.write(readResponseGenerator.apply(msg.getRequestId())); ctx.flush(); } } } @Before public void setUp() throws Exception { // Configure SSL. port = TestUtils.getAvailableListenPort(); final SslContext sslCtx; if (ssl) { try { sslCtx = SslContextBuilder.forServer( new File(SecurityConfigDefaults.TLS_SERVER_CERT_PATH), new File(SecurityConfigDefaults.TLS_SERVER_PRIVATE_KEY_PATH)) .build(); } catch (SSLException e) { throw new RuntimeException(e); } } else { sslCtx = null; } boolean nio = false; EventLoopGroup bossGroup; EventLoopGroup workerGroup; try { bossGroup = new EpollEventLoopGroup(1); workerGroup = new EpollEventLoopGroup(); } catch (ExceptionInInitializerError | UnsatisfiedLinkError | NoClassDefFoundError e) { nio = true; bossGroup = new NioEventLoopGroup(1); workerGroup = new NioEventLoopGroup(); } ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup) .channel(nio ? NioServerSocketChannel.class : EpollServerSocketChannel.class) .option(ChannelOption.SO_BACKLOG, 100) .handler(new LoggingHandler(LogLevel.INFO)) .childHandler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws Exception { ChannelPipeline p = ch.pipeline(); if (sslCtx != null) { SslHandler handler = sslCtx.newHandler(ch.alloc()); SSLEngine sslEngine = handler.engine(); SSLParameters sslParameters = sslEngine.getSSLParameters(); sslParameters.setEndpointIdentificationAlgorithm("LDAPS"); sslEngine.setSSLParameters(sslParameters); p.addLast(handler); } p.addLast(new CommandEncoder(null, NO_OP_METRIC_NOTIFIER), new LengthFieldBasedFrameDecoder(MAX_WIRECOMMAND_SIZE, 4, 4), new CommandDecoder(), new EchoServerHandler()); } }); // Start the server. serverChannel = b.bind("localhost", port).awaitUninterruptibly().channel(); } @After public void tearDown() throws Exception { serverChannel.close(); serverChannel.closeFuture(); } @Test public void testNonPooling() throws Exception { ClientConfig clientConfig = ClientConfig.builder() .controllerURI(URI.create((this.ssl ? 
"tls://" : "tcp://") + "localhost")) .trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH) .maxConnectionsPerSegmentStore(1) .build(); @Cleanup SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1); @Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory); ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10); FailingReplyProcessor rp = new FailingReplyProcessor() { @Override public void connectionDropped() { } @Override public void segmentRead(WireCommands.SegmentRead data) { msgRead.add(data); } @Override public void processingFailure(Exception error) { } @Override public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) { } }; Flow flow1 = new Flow(1, 0); @Cleanup ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join(); connection1.send(readRequestGenerator.apply(flow1.asLong())); WireCommands.SegmentRead msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); assertEquals(1, connectionPool.getActiveChannels().size()); // create a second connection, since not using a flow. @Cleanup ClientConnection connection2 = connectionPool.getClientConnection(new PravegaNodeUri("localhost", port), rp).join(); Flow flow2 = new Flow(2, 0); // send data over connection2 and verify. connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); assertEquals(1, connectionPool.getActiveChannels().size()); assertEquals(2, factory.getOpenSocketCount()); // send data over connection1 and verify. connection1.send(readRequestGenerator.apply(flow1.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); // send data over connection2 and verify. connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); // close a client connection, this should not close the channel. connection2.close(); assertThrows(ConnectionFailedException.class, () -> connection2.send(readRequestGenerator.apply(flow2.asLong()))); // verify we are able to send data over connection1. connection1.send(readRequestGenerator.apply(flow1.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); // close connection1 connection1.close(); assertThrows(ConnectionFailedException.class, () -> connection1.send(readRequestGenerator.apply(flow2.asLong()))); AssertExtensions.assertEventuallyEquals(0, () -> { connectionPool.pruneUnusedConnections(); return factory.getOpenSocketCount(); }, 10000); assertEquals(0, connectionPool.getActiveChannels().size()); } @Test public void testConnectionPooling() throws Exception { ClientConfig clientConfig = ClientConfig.builder() .controllerURI(URI.create((this.ssl ? 
"tls://" : "tcp://") + "localhost")) .trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH) .maxConnectionsPerSegmentStore(1) .build(); @Cleanup SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1); @Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory); ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10); FailingReplyProcessor rp = new FailingReplyProcessor() { @Override public void connectionDropped() { } @Override public void segmentRead(WireCommands.SegmentRead data) { msgRead.add(data); } @Override public void processingFailure(Exception error) { } @Override public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) { } }; Flow flow1 = new Flow(1, 0); @Cleanup ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join(); connection1.send(readRequestGenerator.apply(flow1.asLong())); WireCommands.SegmentRead msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); assertEquals(1, connectionPool.getActiveChannels().size()); // create a second connection, since the max number of connections is 1 this should reuse the same connection. Flow flow2 = new Flow(2, 0); CompletableFuture<ClientConnection> cf = new CompletableFuture<>(); connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp, cf); @Cleanup ClientConnection connection2 = cf.join(); // send data over connection2 and verify. connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); assertEquals(1, connectionPool.getActiveChannels().size()); assertEquals(1, factory.getOpenSocketCount()); // send data over connection1 and verify. connection1.send(readRequestGenerator.apply(flow1.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); // send data over connection2 and verify. connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); // close a client connection, this should not close the channel. connection2.close(); assertThrows(ConnectionFailedException.class, () -> connection2.send(readRequestGenerator.apply(flow2.asLong()))); // verify we are able to send data over connection1. connection1.send(readRequestGenerator.apply(flow1.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); // close connection1 connection1.close(); assertThrows(ConnectionFailedException.class, () -> connection1.send(readRequestGenerator.apply(flow2.asLong()))); AssertExtensions.assertEventuallyEquals(0, () -> { connectionPool.pruneUnusedConnections(); return factory.getOpenSocketCount(); }, 10000); assertEquals(0, connectionPool.getActiveChannels().size()); } @Test public void testPoolBalancing() throws Exception { ClientConfig clientConfig = ClientConfig.builder() .controllerURI(URI.create((this.ssl ? 
"tls://" : "tcp://") + "localhost")) .trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH) .maxConnectionsPerSegmentStore(2) .build(); @Cleanup SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1); @Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory); ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10); FailingReplyProcessor rp = new FailingReplyProcessor() { @Override public void connectionDropped() { } @Override public void segmentRead(WireCommands.SegmentRead data) { msgRead.add(data); } @Override public void processingFailure(Exception error) { } @Override public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) { } }; Flow flow1 = new Flow(1, 0); @Cleanup ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join(); connection1.send(readRequestGenerator.apply(flow1.asLong())); WireCommands.SegmentRead msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); assertEquals(1, factory.getOpenSocketCount()); // create a second connection, since the max number of connections is 2 this should not reuse the same connection. Flow flow2 = new Flow(2, 0); @Cleanup ClientConnection connection2 = connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp).join(); // send data over connection2 and verify. connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); assertEquals(2, factory.getOpenSocketCount()); assertNotEquals(((FlowClientConnection) connection1).getChannel(), ((FlowClientConnection) connection2).getChannel()); // create a second connection, since the max number of connections is 2 this should reuse the same connection. Flow flow3 = new Flow(3, 0); @Cleanup ClientConnection connection3 = connectionPool.getClientConnection(flow3, new PravegaNodeUri("localhost", port), rp).join(); // send data over connection3 and verify. connection3.send(readRequestGenerator.apply(flow3.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow3.asLong()), msg); assertEquals(2, factory.getOpenSocketCount()); assertEquals(((FlowClientConnection) connection1).getChannel(), ((FlowClientConnection) connection3).getChannel()); Flow flow4 = new Flow(3, 0); @Cleanup ClientConnection connection4 = connectionPool.getClientConnection(flow4, new PravegaNodeUri("localhost", port), rp).join(); // send data over connection3 and verify. connection3.send(readRequestGenerator.apply(flow4.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow4.asLong()), msg); assertEquals(2, factory.getOpenSocketCount()); assertEquals(2, connectionPool.getActiveChannels().size()); assertNotEquals(((FlowClientConnection) connection3).getChannel(), ((FlowClientConnection) connection4).getChannel()); assertEquals(((FlowClientConnection) connection2).getChannel(), ((FlowClientConnection) connection4).getChannel()); } @Test public void testConcurrentRequests() throws Exception { ClientConfig clientConfig = ClientConfig.builder() .controllerURI(URI.create((this.ssl ? 
"tls://" : "tcp://") + "localhost")) .trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH) .maxConnectionsPerSegmentStore(1) .build(); @Cleanup SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1); @Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory); ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10); FailingReplyProcessor rp = new FailingReplyProcessor() { @Override public void connectionDropped() { } @Override public void segmentRead(WireCommands.SegmentRead data) { msgRead.add(data); } @Override public void processingFailure(Exception error) { } @Override public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) { } }; Flow flow1 = new Flow(1, 0); ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join(); // create a second connection, since the max number of connections is 1 this should reuse the same connection. Flow flow2 = new Flow(2, 0); ClientConnection connection2 = connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp).join(); assertEquals(1, factory.getOpenSocketCount()); assertEquals(1, connectionPool.getActiveChannels().size()); connection1.send(readRequestGenerator.apply(flow1.asLong())); connection2.send(readRequestGenerator.apply(flow2.asLong())); List<WireCommands.SegmentRead> msgs = new ArrayList<WireCommands.SegmentRead>(); msgs.add(msgRead.take()); msgs.add(msgRead.take()); assertTrue(msgs.contains(readResponseGenerator.apply(flow1.asLong()))); assertTrue(msgs.contains(readResponseGenerator.apply(flow1.asLong()))); assertEquals(1, factory.getOpenSocketCount()); connection1.close(); connection2.close(); AssertExtensions.assertEventuallyEquals(0, () -> { connectionPool.pruneUnusedConnections(); return factory.getOpenSocketCount(); }, 10000); assertEquals(0, connectionPool.getActiveChannels().size()); } }
pravega/pravega
client/src/test/java/io/pravega/client/connection/impl/ConnectionPoolingTest.java
Java
apache-2.0
21,376
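The invariants the tests above assert — at most maxConnectionsPerSegmentStore sockets per endpoint, with each new flow attached to the least-loaded channel — can be sketched independently of Pravega. The class below is a hypothetical toy model of that balancing rule, not the Pravega client API:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy model: open a new channel while under the cap, otherwise attach the
// flow to whichever existing channel currently carries the fewest flows.
class ToyFlowPool {
    private final int maxConnections;
    private final Map<Integer, List<Integer>> channelToFlows = new HashMap<>();

    ToyFlowPool(int maxConnections) {
        this.maxConnections = maxConnections;
    }

    int assign(int flowId) {
        if (channelToFlows.size() < maxConnections) {
            int channel = channelToFlows.size();  // open a new channel
            channelToFlows.put(channel, new ArrayList<>());
            channelToFlows.get(channel).add(flowId);
            return channel;
        }
        // reuse the least-loaded existing channel
        int best = Collections.min(channelToFlows.keySet(),
                Comparator.comparingInt(ch -> channelToFlows.get(ch).size()));
        channelToFlows.get(best).add(flowId);
        return best;
    }

    public static void main(String[] args) {
        ToyFlowPool pool = new ToyFlowPool(2);
        System.out.println(pool.assign(1));  // 0 -> first socket
        System.out.println(pool.assign(2));  // 1 -> second socket
        System.out.println(pool.assign(3));  // 0 -> reuses a socket
    }
}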
package jp.co.omana.action;

import org.seasar.struts.annotation.Execute;

public class ServiceAction {

    @Execute(validator = false)
    public String index() {
        return "board.jsp";
    }

    @Execute(validator = false)
    public String confirm() {
        return "index.jsp";
    }

    @Execute(validator = false)
    public String finish() {
        return "index.jsp";
    }
}
ikraikra/bunsekiya
src/main/java/jp/co/omana/action/ServiceAction.java
Java
apache-2.0
387
/**
 *
 */
package com.sivalabs.demo.orders.repositories;

import org.springframework.data.jpa.repository.JpaRepository;

import com.sivalabs.demo.orders.entities.Order;

/**
 * @author Siva
 *
 */
public interface OrderRepository extends JpaRepository<Order, Integer> {

}
sivaprasadreddy/springboot-learn-by-example
chapter-09/springboot-multiple-datasources-demo/src/main/java/com/sivalabs/demo/orders/repositories/OrderRepository.java
Java
apache-2.0
274
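The empty interface above is all Spring Data JPA needs: it derives an implementation with save, findAll and the other JpaRepository methods at runtime, so no implementation class is ever written by hand. A hypothetical caller (the service class is illustrative, not part of the repository):

import java.util.List;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import com.sivalabs.demo.orders.entities.Order;
import com.sivalabs.demo.orders.repositories.OrderRepository;

// Sketch of injecting the repository and using two inherited methods.
@Service
public class OrderService {

    private final OrderRepository orderRepository;

    @Autowired
    public OrderService(OrderRepository orderRepository) {
        this.orderRepository = orderRepository;
    }

    public Order place(Order order) {
        return orderRepository.save(order);  // inherited from JpaRepository
    }

    public List<Order> all() {
        return orderRepository.findAll();    // inherited from JpaRepository
    }
}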
/************************************************************
 *
 * EaseMob CONFIDENTIAL
 * __________________
 * Copyright (C) 2013-2014 EaseMob Technologies. All rights reserved.
 *
 * NOTICE: All information contained herein is, and remains
 * the property of EaseMob Technologies.
 * Dissemination of this information or reproduction of this material
 * is strictly forbidden unless prior written permission is obtained
 * from EaseMob Technologies.
 */
package com.easemob.chatuidemo.activity;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.List;

import android.annotation.SuppressLint;
import android.app.AlertDialog;
import android.app.ProgressDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.graphics.Bitmap;
import android.graphics.PixelFormat;
import android.hardware.Camera;
import android.hardware.Camera.CameraInfo;
import android.hardware.Camera.Parameters;
import android.hardware.Camera.Size;
import android.media.MediaRecorder;
import android.media.MediaRecorder.OnErrorListener;
import android.media.MediaRecorder.OnInfoListener;
import android.media.MediaScannerConnection;
import android.media.MediaScannerConnection.MediaScannerConnectionClient;
import android.net.Uri;
import android.os.Bundle;
import android.os.Environment;
import android.os.PowerManager;
import android.os.SystemClock;
import android.text.TextUtils;
import android.view.SurfaceHolder;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.Window;
import android.view.WindowManager;
import android.widget.Button;
import android.widget.Chronometer;
import android.widget.ImageView;
import android.widget.Toast;
import android.widget.VideoView;

import com.easemob.chatuidemo.utils.CommonUtils;
import com.easemob.chatuidemo.video.util.Utils;
import com.easemob.qixin.R;
import com.easemob.util.EMLog;
import com.easemob.util.PathUtil;

public class RecorderVideoActivity extends BaseActivity implements OnClickListener, SurfaceHolder.Callback, OnErrorListener, OnInfoListener {
    private static final String TAG = "RecorderVideoActivity";
    private final static String CLASS_LABEL = "RecordActivity";
    private PowerManager.WakeLock mWakeLock;
    private ImageView btnStart;// start-recording button
    private ImageView btnStop;// stop-recording button
    private MediaRecorder mediaRecorder;// records the video
    private VideoView mVideoView;// widget that displays the video
    String localPath = "";// path of the recorded video
    private Camera mCamera;
    // preview width and height
    private int previewWidth = 480;
    private int previewHeight = 480;
    private Chronometer chronometer;
    private int frontCamera = 0;// 0 is the back camera, 1 is the front camera
    private Button btn_switch;
    Parameters cameraParameters = null;
    private SurfaceHolder mSurfaceHolder;
    int defaultVideoFrameRate = -1;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        requestWindowFeature(Window.FEATURE_NO_TITLE);// remove the title bar
        getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
                WindowManager.LayoutParams.FLAG_FULLSCREEN);// set full screen
        // use a translucent format; needed in activities that host a SurfaceView
        getWindow().setFormat(PixelFormat.TRANSLUCENT);
        setContentView(R.layout.recorder_activity);
        PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
        mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK, CLASS_LABEL);
        mWakeLock.acquire();
        initViews();
    }

    private void initViews() {
        btn_switch = (Button) findViewById(R.id.switch_btn);
        btn_switch.setOnClickListener(this);
        btn_switch.setVisibility(View.VISIBLE);
        mVideoView = (VideoView) findViewById(R.id.mVideoView);
        btnStart = (ImageView) findViewById(R.id.recorder_start);
        btnStop = (ImageView) findViewById(R.id.recorder_stop);
        btnStart.setOnClickListener(this);
        btnStop.setOnClickListener(this);
        mSurfaceHolder = mVideoView.getHolder();
        mSurfaceHolder.addCallback(this);
        mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
        chronometer = (Chronometer) findViewById(R.id.chronometer);
    }

    public void back(View view) {
        releaseRecorder();
        releaseCamera();
        finish();
    }

    @Override
    protected void onResume() {
        super.onResume();
        if (mWakeLock == null) {
            // acquire a wake lock to keep the screen on
            PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
            mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK, CLASS_LABEL);
            mWakeLock.acquire();
        }
        // if (!initCamera()) {
        // showFailDialog();
        // }
    }

    @SuppressLint("NewApi")
    private boolean initCamera() {
        try {
            if (frontCamera == 0) {
                mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
            } else {
                mCamera = Camera.open(CameraInfo.CAMERA_FACING_FRONT);
            }
            Camera.Parameters camParams = mCamera.getParameters();
            mCamera.lock();
            mSurfaceHolder = mVideoView.getHolder();
            mSurfaceHolder.addCallback(this);
            mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
            mCamera.setDisplayOrientation(90);
        } catch (RuntimeException ex) {
            EMLog.e("video", "init Camera fail " + ex.getMessage());
            return false;
        }
        return true;
    }

    private void handleSurfaceChanged() {
        if (mCamera == null) {
            finish();
            return;
        }
        boolean hasSupportRate = false;
        List<Integer> supportedPreviewFrameRates = mCamera.getParameters().getSupportedPreviewFrameRates();
        if (supportedPreviewFrameRates != null && supportedPreviewFrameRates.size() > 0) {
            Collections.sort(supportedPreviewFrameRates);
            for (int i = 0; i < supportedPreviewFrameRates.size(); i++) {
                int supportRate = supportedPreviewFrameRates.get(i);
                if (supportRate == 15) {
                    hasSupportRate = true;
                }
            }
            if (hasSupportRate) {
                defaultVideoFrameRate = 15;
            } else {
                defaultVideoFrameRate = supportedPreviewFrameRates.get(0);
            }
        }

        // get all preview resolutions supported by the camera
        List<Camera.Size> resolutionList = Utils.getResolutionList(mCamera);
        if (resolutionList != null && resolutionList.size() > 0) {
            Collections.sort(resolutionList, new Utils.ResolutionComparator());
            Camera.Size previewSize = null;
            boolean hasSize = false;
            // if the camera supports 640*480, force 640*480
            for (int i = 0; i < resolutionList.size(); i++) {
                Size size = resolutionList.get(i);
                if (size != null && size.width == 640 && size.height == 480) {
                    previewSize = size;
                    previewWidth = previewSize.width;
                    previewHeight = previewSize.height;
                    hasSize = true;
                    break;
                }
            }
            // otherwise use the middle resolution
            if (!hasSize) {
                int mediumResolution = resolutionList.size() / 2;
                if (mediumResolution >= resolutionList.size())
                    mediumResolution = resolutionList.size() - 1;
                previewSize = resolutionList.get(mediumResolution);
                previewWidth = previewSize.width;
                previewHeight = previewSize.height;
            }
        }
    }

    @Override
    protected void onPause() {
        super.onPause();
        if (mWakeLock != null) {
            mWakeLock.release();
            mWakeLock = null;
        }
    }

    @Override
    public void onClick(View view) {
        switch (view.getId()) {
        case R.id.switch_btn:
            switchCamera();
            break;
        case R.id.recorder_start:
            // start recording
            if (!startRecording())
                return;
            Toast.makeText(this, R.string.The_video_to_start, Toast.LENGTH_SHORT).show();
            btn_switch.setVisibility(View.INVISIBLE);
            btnStart.setVisibility(View.INVISIBLE);
            btnStart.setEnabled(false);
            btnStop.setVisibility(View.VISIBLE);
            // reset the rest
            chronometer.setBase(SystemClock.elapsedRealtime());
            chronometer.start();
            break;
        case R.id.recorder_stop:
            btnStop.setEnabled(false);
            // stop recording
            stopRecording();
            btn_switch.setVisibility(View.VISIBLE);
            chronometer.stop();
            btnStart.setVisibility(View.VISIBLE);
            btnStop.setVisibility(View.INVISIBLE);
            new AlertDialog.Builder(this)
                    .setMessage(R.string.Whether_to_send)
                    .setPositiveButton(R.string.ok, new DialogInterface.OnClickListener() {
                        @Override
                        public void onClick(DialogInterface dialog, int which) {
                            dialog.dismiss();
                            sendVideo(null);
                        }
                    })
                    .setNegativeButton(R.string.cancel, new DialogInterface.OnClickListener() {
                        @Override
                        public void onClick(DialogInterface dialog, int which) {
                            if (localPath != null) {
                                File file = new File(localPath);
                                if (file.exists())
                                    file.delete();
                            }
                            finish();
                        }
                    }).setCancelable(false).show();
            break;
        default:
            break;
        }
    }

    @Override
    public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
        // keep the holder obtained in onCreate assigned to surfaceHolder
        mSurfaceHolder = holder;
    }

    @Override
    public void surfaceCreated(SurfaceHolder holder) {
        if (mCamera == null) {
            if (!initCamera()) {
                showFailDialog();
                return;
            }
        }
        try {
            mCamera.setPreviewDisplay(mSurfaceHolder);
            mCamera.startPreview();
            handleSurfaceChanged();
        } catch (Exception e1) {
            EMLog.e("video", "start preview fail " + e1.getMessage());
            showFailDialog();
        }
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder arg0) {
        EMLog.v("video", "surfaceDestroyed");
    }

    public boolean startRecording() {
        if (mediaRecorder == null) {
            if (!initRecorder())
                return false;
        }
        mediaRecorder.setOnInfoListener(this);
        mediaRecorder.setOnErrorListener(this);
        mediaRecorder.start();
        return true;
    }

    @SuppressLint("NewApi")
    private boolean initRecorder() {
        if (!CommonUtils.isExitsSdcard()) {
            showNoSDCardDialog();
            return false;
        }

        if (mCamera == null) {
            if (!initCamera()) {
                showFailDialog();
                return false;
            }
        }
        mVideoView.setVisibility(View.VISIBLE);
        // TODO init button
        mCamera.stopPreview();
        mediaRecorder = new MediaRecorder();
        mCamera.unlock();
        mediaRecorder.setCamera(mCamera);
        mediaRecorder.setAudioSource(MediaRecorder.AudioSource.DEFAULT);
        // set the video source to the camera
        mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
        if (frontCamera == 1) {
            mediaRecorder.setOrientationHint(270);
        } else {
            mediaRecorder.setOrientationHint(90);
        }
        // set the container format of the recorded video: THREE_GPP is 3gp, MPEG_4 is mp4
        mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
        mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
        // set the video encoder: h263 or h264
        mediaRecorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
        // set the recording resolution; must come after setting encoder and format, otherwise it throws
        mediaRecorder.setVideoSize(previewWidth, previewHeight);
        // set the video bit rate
        mediaRecorder.setVideoEncodingBitRate(384 * 1024);
        // set the recording frame rate; must come after setting encoder and format, otherwise it throws
        if (defaultVideoFrameRate != -1) {
            mediaRecorder.setVideoFrameRate(defaultVideoFrameRate);
        }
        // set the output path of the video file
        localPath = PathUtil.getInstance().getVideoPath() + "/" + System.currentTimeMillis() + ".mp4";
        mediaRecorder.setOutputFile(localPath);
        mediaRecorder.setMaxDuration(30000);
        mediaRecorder.setPreviewDisplay(mSurfaceHolder.getSurface());

        try {
            mediaRecorder.prepare();
        } catch (IllegalStateException e) {
            e.printStackTrace();
            return false;
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
        return true;
    }

    public void stopRecording() {
        if (mediaRecorder != null) {
            mediaRecorder.setOnErrorListener(null);
            mediaRecorder.setOnInfoListener(null);
            try {
                mediaRecorder.stop();
            } catch (IllegalStateException e) {
                EMLog.e("video", "stopRecording error:" + e.getMessage());
            }
        }
        releaseRecorder();
        if (mCamera != null) {
            mCamera.stopPreview();
            releaseCamera();
        }
    }

    private void releaseRecorder() {
        if (mediaRecorder != null) {
            mediaRecorder.release();
            mediaRecorder = null;
        }
    }

    protected void releaseCamera() {
        try {
            if (mCamera != null) {
                mCamera.stopPreview();
                mCamera.release();
                mCamera = null;
            }
        } catch (Exception e) {
        }
    }

    @SuppressLint("NewApi")
    public void switchCamera() {
        if (mCamera == null) {
            return;
        }
        if (Camera.getNumberOfCameras() >= 2) {
            btn_switch.setEnabled(false);
            if (mCamera != null) {
                mCamera.stopPreview();
                mCamera.release();
                mCamera = null;
            }
            switch (frontCamera) {
            case 0:
                mCamera = Camera.open(CameraInfo.CAMERA_FACING_FRONT);
                frontCamera = 1;
                break;
            case 1:
                mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
                frontCamera = 0;
                break;
            }
            try {
                mCamera.lock();
                mCamera.setDisplayOrientation(90);
                mCamera.setPreviewDisplay(mVideoView.getHolder());
                mCamera.startPreview();
            } catch (IOException e) {
                mCamera.release();
                mCamera = null;
            }
            btn_switch.setEnabled(true);
        }
    }

    MediaScannerConnection msc = null;
    ProgressDialog progressDialog = null;

    public void sendVideo(View view) {
        if (TextUtils.isEmpty(localPath)) {
            EMLog.e("Recorder", "recorder fail please try again!");
            return;
        }
        if (msc == null)
            msc = new MediaScannerConnection(this, new MediaScannerConnectionClient() {
                @Override
                public void onScanCompleted(String path, Uri uri) {
                    EMLog.d(TAG, "scanner completed");
                    msc.disconnect();
                    progressDialog.dismiss();
                    setResult(RESULT_OK, getIntent().putExtra("uri", uri));
                    finish();
                }

                @Override
                public void onMediaScannerConnected() {
                    msc.scanFile(localPath, "video/*");
                }
            });
        if (progressDialog == null) {
            progressDialog = new ProgressDialog(this);
            progressDialog.setMessage("processing...");
            progressDialog.setCancelable(false);
        }
        progressDialog.show();
        msc.connect();
    }

    @Override
    public void onInfo(MediaRecorder mr, int what, int extra) {
        EMLog.v("video", "onInfo");
        if (what == MediaRecorder.MEDIA_RECORDER_INFO_MAX_DURATION_REACHED) {
            EMLog.v("video", "max duration reached");
            stopRecording();
            btn_switch.setVisibility(View.VISIBLE);
            chronometer.stop();
            btnStart.setVisibility(View.VISIBLE);
            btnStop.setVisibility(View.INVISIBLE);
            chronometer.stop();
            if (localPath == null) {
                return;
            }
            String st3 = getResources().getString(R.string.Whether_to_send);
            new AlertDialog.Builder(this)
                    .setMessage(st3)
                    .setPositiveButton(R.string.ok, new DialogInterface.OnClickListener() {
                        @Override
                        public void onClick(DialogInterface arg0, int arg1) {
                            arg0.dismiss();
                            sendVideo(null);
                        }
                    }).setNegativeButton(R.string.cancel, null)
                    .setCancelable(false).show();
        }
    }

    @Override
    public void onError(MediaRecorder mr, int what, int extra) {
        EMLog.e("video", "recording onError:");
        stopRecording();
        Toast.makeText(this, "Recording error has occurred. Stopping the recording", Toast.LENGTH_SHORT).show();
    }

    public void saveBitmapFile(Bitmap bitmap) {
        File file = new File(Environment.getExternalStorageDirectory(), "a.jpg");
        try {
            BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(file));
            bitmap.compress(Bitmap.CompressFormat.JPEG, 100, bos);
            bos.flush();
            bos.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        releaseCamera();
        if (mWakeLock != null) {
            mWakeLock.release();
            mWakeLock = null;
        }
    }

    @Override
    public void onBackPressed() {
        back(null);
    }

    private void showFailDialog() {
        new AlertDialog.Builder(this)
                .setTitle(R.string.prompt)
                .setMessage(R.string.Open_the_equipment_failure)
                .setPositiveButton(R.string.ok, new DialogInterface.OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        finish();
                    }
                }).setCancelable(false).show();
    }

    private void showNoSDCardDialog() {
        new AlertDialog.Builder(this)
                .setTitle(R.string.prompt)
                .setMessage("No sd card!")
                .setPositiveButton(R.string.ok, new DialogInterface.OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        finish();
                    }
                }).setCancelable(false).show();
    }
}
liyuzhao/enterpriseChat-android
src/com/easemob/chatuidemo/activity/RecorderVideoActivity.java
Java
apache-2.0
16,993
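MediaRecorder is state-driven, which is why initRecorder() above is so order-sensitive: sources first, then output format, then encoders, then size/bitrate/frame rate, then the output file, and prepare() last. A condensed sketch of the same call sequence, with the camera, preview surface and output path passed in (the helper class and names are illustrative):

import android.hardware.Camera;
import android.media.MediaRecorder;
import android.view.Surface;

// Reordering these calls throws IllegalStateException on many devices.
class RecorderSetup {
    static MediaRecorder configure(Camera camera, Surface preview, String outputPath)
            throws java.io.IOException {
        camera.unlock();                                  // hand the camera to the recorder
        MediaRecorder r = new MediaRecorder();
        r.setCamera(camera);
        r.setAudioSource(MediaRecorder.AudioSource.DEFAULT);
        r.setVideoSource(MediaRecorder.VideoSource.CAMERA);
        r.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
        r.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
        r.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
        r.setVideoSize(640, 480);                         // only after format/encoders
        r.setVideoEncodingBitRate(384 * 1024);
        r.setVideoFrameRate(15);                          // only after format/encoders
        r.setOutputFile(outputPath);
        r.setPreviewDisplay(preview);
        r.prepare();                                      // ready for start()
        return r;
    }
}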
package com.github.sergejsamsonow.codegenerator.producer.pojo.renderer;

import com.github.sergejsamsonow.codegenerator.api.producer.sc.SCMethodCodeConcatenator;
import com.github.sergejsamsonow.codegenerator.api.producer.sc.SCNewLineAndIndentationFormat;
import com.github.sergejsamsonow.codegenerator.producer.pojo.model.PojoProperty;
import com.github.sergejsamsonow.codegenerator.producer.pojo.renderer.javalang.BeanModifier;

public class JavaLangToString extends BeanModifier {

    public JavaLangToString(SCNewLineAndIndentationFormat format) {
        super(format);
    }

    @Override
    protected void writeBeforePropertiesIteration() {
        SCMethodCodeConcatenator writer = getMethodCodeWriter();
        writer.annotation("@Override");
        writer.start("public String toString() {");
        writer.code("StringBuilder builder = new StringBuilder();");
        writer.code("builder.append(\"%s (\");", getData().getClassName());
    }

    @Override
    protected void writePropertyCode(PojoProperty property) {
        SCMethodCodeConcatenator writer = getMethodCodeWriter();
        String end = isLast() ? ");" : " + \", \");";
        writer.code("builder.append(\"%s: \" + Objects.toString(%s())%s", property.getFieldName(), property.getGetterName(), end);
    }

    @Override
    protected void writeAfterPropertiesIteration() {
        SCMethodCodeConcatenator writer = getMethodCodeWriter();
        writer.code("builder.append(\")\");");
        writer.code("return builder.toString();");
        writer.end();
        writer.emptyNewLine();
    }
}
sergej-samsonow/code-generator
producer/pojo/src/main/java/com/github/sergejsamsonow/codegenerator/producer/pojo/renderer/JavaLangToString.java
Java
apache-2.0
1,597
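Reconstructed from the writer.code(...) format strings above, the generated method for a hypothetical two-property bean named Person would look like this (the renderer's output, not code from the repository):

// Generated toString(): every property but the last is followed by ", ".
@Override
public String toString() {
    StringBuilder builder = new StringBuilder();
    builder.append("Person (");
    builder.append("name: " + Objects.toString(getName()) + ", ");
    builder.append("age: " + Objects.toString(getAge()));
    builder.append(")");
    return builder.toString();
}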
/*
 * Copyright 2010-2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.autoscaling.model;

/**
 * <p>
 * The output for the TerminateInstanceInAutoScalingGroup action.
 * </p>
 */
public class TerminateInstanceInAutoScalingGroupResult {

    /**
     * A Scaling Activity.
     */
    private Activity activity;

    /**
     * A Scaling Activity.
     *
     * @return A Scaling Activity.
     */
    public Activity getActivity() {
        return activity;
    }

    /**
     * A Scaling Activity.
     *
     * @param activity A Scaling Activity.
     */
    public void setActivity(Activity activity) {
        this.activity = activity;
    }

    /**
     * A Scaling Activity.
     * <p>
     * Returns a reference to this object so that method calls can be chained together.
     *
     * @param activity A Scaling Activity.
     *
     * @return A reference to this updated object so that method calls can be chained
     *         together.
     */
    public TerminateInstanceInAutoScalingGroupResult withActivity(Activity activity) {
        this.activity = activity;
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and
     * debugging.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        sb.append("Activity: " + activity + ", ");
        sb.append("}");
        return sb.toString();
    }
}
apetresc/aws-sdk-for-java-on-gae
src/main/java/com/amazonaws/services/autoscaling/model/TerminateInstanceInAutoScalingGroupResult.java
Java
apache-2.0
2,135
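A short usage sketch of the fluent setter; the Activity here is assumed to come from some earlier Auto Scaling call:

    Activity activity = describeLatestActivity();  // hypothetical helper
    TerminateInstanceInAutoScalingGroupResult result =
            new TerminateInstanceInAutoScalingGroupResult().withActivity(activity);
    System.out.println(result);  // note: the toString() above leaves a trailing ", " before "}"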
/*
 * Copyright 2014 Alexey Andreev.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.teavm.classlib.java.util;

import org.teavm.classlib.java.io.TSerializable;
import org.teavm.classlib.java.lang.TMath;
import org.teavm.classlib.java.lang.TObject;
import org.teavm.javascript.spi.GeneratedBy;

/**
 *
 * @author Alexey Andreev
 */
public class TRandom extends TObject implements TSerializable {
    public TRandom() {
    }

    public TRandom(@SuppressWarnings("unused") long seed) {
    }

    public void setSeed(@SuppressWarnings("unused") long seed) {
    }

    protected int next(int bits) {
        return (int) (random() * (1L << TMath.min(32, bits)));
    }

    public void nextBytes(byte[] bytes) {
        for (int i = 0; i < bytes.length; ++i) {
            bytes[i] = (byte) next(8);
        }
    }

    public int nextInt() {
        return next(32);
    }

    public int nextInt(int n) {
        return (int) (random() * n);
    }

    public long nextLong() {
        // Mask the low word: OR-ing a plain int sign-extends negative
        // values and would overwrite the high 32 bits.
        return ((long) nextInt() << 32) | (nextInt() & 0xFFFFFFFFL);
    }

    public boolean nextBoolean() {
        return nextInt() % 2 == 0;
    }

    public float nextFloat() {
        return (float) random();
    }

    public double nextDouble() {
        return random();
    }

    @GeneratedBy(RandomNativeGenerator.class)
    private static native double random();
}
mpoindexter/teavm
teavm-classlib/src/main/java/org/teavm/classlib/java/util/TRandom.java
Java
apache-2.0
1,877
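The mask in nextLong() above is the important detail: composing a 64-bit value from two ints must neutralize sign extension, or a negative low word clobbers the high word. A minimal standalone demonstration:

    int hi = 0x12345678;
    int lo = 0x9ABCDEF0;                                   // negative as an int
    long wrong = ((long) hi << 32) | lo;                   // 0xFFFFFFFF9ABCDEF0 - high word lost
    long right = ((long) hi << 32) | (lo & 0xFFFFFFFFL);   // 0x123456789ABCDEF0
    System.out.printf("%016X%n%016X%n", wrong, right);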
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0 * http://www.apache.org/licenses/LICENSE-2.0 */ package net.sf.mmm.util.io.base; import net.sf.mmm.util.exception.api.NlsNullPointerException; /** * This class is similar to {@link java.nio.ByteBuffer} but a lot simpler. * * @see java.nio.ByteBuffer#wrap(byte[], int, int) * * @author Joerg Hohwiller (hohwille at users.sourceforge.net) * @since 1.1.0 */ public class ByteArrayImpl extends AbstractByteArray { private final byte[] buffer; private int minimumIndex; private int maximumIndex; /** * The constructor. * * @param capacity is the {@code length} of the internal {@link #getBytes() buffer}. */ public ByteArrayImpl(int capacity) { this(new byte[capacity], 0, -1); } /** * The constructor. * * @param buffer is the internal {@link #getBytes() buffer}. */ public ByteArrayImpl(byte[] buffer) { this(buffer, 0, buffer.length - 1); } /** * The constructor. * * @param buffer is the internal {@link #getBytes() buffer}. * @param startIndex is the {@link #getCurrentIndex() current index} as well as the {@link #getMinimumIndex() minimum * index}. * @param maximumIndex is the {@link #getMaximumIndex() maximum index}. */ public ByteArrayImpl(byte[] buffer, int startIndex, int maximumIndex) { super(); if (buffer == null) { throw new NlsNullPointerException("buffer"); } this.buffer = buffer; this.minimumIndex = startIndex; this.maximumIndex = maximumIndex; } @Override public byte[] getBytes() { return this.buffer; } @Override public int getCurrentIndex() { return this.minimumIndex; } @Override public int getMinimumIndex() { return this.minimumIndex; } @Override public int getMaximumIndex() { return this.maximumIndex; } /** * This method sets the {@link #getMaximumIndex() maximumIndex}. This may be useful if the buffer should be reused. * <br> * <b>ATTENTION:</b><br> * Be very careful and only use this method if you know what you are doing! * * @param maximumIndex is the {@link #getMaximumIndex() maximumIndex} to set. It has to be in the range from {@code 0} * ( <code>{@link #getCurrentIndex() currentIndex} - 1</code>) to <code>{@link #getBytes()}.length</code>. */ protected void setMaximumIndex(int maximumIndex) { this.maximumIndex = maximumIndex; } @Override public ByteArrayImpl createSubArray(int minimum, int maximum) { checkSubArray(minimum, maximum); return new ByteArrayImpl(this.buffer, minimum, maximum); } @Override public String toString() { return new String(this.buffer, this.minimumIndex, getBytesAvailable()); } }
m-m-m/util
io/src/main/java/net/sf/mmm/util/io/base/ByteArrayImpl.java
Java
apache-2.0
2,897
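A small usage sketch; the min/max indices are inclusive per the constructor contract above, and toString() renders the live window (this assumes getBytesAvailable() in AbstractByteArray spans minimumIndex..maximumIndex):

    byte[] data = "hello world".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
    ByteArrayImpl whole = new ByteArrayImpl(data);      // covers indices 0..data.length - 1
    ByteArrayImpl tail = whole.createSubArray(6, 10);   // shares the same backing array
    System.out.println(tail);                           // expected: "world"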
/* * Licensed to Crate under one or more contributor license agreements. * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. Crate licenses this file * to you under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may * obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. * * However, if you have executed another commercial license agreement * with Crate these terms will supersede the license and you may use the * software solely pursuant to the terms of the relevant commercial * agreement. */ package io.crate.execution.engine.collect; import io.crate.breaker.RamAccounting; import io.crate.data.BatchIterator; import io.crate.data.Row; import io.crate.execution.engine.aggregation.impl.SumAggregation; import io.crate.expression.reference.doc.lucene.BytesRefColumnReference; import io.crate.expression.reference.doc.lucene.CollectorContext; import io.crate.expression.reference.doc.lucene.LongColumnReference; import io.crate.expression.reference.doc.lucene.LuceneCollectorExpression; import io.crate.metadata.Functions; import io.crate.metadata.Reference; import io.crate.metadata.ReferenceIdent; import io.crate.metadata.RelationName; import io.crate.metadata.RowGranularity; import io.crate.metadata.functions.Signature; import io.crate.test.integration.CrateDummyClusterServiceUnitTest; import io.crate.testing.TestingRowConsumer; import io.crate.types.DataTypes; import org.apache.lucene.document.Document; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.ByteBuffersDirectory; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.junit.Before; import org.junit.Test; import java.io.IOException; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import static io.crate.testing.TestingHelpers.createNodeContext; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.instanceOf; public class DocValuesGroupByOptimizedIteratorTest extends CrateDummyClusterServiceUnitTest { private Functions functions; private IndexSearcher indexSearcher; private List<Object[]> rows = List.of( new Object[]{"1", 1L, 1L}, new Object[]{"0", 0L, 2L}, new Object[]{"1", 1L, 3L}, new Object[]{"0", 0L, 4L} ); @Before public void setup() throws IOException { var nodeContext = createNodeContext(); functions = nodeContext.functions(); var indexWriter = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig()); for (var row : rows) { Document doc = new Document(); doc.add(new SortedSetDocValuesField("x", BytesRefs.toBytesRef(row[0]))); 
doc.add(new NumericDocValuesField("y", (Long) row[1])); doc.add(new NumericDocValuesField("z", (Long) row[2])); indexWriter.addDocument(doc); } indexWriter.commit(); indexSearcher = new IndexSearcher(DirectoryReader.open(indexWriter)); } @Test public void test_group_by_doc_values_optimized_iterator_for_single_numeric_key() throws Exception { SumAggregation<?> sumAggregation = (SumAggregation<?>) functions.getQualified( Signature.aggregate( SumAggregation.NAME, DataTypes.LONG.getTypeSignature(), DataTypes.LONG.getTypeSignature() ), List.of(DataTypes.LONG), DataTypes.LONG ); var aggregationField = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); aggregationField.setName("z"); var sumDocValuesAggregator = sumAggregation.getDocValueAggregator( List.of(DataTypes.LONG), List.of(aggregationField) ); var keyExpressions = List.of(new LongColumnReference("y")); var it = DocValuesGroupByOptimizedIterator.GroupByIterator.forSingleKey( List.of(sumDocValuesAggregator), indexSearcher, new Reference( new ReferenceIdent(RelationName.fromIndexName("test"), "y"), RowGranularity.DOC, DataTypes.LONG, null, null ), keyExpressions, RamAccounting.NO_ACCOUNTING, new MatchAllDocsQuery(), new CollectorContext() ); var rowConsumer = new TestingRowConsumer(); rowConsumer.accept(it, null); assertThat( rowConsumer.getResult(), containsInAnyOrder(new Object[]{0L, 6L}, new Object[]{1L, 4L})); } @Test public void test_group_by_doc_values_optimized_iterator_for_many_keys() throws Exception { SumAggregation<?> sumAggregation = (SumAggregation<?>) functions.getQualified( Signature.aggregate( SumAggregation.NAME, DataTypes.LONG.getTypeSignature(), DataTypes.LONG.getTypeSignature() ), List.of(DataTypes.LONG), DataTypes.LONG ); var aggregationField = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); aggregationField.setName("z"); var sumDocValuesAggregator = sumAggregation.getDocValueAggregator( List.of(DataTypes.LONG), List.of(aggregationField) ); var keyExpressions = List.of(new BytesRefColumnReference("x"), new LongColumnReference("y")); var keyRefs = List.of( new Reference( new ReferenceIdent(RelationName.fromIndexName("test"), "x"), RowGranularity.DOC, DataTypes.STRING, null, null ), new Reference( new ReferenceIdent(RelationName.fromIndexName("test"), "y"), RowGranularity.DOC, DataTypes.LONG, null, null ) ); var it = DocValuesGroupByOptimizedIterator.GroupByIterator.forManyKeys( List.of(sumDocValuesAggregator), indexSearcher, keyRefs, keyExpressions, RamAccounting.NO_ACCOUNTING, new MatchAllDocsQuery(), new CollectorContext() ); var rowConsumer = new TestingRowConsumer(); rowConsumer.accept(it, null); assertThat( rowConsumer.getResult(), containsInAnyOrder(new Object[]{"0", 0L, 6L}, new Object[]{"1", 1L, 4L}) ); } @Test public void test_optimized_iterator_stop_processing_on_kill() throws Exception { Throwable expectedException = stopOnInterrupting(it -> it.kill(new InterruptedException("killed"))); assertThat(expectedException, instanceOf(InterruptedException.class)); } @Test public void test_optimized_iterator_stop_processing_on_close() throws Exception { Throwable expectedException = stopOnInterrupting(BatchIterator::close); assertThat(expectedException, instanceOf(IllegalStateException.class)); } private Throwable stopOnInterrupting(Consumer<BatchIterator<Row>> interrupt) throws Exception { CountDownLatch waitForLoadNextBatch = new CountDownLatch(1); CountDownLatch pauseOnDocumentCollecting = new CountDownLatch(1); CountDownLatch batchLoadingCompleted = new 
CountDownLatch(1); BatchIterator<Row> it = createBatchIterator(() -> { waitForLoadNextBatch.countDown(); try { pauseOnDocumentCollecting.await(5, TimeUnit.SECONDS); } catch (InterruptedException e) { throw new RuntimeException(e); } }); AtomicReference<Throwable> exception = new AtomicReference<>(); Thread t = new Thread(() -> { try { it.loadNextBatch().whenComplete((r, e) -> { if (e != null) { exception.set(e.getCause()); } batchLoadingCompleted.countDown(); }); } catch (Exception e) { exception.set(e); } }); t.start(); waitForLoadNextBatch.await(5, TimeUnit.SECONDS); interrupt.accept(it); pauseOnDocumentCollecting.countDown(); batchLoadingCompleted.await(5, TimeUnit.SECONDS); return exception.get(); } private BatchIterator<Row> createBatchIterator(Runnable onNextReader) { return DocValuesGroupByOptimizedIterator.GroupByIterator.getIterator( List.of(), indexSearcher, List.of(new LuceneCollectorExpression<>() { @Override public void setNextReader(LeafReaderContext context) { onNextReader.run(); } @Override public Object value() { return null; } }), RamAccounting.NO_ACCOUNTING, (states, key) -> { }, (expressions) -> expressions.get(0).value(), (key, cells) -> cells[0] = key, new MatchAllDocsQuery(), new CollectorContext() ); } }
EvilMcJerkface/crate
server/src/test/java/io/crate/execution/engine/collect/DocValuesGroupByOptimizedIteratorTest.java
Java
apache-2.0
10,272
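The expected groups in the assertions above follow directly from the rows fixture; a sketch (inside the test class) of the arithmetic, grouping by the y column and summing z:

    // rows: ("1", 1, 1), ("0", 0, 2), ("1", 1, 3), ("0", 0, 4)
    java.util.Map<Long, Long> sums = new java.util.HashMap<>();
    for (Object[] row : rows) {
        sums.merge((Long) row[1], (Long) row[2], Long::sum);
    }
    // sums == {0=6, 1=4}, matching containsInAnyOrder([0, 6], [1, 4])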
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespaclient; public class ClusterDef { private final String name; public ClusterDef(String name) { this.name = name; } public String getName() { return name; } public String getRoute() { return "[Content:cluster=" + name + "]"; } }
vespa-engine/vespa
vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterDef.java
Java
apache-2.0
372
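Usage is a one-liner; getRoute() produces a Document API routing expression:

    ClusterDef cluster = new ClusterDef("music");
    System.out.println(cluster.getRoute());   // prints: [Content:cluster=music]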
/* * Copyright (C) 2018 the original author or authors. * * This file is part of jBB Application Project. * * Licensed under the Apache License, Version 2.0 (the "License"); * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 */ package org.jbb.security.rest.oauth.client; import io.swagger.annotations.ApiModel; import lombok.AccessLevel; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Getter; import lombok.NoArgsConstructor; import lombok.Setter; @Getter @Setter @Builder @ApiModel("OAuthClientSecret") @NoArgsConstructor(access = AccessLevel.PUBLIC) @AllArgsConstructor(access = AccessLevel.PRIVATE) public class ClientSecretDto { private String clientSecret; }
jbb-project/jbb
domain-rest/jbb-security-rest/src/main/java/org/jbb/security/rest/oauth/client/ClientSecretDto.java
Java
apache-2.0
751
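With Lombok's @Builder (plus the public no-args and private all-args constructors declared above), typical construction looks like:

    ClientSecretDto dto = ClientSecretDto.builder()
            .clientSecret("s3cr3t-value")   // hypothetical secret value
            .build();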
package weixin.popular.bean.scan.crud; import weixin.popular.bean.scan.base.ProductGet; import weixin.popular.bean.scan.info.BrandInfo; public class ProductCreate extends ProductGet { private BrandInfo brand_info; public BrandInfo getBrand_info() { return brand_info; } public void setBrand_info(BrandInfo brand_info) { this.brand_info = brand_info; } }
liyiorg/weixin-popular
src/main/java/weixin/popular/bean/scan/crud/ProductCreate.java
Java
apache-2.0
395
package com.mattinsler.guiceymongo.data.query; import org.bson.BSON; /** * Created by IntelliJ IDEA. * User: mattinsler * Date: 12/29/10 * Time: 3:28 AM * To change this template use File | Settings | File Templates. */ public enum BSONType { Double(BSON.NUMBER), String(BSON.STRING), Object(BSON.OBJECT), Array(BSON.ARRAY), BinaryData(BSON.BINARY), ObjectId(BSON.OID), Boolean(BSON.BOOLEAN), Date(BSON.DATE), Null(BSON.NULL), RegularExpression(BSON.REGEX), Code(BSON.CODE), Symbol(BSON.SYMBOL), CodeWithScope(BSON.CODE_W_SCOPE), Integer(BSON.NUMBER_INT), Timestamp(BSON.TIMESTAMP), Long(BSON.NUMBER_LONG), MinKey(BSON.MINKEY), MaxKey(BSON.MAXKEY); private final byte _typeCode; BSONType(byte typeCode) { _typeCode = typeCode; } byte getTypeCode() { return _typeCode; } }
mattinsler/guiceymongo
src/main/java/com/mattinsler/guiceymongo/data/query/BSONType.java
Java
apache-2.0
891
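getTypeCode() is package-private, so the mapping is only reachable from com.mattinsler.guiceymongo.data.query itself; a same-package sketch:

    byte oid = BSONType.ObjectId.getTypeCode();   // == org.bson.BSON.OID (0x07 in the BSON spec)
    byte str = BSONType.String.getTypeCode();     // == org.bson.BSON.STRING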
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2017 Serge Rider (serge@jkiss.org) * Copyright (C) 2011-2012 Eugene Fradkin (eugene.fradkin@gmail.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ext.oracle.views; import org.eclipse.swt.SWT; import org.eclipse.swt.layout.GridData; import org.eclipse.swt.widgets.*; import org.jkiss.dbeaver.core.DBeaverCore; import org.jkiss.dbeaver.ext.oracle.model.OracleConstants; import org.jkiss.dbeaver.model.preferences.DBPPreferenceStore; import org.jkiss.dbeaver.model.DBPDataSourceContainer; import org.jkiss.dbeaver.ui.UIUtils; import org.jkiss.dbeaver.ui.preferences.PreferenceStoreDelegate; import org.jkiss.dbeaver.ui.preferences.TargetPrefPage; import org.jkiss.dbeaver.utils.PrefUtils; /** * PrefPageOracle */ public class PrefPageOracle extends TargetPrefPage { public static final String PAGE_ID = "org.jkiss.dbeaver.preferences.oracle.general"; //$NON-NLS-1$ private Text explainTableText; private Button rowidSupportCheck; private Button enableDbmsOuputCheck; public PrefPageOracle() { super(); setPreferenceStore(new PreferenceStoreDelegate(DBeaverCore.getGlobalPreferenceStore())); } @Override protected boolean hasDataSourceSpecificOptions(DBPDataSourceContainer dataSourceDescriptor) { DBPPreferenceStore store = dataSourceDescriptor.getPreferenceStore(); return store.contains(OracleConstants.PREF_EXPLAIN_TABLE_NAME) || store.contains(OracleConstants.PREF_SUPPORT_ROWID) || store.contains(OracleConstants.PREF_DBMS_OUTPUT) ; } @Override protected boolean supportsDataSourceSpecificOptions() { return true; } @Override protected Control createPreferenceContent(Composite parent) { Composite composite = UIUtils.createPlaceholder(parent, 1); { Group planGroup = UIUtils.createControlGroup(composite, "Execution plan", 2, GridData.FILL_HORIZONTAL, 0); Label descLabel = new Label(planGroup, SWT.WRAP); descLabel.setText("By default plan table in current or SYS schema will be used.\nYou may set some particular fully qualified plan table name here."); GridData gd = new GridData(GridData.HORIZONTAL_ALIGN_BEGINNING); gd.horizontalSpan = 2; descLabel.setLayoutData(gd); explainTableText = UIUtils.createLabelText(planGroup, "Plan table", "", SWT.BORDER, new GridData(GridData.FILL_HORIZONTAL)); } { Group planGroup = UIUtils.createControlGroup(composite, "Misc", 2, GridData.FILL_HORIZONTAL, 0); rowidSupportCheck = UIUtils.createLabelCheckbox(planGroup, "Use ROWID to identify rows", true); enableDbmsOuputCheck = UIUtils.createLabelCheckbox(planGroup, "Enable DBMS Output", true); } return composite; } @Override protected void loadPreferences(DBPPreferenceStore store) { explainTableText.setText(store.getString(OracleConstants.PREF_EXPLAIN_TABLE_NAME)); rowidSupportCheck.setSelection(store.getBoolean(OracleConstants.PREF_SUPPORT_ROWID)); enableDbmsOuputCheck.setSelection(store.getBoolean(OracleConstants.PREF_DBMS_OUTPUT)); } @Override protected void savePreferences(DBPPreferenceStore store) { store.setValue(OracleConstants.PREF_EXPLAIN_TABLE_NAME, 
explainTableText.getText()); store.setValue(OracleConstants.PREF_SUPPORT_ROWID, rowidSupportCheck.getSelection()); store.setValue(OracleConstants.PREF_DBMS_OUTPUT, enableDbmsOuputCheck.getSelection()); PrefUtils.savePreferenceStore(store); } @Override protected void clearPreferences(DBPPreferenceStore store) { store.setToDefault(OracleConstants.PREF_EXPLAIN_TABLE_NAME); store.setToDefault(OracleConstants.PREF_SUPPORT_ROWID); store.setToDefault(OracleConstants.PREF_DBMS_OUTPUT); } @Override protected String getPropertyPageID() { return PAGE_ID; } }
ruspl-afed/dbeaver
plugins/org.jkiss.dbeaver.ext.oracle/src/org/jkiss/dbeaver/ext/oracle/views/PrefPageOracle.java
Java
apache-2.0
4,753
package com.winsun.fruitmix.model; /** * Created by Administrator on 2016/7/6. */ public class Equipment { private String serviceName; private String host; private int port; public Equipment(String serviceName, String host, int port) { this.serviceName = serviceName; this.host = host; this.port = port; } public Equipment() { } public String getServiceName() { return serviceName; } public void setServiceName(String serviceName) { this.serviceName = serviceName; } public String getHost() { return host; } public void setHost(String host) { this.host = host; } public int getPort() { return port; } public void setPort(int port) { this.port = port; } }
andywu91/fruitMix-android
app/src/main/java/com/winsun/fruitmix/model/Equipment.java
Java
apache-2.0
815
package com.wangshan.service.impl; import com.wangshan.dao.UserDao; import com.wangshan.models.User; import com.wangshan.service.ValidateService; import com.wangshan.utils.gabriel.EncryptUtil; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; /** * Created by Administrator on 2015/11/15. */ @Service public class ValidateServiceImpl implements ValidateService{ @Autowired private UserDao userDao; @Override public Boolean validatePassword(String email, String password){ User user = userDao.getUserByEmail(email); if(user != null && new EncryptUtil().encrypt(password + "-" + user.getSalt(), "SHA-1").equals(user.getPassword())){ return true; } else { return false; } } @Override public Boolean validateMobileRepeat(String mobile){ return false; } @Override public Boolean validateEmailRepeat(String email){ return false; } }
sanyiwangshan/my_space
backend/src/main/java/com/wangshan/service/impl/ValidateServiceImpl.java
Java
apache-2.0
1,047
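validatePassword() hashes "password-salt" with SHA-1 and compares the result to the stored digest. A rough JDK-only equivalent of that check (EncryptUtil is the project's own helper; hex output is an assumption about its encoding):

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;

    static String sha1Hex(String password, String salt) throws Exception {
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        byte[] digest = md.digest((password + "-" + salt).getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));   // two lower-case hex digits per byte
        }
        return hex.toString();                      // compare against user.getPassword()
    }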
/* ###
 * IP: GHIDRA
 * REVIEWED: YES
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ghidra.util.prop;

import ghidra.util.*;

import java.io.*;

/**
 * Handles general storage and retrieval of saveable objects indexed by long
 * keys.
 *
 */
public class SaveableObjectPropertySet extends PropertySet {
	private final static long serialVersionUID = 1;

	/**
	 * Constructor for SaveableObjectPropertySet.
	 * @param name the name associated with this property set.
	 */
	public SaveableObjectPropertySet(String name, Class<?> objectClass) {
		super(name, objectClass);
		if (!Saveable.class.isAssignableFrom(objectClass)) {
			throw new IllegalArgumentException("Class " + objectClass +
				" does not implement the Saveable interface");
		}
		try {
			objectClass.newInstance();
		} catch (Exception e) {
			throw new IllegalArgumentException("Class " + objectClass +
				" must be public and have a public, no args, constructor");
		}
	}

	/**
	 * @see PropertySet#getDataSize()
	 */
	@Override
	public int getDataSize() {
		return 20;
	}

	/**
	 * Stores a saveable object at the given index.  Any object currently at
	 * that index will be replaced by the new object.
	 * @param index the index at which to store the saveable object.
	 * @param value the saveable object to store.
	 */
	public void putObject(long index, Saveable value) {
		PropertyPage page = getOrCreatePage(getPageID(index));
		int n = page.getSize();
		page.addSaveableObject(getPageOffset(index), value);
		numProperties += page.getSize() - n;
	}

	/**
	 * Retrieves the saveable object stored at the given index.
	 * @param index the index at which to retrieve the saveable object.
	 * @return the saveable object stored at the given index or null if no
	 * object is stored at the index.
	 */
	public Saveable getObject(long index) {
		PropertyPage page = getPage(getPageID(index));
		if (page != null) {
			return page.getSaveableObject(getPageOffset(index));
		}
		return null;
	}

	/* (non-Javadoc)
	 * @see ghidra.util.prop.PropertySet#moveIndex(long, long)
	 */
	@Override
	protected void moveIndex(long from, long to) {
		Saveable value = getObject(from);
		remove(from);
		putObject(to, value);
	}

	/**
	 * saves the property at the given index to the given output stream.
	 */
	@Override
	protected void saveProperty(ObjectOutputStream oos, long index) throws IOException {
		Saveable obj = getObject(index);
		oos.writeObject(obj.getClass().getName());
		obj.save(new ObjectStorageStreamAdapter(oos));
	}

	/**
	 * restores the property from the input stream to the given index.
	 */
	@Override
	protected void restoreProperty(ObjectInputStream ois, long index)
		throws IOException, ClassNotFoundException {
		try {
			String className = (String) ois.readObject();
			Class<?> c = Class.forName(className);
			Saveable obj = (Saveable) c.newInstance();
			obj.restore(new ObjectStorageStreamAdapter(ois));
			putObject(index, obj);
		} catch (Exception e) {
			Msg.showError(this, null, null, null, e);
		}
	}

	/**
	 *
	 * @see ghidra.util.prop.PropertySet#applyValue(PropertyVisitor, long)
	 */
	@Override
	public void applyValue(PropertyVisitor visitor, long addr) {
		Saveable obj = getObject(addr);
		if (obj != null) {
			visitor.visit(obj);
		}
	}
}
NationalSecurityAgency/ghidra
Ghidra/Framework/Generic/src/main/java/ghidra/util/prop/SaveableObjectPropertySet.java
Java
apache-2.0
3,858
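A usage sketch, assuming a hypothetical Saveable implementation MyNote with the public no-args constructor the constructor check above demands:

    SaveableObjectPropertySet notes = new SaveableObjectPropertySet("Notes", MyNote.class);
    notes.putObject(0x00401000L, new MyNote("entry point"));   // MyNote is hypothetical
    MyNote note = (MyNote) notes.getObject(0x00401000L);       // null if nothing is stored there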
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.seda; import org.apache.camel.CamelExecutionException; import org.apache.camel.ContextTestSupport; import org.apache.camel.builder.RouteBuilder; /** * @version */ public class SedaInOutWithErrorDeadLetterChannelTest extends ContextTestSupport { public void testInOutWithErrorUsingDLC() throws Exception { getMockEndpoint("mock:result").expectedMessageCount(0); getMockEndpoint("mock:dead").expectedMessageCount(1); try { template.requestBody("direct:start", "Hello World", String.class); fail("Should have thrown an exception"); } catch (CamelExecutionException e) { assertIsInstanceOf(IllegalArgumentException.class, e.getCause()); assertEquals("Damn I cannot do this", e.getCause().getMessage()); } assertMockEndpointsSatisfied(); } @Override protected RouteBuilder createRouteBuilder() throws Exception { return new RouteBuilder() { @Override public void configure() throws Exception { errorHandler(deadLetterChannel("mock:dead").maximumRedeliveries(2).redeliveryDelay(0).handled(false)); from("direct:start").to("seda:foo"); from("seda:foo").transform(constant("Bye World")) .throwException(new IllegalArgumentException("Damn I cannot do this")) .to("mock:result"); } }; } }
everttigchelaar/camel-svn
camel-core/src/test/java/org/apache/camel/component/seda/SedaInOutWithErrorDeadLetterChannelTest.java
Java
apache-2.0
2,341
package com.asura.monitor.platform.dao; import com.asura.framework.base.paging.PagingResult; import com.asura.framework.base.paging.SearchMap; import com.asura.framework.dao.mybatis.base.MybatisDaoContext; import com.asura.framework.dao.mybatis.paginator.domain.PageBounds; import com.asura.common.dao.BaseDao; import com.asura.monitor.platform.entity.MonitorPlatformServerEntity; import org.springframework.stereotype.Repository; import javax.annotation.Resource; /** * <p></p> * <p/> * <PRE> * <BR> * <BR>----------------------------------------------- * <BR> * </PRE> * * @author zhaozq14 * @version 1.0 * @date 2016-11-07 11:35:05 * @since 1.0 */ @Repository("com.asura.monitor.configure.dao.MonitorPlatformServerDao") public class MonitorPlatformServerDao extends BaseDao<MonitorPlatformServerEntity>{ @Resource(name="monitor.MybatisDaoContext") private MybatisDaoContext mybatisDaoContext; /** * * @param searchMap * @param pageBounds * @return */ public PagingResult<MonitorPlatformServerEntity> findAll(SearchMap searchMap, PageBounds pageBounds, String sqlId){ return mybatisDaoContext.findForPage(this.getClass().getName()+"."+sqlId,MonitorPlatformServerEntity.class,searchMap,pageBounds); } }
AsuraTeam/monitor
server/src/main/java/com/asura/monitor/platform/dao/MonitorPlatformServerDao.java
Java
apache-2.0
1,279
/** * Jakarta Bean Validation TCK * * License: Apache License, Version 2.0 * See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>. */ package org.hibernate.beanvalidation.tck.tests.constraints.constraintdefinition; import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertNoViolations; import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertThat; import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.violationOf; import static org.testng.Assert.assertEquals; import java.util.Set; import jakarta.validation.ConstraintViolation; import jakarta.validation.Validator; import jakarta.validation.constraints.Size; import jakarta.validation.groups.Default; import jakarta.validation.metadata.ConstraintDescriptor; import org.hibernate.beanvalidation.tck.beanvalidation.Sections; import org.hibernate.beanvalidation.tck.tests.AbstractTCKTest; import org.hibernate.beanvalidation.tck.util.TestUtil; import org.jboss.arquillian.container.test.api.Deployment; import org.jboss.shrinkwrap.api.spec.WebArchive; import org.jboss.test.audit.annotations.SpecAssertion; import org.jboss.test.audit.annotations.SpecVersion; import org.testng.annotations.Test; /** * @author Hardy Ferentschik * @author Guillaume Smet */ @SpecVersion(spec = "beanvalidation", version = "3.0.0") public class ConstraintDefinitionsTest extends AbstractTCKTest { @Deployment public static WebArchive createTestArchive() { return webArchiveBuilder() .withTestClassPackage( ConstraintDefinitionsTest.class ) .build(); } @Test @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES, id = "a") @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a") public void testConstraintWithCustomAttributes() { Validator validator = TestUtil.getValidatorUnderTest(); Set<ConstraintDescriptor<?>> descriptors = validator.getConstraintsForClass( Person.class ) .getConstraintsForProperty( "lastName" ) .getConstraintDescriptors(); assertEquals( descriptors.size(), 2, "There should be two constraints on the lastName property." ); for ( ConstraintDescriptor<?> descriptor : descriptors ) { assertEquals( descriptor.getAnnotation().annotationType().getName(), AlwaysValid.class.getName(), "Wrong annotation type." ); } Set<ConstraintViolation<Person>> constraintViolations = validator.validate( new Person( "John", "Doe" ) ); assertThat( constraintViolations ).containsOnlyViolations( violationOf( AlwaysValid.class ) ); } @Test @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a") @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "b") public void testRepeatableConstraint() { Validator validator = TestUtil.getValidatorUnderTest(); Set<ConstraintDescriptor<?>> descriptors = validator.getConstraintsForClass( Movie.class ) .getConstraintsForProperty( "title" ) .getConstraintDescriptors(); assertEquals( descriptors.size(), 2, "There should be two constraints on the title property." ); for ( ConstraintDescriptor<?> descriptor : descriptors ) { assertEquals( descriptor.getAnnotation().annotationType().getName(), Size.class.getName(), "Wrong annotation type." 
); } Set<ConstraintViolation<Movie>> constraintViolations = validator.validate( new Movie( "Title" ) ); assertNoViolations( constraintViolations ); constraintViolations = validator.validate( new Movie( "A" ) ); assertThat( constraintViolations ).containsOnlyViolations( violationOf( Size.class ) ); constraintViolations = validator.validate( new Movie( "A movie title far too long that does not respect the constraint" ) ); assertThat( constraintViolations ).containsOnlyViolations( violationOf( Size.class ) ); } @Test @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES_GROUPS, id = "d") public void testDefaultGroupAssumedWhenNoGroupsSpecified() { Validator validator = TestUtil.getValidatorUnderTest(); ConstraintDescriptor<?> descriptor = validator.getConstraintsForClass( Person.class ) .getConstraintsForProperty( "firstName" ) .getConstraintDescriptors() .iterator() .next(); Set<Class<?>> groups = descriptor.getGroups(); assertEquals( groups.size(), 1, "The group set should only contain one entry." ); assertEquals( groups.iterator().next(), Default.class, "The Default group should be returned." ); } }
beanvalidation/beanvalidation-tck
tests/src/main/java/org/hibernate/beanvalidation/tck/tests/constraints/constraintdefinition/ConstraintDefinitionsTest.java
Java
apache-2.0
4,672
/* * Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hazelcast.map.impl; import com.hazelcast.config.MaxSizeConfig; import com.hazelcast.core.IFunction; import com.hazelcast.nio.serialization.Data; import com.hazelcast.nio.serialization.SerializableByConvention; import com.hazelcast.spi.partition.IPartitionService; import com.hazelcast.util.CollectionUtil; import com.hazelcast.util.UnmodifiableIterator; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; import static com.hazelcast.config.MaxSizeConfig.MaxSizePolicy.PER_NODE; import static com.hazelcast.util.MapUtil.createHashMap; import static com.hazelcast.util.Preconditions.checkNotNull; public final class MapKeyLoaderUtil { private MapKeyLoaderUtil() { } /** * Returns the role for the map key loader based on the passed parameters. * The partition owner of the map name partition is the sender. * The first replica of the map name partition is the sender backup. * Other partition owners are receivers and other partition replicas do * not have a role. * * @param isPartitionOwner if this is the partition owner * @param isMapNamePartition if this is the partition containing the map name * @param isMapNamePartitionFirstReplica if this is the first replica for the partition * containing the map name * @return the map key loader role */ static MapKeyLoader.Role assignRole(boolean isPartitionOwner, boolean isMapNamePartition, boolean isMapNamePartitionFirstReplica) { if (isMapNamePartition) { if (isPartitionOwner) { // map-name partition owner is the SENDER return MapKeyLoader.Role.SENDER; } else { if (isMapNamePartitionFirstReplica) { // first replica of the map-name partition is the SENDER_BACKUP return MapKeyLoader.Role.SENDER_BACKUP; } else { // other replicas of the map-name partition do not have a role return MapKeyLoader.Role.NONE; } } } else { // ordinary partition owners are RECEIVERs, otherwise no role return isPartitionOwner ? MapKeyLoader.Role.RECEIVER : MapKeyLoader.Role.NONE; } } /** * Transforms an iterator of entries to an iterator of entry batches * where each batch is represented as a map from entry key to * list of entry values. * The maximum size of the entry value list in any batch is * determined by the {@code maxBatch} parameter. Only one * entry value list may have the {@code maxBatch} size, other * lists will be smaller. 
*
     * @param entries the entries to be batched
     * @param maxBatch the maximum size of an entry group in a single batch
     * @return an iterator with entry batches
     */
    static Iterator<Map<Integer, List<Data>>> toBatches(final Iterator<Entry<Integer, Data>> entries,
                                                        final int maxBatch) {
        return new UnmodifiableIterator<Map<Integer, List<Data>>>() {
            @Override
            public boolean hasNext() {
                return entries.hasNext();
            }

            @Override
            public Map<Integer, List<Data>> next() {
                if (!entries.hasNext()) {
                    throw new NoSuchElementException();
                }
                return nextBatch(entries, maxBatch);
            }
        };
    }

    /**
     * Groups entries by the entry key. The entries will be grouped
     * until at least one group has up to {@code maxBatch}
     * entries or until the {@code entries} have been exhausted.
     *
     * @param entries  the entries to be grouped by key
     * @param maxBatch the maximum size of a group
     * @return the grouped entries by entry key
     */
    private static Map<Integer, List<Data>> nextBatch(Iterator<Entry<Integer, Data>> entries, int maxBatch) {
        Map<Integer, List<Data>> batch = createHashMap(maxBatch);
        while (entries.hasNext()) {
            Entry<Integer, Data> e = entries.next();
            List<Data> partitionKeys = CollectionUtil.addToValueList(batch, e.getKey(), e.getValue());
            if (partitionKeys.size() >= maxBatch) {
                break;
            }
        }
        return batch;
    }

    /**
     * Returns the configured maximum entry count per node if the max
     * size policy is {@link MaxSizeConfig.MaxSizePolicy#PER_NODE}
     * and is not the default, otherwise returns {@code -1}.
     *
     * @param maxSizeConfig the max size configuration
     * @return the max size per node or {@code -1} if not configured or is the default
     * @see MaxSizeConfig#getMaxSizePolicy()
     * @see MaxSizeConfig#getSize()
     */
    public static int getMaxSizePerNode(MaxSizeConfig maxSizeConfig) {
        // max size or -1 if policy is different or not set
        double maxSizePerNode = maxSizeConfig.getMaxSizePolicy() == PER_NODE ? maxSizeConfig.getSize() : -1D;

        if (maxSizePerNode == MaxSizeConfig.DEFAULT_MAX_SIZE) {
            // unlimited
            return -1;
        }
        return (int) maxSizePerNode;
    }

    /**
     * Returns an {@link IFunction} that transforms a {@link Data}
     * parameter to a map entry where the key is the partition ID
     * and the value is the provided parameter.
     *
     * @param partitionService the partition service
     */
    static IFunction<Data, Entry<Integer, Data>> toPartition(final IPartitionService partitionService) {
        return new DataToEntry(partitionService);
    }

    @SerializableByConvention
    private static class DataToEntry implements IFunction<Data, Entry<Integer, Data>> {

        private final IPartitionService partitionService;

        public DataToEntry(IPartitionService partitionService) {
            this.partitionService = partitionService;
        }

        @Override
        public Entry<Integer, Data> apply(Data input) {
            // Null-pointer here, in case of null key loaded by MapLoader
            checkNotNull(input, "Key loaded by a MapLoader cannot be null.");
            Integer partition = partitionService.getPartitionId(input);
            return new MapEntrySimple<Integer, Data>(partition, input);
        }
    }
}
dbrimley/hazelcast
hazelcast/src/main/java/com/hazelcast/map/impl/MapKeyLoaderUtil.java
Java
apache-2.0
7,145
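assignRole() reads most clearly as a truth table; a same-package sketch of the four interesting combinations, consistent with the branches above:

    MapKeyLoader.Role sender   = MapKeyLoaderUtil.assignRole(true,  true,  false);  // SENDER
    MapKeyLoader.Role backup   = MapKeyLoaderUtil.assignRole(false, true,  true);   // SENDER_BACKUP
    MapKeyLoader.Role receiver = MapKeyLoaderUtil.assignRole(true,  false, false);  // RECEIVER
    MapKeyLoader.Role none     = MapKeyLoaderUtil.assignRole(false, false, false);  // NONE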
package org.targettest.org.apache.lucene.index; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.Map; import org.targettest.org.apache.lucene.document.Document; import org.targettest.org.apache.lucene.document.FieldSelector; import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermDocs; import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermEnum; import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermPositions; import org.targettest.org.apache.lucene.search.DefaultSimilarity; import org.targettest.org.apache.lucene.search.FieldCache; /** An IndexReader which reads multiple indexes, appending * their content. */ public class MultiReader extends IndexReader implements Cloneable { protected IndexReader[] subReaders; private int[] starts; // 1st docno for each segment private boolean[] decrefOnClose; // remember which subreaders to decRef on close private Map<String,byte[]> normsCache = new HashMap<String,byte[]>(); private int maxDoc = 0; private int numDocs = -1; private boolean hasDeletions = false; /** * <p>Construct a MultiReader aggregating the named set of (sub)readers. * Directory locking for delete, undeleteAll, and setNorm operations is * left to the subreaders. </p> * <p>Note that all subreaders are closed if this Multireader is closed.</p> * @param subReaders set of (sub)readers * @throws IOException */ public MultiReader(IndexReader... subReaders) { initialize(subReaders, true); } /** * <p>Construct a MultiReader aggregating the named set of (sub)readers. * Directory locking for delete, undeleteAll, and setNorm operations is * left to the subreaders. </p> * @param closeSubReaders indicates whether the subreaders should be closed * when this MultiReader is closed * @param subReaders set of (sub)readers * @throws IOException */ public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) { initialize(subReaders, closeSubReaders); } private void initialize(IndexReader[] subReaders, boolean closeSubReaders) { this.subReaders = subReaders.clone(); starts = new int[subReaders.length + 1]; // build starts array decrefOnClose = new boolean[subReaders.length]; for (int i = 0; i < subReaders.length; i++) { starts[i] = maxDoc; maxDoc += subReaders[i].maxDoc(); // compute maxDocs if (!closeSubReaders) { subReaders[i].incRef(); decrefOnClose[i] = true; } else { decrefOnClose[i] = false; } if (subReaders[i].hasDeletions()) hasDeletions = true; } starts[subReaders.length] = maxDoc; } /** * Tries to reopen the subreaders. * <br> * If one or more subreaders could be re-opened (i. e. 
subReader.reopen() * returned a new instance != subReader), then a new MultiReader instance * is returned, otherwise this instance is returned. * <p> * A re-opened instance might share one or more subreaders with the old * instance. Index modification operations result in undefined behavior * when performed before the old instance is closed. * (see {@link IndexReader#reopen()}). * <p> * If subreaders are shared, then the reference count of those * readers is increased to ensure that the subreaders remain open * until the last referring reader is closed. * * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ @Override public synchronized IndexReader reopen() throws CorruptIndexException, IOException { return doReopen(false); } /** * Clones the subreaders. * (see {@link IndexReader#clone()}). * <br> * <p> * If subreaders are shared, then the reference count of those * readers is increased to ensure that the subreaders remain open * until the last referring reader is closed. */ @Override public synchronized Object clone() { try { return doReopen(true); } catch (Exception ex) { throw new RuntimeException(ex); } } /** * If clone is true then we clone each of the subreaders * @param doClone * @return New IndexReader, or same one (this) if * reopen/clone is not necessary * @throws CorruptIndexException * @throws IOException */ protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException { ensureOpen(); boolean reopened = false; IndexReader[] newSubReaders = new IndexReader[subReaders.length]; boolean success = false; try { for (int i = 0; i < subReaders.length; i++) { if (doClone) newSubReaders[i] = (IndexReader) subReaders[i].clone(); else newSubReaders[i] = subReaders[i].reopen(); // if at least one of the subreaders was updated we remember that // and return a new MultiReader if (newSubReaders[i] != subReaders[i]) { reopened = true; } } success = true; } finally { if (!success && reopened) { for (int i = 0; i < newSubReaders.length; i++) { if (newSubReaders[i] != subReaders[i]) { try { newSubReaders[i].close(); } catch (IOException ignore) { // keep going - we want to clean up as much as possible } } } } } if (reopened) { boolean[] newDecrefOnClose = new boolean[subReaders.length]; for (int i = 0; i < subReaders.length; i++) { if (newSubReaders[i] == subReaders[i]) { newSubReaders[i].incRef(); newDecrefOnClose[i] = true; } } MultiReader mr = new MultiReader(newSubReaders); mr.decrefOnClose = newDecrefOnClose; return mr; } else { return this; } } @Override public TermFreqVector[] getTermFreqVectors(int n) throws IOException { ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment } @Override public TermFreqVector getTermFreqVector(int n, String field) throws IOException { ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].getTermFreqVector(n - starts[i], field); } @Override public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException { ensureOpen(); int i = readerIndex(docNumber); // find segment num subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper); } @Override public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException { ensureOpen(); int i = readerIndex(docNumber); // find segment num subReaders[i].getTermFreqVector(docNumber - starts[i], mapper); } @Override public boolean isOptimized() { return false; } 
@Override
  public int numDocs() {
    // Don't call ensureOpen() here (it could affect performance)
    // NOTE: multiple threads may wind up init'ing
    // numDocs... but that's harmless
    if (numDocs == -1) {                          // check cache
      int n = 0;                                  // cache miss--recompute
      for (int i = 0; i < subReaders.length; i++)
        n += subReaders[i].numDocs();             // sum from readers
      numDocs = n;
    }
    return numDocs;
  }

  @Override
  public int maxDoc() {
    // Don't call ensureOpen() here (it could affect performance)
    return maxDoc;
  }

  // inherit javadoc
  @Override
  public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    ensureOpen();
    int i = readerIndex(n);                                       // find segment num
    return subReaders[i].document(n - starts[i], fieldSelector);  // dispatch to segment reader
  }

  @Override
  public boolean isDeleted(int n) {
    // Don't call ensureOpen() here (it could affect performance)
    int i = readerIndex(n);                         // find segment num
    return subReaders[i].isDeleted(n - starts[i]);  // dispatch to segment reader
  }

  @Override
  public boolean hasDeletions() {
    // Don't call ensureOpen() here (it could affect performance)
    return hasDeletions;
  }

  @Override
  protected void doDelete(int n) throws CorruptIndexException, IOException {
    numDocs = -1;                                 // invalidate cache
    int i = readerIndex(n);                       // find segment num
    subReaders[i].deleteDocument(n - starts[i]);  // dispatch to segment reader
    hasDeletions = true;
  }

  @Override
  protected void doUndeleteAll() throws CorruptIndexException, IOException {
    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].undeleteAll();
    hasDeletions = false;
    numDocs = -1;                                 // invalidate cache
  }

  private int readerIndex(int n) {                // find reader for doc n:
    return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
  }

  @Override
  public boolean hasNorms(String field) throws IOException {
    ensureOpen();
    for (int i = 0; i < subReaders.length; i++) {
      if (subReaders[i].hasNorms(field)) return true;
    }
    return false;
  }

  @Override
  public synchronized byte[] norms(String field) throws IOException {
    ensureOpen();
    byte[] bytes = normsCache.get(field);
    if (bytes != null)
      return bytes;                               // cache hit
    if (!hasNorms(field))
      return null;

    bytes = new byte[maxDoc()];
    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].norms(field, bytes, starts[i]);
    normsCache.put(field, bytes);                 // update cache
    return bytes;
  }

  @Override
  public synchronized void norms(String field, byte[] result, int offset) throws IOException {
    ensureOpen();
    byte[] bytes = normsCache.get(field);
    if (bytes == null && !hasNorms(field)) {
      Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
    } else if (bytes != null) {                   // cache hit
      System.arraycopy(bytes, 0, result, offset, maxDoc());
    } else {
      for (int i = 0; i < subReaders.length; i++) {  // read from segments
        subReaders[i].norms(field, result, offset + starts[i]);
      }
    }
  }

  @Override
  protected void doSetNorm(int n, String field, byte value) throws CorruptIndexException, IOException {
    synchronized (normsCache) {
      normsCache.remove(field);                          // clear cache
    }
    int i = readerIndex(n);                              // find segment num
    subReaders[i].setNorm(n - starts[i], field, value);  // dispatch
  }

  @Override
  public TermEnum terms() throws IOException {
    ensureOpen();
    return new MultiTermEnum(this, subReaders, starts, null);
  }

  @Override
  public TermEnum terms(Term term) throws IOException {
    ensureOpen();
    return new MultiTermEnum(this, subReaders, starts, term);
  }

  @Override
  public int docFreq(Term t)
throws IOException { ensureOpen(); int total = 0; // sum freqs in segments for (int i = 0; i < subReaders.length; i++) total += subReaders[i].docFreq(t); return total; } @Override public TermDocs termDocs() throws IOException { ensureOpen(); return new MultiTermDocs(this, subReaders, starts); } @Override public TermPositions termPositions() throws IOException { ensureOpen(); return new MultiTermPositions(this, subReaders, starts); } @Override protected void doCommit(Map<String,String> commitUserData) throws IOException { for (int i = 0; i < subReaders.length; i++) subReaders[i].commit(commitUserData); } @Override protected synchronized void doClose() throws IOException { for (int i = 0; i < subReaders.length; i++) { if (decrefOnClose[i]) { subReaders[i].decRef(); } else { subReaders[i].close(); } } // NOTE: only needed in case someone had asked for // FieldCache for top-level reader (which is generally // not a good idea): FieldCache.DEFAULT.purge(this); } @Override public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) { ensureOpen(); return DirectoryReader.getFieldNames(fieldNames, this.subReaders); } /** * Checks recursively if all subreaders are up to date. */ @Override public boolean isCurrent() throws CorruptIndexException, IOException { for (int i = 0; i < subReaders.length; i++) { if (!subReaders[i].isCurrent()) { return false; } } // all subreaders are up to date return true; } /** Not implemented. * @throws UnsupportedOperationException */ @Override public long getVersion() { throw new UnsupportedOperationException("MultiReader does not support this method."); } @Override public IndexReader[] getSequentialSubReaders() { return subReaders; } }
chrishumphreys/provocateur
provocateur-thirdparty/src/main/java/org/targettest/org/apache/lucene/index/MultiReader.java
Java
apache-2.0
14,073
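The starts[] arithmetic is the heart of the composite reader: global doc n belongs to subreader i where starts[i] <= n < starts[i + 1], with local id n - starts[i]. A standalone sketch of the lookup (the real implementation delegates to DirectoryReader.readerIndex, which performs a binary search):

    int[] starts = {0, 100, 250, 400};   // three subreaders with maxDoc 100, 150, 150
    int n = 130;
    int i = java.util.Arrays.binarySearch(starts, n);
    if (i < 0) {
        i = -i - 2;                      // not found: insertion point minus one
    }
    System.out.println("subreader " + i + ", local doc " + (n - starts[i]));  // subreader 1, local doc 30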
/* * Copyright Strimzi authors. * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). */ package io.strimzi.systemtest.kafka; import io.fabric8.kubernetes.api.model.ConfigMap; import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; import io.fabric8.kubernetes.api.model.Pod; import io.fabric8.kubernetes.api.model.Quantity; import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; import io.fabric8.kubernetes.api.model.Secret; import io.fabric8.kubernetes.api.model.SecurityContextBuilder; import io.fabric8.kubernetes.api.model.Service; import io.fabric8.kubernetes.api.model.apps.StatefulSet; import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext; import io.strimzi.api.kafka.Crds; import io.strimzi.api.kafka.KafkaTopicList; import io.strimzi.api.kafka.model.EntityOperatorSpec; import io.strimzi.api.kafka.model.EntityTopicOperatorSpec; import io.strimzi.api.kafka.model.EntityUserOperatorSpec; import io.strimzi.api.kafka.model.Kafka; import io.strimzi.api.kafka.model.KafkaClusterSpec; import io.strimzi.api.kafka.model.KafkaResources; import io.strimzi.api.kafka.model.KafkaTopic; import io.strimzi.api.kafka.model.SystemProperty; import io.strimzi.api.kafka.model.SystemPropertyBuilder; import io.strimzi.api.kafka.model.ZookeeperClusterSpec; import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener; import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder; import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType; import io.strimzi.api.kafka.model.storage.JbodStorage; import io.strimzi.api.kafka.model.storage.JbodStorageBuilder; import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder; import io.strimzi.operator.common.model.Labels; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.Constants; import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.annotations.OpenShiftOnly; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.cli.KafkaCmdClient; import io.strimzi.systemtest.kafkaclients.internalClients.InternalKafkaClient; import io.strimzi.systemtest.resources.ResourceOperation; import io.strimzi.systemtest.resources.crd.KafkaResource; import io.strimzi.systemtest.resources.crd.KafkaTopicResource; import io.strimzi.systemtest.templates.crd.KafkaClientsTemplates; import io.strimzi.systemtest.templates.crd.KafkaTemplates; import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.StUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.ConfigMapUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.StatefulSetUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PersistentVolumeClaimUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils; import io.strimzi.test.TestUtils; import io.strimzi.test.executor.ExecResult; import io.strimzi.test.timemeasuring.Operation; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; 
import org.hamcrest.CoreMatchers; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.extension.ExtensionContext; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.stream.Collectors; import static io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName; import static io.strimzi.api.kafka.model.KafkaResources.zookeeperStatefulSetName; import static io.strimzi.systemtest.Constants.CRUISE_CONTROL; import static io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED; import static io.strimzi.systemtest.Constants.LOADBALANCER_SUPPORTED; import static io.strimzi.systemtest.Constants.REGRESSION; import static io.strimzi.systemtest.Constants.STATEFUL_SET; import static io.strimzi.systemtest.utils.StUtils.configMap2Properties; import static io.strimzi.systemtest.utils.StUtils.stringToProperties; import static io.strimzi.test.TestUtils.fromYamlString; import static io.strimzi.test.TestUtils.map; import static io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient; import static io.strimzi.test.k8s.KubeClusterResource.kubeClient; import static java.util.Arrays.asList; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.junit.jupiter.api.Assumptions.assumeFalse; @Tag(REGRESSION) @SuppressWarnings("checkstyle:ClassFanOutComplexity") class KafkaST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(KafkaST.class); private static final String TEMPLATE_PATH = TestUtils.USER_PATH + "/../packaging/examples/templates/cluster-operator"; public static final String NAMESPACE = "kafka-cluster-test"; private static final String OPENSHIFT_CLUSTER_NAME = "openshift-my-cluster"; @ParallelNamespaceTest @OpenShiftOnly void testDeployKafkaClusterViaTemplate(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); cluster.createCustomResources(extensionContext, TEMPLATE_PATH); String templateName = "strimzi-ephemeral"; cmdKubeClient(namespaceName).createResourceAndApply(templateName, map("CLUSTER_NAME", OPENSHIFT_CLUSTER_NAME)); StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET)); StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET)); DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME), 1); //Testing docker images testDockerImagesForKafkaCluster(OPENSHIFT_CLUSTER_NAME, NAMESPACE, namespaceName, 3, 3, false); //Testing labels verifyLabelsForKafkaCluster(NAMESPACE, namespaceName, OPENSHIFT_CLUSTER_NAME, templateName); LOGGER.info("Deleting Kafka cluster {} after test", OPENSHIFT_CLUSTER_NAME); 
        cmdKubeClient(namespaceName).deleteByName("Kafka", OPENSHIFT_CLUSTER_NAME);

        // Wait for Kafka deletion
        cmdKubeClient(namespaceName).waitForResourceDeletion(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME);
        kubeClient(namespaceName).listPods(namespaceName).stream()
            .filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME))
            .forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName()));

        StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME));
        StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME));
        DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME));
    }

    @ParallelNamespaceTest
    void testEODeletion(ExtensionContext extensionContext) {
        final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
        final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

        resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

        // Get the pod name to check the termination process
        Pod pod = kubeClient(namespaceName).listPods(namespaceName).stream()
                .filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
                .findAny()
                .orElseThrow();

        assertThat("Entity operator pod does not exist", pod, notNullValue());

        LOGGER.info("Setting entity operator to null");

        KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName,
            kafka -> kafka.getSpec().setEntityOperator(null), namespaceName);

        // Wait until the EO (UO + TO) deployment is removed
        DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
        PodUtils.deletePodWithWait(namespaceName, pod.getMetadata().getName());

        LOGGER.info("Entity operator was deleted");
    }

    @ParallelNamespaceTest
    @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:JavaNCSS"})
    void testCustomAndUpdatedValues(ExtensionContext extensionContext) {
        final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
        final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

        LinkedHashMap<String, String> envVarGeneral = new LinkedHashMap<>();
        envVarGeneral.put("TEST_ENV_1", "test.env.one");
        envVarGeneral.put("TEST_ENV_2", "test.env.two");

        LinkedHashMap<String, String> envVarUpdated = new LinkedHashMap<>();
        envVarUpdated.put("TEST_ENV_2", "updated.test.env.two");
        envVarUpdated.put("TEST_ENV_3", "test.env.three");

        // Kafka broker config
        Map<String, Object> kafkaConfig = new HashMap<>();
        kafkaConfig.put("offsets.topic.replication.factor", "1");
        kafkaConfig.put("transaction.state.log.replication.factor", "1");
        kafkaConfig.put("default.replication.factor", "1");

        Map<String, Object> updatedKafkaConfig = new HashMap<>();
        updatedKafkaConfig.put("offsets.topic.replication.factor", "2");
        updatedKafkaConfig.put("transaction.state.log.replication.factor", "2");
        updatedKafkaConfig.put("default.replication.factor", "2");

        // ZooKeeper config
        Map<String, Object> zookeeperConfig = new HashMap<>();
        zookeeperConfig.put("tickTime", "2000");
        zookeeperConfig.put("initLimit", "5");
        zookeeperConfig.put("syncLimit", "2");
        zookeeperConfig.put("autopurge.purgeInterval", "1");

        Map<String, Object> updatedZookeeperConfig = new HashMap<>();
        updatedZookeeperConfig.put("tickTime", "2500");
        updatedZookeeperConfig.put("initLimit", "3");
        updatedZookeeperConfig.put("syncLimit",
"5"); final int initialDelaySeconds = 30; final int timeoutSeconds = 10; final int updatedInitialDelaySeconds = 31; final int updatedTimeoutSeconds = 11; final int periodSeconds = 10; final int successThreshold = 1; final int failureThreshold = 3; final int updatedPeriodSeconds = 5; final int updatedFailureThreshold = 1; resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 2) .editSpec() .editKafka() .withNewReadinessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .withPeriodSeconds(periodSeconds) .withSuccessThreshold(successThreshold) .withFailureThreshold(failureThreshold) .endReadinessProbe() .withNewLivenessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .withPeriodSeconds(periodSeconds) .withSuccessThreshold(successThreshold) .withFailureThreshold(failureThreshold) .endLivenessProbe() .withConfig(kafkaConfig) .withNewTemplate() .withNewKafkaContainer() .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral)) .endKafkaContainer() .endTemplate() .endKafka() .editZookeeper() .withReplicas(2) .withNewReadinessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .endReadinessProbe() .withNewLivenessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .endLivenessProbe() .withConfig(zookeeperConfig) .withNewTemplate() .withNewZookeeperContainer() .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral)) .endZookeeperContainer() .endTemplate() .endZookeeper() .editEntityOperator() .withNewTemplate() .withNewTopicOperatorContainer() .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral)) .endTopicOperatorContainer() .withNewUserOperatorContainer() .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral)) .endUserOperatorContainer() .withNewTlsSidecarContainer() .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral)) .endTlsSidecarContainer() .endTemplate() .editUserOperator() .withNewReadinessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .withPeriodSeconds(periodSeconds) .withSuccessThreshold(successThreshold) .withFailureThreshold(failureThreshold) .endReadinessProbe() .withNewLivenessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .withPeriodSeconds(periodSeconds) .withSuccessThreshold(successThreshold) .withFailureThreshold(failureThreshold) .endLivenessProbe() .endUserOperator() .editTopicOperator() .withNewReadinessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .withPeriodSeconds(periodSeconds) .withSuccessThreshold(successThreshold) .withFailureThreshold(failureThreshold) .endReadinessProbe() .withNewLivenessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .withPeriodSeconds(periodSeconds) .withSuccessThreshold(successThreshold) .withFailureThreshold(failureThreshold) .endLivenessProbe() .endTopicOperator() .withNewTlsSidecar() .withNewReadinessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .withPeriodSeconds(periodSeconds) .withSuccessThreshold(successThreshold) .withFailureThreshold(failureThreshold) .endReadinessProbe() .withNewLivenessProbe() .withInitialDelaySeconds(initialDelaySeconds) .withTimeoutSeconds(timeoutSeconds) .withPeriodSeconds(periodSeconds) .withSuccessThreshold(successThreshold) .withFailureThreshold(failureThreshold) 
.endLivenessProbe() .endTlsSidecar() .endEntityOperator() .endSpec() .build()); final Map<String, String> kafkaSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)); final Map<String, String> zkSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName)); final Map<String, String> eoPod = DeploymentUtils.depSnapshot(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)); LOGGER.info("Verify values before update"); checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold); checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), kafkaConfig, clusterName); checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarGeneral); String kafkaConfiguration = kubeClient().getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config"); assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=1")); assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=1")); assertThat(kafkaConfiguration, containsString("default.replication.factor=1")); String kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out(); assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=1")); assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=1")); assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=1")); LOGGER.info("Testing Zookeepers"); checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold); checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", zookeeperConfig); checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarGeneral); LOGGER.info("Checking configuration of TO and UO"); checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold); checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarGeneral); checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold); checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarGeneral); checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold); checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarGeneral); LOGGER.info("Updating configuration of Kafka cluster"); KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> { KafkaClusterSpec kafkaClusterSpec = k.getSpec().getKafka(); 
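            // All probe, config, and env changes below are applied in a single replace call;
            // each component (Kafka, ZooKeeper, EO) is expected to roll exactly once, which
            // the snapshot-based waits after this lambda rely on.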
kafkaClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); kafkaClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); kafkaClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds); kafkaClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds); kafkaClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds); kafkaClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds); kafkaClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold); kafkaClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold); kafkaClusterSpec.setConfig(updatedKafkaConfig); kafkaClusterSpec.getTemplate().getKafkaContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated)); ZookeeperClusterSpec zookeeperClusterSpec = k.getSpec().getZookeeper(); zookeeperClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); zookeeperClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); zookeeperClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds); zookeeperClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds); zookeeperClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds); zookeeperClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds); zookeeperClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold); zookeeperClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold); zookeeperClusterSpec.setConfig(updatedZookeeperConfig); zookeeperClusterSpec.getTemplate().getZookeeperContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated)); // Configuring TO and UO to use new values for InitialDelaySeconds and TimeoutSeconds EntityOperatorSpec entityOperatorSpec = k.getSpec().getEntityOperator(); entityOperatorSpec.getTopicOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); entityOperatorSpec.getTopicOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); entityOperatorSpec.getTopicOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds); entityOperatorSpec.getTopicOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds); entityOperatorSpec.getTopicOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds); entityOperatorSpec.getTopicOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds); entityOperatorSpec.getTopicOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold); entityOperatorSpec.getTopicOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold); entityOperatorSpec.getUserOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); entityOperatorSpec.getUserOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); entityOperatorSpec.getUserOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds); entityOperatorSpec.getUserOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds); entityOperatorSpec.getUserOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds); entityOperatorSpec.getUserOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds); entityOperatorSpec.getUserOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold); entityOperatorSpec.getUserOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold); 
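            // The TLS sidecar probes get the same updated values, so all three EO
            // containers can be verified uniformly once the Deployment rolls.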
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); entityOperatorSpec.getTlsSidecar().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds); entityOperatorSpec.getTlsSidecar().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds); entityOperatorSpec.getTlsSidecar().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds); entityOperatorSpec.getTlsSidecar().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds); entityOperatorSpec.getTlsSidecar().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds); entityOperatorSpec.getTlsSidecar().getLivenessProbe().setFailureThreshold(updatedFailureThreshold); entityOperatorSpec.getTlsSidecar().getReadinessProbe().setFailureThreshold(updatedFailureThreshold); entityOperatorSpec.getTemplate().getTopicOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated)); entityOperatorSpec.getTemplate().getUserOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated)); entityOperatorSpec.getTemplate().getTlsSidecarContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated)); }, namespaceName); StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName), 2, zkSnapshot); StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), 2, kafkaSnapshot); DeploymentUtils.waitTillDepHasRolled(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPod); KafkaUtils.waitForKafkaReady(namespaceName, clusterName); LOGGER.info("Verify values after update"); checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold); checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), updatedKafkaConfig, clusterName); checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarUpdated); kafkaConfiguration = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config"); assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=2")); assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=2")); assertThat(kafkaConfiguration, containsString("default.replication.factor=2")); kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out(); assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=2")); assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=2")); assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=2")); LOGGER.info("Testing Zookeepers"); checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold); checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", updatedZookeeperConfig); checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarUpdated); LOGGER.info("Getting entity operator to check configuration of TO and UO"); 
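        // Re-run the probe and env-var checks for all three EO containers, now against
        // the updated values.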
        checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator",
                updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
        checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarUpdated);

        checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator",
                updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
        checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarUpdated);

        checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar",
                updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
        checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarUpdated);
    }

    @ParallelNamespaceTest
    void testJvmAndResources(ExtensionContext extensionContext) {
        final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
        final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

        ArrayList<SystemProperty> javaSystemProps = new ArrayList<>();
        javaSystemProps.add(new SystemPropertyBuilder().withName("javax.net.debug")
                .withValue("verbose").build());

        Map<String, String> jvmOptionsXX = new HashMap<>();
        jvmOptionsXX.put("UseG1GC", "true");

        resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
            .editSpec()
                .editKafka()
                    .withResources(new ResourceRequirementsBuilder()
                        .addToLimits("memory", new Quantity("1.5Gi"))
                        .addToLimits("cpu", new Quantity("1"))
                        .addToRequests("memory", new Quantity("1Gi"))
                        .addToRequests("cpu", new Quantity("50m"))
                        .build())
                    .withNewJvmOptions()
                        .withXmx("1g")
                        .withXms("512m")
                        .withXx(jvmOptionsXX)
                    .endJvmOptions()
                .endKafka()
                .editZookeeper()
                    .withResources(
                        new ResourceRequirementsBuilder()
                            .addToLimits("memory", new Quantity("1G"))
                            .addToLimits("cpu", new Quantity("0.5"))
                            .addToRequests("memory", new Quantity("0.5G"))
                            .addToRequests("cpu", new Quantity("25m"))
                            .build())
                    .withNewJvmOptions()
                        .withXmx("1G")
                        .withXms("512M")
                        .withXx(jvmOptionsXX)
                    .endJvmOptions()
                .endZookeeper()
                .withNewEntityOperator()
                    .withNewTopicOperator()
                        .withResources(
                            new ResourceRequirementsBuilder()
                                .addToLimits("memory", new Quantity("1024Mi"))
                                .addToLimits("cpu", new Quantity("500m"))
                                .addToRequests("memory", new Quantity("384Mi"))
                                .addToRequests("cpu", new Quantity("0.025"))
                                .build())
                        .withNewJvmOptions()
                            .withXmx("2G")
                            .withXms("1024M")
                            .withJavaSystemProperties(javaSystemProps)
                        .endJvmOptions()
                    .endTopicOperator()
                    .withNewUserOperator()
                        .withResources(
                            new ResourceRequirementsBuilder()
                                .addToLimits("memory", new Quantity("512M"))
                                .addToLimits("cpu", new Quantity("300m"))
                                .addToRequests("memory", new Quantity("256M"))
                                .addToRequests("cpu", new Quantity("30m"))
                                .build())
                        .withNewJvmOptions()
                            .withXmx("1G")
                            .withXms("512M")
                            .withJavaSystemProperties(javaSystemProps)
                        .endJvmOptions()
                    .endUserOperator()
                .endEntityOperator()
            .endSpec()
            .build());

        // Make snapshots for the Kafka cluster to make sure that there is no rolling update after CO reconciliation
        final String zkStsName = KafkaResources.zookeeperStatefulSetName(clusterName);
        final String kafkaStsName =
kafkaStatefulSetName(clusterName); final String eoDepName = KafkaResources.entityOperatorDeploymentName(clusterName); final Map<String, String> zkPods = StatefulSetUtils.ssSnapshot(namespaceName, zkStsName); final Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStsName); final Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, eoDepName); assertResources(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "1536Mi", "1", "1Gi", "50m"); assertExpectedJavaOpts(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "-Xmx1g", "-Xms512m", "-XX:+UseG1GC"); assertResources(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper", "1G", "500m", "500M", "25m"); assertExpectedJavaOpts(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper", "-Xmx1G", "-Xms512M", "-XX:+UseG1GC"); Optional<Pod> pod = kubeClient(namespaceName).listPods(namespaceName) .stream().filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName))) .findFirst(); assertThat("EO pod does not exist", pod.isPresent(), is(true)); assertResources(namespaceName, pod.get().getMetadata().getName(), "topic-operator", "1Gi", "500m", "384Mi", "25m"); assertResources(namespaceName, pod.get().getMetadata().getName(), "user-operator", "512M", "300m", "256M", "30m"); assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "topic-operator", "-Xmx2G", "-Xms1024M", null); assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "user-operator", "-Xmx1G", "-Xms512M", null); String eoPod = eoPods.keySet().toArray()[0].toString(); kubeClient(namespaceName).getPod(namespaceName, eoPod).getSpec().getContainers().forEach(container -> { if (!container.getName().equals("tls-sidecar")) { LOGGER.info("Check if -D java options are present in {}", container.getName()); String javaSystemProp = container.getEnv().stream().filter(envVar -> envVar.getName().equals("STRIMZI_JAVA_SYSTEM_PROPERTIES")).findFirst().orElseThrow().getValue(); String javaOpts = container.getEnv().stream().filter(envVar -> envVar.getName().equals("STRIMZI_JAVA_OPTS")).findFirst().orElseThrow().getValue(); assertThat(javaSystemProp, is("-Djavax.net.debug=verbose")); if (container.getName().equals("topic-operator")) { assertThat(javaOpts, is("-Xms1024M -Xmx2G")); } if (container.getName().equals("user-operator")) { assertThat(javaOpts, is("-Xms512M -Xmx1G")); } } }); LOGGER.info("Checking no rolling update for Kafka cluster"); StatefulSetUtils.waitForNoRollingUpdate(namespaceName, zkStsName, zkPods); StatefulSetUtils.waitForNoRollingUpdate(namespaceName, kafkaStsName, kafkaPods); DeploymentUtils.waitForNoRollingUpdate(namespaceName, eoDepName, eoPods); } @ParallelNamespaceTest void testForTopicOperator(ExtensionContext extensionContext) throws InterruptedException { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build()); final String topicName = KafkaTopicUtils.generateRandomNameOfTopic(); final String cliTopicName = "topic-from-cli"; //Creating topics for testing resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build()); KafkaTopicUtils.waitForKafkaTopicReady(namespaceName, topicName); 
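        // The Topic Operator syncs in both directions: topics created or altered through
        // the Kafka CLI should appear as KafkaTopic resources, and KafkaTopic edits should
        // be applied to the broker. The assertions below exercise both paths.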
        assertThat(KafkaTopicResource.kafkaTopicClient().inNamespace(namespaceName).withName(topicName).get().getMetadata().getName(), is(topicName));
        assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItem(topicName));

        KafkaCmdClient.createTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName, 1, 1);
        assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItems(topicName, cliTopicName));
        assertThat(cmdKubeClient(namespaceName).list(KafkaTopic.RESOURCE_KIND), hasItems(cliTopicName, topicName));

        // Updating the first topic (created as a KafkaTopic resource) using the pod CLI
        KafkaCmdClient.updateTopicPartitionsCountUsingPodCli(namespaceName, clusterName, 0, topicName, 2);
        KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
        assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, topicName), hasItems("PartitionCount:2"));

        KafkaTopic testTopic = fromYamlString(cmdKubeClient(namespaceName).get(KafkaTopic.RESOURCE_KIND, topicName), KafkaTopic.class);
        assertThat(testTopic, is(CoreMatchers.notNullValue()));
        assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
        assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));

        // Updating the second topic (created via the CLI) through its KafkaTopic resource
        KafkaTopicResource.replaceTopicResourceInSpecificNamespace(cliTopicName, topic -> topic.getSpec().setPartitions(2), namespaceName);
        KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
        assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName), hasItems("PartitionCount:2"));

        testTopic = fromYamlString(cmdKubeClient(namespaceName).get(KafkaTopic.RESOURCE_KIND, cliTopicName), KafkaTopic.class);
        assertThat(testTopic, is(CoreMatchers.notNullValue()));
        assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
        assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));

        // Deleting the CLI-created topic by deleting its KafkaTopic resource
        cmdKubeClient(namespaceName).deleteByName(KafkaTopic.RESOURCE_KIND, cliTopicName);

        // Deleting the other topic using the pod CLI
        KafkaCmdClient.deleteTopicUsingPodCli(namespaceName, clusterName, 0, topicName);
        KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, topicName);

        // Checking that all topics were deleted
        Thread.sleep(Constants.TIMEOUT_TEARDOWN);
        List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0);
        assertThat(topics, not(hasItems(topicName)));
        assertThat(topics, not(hasItems(cliTopicName)));
    }

    @ParallelNamespaceTest
    void testRemoveTopicOperatorFromEntityOperator(ExtensionContext extensionContext) {
        final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
        final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

        LOGGER.info("Deploying Kafka cluster {}", clusterName);
        resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

        String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
            .get(0).getMetadata().getName();

        KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(null), namespaceName);

        // Wait until the EO pod is recreated without the TO
        PodUtils.deletePodWithWait(namespaceName, eoPodName);
        DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
        PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);
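        // With the Topic Operator removed, the EO pod should keep only two containers
        // (user-operator and tls-sidecar); the container-name assertions below confirm it.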
        // Checking that the TO container was removed
        kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
            pod.getSpec().getContainers().forEach(container -> {
                assertThat(container.getName(), not(containsString("topic-operator")));
            });
        });

        eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
            .get(0).getMetadata().getName();

        KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec()), namespaceName);

        // Wait until the EO pod is recreated with the TO
        PodUtils.deletePodWithWait(namespaceName, eoPodName);
        DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);

        // Checking that the TO container was created
        kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
            pod.getSpec().getContainers().forEach(container -> {
                assertThat(container.getName(), anyOf(
                    containsString("topic-operator"),
                    containsString("user-operator"),
                    containsString("tls-sidecar"))
                );
            });
        });
    }

    @ParallelNamespaceTest
    void testRemoveUserOperatorFromEntityOperator(ExtensionContext extensionContext) {
        final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
        final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

        LOGGER.info("Deploying Kafka cluster {}", clusterName);
        String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
        resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

        String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
            .get(0).getMetadata().getName();

        KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(null), namespaceName);

        // Wait until the EO pod is recreated without the UO
        PodUtils.deletePodWithWait(namespaceName, eoPodName);
        DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
        PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);

        // Checking that the UO container was removed
        kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
            pod.getSpec().getContainers().forEach(container -> {
                assertThat(container.getName(), not(containsString("user-operator")));
            });
        });

        eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
            .get(0).getMetadata().getName();

        KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec()), namespaceName);

        // Wait until the EO pod is recreated with the UO
        PodUtils.deletePodWithWait(namespaceName, eoPodName);
        DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);

        // Checking that the UO container was created
        kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> { assertThat(container.getName(), anyOf( containsString("topic-operator"), containsString("user-operator"), containsString("tls-sidecar")) ); }); }); timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName()); assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId)); } @ParallelNamespaceTest void testRemoveUserAndTopicOperatorsFromEntityOperator(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); // TODO issue #4152 - temporarily disabled for Namespace RBAC scoped assumeFalse(Environment.isNamespaceRbacScope()); LOGGER.info("Deploying Kafka cluster {}", clusterName); String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName()); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build()); String eoDeploymentName = KafkaResources.entityOperatorDeploymentName(clusterName); KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> { k.getSpec().getEntityOperator().setTopicOperator(null); k.getSpec().getEntityOperator().setUserOperator(null); }, namespaceName); PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, eoDeploymentName, 0); KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> { k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec()); k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec()); }, namespaceName); DeploymentUtils.waitForDeploymentReady(namespaceName, eoDeploymentName); //Checking that EO was created kubeClient().listPodsByPrefixInName(namespaceName, eoDeploymentName).forEach(pod -> { pod.getSpec().getContainers().forEach(container -> { assertThat(container.getName(), anyOf( containsString("topic-operator"), containsString("user-operator"), containsString("tls-sidecar")) ); }); }); timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName()); assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId)); } @ParallelNamespaceTest void testEntityOperatorWithoutTopicOperator(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); LOGGER.info("Deploying Kafka cluster without TO in EO"); String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName()); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3) .editSpec() .withNewEntityOperator() .withNewUserOperator() .endUserOperator() .endEntityOperator() .endSpec() .build()); timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName()); assertNoCoErrorsLogged(NAMESPACE, 
timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId)); //Checking that TO was not deployed kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> { pod.getSpec().getContainers().forEach(container -> { assertThat(container.getName(), not(containsString("topic-operator"))); }); }); } @ParallelNamespaceTest void testEntityOperatorWithoutUserOperator(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); LOGGER.info("Deploying Kafka cluster without UO in EO"); String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName()); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3) .editSpec() .withNewEntityOperator() .withNewTopicOperator() .endTopicOperator() .endEntityOperator() .endSpec() .build()); timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName()); assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId)); //Checking that UO was not deployed kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> { pod.getSpec().getContainers().forEach(container -> { assertThat(container.getName(), not(containsString("user-operator"))); }); }); } @ParallelNamespaceTest void testEntityOperatorWithoutUserAndTopicOperators(ExtensionContext extensionContext) { String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); LOGGER.info("Deploying Kafka cluster without UO and TO in EO"); String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName()); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3) .editSpec() .withNewEntityOperator() .endEntityOperator() .endSpec() .build()); timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName()); assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId)); //Checking that EO was not deployed assertThat("EO should not be deployed", kubeClient().listPodsByPrefixInName(KafkaResources.entityOperatorDeploymentName(clusterName)).size(), is(0)); } @ParallelNamespaceTest void testTopicWithoutLabels(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); // Negative scenario: creating topic without any labels and make sure that TO can't handle this topic resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build()); // Creating topic without any label resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(clusterName, "topic-without-labels", 1, 1, 1) 
.editMetadata() .withLabels(null) .endMetadata() .build()); // Checking that resource was created assertThat(cmdKubeClient(namespaceName).list("kafkatopic"), hasItems("topic-without-labels")); // Checking that TO didn't handle new topic and zk pods don't contain new topic assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), not(hasItems("topic-without-labels"))); // Checking TO logs String tOPodName = cmdKubeClient(namespaceName).listResourcesByLabel("pod", Labels.STRIMZI_NAME_LABEL + "=" + clusterName + "-entity-operator").get(0); String tOlogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, tOPodName, "topic-operator"); assertThat(tOlogs, not(containsString("Created topic 'topic-without-labels'"))); //Deleting topic cmdKubeClient(namespaceName).deleteByName("kafkatopic", "topic-without-labels"); KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, "topic-without-labels"); //Checking all topics were deleted List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0); assertThat(topics, not(hasItems("topic-without-labels"))); } @ParallelNamespaceTest void testKafkaJBODDeleteClaimsTrueFalse(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final int kafkaReplicas = 2; final String diskSizeGi = "10"; JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes( new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(), new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()).build(); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build()); // kafka cluster already deployed verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi); final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size(); LOGGER.info("Deleting cluster"); cmdKubeClient(namespaceName).deleteByName("kafka", clusterName); LOGGER.info("Waiting for PVC deletion"); PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName); } @ParallelNamespaceTest void testKafkaJBODDeleteClaimsTrue(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final int kafkaReplicas = 2; final String diskSizeGi = "10"; JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes( new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSizeGi + "Gi").build(), new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()).build(); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build()); // kafka cluster already deployed verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi); final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size(); LOGGER.info("Deleting cluster"); cmdKubeClient(namespaceName).deleteByName("kafka", clusterName); LOGGER.info("Waiting for PVC deletion"); PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, 
clusterName); } @ParallelNamespaceTest void testKafkaJBODDeleteClaimsFalse(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final int kafkaReplicas = 2; final String diskSizeGi = "10"; JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes( new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(), new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizeGi + "Gi").build()).build(); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build()); // kafka cluster already deployed verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi); int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size(); LOGGER.info("Deleting cluster"); cmdKubeClient(namespaceName).deleteByName("kafka", clusterName); LOGGER.info("Waiting for PVC deletion"); PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName); } @ParallelNamespaceTest @Tag(INTERNAL_CLIENTS_USED) void testPersistentStorageSize(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName()); final String[] diskSizes = {"70Gi", "20Gi"}; final int kafkaRepl = 2; final int diskCount = 2; JbodStorage jbodStorage = new JbodStorageBuilder() .withVolumes( new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizes[0]).build(), new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizes[1]).build() ).build(); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaRepl) .editSpec() .editKafka() .withStorage(jbodStorage) .endKafka() .editZookeeper(). 
withReplicas(1) .endZookeeper() .endSpec() .build()); resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build()); resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build()); List<PersistentVolumeClaim> volumes = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter( persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList()); checkStorageSizeForVolumes(volumes, diskSizes, kafkaRepl, diskCount); String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName(); InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder() .withUsingPodName(kafkaClientsPodName) .withTopicName(topicName) .withNamespaceName(namespaceName) .withClusterName(clusterName) .withMessageCount(MESSAGE_COUNT) .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME) .build(); LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPodName); internalKafkaClient.checkProducedAndConsumedMessages( internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain() ); } @ParallelNamespaceTest @Tag(LOADBALANCER_SUPPORTED) void testRegenerateCertExternalAddressChange(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); LOGGER.info("Creating kafka without external listener"); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1).build()); final String brokerSecret = clusterName + "-kafka-brokers"; Secret secretsWithoutExt = kubeClient(namespaceName).getSecret(namespaceName, brokerSecret); LOGGER.info("Editing kafka with external listener"); KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> { List<GenericKafkaListener> lst = asList( new GenericKafkaListenerBuilder() .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME) .withPort(9092) .withType(KafkaListenerType.INTERNAL) .withTls(false) .build(), new GenericKafkaListenerBuilder() .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME) .withPort(9094) .withType(KafkaListenerType.LOADBALANCER) .withTls(true) .withNewConfiguration() .withFinalizers(LB_FINALIZERS) .endConfiguration() .build() ); kafka.getSpec().getKafka().setListeners(lst); }, namespaceName); StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName))); Secret secretsWithExt = kubeClient(namespaceName).getSecret(namespaceName, brokerSecret); LOGGER.info("Checking secrets"); kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).forEach(kafkaPod -> { String kafkaPodName = kafkaPod.getMetadata().getName(); assertThat(secretsWithExt.getData().get(kafkaPodName + ".crt"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".crt")))); assertThat(secretsWithExt.getData().get(kafkaPodName + ".key"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".key")))); }); } @ParallelNamespaceTest @Tag(INTERNAL_CLIENTS_USED) void testLabelModificationDoesNotBreakCluster(ExtensionContext extensionContext) { final String namespaceName = 
StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName()); Map<String, String> labels = new HashMap<>(); final String[] labelKeys = {"label-name-1", "label-name-2", ""}; final String[] labelValues = {"name-of-the-label-1", "name-of-the-label-2", ""}; labels.put(labelKeys[0], labelValues[0]); labels.put(labelKeys[1], labelValues[1]); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1) .editMetadata() .withLabels(labels) .endMetadata() .build()); resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build()); resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build()); final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName(); InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder() .withUsingPodName(kafkaClientsPodName) .withTopicName(topicName) .withNamespaceName(namespaceName) .withClusterName(clusterName) .withMessageCount(MESSAGE_COUNT) .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME) .build(); Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName)); LOGGER.info("Waiting for kafka stateful set labels changed {}", labels); StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels); LOGGER.info("Getting labels from stateful set resource"); StatefulSet statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)); LOGGER.info("Verifying default labels in the Kafka CR"); assertThat("Label exists in stateful set with concrete value", labelValues[0].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[0]))); assertThat("Label exists in stateful set with concrete value", labelValues[1].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[1]))); labelValues[0] = "new-name-of-the-label-1"; labelValues[1] = "new-name-of-the-label-2"; labelKeys[2] = "label-name-3"; labelValues[2] = "name-of-the-label-3"; LOGGER.info("Setting new values of labels from {} to {} | from {} to {} and adding one {} with value {}", "name-of-the-label-1", labelValues[0], "name-of-the-label-2", labelValues[1], labelKeys[2], labelValues[2]); LOGGER.info("Edit kafka labels in Kafka CR"); KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> { resource.getMetadata().getLabels().put(labelKeys[0], labelValues[0]); resource.getMetadata().getLabels().put(labelKeys[1], labelValues[1]); resource.getMetadata().getLabels().put(labelKeys[2], labelValues[2]); }, namespaceName); labels.put(labelKeys[0], labelValues[0]); labels.put(labelKeys[1], labelValues[1]); labels.put(labelKeys[2], labelValues[2]); LOGGER.info("Waiting for kafka service labels changed {}", labels); ServiceUtils.waitForServiceLabelsChange(namespaceName, KafkaResources.brokersServiceName(clusterName), labels); LOGGER.info("Verifying kafka labels via services"); Service service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName)); verifyPresentLabels(labels, service); 
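        // Labels set on the Kafka CR are expected to propagate to every dependent resource;
        // the same verification is repeated below for config maps, stateful sets and pods
        // before the labels are removed again.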
LOGGER.info("Waiting for kafka config map labels changed {}", labels); ConfigMapUtils.waitForConfigMapLabelsChange(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labels); LOGGER.info("Verifying kafka labels via config maps"); ConfigMap configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)); verifyPresentLabels(labels, configMap); LOGGER.info("Waiting for kafka stateful set labels changed {}", labels); StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels); LOGGER.info("Verifying kafka labels via stateful set"); statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)); verifyPresentLabels(labels, statefulSet); StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods); LOGGER.info("Verifying via kafka pods"); labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels(); assertThat("Label exists in kafka pods", labelValues[0].equals(labels.get(labelKeys[0]))); assertThat("Label exists in kafka pods", labelValues[1].equals(labels.get(labelKeys[1]))); assertThat("Label exists in kafka pods", labelValues[2].equals(labels.get(labelKeys[2]))); LOGGER.info("Removing labels: {} -> {}, {} -> {}, {} -> {}", labelKeys[0], labels.get(labelKeys[0]), labelKeys[1], labels.get(labelKeys[1]), labelKeys[2], labels.get(labelKeys[2])); KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> { resource.getMetadata().getLabels().remove(labelKeys[0]); resource.getMetadata().getLabels().remove(labelKeys[1]); resource.getMetadata().getLabels().remove(labelKeys[2]); }, namespaceName); labels.remove(labelKeys[0]); labels.remove(labelKeys[1]); labels.remove(labelKeys[2]); LOGGER.info("Waiting for kafka service labels deletion {}", labels.toString()); ServiceUtils.waitForServiceLabelsDeletion(namespaceName, KafkaResources.brokersServiceName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]); LOGGER.info("Verifying kafka labels via services"); service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName)); verifyNullLabels(labelKeys, service); LOGGER.info("Verifying kafka labels via config maps"); ConfigMapUtils.waitForConfigMapLabelsDeletion(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]); configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)); verifyNullLabels(labelKeys, configMap); LOGGER.info("Waiting for kafka stateful set labels changed {}", labels); String statefulSetName = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getName(); StatefulSetUtils.waitForStatefulSetLabelsDeletion(namespaceName, statefulSetName, labelKeys[0], labelKeys[1], labelKeys[2]); statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)); LOGGER.info("Verifying kafka labels via stateful set"); verifyNullLabels(labelKeys, statefulSet); StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods); LOGGER.info("Waiting for kafka pod labels deletion {}", labels.toString()); PodUtils.waitUntilPodLabelsDeletion(namespaceName, 
KafkaResources.kafkaPodName(clusterName, 0), labelKeys[0], labelKeys[1], labelKeys[2]); labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels(); LOGGER.info("Verifying via kafka pods"); verifyNullLabels(labelKeys, labels); internalKafkaClient.checkProducedAndConsumedMessages( internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain() ); } @ParallelNamespaceTest @Tag(INTERNAL_CLIENTS_USED) void testAppDomainLabels(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName()); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1).build()); resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build()); resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build()); final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName(); InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder() .withUsingPodName(kafkaClientsPodName) .withTopicName(topicName) .withNamespaceName(namespaceName) .withClusterName(clusterName) .withMessageCount(MESSAGE_COUNT) .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME) .build(); Map<String, String> labels; LOGGER.info("---> PODS <---"); List<Pod> pods = kubeClient(namespaceName).listPods(namespaceName, clusterName).stream() .filter(pod -> pod.getMetadata().getName().startsWith(clusterName)) .filter(pod -> !pod.getMetadata().getName().startsWith(clusterName + "-" + Constants.KAFKA_CLIENTS)) .collect(Collectors.toList()); for (Pod pod : pods) { LOGGER.info("Getting labels from {} pod", pod.getMetadata().getName()); verifyAppLabels(pod.getMetadata().getLabels()); } LOGGER.info("---> STATEFUL SETS <---"); LOGGER.info("Getting labels from stateful set of kafka resource"); labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels(); verifyAppLabels(labels); LOGGER.info("Getting labels from stateful set of zookeeper resource"); labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName)).getMetadata().getLabels(); verifyAppLabels(labels); LOGGER.info("---> SERVICES <---"); List<Service> services = kubeClient(namespaceName).listServices(namespaceName).stream() .filter(service -> service.getMetadata().getName().startsWith(clusterName)) .collect(Collectors.toList()); for (Service service : services) { LOGGER.info("Getting labels from {} service", service.getMetadata().getName()); verifyAppLabels(service.getMetadata().getLabels()); } LOGGER.info("---> SECRETS <---"); List<Secret> secrets = kubeClient(namespaceName).listSecrets(namespaceName).stream() .filter(secret -> secret.getMetadata().getName().startsWith(clusterName) && secret.getType().equals("Opaque")) .collect(Collectors.toList()); for (Secret secret : secrets) { LOGGER.info("Getting labels from {} secret", secret.getMetadata().getName()); verifyAppLabelsForSecretsAndConfigMaps(secret.getMetadata().getLabels()); } LOGGER.info("---> CONFIG MAPS <---"); 
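        // Config maps should carry the same app.kubernetes.io/* labels as secrets, so they
        // are verified with the shared secrets-and-config-maps helper.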
List<ConfigMap> configMaps = kubeClient(namespaceName).listConfigMapsInSpecificNamespace(namespaceName, clusterName); for (ConfigMap configMap : configMaps) { LOGGER.info("Getting labels from {} config map", configMap.getMetadata().getName()); verifyAppLabelsForSecretsAndConfigMaps(configMap.getMetadata().getLabels()); } internalKafkaClient.checkProducedAndConsumedMessages( internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain() ); } @ParallelNamespaceTest void testUOListeningOnlyUsersInSameCluster(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final String userName = mapWithTestUsers.get(extensionContext.getDisplayName()); final String firstClusterName = "my-cluster-1"; final String secondClusterName = "my-cluster-2"; resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(firstClusterName, 3, 1).build()); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(secondClusterName, 3, 1).build()); resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(firstClusterName, userName).build()); LOGGER.info("Verifying that user {} in cluster {} is created", userName, firstClusterName); String entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(firstClusterName)).get(0); String uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator"); assertThat(uOLogs, containsString("User " + userName + " in namespace " + namespaceName + " was ADDED")); LOGGER.info("Verifying that user {} in cluster {} is not created", userName, secondClusterName); entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(secondClusterName)).get(0); uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator"); assertThat(uOLogs, not(containsString("User " + userName + " in namespace " + namespaceName + " was ADDED"))); LOGGER.info("Verifying that user belongs to {} cluster", firstClusterName); String kafkaUserResource = cmdKubeClient(namespaceName).getResourceAsYaml("kafkauser", userName); assertThat(kafkaUserResource, containsString(Labels.STRIMZI_CLUSTER_LABEL + ": " + firstClusterName)); } @ParallelNamespaceTest @Tag(INTERNAL_CLIENTS_USED) void testMessagesAreStoredInDisk(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final String topicName = KafkaTopicUtils.generateRandomNameOfTopic(); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1).build()); Map<String, String> kafkaPodsSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName)); resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 1, 1).build()); resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build()); final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName 
+ "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName(); InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder() .withUsingPodName(kafkaClientsPodName) .withTopicName(topicName) .withNamespaceName(namespaceName) .withClusterName(clusterName) .withMessageCount(MESSAGE_COUNT) .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME) .build(); TestUtils.waitFor("KafkaTopic creation inside kafka pod", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "cd /var/lib/kafka/data/kafka-log0; ls -1").out().contains(topicName)); String topicDirNameInPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", "cd /var/lib/kafka/data/kafka-log0; ls -1 | sed -n '/" + topicName + "/p'").out(); String commandToGetDataFromTopic = "cd /var/lib/kafka/data/kafka-log0/" + topicDirNameInPod + "/;cat 00000000000000000000.log"; LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0)); String topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", commandToGetDataFromTopic).out(); LOGGER.info("Topic {} is present in kafka broker {} with no data", topicName, KafkaResources.kafkaPodName(clusterName, 0)); assertThat("Topic contains data", topicData, emptyOrNullString()); internalKafkaClient.checkProducedAndConsumedMessages( internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain() ); LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0)); topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", commandToGetDataFromTopic).out(); assertThat("Topic has no data", topicData, notNullValue()); List<Pod> kafkaPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)); for (Pod kafkaPod : kafkaPods) { LOGGER.info("Deleting kafka pod {}", kafkaPod.getMetadata().getName()); kubeClient(namespaceName).deletePod(namespaceName, kafkaPod); } LOGGER.info("Wait for kafka to rolling restart ..."); StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 1, kafkaPodsSnapshot); LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0)); topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", commandToGetDataFromTopic).out(); assertThat("Topic has no data", topicData, notNullValue()); } @ParallelNamespaceTest @Tag(INTERNAL_CLIENTS_USED) void testConsumerOffsetFiles(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName()); final Map<String, Object> kafkaConfig = new HashMap<>(); kafkaConfig.put("offsets.topic.replication.factor", "3"); kafkaConfig.put("offsets.topic.num.partitions", "100"); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1) .editSpec() .editKafka() .withConfig(kafkaConfig) .endKafka() .endSpec() .build()); resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, 
topicName, 3, 1).build()); resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build()); final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName(); InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder() .withUsingPodName(kafkaClientsPodName) .withTopicName(topicName) .withNamespaceName(namespaceName) .withClusterName(clusterName) .withMessageCount(MESSAGE_COUNT) .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME) .build(); String commandToGetFiles = "cd /var/lib/kafka/data/kafka-log0/;" + "ls -1 | sed -n \"s#__consumer_offsets-\\([0-9]*\\)#\\1#p\" | sort -V"; LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0)); String result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", commandToGetFiles).out(); // TODO / FIXME //assertThat("Folder kafka-log0 has data in files:\n" + result, result.equals("")); LOGGER.info("Result: \n" + result); internalKafkaClient.checkProducedAndConsumedMessages( internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain() ); LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0)); result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c", commandToGetFiles).out(); StringBuilder stringToMatch = new StringBuilder(); for (int i = 0; i < 100; i++) { stringToMatch.append(i).append("\n"); } assertThat("Folder kafka-log0 doesn't contain 100 files", result, containsString(stringToMatch.toString())); } @ParallelNamespaceTest void testLabelsAndAnnotationForPVC(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final String labelAnnotationKey = "testKey"; final String firstValue = "testValue"; final String changedValue = "editedTestValue"; Map<String, String> pvcLabel = new HashMap<>(); pvcLabel.put(labelAnnotationKey, firstValue); Map<String, String> pvcAnnotation = pvcLabel; Map<String, String> statefulSetLabels = new HashMap<>(); statefulSetLabels.put("app.kubernetes.io/part-of", "some-app"); statefulSetLabels.put("app.kubernetes.io/managed-by", "some-app"); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1) .editSpec() .editKafka() .withNewTemplate() .withNewStatefulset() .withNewMetadata() .withLabels(statefulSetLabels) .endMetadata() .endStatefulset() .withNewPersistentVolumeClaim() .withNewMetadata() .addToLabels(pvcLabel) .addToAnnotations(pvcAnnotation) .endMetadata() .endPersistentVolumeClaim() .endTemplate() .withStorage(new JbodStorageBuilder().withVolumes( new PersistentClaimStorageBuilder() .withDeleteClaim(false) .withId(0) .withSize("20Gi") .build(), new PersistentClaimStorageBuilder() .withDeleteClaim(true) .withId(1) .withSize("10Gi") .build()) .build()) .endKafka() .editZookeeper() .withNewTemplate() .withNewPersistentVolumeClaim() .withNewMetadata() .addToLabels(pvcLabel) .addToAnnotations(pvcAnnotation) .endMetadata() .endPersistentVolumeClaim() .endTemplate() .withNewPersistentClaimStorage() .withDeleteClaim(false) .withId(0) .withSize("3Gi") .endPersistentClaimStorage() .endZookeeper() 
.endSpec() .build()); LOGGER.info("Check if Kubernetes labels are applied"); Map<String, String> actualStatefulSetLabels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels(); assertThat(actualStatefulSetLabels.get("app.kubernetes.io/part-of"), is("some-app")); assertThat(actualStatefulSetLabels.get("app.kubernetes.io/managed-by"), is("some-app")); LOGGER.info("Kubernetes labels are correctly set and present"); List<PersistentVolumeClaim> pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter( persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList()); assertThat(pvcs.size(), is(7)); for (PersistentVolumeClaim pvc : pvcs) { LOGGER.info("Verifying that PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey)); assertThat(firstValue, is(pvc.getMetadata().getLabels().get(labelAnnotationKey))); assertThat(firstValue, is(pvc.getMetadata().getAnnotations().get(labelAnnotationKey))); } pvcLabel.put(labelAnnotationKey, changedValue); pvcAnnotation.put(labelAnnotationKey, changedValue); KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> { LOGGER.info("Replacing kafka && zookeeper labels and annotations from {} to {}", labelAnnotationKey, changedValue); kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel); kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation); kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel); kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation); }, namespaceName); PersistentVolumeClaimUtils.waitUntilPVCLabelsChange(namespaceName, clusterName, pvcLabel, labelAnnotationKey); PersistentVolumeClaimUtils.waitUntilPVCAnnotationChange(namespaceName, clusterName, pvcAnnotation, labelAnnotationKey); KafkaUtils.waitForKafkaReady(namespaceName, clusterName); pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter( persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList()); LOGGER.info(pvcs.toString()); assertThat(pvcs.size(), is(7)); for (PersistentVolumeClaim pvc : pvcs) { LOGGER.info("Verifying replaced PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey)); assertThat(pvc.getMetadata().getLabels().get(labelAnnotationKey), is(changedValue)); assertThat(pvc.getMetadata().getAnnotations().get(labelAnnotationKey), is(changedValue)); } } @ParallelNamespaceTest void testKafkaOffsetsReplicationFactorHigherThanReplicas(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1) .editSpec() .editKafka() .addToConfig("offsets.topic.replication.factor", 4) .addToConfig("transaction.state.log.min.isr", 4) .addToConfig("transaction.state.log.replication.factor", 4) .endKafka() .endSpec().build()); KafkaUtils.waitUntilKafkaStatusConditionContainsMessage(clusterName, 
namespaceName, "Kafka configuration option .* should be set to " + 3 + " or less because 'spec.kafka.replicas' is " + 3); } @ParallelNamespaceTest @Tag(INTERNAL_CLIENTS_USED) @Tag(CRUISE_CONTROL) void testReadOnlyRootFileSystem(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName()); final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName()); resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3) .editSpec() .editKafka() .withNewTemplate() .withNewKafkaContainer() .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build()) .endKafkaContainer() .endTemplate() .endKafka() .editZookeeper() .withNewTemplate() .withNewZookeeperContainer() .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build()) .endZookeeperContainer() .endTemplate() .endZookeeper() .editEntityOperator() .withNewTemplate() .withNewTlsSidecarContainer() .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build()) .endTlsSidecarContainer() .withNewTopicOperatorContainer() .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build()) .endTopicOperatorContainer() .withNewUserOperatorContainer() .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build()) .endUserOperatorContainer() .endTemplate() .endEntityOperator() .editOrNewKafkaExporter() .withNewTemplate() .withNewContainer() .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build()) .endContainer() .endTemplate() .endKafkaExporter() .editOrNewCruiseControl() .withNewTemplate() .withNewTlsSidecarContainer() .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build()) .endTlsSidecarContainer() .withNewCruiseControlContainer() .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build()) .endCruiseControlContainer() .endTemplate() .endCruiseControl() .endSpec() .build()); KafkaUtils.waitForKafkaReady(namespaceName, clusterName); resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build()); resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build()); final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName(); InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder() .withUsingPodName(kafkaClientsPodName) .withTopicName(topicName) .withNamespaceName(namespaceName) .withClusterName(clusterName) .withMessageCount(MESSAGE_COUNT) .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME) .build(); LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPodName); internalKafkaClient.checkProducedAndConsumedMessages( internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain() ); } protected void checkKafkaConfiguration(String namespaceName, String podNamePrefix, Map<String, Object> config, String clusterName) { LOGGER.info("Checking kafka configuration"); List<Pod> pods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, podNamePrefix); Properties properties = 
configMap2Properties(kubeClient(namespaceName).getConfigMap(namespaceName, clusterName + "-kafka-config")); for (Map.Entry<String, Object> property : config.entrySet()) { String key = property.getKey(); Object val = property.getValue(); assertThat(properties.keySet().contains(key), is(true)); assertThat(properties.getProperty(key), is(val)); } for (Pod pod: pods) { ExecResult result = cmdKubeClient(namespaceName).execInPod(pod.getMetadata().getName(), "/bin/bash", "-c", "cat /tmp/strimzi.properties"); Properties execProperties = stringToProperties(result.out()); for (Map.Entry<String, Object> property : config.entrySet()) { String key = property.getKey(); Object val = property.getValue(); assertThat(execProperties.keySet().contains(key), is(true)); assertThat(execProperties.getProperty(key), is(val)); } } } void checkStorageSizeForVolumes(List<PersistentVolumeClaim> volumes, String[] diskSizes, int kafkaRepl, int diskCount) { int k = 0; for (int i = 0; i < kafkaRepl; i++) { for (int j = 0; j < diskCount; j++) { LOGGER.info("Checking volume {} and size of storage {}", volumes.get(k).getMetadata().getName(), volumes.get(k).getSpec().getResources().getRequests().get("storage")); assertThat(volumes.get(k).getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizes[i]))); k++; } } } void verifyVolumeNamesAndLabels(String namespaceName, String clusterName, int kafkaReplicas, int diskCountPerReplica, String diskSizeGi) { ArrayList<String> pvcs = new ArrayList<>(); kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream() .filter(pvc -> pvc.getMetadata().getName().contains(clusterName + "-kafka")) .forEach(volume -> { String volumeName = volume.getMetadata().getName(); pvcs.add(volumeName); LOGGER.info("Checking labels for volume:" + volumeName); assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is(clusterName)); assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_KIND_LABEL), is(Kafka.RESOURCE_KIND)); assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_NAME_LABEL), is(clusterName.concat("-kafka"))); assertThat(volume.getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizeGi, "Gi"))); }); LOGGER.info("Checking PVC names included in JBOD array"); for (int i = 0; i < kafkaReplicas; i++) { for (int j = 0; j < diskCountPerReplica; j++) { assertThat(pvcs.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true)); } } LOGGER.info("Checking PVC on Kafka pods"); for (int i = 0; i < kafkaReplicas; i++) { ArrayList<String> dataSourcesOnPod = new ArrayList<>(); ArrayList<String> pvcsOnPod = new ArrayList<>(); LOGGER.info("Getting list of mounted data sources and PVCs on Kafka pod " + i); for (int j = 0; j < diskCountPerReplica; j++) { dataSourcesOnPod.add(kubeClient(namespaceName).getPod(namespaceName, clusterName.concat("-kafka-" + i)) .getSpec().getVolumes().get(j).getName()); pvcsOnPod.add(kubeClient(namespaceName).getPod(namespaceName, clusterName.concat("-kafka-" + i)) .getSpec().getVolumes().get(j).getPersistentVolumeClaim().getClaimName()); } LOGGER.info("Verifying mounted data sources and PVCs on Kafka pod " + i); for (int j = 0; j < diskCountPerReplica; j++) { assertThat(dataSourcesOnPod.contains("data-" + j), is(true)); assertThat(pvcsOnPod.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true)); } } } void verifyPresentLabels(Map<String, String> labels, HasMetadata resources) { for (Map.Entry<String, String> label : labels.entrySet()) { 
assertThat("Label exists with concrete value in HasMetadata(Services, CM, STS) resources", label.getValue().equals(resources.getMetadata().getLabels().get(label.getKey()))); } } void verifyNullLabels(String[] labelKeys, Map<String, String> labels) { for (String labelKey : labelKeys) { assertThat(labels.get(labelKey), nullValue()); } } void verifyNullLabels(String[] labelKeys, HasMetadata resources) { for (String labelKey : labelKeys) { assertThat(resources.getMetadata().getLabels().get(labelKey), nullValue()); } } void verifyAppLabels(Map<String, String> labels) { LOGGER.info("Verifying labels {}", labels); assertThat("Label " + Labels.STRIMZI_CLUSTER_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_CLUSTER_LABEL)); assertThat("Label " + Labels.STRIMZI_KIND_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_KIND_LABEL)); assertThat("Label " + Labels.STRIMZI_NAME_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_NAME_LABEL)); } void verifyAppLabelsForSecretsAndConfigMaps(Map<String, String> labels) { LOGGER.info("Verifying labels {}", labels); assertThat("Label " + Labels.STRIMZI_CLUSTER_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_CLUSTER_LABEL)); assertThat("Label " + Labels.STRIMZI_KIND_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_KIND_LABEL)); } @BeforeAll void setup(ExtensionContext extensionContext) { install = new SetupClusterOperator.SetupClusterOperatorBuilder() .withExtensionContext(extensionContext) .withNamespace(NAMESPACE) .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES) .createInstallation() .runInstallation(); } protected void afterEachMayOverride(ExtensionContext extensionContext) throws Exception { resourceManager.deleteResources(extensionContext); final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext); if (cluster.getListOfDeployedResources().contains(TEMPLATE_PATH)) { cluster.deleteCustomResources(extensionContext, TEMPLATE_PATH); } if (KafkaResource.kafkaClient().inNamespace(namespaceName).withName(OPENSHIFT_CLUSTER_NAME).get() != null) { cmdKubeClient(namespaceName).deleteByName(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME); } kubeClient(namespaceName).listPods(namespaceName).stream() .filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME)) .forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName())); kubeClient(namespaceName).getClient().customResources(CustomResourceDefinitionContext.fromCrd(Crds.kafkaTopic()), KafkaTopic.class, KafkaTopicList.class).inNamespace(namespaceName).delete(); kubeClient(namespaceName).getClient().persistentVolumeClaims().inNamespace(namespaceName).delete(); } }
scholzj/barnabas
systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java
Java
apache-2.0
107,527
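A side note on the label assertions in the KafkaST tests above: the boolean form assertThat("Label exists in kafka pods", expected.equals(actual)) only prints its fixed reason string on failure. A minimal sketch of a matcher-based alternative, assuming hamcrest's MatcherAssert and the same labelKeys/labelValues arrays used in the test; the helper itself is hypothetical, not part of the suite:

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;

import java.util.Map;

// Hypothetical helper: on mismatch hamcrest prints both the expected label
// value and the actual one, instead of just a fixed message.
static void assertLabelsPresent(Map<String, String> labels, String[] labelKeys, String[] labelValues) {
    for (int i = 0; i < labelKeys.length; i++) {
        assertThat("Label " + labelKeys[i] + " on kafka pod", labels.get(labelKeys[i]), is(labelValues[i]));
    }
}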
package com.basicalgorithms.coding_games;

import java.util.HashSet;
import java.util.Objects;
import java.util.Scanner;
import java.util.Set;

/**
 * Original question: https://www.codingame.com/multiplayer/bot-programming/coders-strike-back
 */
public class CodersStrikeBack {

    static double longestDist = Double.NEGATIVE_INFINITY;
    static Point initialPoint = null;
    static boolean hasFinishedOneLap;
    static Point from = null;
    static Point lastCheckpoint = null;
    static final Set<Point> visitedCheckPoints = new HashSet<>();
    static boolean hasBoosted = false;

    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);

        // game loop
        while (true) {
            int x = in.nextInt();
            int y = in.nextInt();
            int nextCheckpointX = in.nextInt(); // x position of the next checkpoint
            int nextCheckpointY = in.nextInt(); // y position of the next checkpoint
            int nextCheckpointDist = in.nextInt(); // distance to the next checkpoint
            int nextCheckpointAngle = in.nextInt(); // angle between your pod orientation and the direction of the next checkpoint
            int opponentX = in.nextInt();
            int opponentY = in.nextInt();

            // Write an action using System.out.println()
            // To debug: System.err.println("Debug messages...");
            // You have to output the target position
            // followed by the power (0 <= thrust <= 100)
            // i.e.: "x y thrust"
            final Point nextCheckpoint = new Point(nextCheckpointX, nextCheckpointY);
            final Point currentPosition = new Point(x, y);
            final Point enemyPosition = new Point(opponentX, opponentY);

            if (visitedCheckPoints.size() > 1 && enemyInRange(currentPosition, enemyPosition)) {
                ramEnemyShip(enemyPosition);
            } else {
                cruise(currentPosition, nextCheckpoint, nextCheckpointAngle);
            }

            if (!nextCheckpoint.equals(lastCheckpoint)) {
                from = lastCheckpoint;
            }
            lastCheckpoint = nextCheckpoint;
        }
    }

    private static void ramEnemyShip(final Point enemyPosition) {
        sailToDestination(enemyPosition.x, enemyPosition.y, "100");
    }

    private static boolean enemyInRange(final Point currentPosition, final Point enemyPosition) {
        return getDistance(currentPosition, enemyPosition) <= 1000;
    }

    private static void cruise(final Point currentPosition, final Point nextCheckpoint, final int nextCheckpointAngle) {
        if (initialPoint == null) {
            initialPoint = currentPosition;
        }
        int thrust = isWithinAngle(nextCheckpointAngle) ? 100 : 0;
        String power = String.valueOf(thrust);
        visitedCheckPoints.add(nextCheckpoint);
        System.err.println("Checkpoint added: nextCheckpointX=" + nextCheckpoint.x + ", nextCheckpointY=" + nextCheckpoint.y);
        for (final Point visitedCheckPoint : visitedCheckPoints) {
            System.err.println("Visited checkpoint: (" + visitedCheckPoint.x + ", " + visitedCheckPoint.y + ")");
        }
        if (shouldSlowDown(currentPosition, nextCheckpoint)) {
            power = String.valueOf(35);
        }
        if (hasFinishedOneLap(nextCheckpoint) && isLongestDistance(from, nextCheckpoint)
                && isWithinSharpAngle(nextCheckpointAngle) && !hasBoosted) {
            power = "BOOST";
            hasBoosted = true;
            System.err.println("Boosted!!!");
        }
        sailToDestination(nextCheckpoint.x, nextCheckpoint.y, power);
    }

    private static boolean shouldSlowDown(final Point currentPosition, final Point nextCheckpoint) {
        return getDistance(currentPosition, nextCheckpoint) < 1000;
    }

    private static void sailToDestination(final int nextCheckpointX, final int nextCheckpointY, final String power) {
        System.out.println(nextCheckpointX + " " + nextCheckpointY + " " + power);
        System.err.println("Thrust:" + power);
    }

    private static boolean isWithinAngle(final int nextCheckpointAngle) {
        return -90 < nextCheckpointAngle && nextCheckpointAngle < 90;
    }

    private static boolean isWithinSharpAngle(final int nextCheckpointAngle) {
        return -15 < nextCheckpointAngle && nextCheckpointAngle < 15;
    }

    private static boolean hasFinishedOneLap(final Point point) {
        if (hasFinishedOneLap) {
            return true;
        }
        if (initialPoint == null) {
            return false;
        }
        hasFinishedOneLap = getDistance(initialPoint, point) <= 600;
        return hasFinishedOneLap;
    }

    private static boolean isLongestDistance(final Point from, final Point endPoint) {
        if (from == null) {
            return false;
        }
        System.err.println("Start Point: (" + from.x + ", " + from.y + "); End Point: (" + endPoint.x + ", " + endPoint.y + ")");
        double dist = getDistance(from, endPoint);
        System.err.println("dist=" + dist + ", longestDist=" + longestDist);
        if (dist >= longestDist) {
            longestDist = dist;
            return true;
        }
        return false;
    }

    private static double getDistance(final Point from, final Point endPoint) {
        return Math.hypot(from.x - endPoint.x, from.y - endPoint.y);
    }

    private static class Point {
        final int x;
        final int y;

        private Point(final int x, final int y) {
            this.x = x;
            this.y = y;
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (!(o instanceof Point)) {
                return false;
            }
            final Point point = (Point) o;
            return x == point.x && y == point.y;
        }

        @Override
        public int hashCode() {
            return Objects.hash(x, y);
        }
    }
}
Ericliu001/basic-algorithms
src/test/java/com/basicalgorithms/coding_games/CodersStrikeBack.java
Java
apache-2.0
6,186
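The bot above keys every decision off a Euclidean distance threshold: 1000 units for ramming and for slowing down, 600 units for lap detection. A self-contained sketch of those checks as pure functions, easy to unit-test outside the Scanner-driven game loop; the class and method names are illustrative, not the file's own:

// Sketch of the geometry checks used by the bot, pulled out as pure functions.
// Thresholds mirror the ones in the file above (1000, 600).
final class PodMath {
    private PodMath() { }

    static double distance(int x1, int y1, int x2, int y2) {
        return Math.hypot(x1 - x2, y1 - y2); // same as sqrt(dx^2 + dy^2)
    }

    static boolean enemyInRange(int x, int y, int enemyX, int enemyY) {
        return distance(x, y, enemyX, enemyY) <= 1000; // close enough to ram
    }

    static boolean finishedLap(int startX, int startY, int x, int y) {
        return distance(startX, startY, x, y) <= 600; // back near the starting checkpoint
    }
}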
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.client; import com.google.inject.AbstractModule; import com.google.inject.Inject; import org.junit.Test; import static org.junit.Assert.assertEquals; /** * @author Simon Thoresen Hult */ public class ClientDriverTestCase { @Test public void requireThatApplicationInstanceInjectionWorks() throws Exception { MyModule module = new MyModule(); ClientDriver.runApplication(new MyApplication(module)); assertEquals(5, module.state); } @Test public void requireThatApplicationClassInjectionWorks() throws Exception { MyModule module = new MyModule(); ClientDriver.runApplication(MyApplication.class, module); assertEquals(5, module.state); } private static class MyApplication implements ClientApplication { final MyModule module; @Inject MyApplication(MyModule module) { this.module = module; module.state = 1; } @Override public void start() { if (++module.state != 2) { throw new IllegalStateException(); } } @Override public void run() { if (++module.state != 3) { throw new IllegalStateException(); } } @Override public void stop() { if (++module.state != 4) { throw new IllegalStateException(); } } @Override public void destroy() { if (++module.state != 5) { throw new IllegalStateException(); } } } private static class MyModule extends AbstractModule { int state = 0; @Override protected void configure() { bind(MyModule.class).toInstance(this); } } }
vespa-engine/vespa
jdisc_core/src/test/java/com/yahoo/jdisc/client/ClientDriverTestCase.java
Java
apache-2.0
1,946
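MyApplication verifies the jDISC lifecycle order (constructor, start, run, stop, destroy) with an int state machine that throws IllegalStateException on the first out-of-order call. An equivalent sketch that records events and asserts the whole sequence at once — hypothetical, not how the original test is written — would show the full observed order in a failure message:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical variant of MyApplication: record lifecycle events, then assert
// the full order after ClientDriver.runApplication(app) returns, e.g.
// assertEquals(Arrays.asList("start", "run", "stop", "destroy"), app.events);
class RecordingApplication implements ClientApplication {
    final List<String> events = new ArrayList<>();

    @Override public void start()   { events.add("start"); }
    @Override public void run()     { events.add("run"); }
    @Override public void stop()    { events.add("stop"); }
    @Override public void destroy() { events.add("destroy"); }
}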
package sample.multiversion; public interface Core { String getVersion(); String getDependencyVersion(); }
omacarena/only-short-poc
java.multiversion/v1/src/main/sample/multiversion/Core.java
Java
apache-2.0
117
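Core is the version-reporting seam of this multi-version PoC: each module version ships its own implementation. A minimal sketch of what a v1 implementation could look like; the class name and returned strings are assumptions, not the repo's actual code:

package sample.multiversion;

// Hypothetical v1 implementation of Core; the real PoC module may differ.
public class CoreV1 implements Core {
    @Override
    public String getVersion() {
        return "v1"; // version of this module
    }

    @Override
    public String getDependencyVersion() {
        return "dep-v1"; // version of the dependency this module was built against
    }
}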
package org.example; import org.camunda.bpm.spring.boot.starter.annotation.EnableProcessApplication; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; @SpringBootApplication @EnableProcessApplication("dynamic-tenant-designation") public class CamundaApplication { public static void main(String... args) { SpringApplication.run(CamundaApplication.class, args); } }
camunda/camunda-consulting
snippets/dynamic-tenant-designation/src/main/java/org/example/CamundaApplication.java
Java
apache-2.0
445
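@EnableProcessApplication registers this Spring Boot app as a Camunda process application named "dynamic-tenant-designation". A sketch of a tenant-aware delegate such an app might contain; the class and its logic are illustrative guesses — only JavaDelegate and DelegateExecution#getTenantId() are standard Camunda API:

package org.example;

import org.camunda.bpm.engine.delegate.DelegateExecution;
import org.camunda.bpm.engine.delegate.JavaDelegate;

// Hypothetical delegate: logs the tenant the running process instance belongs to.
public class LogTenantDelegate implements JavaDelegate {
    @Override
    public void execute(DelegateExecution execution) {
        System.out.println("Executing for tenant: " + execution.getTenantId());
    }
}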
package org.galaxy.myhttp; import org.junit.Test; import static org.junit.Assert.*; /** * To work on unit tests, switch the Test Artifact in the Build Variants view. */ public class ExampleUnitTest { @Test public void addition_isCorrect() throws Exception { assertEquals(4, 2 + 2); } }
galaxy-captain/MyHttp
app/src/test/java/org/galaxy/myhttp/ExampleUnitTest.java
Java
apache-2.0
310
package org.commcare; import org.commcare.models.database.UnencryptedHybridFileBackedSqlStorage; import org.commcare.models.database.UnencryptedHybridFileBackedSqlStorageMock; import org.javarosa.core.services.storage.Persistable; /** * Delegator around CommCareApp allowing the test suite to override logic. * * @author Phillip Mates (pmates@dimagi.com). */ public class CommCareTestApp extends CommCareApp { private final CommCareApp app; public CommCareTestApp(CommCareApp app) { super(app.getAppRecord()); fileRoot = app.fileRoot; setAppResourceState(app.getAppResourceState()); this.app = app; } @Override public <T extends Persistable> UnencryptedHybridFileBackedSqlStorage<T> getFileBackedStorage(String name, Class<T> c) { return new UnencryptedHybridFileBackedSqlStorageMock<>(name, c, app.buildAndroidDbHelper(), app); } }
dimagi/commcare-android
app/unit-tests/src/org/commcare/CommCareTestApp.java
Java
apache-2.0
906
/** * Copyright (C) 2014-2015 LinkedIn Corp. (pinot-core@linkedin.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.linkedin.pinot.core.startree; import java.io.BufferedOutputStream; import java.io.DataOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.joda.time.DateTime; import org.json.JSONObject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Objects; import com.google.common.collect.BiMap; import com.google.common.collect.HashBiMap; import com.linkedin.pinot.common.data.DimensionFieldSpec; import com.linkedin.pinot.common.data.MetricFieldSpec; import com.linkedin.pinot.common.data.FieldSpec.DataType; import com.linkedin.pinot.common.data.Schema; import com.linkedin.pinot.common.utils.Pairs.IntPair; import com.linkedin.pinot.core.data.GenericRow; import com.linkedin.pinot.core.segment.creator.impl.V1Constants; /** * Uses file to build the star tree. Each row is divided into dimension and metrics. Time is added to dimension list. * We use the split order to build the tree. In most cases, split order will be ranked depending on the cardinality (descending order). * Time column will be excluded or last entry in split order irrespective of its cardinality * This is a recursive algorithm where we branch on one dimension at every level. 
* * <b>Pseudo algo</b> * <code> * * build(){ * let table(1,N) consist of N input rows * table.sort(1,N) //sort the table on all dimensions, according to split order * constructStarTree(table, 0, N, 0); * } * constructStarTree(table, start, end, level){ * splitDimensionName = dimensionsSplitOrder[level] * groupByResult<dimName, length> = table.groupBy(dimensionsSplitOrder[level]); //returns the number of rows for each value in splitDimension * int rangeStart = 0; * for each ( entry<dimName,length> in groupByResult){ * if(entry.length > minThreshold){ * constructStarTree(table, rangeStart, rangeStart + entry.length, level +1); * } * rangeStart = rangeStart + entry.length; * updateStarTree() //add new child * } * * //create a star tree node * * aggregatedRows = table.uniqueAfterRemovingAttributeAndAggregateMetrics(start, end, splitDimensionName); * for(each row in aggregatedRows) * table.add(row); * if(aggregatedRows.size > minThreshold) { * table.sort(end, end + aggregatedRows.size); * constructStarTree(table, end, end + aggregatedRows.size, level +1); * } * } * </code> */ public class OffHeapStarTreeBuilder implements StarTreeBuilder { private static final Logger LOG = LoggerFactory.getLogger(OffHeapStarTreeBuilder.class); File dataFile; private DataOutputStream dataBuffer; int rawRecordCount = 0; int aggRecordCount = 0; private List<String> dimensionsSplitOrder; private Set<String> skipStarNodeCreationForDimensions; private Set<String> skipMaterializationForDimensions; private int maxLeafRecords; private StarTree starTree; private StarTreeIndexNode starTreeRootIndexNode; private int numDimensions; private int numMetrics; private List<String> dimensionNames; private List<String> metricNames; private String timeColumnName; private List<DataType> dimensionTypes; private List<DataType> metricTypes; private Map<String, Object> dimensionNameToStarValueMap; private HashBiMap<String, Integer> dimensionNameToIndexMap; private Map<String, Integer> metricNameToIndexMap; private int dimensionSizeBytes; private int metricSizeBytes; private File outDir; private Map<String, HashBiMap<Object, Integer>> dictionaryMap; boolean debugMode = false; private int[] sortOrder; private int skipMaterializationCardinalityThreshold; public void init(StarTreeBuilderConfig builderConfig) throws Exception { Schema schema = builderConfig.schema; timeColumnName = schema.getTimeColumnName(); this.dimensionsSplitOrder = builderConfig.dimensionsSplitOrder; skipStarNodeCreationForDimensions = builderConfig.getSkipStarNodeCreationForDimensions(); skipMaterializationForDimensions = builderConfig.getSkipMaterializationForDimensions(); skipMaterializationCardinalityThreshold = builderConfig.getSkipMaterializationCardinalityThreshold(); this.maxLeafRecords = builderConfig.maxLeafRecords; this.outDir = builderConfig.getOutDir(); if (outDir == null) { outDir = new File(System.getProperty("java.io.tmpdir"), V1Constants.STAR_TREE_INDEX_DIR + "_" + DateTime.now()); } LOG.debug("Index output directory:{}", outDir); dimensionTypes = new ArrayList<>(); dimensionNames = new ArrayList<>(); dimensionNameToIndexMap = HashBiMap.create(); dimensionNameToStarValueMap = new HashMap<>(); dictionaryMap = new HashMap<>(); //READ DIMENSIONS COLUMNS List<DimensionFieldSpec> dimensionFieldSpecs = schema.getDimensionFieldSpecs(); for (int index = 0; index < dimensionFieldSpecs.size(); index++) { DimensionFieldSpec spec = dimensionFieldSpecs.get(index); String dimensionName = spec.getName(); dimensionNames.add(dimensionName); dimensionNameToIndexMap.put(dimensionName,
index); Object starValue; starValue = getAllStarValue(spec); dimensionNameToStarValueMap.put(dimensionName, starValue); dimensionTypes.add(spec.getDataType()); HashBiMap<Object, Integer> dictionary = HashBiMap.create(); dictionaryMap.put(dimensionName, dictionary); } //treat time column as just another dimension, only difference is that we will never split on this dimension unless explicitly specified in split order if (timeColumnName != null) { dimensionNames.add(timeColumnName); dimensionTypes.add(schema.getTimeFieldSpec().getDataType()); int index = dimensionNameToIndexMap.size(); dimensionNameToIndexMap.put(timeColumnName, index); HashBiMap<Object, Integer> dictionary = HashBiMap.create(); dictionaryMap.put(schema.getTimeColumnName(), dictionary); } dimensionSizeBytes = dimensionNames.size() * Integer.SIZE / 8; this.numDimensions = dimensionNames.size(); //READ METRIC COLUMNS this.metricTypes = new ArrayList<>(); this.metricNames = new ArrayList<>(); this.metricNameToIndexMap = new HashMap<>(); this.metricSizeBytes = 0; List<MetricFieldSpec> metricFieldSpecs = schema.getMetricFieldSpecs(); for (int index = 0; index < metricFieldSpecs.size(); index++) { MetricFieldSpec spec = metricFieldSpecs.get(index); String metricName = spec.getName(); metricNames.add(metricName); metricNameToIndexMap.put(metricName, index); DataType dataType = spec.getDataType(); metricTypes.add(dataType); metricSizeBytes += dataType.size(); } this.numMetrics = metricNames.size(); builderConfig.getOutDir().mkdirs(); dataFile = new File(outDir, "star-tree.buf"); dataBuffer = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile))); //INITIALIZE THE ROOT NODE this.starTreeRootIndexNode = new StarTreeIndexNode(); this.starTreeRootIndexNode.setDimensionName(StarTreeIndexNode.all()); this.starTreeRootIndexNode.setDimensionValue(StarTreeIndexNode.all()); this.starTreeRootIndexNode.setLevel(0); LOG.debug("dimensionNames:{}", dimensionNames); LOG.debug("metricNames:{}", metricNames); } /** * Validate the split order by removing any dimensions that may be part of the skip materialization list. 
* @param dimensionsSplitOrder * @param skipMaterializationForDimensions * @return */ private List<String> sanitizeSplitOrder(List<String> dimensionsSplitOrder, Set<String> skipMaterializationForDimensions) { List<String> validatedSplitOrder = new ArrayList<String>(); for (String dimension : dimensionsSplitOrder) { if (skipMaterializationForDimensions == null || !skipMaterializationForDimensions.contains(dimension)) { LOG.info("Adding dimension {} to split order", dimension); validatedSplitOrder.add(dimension); } else { LOG.info( "Dimension {} cannot be part of 'dimensionSplitOrder' and 'skipMaterializationForDimensions', removing it from split order", dimension); } } return validatedSplitOrder; } private Object getAllStarValue(DimensionFieldSpec spec) throws Exception { switch (spec.getDataType()) { case STRING: return "ALL"; case BOOLEAN: case BYTE: case CHAR: case DOUBLE: case FLOAT: case INT: case LONG: return spec.getDefaultNullValue(); case OBJECT: case SHORT: case DOUBLE_ARRAY: case CHAR_ARRAY: case FLOAT_ARRAY: case INT_ARRAY: case LONG_ARRAY: case SHORT_ARRAY: case STRING_ARRAY: case BYTE_ARRAY: default: throw new Exception("Unsupported dimension data type" + spec); } } public GenericRow toGenericRow(DimensionBuffer dimensionKey, MetricBuffer metricsHolder) { GenericRow row = new GenericRow(); Map<String, Object> map = new HashMap<>(); for (int i = 0; i < dimensionNames.size(); i++) { String dimName = dimensionNames.get(i); BiMap<Integer, Object> inverseDictionary = dictionaryMap.get(dimName).inverse(); Object dimValue = inverseDictionary.get(dimensionKey.getDimension(i)); if (dimValue == null) { dimValue = dimensionNameToStarValueMap.get(dimName); } map.put(dimName, dimValue); } for (int i = 0; i < numMetrics; i++) { String metName = metricNames.get(i); map.put(metName, metricsHolder.get(i)); } row.init(map); return row; } public void append(GenericRow row) throws Exception { DimensionBuffer dimension = new DimensionBuffer(numDimensions); for (int i = 0; i < dimensionNames.size(); i++) { String dimName = dimensionNames.get(i); Map<Object, Integer> dictionary = dictionaryMap.get(dimName); Object dimValue = row.getValue(dimName); if (dimValue == null) { //TODO: Have another default value to represent STAR. Using default value to represent STAR as of now. 
//It does not matter during query execution, since we know that values is STAR from the star tree dimValue = dimensionNameToStarValueMap.get(dimName); } if (!dictionary.containsKey(dimValue)) { dictionary.put(dimValue, dictionary.size()); } dimension.setDimension(i, dictionary.get(dimValue)); } Number[] numbers = new Number[numMetrics]; for (int i = 0; i < numMetrics; i++) { String metName = metricNames.get(i); numbers[i] = (Number) row.getValue(metName); } MetricBuffer metrics = new MetricBuffer(numbers); append(dimension, metrics); } public void append(DimensionBuffer dimension, MetricBuffer metrics) throws Exception { appendToRawBuffer(dimension, metrics); } private void appendToRawBuffer(DimensionBuffer dimension, MetricBuffer metrics) throws IOException { appendToBuffer(dataBuffer, dimension, metrics); rawRecordCount++; } private void appendToAggBuffer(DimensionBuffer dimension, MetricBuffer metrics) throws IOException { appendToBuffer(dataBuffer, dimension, metrics); aggRecordCount++; } private void appendToBuffer(DataOutputStream dos, DimensionBuffer dimensions, MetricBuffer metricHolder) throws IOException { for (int i = 0; i < numDimensions; i++) { dos.writeInt(dimensions.getDimension(i)); } dos.write(metricHolder.toBytes(metricSizeBytes, metricTypes)); } public void build() throws Exception { if (skipMaterializationForDimensions == null || skipMaterializationForDimensions.isEmpty()) { skipMaterializationForDimensions = computeDefaultDimensionsToSkipMaterialization(); } if (dimensionsSplitOrder == null || dimensionsSplitOrder.isEmpty()) { dimensionsSplitOrder = computeDefaultSplitOrder(); } // Remove any dimensions from split order that would be not be materialized. dimensionsSplitOrder = sanitizeSplitOrder(dimensionsSplitOrder, skipMaterializationForDimensions); LOG.debug("Split order:{}", dimensionsSplitOrder); long start = System.currentTimeMillis(); dataBuffer.flush(); sort(dataFile, 0, rawRecordCount); constructStarTree(starTreeRootIndexNode, 0, rawRecordCount, 0, dataFile); long end = System.currentTimeMillis(); LOG.debug("Took {} ms to build star tree index. Original records:{} Materialized record:{}", (end - start), rawRecordCount, aggRecordCount); starTree = new StarTree(starTreeRootIndexNode, dimensionNameToIndexMap); File treeBinary = new File(outDir, "star-tree.bin"); LOG.debug("Saving tree binary at: {} ", treeBinary); starTree.writeTree(new BufferedOutputStream(new FileOutputStream(treeBinary))); printTree(starTreeRootIndexNode, 0); LOG.debug("Finished build tree. out dir: {} ", outDir); dataBuffer.close(); } private void printTree(StarTreeIndexNode node, int level) { for (int i = 0; i < level; i++) { LOG.debug(" "); } BiMap<Integer, String> inverse = dimensionNameToIndexMap.inverse(); String dimName = "ALL"; Object dimValue = "ALL"; if (node.getDimensionName() != StarTreeIndexNode.all()) { dimName = inverse.get(node.getDimensionName()); } if (node.getDimensionValue() != StarTreeIndexNode.all()) { dimValue = dictionaryMap.get(dimName).inverse().get(node.getDimensionValue()); } String formattedOutput = Objects.toStringHelper(node).add("nodeId", node.getNodeId()).add("level", level).add("dimensionName", dimName) .add("dimensionValue", dimValue).add("childDimensionName", inverse.get(node.getChildDimensionName())) .add("childCount", node.getChildren() == null ? 
0 : node.getChildren().size()) .add("startDocumentId", node.getStartDocumentId()).add("endDocumentId", node.getEndDocumentId()) .add("documentCount", (node.getEndDocumentId() - node.getStartDocumentId())).toString(); LOG.debug(formattedOutput); if (!node.isLeaf()) { for (StarTreeIndexNode child : node.getChildren().values()) { printTree(child, level + 1); } } } private List<String> computeDefaultSplitOrder() { ArrayList<String> defaultSplitOrder = new ArrayList<>(); //include only the dimensions not time column. Also, assumes that skipMaterializationForDimensions is built. for (String dimensionName : dimensionNames) { if (skipMaterializationForDimensions != null && !skipMaterializationForDimensions.contains(dimensionName)) { defaultSplitOrder.add(dimensionName); } } if (timeColumnName != null) { defaultSplitOrder.remove(timeColumnName); } Collections.sort(defaultSplitOrder, new Comparator<String>() { @Override public int compare(String o1, String o2) { return dictionaryMap.get(o2).size() - dictionaryMap.get(o1).size(); //descending } }); return defaultSplitOrder; } private Set<String> computeDefaultDimensionsToSkipMaterialization() { Set<String> skipDimensions = new HashSet<String>(); for (String dimensionName : dimensionNames) { if (dictionaryMap.get(dimensionName).size() > skipMaterializationCardinalityThreshold) { skipDimensions.add(dimensionName); } } return skipDimensions; } /* * Sorts the file on all dimensions */ private void sort(File file, int startDocId, int endDocId) throws IOException { if (debugMode) { LOG.info("BEFORE SORTING"); printFile(file, startDocId, endDocId); } StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder()); dataSorter.sort(startDocId, endDocId, 0, dimensionSizeBytes); if (debugMode) { LOG.info("AFTER SORTING"); printFile(file, startDocId, endDocId); } } private int[] getSortOrder() { if (sortOrder == null) { sortOrder = new int[dimensionNames.size()]; for (int i = 0; i < dimensionsSplitOrder.size(); i++) { sortOrder[i] = dimensionNameToIndexMap.get(dimensionsSplitOrder.get(i)); } //add remaining dimensions that were not part of dimensionsSplitOrder int counter = 0; for (String dimName : dimensionNames) { if (!dimensionsSplitOrder.contains(dimName)) { sortOrder[dimensionsSplitOrder.size() + counter] = dimensionNameToIndexMap.get(dimName); counter = counter + 1; } } } return sortOrder; } private void printFile(File file, int startDocId, int endDocId) throws IOException { LOG.info("Contents of file:{} from:{} to:{}", file.getName(), startDocId, endDocId); StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder()); Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(startDocId, endDocId); int numRecordsToPrint = 100; int counter = 0; while (iterator.hasNext()) { Pair<byte[], byte[]> next = iterator.next(); LOG.info("{}, {}", DimensionBuffer.fromBytes(next.getLeft()), MetricBuffer.fromBytes(next.getRight(), metricTypes)); if (counter++ == numRecordsToPrint) { break; } } } private int constructStarTree(StarTreeIndexNode node, int startDocId, int endDocId, int level, File file) throws Exception { //node.setStartDocumentId(startDocId); int docsAdded = 0; if (level == dimensionsSplitOrder.size() - 1) { return 0; } String splitDimensionName = dimensionsSplitOrder.get(level); Integer splitDimensionId = dimensionNameToIndexMap.get(splitDimensionName); LOG.debug("Building tree at level:{} using file:{} from startDoc:{} endDocId:{} splitting on 
dimension:{}", level, file.getName(), startDocId, endDocId, splitDimensionName); Map<Integer, IntPair> sortGroupBy = groupBy(startDocId, endDocId, splitDimensionId, file); LOG.debug("Group stats:{}", sortGroupBy); node.setChildDimensionName(splitDimensionId); node.setChildren(new HashMap<Integer, StarTreeIndexNode>()); for (int childDimensionValue : sortGroupBy.keySet()) { StarTreeIndexNode child = new StarTreeIndexNode(); child.setDimensionName(splitDimensionId); child.setDimensionValue(childDimensionValue); child.setParent(node); child.setLevel(node.getLevel() + 1); // n.b. We will number the nodes later using BFS after fully split // Add child to parent node.getChildren().put(childDimensionValue, child); int childDocs = 0; IntPair range = sortGroupBy.get(childDimensionValue); if (range.getRight() - range.getLeft() > maxLeafRecords) { childDocs = constructStarTree(child, range.getLeft(), range.getRight(), level + 1, file); docsAdded += childDocs; } // Either range <= maxLeafRecords, or we did not split further (last level). if (childDocs == 0) { child.setStartDocumentId(range.getLeft()); child.setEndDocumentId(range.getRight()); } } // Return if star node does not need to be created. if (skipStarNodeCreationForDimensions != null && skipStarNodeCreationForDimensions.contains(splitDimensionName)) { return docsAdded; } //create star node StarTreeIndexNode starChild = new StarTreeIndexNode(); starChild.setDimensionName(splitDimensionId); starChild.setDimensionValue(StarTreeIndexNode.all()); starChild.setParent(node); starChild.setLevel(node.getLevel() + 1); // n.b. We will number the nodes later using BFS after fully split // Add child to parent node.getChildren().put(StarTreeIndexNode.all(), starChild); Iterator<Pair<DimensionBuffer, MetricBuffer>> iterator = uniqueCombinations(startDocId, endDocId, file, splitDimensionId); int rowsAdded = 0; int startOffset = rawRecordCount + aggRecordCount; while (iterator.hasNext()) { Pair<DimensionBuffer, MetricBuffer> next = iterator.next(); DimensionBuffer dimension = next.getLeft(); MetricBuffer metricsHolder = next.getRight(); LOG.debug("Adding row:{}", dimension); appendToAggBuffer(dimension, metricsHolder); rowsAdded++; } docsAdded += rowsAdded; LOG.debug("Added {} additional records at level {}", rowsAdded, level); //flush dataBuffer.flush(); int childDocs = 0; if (rowsAdded >= maxLeafRecords) { sort(dataFile, startOffset, startOffset + rowsAdded); childDocs = constructStarTree(starChild, startOffset, startOffset + rowsAdded, level + 1, dataFile); docsAdded += childDocs; } // Either rowsAdded < maxLeafRecords, or we did not split further (last level). if (childDocs == 0) { starChild.setStartDocumentId(startOffset); starChild.setEndDocumentId(startOffset + rowsAdded); } //node.setEndDocumentId(endDocId + docsAdded); return docsAdded; } /** * Assumes the file is already sorted, returns the unique combinations after removing a specified dimension. 
* Aggregates the metrics for each unique combination, currently only sum is supported by default * @param startDocId * @param endDocId * @param file * @param splitDimensionId * @return * @throws Exception */ private Iterator<Pair<DimensionBuffer, MetricBuffer>> uniqueCombinations(int startDocId, int endDocId, File file, int splitDimensionId) throws Exception { StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder()); Iterator<Pair<byte[], byte[]>> iterator1 = dataSorter.iterator(startDocId, endDocId); File tempFile = new File(outDir, file.getName() + "_" + startDocId + "_" + endDocId + ".unique.tmp"); DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(tempFile))); while (iterator1.hasNext()) { Pair<byte[], byte[]> next = iterator1.next(); byte[] dimensionBuffer = next.getLeft(); byte[] metricBuffer = next.getRight(); DimensionBuffer dimensions = DimensionBuffer.fromBytes(dimensionBuffer); for (int i = 0; i < numDimensions; i++) { String dimensionName = dimensionNameToIndexMap.inverse().get(i); if (i == splitDimensionId || (skipMaterializationForDimensions != null && skipMaterializationForDimensions.contains(dimensionName))) { dos.writeInt(StarTreeIndexNode.all()); } else { dos.writeInt(dimensions.getDimension(i)); } } dos.write(metricBuffer); } dos.close(); dataSorter = new StarTreeDataTable(tempFile, dimensionSizeBytes, metricSizeBytes, getSortOrder()); dataSorter.sort(0, endDocId - startDocId); if (debugMode) { printFile(tempFile, 0, endDocId - startDocId); } final Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(0, endDocId - startDocId); return new Iterator<Pair<DimensionBuffer, MetricBuffer>>() { Pair<DimensionBuffer, MetricBuffer> prev = null; boolean done = false; @Override public void remove() { throw new UnsupportedOperationException(); } @Override public boolean hasNext() { return !done; } @Override public Pair<DimensionBuffer, MetricBuffer> next() { while (iterator.hasNext()) { Pair<byte[], byte[]> next = iterator.next(); byte[] dimBuffer = next.getLeft(); byte[] metricBuffer = next.getRight(); if (prev == null) { prev = Pair.of(DimensionBuffer.fromBytes(dimBuffer), MetricBuffer.fromBytes(metricBuffer, metricTypes)); } else { Pair<DimensionBuffer, MetricBuffer> current = Pair.of(DimensionBuffer.fromBytes(dimBuffer), MetricBuffer.fromBytes(metricBuffer, metricTypes)); if (!current.getLeft().equals(prev.getLeft())) { Pair<DimensionBuffer, MetricBuffer> ret = prev; prev = current; LOG.debug("Returning unique {}", prev.getLeft()); return ret; } else { prev.getRight().aggregate(current.getRight(), metricTypes); } } } done = true; LOG.debug("Returning unique {}", prev.getLeft()); return prev; } }; } /** * sorts the file from start to end on a dimension index * @param startDocId * @param endDocId * @param dimension * @param file * @return */ private Map<Integer, IntPair> groupBy(int startDocId, int endDocId, Integer dimension, File file) { StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder()); return dataSorter.groupByIntColumnCount(startDocId, endDocId, dimension); } /** * Iterator to iterate over the records from startDocId to endDocId */ @Override public Iterator<GenericRow> iterator(final int startDocId, final int endDocId) throws Exception { StarTreeDataTable dataSorter = new StarTreeDataTable(dataFile, dimensionSizeBytes, metricSizeBytes, getSortOrder()); final Iterator<Pair<byte[], byte[]>> iterator = 
dataSorter.iterator(startDocId, endDocId); return new Iterator<GenericRow>() { @Override public boolean hasNext() { return iterator.hasNext(); } @Override public void remove() { throw new UnsupportedOperationException(); } @Override public GenericRow next() { Pair<byte[], byte[]> pair = iterator.next(); DimensionBuffer dimensionKey = DimensionBuffer.fromBytes(pair.getLeft()); MetricBuffer metricsHolder = MetricBuffer.fromBytes(pair.getRight(), metricTypes); return toGenericRow(dimensionKey, metricsHolder); } }; } public JSONObject getStarTreeAsJSON() throws Exception { JSONObject json = new JSONObject(); toJson(json, starTreeRootIndexNode, dictionaryMap); return json; } private void toJson(JSONObject json, StarTreeIndexNode node, Map<String, HashBiMap<Object, Integer>> dictionaryMap) throws Exception { String dimName = "ALL"; Object dimValue = "ALL"; if (node.getDimensionName() != StarTreeIndexNode.all()) { dimName = dimensionNames.get(node.getDimensionName()); } if (node.getDimensionValue() != StarTreeIndexNode.all()) { dimValue = dictionaryMap.get(dimName).inverse().get(node.getDimensionValue()); } json.put("title", dimName + ":" + dimValue); if (node.getChildren() != null) { JSONObject[] childJsons = new JSONObject[node.getChildren().size()]; int index = 0; for (Integer child : node.getChildren().keySet()) { StarTreeIndexNode childNode = node.getChildren().get(child); JSONObject childJson = new JSONObject(); toJson(childJson, childNode, dictionaryMap); childJsons[index++] = childJson; } json.put("nodes", childJsons); } } @Override public void cleanup() { if (outDir != null) { FileUtils.deleteQuietly(outDir); } } @Override public StarTree getTree() { return starTree; } @Override public int getTotalRawDocumentCount() { return rawRecordCount; } @Override public int getTotalAggregateDocumentCount() { return aggRecordCount; } @Override public int getMaxLeafRecords() { return maxLeafRecords; } @Override public List<String> getDimensionsSplitOrder() { return dimensionsSplitOrder; } public Map<String, HashBiMap<Object, Integer>> getDictionaryMap() { return dictionaryMap; } public HashBiMap<String, Integer> getDimensionNameToIndexMap() { return dimensionNameToIndexMap; } @Override public Set<String> getSkipMaterializationForDimensions() { return skipMaterializationForDimensions; } }
tkao1000/pinot
pinot-core/src/main/java/com/linkedin/pinot/core/startree/OffHeapStarTreeBuilder.java
Java
apache-2.0
28,902
/*
 * Copyright 2011 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.gradle.plugins.signing;

import com.google.common.base.Function;
import groovy.lang.Closure;
import org.gradle.api.artifacts.PublishArtifact;
import org.gradle.api.file.FileCollection;
import org.gradle.api.internal.file.collections.ImmutableFileCollection;
import org.gradle.plugins.signing.signatory.Signatory;
import org.gradle.plugins.signing.type.SignatureType;
import org.gradle.util.ConfigureUtil;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

/**
 * A sign operation creates digital signatures for one or more files or {@link PublishArtifact publish artifacts}.
 *
 * <p>The external representation of the signature is specified by the {@link #getSignatureType() signature type property},
 * while the {@link #signatory} property specifies who is to sign.
 *
 * <p>A sign operation manages one or more {@link Signature} objects. The {@code sign} methods are used to register things
 * to generate signatures for. The {@link #execute()} method generates the signatures for all of the registered items at
 * that time.
 */
abstract public class SignOperation implements SignatureSpec {

    /**
     * The file representation of the signature(s).
     */
    private SignatureType signatureType;

    /**
     * The signatory to the generated digital signatures.
     */
    private Signatory signatory;

    /**
     * Whether or not it is required that this signature be generated.
     */
    private boolean required;

    private final List<Signature> signatures = new ArrayList<Signature>();

    public String getDisplayName() {
        return "SignOperation";
    }

    @Override
    public String toString() {
        return getDisplayName();
    }

    @Override
    public void setSignatureType(SignatureType signatureType) {
        this.signatureType = signatureType;
    }

    @Override
    public SignatureType getSignatureType() {
        return signatureType;
    }

    @Override
    public void setSignatory(Signatory signatory) {
        this.signatory = signatory;
    }

    @Override
    public Signatory getSignatory() {
        return signatory;
    }

    @Override
    public void setRequired(boolean required) {
        this.required = required;
    }

    @Override
    public boolean isRequired() {
        return required;
    }

    /**
     * Registers signatures for the given artifacts.
     *
     * @return this
     * @see Signature#Signature(PublishArtifact, SignatureSpec, Object...)
     */
    public SignOperation sign(PublishArtifact... artifacts) {
        for (PublishArtifact artifact : artifacts) {
            signatures.add(new Signature(artifact, this));
        }
        return this;
    }

    /**
     * Registers signatures for the given files.
     *
     * @return this
     * @see Signature#Signature(File, SignatureSpec, Object...)
     */
    public SignOperation sign(File... files) {
        for (File file : files) {
            signatures.add(new Signature(file, this));
        }
        return this;
    }

    /**
     * Registers signatures (with the given classifier) for the given files.
     *
     * @return this
     * @see Signature#Signature(File, String, SignatureSpec, Object...)
     */
    public SignOperation sign(String classifier, File... files) {
        for (File file : files) {
            signatures.add(new Signature(file, classifier, this));
        }
        return this;
    }

    /**
     * Change the signature type for signature generation.
     */
    public SignOperation signatureType(SignatureType type) {
        this.signatureType = type;
        return this;
    }

    /**
     * Change the signatory for signature generation.
     */
    public SignOperation signatory(Signatory signatory) {
        this.signatory = signatory;
        return this;
    }

    /**
     * Executes the given closure against this object.
     */
    public SignOperation configure(Closure closure) {
        ConfigureUtil.configureSelf(closure, this);
        return this;
    }

    /**
     * Generates actual signature files for all of the registered signatures.
     *
     * <p>The signatures are generated with the configuration they have at this time, which includes the current
     * signature type and signatory of this operation.
     *
     * <p>This method can be called multiple times, with the signatures being generated with their current
     * configuration each time.
     *
     * @return this
     * @see Signature#generate()
     */
    public SignOperation execute() {
        for (Signature signature : signatures) {
            signature.generate();
        }
        return this;
    }

    /**
     * The registered signatures.
     */
    public List<Signature> getSignatures() {
        return new ArrayList<Signature>(signatures);
    }

    /**
     * Returns the single registered signature.
     *
     * @return The signature.
     * @throws IllegalStateException if there is not exactly one registered signature.
     */
    public Signature getSingleSignature() {
        final int size = signatures.size();
        switch (size) {
            case 1:
                return signatures.get(0);
            case 0:
                throw new IllegalStateException("Expected operation to contain exactly one signature, however, it contains no signatures.");
            default:
                throw new IllegalStateException("Expected operation to contain exactly one signature, however, it contains " + String.valueOf(size) + " signatures.");
        }
    }

    /**
     * All of the files that will be signed by this operation.
     */
    public FileCollection getFilesToSign() {
        return newSignatureFileCollection(new Function<Signature, File>() {
            @Override
            public File apply(Signature input) {
                return input.getToSign();
            }
        });
    }

    /**
     * All of the signature files that will be generated by this operation.
     */
    public FileCollection getSignatureFiles() {
        return newSignatureFileCollection(new Function<Signature, File>() {
            @Override
            public File apply(Signature input) {
                return input.getFile();
            }
        });
    }

    private FileCollection newSignatureFileCollection(Function<Signature, File> getFile) {
        return ImmutableFileCollection.of(collectSignatureFiles(getFile));
    }

    private ArrayList<File> collectSignatureFiles(Function<Signature, File> getFile) {
        ArrayList<File> files = new ArrayList<File>(signatures.size());
        for (Signature signature : signatures) {
            File file = getFile.apply(signature);
            if (file != null) {
                files.add(file);
            }
        }
        return files;
    }
}
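
/*
 * Illustrative usage sketch (hypothetical, not from the original source): how the fluent
 * API above is typically chained. Since the class is abstract, the instance is assumed to
 * be created via Gradle's ObjectFactory; `signatory`, `pgpType` and `artifactFile` are
 * made-up names standing for values supplied by the signing plugin's configuration.
 *
 *     SignOperation operation = objectFactory.newInstance(SignOperation.class);
 *     operation.signatory(signatory)      // who signs
 *             .signatureType(pgpType)     // external representation of the signature
 *             .sign(artifactFile)         // register one or more files to sign
 *             .execute();                 // generate all registered signatures now
 */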
robinverduijn/gradle
subprojects/signing/src/main/java/org/gradle/plugins/signing/SignOperation.java
Java
apache-2.0
7,371
/*
 * Copyright 2013-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.cloudfoundry.client.v2.spaces;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import org.cloudfoundry.Nullable;
import org.immutables.value.Value;

import java.util.List;

/**
 * The request payload for the Update a Space operation
 */
@JsonSerialize
@Value.Immutable
abstract class _UpdateSpaceRequest {

    /**
     * Allow SSH
     */
    @JsonProperty("allow_ssh")
    @Nullable
    abstract Boolean getAllowSsh();

    /**
     * The auditor ids
     */
    @JsonProperty("auditor_guids")
    @Nullable
    abstract List<String> getAuditorIds();

    /**
     * The developer ids
     */
    @JsonProperty("developer_guids")
    @Nullable
    abstract List<String> getDeveloperIds();

    /**
     * The domain ids
     */
    @JsonProperty("domain_guids")
    @Nullable
    abstract List<String> getDomainIds();

    /**
     * The manager ids
     */
    @JsonProperty("manager_guids")
    @Nullable
    abstract List<String> getManagerIds();

    /**
     * The name
     */
    @JsonProperty("name")
    @Nullable
    abstract String getName();

    /**
     * The organization id
     */
    @JsonProperty("organization_guid")
    @Nullable
    abstract String getOrganizationId();

    /**
     * The security group ids
     */
    @JsonProperty("security_group_guids")
    @Nullable
    abstract List<String> getSecurityGroupIds();

    /**
     * The space id
     */
    @JsonIgnore
    abstract String getSpaceId();

}
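
/*
 * Illustrative usage sketch (hypothetical, not from the original source): @Value.Immutable
 * above makes the Immutables annotation processor generate an UpdateSpaceRequest class with
 * a builder, so a request is presumably constructed roughly like this (values are made up):
 *
 *     UpdateSpaceRequest request = UpdateSpaceRequest.builder()
 *         .spaceId("test-space-id")   // @JsonIgnore: identifies the space, not serialized
 *         .name("renamed-space")      // serialized as "name" in the JSON payload
 *         .allowSsh(true)             // serialized as "allow_ssh"
 *         .build();
 */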
cloudfoundry/cf-java-client
cloudfoundry-client/src/main/java/org/cloudfoundry/client/v2/spaces/_UpdateSpaceRequest.java
Java
apache-2.0
2,193
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package agent.lldb.manager.evt;

import agent.lldb.lldb.DebugThreadInfo;

/**
 * The event corresponding with {@code SBThread.eBroadcastBitThreadResumed}
 */
public class LldbThreadResumedEvent extends AbstractLldbEvent<DebugThreadInfo> {

    public LldbThreadResumedEvent(DebugThreadInfo info) {
        super(info);
    }
}
NationalSecurityAgency/ghidra
Ghidra/Debug/Debugger-agent-lldb/src/main/java/agent/lldb/manager/evt/LldbThreadResumedEvent.java
Java
apache-2.0
893
/**
 * Copyright (C) 2013
 * by 52 North Initiative for Geospatial Open Source Software GmbH
 *
 * Contact: Andreas Wytzisk
 * 52 North Initiative for Geospatial Open Source Software GmbH
 * Martin-Luther-King-Weg 24
 * 48155 Muenster, Germany
 * info@52north.org
 *
 * This program is free software; you can redistribute and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 *
 * This program is distributed WITHOUT ANY WARRANTY; even without the implied
 * WARRANTY OF MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program (see gnu-gpl v2.txt). If not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA or
 * visit the Free Software Foundation web page, http://www.fsf.org.
 */
package org.n52.sos.binding.rest.resources;

import org.n52.sos.binding.rest.requests.RestRequest;

/**
 * @author <a href="mailto:e.h.juerrens@52north.org">Eike Hinderk J&uuml;rrens</a>
 *
 */
public class OptionsRestRequest implements RestRequest {

    private String resourceType;
    private boolean isGlobalResource;
    private boolean isResourceCollection;

    public OptionsRestRequest(String resourceType, boolean isGlobalResource, boolean isResourceCollection) {
        this.resourceType = resourceType;
        this.isGlobalResource = isGlobalResource;
        this.isResourceCollection = isResourceCollection;
    }

    public String getResourceType() {
        return resourceType;
    }

    public boolean isGlobalResource() {
        return isGlobalResource;
    }

    public boolean isResourceCollection() {
        return isResourceCollection;
    }

}
sauloperez/sos
src/bindings/rest/code/src/main/java/org/n52/sos/binding/rest/resources/OptionsRestRequest.java
Java
apache-2.0
1,863
package org.adligo.tests4j.system.shared.trials;

import org.adligo.tests4j.shared.common.ClassMethods;
import org.adligo.tests4j.shared.xml.I_XML_Builder;

public class TrialParamValue implements I_TrialParamValue {
    public static final String TAG_NAME = "value";
    public static final String CLASS_NAME = "class";
    public static final String PARAMETER_VALUE_MUST_BE_A_NON_VOID_PRIMITIVE_OR_STRING =
            "Parameter value must be a non Void primitive or String.";

    private Object value_;

    public TrialParamValue(Object value) {
        if (value == null) {
            throw new NullPointerException();
        }
        Class<?> c = value.getClass();
        if ((ClassMethods.isPrimitiveClass(c) && !ClassMethods.isClass(Void.class, c))
                || ClassMethods.isClass(String.class, c)) {
            value_ = value;
        } else {
            throw new IllegalArgumentException(PARAMETER_VALUE_MUST_BE_A_NON_VOID_PRIMITIVE_OR_STRING);
        }
    }

    @Override
    public String getClassName() {
        return value_.getClass().getName();
    }

    @Override
    public Object getValue() {
        return value_;
    }

    @Override
    public void toXml(I_XML_Builder builder) {
        builder.addIndent();
        builder.addStartTag(TAG_NAME);
        String name = ClassMethods.getSimpleName(value_.getClass());
        builder.addAttribute(CLASS_NAME, name);
        builder.endHeader();
        builder.addText(value_.toString());
        builder.addEndTag(TAG_NAME);
        builder.endLine();
    }
}
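
/*
 * Illustrative sketch (hypothetical, not from the original source): what the constructor
 * above accepts and rejects, assuming ClassMethods.isPrimitiveClass() treats wrapper types
 * (Integer, Boolean, ...) as primitives, which the error-message constant suggests.
 *
 *     new TrialParamValue("abc");          // ok: String
 *     new TrialParamValue(42);             // ok: autoboxes to Integer
 *     new TrialParamValue(true);           // ok: autoboxes to Boolean
 *     new TrialParamValue(new Object());   // throws IllegalArgumentException
 *     new TrialParamValue(null);           // throws NullPointerException
 */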
adligo/tests4j.adligo.org
src/org/adligo/tests4j/system/shared/trials/TrialParamValue.java
Java
apache-2.0
1,384
package com.sequenceiq.freeipa.entity.util;

import com.sequenceiq.cloudbreak.converter.DefaultEnumConverter;
import com.sequenceiq.freeipa.api.v1.kerberos.model.KerberosType;

public class KerberosTypeConverter extends DefaultEnumConverter<KerberosType> {

    @Override
    public KerberosType getDefault() {
        return KerberosType.FREEIPA;
    }
}
hortonworks/cloudbreak
freeipa/src/main/java/com/sequenceiq/freeipa/entity/util/KerberosTypeConverter.java
Java
apache-2.0
356
package com.lyubenblagoev.postfixrest.security;

import com.lyubenblagoev.postfixrest.entity.User;
import com.lyubenblagoev.postfixrest.repository.UserRepository;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.core.userdetails.UsernameNotFoundException;
import org.springframework.stereotype.Service;

import java.util.Optional;

@Service
public class CustomUserDetailsService implements UserDetailsService {

    private final UserRepository userRepository;

    public CustomUserDetailsService(UserRepository userRepository) {
        this.userRepository = userRepository;
    }

    @Override
    public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
        return userRepository.findByEmail(username)
                .map(UserPrincipal::new)
                .orElseThrow(() -> new UsernameNotFoundException("No user found for " + username));
    }
}
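
/*
 * Illustrative sketch (hypothetical, not from the original source): Spring Security calls
 * loadUserByUsername() during authentication, so the service above effectively maps a
 * login name to a UserPrincipal via the repository. A manual call would look like:
 *
 *     UserDetails details = customUserDetailsService.loadUserByUsername("user@example.com");
 *     // details is a UserPrincipal wrapping the User entity found by findByEmail(),
 *     // or a UsernameNotFoundException is thrown when no such account exists
 */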
lyubenblagoev/postfix-rest-server
src/main/java/com/lyubenblagoev/postfixrest/security/CustomUserDetailsService.java
Java
apache-2.0
1,026
/*******************************************************************************
 * Copyright (c) 2012, 2015 Pivotal Software, Inc.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License,
 * Version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Contributors:
 *     Pivotal Software, Inc. - initial API and implementation
 ********************************************************************************/
package cn.dockerfoundry.ide.eclipse.server.core.internal;

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

import org.cloudfoundry.client.lib.domain.CloudService;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.wst.server.core.IModule;

import cn.dockerfoundry.ide.eclipse.server.core.internal.application.ModuleChangeEvent;
import cn.dockerfoundry.ide.eclipse.server.core.internal.client.CloudRefreshEvent;

/**
 * Fires server refresh events. Only one handler is active per workbench
 * runtime session.
 */
public class ServerEventHandler {

    private static ServerEventHandler handler;

    public static ServerEventHandler getDefault() {
        if (handler == null) {
            handler = new ServerEventHandler();
        }
        return handler;
    }

    private final List<CloudServerListener> applicationListeners = new CopyOnWriteArrayList<CloudServerListener>();

    public synchronized void addServerListener(CloudServerListener listener) {
        if (listener != null && !applicationListeners.contains(listener)) {
            applicationListeners.add(listener);
        }
    }

    public synchronized void removeServerListener(CloudServerListener listener) {
        applicationListeners.remove(listener);
    }

    public void fireServicesUpdated(DockerFoundryServer server, List<DockerApplicationService> services) {
        fireServerEvent(new CloudRefreshEvent(server, null, CloudServerEvent.EVENT_UPDATE_SERVICES, services));
    }

    public void firePasswordUpdated(DockerFoundryServer server) {
        fireServerEvent(new CloudServerEvent(server, CloudServerEvent.EVENT_UPDATE_PASSWORD));
    }

    public void fireServerRefreshed(DockerFoundryServer server) {
        fireServerEvent(new CloudServerEvent(server, CloudServerEvent.EVENT_SERVER_REFRESHED));
    }

    public void fireAppInstancesChanged(DockerFoundryServer server, IModule module) {
        fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_INSTANCES_UPDATED, module, Status.OK_STATUS));
    }

    public void fireApplicationRefreshed(DockerFoundryServer server, IModule module) {
        fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_APPLICATION_REFRESHED, module, Status.OK_STATUS));
    }

    public void fireAppDeploymentChanged(DockerFoundryServer server, IModule module) {
        fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_APP_DEPLOYMENT_CHANGED, module, Status.OK_STATUS));
    }

    public void fireError(DockerFoundryServer server, IModule module, IStatus status) {
        fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_CLOUD_OP_ERROR, module, status));
    }

    public synchronized void fireServerEvent(CloudServerEvent event) {
        CloudServerListener[] listeners = applicationListeners.toArray(new CloudServerListener[0]);
        for (CloudServerListener listener : listeners) {
            listener.serverChanged(event);
        }
    }
}
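
/*
 * Illustrative usage sketch (hypothetical, not from the original source): the expected
 * listener life-cycle around the singleton handler above. `MyListener` is a made-up
 * CloudServerListener implementation and `server` a DockerFoundryServer from elsewhere.
 *
 *     CloudServerListener listener = new MyListener();
 *     ServerEventHandler.getDefault().addServerListener(listener);  // duplicates are skipped
 *     ServerEventHandler.getDefault().fireServerRefreshed(server);  // listener.serverChanged(...) runs
 *     ServerEventHandler.getDefault().removeServerListener(listener);
 */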
osswangxining/dockerfoundry
cn.dockerfoundry.ide.eclipse.server.core/src/cn/dockerfoundry/ide/eclipse/server/core/internal/ServerEventHandler.java
Java
apache-2.0
3,808
/*
 * Copyright 2016 Bjoern Bilger
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.jrestless.core.container;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.io.ByteArrayOutputStream;
import java.io.OutputStream;

import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response.Status;

import org.glassfish.jersey.server.ContainerResponse;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import com.jrestless.core.container.JRestlessHandlerContainer.JRestlessContainerResponse;
import com.jrestless.core.container.JRestlessHandlerContainer.JRestlessContainerResponseWriter;
import com.jrestless.core.container.io.JRestlessResponseWriter;

public class JRestlessContainerResponseWriterTest {

    private JRestlessContainerResponseWriter containerResponseWriter;
    private JRestlessContainerResponse response;

    @BeforeEach
    public void setup() {
        JRestlessResponseWriter responseWriter = mock(JRestlessResponseWriter.class);
        when(responseWriter.getEntityOutputStream()).thenReturn(new ByteArrayOutputStream());
        response = spy(new JRestlessContainerResponse(responseWriter));
        containerResponseWriter = new JRestlessContainerResponseWriter(response);
    }

    @Test
    public void commit_ResponseNotYetClosed_ShouldCloseResponse() {
        containerResponseWriter.commit();
        verify(response, times(1)).close();
    }

    @Test
    public void writeResponseStatusAndHeaders_ContextHeaderAndStatusGiven_ShouldUpdateResponseStatusAndHeaders() {
        MultivaluedMap<String, String> actualHeaders = new MultivaluedHashMap<>();
        actualHeaders.add("header0", "value0_0");
        actualHeaders.add("header0", "value0_1");
        actualHeaders.add("header1", "value1_0");

        MultivaluedMap<String, String> expectedHeaders = new MultivaluedHashMap<>();
        expectedHeaders.add("header0", "value0_0");
        expectedHeaders.add("header0", "value0_1");
        expectedHeaders.add("header1", "value1_0");

        ContainerResponse context = mock(ContainerResponse.class);
        when(context.getStatusInfo()).thenReturn(Status.CONFLICT);
        when(context.getStringHeaders()).thenReturn(actualHeaders);

        containerResponseWriter.writeResponseStatusAndHeaders(-1, context);

        assertEquals(Status.CONFLICT, response.getStatusType());
        assertEquals(expectedHeaders, response.getHeaders());
    }

    @Test
    public void writeResponseStatusAndHeaders_ShouldReturnEntityOutputStreamOfResponse() {
        ContainerResponse context = mock(ContainerResponse.class);
        when(context.getStringHeaders()).thenReturn(new MultivaluedHashMap<>());
        when(context.getStatusInfo()).thenReturn(Status.OK);
        OutputStream entityOutputStream = containerResponseWriter.writeResponseStatusAndHeaders(-1, context);
        assertSame(response.getEntityOutputStream(), entityOutputStream);
    }

    @Test
    public void failure_ResponseNotYetCommitted_ShouldSetInternalServerErrorStatusOnFail() {
        ContainerResponse context = mock(ContainerResponse.class);
        when(context.getStatusInfo()).thenReturn(Status.OK);
        when(context.getStringHeaders()).thenReturn(new MultivaluedHashMap<>());
        containerResponseWriter.writeResponseStatusAndHeaders(-1, context);
        containerResponseWriter.failure(new RuntimeException());
        assertEquals(Status.INTERNAL_SERVER_ERROR, response.getStatusType());
    }

    @Test
    public void failure_ResponseNotYetCommitted_ShouldCommitOnFailure() {
        containerResponseWriter = spy(containerResponseWriter);
        containerResponseWriter.failure(new RuntimeException());
        verify(containerResponseWriter, times(1)).commit();
    }

    @Test
    public void failure_ResponseNotYetCommitted_ShouldRethrowOnCommitFailure() {
        containerResponseWriter = spy(containerResponseWriter);
        containerResponseWriter.failure(new RuntimeException());
        doThrow(CommitException.class).when(containerResponseWriter).commit();
        assertThrows(RuntimeException.class, () -> containerResponseWriter.failure(new RuntimeException()));
    }

    @Test
    public void enableResponseBuffering_Always_ShouldBeDisabled() {
        assertFalse(containerResponseWriter.enableResponseBuffering());
    }

    @Test
    public void setSuspendTimeout_Always_ShouldBeUnsupported() {
        assertThrows(UnsupportedOperationException.class, () -> containerResponseWriter.setSuspendTimeout(1, null));
    }

    @Test
    public void suspend_Always_ShouldBeUnsupported() {
        assertThrows(UnsupportedOperationException.class, () -> containerResponseWriter.suspend(1, null, null));
    }

    @SuppressWarnings("serial")
    private static class CommitException extends RuntimeException {
    }
}
bbilger/jrestless
core/jrestless-core-container/src/test/java/com/jrestless/core/container/JRestlessContainerResponseWriterTest.java
Java
apache-2.0
5,472