code
stringlengths 10
749k
| repo_name
stringlengths 5
108
| path
stringlengths 7
333
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 10
749k
|
---|---|---|---|---|---|
/*
* Copyright (c) 2012-2018 Red Hat, Inc.
* This program and the accompanying materials are made
* available under the terms of the Eclipse Public License 2.0
* which is available at https://www.eclipse.org/legal/epl-2.0/
*
* SPDX-License-Identifier: EPL-2.0
*
* Contributors:
* Red Hat, Inc. - initial API and implementation
*/
package renametype.testFail80;
// Rename-refactoring failure fixture (testFail80): renaming must detect the
// conflict introduced by the method-local class B. Do not restructure — the
// exact shape of this class is what the Selenium rename test exercises.
class A{
    void m(){
        // Method-local class: the rename target/conflict for this test case.
        class B{}
    }
}
|
akervern/che
|
selenium/che-selenium-test/src/test/resources/projects/RenameType/src/main/java/renametype/testFail80/A.java
|
Java
|
epl-1.0
| 418 |
// jtreg library fixture (nullness issue 824): exercises the type-parameter
// positions the checker must handle — class-level, method-level, and bounded
// wildcards. The declarations are intentionally trivial; their signatures are
// the test subject.
public class Class1<Q> {
    /** Nested generic type used as the wildcard target below. */
    class Gen<S> {}
    /** Identity method with its own method-level type parameter T. */
    public <T> T methodTypeParam(T t) {
        return t;
    }
    /** Uses the enclosing class's type parameter Q in a parameter position. */
    public void classTypeParam(Q e) {}
    /** Upper-bounded wildcard over a method type parameter F. */
    public <F> void wildcardExtends(Gen<? extends F> class1) {}
    /** Lower-bounded wildcard over a method type parameter F. */
    public <F> void wildcardSuper(Gen<? super F> class1) {}
}
|
CharlesZ-Chen/checker-framework
|
checker/jtreg/nullness/issue824lib/Class1.java
|
Java
|
gpl-2.0
| 278 |
/*
* This is the source code of Telegram for Android v. 1.3.2.
* It is licensed under GNU GPL v. 2 or later.
* You should have received a copy of the license in this archive (see LICENSE).
*
* Copyright Nikolai Kudashov, 2013.
*/
package org.telegram.ui;
import android.app.Activity;
import android.content.Context;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.text.InputType;
import android.util.TypedValue;
import android.view.Gravity;
import android.view.KeyEvent;
import android.view.LayoutInflater;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import android.view.inputmethod.EditorInfo;
import android.widget.EditText;
import android.widget.LinearLayout;
import android.widget.TextView;
import org.telegram.android.AndroidUtilities;
import org.telegram.android.LocaleController;
import org.telegram.messenger.ApplicationLoader;
import org.telegram.messenger.TLRPC;
import org.telegram.android.MessagesController;
import org.telegram.messenger.R;
import org.telegram.ui.ActionBar.ActionBar;
import org.telegram.ui.ActionBar.ActionBarMenu;
import org.telegram.ui.ActionBar.BaseFragment;
/**
 * Fragment that lets the user edit the title of a chat/group. Shows a single
 * text field pre-filled with the current title; "done" (action bar or IME)
 * saves the new title via {@link MessagesController#changeChatTitle}.
 */
public class ChangeChatNameActivity extends BaseFragment {

    /** Single-line-styled editor holding the chat title. */
    private EditText firstNameField;
    private View headerLabelView;
    /** Id of the chat being renamed; read from the fragment arguments. */
    private int chat_id;
    /** Action-bar "done" item; also triggered programmatically on IME done. */
    private View doneButton;

    private final static int done_button = 1;

    public ChangeChatNameActivity(Bundle args) {
        super(args);
    }

    @Override
    public boolean onFragmentCreate() {
        super.onFragmentCreate();
        chat_id = getArguments().getInt("chat_id", 0);
        return true;
    }

    @Override
    public View createView(Context context, LayoutInflater inflater) {
        actionBar.setBackButtonImage(R.drawable.ic_ab_back);
        actionBar.setAllowOverlayTitle(true);
        actionBar.setTitle(LocaleController.getString("EditName", R.string.EditName));
        actionBar.setActionBarMenuOnItemClick(new ActionBar.ActionBarMenuOnItemClick() {
            @Override
            public void onItemClick(int id) {
                if (id == -1) {
                    finishFragment();
                } else if (id == done_button) {
                    // Only save when the field contains a non-empty title.
                    if (firstNameField.getText().length() != 0) {
                        saveName();
                        finishFragment();
                    }
                }
            }
        });

        ActionBarMenu menu = actionBar.createMenu();
        doneButton = menu.addItemWithWidth(done_button, R.drawable.ic_done, AndroidUtilities.dp(56));

        TLRPC.Chat currentChat = MessagesController.getInstance().getChat(chat_id);

        fragmentView = new LinearLayout(context);
        fragmentView.setLayoutParams(new ViewGroup.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));
        ((LinearLayout) fragmentView).setOrientation(LinearLayout.VERTICAL);
        // Swallow touches so taps don't fall through to the fragment underneath.
        fragmentView.setOnTouchListener(new View.OnTouchListener() {
            @Override
            public boolean onTouch(View v, MotionEvent event) {
                return true;
            }
        });

        firstNameField = new EditText(context);
        if (currentChat != null) {
            // Guard against a missing chat (e.g. not yet cached) to avoid an NPE;
            // the original dereferenced currentChat unconditionally.
            firstNameField.setText(currentChat.title);
        }
        firstNameField.setTextSize(TypedValue.COMPLEX_UNIT_DIP, 18);
        firstNameField.setHintTextColor(0xff979797);
        firstNameField.setTextColor(0xff212121);
        firstNameField.setMaxLines(3);
        firstNameField.setPadding(0, 0, 0, 0);
        // Set once; the original called setGravity twice with the same value.
        firstNameField.setGravity(LocaleController.isRTL ? Gravity.RIGHT : Gravity.LEFT);
        firstNameField.setInputType(InputType.TYPE_TEXT_FLAG_CAP_SENTENCES | InputType.TYPE_TEXT_FLAG_MULTI_LINE | InputType.TYPE_TEXT_FLAG_AUTO_CORRECT);
        firstNameField.setImeOptions(EditorInfo.IME_ACTION_DONE);
        AndroidUtilities.clearCursorDrawable(firstNameField);
        firstNameField.setOnEditorActionListener(new TextView.OnEditorActionListener() {
            @Override
            public boolean onEditorAction(TextView textView, int i, KeyEvent keyEvent) {
                // IME "done" behaves exactly like tapping the action-bar button.
                if (i == EditorInfo.IME_ACTION_DONE && doneButton != null) {
                    doneButton.performClick();
                    return true;
                }
                return false;
            }
        });

        ((LinearLayout) fragmentView).addView(firstNameField);
        LinearLayout.LayoutParams layoutParams = (LinearLayout.LayoutParams) firstNameField.getLayoutParams();
        layoutParams.topMargin = AndroidUtilities.dp(24);
        layoutParams.height = AndroidUtilities.dp(36);
        layoutParams.leftMargin = AndroidUtilities.dp(24);
        layoutParams.rightMargin = AndroidUtilities.dp(24);
        layoutParams.width = LinearLayout.LayoutParams.MATCH_PARENT;
        firstNameField.setLayoutParams(layoutParams);

        if (chat_id > 0) {
            firstNameField.setHint(LocaleController.getString("GroupName", R.string.GroupName));
        } else {
            firstNameField.setHint(LocaleController.getString("EnterListName", R.string.EnterListName));
        }
        firstNameField.setSelection(firstNameField.length());

        return fragmentView;
    }

    @Override
    public void onResume() {
        super.onResume();
        SharedPreferences preferences = ApplicationLoader.applicationContext.getSharedPreferences("mainconfig", Activity.MODE_PRIVATE);
        boolean animations = preferences.getBoolean("view_animations", true);
        if (!animations) {
            // No open animation will run, so focus the field immediately;
            // otherwise onOpenAnimationEnd() does it after the transition.
            firstNameField.requestFocus();
            AndroidUtilities.showKeyboard(firstNameField);
        }
    }

    @Override
    public void onOpenAnimationEnd() {
        firstNameField.requestFocus();
        AndroidUtilities.showKeyboard(firstNameField);
    }

    /** Persists the edited title for {@code chat_id} via MessagesController. */
    private void saveName() {
        MessagesController.getInstance().changeChatTitle(chat_id, firstNameField.getText().toString());
    }
}
|
keremsoft/Muhappet
|
TMessagesProj/src/main/java/org/telegram/ui/ChangeChatNameActivity.java
|
Java
|
gpl-2.0
| 6,010 |
///////////////////////////////////////////////////////////////////////////////
// For information as to what this class does, see the Javadoc, below. //
// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, //
// 2007, 2008, 2009, 2010, 2014, 2015 by Peter Spirtes, Richard Scheines, Joseph //
// Ramsey, and Clark Glymour. //
// //
// This program is free software; you can redistribute it and/or modify //
// it under the terms of the GNU General Public License as published by //
// the Free Software Foundation; either version 2 of the License, or //
// (at your option) any later version. //
// //
// This program is distributed in the hope that it will be useful, //
// but WITHOUT ANY WARRANTY; without even the implied warranty of //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //
// GNU General Public License for more details. //
// //
// You should have received a copy of the GNU General Public License //
// along with this program; if not, write to the Free Software //
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA //
///////////////////////////////////////////////////////////////////////////////
package edu.cmu.tetradapp.util;
import edu.cmu.tetrad.session.Session;
import edu.cmu.tetradapp.app.SessionEditor;
/**
* Interface for desktop controller methods, to allow app components to control
* the desktop without a package cycle. See TetradDesktop for meaning of
* methods.
*
* @author Joseph Ramsey
* @see edu.cmu.tetradapp.app.TetradDesktop
*/
public interface DesktopControllable {
    /** Opens a new session editor on the desktop. See TetradDesktop. */
    void newSessionEditor();
    /** Returns an indirect reference to the frontmost session editor. */
    SessionEditorIndirectRef getFrontmostSessionEditor();
    /** Exits the application. */
    void exitProgram();
    /** Returns true if a session with the given name exists. */
    boolean existsSessionByName(String name);
    /** Returns the session with the given name (see TetradDesktop for details). */
    Session getSessionByName(String name);
    /** Adds the given session editor to the desktop. */
    void addSessionEditor(SessionEditorIndirectRef editor);
    /** Closes sessions that are empty (see TetradDesktop for the exact rule). */
    void closeEmptySessions();
    /** Associates Tetrad metadata with the given session wrapper. */
    void putMetadata(SessionWrapperIndirectRef sessionWrapper,
            TetradMetadataIndirectRef metadata);
    /** Returns the Tetrad metadata previously stored for the session wrapper. */
    TetradMetadataIndirectRef getTetradMetadata(
            SessionWrapperIndirectRef sessionWrapper);
    /** Adds an editor window to the desktop at the given layer. */
    void addEditorWindow(EditorWindowIndirectRef editorWindow, int layer);
    /** Closes the frontmost session. */
    void closeFrontmostSession();
    /** Closes the session with the given name. */
    void closeSessionByName(String name);
    /**
     * Attempts to close all sessions; the boolean reports the outcome
     * (see TetradDesktop for the precise semantics).
     */
    boolean closeAllSessions();
}
|
ps7z/tetrad
|
tetrad-gui/src/main/java/edu/cmu/tetradapp/util/DesktopControllable.java
|
Java
|
gpl-2.0
| 2,716 |
package pneumaticCraft.common.thirdparty.ae2;
import java.util.List;
import net.minecraft.item.ItemStack;
import pneumaticCraft.api.item.IInventoryItem;
import appeng.api.AEApi;
import appeng.api.storage.ICellRegistry;
import appeng.api.storage.IMEInventoryHandler;
import appeng.api.storage.IStorageHelper;
import appeng.api.storage.StorageChannel;
import appeng.api.storage.data.IAEItemStack;
import appeng.api.storage.data.IItemList;
/**
 * Bridges Applied Energistics 2 storage cells into PneumaticCraft's
 * {@link IInventoryItem} lookup: reports the stacks stored on a cell item.
 */
public class AE2DiskInventoryItemHandler implements IInventoryItem{
    private final ICellRegistry cellRegistry = AEApi.instance().registries().cell();
    private final IStorageHelper storageHelper = AEApi.instance().storage();

    /**
     * Appends every stack stored on the given cell item to {@code curStacks}.
     * Does nothing when the item is not a readable AE2 item-storage cell.
     */
    @Override
    public void getStacksInItem(ItemStack stack, List<ItemStack> curStacks){
        IMEInventoryHandler<IAEItemStack> handler = cellRegistry.getCellInventory(stack, null, StorageChannel.ITEMS);
        if(handler == null) {
            return; // not an AE2 item-storage cell
        }
        IItemList<IAEItemStack> stored = storageHelper.createItemList();
        handler.getAvailableItems(stored);
        for(IAEItemStack aeStack : stored) {
            ItemStack copy = aeStack.getItemStack();
            // getItemStack() caps stackSize at the vanilla limit (64); restore
            // the true stored count from the AE stack.
            copy.stackSize = (int)aeStack.getStackSize();
            curStacks.add(copy);
        }
    }
}
|
wormzjl/PneumaticCraft
|
src/pneumaticCraft/common/thirdparty/ae2/AE2DiskInventoryItemHandler.java
|
Java
|
gpl-3.0
| 1,379 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.math3.optimization.general;
import org.apache.commons.math3.exception.MathIllegalStateException;
import org.apache.commons.math3.analysis.UnivariateFunction;
import org.apache.commons.math3.analysis.solvers.BrentSolver;
import org.apache.commons.math3.analysis.solvers.UnivariateSolver;
import org.apache.commons.math3.exception.util.LocalizedFormats;
import org.apache.commons.math3.optimization.GoalType;
import org.apache.commons.math3.optimization.PointValuePair;
import org.apache.commons.math3.optimization.SimpleValueChecker;
import org.apache.commons.math3.optimization.ConvergenceChecker;
import org.apache.commons.math3.util.FastMath;
/**
* Non-linear conjugate gradient optimizer.
* <p>
* This class supports both the Fletcher-Reeves and the Polak-Ribière
* update formulas for the conjugate search directions. It also supports
* optional preconditioning.
* </p>
*
* @deprecated As of 3.1 (to be removed in 4.0).
* @since 2.0
*
*/
@Deprecated
public class NonLinearConjugateGradientOptimizer
    extends AbstractScalarDifferentiableOptimizer {
    /** Update formula for the beta parameter. */
    private final ConjugateGradientFormula updateFormula;
    /** Preconditioner (may be null). */
    private final Preconditioner preconditioner;
    /** solver to use in the line search (may be null). */
    private final UnivariateSolver solver;
    /** Initial step used to bracket the optimum in line search. */
    private double initialStep;
    /** Current point. */
    private double[] point;

    /**
     * Constructor with default {@link SimpleValueChecker checker},
     * {@link BrentSolver line search solver} and
     * {@link IdentityPreconditioner preconditioner}.
     *
     * @param updateFormula formula to use for updating the &beta; parameter,
     * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
     * ConjugateGradientFormula#POLAK_RIBIERE}.
     * @deprecated See {@link SimpleValueChecker#SimpleValueChecker()}
     */
    @Deprecated
    public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula) {
        this(updateFormula,
             new SimpleValueChecker());
    }

    /**
     * Constructor with default {@link BrentSolver line search solver} and
     * {@link IdentityPreconditioner preconditioner}.
     *
     * @param updateFormula formula to use for updating the &beta; parameter,
     * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
     * ConjugateGradientFormula#POLAK_RIBIERE}.
     * @param checker Convergence checker.
     */
    public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula,
                                               ConvergenceChecker<PointValuePair> checker) {
        this(updateFormula,
             checker,
             new BrentSolver(),
             new IdentityPreconditioner());
    }

    /**
     * Constructor with default {@link IdentityPreconditioner preconditioner}.
     *
     * @param updateFormula formula to use for updating the &beta; parameter,
     * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
     * ConjugateGradientFormula#POLAK_RIBIERE}.
     * @param checker Convergence checker.
     * @param lineSearchSolver Solver to use during line search.
     */
    public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula,
                                               ConvergenceChecker<PointValuePair> checker,
                                               final UnivariateSolver lineSearchSolver) {
        this(updateFormula,
             checker,
             lineSearchSolver,
             new IdentityPreconditioner());
    }

    /**
     * Primary constructor; the other constructors delegate here.
     *
     * @param updateFormula formula to use for updating the &beta; parameter,
     * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
     * ConjugateGradientFormula#POLAK_RIBIERE}.
     * @param checker Convergence checker.
     * @param lineSearchSolver Solver to use during line search.
     * @param preconditioner Preconditioner.
     */
    public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula,
                                               ConvergenceChecker<PointValuePair> checker,
                                               final UnivariateSolver lineSearchSolver,
                                               final Preconditioner preconditioner) {
        super(checker);
        this.updateFormula = updateFormula;
        solver = lineSearchSolver;
        this.preconditioner = preconditioner;
        initialStep = 1.0;
    }

    /**
     * Set the initial step used to bracket the optimum in line search.
     * <p>
     * The initial step is a factor with respect to the search direction,
     * which itself is roughly related to the gradient of the function
     * </p>
     * @param initialStep initial step used to bracket the optimum in line search,
     * if a non-positive value is used, the initial step is reset to its
     * default value of 1.0
     */
    public void setInitialStep(final double initialStep) {
        if (initialStep <= 0) {
            this.initialStep = 1.0;
        } else {
            this.initialStep = initialStep;
        }
    }

    /** {@inheritDoc} */
    @Override
    protected PointValuePair doOptimize() {
        final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
        point = getStartPoint();
        final GoalType goal = getGoalType();
        final int n = point.length;
        double[] r = computeObjectiveGradient(point);
        // For minimization, search along the negated gradient (steepest descent).
        if (goal == GoalType.MINIMIZE) {
            for (int i = 0; i < n; ++i) {
                r[i] = -r[i];
            }
        }
        // Initial search direction.
        double[] steepestDescent = preconditioner.precondition(point, r);
        double[] searchDirection = steepestDescent.clone();
        // delta = r . d, reused across iterations for the beta update formulas.
        double delta = 0;
        for (int i = 0; i < n; ++i) {
            delta += r[i] * searchDirection[i];
        }
        PointValuePair current = null;
        int iter = 0;
        int maxEval = getMaxEvaluations();
        while (true) {
            ++iter;
            final double objective = computeObjectiveValue(point);
            PointValuePair previous = current;
            current = new PointValuePair(point, objective);
            if (previous != null && checker.converged(iter, previous, current)) {
                // We have found an optimum.
                return current;
            }
            // Find the optimal step in the search direction.
            final UnivariateFunction lsf = new LineSearchFunction(searchDirection);
            final double uB = findUpperBound(lsf, 0, initialStep);
            // XXX Last parameters is set to a value close to zero in order to
            // work around the divergence problem in the "testCircleFitting"
            // unit test (see MATH-439).
            final double step = solver.solve(maxEval, lsf, 0, uB, 1e-15);
            maxEval -= solver.getEvaluations(); // Subtract used up evaluations.
            // Validate new point.
            for (int i = 0; i < point.length; ++i) {
                point[i] += step * searchDirection[i];
            }
            r = computeObjectiveGradient(point);
            if (goal == GoalType.MINIMIZE) {
                for (int i = 0; i < n; ++i) {
                    r[i] = -r[i];
                }
            }
            // Compute beta.
            final double deltaOld = delta;
            final double[] newSteepestDescent = preconditioner.precondition(point, r);
            delta = 0;
            for (int i = 0; i < n; ++i) {
                delta += r[i] * newSteepestDescent[i];
            }
            final double beta;
            if (updateFormula == ConjugateGradientFormula.FLETCHER_REEVES) {
                // Fletcher-Reeves: beta = (r_new . d_new) / (r_old . d_old).
                beta = delta / deltaOld;
            } else {
                // Polak-Ribiere: subtract the cross term r_new . d_old.
                double deltaMid = 0;
                for (int i = 0; i < r.length; ++i) {
                    deltaMid += r[i] * steepestDescent[i];
                }
                beta = (delta - deltaMid) / deltaOld;
            }
            steepestDescent = newSteepestDescent;
            // Compute conjugate search direction.
            // Restart (reset to steepest descent) every n iterations or when
            // beta turns negative, to avoid degenerate conjugate directions.
            if (iter % n == 0 ||
                beta < 0) {
                // Break conjugation: reset search direction.
                searchDirection = steepestDescent.clone();
            } else {
                // Compute new conjugate search direction.
                for (int i = 0; i < n; ++i) {
                    searchDirection[i] = steepestDescent[i] + beta * searchDirection[i];
                }
            }
        }
    }

    /**
     * Find the upper bound b ensuring bracketing of a root between a and b.
     *
     * @param f function whose root must be bracketed.
     * @param a lower bound of the interval.
     * @param h initial step to try.
     * @return b such that f(a) and f(b) have opposite signs.
     * @throws MathIllegalStateException if no bracket can be found.
     */
    private double findUpperBound(final UnivariateFunction f,
                                  final double a, final double h) {
        final double yA = f.value(a);
        double yB = yA;
        // Grow the step geometrically (at least doubling) until the sign flips.
        for (double step = h; step < Double.MAX_VALUE; step *= FastMath.max(2, yA / yB)) {
            final double b = a + step;
            yB = f.value(b);
            if (yA * yB <= 0) {
                return b;
            }
        }
        throw new MathIllegalStateException(LocalizedFormats.UNABLE_TO_BRACKET_OPTIMUM_IN_LINE_SEARCH);
    }

    /** Default identity preconditioner. */
    public static class IdentityPreconditioner implements Preconditioner {
        /** {@inheritDoc} */
        public double[] precondition(double[] variables, double[] r) {
            // Identity: returns a defensive copy of the (negated) gradient.
            return r.clone();
        }
    }

    /** Internal class for line search.
     * <p>
     * The function represented by this class is the dot product of
     * the objective function gradient and the search direction. Its
     * value is zero when the gradient is orthogonal to the search
     * direction, i.e. when the objective function value is a local
     * extremum along the search direction.
     * </p>
     */
    private class LineSearchFunction implements UnivariateFunction {
        /** Search direction. */
        private final double[] searchDirection;

        /** Simple constructor.
         * @param searchDirection search direction
         */
        LineSearchFunction(final double[] searchDirection) {
            this.searchDirection = searchDirection;
        }

        /** {@inheritDoc} */
        public double value(double x) {
            // current point in the search direction
            final double[] shiftedPoint = point.clone();
            for (int i = 0; i < shiftedPoint.length; ++i) {
                shiftedPoint[i] += x * searchDirection[i];
            }
            // gradient of the objective function
            final double[] gradient = computeObjectiveGradient(shiftedPoint);
            // dot product with the search direction
            double dotProduct = 0;
            for (int i = 0; i < gradient.length; ++i) {
                dotProduct += gradient[i] * searchDirection[i];
            }
            return dotProduct;
        }
    }
}
|
happyjack27/autoredistrict
|
src/org/apache/commons/math3/optimization/general/NonLinearConjugateGradientOptimizer.java
|
Java
|
gpl-3.0
| 12,220 |
/** __ __
* _____ _/ /_/ /_ Computational Intelligence Library (CIlib)
* / ___/ / / / __ \ (c) CIRG @ UP
* / /__/ / / / /_/ / http://cilib.net
* \___/_/_/_/_.___/
*/
package net.sourceforge.cilib.functions.continuous.unconstrained;
import net.sourceforge.cilib.functions.ContinuousFunction;
import net.sourceforge.cilib.type.types.container.Vector;
import static org.junit.Assert.assertEquals;
import org.junit.Before;
import org.junit.Test;
public class ShubertTest {

    /** Function under test; rebuilt before each test method. */
    private ContinuousFunction function;

    @Before
    public void instantiate() {
        this.function = new Shubert();
    }

    /**
     * Test of evaluate method, of class {@link Shubert}, at three sample
     * points (origin, a corner region, and a point near a known minimum).
     */
    @Test
    public void testEvaluate() {
        final double tolerance = 0.000000009;

        Vector input = Vector.of(0, 0);
        assertEquals(19.875836249, function.f(input), tolerance);

        input.setReal(0, -8.0);
        input.setReal(1, -8.0);
        assertEquals(7.507985827, function.f(input), tolerance);

        input.setReal(0, -7.2);
        input.setReal(1, -7.7);
        assertEquals(-157.071676802, function.f(input), tolerance);
    }
}
|
krharrison/cilib
|
library/src/test/java/net/sourceforge/cilib/functions/continuous/unconstrained/ShubertTest.java
|
Java
|
gpl-3.0
| 1,102 |
package edu.stanford.nlp.mt.visualize.phrase;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.LineNumberReader;
import java.util.HashMap;
import java.util.Map;
/**
*
* @author Spence Green
*/
/**
 * Model of phrase-based translation options read from a source-sentence file
 * and a parallel options file ("id ||| ? ||| english ||| logScore ||| coverage",
 * one option per line, grouped by ascending translation id). Builds per-sentence
 * {@link TranslationLayout}s for visualization.
 */
public class PhraseModel {
    // For validating the input file format
    private static final int OPT_TOKS_PER_LINE = 5;

    // Layout/scoring configuration (mutable via setters below).
    private int NUM_VISUAL_OPTION_ROWS = 10;
    private boolean NORM_SCORES = false;
    private boolean VERBOSE = false;

    private final File sourceFile;
    private final File optsFile;
    private final Map<Integer, Translation> translations;
    private final Map<Integer, TranslationLayout> layouts;
    private int minTranslationId = Integer.MIN_VALUE;
    private ScoreDistribution scoreDist;
    private boolean isBuilt = false;

    /**
     * @param source file with one source sentence per line
     * @param opts file with translation options, grouped by translation id
     * @throws RuntimeException if either file does not exist
     */
    public PhraseModel(File source, File opts) {
        sourceFile = source;
        optsFile = opts;
        translations = new HashMap<Integer, Translation>();
        layouts = new HashMap<Integer, TranslationLayout>();
        if (!sourceFile.exists())
            throw new RuntimeException(String.format("%s: %s does not exist", this
                    .getClass().getName(), sourceFile.getPath()));
        if (!optsFile.exists())
            // Fixed: previously reported sourceFile's path for a missing opts file.
            throw new RuntimeException(String.format("%s: %s does not exist", this
                    .getClass().getName(), optsFile.getPath()));
    }

    public void setVerbose(boolean verbose) {
        VERBOSE = verbose;
    }

    /**
     * Loads translations with ids in [firstId, lastId] from the two files.
     *
     * @param firstId first translation id to load (inclusive)
     * @param lastId last translation id to load (inclusive)
     * @param scoreHalfRange half-range passed to the score distribution
     * @return true on success, false on any I/O failure
     */
    public boolean load(int firstId, int lastId, int scoreHalfRange) {
        minTranslationId = firstId;
        LineNumberReader sourceReader = null;
        LineNumberReader optsReader = null;
        try {
            scoreDist = new ScoreDistribution(scoreHalfRange);
            sourceReader = new LineNumberReader(new FileReader(sourceFile));
            optsReader = new LineNumberReader(new FileReader(optsFile));
            // Option line read for the NEXT translation while scanning the
            // current one; carried over so no line is lost between sentences.
            String[] lastOpt = null;
            int transId;
            for (transId = 0; sourceReader.ready(); transId++) {
                String source = sourceReader.readLine();
                if (transId < firstId)
                    continue;
                else if (transId > lastId)
                    break;
                if (minTranslationId < 0)
                    minTranslationId = transId;
                Translation translation = new Translation(transId, source);
                if (lastOpt != null && Integer.parseInt(lastOpt[0]) == transId) {
                    // Fixed: this branch previously applied Math.exp twice
                    // (once before scoreDist.add and again in addPhrase),
                    // corrupting the scores of carried-over options.
                    addOption(translation, lastOpt);
                }
                lastOpt = null;
                for (int optsRead = 0; optsReader.ready(); optsRead++) {
                    String transOpt = optsReader.readLine();
                    String[] optToks = transOpt.split("\\s*\\|\\|\\|\\s*");
                    assert optToks.length == OPT_TOKS_PER_LINE;
                    final int transIdForOpt = Integer.parseInt(optToks[0]);
                    if (transIdForOpt < transId)
                        continue;
                    else if (transIdForOpt != transId) {
                        // Belongs to a later sentence; save it for the next pass.
                        lastOpt = optToks;
                        break;
                    }
                    addOption(translation, optToks);
                }
                translations.put(transId, translation);
            }
            if (VERBOSE)
                System.err.printf("%s: Read %d source sentences from %s\n", this
                        .getClass().getName(), transId, sourceFile.getPath());
            return true;
        } catch (FileNotFoundException e) {
            System.err.printf("%s: Could not open file\n%s\n", this.getClass()
                    .getName(), e.toString());
            return false;
        } catch (IOException e) {
            System.err.printf("%s: Error while reading file\n%s\n", this.getClass()
                    .getName(), e.toString());
            return false;
        } finally {
            // Readers were previously closed only on the success path and
            // leaked on exceptions.
            closeQuietly(sourceReader);
            closeQuietly(optsReader);
        }
    }

    /**
     * Records one option line on the given translation: interns the English
     * phrase, optionally length-normalizes the log score, converts it with
     * exp, and adds it to both the score distribution and the translation.
     */
    private void addOption(Translation translation, String[] optToks) {
        String english = optToks[2].intern();
        String coverage = optToks[4];
        double score = Double.parseDouble(optToks[3]);
        if (NORM_SCORES)
            score /= (double) english.split("\\s+").length;
        score = Math.exp(score);
        scoreDist.add(score);
        translation.addPhrase(score, english, coverage);
    }

    /** Best-effort close; load() already reports the primary error. */
    private static void closeQuietly(LineNumberReader reader) {
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException ignored) {
                // Nothing useful to do on close failure.
            }
        }
    }

    /**
     * Builds a visual layout for every loaded translation and finalizes the
     * score distribution.
     *
     * @param rightToLeft true for right-to-left source languages
     * @return always true
     */
    public boolean buildLayouts(boolean rightToLeft) {
        for (Integer translationId : translations.keySet()) {
            Translation t = translations.get(translationId);
            TranslationLayout layout = new TranslationLayout(t, rightToLeft);
            layout.createLayout(NUM_VISUAL_OPTION_ROWS);
            layouts.put(translationId, layout);
        }
        scoreDist.computeDistribution();
        isBuilt = true;
        return true;
    }

    /** Returns the number of built layouts (0 before {@link #buildLayouts}). */
    public int getNumTranslations() {
        return (layouts != null) ? layouts.keySet().size() : 0;
    }

    public boolean isBuilt() {
        return isBuilt;
    }

    /** Returns the layout for the given translation id, or null if absent. */
    public TranslationLayout getTranslationLayout(int translationId) {
        return (layouts != null) ? layouts.get(translationId) : null;
    }

    /** Returns the source sentence for the given translation id, or null. */
    public String getTranslationSource(int translationId) {
        if (translations != null && translations.get(translationId) != null)
            return translations.get(translationId).getSource();
        return null;
    }

    public int getMinTranslationId() {
        return minTranslationId;
    }

    /** Returns the standard-deviation bucket of the score in the distribution. */
    public int getScoreRank(double score) {
        return scoreDist.getStdDev(score);
    }

    /** Enables/disables length normalization of log scores (applies to future loads). */
    public void normalizePhraseScores(boolean newState) {
        NORM_SCORES = newState;
    }

    public void setNumberOfOptionRows(int rows) {
        NUM_VISUAL_OPTION_ROWS = rows;
    }
}
|
chrishokamp/phrasal
|
src-extra/edu/stanford/nlp/mt/visualize/phrase/PhraseModel.java
|
Java
|
gpl-3.0
| 5,385 |
/*
* Kuali Coeus, a comprehensive research administration system for higher education.
*
* Copyright 2005-2015 Kuali, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.kuali.kra.coi.print;
/**
* This class represents different types of reports for COI.
*/
/**
 * This class represents different types of reports for COI, each carrying the
 * report-template key used by the printing subsystem.
 */
public enum CoiReportType {
    /** Report type for the COI reports that utilize templates. */
    COI_TEMPLATE("committee_template"),
    /** Report type for the COI correspondence reports that utilize templates. */
    COI_CORRESPONDENCE_TEMPLATE("protocol_correspondence_template"),
    /** Report type for batch-generated COI correspondence. */
    COI_BATCH_CORRESPONDENCE("coiBatchCorrespondence"),
    /** Report type for approved-disclosure reports. */
    COI_APPROVED_DISCLOSURE("coiApprovedDisclosure");

    // NOTE(review): the first two keys reference "committee"/"protocol" rather
    // than "coi" — presumably shared template identifiers; confirm they are the
    // intended lookup keys for the COI module before changing them.
    // Made final: enum constant state must be immutable.
    private final String coiReportType;

    /**
     * Constructs a CoiReportType.
     * @param coiReportType the template key for this report type
     */
    CoiReportType(String coiReportType) {
        this.coiReportType = coiReportType;
    }

    /** Returns the template key associated with this report type. */
    public String getCoiReportType() {
        return coiReportType;
    }
}
|
sanjupolus/KC6.oLatest
|
coeus-impl/src/main/java/org/kuali/kra/coi/print/CoiReportType.java
|
Java
|
agpl-3.0
| 1,614 |
/*
* Copyright (c) 2017 Memorial Sloan-Kettering Cancer Center.
*
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS
* FOR A PARTICULAR PURPOSE. The software and documentation provided hereunder
* is on an "as is" basis, and Memorial Sloan-Kettering Cancer Center has no
* obligations to provide maintenance, support, updates, enhancements or
* modifications. In no event shall Memorial Sloan-Kettering Cancer Center be
* liable to any party for direct, indirect, special, incidental or
* consequential damages, including lost profits, arising out of the use of this
* software and its documentation, even if Memorial Sloan-Kettering Cancer
* Center has been advised of the possibility of such damage.
*/
/*
* This file is part of cBioPortal.
*
* cBioPortal is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.mskcc.cbio.portal.dao;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.*;
import org.mskcc.cbio.portal.model.CancerStudy;
import org.mskcc.cbio.portal.model.GenePanel;
/**
*
* @author heinsz
*/
/**
 * Read-only DAO for gene panels: loads the whole gene_panel table once at
 * class-initialization time and serves lookups by stable id from memory.
 *
 * @author heinsz
 */
public class DaoGenePanel {
    /** Cache keyed by stable id; empty — never null — if loading failed. */
    private static Map<String, GenePanel> genePanelMap = initMap();

    /** Returns the cached panel for the stable id, or null if unknown. */
    public static GenePanel getGenePanelByStableId(String stableId) {
        return genePanelMap.get(stableId);
    }

    private static Map<String, GenePanel> initMap() {
        // Start with an empty map so a SQL failure degrades to "not found"
        // lookups instead of leaving the cache null (the previous code
        // returned null on SQLException, making every later lookup NPE).
        Map<String, GenePanel> genePanelMap = new HashMap<>();
        Connection con = null;
        PreparedStatement pstmt = null;
        ResultSet rs = null;
        try {
            // NOTE(review): connection bookkeeping uses DaoCancerStudy.class,
            // not DaoGenePanel.class — confirm this is intentional.
            con = JdbcUtil.getDbConnection(DaoCancerStudy.class);
            pstmt = con.prepareStatement("SELECT * FROM gene_panel");
            rs = pstmt.executeQuery();
            genePanelMap = extractGenePanelMap(rs);
        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            JdbcUtil.closeAll(DaoCancerStudy.class, con, pstmt, rs);
        }
        return genePanelMap;
    }

    /** Materializes the result set into a stableId -> GenePanel map. */
    private static Map<String, GenePanel> extractGenePanelMap(ResultSet rs) throws SQLException {
        Map<String, GenePanel> genePanelMap = new HashMap<>();
        while (rs.next()) {
            GenePanel gp = new GenePanel();
            gp.setInternalId(rs.getInt("INTERNAL_ID"));
            gp.setStableId(rs.getString("STABLE_ID"));
            gp.setDescription(rs.getString("DESCRIPTION"));
            genePanelMap.put(gp.getStableId(), gp);
        }
        return genePanelMap;
    }
}
|
adamabeshouse/cbioportal
|
core/src/main/java/org/mskcc/cbio/portal/dao/DaoGenePanel.java
|
Java
|
agpl-3.0
| 3,168 |
/*
* JBoss, Home of Professional Open Source.
* Copyright 2010, Red Hat, Inc., and individual contributors
* as indicated by the @author tags. See the copyright.txt file in the
* distribution for a full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.jboss.as.ejb3.subsystem;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.jboss.as.controller.AbstractAddStepHandler;
import org.jboss.as.controller.AttributeDefinition;
import org.jboss.as.controller.OperationContext;
import org.jboss.as.controller.OperationFailedException;
import org.jboss.as.controller.PathAddress;
import org.jboss.as.controller.descriptions.ModelDescriptionConstants;
import org.jboss.as.ejb3.cache.CacheFactoryBuilderServiceNameProvider;
import org.jboss.as.ejb3.cache.distributable.DistributableCacheFactoryBuilderServiceNameProvider;
import org.jboss.as.ejb3.cache.simple.SimpleCacheFactoryBuilderServiceConfigurator;
import org.jboss.dmr.ModelNode;
import org.jboss.msc.service.ServiceBuilder;
import org.jboss.msc.service.ServiceTarget;
import org.wildfly.clustering.service.IdentityServiceConfigurator;
import org.wildfly.clustering.service.ServiceConfigurator;
/**
 * Add-step handler that configures, builds and installs the CacheFactoryBuilder
 * services backing SFSB caching.
 *
 * @author Paul Ferraro
 */
public class CacheFactoryAdd extends AbstractAddStepHandler {

    CacheFactoryAdd(AttributeDefinition... attributes) {
        super(attributes);
    }

    @Override
    protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model) throws OperationFailedException {
        // cache-factory name is the last element of the operation address
        String cacheName = PathAddress.pathAddress(operation.get(ModelDescriptionConstants.ADDRESS)).getLastElement().getValue();

        ModelNode storeNode = CacheFactoryResourceDefinition.PASSIVATION_STORE.resolveModelAttribute(context, model);
        Collection<String> aliasValues = CacheFactoryResourceDefinition.ALIASES.unwrap(context, model);
        Set<String> aliasNames = (aliasValues == null) ? Collections.<String>emptySet() : new HashSet<>(aliasValues);

        ServiceTarget target = context.getServiceTarget();

        // With a passivation store: alias to the distributable builder; otherwise use the simple (in-memory) one.
        ServiceConfigurator configurator;
        if (storeNode.isDefined()) {
            configurator = new IdentityServiceConfigurator<>(
                    new CacheFactoryBuilderServiceNameProvider(cacheName).getServiceName(),
                    new DistributableCacheFactoryBuilderServiceNameProvider(storeNode.asString()).getServiceName());
        } else {
            configurator = new SimpleCacheFactoryBuilderServiceConfigurator<>(cacheName);
        }
        ServiceBuilder<?> builder = configurator.build(target);

        // Each alias is an identity service delegating to the primary CacheFactoryBuilder service.
        for (String alias : aliasNames) {
            new IdentityServiceConfigurator<>(new CacheFactoryBuilderServiceNameProvider(alias).getServiceName(), configurator.getServiceName()).build(target).install();
        }
        builder.install();
    }
}
|
jstourac/wildfly
|
ejb3/src/main/java/org/jboss/as/ejb3/subsystem/CacheFactoryAdd.java
|
Java
|
lgpl-2.1
| 3,866 |
package org.intermine.api.util;
/*
* Copyright (C) 2002-2016 FlyMine
*
* This code may be freely distributed and modified under the
* terms of the GNU Lesser General Public Licence. This should
* be distributed with the code. See the LICENSE file for more
* information or http://www.gnu.org/copyleft/lesser.html.
*
*/
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
/**
* Utility methods for naming queries and lists.
*
* @author Julie Sullivan
*/
public final class NameUtil
{
    private NameUtil() {
        // utility class - not instantiable
    }

    /**
     * error message for bad names. should come from properties file instead, really
     */
    public static final String INVALID_NAME_MSG = "Names for lists and queries may only contain "
        + "A-Z, a-z, 0-9, underscores and dashes.";

    private static final String QUERY_NAME_PREFIX = "query_";

    // Maps a forbidden character to its textual replacement, e.g. "&" -> "AMPERSAND".
    private static final Map<String, String> SPEC_CHAR_TO_TEXT = new HashMap<String, String>();

    // Forbids anything other than word chars, whitespace, dot, dash and colon.
    private static final Pattern SPECIAL_CHARS_PATTERN = Pattern.compile("[^\\w\\s\\.\\-:]");
    // Stricter variant: forbids anything other than word chars, dash and colon.
    private static final Pattern NO_SPECIAL_CHARS_PATTERN = Pattern.compile("[^\\w\\-:]");

    /*
     * Generates a map of special characters to their name, used to swap out bad characters in
     * query/list names
     */
    static {
        SPEC_CHAR_TO_TEXT.put("!", "EXCLAMATION_POINT");
        SPEC_CHAR_TO_TEXT.put("$", "DOLLAR_SIGN");
        SPEC_CHAR_TO_TEXT.put("%", "PERCENT_SIGN");
        SPEC_CHAR_TO_TEXT.put("^", "CARET");
        SPEC_CHAR_TO_TEXT.put("&", "AMPERSAND");
        SPEC_CHAR_TO_TEXT.put("(", "LEFT_PARENTHESIS");
        SPEC_CHAR_TO_TEXT.put(")", "RIGHT_PARENTHESIS");
        SPEC_CHAR_TO_TEXT.put("+", "PLUS_SIGN");
        SPEC_CHAR_TO_TEXT.put("=", "EQUALS_SIGN");
        SPEC_CHAR_TO_TEXT.put("{", "LEFT_BRACKET");
        SPEC_CHAR_TO_TEXT.put("}", "RIGHT_BRACKET");
        SPEC_CHAR_TO_TEXT.put("[", "LEFT_BRACKET");
        SPEC_CHAR_TO_TEXT.put("]", "RIGHT_BRACKET");
        SPEC_CHAR_TO_TEXT.put(":", "COLON");
        SPEC_CHAR_TO_TEXT.put(";", "SEMICOLON");
        SPEC_CHAR_TO_TEXT.put("@", "AT_SIGN");
        SPEC_CHAR_TO_TEXT.put(",", "COMMA");
        SPEC_CHAR_TO_TEXT.put("?", "QUESTION_MARK");
        SPEC_CHAR_TO_TEXT.put("~", "TILDE");
        SPEC_CHAR_TO_TEXT.put("#", "HASH");
        SPEC_CHAR_TO_TEXT.put("<", "LESS_THAN");
        SPEC_CHAR_TO_TEXT.put(">", "GREATER_THAN");
        SPEC_CHAR_TO_TEXT.put("'", "APOSTROPHE");
        SPEC_CHAR_TO_TEXT.put("/", "FORWARD_SLASH");
        SPEC_CHAR_TO_TEXT.put("\\", "BACK_SLASH");
        SPEC_CHAR_TO_TEXT.put("*", "ASTERISK");
    }

    /**
     * Verifies names (bags, queries, etc) only contain A-Z, a-z, 0-9, underscores and
     * dashes. And spaces. And dots.
     * @param name Name of bag/query/template to be validated
     * @return isValid Returns true if this name is correct, false if this name contains a bad char
     */
    public static boolean isValidName(String name) {
        return validateName(name, true);
    }

    /**
     * Verifies names (bags, queries, etc) only contain A-Z, a-z, 0-9, underscores and
     * dashes.
     * if specialChars boolean is TRUE, then dot and space are allowed. If specialChars is FALSE,
     * it likely means the name is going to be handled by javascript, in URLS, etc and we don't
     * want to have to encode it. eg. template name.
     *
     * @param name Name of bag/query/template to be validated
     * @param specialChars if true, then special characters DOT and SPACE are allowed in name
     * @return isValid Returns true if this name is correct, false if this name contains a bad char
     */
    public static boolean isValidName(String name, boolean specialChars) {
        return validateName(name, specialChars);
    }

    // Core check: non-blank and free of any character forbidden by the chosen pattern.
    private static boolean validateName(String name, boolean specialChars) {
        if (isBlank(name)) {
            return false;
        }
        Matcher m = (specialChars ? SPECIAL_CHARS_PATTERN.matcher(name)
            : NO_SPECIAL_CHARS_PATTERN.matcher(name));
        return !m.find();
    }

    // Stdlib replacement for commons-lang StringUtils.isBlank.
    private static boolean isBlank(String s) {
        return s == null || s.trim().isEmpty();
    }

    /**
     * Takes a string and replaces special characters with the text value, e.g. it would change
     * "a&amp;b" to "a_AMPERSAND_b". This is used in the query/template imports to handle special
     * characters.
     *
     * <p>Fixed: the previous implementation built the replacement string but discarded it,
     * instead returning the input stripped of ALL non-alphanumeric characters - including
     * underscores and dashes, which {@link #INVALID_NAME_MSG} declares legal.</p>
     *
     * @param name Name of query/template
     * @return rebuiltName Name of query/template with the special characters replaced
     */
    public static String replaceSpecialChars(String name) {
        StringBuffer rebuiltName = new StringBuffer();
        for (int i = 0; i < name.length(); i++) {
            String str = String.valueOf(name.charAt(i));
            // Only swap characters the relaxed pattern forbids; spaces and dots are kept as-is.
            if (SPECIAL_CHARS_PATTERN.matcher(str).find()) {
                rebuiltName.append(getSpecCharToText(str));
            } else {
                rebuiltName.append(str);
            }
        }
        return rebuiltName.toString();
    }

    /**
     * Returns the word value of special characters (ie returns _AMPERSAND_ for &amp;, etc).
     * Used for the forced renaming of queries/templates in the query/template import.
     *
     * @param specialCharacter The special character, ie &amp;
     * @return wordEquivalent The special character's name, ie AMPERSAND
     */
    private static String getSpecCharToText(String specialCharacter) {
        String wordEquivalent = SPEC_CHAR_TO_TEXT.get(specialCharacter);
        if (wordEquivalent == null || wordEquivalent.isEmpty()) {
            // unknown special character - still removed, but with a generic marker
            wordEquivalent = "SPECIAL_CHARACTER_REMOVED";
        }
        wordEquivalent = "_" + wordEquivalent + "_";
        return wordEquivalent;
    }

    /**
     * Generate a new name for a list. Used in situations where the user has a new list without
     * creating one via the upload form, e.g. when copying or posting a list from another site
     * @param listName original name for the list
     * @param listNames a list of all lists
     * @return a unique name for the list
     */
    public static String generateNewName(Set<String> listNames, String listName) {
        int i = 1;
        while (listNames.contains(listName + "_" + i)) {
            i++;
        }
        return listName + "_" + i;
    }

    /**
     * Checks that the name doesn't already exist and returns a numbered name if it does. Used in
     * situations where prompting the user for a good name wouldn't work, eg. query import
     * @param name the query or list name
     * @param names list of current names
     * @return a validated name for the query
     */
    public static String validateName(Collection<String> names, String name) {
        String newName = name.trim();
        if (!isValidName(name)) {
            newName = replaceSpecialChars(name);
        }
        if (names.contains(newName)) {
            // append the first numeric suffix not already taken
            int i = 1;
            while (true) {
                String testName = newName + "_" + i;
                if (!names.contains(testName)) {
                    return testName;
                }
                i++;
            }
        }
        return newName;
    }

    /**
     * Return a query name that isn't currently in use.
     *
     * @param savedQueries the Map of current saved queries
     * @return the new query name
     */
    public static String findNewQueryName(Set<String> savedQueries) {
        return findNewQueryName(savedQueries, null);
    }

    /**
     * Return a query name that isn't currently in use, returning the given name
     * if it is available.
     *
     * @param savedQueries the Map of current saved queries
     * @param name name to return if it's available
     * @return the new query name
     */
    public static String findNewQueryName(Set<String> savedQueries, String name) {
        if (name != null && !name.isEmpty() && !savedQueries.contains(name)) {
            return name;
        }
        for (int i = 1;; i++) {
            String testName = QUERY_NAME_PREFIX + i;
            if (savedQueries == null || !savedQueries.contains(testName)) {
                return testName;
            }
        }
    }
}
|
JoeCarlson/intermine
|
intermine/api/main/src/org/intermine/api/util/NameUtil.java
|
Java
|
lgpl-2.1
| 8,687 |
/*
* #%L
* Alfresco Repository
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.service.cmr.action;
import java.util.Set;
import org.alfresco.api.AlfrescoPublicApi;
import org.alfresco.service.namespace.QName;
/**
 * Rule action definition interface. Describes a registered action type
 * (as opposed to a configured {@link Action} instance).
 *
 * @author Roy Wetherall
 */
@AlfrescoPublicApi
public interface ActionDefinition extends ParameterizedItemDefinition
{
    /**
     * Gets a list of the types that this action item is applicable for
     *
     * @return set of types never <tt>null</tt>
     */
    public Set<QName> getApplicableTypes();

    /**
     * Get whether the basic action definition supports action tracking
     * or not. This can be overridden for each {@link Action#getTrackStatus() action}
     * but if not, this value is used. Defaults to <tt>false</tt>.
     *
     * @return <tt>true</tt> to track action execution status or <tt>false</tt> (default)
     *      to do no action tracking
     *
     * @since 3.4.1
     */
    public boolean getTrackStatus();
}
|
Alfresco/alfresco-repository
|
src/main/java/org/alfresco/service/cmr/action/ActionDefinition.java
|
Java
|
lgpl-3.0
| 2,023 |
/*
* Copyright (C) 2015 higherfrequencytrading.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package net.openhft.chronicle.set;
import net.openhft.chronicle.hash.HashSegmentContext;
/**
 * Context of {@link ChronicleSet}'s segment. Marker interface combining the
 * generic per-segment hash context with set-specific operations; it declares
 * no members of its own.
 *
 * @param <K> the key type of accessed {@code ChronicleSet}
 * @param <R> the return type of {@link SetEntryOperations} specified for the queried set
 * @see ChronicleSet#segmentContext(int)
 */
public interface SetSegmentContext<K, R>
        extends HashSegmentContext<K, SetEntry<K>>, SetContext<K, R> {
}
|
lburgazzoli/Chronicle-Map
|
src/main/java/net/openhft/chronicle/set/SetSegmentContext.java
|
Java
|
lgpl-3.0
| 1,189 |
/*
* #%L
* Alfresco Repository
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.service.cmr.activities;
import java.io.Serializable;
/**
 * Immutable value object identifying an activity-feed opt-out: a site and an
 * application tool. Null constructor arguments are normalised to the empty
 * string, so the getters never return null.
 */
public class FeedControl implements Serializable
{
    private static final long serialVersionUID = -1934566916718472843L;

    // Both fields are final: instances are immutable after construction.
    private final String siteId;
    private final String appToolId;

    /**
     * @param siteId the site identifier; null is treated as ""
     * @param appToolId the application tool identifier; null is treated as ""
     */
    public FeedControl(String siteId, String appToolId)
    {
        this.siteId = (siteId == null) ? "" : siteId;
        this.appToolId = (appToolId == null) ? "" : appToolId;
    }

    /**
     * @return the site identifier, never null
     */
    public String getSiteId()
    {
        return this.siteId;
    }

    /**
     * @return the application tool identifier, never null
     */
    public String getAppToolId()
    {
        return this.appToolId;
    }
}
|
Alfresco/alfresco-repository
|
src/main/java/org/alfresco/service/cmr/activities/FeedControl.java
|
Java
|
lgpl-3.0
| 1,757 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.file.strategy;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.builder.RouteBuilder;
/**
 * Variant of {@link FileIdempotentReadLockTest} exercising the
 * {@code idempotent-rename} read lock against the same endpoint and repository.
 */
public class FileIdempotentRenameReadLockTest extends FileIdempotentReadLockTest {

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                from("file:target/data/changed/in?initialDelay=0&delay=10&readLockCheckInterval=100&readLock=idempotent-rename&idempotentRepository=#myRepo")
                    .process(exchange -> {
                        // mid-flight: the idempotent repository should hold one or two entries
                        int cached = myRepo.getCacheSize();
                        assertTrue(cached == 1 || cached == 2);
                    })
                    .to("mock:result");
            }
        };
    }
}
|
DariusX/camel
|
core/camel-core/src/test/java/org/apache/camel/component/file/strategy/FileIdempotentRenameReadLockTest.java
|
Java
|
apache-2.0
| 1,846 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2012, Red Hat, Inc. and/or its affiliates, and individual
* contributors by the @authors tag. See the copyright.txt in the
* distribution for a full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.as.quickstarts.tasks;
import javax.ejb.Local;
/**
 * Basic operations for manipulation with users
 *
 * @author Lukas Fryc
 *
 */
@Local
public interface UserDao {

    /**
     * Looks up a user by username.
     *
     * @param username the username to search for
     * @return the matching user; behaviour when no user matches is not visible
     *         here - presumably null, TODO confirm against the implementation
     */
    public User getForUsername(String username);

    /**
     * Persists the given user.
     *
     * @param user the user to create
     */
    public void createUser(User user);
}
|
ejlp12/quickstart
|
tasks/src/main/java/org/jboss/as/quickstarts/tasks/UserDao.java
|
Java
|
apache-2.0
| 1,079 |
/*************************GO-LICENSE-START*********************************
* Copyright 2018 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*************************GO-LICENSE-END***********************************/
package com.thoughtworks.go.plugin.activation.test;
import com.thoughtworks.go.plugin.api.GoPlugin;
import com.thoughtworks.go.plugin.api.TestGoPluginExtensionPoint;
import com.thoughtworks.go.plugin.api.annotation.Extension;
/**
 * Test fixture: a single {@code @Extension}-annotated class that implements two
 * extension points at once ({@link TestGoPluginExtensionPoint} and {@link GoPlugin}),
 * inheriting all behaviour from {@link DummyTestPlugin}. Used by plugin-activation
 * tests; it intentionally declares no members of its own.
 */
@Extension
public class TestGoPluginExtensionThatImplementsTwoExtensionPoints extends DummyTestPlugin implements TestGoPluginExtensionPoint, GoPlugin {
}
|
varshavaradarajan/gocd
|
plugin-infra/go-plugin-infra/src/test/java/com/thoughtworks/go/plugin/activation/test/TestGoPluginExtensionThatImplementsTwoExtensionPoints.java
|
Java
|
apache-2.0
| 1,130 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.io.jdbc;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import com.google.auto.value.AutoValue;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.Random;
import javax.annotation.Nullable;
import javax.sql.DataSource;
import org.apache.beam.sdk.coders.Coder;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.Flatten;
import org.apache.beam.sdk.transforms.GroupByKey;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.Values;
import org.apache.beam.sdk.transforms.display.DisplayData;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PBegin;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PDone;
import org.apache.commons.dbcp2.BasicDataSource;
/**
* IO to read and write data on JDBC.
*
* <h3>Reading from JDBC datasource</h3>
*
* <p>JdbcIO source returns a bounded collection of {@code T} as a {@code PCollection<T>}. T is the
* type returned by the provided {@link RowMapper}.
*
* <p>To configure the JDBC source, you have to provide a {@link DataSourceConfiguration} using
* {@link DataSourceConfiguration#create(DataSource)} or
* {@link DataSourceConfiguration#create(String, String)} with either a
* {@link DataSource} (which must be {@link Serializable}) or the parameters needed to create it
* (driver class name and url). Optionally, {@link DataSourceConfiguration#withUsername(String)} and
* {@link DataSourceConfiguration#withPassword(String)} allows you to define DataSource username
* and password.
* For example:
*
* <pre>{@code
* pipeline.apply(JdbcIO.<KV<Integer, String>>read()
* .withDataSourceConfiguration(JdbcIO.DataSourceConfiguration.create(
* "com.mysql.jdbc.Driver", "jdbc:mysql://hostname:3306/mydb")
* .withUsername("username")
* .withPassword("password"))
* .withQuery("select id,name from Person")
* .withRowMapper(new JdbcIO.RowMapper<KV<Integer, String>>() {
* public KV<Integer, String> mapRow(ResultSet resultSet) throws Exception {
* return KV.of(resultSet.getInt(1), resultSet.getString(2));
* }
* })
* }</pre>
*
* <p>Query parameters can be configured using a user-provided {@link StatementPreparator}.
* For example:</p>
*
* <pre>{@code
* pipeline.apply(JdbcIO.<KV<Integer, String>>read()
* .withDataSourceConfiguration(JdbcIO.DataSourceConfiguration.create(
* "com.mysql.jdbc.Driver", "jdbc:mysql://hostname:3306/mydb",
* "username", "password"))
* .withQuery("select id,name from Person where name = ?")
* .withStatementPreparator(new JdbcIO.StatementPreparator() {
* public void setParameters(PreparedStatement preparedStatement) throws Exception {
* preparedStatement.setString(1, "Darwin");
* }
* })
* .withRowMapper(new JdbcIO.RowMapper<KV<Integer, String>>() {
* public KV<Integer, String> mapRow(ResultSet resultSet) throws Exception {
* return KV.of(resultSet.getInt(1), resultSet.getString(2));
* }
* })
* }</pre>
*
* <h3>Writing to JDBC datasource</h3>
*
* <p>JDBC sink supports writing records into a database. It writes a {@link PCollection} to the
* database by converting each T into a {@link PreparedStatement} via a user-provided {@link
* PreparedStatementSetter}.
*
* <p>Like the source, to configure the sink, you have to provide a {@link DataSourceConfiguration}.
*
* <pre>{@code
* pipeline
* .apply(...)
* .apply(JdbcIO.<KV<Integer, String>>write()
* .withDataSourceConfiguration(JdbcIO.DataSourceConfiguration.create(
* "com.mysql.jdbc.Driver", "jdbc:mysql://hostname:3306/mydb")
* .withUsername("username")
* .withPassword("password"))
* .withStatement("insert into Person values(?, ?)")
* .withPreparedStatementSetter(new JdbcIO.PreparedStatementSetter<KV<Integer, String>>() {
* public void setParameters(KV<Integer, String> element, PreparedStatement query) {
* query.setInt(1, kv.getKey());
* query.setString(2, kv.getValue());
* }
* })
* }</pre>
*
* <p>NB: in case of transient failures, Beam runners may execute parts of JdbcIO.Write multiple
* times for fault tolerance. Because of that, you should avoid using {@code INSERT} statements,
* since that risks duplicating records in the database, or failing due to primary key conflicts.
* Consider using <a href="https://en.wikipedia.org/wiki/Merge_(SQL)">MERGE ("upsert")
* statements</a> supported by your database instead.
*/
public class JdbcIO {
  /**
   * Read data from a JDBC datasource.
   *
   * <p>Before use, the returned transform must be configured with a datasource
   * ({@code withDataSourceConfiguration}), a query ({@code withQuery}), a row mapper
   * ({@code withRowMapper}) and a coder ({@code withCoder}) - see {@code Read#validate}.
   *
   * @param <T> Type of the data to be read.
   */
  public static <T> Read<T> read() {
    return new AutoValue_JdbcIO_Read.Builder<T>().build();
  }
  /**
   * Write data to a JDBC datasource.
   *
   * <p>Before use, the returned transform must be configured with a datasource
   * ({@code withDataSourceConfiguration}), a statement ({@code withStatement}) and a
   * prepared-statement setter ({@code withPreparedStatementSetter}) - see
   * {@code Write#validate}.
   *
   * @param <T> Type of the data to be written.
   */
  public static <T> Write<T> write() {
    return new AutoValue_JdbcIO_Write.Builder<T>().build();
  }
  // Non-instantiable container class: use the static read()/write() factories.
  private JdbcIO() {}
  /**
   * An interface used by {@link JdbcIO.Read} for converting each row of the {@link ResultSet} into
   * an element of the resulting {@link PCollection}.
   *
   * <p>Implementations must be {@link Serializable} as they are shipped to workers. The mapper
   * is invoked once per row with the cursor already positioned; it should read column values
   * but not advance the cursor itself.
   */
  public interface RowMapper<T> extends Serializable {
    // Converts the current row into one output element; may throw on SQL/conversion errors.
    T mapRow(ResultSet resultSet) throws Exception;
  }
  /**
   * A POJO describing a {@link DataSource}, either providing directly a {@link DataSource} or all
   * properties allowing to create a {@link DataSource}.
   *
   * <p>Exactly one of the two construction styles is used: either {@link #create(DataSource)}
   * with a {@link Serializable} datasource, or {@link #create(String, String)} with a JDBC
   * driver class name and URL (optionally followed by {@link #withUsername(String)} /
   * {@link #withPassword(String)}).
   */
  @AutoValue
  public abstract static class DataSourceConfiguration implements Serializable {
    // All properties are nullable; which ones are set depends on the factory used (see above).
    @Nullable abstract String getDriverClassName();
    @Nullable abstract String getUrl();
    @Nullable abstract String getUsername();
    @Nullable abstract String getPassword();
    @Nullable abstract DataSource getDataSource();

    abstract Builder builder();

    @AutoValue.Builder
    abstract static class Builder {
      abstract Builder setDriverClassName(String driverClassName);
      abstract Builder setUrl(String url);
      abstract Builder setUsername(String username);
      abstract Builder setPassword(String password);
      abstract Builder setDataSource(DataSource dataSource);
      abstract DataSourceConfiguration build();
    }

    /**
     * Wraps an existing datasource; it must be {@link Serializable} so the configuration
     * can be shipped to workers.
     */
    public static DataSourceConfiguration create(DataSource dataSource) {
      checkArgument(dataSource != null, "DataSourceConfiguration.create(dataSource) called with "
          + "null data source");
      checkArgument(dataSource instanceof Serializable,
          "DataSourceConfiguration.create(dataSource) called with a dataSource not Serializable");
      return new AutoValue_JdbcIO_DataSourceConfiguration.Builder()
          .setDataSource(dataSource)
          .build();
    }

    /** Describes a datasource by JDBC driver class name and connection URL. */
    public static DataSourceConfiguration create(String driverClassName, String url) {
      checkArgument(driverClassName != null,
          "DataSourceConfiguration.create(driverClassName, url) called with null driverClassName");
      checkArgument(url != null,
          "DataSourceConfiguration.create(driverClassName, url) called with null url");
      return new AutoValue_JdbcIO_DataSourceConfiguration.Builder()
          .setDriverClassName(driverClassName)
          .setUrl(url)
          .build();
    }

    /** Returns a copy of this configuration with the given username. */
    public DataSourceConfiguration withUsername(String username) {
      return builder().setUsername(username).build();
    }

    /** Returns a copy of this configuration with the given password. */
    public DataSourceConfiguration withPassword(String password) {
      return builder().setPassword(password).build();
    }

    // Registers connection details for pipeline display; the password is deliberately omitted.
    private void populateDisplayData(DisplayData.Builder builder) {
      if (getDataSource() != null) {
        builder.addIfNotNull(DisplayData.item("dataSource", getDataSource().getClass().getName()));
      } else {
        builder.addIfNotNull(DisplayData.item("jdbcDriverClassName", getDriverClassName()));
        builder.addIfNotNull(DisplayData.item("jdbcUrl", getUrl()));
        builder.addIfNotNull(DisplayData.item("username", getUsername()));
      }
    }

    // Opens a connection: via the wrapped DataSource when one was provided, otherwise
    // through a freshly built commons-dbcp2 BasicDataSource from the driver/url properties.
    Connection getConnection() throws Exception {
      if (getDataSource() != null) {
        return (getUsername() != null)
            ? getDataSource().getConnection(getUsername(), getPassword())
            : getDataSource().getConnection();
      } else {
        BasicDataSource basicDataSource = new BasicDataSource();
        basicDataSource.setDriverClassName(getDriverClassName());
        basicDataSource.setUrl(getUrl());
        basicDataSource.setUsername(getUsername());
        basicDataSource.setPassword(getPassword());
        return basicDataSource.getConnection();
      }
    }
  }
  /**
   * An interface used by {@link JdbcIO.Read} to set the parameters of the
   * {@link PreparedStatement} built from the read query.
   *
   * <p>NOTE(review): the original javadoc said "used by the JdbcIO Write", but this type is
   * consumed by {@code Read} (see {@code Read.ReadFn#processElement}); the Write side uses
   * {@link PreparedStatementSetter} instead.
   */
  public interface StatementPreparator extends Serializable {
    void setParameters(PreparedStatement preparedStatement) throws Exception;
  }
/** A {@link PTransform} to read data from a JDBC datasource. */
@AutoValue
public abstract static class Read<T> extends PTransform<PBegin, PCollection<T>> {
@Nullable abstract DataSourceConfiguration getDataSourceConfiguration();
@Nullable abstract String getQuery();
@Nullable abstract StatementPreparator getStatementPreparator();
@Nullable abstract RowMapper<T> getRowMapper();
@Nullable abstract Coder<T> getCoder();
abstract Builder<T> toBuilder();
@AutoValue.Builder
abstract static class Builder<T> {
abstract Builder<T> setDataSourceConfiguration(DataSourceConfiguration config);
abstract Builder<T> setQuery(String query);
abstract Builder<T> setStatementPreparator(StatementPreparator statementPreparator);
abstract Builder<T> setRowMapper(RowMapper<T> rowMapper);
abstract Builder<T> setCoder(Coder<T> coder);
abstract Read<T> build();
}
public Read<T> withDataSourceConfiguration(DataSourceConfiguration configuration) {
checkArgument(configuration != null, "JdbcIO.read().withDataSourceConfiguration"
+ "(configuration) called with null configuration");
return toBuilder().setDataSourceConfiguration(configuration).build();
}
public Read<T> withQuery(String query) {
checkArgument(query != null, "JdbcIO.read().withQuery(query) called with null query");
return toBuilder().setQuery(query).build();
}
public Read<T> withStatementPrepator(StatementPreparator statementPreparator) {
checkArgument(statementPreparator != null,
"JdbcIO.read().withStatementPreparator(statementPreparator) called "
+ "with null statementPreparator");
return toBuilder().setStatementPreparator(statementPreparator).build();
}
public Read<T> withRowMapper(RowMapper<T> rowMapper) {
checkArgument(rowMapper != null,
"JdbcIO.read().withRowMapper(rowMapper) called with null rowMapper");
return toBuilder().setRowMapper(rowMapper).build();
}
public Read<T> withCoder(Coder<T> coder) {
checkArgument(coder != null, "JdbcIO.read().withCoder(coder) called with null coder");
return toBuilder().setCoder(coder).build();
}
@Override
public PCollection<T> expand(PBegin input) {
return input
.apply(Create.of(getQuery()))
.apply(ParDo.of(new ReadFn<>(this))).setCoder(getCoder())
// generate a random key followed by a GroupByKey and then ungroup
// to prevent fusion
// see https://cloud.google.com/dataflow/service/dataflow-service-desc#preventing-fusion
// for details
.apply(ParDo.of(new DoFn<T, KV<Integer, T>>() {
private Random random;
@Setup
public void setup() {
random = new Random();
}
@ProcessElement
public void processElement(ProcessContext context) {
context.output(KV.of(random.nextInt(), context.element()));
}
}))
.apply(GroupByKey.<Integer, T>create())
.apply(Values.<Iterable<T>>create())
.apply(Flatten.<T>iterables());
}
@Override
public void validate(PBegin input) {
checkState(getQuery() != null,
"JdbcIO.read() requires a query to be set via withQuery(query)");
checkState(getRowMapper() != null,
"JdbcIO.read() requires a rowMapper to be set via withRowMapper(rowMapper)");
checkState(getCoder() != null,
"JdbcIO.read() requires a coder to be set via withCoder(coder)");
checkState(getDataSourceConfiguration() != null,
"JdbcIO.read() requires a DataSource configuration to be set via "
+ "withDataSourceConfiguration(dataSourceConfiguration)");
}
@Override
public void populateDisplayData(DisplayData.Builder builder) {
super.populateDisplayData(builder);
builder.add(DisplayData.item("query", getQuery()));
builder.add(DisplayData.item("rowMapper", getRowMapper().getClass().getName()));
builder.add(DisplayData.item("coder", getCoder().getClass().getName()));
getDataSourceConfiguration().populateDisplayData(builder);
}
/**
 * A {@link DoFn} that executes the configured SQL query against a JDBC
 * connection and emits one mapped record per result-set row.
 */
static class ReadFn<T> extends DoFn<String, T> {
  private JdbcIO.Read<T> spec;
  private Connection connection;

  private ReadFn(Read<T> spec) {
    this.spec = spec;
  }

  @Setup
  public void setup() throws Exception {
    // Open a single connection per DoFn instance and reuse it for all bundles.
    connection = spec.getDataSourceConfiguration().getConnection();
  }

  @ProcessElement
  public void processElement(ProcessContext context) throws Exception {
    final String sql = context.element();
    try (PreparedStatement stmt = connection.prepareStatement(sql)) {
      // Let the user-supplied preparator bind statement parameters, if any.
      if (spec.getStatementPreparator() != null) {
        spec.getStatementPreparator().setParameters(stmt);
      }
      try (ResultSet rows = stmt.executeQuery()) {
        while (rows.next()) {
          context.output(spec.getRowMapper().mapRow(rows));
        }
      }
    }
  }

  @Teardown
  public void teardown() throws Exception {
    if (connection != null) {
      connection.close();
    }
  }
}
}
/**
 * An interface used by {@link JdbcIO.Write} to bind the values of an element
 * onto the {@link PreparedStatement} that writes it to the database.
 */
public interface PreparedStatementSetter<T> extends Serializable {
  void setParameters(T element, PreparedStatement preparedStatement) throws Exception;
}
/** A {@link PTransform} to write to a JDBC datasource. */
@AutoValue
public abstract static class Write<T> extends PTransform<PCollection<T>, PDone> {

  // All three are nullable until set through the with* methods below;
  // validate() enforces their presence before the pipeline runs.
  @Nullable abstract DataSourceConfiguration getDataSourceConfiguration();
  @Nullable abstract String getStatement();
  @Nullable abstract PreparedStatementSetter<T> getPreparedStatementSetter();

  abstract Builder<T> toBuilder();

  /** AutoValue builder mirroring the abstract getters above. */
  @AutoValue.Builder
  abstract static class Builder<T> {
    abstract Builder<T> setDataSourceConfiguration(DataSourceConfiguration config);
    abstract Builder<T> setStatement(String statement);
    abstract Builder<T> setPreparedStatementSetter(PreparedStatementSetter<T> setter);
    abstract Write<T> build();
  }

  /** Returns a copy of this transform using the given datasource configuration. */
  public Write<T> withDataSourceConfiguration(DataSourceConfiguration config) {
    return toBuilder().setDataSourceConfiguration(config).build();
  }

  /** Returns a copy of this transform using the given SQL statement. */
  public Write<T> withStatement(String statement) {
    return toBuilder().setStatement(statement).build();
  }

  /** Returns a copy of this transform using the given parameter setter. */
  public Write<T> withPreparedStatementSetter(PreparedStatementSetter<T> setter) {
    return toBuilder().setPreparedStatementSetter(setter).build();
  }

  @Override
  public PDone expand(PCollection<T> input) {
    input.apply(ParDo.of(new WriteFn<T>(this)));
    return PDone.in(input.getPipeline());
  }

  /**
   * Verifies that a datasource configuration, a statement and a prepared
   * statement setter were all supplied before the pipeline runs.
   */
  @Override
  public void validate(PCollection<T> input) {
    checkArgument(getDataSourceConfiguration() != null,
        "JdbcIO.write() requires a configuration to be set via "
            + ".withDataSourceConfiguration(configuration)");
    checkArgument(getStatement() != null,
        "JdbcIO.write() requires a statement to be set via .withStatement(statement)");
    checkArgument(getPreparedStatementSetter() != null,
        "JdbcIO.write() requires a preparedStatementSetter to be set via "
            + ".withPreparedStatementSetter(preparedStatementSetter)");
  }

  /**
   * A {@link DoFn} that batches incoming elements into a JDBC prepared
   * statement and commits them in groups of {@link #DEFAULT_BATCH_SIZE}.
   */
  private static class WriteFn<T> extends DoFn<T, Void> {
    private static final int DEFAULT_BATCH_SIZE = 1000;

    private final Write<T> spec;
    private Connection connection;
    private PreparedStatement preparedStatement;
    // Number of elements added to the current JDBC batch but not yet executed.
    private int batchCount;

    public WriteFn(Write<T> spec) {
      this.spec = spec;
    }

    @Setup
    public void setup() throws Exception {
      connection = spec.getDataSourceConfiguration().getConnection();
      // Auto-commit is disabled so batches are committed explicitly below.
      connection.setAutoCommit(false);
      preparedStatement = connection.prepareStatement(spec.getStatement());
    }

    @StartBundle
    public void startBundle(Context context) {
      batchCount = 0;
    }

    @ProcessElement
    public void processElement(ProcessContext context) throws Exception {
      T record = context.element();
      preparedStatement.clearParameters();
      spec.getPreparedStatementSetter().setParameters(record, preparedStatement);
      preparedStatement.addBatch();
      batchCount++;
      // Flush eagerly once the batch is full; finishBundle also runs at the
      // natural end of every bundle, so no records are left unflushed.
      if (batchCount >= DEFAULT_BATCH_SIZE) {
        finishBundle(context);
      }
    }

    @FinishBundle
    public void finishBundle(Context context) throws Exception {
      if (batchCount > 0) {
        preparedStatement.executeBatch();
        connection.commit();
        batchCount = 0;
      }
    }

    @Teardown
    public void teardown() throws Exception {
      // Close the statement first, then the connection, even if the first
      // close throws.
      try {
        if (preparedStatement != null) {
          preparedStatement.close();
        }
      } finally {
        if (connection != null) {
          connection.close();
        }
      }
    }
  }
}
}
|
jasonkuster/beam
|
sdks/java/io/jdbc/src/main/java/org/apache/beam/sdk/io/jdbc/JdbcIO.java
|
Java
|
apache-2.0
| 19,250 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package opennlp.tools.ml;
import java.io.IOException;
import java.util.Map;
import opennlp.tools.ml.model.SequenceClassificationModel;
import opennlp.tools.ml.model.SequenceStream;
/**
 * A trainer that produces a {@link SequenceClassificationModel} from a stream
 * of training sequences.
 *
 * <p>Note: interface members are implicitly {@code public} (and fields
 * implicitly {@code static final}), so the redundant modifiers were removed.
 */
public interface SequenceTrainer {

  /** Trainer-type constant identifying sequence training ({@code "Sequence"}). */
  String SEQUENCE_VALUE = "Sequence";

  /**
   * Initializes the trainer.
   *
   * @param trainParams training parameters
   * @param reportMap receives report entries produced during training
   */
  void init(Map<String, String> trainParams, Map<String, String> reportMap);

  /**
   * Trains a sequence classification model from the given event stream.
   *
   * @throws IOException if reading the stream fails
   */
  SequenceClassificationModel<String> train(SequenceStream events) throws IOException;
}
|
Eagles2F/opennlp
|
opennlp-tools/src/main/java/opennlp/tools/ml/SequenceTrainer.java
|
Java
|
apache-2.0
| 1,262 |
/*******************************************************************************
* Copyright 2014 See AUTHORS file.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.badlogic.gdx.ai.utils.random;
import com.badlogic.gdx.math.MathUtils;
/** @author davebaol */
/**
 * A {@link FloatDistribution} drawing values from a triangular distribution
 * over {@code [low, high]} with the given {@code mode}.
 *
 * @author davebaol
 */
public final class TriangularFloatDistribution extends FloatDistribution {

	private final float low;
	private final float high;
	private final float mode;

	/** Creates a symmetric triangular distribution over {@code [-high, high]} with mode 0. */
	public TriangularFloatDistribution (float high) {
		this(-high, high);
	}

	/**
	 * Creates a triangular distribution over {@code [low, high]} whose mode is the
	 * midpoint of the interval.
	 */
	public TriangularFloatDistribution (float low, float high) {
		// FIX: the mode must be the midpoint (low + high) / 2, not half the range
		// width (high - low) / 2. The old expression made this constructor produce
		// a skewed distribution (e.g. for [-h, h] it yielded mode == h instead of
		// 0), which also defeated the mode == 0 fast path in nextFloat().
		this(low, high, (low + high) * .5f);
	}

	public TriangularFloatDistribution (float low, float high, float mode) {
		this.low = low;
		this.high = high;
		this.mode = mode;
	}

	@Override
	public float nextFloat () {
		// Symmetric around zero: delegate to the cheaper single-argument overload.
		if (-low == high && mode == 0) return MathUtils.randomTriangular(high); // It's faster
		return MathUtils.randomTriangular(low, high, mode);
	}

	public float getLow () {
		return low;
	}

	public float getHigh () {
		return high;
	}

	public float getMode () {
		return mode;
	}
}
|
lanen/gdx-ai
|
gdx-ai/src/com/badlogic/gdx/ai/utils/random/TriangularFloatDistribution.java
|
Java
|
apache-2.0
| 1,665 |
/*
* Copyright 2008-2009 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.store;
import java.io.Serializable;
import java.util.HashMap;
import java.util.List;
import voldemort.client.RoutingTier;
import voldemort.serialization.SerializerDefinition;
import voldemort.store.slop.strategy.HintedHandoffStrategyType;
import voldemort.store.system.SystemStoreConstants;
import voldemort.utils.Utils;
import com.google.common.base.Objects;
/**
 * The configuration information for a store: serializers, routing and
 * replication settings, consistency knobs (required/preferred reads and
 * writes), optional zone-aware replication, retention policy, view definition
 * and hinted-handoff settings. Instances are immutable value objects.
 */
public class StoreDefinition implements Serializable {

    private static final long serialVersionUID = 1;

    private final String name;
    private final String type;
    private final String description;
    private final SerializerDefinition keySerializer;
    private final SerializerDefinition valueSerializer;
    private final SerializerDefinition transformsSerializer;
    private final RoutingTier routingPolicy;
    private final int replicationFactor;
    private final Integer preferredWrites;
    private final int requiredWrites;
    private final Integer preferredReads;
    private final int requiredReads;
    private final Integer retentionPeriodDays;
    private final Integer retentionScanThrottleRate;
    private final Integer retentionFrequencyDays;
    private final String routingStrategyType;
    private final String viewOf;
    private final HashMap<Integer, Integer> zoneReplicationFactor;
    private final Integer zoneCountReads;
    private final Integer zoneCountWrites;
    private final String valueTransformation;
    private final String serializerFactory;
    private final HintedHandoffStrategyType hintedHandoffStrategyType;
    private final Integer hintPrefListSize;
    private final List<String> owners;
    private final long memoryFootprintMB;

    /**
     * Builds a store definition from the given settings. Only {@code name} is
     * checked for nullity here; full validation is performed separately via
     * {@link #checkParameterLegality()}.
     */
    public StoreDefinition(String name,
                           String type,
                           String description,
                           SerializerDefinition keySerializer,
                           SerializerDefinition valueSerializer,
                           SerializerDefinition transformsSerializer,
                           RoutingTier routingPolicy,
                           String routingStrategyType,
                           int replicationFactor,
                           Integer preferredReads,
                           int requiredReads,
                           Integer preferredWrites,
                           int requiredWrites,
                           String viewOfStore,
                           String valTrans,
                           HashMap<Integer, Integer> zoneReplicationFactor,
                           Integer zoneCountReads,
                           Integer zoneCountWrites,
                           Integer retentionDays,
                           Integer retentionThrottleRate,
                           Integer retentionFrequencyDays,
                           String factory,
                           HintedHandoffStrategyType hintedHandoffStrategyType,
                           Integer hintPrefListSize,
                           List<String> owners,
                           long memoryFootprintMB) {
        this.name = Utils.notNull(name);
        this.type = type;
        this.description = description;
        this.replicationFactor = replicationFactor;
        this.preferredReads = preferredReads;
        this.requiredReads = requiredReads;
        this.preferredWrites = preferredWrites;
        this.requiredWrites = requiredWrites;
        this.routingPolicy = routingPolicy;
        this.keySerializer = keySerializer;
        this.valueSerializer = valueSerializer;
        this.transformsSerializer = transformsSerializer;
        this.retentionPeriodDays = retentionDays;
        this.retentionScanThrottleRate = retentionThrottleRate;
        this.retentionFrequencyDays = retentionFrequencyDays;
        this.memoryFootprintMB = memoryFootprintMB;
        this.routingStrategyType = routingStrategyType;
        this.viewOf = viewOfStore;
        this.valueTransformation = valTrans;
        this.zoneReplicationFactor = zoneReplicationFactor;
        this.zoneCountReads = zoneCountReads;
        this.zoneCountWrites = zoneCountWrites;
        this.serializerFactory = factory;
        this.hintedHandoffStrategyType = hintedHandoffStrategyType;
        this.hintPrefListSize = hintPrefListSize;
        this.owners = owners;
    }

    // Helper that prefixes validation errors with the store name for context.
    private void throwIllegalException(String errorMessage) {
        throw new IllegalArgumentException(" Store '" + this.name + "'. Error: " + errorMessage);
    }

    /**
     * Validates the internal consistency of this definition: required/preferred
     * read-write counts versus the replication factor, retention settings, and
     * (for non-system stores) the zone replication configuration.
     *
     * @throws IllegalArgumentException if any setting is inconsistent
     */
    protected void checkParameterLegality() {

        // null checks
        Utils.notNull(this.type);
        Utils.notNull(routingPolicy);
        Utils.notNull(keySerializer);
        Utils.notNull(valueSerializer);

        if(requiredReads < 1)
            throwIllegalException("Cannot have a requiredReads number less than 1.");
        else if(requiredReads > replicationFactor)
            throwIllegalException("Cannot have more requiredReads then there are replicas.");

        if(requiredWrites < 1)
            throwIllegalException("Cannot have a requiredWrites number less than 1.");
        else if(requiredWrites > replicationFactor)
            throwIllegalException("Cannot have more requiredWrites then there are replicas.");

        // Preferred counts, when present, must sit between the required count
        // and the replication factor.
        if(preferredWrites != null) {
            if(preferredWrites < requiredWrites)
                throwIllegalException("preferredWrites must be greater or equal to requiredWrites.");
            if(preferredWrites > replicationFactor)
                throwIllegalException("Cannot have more preferredWrites then there are replicas.");
        }
        if(preferredReads != null) {
            if(preferredReads < requiredReads)
                throwIllegalException("preferredReads must be greater or equal to requiredReads.");
            if(preferredReads > replicationFactor)
                throwIllegalException("Cannot have more preferredReads then there are replicas.");
        }

        if(retentionPeriodDays != null && retentionPeriodDays < 0)
            throwIllegalException("Retention days must be non-negative.");

        // Zone-aware settings are only validated for non-system stores that
        // actually declare per-zone replication factors.
        if(!SystemStoreConstants.isSystemStore(name) && zoneReplicationFactor != null
           && zoneReplicationFactor.size() != 0) {

            if(zoneCountReads == null || zoneCountReads < 0)
                throwIllegalException("Zone Counts reads must be non-negative / non-null");

            if(zoneCountWrites == null || zoneCountWrites < 0)
                throwIllegalException("Zone Counts writes must be non-negative");

            int sumZoneReplicationFactor = 0;
            int replicatingZones = 0;
            for(Integer zoneId: zoneReplicationFactor.keySet()) {
                int currentZoneRepFactor = zoneReplicationFactor.get(zoneId);

                sumZoneReplicationFactor += currentZoneRepFactor;
                if(currentZoneRepFactor > 0)
                    replicatingZones++;
            }

            if(replicatingZones <= 0) {
                throwIllegalException("Cannot have no zones to replicate to. "
                                      + "Should have some positive zoneReplicationFactor");
            }

            // Check if sum of individual zones is equal to total replication
            // factor
            if(sumZoneReplicationFactor != replicationFactor) {
                throwIllegalException("Sum total of zones (" + sumZoneReplicationFactor
                                      + ") does not match the total replication factor ("
                                      + replicationFactor + ")");
            }

            // Check if number of zone-count-reads and zone-count-writes are
            // less than zones replicating to
            if(zoneCountReads >= replicatingZones) {
                throwIllegalException("Number of zones to block for while reading ("
                                      + zoneCountReads
                                      + ") should be less then replicating zones ("
                                      + replicatingZones + ")");
            }

            if(zoneCountWrites >= replicatingZones) {
                throwIllegalException("Number of zones to block for while writing ("
                                      + zoneCountWrites
                                      + ") should be less then replicating zones ("
                                      + replicatingZones + ")");
            }
        }
    }

    public String getDescription() {
        return this.description;
    }

    public String getSerializerFactory() {
        return this.serializerFactory;
    }

    public boolean hasTransformsSerializer() {
        return transformsSerializer != null;
    }

    public String getName() {
        return name;
    }

    public int getRequiredWrites() {
        return requiredWrites;
    }

    public SerializerDefinition getKeySerializer() {
        return keySerializer;
    }

    public SerializerDefinition getValueSerializer() {
        return valueSerializer;
    }

    public SerializerDefinition getTransformsSerializer() {
        return transformsSerializer;
    }

    public RoutingTier getRoutingPolicy() {
        return this.routingPolicy;
    }

    public int getReplicationFactor() {
        return this.replicationFactor;
    }

    public String getRoutingStrategyType() {
        return routingStrategyType;
    }

    public int getRequiredReads() {
        return this.requiredReads;
    }

    public boolean hasPreferredWrites() {
        return preferredWrites != null;
    }

    // Preferred counts fall back to the required counts when unset.
    public int getPreferredWrites() {
        return preferredWrites == null ? getRequiredWrites() : preferredWrites;
    }

    public int getPreferredReads() {
        return preferredReads == null ? getRequiredReads() : preferredReads;
    }

    public boolean hasPreferredReads() {
        return preferredReads != null;
    }

    public String getType() {
        return type;
    }

    // Retention is considered enabled only for strictly positive day counts.
    public boolean hasRetentionPeriod() {
        return this.retentionPeriodDays != null && this.retentionPeriodDays > 0;
    }

    public Integer getRetentionDays() {
        return this.retentionPeriodDays;
    }

    public boolean hasRetentionScanThrottleRate() {
        return this.retentionScanThrottleRate != null;
    }

    public Integer getRetentionScanThrottleRate() {
        return this.retentionScanThrottleRate;
    }

    public boolean hasRetentionFrequencyDays() {
        return this.retentionFrequencyDays != null;
    }

    public Integer getRetentionFrequencyDays() {
        return this.retentionFrequencyDays;
    }

    // A store is a view when it declares a backing target store.
    public boolean isView() {
        return this.viewOf != null;
    }

    public String getViewTargetStoreName() {
        return viewOf;
    }

    public boolean hasValueTransformation() {
        return this.valueTransformation != null;
    }

    public String getValueTransformation() {
        return valueTransformation;
    }

    public HashMap<Integer, Integer> getZoneReplicationFactor() {
        return zoneReplicationFactor;
    }

    public Integer getZoneCountReads() {
        return zoneCountReads;
    }

    public boolean hasZoneCountReads() {
        return zoneCountReads != null;
    }

    public Integer getZoneCountWrites() {
        return zoneCountWrites;
    }

    public boolean hasZoneCountWrites() {
        return zoneCountWrites != null;
    }

    public HintedHandoffStrategyType getHintedHandoffStrategyType() {
        return hintedHandoffStrategyType;
    }

    public boolean hasHintedHandoffStrategyType() {
        return hintedHandoffStrategyType != null;
    }

    public Integer getHintPrefListSize() {
        return hintPrefListSize;
    }

    public boolean hasHintPreflistSize() {
        return hintPrefListSize != null;
    }

    public List<String> getOwners() {
        return this.owners;
    }

    public long getMemoryFootprintMB() {
        return this.memoryFootprintMB;
    }

    public boolean hasMemoryFootprint() {
        return memoryFootprintMB != 0;
    }

    @Override
    public boolean equals(Object o) {
        // NOTE(review): the valueTransformation and zoneReplicationFactor
        // comparisons below compare getClass() of the values rather than the
        // values themselves, so any two non-null values compare as equal on
        // those fields. Also, owners participates in hashCode() but not in
        // equals() — TODO confirm whether this asymmetry is intentional, since
        // it can violate the equals/hashCode contract.
        if(this == o)
            return true;
        else if(o == null)
            return false;
        else if(!(o.getClass() == StoreDefinition.class))
            return false;

        StoreDefinition def = (StoreDefinition) o;
        return getName().equals(def.getName())
               && getType().equals(def.getType())
               && getReplicationFactor() == def.getReplicationFactor()
               && getRequiredReads() == def.getRequiredReads()
               && Objects.equal(getPreferredReads(), def.getPreferredReads())
               && getRequiredWrites() == def.getRequiredWrites()
               && Objects.equal(getPreferredWrites(), def.getPreferredWrites())
               && getKeySerializer().equals(def.getKeySerializer())
               && getValueSerializer().equals(def.getValueSerializer())
               && Objects.equal(getTransformsSerializer() != null ? getTransformsSerializer()
                                                                 : null,
                                def.getTransformsSerializer() != null ? def.getTransformsSerializer()
                                                                     : null)
               && getRoutingPolicy() == def.getRoutingPolicy()
               && Objects.equal(getViewTargetStoreName(), def.getViewTargetStoreName())
               && Objects.equal(getValueTransformation() != null ? getValueTransformation().getClass()
                                                                : null,
                                def.getValueTransformation() != null ? def.getValueTransformation()
                                                                          .getClass() : null)
               && Objects.equal(getZoneReplicationFactor() != null ? getZoneReplicationFactor().getClass()
                                                                  : null,
                                def.getZoneReplicationFactor() != null ? def.getZoneReplicationFactor()
                                                                            .getClass()
                                                                      : null)
               && Objects.equal(getZoneCountReads(), def.getZoneCountReads())
               && Objects.equal(getZoneCountWrites(), def.getZoneCountWrites())
               && Objects.equal(getRetentionDays(), def.getRetentionDays())
               && Objects.equal(getRetentionScanThrottleRate(), def.getRetentionScanThrottleRate())
               && Objects.equal(getSerializerFactory() != null ? getSerializerFactory() : null,
                                def.getSerializerFactory() != null ? def.getSerializerFactory()
                                                                  : null)
               && Objects.equal(getHintedHandoffStrategyType(), def.getHintedHandoffStrategyType())
               && Objects.equal(getHintPrefListSize(), def.getHintPrefListSize())
               && Objects.equal(getMemoryFootprintMB(), def.getMemoryFootprintMB());
    }

    @Override
    public int hashCode() {
        // NOTE(review): mirrors equals() in using getClass() for the
        // valueTransformation / zoneReplicationFactor fields, and additionally
        // includes getOwners(), which equals() ignores (see note there).
        return Objects.hashCode(getName(),
                                getType(),
                                getDescription(),
                                getKeySerializer(),
                                getValueSerializer(),
                                getTransformsSerializer(),
                                getRoutingPolicy(),
                                getRoutingStrategyType(),
                                getReplicationFactor(),
                                getRequiredReads(),
                                getRequiredWrites(),
                                getPreferredReads(),
                                getPreferredWrites(),
                                getViewTargetStoreName(),
                                getValueTransformation() == null ? null
                                                                : getValueTransformation().getClass(),
                                getZoneReplicationFactor() == null ? null
                                                                  : getZoneReplicationFactor().getClass(),
                                getZoneCountReads(),
                                getZoneCountWrites(),
                                getRetentionDays(),
                                getRetentionScanThrottleRate(),
                                getSerializerFactory(),
                                hasHintedHandoffStrategyType() ? getHintedHandoffStrategyType()
                                                              : null,
                                hasHintPreflistSize() ? getHintPrefListSize() : null,
                                getOwners(),
                                getMemoryFootprintMB());
    }

    @Override
    public String toString() {
        // NOTE(review): the produced string has unbalanced parentheses (a ")"
        // after "serializer factory = ..." plus the trailing ")") — presumably
        // only used for logging, but worth confirming nothing parses it.
        return "StoreDefinition(name = " + getName() + ", type = " + getType() + ", description = "
               + getDescription() + ", key-serializer = " + getKeySerializer()
               + ", value-serializer = " + getValueSerializer() + ", routing = "
               + getRoutingPolicy() + ", routing-strategy = " + getRoutingStrategyType()
               + ", replication = " + getReplicationFactor() + ", required-reads = "
               + getRequiredReads() + ", preferred-reads = " + getPreferredReads()
               + ", required-writes = " + getRequiredWrites() + ", preferred-writes = "
               + getPreferredWrites() + ", view-target = " + getViewTargetStoreName()
               + ", value-transformation = " + getValueTransformation() + ", retention-days = "
               + getRetentionDays() + ", throttle-rate = " + getRetentionScanThrottleRate()
               + ", zone-replication-factor = " + getZoneReplicationFactor()
               + ", zone-count-reads = " + getZoneCountReads() + ", zone-count-writes = "
               + getZoneCountWrites() + ", serializer factory = " + getSerializerFactory() + ")"
               + ", hinted-handoff-strategy = " + getHintedHandoffStrategyType()
               + ", hint-preflist-size = " + getHintPrefListSize() + ", owners = " + getOwners()
               + ", memory-footprint(MB)" + getMemoryFootprintMB() + ")";
    }
}
|
cshaxu/voldemort
|
src/java/voldemort/store/StoreDefinition.java
|
Java
|
apache-2.0
| 18,959 |
//
// This path was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this path will be lost upon recompilation of the source schema.
// Generated on: 2010.06.14 at 12:38:27 PM CEST
//
package org.opencb.commons.bioformats.protein.uniprot.v135jaxb;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlType;
/**
 * A location can be either a position or have both a begin and end.
 *
 * <p>NOTE: this class is JAXB-generated from the UniProt schema (see the file
 * header) — hand edits will be lost on regeneration.
 *
 * <p>Java class for locationType complex type. The following schema fragment
 * specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType name="locationType">
 *   &lt;complexContent>
 *     &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *       &lt;choice>
 *         &lt;sequence>
 *           &lt;element name="begin" type="{http://uniprot.org/uniprot}positionType"/>
 *           &lt;element name="end" type="{http://uniprot.org/uniprot}positionType"/>
 *         &lt;/sequence>
 *         &lt;element name="position" type="{http://uniprot.org/uniprot}positionType"/>
 *       &lt;/choice>
 *       &lt;attribute name="sequence" type="{http://www.w3.org/2001/XMLSchema}string" />
 *     &lt;/restriction>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "locationType", propOrder = {
        "begin",
        "end",
        "position"
})
public class LocationType {

    // Per the schema choice above, either (begin, end) or position is
    // populated for a given instance, never both.
    protected PositionType begin;
    protected PositionType end;
    protected PositionType position;
    @XmlAttribute
    protected String sequence;

    /**
     * Gets the value of the begin property.
     *
     * @return possible object is {@link PositionType }
     */
    public PositionType getBegin() {
        return begin;
    }

    /**
     * Sets the value of the begin property.
     *
     * @param value allowed object is {@link PositionType }
     */
    public void setBegin(PositionType value) {
        this.begin = value;
    }

    /**
     * Gets the value of the end property.
     *
     * @return possible object is {@link PositionType }
     */
    public PositionType getEnd() {
        return end;
    }

    /**
     * Sets the value of the end property.
     *
     * @param value allowed object is {@link PositionType }
     */
    public void setEnd(PositionType value) {
        this.end = value;
    }

    /**
     * Gets the value of the position property.
     *
     * @return possible object is {@link PositionType }
     */
    public PositionType getPosition() {
        return position;
    }

    /**
     * Sets the value of the position property.
     *
     * @param value allowed object is {@link PositionType }
     */
    public void setPosition(PositionType value) {
        this.position = value;
    }

    /**
     * Gets the value of the sequence property.
     *
     * @return possible object is {@link String }
     */
    public String getSequence() {
        return sequence;
    }

    /**
     * Sets the value of the sequence property.
     *
     * @param value allowed object is {@link String }
     */
    public void setSequence(String value) {
        this.sequence = value;
    }
}
|
pawanpal01/java-common-libs
|
bioformats/src/main/java/org/opencb/commons/bioformats/protein/uniprot/v135jaxb/LocationType.java
|
Java
|
apache-2.0
| 3,597 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.wordpress.api.service.impl;
import java.util.List;
import org.apache.camel.component.wordpress.api.auth.WordpressBasicAuthentication;
import org.apache.camel.component.wordpress.api.model.Content;
import org.apache.camel.component.wordpress.api.model.Format;
import org.apache.camel.component.wordpress.api.model.Post;
import org.apache.camel.component.wordpress.api.model.PostSearchCriteria;
import org.apache.camel.component.wordpress.api.service.WordpressServicePosts;
import org.apache.camel.component.wordpress.api.test.WordpressMockServerTestSupport;
import org.apache.camel.component.wordpress.api.test.WordpressServerHttpRequestHandler;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.greaterThan;
import static org.junit.Assert.assertThat;
/**
 * Tests for the posts service adapter, run against the Wordpress mock server
 * started by {@link WordpressMockServerTestSupport}.
 */
public class WordpressServicePostsAdapterTest extends WordpressMockServerTestSupport {

    private static WordpressServicePosts servicePosts;

    @BeforeClass
    public static void before() {
        // Resolve the posts service once and authenticate with the mock
        // server's canned credentials.
        servicePosts = serviceProvider.getService(WordpressServicePosts.class);
        servicePosts.setWordpressAuthentication(new WordpressBasicAuthentication(WordpressServerHttpRequestHandler.USERNAME, WordpressServerHttpRequestHandler.PASSWORD));
    }

    @Test
    public void testRetrievePost() {
        final Post post = servicePosts.retrieve(1);
        assertThat(post, not(nullValue()));
        assertThat(post.getId(), is(greaterThan(0)));
    }

    @Test
    public void testCreatePost() {
        final Post entity = new Post();
        entity.setAuthor(2);
        entity.setTitle(new Content("hello from postman 2"));
        entity.setContent(new Content("hello world 2"));
        entity.setFormat(Format.standard);
        final Post post = servicePosts.create(entity);
        assertThat(post, not(nullValue()));
        // The expected id (9) presumably comes from the mock server's canned
        // create-post response — verify against the fixture if it changes.
        assertThat(post.getId(), is(9));
    }

    @Test
    public void testListPosts() {
        final PostSearchCriteria criteria = new PostSearchCriteria();
        criteria.setPage(1);
        criteria.setPerPage(10);
        final List<Post> posts = servicePosts.list(criteria);
        assertThat(posts, is(not(emptyCollectionOf(Post.class))));
        // A full page of 10 is expected from the mock fixture for page 1.
        assertThat(posts.size(), is(10));
    }
}
|
DariusX/camel
|
components/camel-wordpress/src/test/java/org/apache/camel/component/wordpress/api/service/impl/WordpressServicePostsAdapterTest.java
|
Java
|
apache-2.0
| 3,262 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.security.access;
import static org.apache.hadoop.hbase.security.access.Permission.Action.READ;
import static org.apache.hadoop.hbase.security.access.Permission.Action.WRITE;
import static org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController.SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl;
import static org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController.SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl;
import static org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController.SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.SecurityTests;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Category({ SecurityTests.class, LargeTests.class })
public class TestSnapshotScannerHDFSAclController {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestSnapshotScannerHDFSAclController.class);
@Rule
public TestName name = new TestName();
private static final Logger LOG =
LoggerFactory.getLogger(TestSnapshotScannerHDFSAclController.class);
private static final String UN_GRANT_USER = "un_grant_user";
private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
private static Configuration conf = TEST_UTIL.getConfiguration();
private static Admin admin = null;
private static FileSystem FS = null;
private static Path rootDir = null;
private static User unGrantUser = null;
private static SnapshotScannerHDFSAclHelper helper;
private static Table aclTable;
/**
 * Starts a secured mini-cluster with the SnapshotScannerHDFSAclController
 * coprocessor enabled, then normalizes HDFS permissions on the HBase root and
 * snapshot-restore directories so the ACL sync logic can be exercised.
 */
@BeforeClass
public static void setupBeforeClass() throws Exception {
    // enable hdfs acl and set umask to 027
    conf.setBoolean("dfs.namenode.acls.enabled", true);
    conf.set("fs.permissions.umask-mode", "027");
    // enable hbase hdfs acl feature
    conf.setBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, true);
    // enable secure
    conf.set(User.HBASE_SECURITY_CONF_KEY, "simple");
    conf.set(SnapshotScannerHDFSAclHelper.SNAPSHOT_RESTORE_TMP_DIR,
        SnapshotScannerHDFSAclHelper.SNAPSHOT_RESTORE_TMP_DIR_DEFAULT);
    SecureTestUtil.enableSecurity(conf);
    // add SnapshotScannerHDFSAclController coprocessor
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) + ","
            + SnapshotScannerHDFSAclController.class.getName());

    TEST_UTIL.startMiniCluster();
    // Wait until the coprocessor reports itself initialized and the ACL table
    // is available before any test runs.
    SnapshotScannerHDFSAclController coprocessor = TEST_UTIL.getHBaseCluster().getMaster()
        .getMasterCoprocessorHost().findCoprocessor(SnapshotScannerHDFSAclController.class);
    TEST_UTIL.waitFor(30000, () -> coprocessor.checkInitialized("check initialized"));
    TEST_UTIL.waitTableAvailable(PermissionStorage.ACL_TABLE_NAME);
    admin = TEST_UTIL.getAdmin();
    rootDir = TEST_UTIL.getDefaultRootDirPath();
    FS = rootDir.getFileSystem(conf);
    unGrantUser = User.createUserForTesting(conf, UN_GRANT_USER, new String[] {});
    helper = new SnapshotScannerHDFSAclHelper(conf, admin.getConnection());

    // set hbase directory permission
    FsPermission commonDirectoryPermission =
        new FsPermission(conf.get(SnapshotScannerHDFSAclHelper.COMMON_DIRECTORY_PERMISSION,
            SnapshotScannerHDFSAclHelper.COMMON_DIRECTORY_PERMISSION_DEFAULT));
    // Apply the common permission to the root dir and every ancestor.
    Path path = rootDir;
    while (path != null) {
        FS.setPermission(path, commonDirectoryPermission);
        path = path.getParent();
    }
    // set restore directory permission
    Path restoreDir = new Path(SnapshotScannerHDFSAclHelper.SNAPSHOT_RESTORE_TMP_DIR_DEFAULT);
    if (!FS.exists(restoreDir)) {
        FS.mkdirs(restoreDir);
        FS.setPermission(restoreDir,
            new FsPermission(
                conf.get(SnapshotScannerHDFSAclHelper.SNAPSHOT_RESTORE_DIRECTORY_PERMISSION,
                    SnapshotScannerHDFSAclHelper.SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT)));
    }
    // Ancestors of the restore dir also get the common permission.
    path = restoreDir.getParent();
    while (path != null) {
        FS.setPermission(path, commonDirectoryPermission);
        path = path.getParent();
    }
    aclTable = admin.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME);
}
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Stops the mini cluster started in setupBeforeClass().
    TEST_UTIL.shutdownMiniCluster();
  }
  /**
   * Takes a snapshot of the given table and then waits so the coprocessor
   * has time to set up the corresponding HDFS ACLs for the new snapshot.
   * NOTE(review): the fixed 3s sleep is a heuristic and could be flaky on a
   * slow machine — consider polling for the ACLs instead.
   */
  private void snapshotAndWait(final String snapShotName, final TableName tableName)
    throws Exception{
    admin.snapshot(snapShotName, tableName);
    LOG.info("Sleep for three seconds, waiting for HDFS Acl setup");
    Threads.sleep(3000);
  }
@Test
public void testGrantGlobal1() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, name.getMethodName());
String snapshot1 = namespace + "s1";
String snapshot2 = namespace + "s2";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
snapshotAndWait(snapshot1, table);
// grant G(R)
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName));
// grant G(W) with merging existing permissions
admin.grant(
new UserPermission(grantUserName, Permission.newBuilder().withActions(WRITE).build()), true);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName));
// grant G(W) without merging
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1);
assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
// grant G(R)
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
// take a snapshot and ACLs are inherited automatically
snapshotAndWait(snapshot2, table);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 6);
assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName));
deleteTable(table);
}
@Test
public void testGrantGlobal2() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace1 = name.getMethodName();
TableName table1 = TableName.valueOf(namespace1, name.getMethodName() + ".1");
String namespace2 = namespace1 + "2";
TableName table2 = TableName.valueOf(namespace2, name.getMethodName() + ".2");
String snapshot1 = namespace1 + "s1";
String snapshot2 = namespace2 + "s2";
// grant G(R), grant namespace1(R)
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
// create table in namespace1 and snapshot
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
snapshotAndWait(snapshot1, table1);
admin.grant(new UserPermission(grantUserName,
Permission.newBuilder(namespace1).withActions(READ).build()),
false);
// grant G(W)
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE);
// create table in namespace2 and snapshot
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2);
snapshotAndWait(snapshot2, table2);
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1);
assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace1));
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace2));
checkUserAclEntry(FS, helper.getGlobalRootPaths(), grantUserName, false, false);
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace1), grantUserName, true, true);
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace2), grantUserName, false, false);
deleteTable(table1);
deleteTable(table2);
}
@Test
public void testGrantGlobal3() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table1 = TableName.valueOf(namespace, name.getMethodName() + ".1");
TableName table2 = TableName.valueOf(namespace, name.getMethodName() + ".2");
String snapshot1 = namespace + "s1";
String snapshot2 = namespace + "s2";
// grant G(R)
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
// grant table1(R)
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
snapshotAndWait(snapshot1, table1);
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ);
// grant G(W)
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE);
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2);
snapshotAndWait(snapshot2, table2);
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1);
assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1));
assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table2));
checkUserAclEntry(FS, helper.getGlobalRootPaths(), grantUserName, false, false);
checkUserAclEntry(FS, helper.getTableRootPaths(table2, false), grantUserName, false, false);
checkUserAclEntry(FS, helper.getTableRootPaths(table1, false), grantUserName, true, true);
deleteTable(table1);
deleteTable(table2);
}
@Test
public void testGrantNamespace1() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table1 = TableName.valueOf(namespace, name.getMethodName() + ".1");
TableName table2 = TableName.valueOf(namespace, name.getMethodName() + ".2");
String snapshot1 = namespace + "s1";
String snapshot2 = namespace + "s2";
// create table1 and snapshot
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
snapshotAndWait(snapshot1, table1);
// grant N(R)
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
// create table2 and snapshot, ACLs can be inherited automatically
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2);
snapshotAndWait(snapshot2, table2);
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 6);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, unGrantUser, snapshot1, -1);
assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table1));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, true);
// grant N(W)
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, WRITE);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1);
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, false, false);
deleteTable(table1);
deleteTable(table2);
}
@Test
public void testGrantNamespace2() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table1 = TableName.valueOf(namespace, name.getMethodName());
String snapshot1 = namespace + "s1";
// create table1 and snapshot
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
snapshotAndWait(snapshot1, table1);
// grant N(R)
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
// grant table1(R)
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ);
// grant N(W)
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, WRITE);
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, false);
assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1));
checkUserAclEntry(FS, helper.getTableRootPaths(table1, false), grantUserName, true, true);
deleteTable(table1);
}
@Test
public void testGrantNamespace3() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, name.getMethodName());
String snapshot = namespace + "t1";
// create table1 and snapshot
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
snapshotAndWait(snapshot, table);
// grant namespace(R)
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
// grant global(R)
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
// grant namespace(W)
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, WRITE);
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, true);
assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName));
checkUserAclEntry(FS, helper.getGlobalRootPaths(), grantUserName, true, true);
deleteTable(table);
}
  /**
   * Verifies table-level grants: a column-family-only grant gives no
   * snapshot access, a full table READ grant does, and subsequent WRITE
   * grants keep or drop the HDFS acls depending on whether the existing
   * permissions are merged.
   */
  @Test
  public void testGrantTable() throws Exception {
    final String grantUserName = name.getMethodName();
    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
    String namespace = name.getMethodName();
    TableName table1 = TableName.valueOf(namespace, name.getMethodName());
    String snapshot1 = namespace + "s1";
    String snapshot2 = namespace + "s2";
    LOG.info("Create table");
    try (Table t = TestHDFSAclHelper.createTable(TEST_UTIL, table1)) {
      TestHDFSAclHelper.put(t);
      snapshotAndWait(snapshot1, table1);
      // table owner can scan table snapshot
      LOG.info("Scan snapshot");
      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL,
        User.createUserForTesting(conf, "owner", new String[] {}), snapshot1, 6);
      // grant table1 family(R) — a family-only grant must not expose the snapshot
      SecureTestUtil.grantOnTable(TEST_UTIL, grantUserName, table1, TestHDFSAclHelper.COLUMN1, null,
        READ);
      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1);
      // grant table1(R) — full table READ grants the HDFS acls
      TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ);
      TestHDFSAclHelper.put2(t);
      snapshotAndWait(snapshot2, table1);
      // check scan snapshot: both the old and the new snapshot are readable
      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 10);
      assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1));
      checkUserAclEntry(FS, helper.getTableRootPaths(table1, false), grantUserName, true, true);
    }
    // grant table1(W) with merging existing permissions — READ survives, acls kept
    admin.grant(
      new UserPermission(grantUserName, Permission.newBuilder(table1).withActions(WRITE).build()),
      true);
    assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1));
    checkUserAclEntry(FS, helper.getTableRootPaths(table1, false), grantUserName, true, true);
    // grant table1(W) without merging existing permissions — READ dropped, acls removed
    TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, WRITE);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1);
    assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table1));
    checkUserAclEntry(FS, helper.getTableRootPaths(table1, false), grantUserName, false, false);
    deleteTable(table1);
  }
@Test
public void testGrantMobTable() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, name.getMethodName());
String snapshot = namespace + "s1";
try (Table t = TestHDFSAclHelper.createMobTable(TEST_UTIL, table)) {
TestHDFSAclHelper.put(t);
snapshotAndWait(snapshot, table);
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table));
checkUserAclEntry(FS, helper.getTableRootPaths(table, false), grantUserName, true, true);
}
deleteTable(table);
}
@Test
public void testRevokeGlobal1() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table1 = TableName.valueOf(namespace, name.getMethodName());
String snapshot1 = namespace + "t1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
snapshotAndWait(snapshot1, table1);
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
SecureTestUtil.revokeGlobal(TEST_UTIL, grantUserName, READ);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1);
assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
checkUserAclEntry(FS, helper.getGlobalRootPaths(), grantUserName, false, false);
deleteTable(table1);
}
@Test
public void testRevokeGlobal2() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
String snapshot1 = namespace + "s1";
TableName table1 = TableName.valueOf(namespace, name.getMethodName());
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
snapshotAndWait(snapshot1, table1);
// grant G(R), grant N(R), grant T(R) -> revoke G(R)
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ);
SecureTestUtil.revokeGlobal(TEST_UTIL, grantUserName, READ);
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
checkUserAclEntry(FS, helper.getGlobalRootPaths(), grantUserName, false, false);
assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, true);
deleteTable(table1);
}
@Test
public void testRevokeGlobal3() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table1 = TableName.valueOf(namespace, name.getMethodName());
String snapshot1 = namespace + "t1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
snapshotAndWait(snapshot1, table1);
// grant G(R), grant T(R) -> revoke G(R)
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ);
SecureTestUtil.revokeGlobal(TEST_UTIL, grantUserName, READ);
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
checkUserAclEntry(FS, helper.getGlobalRootPaths(), grantUserName, false, false);
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, false);
assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1));
checkUserAclEntry(FS, helper.getTableRootPaths(table1, false), grantUserName, true, true);
deleteTable(table1);
}
@Test
public void testRevokeNamespace1() throws Exception {
String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table1 = TableName.valueOf(namespace, name.getMethodName());
String snapshot1 = namespace + "s1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
snapshotAndWait(snapshot1, table1);
// revoke N(R)
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(namespace).build()));
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1);
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, false, false);
// grant N(R), grant G(R) -> revoke N(R)
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(namespace).build()));
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, true);
deleteTable(table1);
}
@Test
public void testRevokeNamespace2() throws Exception {
String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, name.getMethodName());
String snapshot = namespace + "s1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
snapshotAndWait(snapshot, table);
// grant N(R), grant T(R) -> revoke N(R)
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
SecureTestUtil.revokeFromNamespace(TEST_UTIL, grantUserName, namespace, READ);
// check scan snapshot
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, false);
assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table));
checkUserAclEntry(FS, helper.getTableRootPaths(table, false),
grantUserName, true, true);
deleteTable(table);
}
@Test
public void testRevokeTable1() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, name.getMethodName());
String snapshot = namespace + "t1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
snapshotAndWait(snapshot, table);
// grant T(R) -> revoke table family
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
SecureTestUtil.revokeFromTable(TEST_UTIL, grantUserName, table, TestHDFSAclHelper.COLUMN1, null,
READ);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
// grant T(R) -> revoke T(R)
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(table).build()));
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, -1);
assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table));
checkUserAclEntry(FS, helper.getTableRootPaths(table, false), grantUserName, false, false);
deleteTable(table);
}
@Test
public void testRevokeTable2() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, name.getMethodName());
String snapshot = namespace + "t1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
snapshotAndWait(snapshot, table);
// grant T(R), grant N(R) -> revoke T(R)
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(table).build()));
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table));
checkUserAclEntry(FS, helper.getTableRootPaths(table, false), grantUserName, true, true);
assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, true);
deleteTable(table);
}
@Test
public void testRevokeTable3() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, name.getMethodName());
String snapshot = namespace + "t1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
snapshotAndWait(snapshot, table);
// grant T(R), grant G(R) -> revoke T(R)
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(table).build()));
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table));
checkUserAclEntry(FS, helper.getTableRootPaths(table, false), grantUserName, true, true);
assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName));
checkUserAclEntry(FS, helper.getGlobalRootPaths(), grantUserName, true, true);
deleteTable(table);
}
  /**
   * Verifies that truncating a table preserves both the per-table and the
   * per-namespace HDFS acls: snapshots taken before and after the truncate
   * stay readable by the granted users.
   */
  @Test
  public void testTruncateTable() throws Exception {
    String grantUserName = name.getMethodName();
    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
    String grantUserName2 = grantUserName + "2";
    User grantUser2 = User.createUserForTesting(conf, grantUserName2, new String[] {});
    String namespace = name.getMethodName();
    TableName tableName = TableName.valueOf(namespace, name.getMethodName());
    String snapshot = namespace + "s1";
    String snapshot2 = namespace + "s2";
    try (Table t = TestHDFSAclHelper.createTable(TEST_UTIL, tableName)) {
      TestHDFSAclHelper.put(t);
      // snapshot before the truncate
      snapshotAndWait(snapshot, tableName);
      // grant user2 namespace permission
      SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName2, namespace, READ);
      // grant user table permission
      TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, tableName, READ);
      // truncate table (preserving splits)
      admin.disableTable(tableName);
      admin.truncateTable(tableName, true);
      TestHDFSAclHelper.put2(t);
      // snapshot after the truncate
      snapshotAndWait(snapshot2, tableName);
      // check scan snapshot: both users can read both snapshots
      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser2, snapshot, 6);
      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 9);
      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser2, snapshot2, 9);
      assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName2, namespace));
      checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName2, true, true);
      assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, tableName));
      checkUserAclEntry(FS, helper.getTableRootPaths(tableName, false), grantUserName, true, true);
      checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, false);
    }
    deleteTable(tableName);
  }
  /**
   * Verifies that deleting a table removes the table-scoped HDFS acls
   * (data/mob dirs) while the namespace-scoped acls keep the snapshot
   * readable through the archive directory.
   */
  @Test
  public void testDeleteTable() throws Exception {
    String namespace = name.getMethodName();
    String grantUserName1 = namespace + "1";
    String grantUserName2 = namespace + "2";
    User grantUser1 = User.createUserForTesting(conf, grantUserName1, new String[] {});
    User grantUser2 = User.createUserForTesting(conf, grantUserName2, new String[] {});
    TableName table = TableName.valueOf(namespace, name.getMethodName());
    String snapshot1 = namespace + "t1";
    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
    // snapshot
    snapshotAndWait(snapshot1, table);
    // grant user1 table permission, user2 namespace permission
    TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName1, table, READ);
    SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName2, namespace, READ);
    // delete table
    admin.disableTable(table);
    admin.deleteTable(table);
    // grantUser1 loses access (table acl deleted with the table); grantUser2
    // keeps access via the namespace acl
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser1, snapshot1, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser2, snapshot1, 6);
    assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName2, namespace));
    checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName2, true, true);
    assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName1, table));
    checkUserAclEntry(FS, helper.getPathHelper().getDataTableDir(table), grantUserName1, false,
      false);
    checkUserAclEntry(FS, helper.getPathHelper().getMobTableDir(table), grantUserName1, false,
      false);
    checkUserAclEntry(FS, helper.getPathHelper().getArchiveTableDir(table), grantUserName1, true,
      false);
    // check tmp table directory does not exist
    Path tmpTableDir = helper.getPathHelper().getTmpTableDir(table);
    assertFalse(FS.exists(tmpTableDir));
    deleteTable(table);
  }
@Test
public void testDeleteNamespace() throws Exception {
String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, name.getMethodName());
String snapshot = namespace + "t1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
// snapshot
snapshotAndWait(snapshot, table);
// grant namespace permission
SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
// delete table
admin.disableTable(table);
admin.deleteTable(table);
// delete namespace
admin.deleteNamespace(namespace);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
checkUserAclEntry(FS, helper.getPathHelper().getArchiveNsDir(namespace), grantUserName, true,
false);
// check tmp namespace dir does not exist
assertFalse(FS.exists(helper.getPathHelper().getTmpNsDir(namespace)));
assertFalse(FS.exists(helper.getPathHelper().getDataNsDir(namespace)));
// assertFalse(fs.exists(FS, helper.getPathHelper().getMobDataNsDir(namespace)));
deleteTable(table);
}
@Test
public void testCleanArchiveTableDir() throws Exception {
final String grantUserName = name.getMethodName();
User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
String namespace = name.getMethodName();
TableName table = TableName.valueOf(namespace, name.getMethodName());
String snapshot = namespace + "t1";
TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
snapshotAndWait(snapshot, table);
TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
// HFileCleaner will not delete archive table directory even if it's a empty directory
HFileCleaner cleaner = TEST_UTIL.getHBaseCluster().getMaster().getHFileCleaner();
cleaner.choreForTesting();
Path archiveTableDir = HFileArchiveUtil.getTableArchivePath(rootDir, table);
assertTrue(FS.exists(archiveTableDir));
checkUserAclEntry(FS, helper.getTableRootPaths(table, false), grantUserName, true, true);
// Check SnapshotScannerHDFSAclCleaner method
assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveTableDir(archiveTableDir));
assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveNamespaceDir(archiveTableDir.getParent()));
assertTrue(
SnapshotScannerHDFSAclCleaner.isArchiveDataDir(archiveTableDir.getParent().getParent()));
assertFalse(SnapshotScannerHDFSAclCleaner
.isArchiveDataDir(archiveTableDir.getParent().getParent().getParent()));
deleteTable(table);
}
  /**
   * Verifies enabling ACL-sync via modifyTable on a table that was created
   * with the feature disabled: grants made while the feature was off take
   * effect on HDFS only after the table is modified, except a global grant
   * issued after table creation, whose table-dir acl is skipped.
   */
  @Test
  public void testModifyTable1() throws Exception {
    String namespace = name.getMethodName();
    TableName table = TableName.valueOf(namespace, name.getMethodName());
    String snapshot = namespace + "t1";
    String tableUserName = name.getMethodName();
    User tableUser = User.createUserForTesting(conf, tableUserName, new String[] {});
    String tableUserName2 = tableUserName + "2";
    User tableUser2 = User.createUserForTesting(conf, tableUserName2, new String[] {});
    String tableUserName3 = tableUserName + "3";
    User tableUser3 = User.createUserForTesting(conf, tableUserName3, new String[] {});
    String nsUserName = tableUserName + "-ns";
    User nsUser = User.createUserForTesting(conf, nsUserName, new String[] {});
    String globalUserName = tableUserName + "-global";
    User globalUser = User.createUserForTesting(conf, globalUserName, new String[] {});
    String globalUserName2 = tableUserName + "-global-2";
    User globalUser2 = User.createUserForTesting(conf, globalUserName2, new String[] {});
    // global and namespace grants issued BEFORE the table exists
    SecureTestUtil.grantGlobal(TEST_UTIL, globalUserName, READ);
    TestHDFSAclHelper.createNamespace(TEST_UTIL, namespace);
    SecureTestUtil.grantOnNamespace(TEST_UTIL, nsUserName, namespace, READ);
    // table is created with ACL-sync disabled
    TableDescriptor td = TestHDFSAclHelper.createUserScanSnapshotDisabledTable(TEST_UTIL, table);
    snapshotAndWait(snapshot, table);
    // grants issued AFTER table creation, while ACL-sync is still disabled
    SecureTestUtil.grantGlobal(TEST_UTIL, globalUserName2, READ);
    TestHDFSAclHelper.grantOnTable(TEST_UTIL, tableUserName, table, READ);
    SecureTestUtil.grantOnTable(TEST_UTIL, tableUserName2, table, TestHDFSAclHelper.COLUMN1, null,
      READ);
    TestHDFSAclHelper.grantOnTable(TEST_UTIL, tableUserName3, table, WRITE);
    // with the feature disabled none of the table-level users can scan
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser2, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser3, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, nsUser, snapshot, -1);
    // Global permission is set before table is created, the acl is inherited
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, globalUser, snapshot, 6);
    // Global permission is set after table is created, the table dir acl is skip
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, globalUser2, snapshot, -1);
    // enable user scan snapshot (ACL-sync) via modifyTable
    admin.modifyTable(TableDescriptorBuilder.newBuilder(td)
      .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true").build());
    // check scan snapshot: READ grants now take effect; family-only and WRITE don't
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser, snapshot, 6);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser2, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser3, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, nsUser, snapshot, 6);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, globalUser, snapshot, 6);
    // check acl table storage and ACLs in dirs
    assertTrue(hasUserGlobalHdfsAcl(aclTable, globalUserName));
    checkUserAclEntry(FS, helper.getGlobalRootPaths(), globalUserName, true, true);
    assertTrue(hasUserNamespaceHdfsAcl(aclTable, nsUserName, namespace));
    checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), nsUserName, true, true);
    assertTrue(hasUserTableHdfsAcl(aclTable, tableUserName, table));
    checkUserAclEntry(FS, helper.getTableRootPaths(table, false), tableUserName, true, true);
    for (String user : new String[] { tableUserName2, tableUserName3 }) {
      assertFalse(hasUserTableHdfsAcl(aclTable, user, table));
      checkUserAclEntry(FS, helper.getTableRootPaths(table, false), user, false, false);
    }
    deleteTable(table);
  }
  @Test
  public void testModifyTable2() throws Exception {
    // Verifies that turning HDFS ACL sync OFF via modifyTable removes the table's
    // HDFS ACL entries for table-scoped grantees while leaving namespace/global
    // scoped entries (and grants recorded on other tables) intact.
    String namespace = name.getMethodName();
    TableName table = TableName.valueOf(namespace, name.getMethodName() + ".1");
    String snapshot = namespace + "t1";
    TableName table2 = TableName.valueOf(namespace, name.getMethodName() + ".2");
    // One test user per grant scope/shape exercised below.
    String tableUserName = name.getMethodName();
    User tableUser = User.createUserForTesting(conf, tableUserName, new String[] {});
    String tableUserName2 = tableUserName + "2";
    User tableUser2 = User.createUserForTesting(conf, tableUserName2, new String[] {});
    String tableUserName3 = tableUserName + "3";
    User tableUser3 = User.createUserForTesting(conf, tableUserName3, new String[] {});
    String nsUserName = tableUserName + "-ns";
    User nsUser = User.createUserForTesting(conf, nsUserName, new String[] {});
    String globalUserName = tableUserName + "-global";
    User globalUser = User.createUserForTesting(conf, globalUserName, new String[] {});
    String globalUserName2 = tableUserName + "-global-2";
    User globalUser2 = User.createUserForTesting(conf, globalUserName2, new String[] {});
    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
    // Grants while ACL sync is still enabled: global READ x2, namespace READ,
    // table READ, column-family READ, and table WRITE (WRITE is never synced).
    SecureTestUtil.grantGlobal(TEST_UTIL, globalUserName, READ);
    SecureTestUtil.grantGlobal(TEST_UTIL, globalUserName2, READ);
    SecureTestUtil.grantOnNamespace(TEST_UTIL, nsUserName, namespace, READ);
    TestHDFSAclHelper.grantOnTable(TEST_UTIL, tableUserName, table, READ);
    SecureTestUtil.grantOnTable(TEST_UTIL, tableUserName2, table, TestHDFSAclHelper.COLUMN1, null,
      READ);
    TestHDFSAclHelper.grantOnTable(TEST_UTIL, tableUserName3, table, WRITE);
    // tableUser2 additionally gets namespace READ, so it must survive the disable.
    SecureTestUtil.grantOnNamespace(TEST_UTIL, tableUserName2, namespace, READ);
    TestHDFSAclHelper.createTable(TEST_UTIL, table2);
    TestHDFSAclHelper.grantOnTable(TEST_UTIL, tableUserName3, table2, READ);
    // disable user scan snapshot
    admin.modifyTable(TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
      .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "false").build());
    // Nobody can scan the snapshot once sync is disabled on the table (-1 = denied).
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser2, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser3, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, nsUser, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, globalUser, snapshot, -1);
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, globalUser2, snapshot, -1);
    // check access
    // No user of any scope should retain an HDFS ACL entry on the table dirs.
    String[] users = new String[] { globalUserName, globalUserName2, nsUserName, tableUserName,
      tableUserName2, tableUserName3 };
    for (Path path : helper.getTableRootPaths(table, false)) {
      for (String user : users) {
        checkUserAclEntry(FS, path, user, false, false);
      }
    }
    // Namespace dirs: table-only grantee loses its entries, the grantee that also
    // holds a namespace grant keeps both, and tableUser3 keeps only the access
    // entry coming from its grant on table2 in the same namespace.
    String[] nsUsers = new String[] { globalUserName, globalUserName2, nsUserName };
    for (Path path : helper.getNamespaceRootPaths(namespace)) {
      checkUserAclEntry(FS, path, tableUserName, false, false);
      checkUserAclEntry(FS, path, tableUserName2, true, true);
      checkUserAclEntry(FS, path, tableUserName3, true, false);
      for (String user : nsUsers) {
        checkUserAclEntry(FS, path, user, true, true);
      }
    }
    // The HDFS-ACL bookkeeping table must mirror the same state.
    assertTrue(hasUserNamespaceHdfsAcl(aclTable, nsUserName, namespace));
    assertTrue(hasUserNamespaceHdfsAcl(aclTable, tableUserName2, namespace));
    assertFalse(hasUserTableHdfsAcl(aclTable, tableUserName, table));
    deleteTable(table);
    deleteTable(table2);
  }
  @Test
  public void testRestartMaster() throws Exception {
    // Verifies that a master restart (1) archives leftover regions from a table's
    // tmp directory instead of leaking them and (2) preserves HDFS ACL grants so
    // snapshot scanning still works for previously granted users.
    final String grantUserName = name.getMethodName();
    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
    String namespace = name.getMethodName();
    TableName table = TableName.valueOf(namespace, name.getMethodName() + ".1");
    TableName table2 = TableName.valueOf(namespace, name.getMethodName() + ".2");
    String snapshot = namespace + "t1";
    admin.createNamespace(NamespaceDescriptor.create(namespace).build());
    // create table2
    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2);
    // make some region files in tmp dir and check if master archive these region correctly
    Path tmpTableDir = helper.getPathHelper().getTmpTableDir(table2);
    // make a empty region dir, this is an error region
    FS.mkdirs(new Path(tmpTableDir, "1"));
    // copy regions from data dir, this is a valid region
    for (Path regionDir : FSUtils.getRegionDirs(FS,
      helper.getPathHelper().getDataTableDir(table2))) {
      FSUtils.copyFilesParallel(FS, regionDir, FS,
        new Path(tmpTableDir, regionDir.getName() + "abc"), conf, 1);
    }
    // Sanity check: tmp dir now holds the fabricated entries before restart.
    // NOTE(review): expected count of 4 presumably follows from the regions created
    // by createTableAndPut plus the bogus "1" dir — confirm against the helper.
    assertEquals(4, FS.listStatus(tmpTableDir).length);
    // grant N(R)
    SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
    // restart cluster and tmp directory will not be deleted
    TEST_UTIL.getMiniHBaseCluster().shutdown();
    TEST_UTIL.restartHBaseCluster(1);
    TEST_UTIL.waitUntilNoRegionsInTransition();
    // reset the cached configs after restart
    conf = TEST_UTIL.getConfiguration();
    admin = TEST_UTIL.getAdmin();
    helper = new SnapshotScannerHDFSAclHelper(conf, admin.getConnection());
    Path tmpNsDir = helper.getPathHelper().getTmpNsDir(namespace);
    assertTrue(tmpNsDir + " should survive the restart", FS.exists(tmpNsDir));
    // check all regions in tmp table2 dir are archived
    assertEquals(0, FS.listStatus(tmpTableDir).length);
    // create table1 and snapshot
    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
    aclTable = TEST_UTIL.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME);
    snapshotAndWait(snapshot, table);
    // The pre-restart namespace READ grant still allows scanning the new snapshot.
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
    deleteTable(table);
    deleteTable(table2);
  }
static void checkUserAclEntry(FileSystem fs, List<Path> paths, String user,
boolean requireAccessAcl, boolean requireDefaultAcl) throws Exception {
for (Path path : paths) {
checkUserAclEntry(fs, path, user, requireAccessAcl, requireDefaultAcl);
}
}
static void checkUserAclEntry(FileSystem fs, Path path, String userName, boolean requireAccessAcl,
boolean requireDefaultAcl) throws IOException {
boolean accessAclEntry = false;
boolean defaultAclEntry = false;
if (fs.exists(path)) {
for (AclEntry aclEntry : fs.getAclStatus(path).getEntries()) {
String user = aclEntry.getName();
if (user != null && user.equals(userName)) {
if (aclEntry.getScope() == AclEntryScope.DEFAULT) {
defaultAclEntry = true;
} else if (aclEntry.getScope() == AclEntryScope.ACCESS) {
accessAclEntry = true;
}
}
}
}
String message = "require user: " + userName + ", path: " + path.toString() + " acl";
assertEquals(message, requireAccessAcl, accessAclEntry);
assertEquals(message, requireDefaultAcl, defaultAclEntry);
}
static void deleteTable(TableName tableName) {
try {
admin.disableTable(tableName);
admin.deleteTable(tableName);
} catch (IOException e) {
LOG.warn("Failed to delete table: {}", tableName);
}
}
}
|
mahak/hbase
|
hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java
|
Java
|
apache-2.0
| 47,209 |
/*
* Copyright 2012-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.actuate.endpoint;
import java.util.Collection;
/**
 * Discovers endpoints and provides an {@link EndpointInfo} for each of them.
 * <p>
 * This is a {@linkplain FunctionalInterface functional interface} whose single
 * abstract method performs the discovery.
 *
 * @param <T> the type of the operation
 * @author Andy Wilkinson
 * @author Stephane Nicoll
 * @since 2.0.0
 */
@FunctionalInterface
public interface EndpointDiscoverer<T extends Operation> {

	/**
	 * Perform endpoint discovery.
	 * @return the discovered endpoints, one {@link EndpointInfo} per endpoint
	 */
	Collection<EndpointInfo<T>> discoverEndpoints();

}
|
bbrouwer/spring-boot
|
spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/EndpointDiscoverer.java
|
Java
|
apache-2.0
| 1,115 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.cost;
import com.google.common.collect.ImmutableMap;
import io.prestosql.Session;
import io.prestosql.execution.warnings.WarningCollector;
import io.prestosql.metadata.Metadata;
import io.prestosql.security.AllowAllAccessControl;
import io.prestosql.spi.type.BigintType;
import io.prestosql.spi.type.DecimalType;
import io.prestosql.spi.type.IntegerType;
import io.prestosql.spi.type.SmallintType;
import io.prestosql.spi.type.TinyintType;
import io.prestosql.spi.type.Type;
import io.prestosql.sql.analyzer.ExpressionAnalyzer;
import io.prestosql.sql.analyzer.Scope;
import io.prestosql.sql.planner.ExpressionInterpreter;
import io.prestosql.sql.planner.NoOpSymbolResolver;
import io.prestosql.sql.planner.Symbol;
import io.prestosql.sql.planner.TypeAnalyzer;
import io.prestosql.sql.planner.TypeProvider;
import io.prestosql.sql.tree.ArithmeticBinaryExpression;
import io.prestosql.sql.tree.ArithmeticUnaryExpression;
import io.prestosql.sql.tree.AstVisitor;
import io.prestosql.sql.tree.Cast;
import io.prestosql.sql.tree.CoalesceExpression;
import io.prestosql.sql.tree.Expression;
import io.prestosql.sql.tree.FunctionCall;
import io.prestosql.sql.tree.Literal;
import io.prestosql.sql.tree.Node;
import io.prestosql.sql.tree.NodeRef;
import io.prestosql.sql.tree.NullLiteral;
import io.prestosql.sql.tree.SymbolReference;
import javax.inject.Inject;
import java.util.Map;
import java.util.OptionalDouble;
import static io.prestosql.cost.StatsUtil.toStatsRepresentation;
import static io.prestosql.sql.analyzer.ExpressionAnalyzer.createConstantAnalyzer;
import static io.prestosql.sql.planner.LiteralInterpreter.evaluate;
import static io.prestosql.util.MoreMath.max;
import static io.prestosql.util.MoreMath.min;
import static java.lang.Double.NaN;
import static java.lang.Double.isFinite;
import static java.lang.Double.isNaN;
import static java.lang.Math.abs;
import static java.util.Collections.emptyMap;
import static java.util.Objects.requireNonNull;
/**
 * Derives {@link SymbolStatsEstimate}s for scalar expressions from the statistics of
 * the plan node supplying their input symbols.
 * <p>
 * Supported expression shapes are literals, symbol references, casts, unary and
 * binary arithmetic, {@code COALESCE} and constant-foldable function calls; any other
 * expression yields {@link SymbolStatsEstimate#unknown()}.
 */
public class ScalarStatsCalculator
{
    private final Metadata metadata;
    private final TypeAnalyzer typeAnalyzer;

    @Inject
    public ScalarStatsCalculator(Metadata metadata, TypeAnalyzer typeAnalyzer)
    {
        this.metadata = requireNonNull(metadata, "metadata cannot be null");
        this.typeAnalyzer = requireNonNull(typeAnalyzer, "typeAnalyzer is null");
    }

    /**
     * Estimates the statistics of {@code scalarExpression} evaluated over rows
     * described by {@code inputStatistics}.
     *
     * @param scalarExpression the expression to estimate
     * @param inputStatistics statistics of the node producing the input symbols
     * @param session session used for constant evaluation and type resolution
     * @param types types of the symbols referenced by the expression
     */
    public SymbolStatsEstimate calculate(Expression scalarExpression, PlanNodeStatsEstimate inputStatistics, Session session, TypeProvider types)
    {
        return new Visitor(inputStatistics, session, types).process(scalarExpression);
    }

    private class Visitor
            extends AstVisitor<SymbolStatsEstimate, Void>
    {
        private final PlanNodeStatsEstimate input;
        private final Session session;
        private final TypeProvider types;

        Visitor(PlanNodeStatsEstimate input, Session session, TypeProvider types)
        {
            this.input = input;
            this.session = session;
            this.types = types;
        }

        @Override
        protected SymbolStatsEstimate visitNode(Node node, Void context)
        {
            // Fallback for any expression shape not handled below.
            return SymbolStatsEstimate.unknown();
        }

        @Override
        protected SymbolStatsEstimate visitSymbolReference(SymbolReference node, Void context)
        {
            return input.getSymbolStatistics(Symbol.from(node));
        }

        @Override
        protected SymbolStatsEstimate visitNullLiteral(NullLiteral node, Void context)
        {
            return nullStatsEstimate();
        }

        @Override
        protected SymbolStatsEstimate visitLiteral(Literal node, Void context)
        {
            // A literal has exactly one distinct non-null value; when the value has a
            // double representation, the low/high range collapses onto it.
            ExpressionAnalyzer analyzer = createConstantAnalyzer(metadata, new AllowAllAccessControl(), session, ImmutableMap.of(), WarningCollector.NOOP);
            Type type = analyzer.analyze(node, Scope.create());
            Object value = evaluate(metadata, session.toConnectorSession(), analyzer.getExpressionTypes(), node);
            OptionalDouble doubleValue = toStatsRepresentation(metadata, session, type, value);
            SymbolStatsEstimate.Builder estimate = SymbolStatsEstimate.builder()
                    .setNullsFraction(0)
                    .setDistinctValuesCount(1);
            if (doubleValue.isPresent()) {
                estimate.setLowValue(doubleValue.getAsDouble());
                estimate.setHighValue(doubleValue.getAsDouble());
            }
            return estimate.build();
        }

        @Override
        protected SymbolStatsEstimate visitFunctionCall(FunctionCall node, Void context)
        {
            // Try to constant-fold the call; only a foldable call can be estimated.
            Map<NodeRef<Expression>, Type> expressionTypes = getExpressionTypes(session, node, types);
            ExpressionInterpreter interpreter = ExpressionInterpreter.expressionOptimizer(node, metadata, session, expressionTypes);
            Object value = interpreter.optimize(NoOpSymbolResolver.INSTANCE);

            if (value == null || value instanceof NullLiteral) {
                return nullStatsEstimate();
            }

            if (value instanceof Expression && !(value instanceof Literal)) {
                // value is not a constant
                return SymbolStatsEstimate.unknown();
            }

            // value is a constant
            return SymbolStatsEstimate.builder()
                    .setNullsFraction(0)
                    .setDistinctValuesCount(1)
                    .build();
        }

        private Map<NodeRef<Expression>, Type> getExpressionTypes(Session session, Expression expression, TypeProvider types)
        {
            ExpressionAnalyzer expressionAnalyzer = ExpressionAnalyzer.createWithoutSubqueries(
                    metadata,
                    new AllowAllAccessControl(),
                    session,
                    types,
                    emptyMap(),
                    // Fixed: the message previously concatenated a stray "%s" specifier
                    // ("Unexpected node: %s" + node) instead of just the node.
                    node -> new IllegalStateException("Unexpected node: " + node),
                    WarningCollector.NOOP,
                    false);
            expressionAnalyzer.analyze(expression, Scope.create());
            return expressionAnalyzer.getExpressionTypes();
        }

        @Override
        protected SymbolStatsEstimate visitCast(Cast node, Void context)
        {
            SymbolStatsEstimate sourceStats = process(node.getExpression());

            // todo - make this general postprocessing rule.
            double distinctValuesCount = sourceStats.getDistinctValuesCount();
            double lowValue = sourceStats.getLowValue();
            double highValue = sourceStats.getHighValue();

            if (isIntegralType(typeAnalyzer.getType(session, types, node))) {
                // todo handle low/high value changes if range gets narrower due to cast (e.g. BIGINT -> SMALLINT)
                if (isFinite(lowValue)) {
                    lowValue = Math.round(lowValue);
                }
                if (isFinite(highValue)) {
                    highValue = Math.round(highValue);
                }
                if (isFinite(lowValue) && isFinite(highValue)) {
                    // An integral range [low, high] can hold at most high - low + 1 values.
                    double integersInRange = highValue - lowValue + 1;
                    if (!isNaN(distinctValuesCount) && distinctValuesCount > integersInRange) {
                        distinctValuesCount = integersInRange;
                    }
                }
            }

            return SymbolStatsEstimate.builder()
                    .setNullsFraction(sourceStats.getNullsFraction())
                    .setLowValue(lowValue)
                    .setHighValue(highValue)
                    .setDistinctValuesCount(distinctValuesCount)
                    .build();
        }

        /**
         * Returns true for types whose values are whole numbers: the integer types
         * and scale-0 decimals.
         */
        private boolean isIntegralType(Type type)
        {
            if (type instanceof BigintType || type instanceof IntegerType || type instanceof SmallintType || type instanceof TinyintType) {
                return true;
            }
            if (type instanceof DecimalType) {
                return ((DecimalType) type).getScale() == 0;
            }
            return false;
        }

        @Override
        protected SymbolStatsEstimate visitArithmeticUnary(ArithmeticUnaryExpression node, Void context)
        {
            SymbolStatsEstimate stats = process(node.getValue());
            switch (node.getSign()) {
                case PLUS:
                    return stats;
                case MINUS:
                    // Negation mirrors the range around zero and swaps the bounds.
                    return SymbolStatsEstimate.buildFrom(stats)
                            .setLowValue(-stats.getHighValue())
                            .setHighValue(-stats.getLowValue())
                            .build();
                default:
                    throw new IllegalStateException("Unexpected sign: " + node.getSign());
            }
        }

        @Override
        protected SymbolStatsEstimate visitArithmeticBinary(ArithmeticBinaryExpression node, Void context)
        {
            requireNonNull(node, "node is null");
            SymbolStatsEstimate left = process(node.getLeft());
            SymbolStatsEstimate right = process(node.getRight());

            // Result is null when either side is null; NDV is capped by the row count.
            SymbolStatsEstimate.Builder result = SymbolStatsEstimate.builder()
                    .setAverageRowSize(Math.max(left.getAverageRowSize(), right.getAverageRowSize()))
                    .setNullsFraction(left.getNullsFraction() + right.getNullsFraction() - left.getNullsFraction() * right.getNullsFraction())
                    .setDistinctValuesCount(min(left.getDistinctValuesCount() * right.getDistinctValuesCount(), input.getOutputRowCount()));

            double leftLow = left.getLowValue();
            double leftHigh = left.getHighValue();
            double rightLow = right.getLowValue();
            double rightHigh = right.getHighValue();
            if (isNaN(leftLow) || isNaN(leftHigh) || isNaN(rightLow) || isNaN(rightHigh)) {
                result.setLowValue(NaN)
                        .setHighValue(NaN);
            }
            else if (node.getOperator() == ArithmeticBinaryExpression.Operator.DIVIDE && rightLow < 0 && rightHigh > 0) {
                // Divisor range spans zero, so the quotient is unbounded.
                result.setLowValue(Double.NEGATIVE_INFINITY)
                        .setHighValue(Double.POSITIVE_INFINITY);
            }
            else if (node.getOperator() == ArithmeticBinaryExpression.Operator.MODULUS) {
                // |a % b| < max(|b|); the sign of the result follows the dividend.
                double maxDivisor = max(abs(rightLow), abs(rightHigh));
                if (leftHigh <= 0) {
                    result.setLowValue(max(-maxDivisor, leftLow))
                            .setHighValue(0);
                }
                else if (leftLow >= 0) {
                    result.setLowValue(0)
                            .setHighValue(min(maxDivisor, leftHigh));
                }
                else {
                    result.setLowValue(max(-maxDivisor, leftLow))
                            .setHighValue(min(maxDivisor, leftHigh));
                }
            }
            else {
                // For monotone-in-each-argument operators, the extreme results occur at
                // combinations of the input range endpoints.
                double v1 = operate(node.getOperator(), leftLow, rightLow);
                double v2 = operate(node.getOperator(), leftLow, rightHigh);
                double v3 = operate(node.getOperator(), leftHigh, rightLow);
                double v4 = operate(node.getOperator(), leftHigh, rightHigh);
                double lowValue = min(v1, v2, v3, v4);
                double highValue = max(v1, v2, v3, v4);

                result.setLowValue(lowValue)
                        .setHighValue(highValue);
            }

            return result.build();
        }

        private double operate(ArithmeticBinaryExpression.Operator operator, double left, double right)
        {
            switch (operator) {
                case ADD:
                    return left + right;
                case SUBTRACT:
                    return left - right;
                case MULTIPLY:
                    return left * right;
                case DIVIDE:
                    return left / right;
                case MODULUS:
                    return left % right;
                default:
                    throw new IllegalStateException("Unsupported ArithmeticBinaryExpression.Operator: " + operator);
            }
        }

        @Override
        protected SymbolStatsEstimate visitCoalesceExpression(CoalesceExpression node, Void context)
        {
            requireNonNull(node, "node is null");
            // Fold the operand estimates left to right, at each step accounting for the
            // rows on which the accumulated prefix is null.
            SymbolStatsEstimate result = null;
            for (Expression operand : node.getOperands()) {
                SymbolStatsEstimate operandEstimates = process(operand);
                if (result != null) {
                    result = estimateCoalesce(result, operandEstimates);
                }
                else {
                    result = operandEstimates;
                }
            }
            return requireNonNull(result, "result is null");
        }

        private SymbolStatsEstimate estimateCoalesce(SymbolStatsEstimate left, SymbolStatsEstimate right)
        {
            // Short-circuit when the left side is never null (right is unreachable)
            // or always null (only the right side contributes).
            if (left.getNullsFraction() == 0) {
                return left;
            }
            else if (left.getNullsFraction() == 1.0) {
                return right;
            }
            else {
                return SymbolStatsEstimate.builder()
                        .setLowValue(min(left.getLowValue(), right.getLowValue()))
                        .setHighValue(max(left.getHighValue(), right.getHighValue()))
                        .setDistinctValuesCount(left.getDistinctValuesCount() +
                                min(right.getDistinctValuesCount(), input.getOutputRowCount() * left.getNullsFraction()))
                        .setNullsFraction(left.getNullsFraction() * right.getNullsFraction())
                        // TODO check if dataSize estimation method is correct
                        .setAverageRowSize(max(left.getAverageRowSize(), right.getAverageRowSize()))
                        .build();
            }
        }
    }

    /** Estimate for an expression that is always null: no distinct values, all nulls. */
    private static SymbolStatsEstimate nullStatsEstimate()
    {
        return SymbolStatsEstimate.builder()
                .setDistinctValuesCount(0)
                .setNullsFraction(1)
                .build();
    }
}
|
treasure-data/presto
|
presto-main/src/main/java/io/prestosql/cost/ScalarStatsCalculator.java
|
Java
|
apache-2.0
| 14,817 |
/*
* Copyright 2007 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.test.metric;
import com.google.inject.Guice;
import com.google.inject.Injector;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
/**
 * Tests command-line argument parsing through {@link ConfigModule}: classpath,
 * entry lists, multipliers and source-file URL flags, plus error reporting when
 * required arguments are missing or malformed.
 */
public class ConfigModuleTest extends AutoFieldClearTestCase {

  // Buffers capturing what the code under test writes to stdout/stderr.
  private ByteArrayOutputStream out = new ByteArrayOutputStream();
  private ByteArrayOutputStream err = new ByteArrayOutputStream();
  private PrintStream outStream = new PrintStream(out);
  private PrintStream errStream = new PrintStream(err);

  public void testParseNoArgs() {
    Guice.createInjector(new ConfigModule(new String[0], outStream, errStream))
        .getInstance(CommandLineConfig.class);
    // With no arguments the module must complain on stderr and announce an exit.
    String errorPrefix = "You must supply";
    assertEquals(errorPrefix, err.toString().substring(0, errorPrefix.length()));
    assertTrue(err.toString().contains("Exiting..."));
  }

  public void testParseClasspathAndSingleClass() throws Exception {
    Injector injector = Guice.createInjector(new ConfigModule(
        new String[] {"-cp", "not/default/path", "com.google.TestClass"}, outStream, errStream));
    injector.getInstance(CommandLineConfig.class);
    CommandLineConfig config = injector.getInstance(CommandLineConfig.class);
    assertEquals("", err.toString());
    assertEquals("not/default/path", config.cp);
    List<String> wanted = new ArrayList<String>();
    wanted.add("com.google.TestClass");
    assertNotNull(config.entryList);
    assertEquals(wanted, config.entryList);
  }

  public void testParseMultipleClassesAndPackages() throws Exception {
    Injector injector = Guice.createInjector(new ConfigModule(
        new String[] {
            "-cp", "not/default/path",
            "com.google.FirstClass",
            "com.google.second.package",
            "com.google.third.package"},
        outStream, errStream));
    CommandLineConfig config = injector.getInstance(CommandLineConfig.class);
    assertEquals("", err.toString());
    assertEquals("not/default/path", config.cp);
    List<String> wanted = new ArrayList<String>();
    wanted.add("com.google.FirstClass");
    wanted.add("com.google.second.package");
    wanted.add("com.google.third.package");
    assertNotNull(config.entryList);
    assertEquals(wanted, config.entryList);
  }

  /*
   * If the main() method is called directly by another class,
   * as in the case of the TestabilityTask for Ant,
   * multiple classpaths may be passed as a single String arg
   * separated by spaces (" ")
   */
  public void testParseMultipleClassesAndPackagesSingleArg() throws Exception {
    Injector injector = Guice.createInjector(new ConfigModule(
        new String[] {
            "-cp", "not/default/path",
            "com.google.FirstClass com.google.second.package com.google.third.package"},
        outStream, errStream));
    CommandLineConfig config = injector.getInstance(CommandLineConfig.class);
    // TODO(alexeagle): this test is really testing the JavaTestabilityModule
    new JavaTestabilityModule(config);
    assertEquals("", err.toString());
    assertEquals("not/default/path", config.cp);
    List<String> wanted = new ArrayList<String>();
    wanted.add("com.google.FirstClass");
    wanted.add("com.google.second.package");
    wanted.add("com.google.third.package");
    assertNotNull(config.entryList);
    assertEquals(wanted, config.entryList);
  }

  public void testParseComplexityAndGlobal() throws Exception {
    Injector injector = Guice.createInjector(new ConfigModule(
        new String[] {
            "-cp", "not/default/path",
            "-cyclomatic", "10",
            "-global", "1",
            "com.google.TestClass"},
        outStream, errStream));
    CommandLineConfig config = injector.getInstance(CommandLineConfig.class);
    assertEquals("", err.toString());
    assertEquals("Classpath", "not/default/path", config.cp);
    List<String> wanted = new ArrayList<String>();
    wanted.add("com.google.TestClass");
    assertNotNull(config.entryList);
    assertEquals(wanted, config.entryList);
    assertEquals("Cyclomatic", 10.0, config.cyclomaticMultiplier);
    assertEquals("Global", 1.0, config.globalMultiplier);
  }

  public void testJarFileNoClasspath() throws Exception {
    Guice.createInjector(new ConfigModule(new String[] {"junit.runner", "-cp"},
        outStream, errStream)).getInstance(CommandLineConfig.class);
    /**
     * we expect the error to say something about proper usage of the arguments.
     * The -cp needs a value
     */
    assertTrue(out.toString().length() == 0);
    assertTrue(err.toString().length() > 0);
  }

  public void testParseSrcFileUrlFlags() throws Exception {
    String lineTemplate = "http://code.google.com/p/testability-explorer/source/browse/trunk/src/{path}#{line}";
    String fileTemplate = "http://code.google.com/p/testability-explorer/source/browse/trunk/src/{path}";
    Injector injector = Guice.createInjector(new ConfigModule(
        new String[] {"-srcFileLineUrl", lineTemplate, "-srcFileUrl", fileTemplate},
        outStream, errStream));
    CommandLineConfig config = injector.getInstance(CommandLineConfig.class);
    assertEquals(lineTemplate, config.srcFileLineUrl);
    assertEquals(fileTemplate, config.srcFileUrl);
  }
}
|
lutts/testability-explorer
|
testability-explorer/src/test/java/com/google/test/metric/ConfigModuleTest.java
|
Java
|
apache-2.0
| 6,159 |
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is part of dcm4che, an implementation of DICOM(TM) in
* Java(TM), available at http://sourceforge.net/projects/dcm4che.
*
* The Initial Developer of the Original Code is
* TIANI Medgraph AG.
* Portions created by the Initial Developer are Copyright (C) 2003-2005
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Gunter Zeilinger <gunter.zeilinger@tiani.com>
* Franz Willer <franz.willer@gwi-ag.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
package org.dcm4chex.archive.ejb.entity;
import java.sql.Timestamp;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import javax.ejb.CreateException;
import javax.ejb.EntityBean;
import javax.ejb.FinderException;
import javax.ejb.RemoveException;
import org.apache.log4j.Logger;
import org.dcm4chex.archive.common.FileStatus;
import org.dcm4chex.archive.ejb.interfaces.FileDTO;
import org.dcm4chex.archive.ejb.interfaces.FileSystemLocal;
import org.dcm4chex.archive.ejb.interfaces.InstanceLocal;
import org.dcm4chex.archive.ejb.interfaces.MD5;
/**
* @author <a href="mailto:gunter@tiani.com">Gunter Zeilinger</a>
* @version $Revision: 17779 $ $Date: 2013-05-08 19:11:00 +0800 (周三, 08 5月 2013) $
*
* @ejb.bean name="File" type="CMP" view-type="local" primkey-field="pk"
* local-jndi-name="ejb/File"
* @jboss.container-configuration name="Instance Per Transaction CMP 2.x EntityBean"
* @ejb.persistence table-name="files"
* @ejb.transaction type="Required"
* @jboss.entity-command name="hsqldb-fetch-key"
* @jboss.audit-created-time field-name="createdTime"
*
* @ejb.finder signature="java.util.Collection findFilesToCompress(long fspk, java.lang.String cuid, java.sql.Timestamp before, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findFilesToCompress(long fspk, java.lang.String cuid, java.sql.Timestamp before, int limit)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileStatus = 0 AND f.fileTsuid IN ('1.2.840.10008.1.2','1.2.840.10008.1.2.1','1.2.840.10008.1.2.2') AND f.fileSystem.pk = ?1 AND f.instance.sopCuid = ?2 AND f.createdTime < ?3 LIMIT ?4"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findToCheckMd5(java.lang.String dirPath, java.sql.Timestamp before, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findToCheckMd5(java.lang.String dirPath, java.sql.Timestamp before, int limit)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileSystem.directoryPath = ?1 AND f.fileMd5Field IS NOT NULL AND (f.timeOfLastMd5Check IS NULL OR f.timeOfLastMd5Check < ?2) LIMIT ?3"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findByStatusAndFileSystem(java.lang.String dirPath, int status, java.sql.Timestamp notBefore, java.sql.Timestamp before, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findByStatusAndFileSystem(java.lang.String dirPath, int status, java.sql.Timestamp notBefore, java.sql.Timestamp before, int limit)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileSystem.directoryPath = ?1 AND f.fileStatus = ?2 AND f.createdTime >= ?3 AND f.createdTime < ?4 LIMIT ?5"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findByFileSystem(java.lang.String dirPath, int offset, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findByFileSystem(java.lang.String dirPath, int offset, int limit)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileSystem.directoryPath = ?1 OFFSET ?2 LIMIT ?3"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findFilesToLossyCompress(java.lang.String fsGroupId, java.lang.String cuid, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findFilesToLossyCompress(java.lang.String fsGroupId, java.lang.String cuid, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileStatus = 0 AND f.fileTsuid NOT IN ('1.2.840.10008.1.2.4.50','1.2.840.10008.1.2.4.51','1.2.840.10008.1.2.4.81','1.2.840.10008.1.2.4.91') AND f.fileSystem.status IN (0,1) AND f.fileSystem.groupID = ?1 AND f.instance.sopCuid = ?2 AND f.instance.series.sourceAET = ?3 AND f.createdTime < ?4 LIMIT ?5"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findFilesToLossyCompress(java.lang.String fsGroupId, java.lang.String cuid, java.lang.String bodyPart, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findFilesToLossyCompress(java.lang.String fsGroupId, java.lang.String cuid, java.lang.String bodyPart, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileStatus = 0 AND f.fileTsuid NOT IN ('1.2.840.10008.1.2.4.50','1.2.840.10008.1.2.4.51','1.2.840.10008.1.2.4.81','1.2.840.10008.1.2.4.91') AND f.fileSystem.status IN (0,1) AND f.fileSystem.groupID = ?1 AND f.instance.sopCuid = ?2 AND f.instance.series.bodyPartExamined = ?3 AND f.instance.series.sourceAET = ?4 AND f.createdTime < ?5 LIMIT ?6"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findFilesToLossyCompressWithExternalRetrieveAET(java.lang.String fsGroupId, java.lang.String retrieveAET, java.lang.String cuid, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findFilesToLossyCompressWithExternalRetrieveAET(java.lang.String fsGroupId, java.lang.String retrieveAET, java.lang.String cuid, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileStatus = 0 AND f.fileTsuid NOT IN ('1.2.840.10008.1.2.4.50','1.2.840.10008.1.2.4.51','1.2.840.10008.1.2.4.81','1.2.840.10008.1.2.4.91') AND f.fileSystem.status IN (0,1) AND f.fileSystem.groupID = ?1 AND f.instance.externalRetrieveAET = ?2 AND f.instance.sopCuid = ?3 AND f.instance.series.sourceAET = ?4 AND f.createdTime < ?5 LIMIT ?6"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findFilesToLossyCompressWithExternalRetrieveAET(java.lang.String fsGroupId, java.lang.String retrieveAET, java.lang.String cuid, java.lang.String bodyPart, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findFilesToLossyCompressWithExternalRetrieveAET(java.lang.String fsGroupId, java.lang.String retrieveAET, java.lang.String cuid, java.lang.String bodyPart, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileStatus = 0 AND f.fileTsuid NOT IN ('1.2.840.10008.1.2.4.50','1.2.840.10008.1.2.4.51','1.2.840.10008.1.2.4.81','1.2.840.10008.1.2.4.91') AND f.fileSystem.status IN (0,1) AND f.fileSystem.groupID = ?1 AND f.instance.externalRetrieveAET = ?2 AND f.instance.sopCuid = ?3 AND f.instance.series.bodyPartExamined = ?4 AND f.instance.series.sourceAET = ?5 AND f.createdTime < ?6 LIMIT ?7"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findFilesToLossyCompressWithCopyOnOtherFileSystemGroup(java.lang.String fsGroupId, java.lang.String otherFSGroupId, java.lang.String cuid, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findFilesToLossyCompressWithCopyOnOtherFileSystemGroup(java.lang.String fsGroupId, java.lang.String otherFSGroupId, java.lang.String cuid, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="SELECT DISTINCT OBJECT(f) FROM File AS f, IN (f.instance.files) AS f2 WHERE f.fileStatus = 0 AND f.fileTsuid NOT IN ('1.2.840.10008.1.2.4.50','1.2.840.10008.1.2.4.51','1.2.840.10008.1.2.4.81','1.2.840.10008.1.2.4.91') AND f.fileSystem.status IN (0,1) AND f.fileSystem.groupID = ?1 AND f2.fileSystem.groupID = ?2 AND f.instance.sopCuid = ?3 AND f.instance.series.sourceAET = ?4 AND f.createdTime < ?5 LIMIT ?6"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findFilesToLossyCompressWithCopyOnOtherFileSystemGroup(java.lang.String fsGroupId, java.lang.String otherFSGroupId, java.lang.String cuid, java.lang.String bodyPart, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findFilesToLossyCompressWithCopyOnOtherFileSystemGroup(java.lang.String fsGroupId, java.lang.String otherFSGroupId, java.lang.String cuid, java.lang.String bodyPart, java.lang.String sourceAET, java.sql.Timestamp before, int limit)"
* query="SELECT DISTINCT OBJECT(f) FROM File AS f, IN (f.instance.files) AS f2 WHERE f.fileStatus = 0 AND f.fileTsuid NOT IN ('1.2.840.10008.1.2.4.50','1.2.840.10008.1.2.4.51','1.2.840.10008.1.2.4.81','1.2.840.10008.1.2.4.91') AND f.fileSystem.status IN (0,1) AND f.fileSystem.groupID = ?1 AND f2.fileSystem.groupID = ?2 AND f.instance.sopCuid = ?3 AND f.instance.series.bodyPartExamined = ?4 AND f.instance.series.sourceAET = ?5 AND f.createdTime < ?6 LIMIT ?7"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findToSyncArchived(java.lang.String fsPath, int limit)"
* query="" transaction-type="Supports"
* @jboss.query signature="java.util.Collection findToSyncArchived(java.lang.String fsPath, int limit)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileStatus = 2 AND f.instance.archived = false AND f.fileSystem.directoryPath = ?1 LIMIT ?2"
* strategy="on-find" eager-load-group="*"
* @ejb.finder signature="java.util.Collection findFilesOfTarFile(java.lang.String fsId, java.lang.String tarFilename)"
* query="SELECT OBJECT(f) FROM File AS f WHERE f.fileSystem.directoryPath = ?1 AND f.filePath LIKE ?2"
* transaction-type="Supports"
*
* @jboss.query
* signature="java.util.Set ejbSelectGeneric(java.lang.String jbossQl, java.lang.Object[] args)"
* dynamic="true"
* strategy="on-load"
* page-size="20"
* eager-load-group="*"
*
* @jboss.query
* signature="java.sql.Timestamp ejbSelectGenericTime(java.lang.String jbossQl, java.lang.Object[] args)"
* dynamic="true"
*
*/
public abstract class FileBean implements EntityBean {

    // NOTE: the XDoclet tags (@ejb.*, @jboss.*) in the javadocs below are
    // parsed at build time to generate the EJB deployment descriptors and
    // finder queries. They are behavior-bearing configuration, not free-form
    // documentation — do not edit them casually.
    private static final Logger log = Logger.getLogger(FileBean.class);

    /**
     * Auto-generated Primary Key
     *
     * @ejb.interface-method
     * @ejb.pk-field
     * @ejb.persistence column-name="pk"
     * @jboss.persistence auto-increment="true"
     */
    public abstract Long getPk();

    public abstract void setPk(Long pk);

    /**
     * Timestamp when this file record was created.
     *
     * @ejb.interface-method
     * @ejb.persistence column-name="created_time"
     */
    public abstract java.sql.Timestamp getCreatedTime();

    public abstract void setCreatedTime(java.sql.Timestamp time);

    /**
     * Time of the most recent MD5 check of the stored file.
     *
     * @ejb.interface-method
     * @ejb.persistence column-name="md5_check_time"
     */
    public abstract java.sql.Timestamp getTimeOfLastMd5Check();

    /**
     * @ejb.interface-method
     */
    public abstract void setTimeOfLastMd5Check(java.sql.Timestamp time);

    /**
     * File Path (relative path to Directory).
     *
     * @ejb.interface-method
     * @ejb.persistence column-name="filepath"
     */
    public abstract String getFilePath();

    /**
     * @ejb.interface-method
     */
    public abstract void setFilePath(String path);

    /**
     * Transfer Syntax UID
     *
     * @ejb.interface-method
     * @ejb.persistence column-name="file_tsuid"
     */
    public abstract String getFileTsuid();

    /**
     * @ejb.interface-method
     */
    public abstract void setFileTsuid(String tsuid);

    /**
     * MD5 checksum as hex string
     *
     * @ejb.interface-method
     * @ejb.persistence column-name="file_md5"
     */
    public abstract String getFileMd5Field();

    public abstract void setFileMd5Field(String md5);

    /**
     * Numeric status flag of the stored file.
     *
     * @ejb.interface-method
     * @ejb.persistence column-name="file_status"
     */
    public abstract int getFileStatus();

    /**
     * @ejb.interface-method
     */
    public abstract void setFileStatus(int status);

    /**
     * MD5 checksum in binary format
     *
     * @ejb.interface-method
     */
    public byte[] getFileMd5() {
        // The checksum is persisted as a hex string (fileMd5Field);
        // convert to raw bytes on access.
        return MD5.toBytes(getFileMd5Field());
    }

    /**
     * @ejb.interface-method
     */
    public void setFileMd5(byte[] md5) {
        setFileMd5Field(MD5.toString(md5));
    }

    /**
     * File Size
     *
     * @ejb.interface-method
     * @ejb.persistence column-name="file_size"
     */
    public abstract long getFileSize();

    /**
     * @ejb.interface-method
     */
    public abstract void setFileSize(long size);

    /**
     * @ejb.interface-method
     * @ejb.relation name="instance-files"
     *               role-name="files-of-instance"
     *               cascade-delete="yes"
     * @jboss.relation fk-column="instance_fk"
     *                 related-pk-field="pk"
     */
    public abstract void setInstance(InstanceLocal inst);

    /**
     * @ejb.interface-method
     */
    public abstract InstanceLocal getInstance();

    /**
     * @ejb.interface-method
     * @ejb.relation name="filesystem-files"
     *               role-name="files-of-filesystem"
     *               target-role-name="filesystem-of-file"
     *               target-ejb="FileSystem"
     *               target-multiple="yes"
     * @jboss.relation fk-column="filesystem_fk"
     *                 related-pk-field="pk"
     */
    public abstract void setFileSystem(FileSystemLocal fs);

    /**
     * @ejb.interface-method
     */
    public abstract FileSystemLocal getFileSystem();

    /**
     * Returns true if this file is not the only copy of its instance,
     * i.e. the instance reference is null or the instance has more
     * than one file.
     *
     * @ejb.interface-method
     */
    public boolean isRedundant() {
        InstanceLocal inst = getInstance();
        return inst == null || inst.getFiles().size() > 1;
    }

    /**
     * Builds a value object (DTO) carrying this entity's file and file
     * system attributes, plus instance attributes when an instance is
     * linked.
     *
     * @ejb.interface-method
     */
    public FileDTO getFileDTO() {
        FileSystemLocal fs = getFileSystem();
        FileDTO retval = new FileDTO();
        retval.setPk(getPk().longValue());
        retval.setRetrieveAET(fs.getRetrieveAET());
        retval.setFileSystemPk(fs.getPk().longValue());
        retval.setFileSystemGroupID(fs.getGroupID());
        retval.setDirectoryPath(fs.getDirectoryPath());
        retval.setAvailability(fs.getAvailability());
        retval.setUserInfo(fs.getUserInfo());
        retval.setFilePath(getFilePath());
        retval.setFileTsuid(getFileTsuid());
        retval.setFileSize(getFileSize());
        retval.setFileMd5(getFileMd5());
        retval.setFileStatus(getFileStatus());
        InstanceLocal inst = getInstance();
        if (inst != null) {
            // getInstance() may be null (see isRedundant()); instance
            // attributes are only copied when a linked instance exists.
            retval.setSopInstanceUID(inst.getSopIuid());
            retval.setSopClassUID(inst.getSopCuid());
            retval.setExternalRetrieveAET(inst.getExternalRetrieveAET());
        }
        return retval;
    }

    /**
     * @ejb.interface-method
     */
    public String asString() {
        return prompt();
    }

    // Human-readable description of this entity, used by asString() and
    // for lifecycle logging.
    private String prompt() {
        return "File[pk=" + getPk() + ", filepath=" + getFilePath()
                + ", tsuid=" + getFileTsuid() + ", filesystem->"
                + getFileSystem() + ", inst->" + getInstance() + "]";
    }

    /**
     * Create file.
     *
     * @ejb.create-method
     */
    public Long ejbCreate(String path, String tsuid, long size, byte[] md5,
            int status, InstanceLocal instance, FileSystemLocal filesystem)
            throws CreateException {
        setFilePath(path);
        setFileTsuid(tsuid);
        setFileSize(size);
        setFileMd5(md5);
        setFileStatus(status);
        // Relationship fields (instance, filesystem) are set in
        // ejbPostCreate; returning null from ejbCreate is the CMP convention.
        return null;
    }

    public void ejbPostCreate(String path, String tsuid, long size, byte[] md5,
            int status, InstanceLocal instance, FileSystemLocal filesystem)
            throws CreateException {
        setInstance(instance);
        setFileSystem(filesystem);
        log.info("Created " + prompt());
    }

    public void ejbRemove() throws RemoveException {
        log.info("Deleting " + prompt());
    }

    /**
     * Dynamic select: executes the JBossQL passed at runtime (see the
     * dynamic="true" @jboss.query declarations in the class javadoc).
     *
     * @ejb.select query=""
     *             transaction-type="Supports"
     */
    public abstract Set ejbSelectGeneric(String jbossQl, Object[] args)
            throws FinderException;

    /**
     * Dynamic select returning a single timestamp value.
     *
     * @ejb.select query=""
     *             transaction-type="Supports"
     */
    public abstract Timestamp ejbSelectGenericTime(String jbossQl, Object[] args)
            throws FinderException;

    /**
     * Selects files with the given status on one of the given file system
     * directories, created in the interval [notBefore, before), limited to
     * limit results. The JBossQL is built at runtime because the length of
     * the IN (...) list depends on dirPath.size().
     *
     * @ejb.home-method
     */
    public Collection ejbHomeSelectByStatusAndFileSystem(List dirPath, int status,
            Timestamp notBefore, Timestamp before, int limit) throws FinderException {
        StringBuilder jbossQl = new StringBuilder()
                .append("SELECT OBJECT(f) FROM File AS f WHERE f.fileStatus = ?1")
                .append(" AND f.createdTime >= ?2 AND f.createdTime < ?3 AND f.fileSystem.directoryPath IN (");
        Object[] args = new Object[dirPath.size() + 4];
        args[0] = status;
        args[1] = notBefore;
        args[2] = before;
        int idx = 3;
        // JBossQL parameters are 1-based: args[idx] is referenced as ?(idx+1),
        // hence the placeholder number is appended only after idx++ ran.
        // Elements 1..size-1 are appended first, each followed by ", ";
        // element 0 is appended last so the IN list closes without a
        // trailing comma.
        for (int i = 1, len = dirPath.size(); i < len; i++) {
            args[idx++] = dirPath.get(i);
            jbossQl.append("?").append(idx).append(", ");
        }
        args[idx++] = dirPath.get(0);
        jbossQl.append("?").append(idx).append(")");
        args[idx++] = limit;
        jbossQl.append(" LIMIT ?").append(idx);
        return ejbSelectGeneric(jbossQl.toString(), args);
    }

    /**
     * Returns the earliest creation time of any file with the given status
     * on one of the given file system directories.
     *
     * @ejb.home-method
     */
    public Timestamp ejbHomeMinCreatedTimeOnFsWithFileStatus(List dirPath, int status) throws FinderException {
        Object[] args = new Object[dirPath.size() + 1];
        StringBuilder jbossQl = new StringBuilder()
                .append("SELECT MIN(f.createdTime) FROM File f WHERE f.fileSystem.directoryPath IN (?1");
        args[0] = dirPath.get(0);
        int i = 1;
        // args[i] is referenced as ?(i+1); the pre-increment inside append
        // keeps the placeholder numbers in step with the argument array.
        for (int len = dirPath.size() ; i < len ; ) {
            args[i] = dirPath.get(i);
            jbossQl.append(",?").append(++i);
        }
        args[i] = status;
        jbossQl.append(") AND f.fileStatus = ?").append(++i);
        return ejbSelectGenericTime(jbossQl.toString(), args);
    }
}
|
medicayun/medicayundicom
|
dcm4jboss-all/tags/DCM4CHEE_2_18_4/dcm4jboss-ejb/src/java/org/dcm4chex/archive/ejb/entity/FileBean.java
|
Java
|
apache-2.0
| 20,484 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.mongodb.integration;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.StreamSupport;
import com.mongodb.client.MongoCollection;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mongodb.MongoDbConstants;
import org.bson.Document;
import org.junit.jupiter.api.Test;
import static com.mongodb.client.model.Filters.eq;
import static org.apache.camel.component.mongodb.MongoDbConstants.MONGO_ID;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Integration tests for the MongoDB component's {@code dynamicity} option:
 * when enabled, the target database and/or collection can be overridden per
 * message via the {@link MongoDbConstants#DATABASE} and
 * {@link MongoDbConstants#COLLECTION} headers; when disabled (the default),
 * those headers are ignored and the endpoint's static target is used.
 */
public class MongoDbDynamicityIT extends AbstractMongoDbITSupport {

    @Test
    public void testInsertDynamicityDisabled() {
        assertEquals(0, testCollection.countDocuments());
        mongo.getDatabase("otherDB").drop();
        db.getCollection("otherCollection").drop();
        assertFalse(otherDbExists(), "The otherDB database should not exist");

        String body = "{\"_id\": \"testInsertDynamicityDisabled\", \"a\" : \"1\"}";
        Map<String, Object> headers = new HashMap<>();
        headers.put(MongoDbConstants.DATABASE, "otherDB");
        headers.put(MongoDbConstants.COLLECTION, "otherCollection");
        template.requestBodyAndHeaders("direct:noDynamicity", body, headers);

        // Despite the headers, the record must land in the static target collection.
        Document b = testCollection.find(eq(MONGO_ID, "testInsertDynamicityDisabled")).first();
        assertNotNull(b, "No record with 'testInsertDynamicityDisabled' _id");

        body = "{\"_id\": \"testInsertDynamicityDisabledExplicitly\", \"a\" : \"1\"}";
        template.requestBodyAndHeaders("direct:noDynamicityExplicit", body, headers);
        b = testCollection.find(eq(MONGO_ID, "testInsertDynamicityDisabledExplicitly")).first();
        assertNotNull(b, "No record with 'testInsertDynamicityDisabledExplicitly' _id");

        assertFalse(otherDbExists(), "The otherDB database should not exist");
    }

    @Test
    public void testInsertDynamicityEnabledDBOnly() {
        assertEquals(0, testCollection.countDocuments());
        mongo.getDatabase("otherDB").drop();
        db.getCollection("otherCollection").drop();
        assertFalse(otherDbExists(), "The otherDB database should not exist");

        String body = "{\"_id\": \"testInsertDynamicityEnabledDBOnly\", \"a\" : \"1\"}";
        Map<String, Object> headers = new HashMap<>();
        headers.put(MongoDbConstants.DATABASE, "otherDB");
        template.requestBodyAndHeaders("direct:dynamicityEnabled", body, headers);

        // The record must be in otherDB under the same collection name as the static target.
        MongoCollection<Document> localDynamicCollection
                = mongo.getDatabase("otherDB").getCollection(testCollection.getNamespace().getCollectionName(), Document.class);
        Document b = localDynamicCollection.find(eq(MONGO_ID, "testInsertDynamicityEnabledDBOnly")).first();
        assertNotNull(b, "No record with 'testInsertDynamicityEnabledDBOnly' _id");

        b = testCollection.find(eq(MONGO_ID, "testInsertDynamicityEnabledDBOnly")).first();
        assertNull(b, "There is a record with 'testInsertDynamicityEnabledDBOnly' _id in the test collection");

        assertTrue(otherDbExists(), "The otherDB database should exist");
    }

    @Test
    public void testInsertDynamicityEnabledCollectionOnly() {
        assertEquals(0, testCollection.countDocuments());
        mongo.getDatabase("otherDB").drop();
        db.getCollection("otherCollection").drop();
        assertFalse(otherDbExists(), "The otherDB database should not exist");

        String body = "{\"_id\": \"testInsertDynamicityEnabledCollectionOnly\", \"a\" : \"1\"}";
        Map<String, Object> headers = new HashMap<>();
        headers.put(MongoDbConstants.COLLECTION, "otherCollection");
        template.requestBodyAndHeaders("direct:dynamicityEnabled", body, headers);

        MongoCollection<Document> localDynamicCollection = db.getCollection("otherCollection", Document.class);
        Document b = localDynamicCollection.find(eq(MONGO_ID, "testInsertDynamicityEnabledCollectionOnly")).first();
        assertNotNull(b, "No record with 'testInsertDynamicityEnabledCollectionOnly' _id");

        // Bug fix: the original queried the id inserted by a *different* test
        // ("...DBOnly"), which made this negative assertion vacuous.
        b = testCollection.find(eq(MONGO_ID, "testInsertDynamicityEnabledCollectionOnly")).first();
        assertNull(b, "There is a record with 'testInsertDynamicityEnabledCollectionOnly' _id in the test collection");

        assertFalse(otherDbExists(), "The otherDB database should not exist");
    }

    @Test
    public void testInsertDynamicityEnabledDBAndCollection() {
        assertEquals(0, testCollection.countDocuments());
        mongo.getDatabase("otherDB").drop();
        db.getCollection("otherCollection").drop();
        assertFalse(otherDbExists(), "The otherDB database should not exist");

        String body = "{\"_id\": \"testInsertDynamicityEnabledDBAndCollection\", \"a\" : \"1\"}";
        Map<String, Object> headers = new HashMap<>();
        headers.put(MongoDbConstants.DATABASE, "otherDB");
        headers.put(MongoDbConstants.COLLECTION, "otherCollection");
        template.requestBodyAndHeaders("direct:dynamicityEnabled", body, headers);

        MongoCollection<Document> localDynamicCollection
                = mongo.getDatabase("otherDB").getCollection("otherCollection", Document.class);
        Document b = localDynamicCollection.find(eq(MONGO_ID, "testInsertDynamicityEnabledDBAndCollection")).first();
        assertNotNull(b, "No record with 'testInsertDynamicityEnabledDBAndCollection' _id");

        // Bug fix: query the id actually inserted by this test (was "...DBOnly").
        b = testCollection.find(eq(MONGO_ID, "testInsertDynamicityEnabledDBAndCollection")).first();
        assertNull(b, "There is a record with 'testInsertDynamicityEnabledDBAndCollection' _id in the test collection");

        assertTrue(otherDbExists(), "The otherDB database should exist");
    }

    /** Returns whether a database named "otherDB" currently exists on the server. */
    private boolean otherDbExists() {
        return StreamSupport.stream(mongo.listDatabaseNames().spliterator(), false).anyMatch("otherDB"::equals);
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            public void configure() {
                // Static target; dynamicity defaults to false.
                from("direct:noDynamicity")
                        .to("mongodb:myDb?database={{mongodb.testDb}}&collection={{mongodb.testCollection}}&operation=insert");
                from("direct:noDynamicityExplicit").to(
                        "mongodb:myDb?database={{mongodb.testDb}}&collection={{mongodb.testCollection}}&operation=insert&dynamicity=false");
                from("direct:dynamicityEnabled").to(
                        "mongodb:myDb?database={{mongodb.testDb}}&collection={{mongodb.testCollection}}&operation=insert&dynamicity=true");
            }
        };
    }
}
|
nikhilvibhav/camel
|
components/camel-mongodb/src/test/java/org/apache/camel/component/mongodb/integration/MongoDbDynamicityIT.java
|
Java
|
apache-2.0
| 8,400 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
public class RebalanceAfterActiveTests extends ESAllocationTestCase {
    private final Logger logger = LogManager.getLogger(RebalanceAfterActiveTests.class);

    public void testRebalanceOnlyAfterAllShardsAreActive() {
        // One random expected size per shard id of index "test", surfaced to
        // the allocation service through a stubbed ClusterInfo.
        final long[] shardSizes = new long[5];
        for (int i = 0; i < shardSizes.length; i++) {
            shardSizes[i] = randomIntBetween(0, Integer.MAX_VALUE);
        }

        AllocationService allocation = createAllocationService(
            Settings.builder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .build(),
            () -> new ClusterInfo() {
                @Override
                public Long getShardSize(ShardRouting shardRouting) {
                    if ("test".equals(shardRouting.getIndexName())) {
                        return shardSizes[shardRouting.getId()];
                    }
                    return null;
                }
            });

        logger.info("Building initial routing table");
        MetaData metaData = MetaData.builder()
            .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
            .build();
        ClusterState state = ClusterState.builder(
                org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
            .metaData(metaData)
            .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build())
            .build();

        // Nothing is assigned before any node has joined.
        assertThat(state.routingTable().index("test").shards().size(), equalTo(5));
        for (int i = 0; i < state.routingTable().index("test").shards().size(); i++) {
            assertThat(state.routingTable().index("test").shard(i).shards().size(), equalTo(2));
            for (ShardRouting copy : state.routingTable().index("test").shard(i).shards()) {
                assertThat(copy.state(), equalTo(UNASSIGNED));
                assertThat(copy.currentNodeId(), nullValue());
            }
        }

        logger.info("start two nodes and fully start the shards");
        state = ClusterState.builder(state)
            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
            .build();
        state = allocation.reroute(state, "reroute");
        for (int i = 0; i < state.routingTable().index("test").shards().size(); i++) {
            assertThat(state.routingTable().index("test").shard(i).shards().size(), equalTo(2));
            assertThat(state.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
            assertThat(state.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
        }

        logger.info("start all the primary shards, replicas will start initializing");
        state = startInitializingShardsAndReroute(allocation, state);
        assertPrimariesStartedReplicasInitializing(state, shardSizes);

        logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
        state = ClusterState.builder(state)
            .nodes(DiscoveryNodes.builder(state.nodes())
                .add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6"))
                .add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10")))
            .build();
        state = allocation.reroute(state, "reroute");
        assertPrimariesStartedReplicasInitializing(state, shardSizes);

        logger.info("start the replica shards, rebalancing should start");
        state = startInitializingShardsAndReroute(allocation, state);
        assertThat(state.routingTable().shardsWithState(STARTED).size(), equalTo(5));
        assertThat(state.routingTable().shardsWithState(RELOCATING).size(), equalTo(5));
        assertMovingCopiesCarryExpectedSize(state, shardSizes, true);

        logger.info("complete relocation, other half of relocation should happen");
        state = startInitializingShardsAndReroute(allocation, state);
        // Only 3 copies still relocate; 2 already sit on their final node.
        assertThat(state.routingTable().shardsWithState(STARTED).size(), equalTo(7));
        assertThat(state.routingTable().shardsWithState(RELOCATING).size(), equalTo(3));
        assertMovingCopiesCarryExpectedSize(state, shardSizes, false);

        logger.info("complete relocation, that's it!");
        state = startInitializingShardsAndReroute(allocation, state);
        assertThat(state.routingTable().shardsWithState(STARTED).size(), equalTo(10));
        // 10 shard copies over 10 nodes: the final allocation is even.
        RoutingNodes routingNodes = state.getRoutingNodes();
        for (RoutingNode routingNode : routingNodes) {
            assertThat(routingNode.size(), equalTo(1));
        }
    }

    /**
     * Asserts that for every shard of index "test" the primary is STARTED and
     * the replica is INITIALIZING with the expected shard size set.
     */
    private static void assertPrimariesStartedReplicasInitializing(ClusterState state, long[] expectedSizes) {
        for (int i = 0; i < state.routingTable().index("test").shards().size(); i++) {
            assertThat(state.routingTable().index("test").shard(i).shards().size(), equalTo(2));
            assertThat(state.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
            assertThat(state.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
            assertEquals(state.routingTable().index("test").shard(i).replicaShards().get(0).getExpectedShardSize(),
                expectedSizes[i]);
        }
    }

    /**
     * Asserts every RELOCATING/INITIALIZING copy reports its expected shard
     * size; when atLeastOnePerShard is set, also asserts that every shard id
     * has at least one such moving copy.
     */
    private static void assertMovingCopiesCarryExpectedSize(ClusterState state, long[] expectedSizes,
                                                            boolean atLeastOnePerShard) {
        for (int i = 0; i < state.routingTable().index("test").shards().size(); i++) {
            int moving = 0;
            for (ShardRouting routing : state.routingTable().index("test").shard(i).shards()) {
                if (routing.state() == RELOCATING || routing.state() == INITIALIZING) {
                    assertEquals(routing.getExpectedShardSize(), expectedSizes[i]);
                    moving++;
                }
            }
            if (atLeastOnePerShard) {
                assertTrue(moving > 0);
            }
        }
    }
}
|
coding0011/elasticsearch
|
server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
|
Java
|
apache-2.0
| 9,424 |
/*
* Copyright 2016 MovingBlocks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.terasology.rendering.nui.layers.mainMenu;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.terasology.assets.ResourceUrn;
import org.terasology.config.Config;
import org.terasology.engine.GameEngine;
import org.terasology.engine.modes.StateLoading;
import org.terasology.engine.paths.PathManager;
import org.terasology.game.GameManifest;
import org.terasology.i18n.TranslationSystem;
import org.terasology.network.NetworkMode;
import org.terasology.registry.CoreRegistry;
import org.terasology.registry.In;
import org.terasology.rendering.nui.CoreScreenLayer;
import org.terasology.rendering.nui.WidgetUtil;
import org.terasology.rendering.nui.animation.MenuAnimationSystems;
import org.terasology.rendering.nui.databinding.ReadOnlyBinding;
import org.terasology.rendering.nui.layers.mainMenu.savedGames.GameInfo;
import org.terasology.rendering.nui.layers.mainMenu.savedGames.GameProvider;
import org.terasology.rendering.nui.widgets.UILabel;
import org.terasology.rendering.nui.widgets.UIList;
import org.terasology.utilities.FilesUtil;
import java.nio.file.Path;
public class SelectGameScreen extends CoreScreenLayer {
public static final ResourceUrn ASSET_URI = new ResourceUrn("engine:selectGameScreen");
private static final Logger logger = LoggerFactory.getLogger(SelectGameScreen.class);
@In
private Config config;
@In
private TranslationSystem translationSystem;
private boolean loadingAsServer;
@Override
public void initialise() {
setAnimationSystem(MenuAnimationSystems.createDefaultSwipeAnimation());
UILabel gameTypeTitle = find("gameTypeTitle", UILabel.class);
if (gameTypeTitle != null) {
gameTypeTitle.bindText(new ReadOnlyBinding<String>() {
@Override
public String get() {
if (loadingAsServer) {
return translationSystem.translate("${engine:menu#select-multiplayer-game-sub-title}");
} else {
return translationSystem.translate("${engine:menu#select-singleplayer-game-sub-title}");
}
}
});
}
final UILabel saveGamePath = find("saveGamePath", UILabel.class);
if (saveGamePath != null) {
saveGamePath.setText(
translationSystem.translate("${engine:menu#save-game-path} ") +
PathManager.getInstance().getSavesPath().toAbsolutePath().toString());
}
final UIList<GameInfo> gameList = find("gameList", UIList.class);
refreshList(gameList);
gameList.subscribe((widget, item) -> loadGame(item));
CreateGameScreen screen = getManager().createScreen(CreateGameScreen.ASSET_URI, CreateGameScreen.class);
WidgetUtil.trySubscribe(this, "create", button -> {
screen.setLoadingAsServer(loadingAsServer);
triggerForwardAnimation(screen);
});
WidgetUtil.trySubscribe(this, "load", button -> {
GameInfo gameInfo = gameList.getSelection();
if (gameInfo != null) {
loadGame(gameInfo);
}
});
WidgetUtil.trySubscribe(this, "delete", button -> {
GameInfo gameInfo = gameList.getSelection();
if (gameInfo != null) {
Path world = PathManager.getInstance().getSavePath(gameInfo.getManifest().getTitle());
try {
FilesUtil.recursiveDelete(world);
gameList.getList().remove(gameInfo);
gameList.setSelection(null);
} catch (Exception e) {
logger.error("Failed to delete saved game", e);
getManager().pushScreen(MessagePopup.ASSET_URI, MessagePopup.class).setMessage("Error Deleting Game", e.getMessage());
}
}
});
WidgetUtil.trySubscribe(this, "close", button -> triggerBackAnimation());
}
@Override
public boolean isLowerLayerVisible() {
return false;
}
@Override
public void onOpened() {
super.onOpened();
if (loadingAsServer && !config.getPlayer().hasEnteredUsername()) {
getManager().pushScreen(EnterUsernamePopup.ASSET_URI, EnterUsernamePopup.class);
}
}
private void loadGame(GameInfo item) {
try {
GameManifest manifest = item.getManifest();
config.getWorldGeneration().setDefaultSeed(manifest.getSeed());
config.getWorldGeneration().setWorldTitle(manifest.getTitle());
CoreRegistry.get(GameEngine.class).changeState(new StateLoading(manifest, (loadingAsServer) ? NetworkMode.DEDICATED_SERVER : NetworkMode.NONE));
} catch (Exception e) {
logger.error("Failed to load saved game", e);
getManager().pushScreen(MessagePopup.ASSET_URI, MessagePopup.class).setMessage("Error Loading Game", e.getMessage());
}
}
/** @return true when the selected game will be started as a dedicated server */
public boolean isLoadingAsServer() {
    return loadingAsServer;
}
/** @param loadingAsServer whether the selected game should be started as a dedicated server */
public void setLoadingAsServer(boolean loadingAsServer) {
    this.loadingAsServer = loadingAsServer;
}
// Repopulates the list widget with the saved games currently present on disk.
private void refreshList(UIList<GameInfo> gameList) {
    gameList.setList(GameProvider.getSavedGames());
}
}
|
Vizaxo/Terasology
|
engine/src/main/java/org/terasology/rendering/nui/layers/mainMenu/SelectGameScreen.java
|
Java
|
apache-2.0
| 5,942 |
/*
* %CopyrightBegin%
*
* Copyright Ericsson AB 2004-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
*
* %CopyrightEnd%
*/
import com.ericsson.otp.erlang.*;
/**
 * Test node used by the jinterface test suite. It registers itself under the
 * name {@code connection_server}, announces its presence to a controlling
 * Erlang process, and then serves connect requests (pinging the requested
 * node) until it receives a non-tuple message (e.g. the atom 'bye').
 *
 * Expected arguments: argv[0] = own node name, argv[1] = cookie,
 * argv[2] = controller node, argv[3] = controller registered name.
 */
public class connection_server {

    static java.lang.String Name = "connection_server";

    public static void main(String[] argv) {
        try {
            System.out.println("connection_server booting...");
            for (int j = 0; j < argv.length; j++)
                System.out.println("argv[" + j + "] = \"" + argv[j] + "\"");

            if (argv.length != 4) {
                System.out.println("Wrong number of arguments!");
                System.exit(1);
            }

            // Start node and mbox
            OtpNode node = new OtpNode(argv[0], argv[1]);
            OtpMbox mbox = node.createMbox();

            if (! mbox.registerName(Name)) {
                System.out.println("Could not register name " + Name);
                System.exit(3);
            }

            // Announce our presence: send {Name, OwnNode, Pid} to the
            // controller's registered name on the controller node.
            OtpErlangObject[] amsg = new OtpErlangObject[3];
            amsg[0] = new OtpErlangAtom(Name);
            amsg[1] = new OtpErlangAtom(argv[0]);
            amsg[2] = mbox.self();
            OtpErlangTuple atuple = new OtpErlangTuple(amsg);
            mbox.send(argv[3], argv[2], atuple);

            // Do connects ... each request is a tuple {FromPid, NodeToPing};
            // the reply is {SelfPid, NodeToPing, true|false}.
            while (true) {
                OtpErlangObject o = mbox.receive();
                if (o == null)
                    continue;
                if (o instanceof OtpErlangTuple) {
                    OtpErlangTuple msg = (OtpErlangTuple) o;
                    OtpErlangPid from = (OtpErlangPid)(msg.elementAt(0));
                    OtpErlangAtom conn_node = (OtpErlangAtom) msg.elementAt(1);
                    System.out.println("Got request to connect to: "
                                       + conn_node);
                    OtpErlangObject[] rmsg = new OtpErlangObject[3];
                    rmsg[0] = mbox.self();
                    rmsg[1] = conn_node;
                    // ping with a 1 second timeout decides the reported result
                    if (node.ping(conn_node.atomValue(), 1000)) {
                        System.out.println("Successfully connected to "
                                           + conn_node.toString());
                        rmsg[2] = new OtpErlangAtom("true");
                    }
                    else {
                        System.out.println("Failed to connect to "
                                           + conn_node.toString());
                        rmsg[2] = new OtpErlangAtom("false");
                    }
                    OtpErlangTuple rtuple = new OtpErlangTuple(rmsg);
                    mbox.send(from, rtuple);
                }
                else { // probably 'bye'
                    System.out.println("connection_server halting...");
                    System.exit(0);
                }
            }
        }
        catch (Exception e) {
            System.out.println("" + e);
            System.exit(2);
        }
    }
}
|
racker/omnibus
|
source/otp_src_R14B02/lib/jinterface/test/nc_SUITE_data/connection_server.java
|
Java
|
apache-2.0
| 2,807 |
/*
* Copyright (c) 2002-2007, Marc Prud'hommeaux. All rights reserved.
*
* This software is distributable under the BSD license. See the terms of the BSD license in the
* documentation provided with this software.
*/
package org.apache.geode.management.internal.cli.shell.jline;
import org.springframework.shell.support.util.OsUtils;
import java.io.BufferedReader;
import java.io.InputStreamReader;
/**
* A buffer that can contain ANSI text.
*
* @author <a href="mailto:mwp1@cornell.edu">Marc Prud'hommeaux</a>
*/
public class ANSIBuffer {
    // When false, styling methods still append text but toString() returns the plain buffer.
    private boolean ansiEnabled = true;
    // Two parallel buffers: one with ANSI escape sequences, one with plain text only.
    private final StringBuffer ansiBuffer = new StringBuffer();
    private final StringBuffer plainBuffer = new StringBuffer();

    public ANSIBuffer() {}

    public ANSIBuffer(final String str) {
        append(str);
    }

    public void setAnsiEnabled(final boolean ansi) {
        this.ansiEnabled = ansi;
    }

    public boolean getAnsiEnabled() {
        return this.ansiEnabled;
    }

    public String getAnsiBuffer() {
        return ansiBuffer.toString();
    }

    public String getPlainBuffer() {
        return plainBuffer.toString();
    }

    public String toString(final boolean ansi) {
        return ansi ? getAnsiBuffer() : getPlainBuffer();
    }

    public String toString() {
        return toString(ansiEnabled);
    }

    // Appends unstyled text to both buffers.
    public ANSIBuffer append(final String str) {
        ansiBuffer.append(str);
        plainBuffer.append(str);
        return this;
    }

    // Appends str wrapped in the given attribute code (and a trailing reset)
    // to the ANSI buffer; the plain buffer receives only the raw text.
    public ANSIBuffer attrib(final String str, final int code) {
        ansiBuffer.append(ANSICodes.attrib(code)).append(str).append(ANSICodes.attrib(ANSICodes.OFF));
        plainBuffer.append(str);
        return this;
    }

    public ANSIBuffer red(final String str) {
        return attrib(str, ANSICodes.FG_RED);
    }

    public ANSIBuffer blue(final String str) {
        return attrib(str, ANSICodes.FG_BLUE);
    }

    public ANSIBuffer green(final String str) {
        return attrib(str, ANSICodes.FG_GREEN);
    }

    public ANSIBuffer black(final String str) {
        return attrib(str, ANSICodes.FG_BLACK);
    }

    public ANSIBuffer yellow(final String str) {
        return attrib(str, ANSICodes.FG_YELLOW);
    }

    public ANSIBuffer magenta(final String str) {
        return attrib(str, ANSICodes.FG_MAGENTA);
    }

    public ANSIBuffer cyan(final String str) {
        return attrib(str, ANSICodes.FG_CYAN);
    }

    public ANSIBuffer bold(final String str) {
        return attrib(str, ANSICodes.BOLD);
    }

    public ANSIBuffer underscore(final String str) {
        return attrib(str, ANSICodes.UNDERSCORE);
    }

    public ANSIBuffer blink(final String str) {
        return attrib(str, ANSICodes.BLINK);
    }

    public ANSIBuffer reverse(final String str) {
        return attrib(str, ANSICodes.REVERSE);
    }

    /** ANSI/VT100 escape-sequence helpers and attribute constants. */
    public static class ANSICodes {
        static final int OFF = 0;
        static final int BOLD = 1;
        static final int UNDERSCORE = 4;
        static final int BLINK = 5;
        static final int REVERSE = 7;
        static final int CONCEALED = 8;
        static final int FG_BLACK = 30;
        static final int FG_RED = 31;
        static final int FG_GREEN = 32;
        static final int FG_YELLOW = 33;
        static final int FG_BLUE = 34;
        static final int FG_MAGENTA = 35;
        static final int FG_CYAN = 36;
        static final int FG_WHITE = 37;
        // ASCII escape character; every sequence below starts with ESC + '['.
        static final char ESC = 27;

        /**
         * Constructor is private since this is a utility class.
         */
        private ANSICodes() {}

        /**
         * Sets the screen mode. The mode will be one of the following values:
         *
         * <pre>
         * mode description
         * ----------------------------------------
         * 0 40 x 148 x 25 monochrome (text)
         * 1 40 x 148 x 25 color (text)
         * 2 80 x 148 x 25 monochrome (text)
         * 3 80 x 148 x 25 color (text)
         * 4 320 x 148 x 200 4-color (graphics)
         * 5 320 x 148 x 200 monochrome (graphics)
         * 6 640 x 148 x 200 monochrome (graphics)
         * 7 Enables line wrapping
         * 13 320 x 148 x 200 color (graphics)
         * 14 640 x 148 x 200 color (16-color graphics)
         * 15 640 x 148 x 350 monochrome (2-color graphics)
         * 16 640 x 148 x 350 color (16-color graphics)
         * 17 640 x 148 x 480 monochrome (2-color graphics)
         * 18 640 x 148 x 480 color (16-color graphics)
         * 19 320 x 148 x 200 color (256-color graphics)
         * </pre>
         */
        public static String setmode(final int mode) {
            return ESC + "[=" + mode + "h";
        }

        /**
         * Same as setmode () except for mode = 7, which disables line wrapping (useful for writing the
         * right-most column without scrolling to the next line).
         */
        public static String resetmode(final int mode) {
            return ESC + "[=" + mode + "l";
        }

        /**
         * Clears the screen and moves the cursor to the home postition.
         */
        public static String clrscr() {
            return ESC + "[2J";
        }

        /**
         * Removes all characters from the current cursor position until the end of the line.
         */
        public static String clreol() {
            return ESC + "[K";
        }

        /**
         * Moves the cursor n positions to the left. If n is greater or equal to the current cursor
         * column, the cursor is moved to the first column.
         */
        public static String left(final int n) {
            return ESC + "[" + n + "D";
        }

        /**
         * Moves the cursor n positions to the right. If n plus the current cursor column is greater
         * than the rightmost column, the cursor is moved to the rightmost column.
         */
        public static String right(final int n) {
            return ESC + "[" + n + "C";
        }

        /**
         * Moves the cursor n rows up without changing the current column. If n is greater than or equal
         * to the current row, the cursor is placed in the first row.
         */
        public static String up(final int n) {
            return ESC + "[" + n + "A";
        }

        /**
         * Moves the cursor n rows down. If n plus the current row is greater than the bottom row, the
         * cursor is moved to the bottom row.
         */
        public static String down(final int n) {
            return ESC + "[" + n + "B";
        }

        /*
         * Moves the cursor to the given row and column. (1,1) represents the upper left corner. The
         * lower right corner of a usual DOS screen is (25, 80).
         */
        public static String gotoxy(final int row, final int column) {
            return ESC + "[" + row + ";" + column + "H";
        }

        /**
         * Saves the current cursor position.
         */
        public static String save() {
            return ESC + "[s";
        }

        /**
         * Restores the saved cursor position.
         */
        public static String restore() {
            return ESC + "[u";
        }

        /**
         * Sets the character attribute. It will be one of the following character attributes:
         *
         * <pre>
         * Text attributes
         * 0 All attributes off
         * 1 Bold on
         * 4 Underscore (on monochrome display adapter only)
         * 5 Blink on
         * 7 Reverse video on
         * 8 Concealed on
         *
         * Foreground colors
         * 30 Black
         * 31 Red
         * 32 Green
         * 33 Yellow
         * 34 Blue
         * 35 Magenta
         * 36 Cyan
         * 37 White
         *
         * Background colors
         * 40 Black
         * 41 Red
         * 42 Green
         * 43 Yellow
         * 44 Blue
         * 45 Magenta
         * 46 Cyan
         * 47 White
         * </pre>
         *
         * The attributes remain in effect until the next attribute command is sent.
         */
        public static String attrib(final int attr) {
            return ESC + "[" + attr + "m";
        }

        /**
         * Sets the key with the given code to the given value. code must be derived from the DOS
         * ANSI.SYS key-code table, value must be any semicolon-separated combination of String
         * (enclosed in double quotes) and numeric values. For example, to set F1 to the String
         * "Hello F1", followed by a CRLF sequence, one can use:
         * ANSI.setkey ("0;59", "\"Hello F1\";13;10").
         *
         * NOTE(review): the original key-code table in this comment was unterminated (missing
         * closing comment delimiter) and has been condensed; see an ANSI.SYS key-code reference
         * for the full code/SHIFT/CTRL/ALT table.
         */
        public static String setkey(final String code, final String value) {
            return ESC + "[" + code + ";" + value + "p";
        }
    }

    // Manual smoke test: remaps a key, then echoes input lines until EOF.
    public static void main(final String[] args) throws Exception {
        // sequence, one can use: ANSI.setkey ("0;59", "\"Hello F1\";13;10").
        BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
        System.out.print(ANSICodes.setkey("97", "97;98;99;13") + ANSICodes.attrib(ANSICodes.OFF));
        System.out.flush();
        String line;
        while ((line = reader.readLine()) != null) {
            System.out.println("GOT: " + line);
        }
    }

    // "Bright" color mode can be requested via either system property.
    private static final boolean ROO_BRIGHT_COLORS = Boolean.getBoolean("roo.bright");
    private static final boolean SHELL_BRIGHT_COLORS = Boolean.getBoolean("spring.shell.bright");
    private static final boolean BRIGHT_COLORS = ROO_BRIGHT_COLORS || SHELL_BRIGHT_COLORS;

    /**
     * Factory for an ANSIBuffer with platform tweaks: an extra reset after reverse video on
     * Windows, and bright variants of foreground colors when BRIGHT_COLORS is enabled.
     */
    public static ANSIBuffer getANSIBuffer() {
        final char esc = (char) 27;
        return new ANSIBuffer() {
            @Override
            public ANSIBuffer reverse(final String str) {
                if (OsUtils.isWindows()) {
                    return super.reverse(str).append(ANSICodes.attrib(esc));
                }
                return super.reverse(str);
            };

            @Override
            public ANSIBuffer attrib(final String str, final int code) {
                if (BRIGHT_COLORS && 30 <= code && code <= 37) {
                    // This is a color code: add a 'bright' code
                    return append(esc + "[" + code + ";1m").append(str).append(ANSICodes.attrib(0));
                }
                return super.attrib(str, code);
            }
        };
    }
}
|
prasi-in/geode
|
geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/jline/ANSIBuffer.java
|
Java
|
apache-2.0
| 12,139 |
/**
* MuleSoft Examples
* Copyright 2014 MuleSoft, Inc.
*
* This product includes software developed at
* MuleSoft, Inc. (http://www.mulesoft.com/).
*/
package com.mulesoft.se.orders;
/**
 * Outcome of processing an order: it is either accepted or rejected.
 */
public enum Status {
    ACCEPTED,
    REJECTED
}
|
hirosan/anypoint-examples
|
service-orchestration-and-choice-routing/src/main/java/com/mulesoft/se/orders/Status.java
|
Java
|
apache-2.0
| 237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zookeeper;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.zookeeper.server.watch.PathParentIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Manage watchers and handle events generated by the {@link ClientCnxn} object.
*
* This class is intended to be packaged-private so that it doesn't serve
* as part of ZooKeeper client API.
*/
class ZKWatchManager implements ClientWatchManager {

    private static final Logger LOG = LoggerFactory.getLogger(ZKWatchManager.class);

    // Path -> watchers, one map per watch category. Each map is guarded by
    // synchronizing on the map object itself; no lock covers more than one map
    // at a time.
    private final Map<String, Set<Watcher>> dataWatches = new HashMap<>();
    private final Map<String, Set<Watcher>> existWatches = new HashMap<>();
    private final Map<String, Set<Watcher>> childWatches = new HashMap<>();
    private final Map<String, Set<Watcher>> persistentWatches = new HashMap<>();
    private final Map<String, Set<Watcher>> persistentRecursiveWatches = new HashMap<>();

    // When true, watches are dropped (not re-armed) after a non-SyncConnected state change.
    private final boolean disableAutoWatchReset;

    private volatile Watcher defaultWatcher;

    ZKWatchManager(boolean disableAutoWatchReset, Watcher defaultWatcher) {
        this.disableAutoWatchReset = disableAutoWatchReset;
        this.defaultWatcher = defaultWatcher;
    }

    void setDefaultWatcher(Watcher defaultWatcher) {
        this.defaultWatcher = defaultWatcher;
    }

    Watcher getDefaultWatcher() {
        return defaultWatcher;
    }

    // The get*WatchList() accessors return snapshot copies of the watched paths.
    List<String> getDataWatchList() {
        synchronized (dataWatches) {
            return new ArrayList<>(dataWatches.keySet());
        }
    }

    List<String> getChildWatchList() {
        synchronized (childWatches) {
            return new ArrayList<>(childWatches.keySet());
        }
    }

    List<String> getExistWatchList() {
        synchronized (existWatches) {
            return new ArrayList<>(existWatches.keySet());
        }
    }

    List<String> getPersistentWatchList() {
        synchronized (persistentWatches) {
            return new ArrayList<>(persistentWatches.keySet());
        }
    }

    List<String> getPersistentRecursiveWatchList() {
        synchronized (persistentRecursiveWatches) {
            return new ArrayList<>(persistentRecursiveWatches.keySet());
        }
    }

    // NOTE: the following accessors expose the live maps; callers must
    // synchronize on the returned map when iterating.
    Map<String, Set<Watcher>> getDataWatches() {
        return dataWatches;
    }

    Map<String, Set<Watcher>> getExistWatches() {
        return existWatches;
    }

    Map<String, Set<Watcher>> getChildWatches() {
        return childWatches;
    }

    Map<String, Set<Watcher>> getPersistentWatches() {
        return persistentWatches;
    }

    Map<String, Set<Watcher>> getPersistentRecursiveWatches() {
        return persistentRecursiveWatches;
    }

    // Null-safe addAll helper.
    private void addTo(Set<Watcher> from, Set<Watcher> to) {
        if (from != null) {
            to.addAll(from);
        }
    }

    /**
     * Removes the given watcher (or all watchers if null) of the given type on
     * clientPath, returning the removed watchers grouped by removal event type.
     *
     * @throws KeeperException.NoWatcherException when no matching watcher exists
     * @throws KeeperException when rc reports a server-side failure and local is false
     */
    public Map<Watcher.Event.EventType, Set<Watcher>> removeWatcher(
        String clientPath,
        Watcher watcher,
        Watcher.WatcherType watcherType,
        boolean local,
        int rc
    ) throws KeeperException {
        // Validate the provided znode path contains the given watcher of
        // watcherType
        containsWatcher(clientPath, watcher, watcherType);

        Map<Watcher.Event.EventType, Set<Watcher>> removedWatchers = new HashMap<>();
        HashSet<Watcher> childWatchersToRem = new HashSet<>();
        removedWatchers.put(Watcher.Event.EventType.ChildWatchRemoved, childWatchersToRem);
        HashSet<Watcher> dataWatchersToRem = new HashSet<>();
        removedWatchers.put(Watcher.Event.EventType.DataWatchRemoved, dataWatchersToRem);
        HashSet<Watcher> persistentWatchersToRem = new HashSet<>();
        removedWatchers.put(Watcher.Event.EventType.PersistentWatchRemoved, persistentWatchersToRem);
        boolean removedWatcher = false;
        switch (watcherType) {
        case Children: {
            synchronized (childWatches) {
                removedWatcher = removeWatches(childWatches, watcher, clientPath, local, rc, childWatchersToRem);
            }
            break;
        }
        case Data: {
            // Data watches may live in either dataWatches or existWatches.
            synchronized (dataWatches) {
                removedWatcher = removeWatches(dataWatches, watcher, clientPath, local, rc, dataWatchersToRem);
            }

            synchronized (existWatches) {
                boolean removedDataWatcher = removeWatches(existWatches, watcher, clientPath, local, rc, dataWatchersToRem);
                removedWatcher |= removedDataWatcher;
            }
            break;
        }
        case Any: {
            // Try every category; success if any of them held the watcher.
            synchronized (childWatches) {
                removedWatcher = removeWatches(childWatches, watcher, clientPath, local, rc, childWatchersToRem);
            }

            synchronized (dataWatches) {
                boolean removedDataWatcher = removeWatches(dataWatches, watcher, clientPath, local, rc, dataWatchersToRem);
                removedWatcher |= removedDataWatcher;
            }
            synchronized (existWatches) {
                boolean removedDataWatcher = removeWatches(existWatches, watcher, clientPath, local, rc, dataWatchersToRem);
                removedWatcher |= removedDataWatcher;
            }
            synchronized (persistentWatches) {
                boolean removedPersistentWatcher = removeWatches(persistentWatches,
                        watcher, clientPath, local, rc, persistentWatchersToRem);
                removedWatcher |= removedPersistentWatcher;
            }

            synchronized (persistentRecursiveWatches) {
                boolean removedPersistentRecursiveWatcher = removeWatches(persistentRecursiveWatches,
                        watcher, clientPath, local, rc, persistentWatchersToRem);
                removedWatcher |= removedPersistentRecursiveWatcher;
            }
        }
        }
        // Watcher function doesn't exists for the specified params
        if (!removedWatcher) {
            throw new KeeperException.NoWatcherException(clientPath);
        }
        return removedWatchers;
    }

    // True when pathVsWatchers maps path to the given watcher (or to any
    // watcher, when watcherObj is null).
    private boolean contains(String path, Watcher watcherObj, Map<String, Set<Watcher>> pathVsWatchers) {
        boolean watcherExists = true;
        if (pathVsWatchers == null || pathVsWatchers.size() == 0) {
            watcherExists = false;
        } else {
            Set<Watcher> watchers = pathVsWatchers.get(path);
            if (watchers == null) {
                watcherExists = false;
            } else if (watcherObj == null) {
                watcherExists = watchers.size() > 0;
            } else {
                watcherExists = watchers.contains(watcherObj);
            }
        }
        return watcherExists;
    }

    /**
     * Validate the provided znode path contains the given watcher and
     * watcherType
     *
     * @param path
     *            - client path
     * @param watcher
     *            - watcher object reference
     * @param watcherType
     *            - type of the watcher
     * @throws KeeperException.NoWatcherException
     */
    void containsWatcher(String path, Watcher watcher, Watcher.WatcherType watcherType) throws
        KeeperException.NoWatcherException {
        boolean containsWatcher = false;
        switch (watcherType) {
        case Children: {
            synchronized (childWatches) {
                containsWatcher = contains(path, watcher, childWatches);
            }

            synchronized (persistentWatches) {
                boolean contains_temp = contains(path, watcher,
                        persistentWatches);
                containsWatcher |= contains_temp;
            }

            synchronized (persistentRecursiveWatches) {
                boolean contains_temp = contains(path, watcher,
                        persistentRecursiveWatches);
                containsWatcher |= contains_temp;
            }
            break;
        }
        case Data: {
            synchronized (dataWatches) {
                containsWatcher = contains(path, watcher, dataWatches);
            }

            synchronized (existWatches) {
                boolean contains_temp = contains(path, watcher, existWatches);
                containsWatcher |= contains_temp;
            }

            synchronized (persistentWatches) {
                boolean contains_temp = contains(path, watcher,
                        persistentWatches);
                containsWatcher |= contains_temp;
            }

            synchronized (persistentRecursiveWatches) {
                boolean contains_temp = contains(path, watcher,
                        persistentRecursiveWatches);
                containsWatcher |= contains_temp;
            }
            break;
        }
        case Any: {
            synchronized (childWatches) {
                containsWatcher = contains(path, watcher, childWatches);
            }

            synchronized (dataWatches) {
                boolean contains_temp = contains(path, watcher, dataWatches);
                containsWatcher |= contains_temp;
            }

            synchronized (existWatches) {
                boolean contains_temp = contains(path, watcher, existWatches);
                containsWatcher |= contains_temp;
            }

            synchronized (persistentWatches) {
                boolean contains_temp = contains(path, watcher,
                        persistentWatches);
                containsWatcher |= contains_temp;
            }

            synchronized (persistentRecursiveWatches) {
                boolean contains_temp = contains(path, watcher,
                        persistentRecursiveWatches);
                containsWatcher |= contains_temp;
            }
        }
        }
        // Watcher function doesn't exists for the specified params
        if (!containsWatcher) {
            throw new KeeperException.NoWatcherException(path);
        }
    }

    /**
     * Removes the given watcher (or all watchers when null) for path from
     * pathVsWatcher, collecting removed watchers into removedWatchers.
     * Callers must hold the monitor of pathVsWatcher.
     */
    protected boolean removeWatches(
        Map<String, Set<Watcher>> pathVsWatcher,
        Watcher watcher,
        String path,
        boolean local,
        int rc,
        Set<Watcher> removedWatchers) throws KeeperException {
        if (!local && rc != KeeperException.Code.OK.intValue()) {
            throw KeeperException.create(KeeperException.Code.get(rc), path);
        }
        boolean success = false;
        // When local flag is true, remove watchers for the given path
        // irrespective of rc. Otherwise shouldn't remove watchers locally
        // when sees failure from server.
        if (rc == KeeperException.Code.OK.intValue() || (local && rc != KeeperException.Code.OK.intValue())) {
            // Remove all the watchers for the given path
            if (watcher == null) {
                Set<Watcher> pathWatchers = pathVsWatcher.remove(path);
                if (pathWatchers != null) {
                    // found path watchers
                    removedWatchers.addAll(pathWatchers);
                    success = true;
                }
            } else {
                Set<Watcher> watchers = pathVsWatcher.get(path);
                if (watchers != null) {
                    if (watchers.remove(watcher)) {
                        // found path watcher
                        removedWatchers.add(watcher);
                        // cleanup <path vs watchlist>
                        if (watchers.size() <= 0) {
                            pathVsWatcher.remove(path);
                        }
                        success = true;
                    }
                }
            }
        }
        return success;
    }

    /* (non-Javadoc)
     * @see org.apache.zookeeper.ClientWatchManager#materialize(Event.KeeperState,
     * Event.EventType, java.lang.String)
     */
    @Override
    public Set<Watcher> materialize(
        Watcher.Event.KeeperState state,
        Watcher.Event.EventType type,
        String clientPath
    ) {
        final Set<Watcher> result = new HashSet<>();

        switch (type) {
        case None:
            // Connection-state event: deliver to the default watcher and every
            // registered watcher; one-shot watches may be cleared depending on
            // disableAutoWatchReset and the new state.
            if (defaultWatcher != null) {
                result.add(defaultWatcher);
            }
            boolean clear = disableAutoWatchReset && state != Watcher.Event.KeeperState.SyncConnected;
            synchronized (dataWatches) {
                for (Set<Watcher> ws : dataWatches.values()) {
                    result.addAll(ws);
                }
                if (clear) {
                    dataWatches.clear();
                }
            }

            synchronized (existWatches) {
                for (Set<Watcher> ws : existWatches.values()) {
                    result.addAll(ws);
                }
                if (clear) {
                    existWatches.clear();
                }
            }

            synchronized (childWatches) {
                for (Set<Watcher> ws : childWatches.values()) {
                    result.addAll(ws);
                }
                if (clear) {
                    childWatches.clear();
                }
            }

            synchronized (persistentWatches) {
                for (Set<Watcher> ws: persistentWatches.values()) {
                    result.addAll(ws);
                }
            }

            synchronized (persistentRecursiveWatches) {
                for (Set<Watcher> ws: persistentRecursiveWatches.values()) {
                    result.addAll(ws);
                }
            }

            return result;
        case NodeDataChanged:
        case NodeCreated:
            // One-shot data/exists watches fire once and are removed here.
            synchronized (dataWatches) {
                addTo(dataWatches.remove(clientPath), result);
            }
            synchronized (existWatches) {
                addTo(existWatches.remove(clientPath), result);
            }
            addPersistentWatches(clientPath, result);
            break;
        case NodeChildrenChanged:
            synchronized (childWatches) {
                addTo(childWatches.remove(clientPath), result);
            }
            addPersistentWatches(clientPath, result);
            break;
        case NodeDeleted:
            synchronized (dataWatches) {
                addTo(dataWatches.remove(clientPath), result);
            }
            // TODO This shouldn't be needed, but just in case
            synchronized (existWatches) {
                Set<Watcher> list = existWatches.remove(clientPath);
                if (list != null) {
                    addTo(list, result);
                    LOG.warn("We are triggering an exists watch for delete! Shouldn't happen!");
                }
            }
            synchronized (childWatches) {
                addTo(childWatches.remove(clientPath), result);
            }
            addPersistentWatches(clientPath, result);
            break;
        default:
            String errorMsg = String.format(
                "Unhandled watch event type %s with state %s on path %s",
                type,
                state,
                clientPath);
            LOG.error(errorMsg);
            throw new RuntimeException(errorMsg);
        }

        return result;
    }

    // Adds persistent watchers for clientPath and, for recursive watches,
    // every ancestor of clientPath as well. Persistent watches are not removed
    // when they fire.
    private void addPersistentWatches(String clientPath, Set<Watcher> result) {
        synchronized (persistentWatches) {
            addTo(persistentWatches.get(clientPath), result);
        }
        synchronized (persistentRecursiveWatches) {
            for (String path : PathParentIterator.forAll(clientPath).asIterable()) {
                addTo(persistentRecursiveWatches.get(path), result);
            }
        }
    }

}
|
maoling/zookeeper
|
zookeeper-server/src/main/java/org/apache/zookeeper/ZKWatchManager.java
|
Java
|
apache-2.0
| 16,438 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.operator.aggregation.state;
import io.airlift.stats.cardinality.HyperLogLog;
import io.prestosql.spi.function.AccumulatorState;
import io.prestosql.spi.function.AccumulatorStateMetadata;
/**
 * Accumulator state carrying a {@link HyperLogLog} sketch, with serialization
 * and creation delegated to the serializer/factory named in the metadata.
 */
@AccumulatorStateMetadata(stateSerializerClass = HyperLogLogStateSerializer.class, stateFactoryClass = HyperLogLogStateFactory.class)
public interface HyperLogLogState
        extends AccumulatorState
{
    /** Returns the current sketch for this state. */
    HyperLogLog getHyperLogLog();

    /** Replaces the sketch held by this state. */
    void setHyperLogLog(HyperLogLog value);

    /**
     * Records a change (delta, in bytes) in memory usage.
     * NOTE(review): presumably reported to the engine's memory accounting —
     * confirm against the state factory implementation.
     */
    void addMemoryUsage(int value);
}
|
smartnews/presto
|
presto-main/src/main/java/io/prestosql/operator/aggregation/state/HyperLogLogState.java
|
Java
|
apache-2.0
| 1,093 |
package com.lody.welike.utils;
import android.os.Handler;
import android.os.Looper;
/**
* 封装了在<b>子线程时</b>到<b>主线程</b>运行一段逻辑的操作.
*
* @author Lody
* @version 1.0
*/
public class UiHandler {

    // 'volatile' is required for the double-checked locking in
    // initUIHandlerIfNeed(): without it, a thread could observe a non-null but
    // not-yet-fully-constructed Handler published by another thread.
    private static volatile Handler sUiHandler;

    /**
     * Runs the given task on the main (UI) thread.
     *
     * @param runnable task to post to the UI thread
     */
    public static void runOnUiThread(Runnable runnable) {
        initUIHandlerIfNeed();
        sUiHandler.post(runnable);
    }

    /**
     * Runs the given task on the main (UI) thread after a delay.
     *
     * @param runnable   task to post to the UI thread
     * @param delayMills delay in milliseconds before the task runs
     */
    public static void runOnUiThreadDelayed(Runnable runnable, long delayMills) {
        initUIHandlerIfNeed();
        sUiHandler.postDelayed(runnable, delayMills);
    }

    // Lazily creates the shared main-looper Handler using double-checked locking.
    private static void initUIHandlerIfNeed() {
        if (sUiHandler == null) {
            synchronized (UiHandler.class) {
                if (sUiHandler == null) {
                    sUiHandler = new Handler(Looper.getMainLooper());
                }
            }
        }
    }
}
|
YlJava110/WelikeAndroid
|
WelikeAndroid/src/com/lody/welike/utils/UiHandler.java
|
Java
|
apache-2.0
| 1,072 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories.gcs;
import com.google.api.services.storage.Storage;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import java.util.function.Function;
import static org.elasticsearch.common.settings.Setting.Property;
import static org.elasticsearch.common.settings.Setting.boolSetting;
import static org.elasticsearch.common.settings.Setting.byteSizeSetting;
import static org.elasticsearch.common.settings.Setting.simpleString;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
/**
 * Snapshot repository backed by a Google Cloud Storage bucket.
 * Repository settings: bucket (required), base_path, compress, chunk_size, client.
 */
class GoogleCloudStorageRepository extends BlobStoreRepository {

    // package private for testing
    static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
    static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB);

    static final String TYPE = "gcs";

    static final Setting<String> BUCKET =
            simpleString("bucket", Property.NodeScope, Property.Dynamic);
    static final Setting<String> BASE_PATH =
            simpleString("base_path", Property.NodeScope, Property.Dynamic);
    static final Setting<Boolean> COMPRESS =
            boolSetting("compress", false, Property.NodeScope, Property.Dynamic);
    static final Setting<ByteSizeValue> CHUNK_SIZE =
            byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope, Property.Dynamic);
    static final Setting<String> CLIENT_NAME = new Setting<>("client", "default", Function.identity());

    private final ByteSizeValue chunkSize;
    private final boolean compress;
    private final BlobPath basePath;
    private final GoogleCloudStorageBlobStore blobStore;

    GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment,
                                 NamedXContentRegistry namedXContentRegistry,
                                 GoogleCloudStorageService storageService) throws Exception {
        super(metadata, environment.settings(), namedXContentRegistry);

        // 'bucket' is mandatory; getSetting throws a RepositoryException when absent/empty.
        String bucket = getSetting(BUCKET, metadata);
        String clientName = CLIENT_NAME.get(metadata.settings());
        String basePath = BASE_PATH.get(metadata.settings());
        if (Strings.hasLength(basePath)) {
            // Split "a/b/c" into individual path elements of the blob path.
            BlobPath path = new BlobPath();
            for (String elem : basePath.split("/")) {
                path = path.add(elem);
            }
            this.basePath = path;
        } else {
            this.basePath = BlobPath.cleanPath();
        }

        this.compress = getSetting(COMPRESS, metadata);
        this.chunkSize = getSetting(CHUNK_SIZE, metadata);

        logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress);

        // Client creation may touch the filesystem/network, hence the privileged block.
        Storage client = SocketAccess.doPrivilegedIOException(() -> storageService.createClient(clientName));
        this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, client);
    }

    @Override
    protected BlobStore blobStore() {
        return blobStore;
    }

    @Override
    protected BlobPath basePath() {
        return basePath;
    }

    @Override
    protected boolean isCompress() {
        return compress;
    }

    @Override
    protected ByteSizeValue chunkSize() {
        return chunkSize;
    }

    /**
     * Get a given setting from the repository settings, throwing a {@link RepositoryException} if the setting does not exist or is empty.
     */
    static <T> T getSetting(Setting<T> setting, RepositoryMetaData metadata) {
        T value = setting.get(metadata.settings());
        if (value == null) {
            throw new RepositoryException(metadata.name(), "Setting [" + setting.getKey() + "] is not defined for repository");
        }
        if ((value instanceof String) && (Strings.hasText((String) value)) == false) {
            throw new RepositoryException(metadata.name(), "Setting [" + setting.getKey() + "] is empty for repository");
        }
        return value;
    }
}
|
s1monw/elasticsearch
|
plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java
|
Java
|
apache-2.0
| 5,384 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.redis.topology;
import com.google.common.collect.Maps;
import java.util.Map;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.IBasicBolt;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
public class WordCounter implements IBasicBolt {
    // Running per-word counts, updated on every incoming tuple.
    private Map<String, Integer> wordCounter = Maps.newHashMap();

    @Override
    public void prepare(Map<String, Object> topoConf, TopologyContext context) {
    }

    /**
     * Increments the count for the word carried in the "word" field of the
     * incoming tuple and emits the word together with its updated count
     * (count is emitted as a String).
     */
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        String word = input.getStringByField("word");
        // Previously the map was written twice on the "already seen" branch
        // (once inside the if, once after it) and read three times; a single
        // null-checked update is equivalent and avoids the redundant work.
        Integer previous = wordCounter.get(word);
        int count = (previous == null) ? 1 : previous + 1;
        wordCounter.put(word, count);
        collector.emit(new Values(word, String.valueOf(count)));
    }

    @Override
    public void cleanup() {
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word", "count"));
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        // No component-specific configuration.
        return null;
    }
}
|
kishorvpatil/incubator-storm
|
examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WordCounter.java
|
Java
|
apache-2.0
| 2,215 |
/*
* Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.net;
import java.io.IOException;
import java.io.FileDescriptor;
/*
* On Unix systems we simply delegate to native methods.
*
* @author Chris Hegarty
*/
class PlainSocketImpl extends AbstractPlainSocketImpl
{
    // One-time native initialization, run before any instance is created.
    static {
        initProto();
    }
    /**
     * Constructs an empty instance.
     */
    PlainSocketImpl() { }
    /**
     * Constructs an instance with the given file descriptor.
     */
    PlainSocketImpl(FileDescriptor fd) {
        this.fd = fd;
    }
    // The socket operations below delegate directly to the native layer
    // (see the class comment); signatures mirror AbstractPlainSocketImpl.
    native void socketCreate(boolean isServer) throws IOException;
    native void socketConnect(InetAddress address, int port, int timeout)
        throws IOException;
    native void socketBind(InetAddress address, int port)
        throws IOException;
    native void socketListen(int count) throws IOException;
    native void socketAccept(SocketImpl s) throws IOException;
    native int socketAvailable() throws IOException;
    native void socketClose0(boolean useDeferredClose) throws IOException;
    native void socketShutdown(int howto) throws IOException;
    static native void initProto();
    native void socketSetOption(int cmd, boolean on, Object value)
        throws SocketException;
    native int socketGetOption(int opt, Object iaContainerObj) throws SocketException;
    native int socketGetOption1(int opt, Object iaContainerObj, FileDescriptor fd)
        throws SocketException;
    native void socketSendUrgentData(int data) throws IOException;
}
|
andreagenso/java2scala
|
test/J2s/java/openjdk-6-src-b27/jdk/src/solaris/classes/java/net/PlainSocketImpl.java
|
Java
|
apache-2.0
| 2,691 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.annotations.*;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.TimeUnits;
import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
/**
* Base testcase for lucene based testing. This class should be used if low level lucene features are tested.
*/
@Listeners({
        ReproduceInfoPrinter.class
})
@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
@ThreadLeakScope(Scope.SUITE)
@ThreadLeakLingering(linger = 5000) // 5 sec lingering
@TimeoutSuite(millis = TimeUnits.HOUR)
@SuppressCodecs("Lucene3x")
@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
public abstract class ElasticsearchLuceneTestCase extends LuceneTestCase {

    // Captured at class-initialization time, so it is unaffected by any later
    // Codec.setDefault(...) calls made during a test run.
    private static final Codec DEFAULT_CODEC = Codec.getDefault();

    /**
     * Returns the lucene default codec without any randomization
     */
    public static Codec actualDefaultCodec() {
        return DEFAULT_CODEC;
    }

    /**
     * Forcefully reset the default codec
     */
    public static void forceDefaultCodec() {
        Codec.setDefault(DEFAULT_CODEC);
    }

    // Convenience delegate to the randomized-testing framework's scaled random.
    public static int scaledRandomIntBetween(int min, int max) {
        return RandomizedTest.scaledRandomIntBetween(min, max);
    }
}
|
combinatorist/elasticsearch
|
src/test/java/org/elasticsearch/test/ElasticsearchLuceneTestCase.java
|
Java
|
apache-2.0
| 2,351 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;
@Category({ RegionServerTests.class, MediumTests.class })
public class TestWALReplay extends AbstractTestWALReplay {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestWALReplay.class);

  // Pin the WAL provider to "filesystem" before the shared setup runs, so the
  // inherited replay tests exercise the FSHLog implementation.
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    Configuration conf = AbstractTestWALReplay.TEST_UTIL.getConfiguration();
    conf.set(WALFactory.WAL_PROVIDER, "filesystem");
    AbstractTestWALReplay.setUpBeforeClass();
  }

  // Supplies the concrete WAL implementation (an initialized FSHLog) used by
  // the replay tests in the abstract base class.
  @Override
  protected WAL createWAL(Configuration c, Path hbaseRootDir, String logName) throws IOException {
    FSHLog wal = new FSHLog(FileSystem.get(c), hbaseRootDir, logName, c);
    wal.init();
    // Set down maximum recovery so the dfsclient doesn't linger retrying something
    // long gone.
    HBaseTestingUtil.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
    return wal;
  }
}
|
mahak/hbase
|
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
|
Java
|
apache-2.0
| 2,328 |
/*
* Copyright 2000-2011 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.codeInsight.actions;
import com.intellij.codeInsight.CodeInsightBundle;
import com.intellij.lang.ImportOptimizer;
import com.intellij.lang.LanguageImportStatements;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.EmptyRunnable;
import com.intellij.psi.PsiDirectory;
import com.intellij.psi.PsiFile;
import com.intellij.util.IncorrectOperationException;
import org.jetbrains.annotations.NotNull;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.FutureTask;
public class OptimizeImportsProcessor extends AbstractLayoutCodeProcessor {
  private static final String PROGRESS_TEXT = CodeInsightBundle.message("progress.text.optimizing.imports");
  private static final String COMMAND_NAME = CodeInsightBundle.message("process.optimize.imports");

  public OptimizeImportsProcessor(Project project) {
    super(project, COMMAND_NAME, PROGRESS_TEXT, false);
  }

  public OptimizeImportsProcessor(Project project, Module module) {
    super(project, module, COMMAND_NAME, PROGRESS_TEXT, false);
  }

  public OptimizeImportsProcessor(Project project, PsiDirectory directory, boolean includeSubdirs) {
    super(project, directory, includeSubdirs, PROGRESS_TEXT, COMMAND_NAME, false);
  }

  public OptimizeImportsProcessor(Project project, PsiFile file) {
    super(project, file, PROGRESS_TEXT, COMMAND_NAME, false);
  }

  public OptimizeImportsProcessor(Project project, PsiFile[] files, Runnable postRunnable) {
    this(project, files, COMMAND_NAME, postRunnable);
  }

  public OptimizeImportsProcessor(Project project, PsiFile[] files, String commandName, Runnable postRunnable) {
    super(project, files, PROGRESS_TEXT, commandName, postRunnable, false);
  }

  /**
   * Collects one runnable per (optimizer, supported file-view root) pair and
   * returns a task that executes them all in collection order.
   */
  @Override
  @NotNull
  protected FutureTask<Boolean> preprocessFile(@NotNull final PsiFile file, boolean processChangedTextOnly) throws IncorrectOperationException {
    final Set<ImportOptimizer> optimizers = LanguageImportStatements.INSTANCE.forFile(file);
    final List<PsiFile> roots = file.getViewProvider().getAllFiles();
    final List<Runnable> tasks = new ArrayList<Runnable>();
    // Keep iteration order (optimizer outer, root inner): tasks run in the
    // order they were collected.
    for (ImportOptimizer optimizer : optimizers) {
      for (PsiFile root : roots) {
        if (optimizer.supports(root)) {
          tasks.add(optimizer.processFile(root));
        }
      }
    }
    Runnable work;
    if (tasks.isEmpty()) {
      work = EmptyRunnable.getInstance();
    }
    else {
      work = new Runnable() {
        @Override
        public void run() {
          for (Runnable task : tasks) {
            task.run();
          }
        }
      };
    }
    return new FutureTask<Boolean>(work, true);
  }
}
|
romankagan/DDBWorkbench
|
platform/lang-impl/src/com/intellij/codeInsight/actions/OptimizeImportsProcessor.java
|
Java
|
apache-2.0
| 3,305 |
/*
* Copyright (c) 2005-2011 Grameen Foundation USA
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* See also http://www.apache.org/licenses/LICENSE-2.0.html for an
* explanation of the license and how it is applied.
*/
package org.mifos.framework.persistence;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.sql.SQLException;
import junit.framework.TestCase;
import junitx.framework.StringAssert;
import org.mifos.framework.ApplicationInitializer;
import org.mifos.framework.DatabaseErrorCode;
import org.testng.annotations.Test;
@Test(groups={"unit", "fastTestsSuite"}, dependsOnGroups={"productMixTestSuite"})
public class DatabaseInitFilterTest extends TestCase {

    // Installs an upgrade-failure database error so each test starts from the
    // failure state; individual tests override it as needed.
    @Override
    protected void setUp() throws Exception {
        ApplicationInitializer.setDatabaseError(DatabaseErrorCode.UPGRADE_FAILURE, "test death message",
                new SQLException("bletch ick sputter die"));
    }

    @Override
    protected void tearDown() throws Exception {
        ApplicationInitializer.clearDatabaseError();
    }

    public void testUpgradeFailed() throws Exception {
        ApplicationInitializer.setDatabaseError(DatabaseErrorCode.UPGRADE_FAILURE, "test death message",
                new SQLException("bletch ick sputter die"));
        String output = printError();
        StringAssert.assertContains("Please apply upgrade DB and restart the server", output);
        StringAssert.assertContains("Correct the error and restart the application", output);
        StringAssert.assertContains("bletch ick sputter die", output);
    }

    public void testInexplicableFailure() throws Exception {
        ApplicationInitializer.clearDatabaseError();
        String output = printError();
        StringAssert.assertContains("<p>I don't have any further details, unfortunately.</p>", output);
        StringAssert.assertNotContains("Exception", output);
    }

    /**
     * Renders the filter's error page into a string.
     * The former {@code int version} parameter was unused (callers passed a
     * meaningless 66 that was ignored), so it has been removed.
     */
    private String printError() {
        StringWriter out = new StringWriter();
        new DatabaseInitFilter().printErrorPage(new PrintWriter(out));
        return out.toString();
    }
}
|
madhav123/gkmaster
|
application/src/test/java/org/mifos/framework/persistence/DatabaseInitFilterTest.java
|
Java
|
apache-2.0
| 2,685 |
/*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.eureka;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
import com.netflix.eureka.aws.AwsBindingStrategy;
/**
* Configuration information required by the eureka server to operate.
*
* <p>
* Most of the required information is provided by the default configuration
* {@link com.netflix.eureka.DefaultEurekaServerConfig}.
*
* Note that all configurations are not effective at runtime unless and
* otherwise specified.
* </p>
*
* @author Karthik Ranganathan
*
*/
public interface EurekaServerConfig {
/**
* Gets the <em>AWS Access Id</em>. This is primarily used for
     * <em>Elastic IP Binding</em>. The access id should be provided with
* appropriate AWS permissions to bind the EIP.
*
* @return
*/
String getAWSAccessId();
/**
* Gets the <em>AWS Secret Key</em>. This is primarily used for
     * <em>Elastic IP Binding</em>. The secret key should be provided with
* appropriate AWS permissions to bind the EIP.
*
* @return
*/
String getAWSSecretKey();
/**
* Gets the number of times the server should try to bind to the candidate
* EIP.
*
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the number of times the server should try to bind to the
* candidate EIP.
*/
int getEIPBindRebindRetries();
/**
* Get the interval with which the server should check if the EIP is bound
* and should try to bind in the case if it is already not bound, iff the EIP
* is not currently bound.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the time in milliseconds.
*/
int getEIPBindingRetryIntervalMsWhenUnbound();
/**
* Gets the interval with which the server should check if the EIP is bound
* and should try to bind in the case if it is already not bound, iff the EIP
* is already bound. (so this refresh is just for steady state checks)
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the time in milliseconds.
*/
int getEIPBindingRetryIntervalMs();
/**
* Checks to see if the eureka server is enabled for self preservation.
*
* <p>
* When enabled, the server keeps track of the number of <em>renewals</em>
* it should receive from the server. Any time, the number of renewals drops
* below the threshold percentage as defined by
* {@link #getRenewalPercentThreshold()}, the server turns off expirations
     * to avert danger. This will help the server in maintaining the registry
* information in case of network problems between client and the server.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return true to enable self preservation, false otherwise.
*/
boolean shouldEnableSelfPreservation();
/**
* The minimum percentage of renewals that is expected from the clients in
* the period specified by {@link #getRenewalThresholdUpdateIntervalMs()}.
* If the renewals drop below the threshold, the expirations are disabled if
* the {@link #shouldEnableSelfPreservation()} is enabled.
*
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return value between 0 and 1 indicating the percentage. For example,
* <code>85%</code> will be specified as <code>0.85</code>.
*/
double getRenewalPercentThreshold();
/**
* The interval with which the threshold as specified in
* {@link #getRenewalPercentThreshold()} needs to be updated.
*
* @return time in milliseconds indicating the interval.
*/
int getRenewalThresholdUpdateIntervalMs();
/**
* The interval with which clients are expected to send their heartbeats. Defaults to 30
* seconds. If clients send heartbeats with different frequency, say, every 15 seconds, then
* this parameter should be tuned accordingly, otherwise, self-preservation won't work as
* expected.
*
* @return time in seconds indicating the expected interval
*/
int getExpectedClientRenewalIntervalSeconds();
/**
* The interval with which the information about the changes in peer eureka
* nodes is updated. The user can use the DNS mechanism or dynamic
* configuration provided by <a href="https://github.com/Netflix/archaius">Archaius</a> to
* change the information dynamically.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return timer in milliseconds indicating the interval.
*/
int getPeerEurekaNodesUpdateIntervalMs();
/**
* If set to true, the replicated data send in the request will be always compressed.
* This does not define response path, which is driven by "Accept-Encoding" header.
*/
boolean shouldEnableReplicatedRequestCompression();
/**
* Get the number of times the replication events should be retried with
* peers.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the number of retries.
*/
int getNumberOfReplicationRetries();
/**
* Gets the interval with which the status information about peer nodes is
* updated.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return time in milliseconds indicating the interval.
*/
int getPeerEurekaStatusRefreshTimeIntervalMs();
/**
* Gets the time to wait when the eureka server starts up unable to get
     * instances from peer nodes. It is better not to start serving right away
* during these scenarios as the information that is stored in the registry
* may not be complete.
*
* When the instance registry starts up empty, it builds over time when the
* clients start to send heartbeats and the server requests the clients for
* registration information.
*
* @return time in milliseconds.
*/
int getWaitTimeInMsWhenSyncEmpty();
/**
* Gets the timeout value for connecting to peer eureka nodes for
* replication.
*
* @return timeout value in milliseconds.
*/
int getPeerNodeConnectTimeoutMs();
/**
* Gets the timeout value for reading information from peer eureka nodes for
* replication.
*
* @return timeout value in milliseconds.
*/
int getPeerNodeReadTimeoutMs();
/**
* Gets the total number of <em>HTTP</em> connections allowed to peer eureka
* nodes for replication.
*
* @return total number of allowed <em>HTTP</em> connections.
*/
int getPeerNodeTotalConnections();
/**
* Gets the total number of <em>HTTP</em> connections allowed to a
* particular peer eureka node for replication.
*
* @return total number of allowed <em>HTTP</em> connections for a peer
* node.
*/
int getPeerNodeTotalConnectionsPerHost();
/**
* Gets the idle time after which the <em>HTTP</em> connection should be
* cleaned up.
*
* @return idle time in seconds.
*/
int getPeerNodeConnectionIdleTimeoutSeconds();
/**
* Get the time for which the delta information should be cached for the
* clients to retrieve the value without missing it.
*
* @return time in milliseconds
*/
long getRetentionTimeInMSInDeltaQueue();
/**
* Get the time interval with which the clean up task should wake up and
* check for expired delta information.
*
* @return time in milliseconds.
*/
long getDeltaRetentionTimerIntervalInMs();
/**
* Get the time interval with which the task that expires instances should
* wake up and run.
*
* @return time in milliseconds.
*/
long getEvictionIntervalTimerInMs();
/**
* Whether to use AWS API to query ASG statuses.
*
* @return true if AWS API is used, false otherwise.
*/
boolean shouldUseAwsAsgApi();
/**
* Get the timeout value for querying the <em>AWS</em> for <em>ASG</em>
* information.
*
* @return timeout value in milliseconds.
*/
int getASGQueryTimeoutMs();
/**
* Get the time interval with which the <em>ASG</em> information must be
* queried from <em>AWS</em>.
*
* @return time in milliseconds.
*/
long getASGUpdateIntervalMs();
/**
* Get the expiration value for the cached <em>ASG</em> information
*
* @return time in milliseconds.
*/
long getASGCacheExpiryTimeoutMs();
/**
* Gets the time for which the registry payload should be kept in the cache
* if it is not invalidated by change events.
*
* @return time in seconds.
*/
long getResponseCacheAutoExpirationInSeconds();
/**
* Gets the time interval with which the payload cache of the client should
* be updated.
*
* @return time in milliseconds.
*/
long getResponseCacheUpdateIntervalMs();
/**
* The {@link com.netflix.eureka.registry.ResponseCache} currently uses a two level caching
* strategy to responses. A readWrite cache with an expiration policy, and a readonly cache
* that caches without expiry.
*
* @return true if the read only cache is to be used
*/
boolean shouldUseReadOnlyResponseCache();
/**
* Checks to see if the delta information can be served to client or not.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return true if the delta information is allowed to be served, false
* otherwise.
*/
boolean shouldDisableDelta();
/**
* Get the idle time for which the status replication threads can stay
* alive.
*
* @return time in minutes.
*/
long getMaxIdleThreadInMinutesAgeForStatusReplication();
/**
* Get the minimum number of threads to be used for status replication.
*
* @return minimum number of threads to be used for status replication.
*/
int getMinThreadsForStatusReplication();
/**
* Get the maximum number of threads to be used for status replication.
*
* @return maximum number of threads to be used for status replication.
*/
int getMaxThreadsForStatusReplication();
/**
* Get the maximum number of replication events that can be allowed to back
* up in the status replication pool.
* <p>
* Depending on the memory allowed, timeout and the replication traffic,
* this value can vary.
* </p>
*
* @return the maximum number of replication events that can be allowed to
* back up.
*/
int getMaxElementsInStatusReplicationPool();
/**
* Checks whether to synchronize instances when timestamp differs.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return true, to synchronize, false otherwise.
*/
boolean shouldSyncWhenTimestampDiffers();
/**
* Get the number of times that a eureka node would try to get the registry
* information from the peers during startup.
*
* @return the number of retries
*/
int getRegistrySyncRetries();
/**
* Get the wait/sleep time between each retry sync attempts, if the prev retry failed and there are
* more retries to attempt.
*
* @return the wait time in ms between each sync retries
*/
long getRegistrySyncRetryWaitMs();
/**
* Get the maximum number of replication events that can be allowed to back
* up in the replication pool. This replication pool is responsible for all
* events except status updates.
* <p>
* Depending on the memory allowed, timeout and the replication traffic,
* this value can vary.
* </p>
*
* @return the maximum number of replication events that can be allowed to
* back up.
*/
int getMaxElementsInPeerReplicationPool();
/**
* Get the idle time for which the replication threads can stay alive.
*
* @return time in minutes.
*/
long getMaxIdleThreadAgeInMinutesForPeerReplication();
/**
* Get the minimum number of threads to be used for replication.
*
* @return minimum number of threads to be used for replication.
*/
int getMinThreadsForPeerReplication();
/**
* Get the maximum number of threads to be used for replication.
*
* @return maximum number of threads to be used for replication.
*/
int getMaxThreadsForPeerReplication();
/**
* Get the minimum number of available peer replication instances
* for this instance to be considered healthy. The design of eureka allows
* for an instance to continue operating with zero peers, but that would not
* be ideal.
* <p>
* The default value of -1 is interpreted as a marker to not compare
* the number of replicas. This would be done to either disable this check
* or to run eureka in a single node configuration.
*
* @return minimum number of available peer replication instances
* for this instance to be considered healthy.
*/
int getHealthStatusMinNumberOfAvailablePeers();
/**
* Get the time in milliseconds to try to replicate before dropping
* replication events.
*
* @return time in milliseconds
*/
int getMaxTimeForReplication();
/**
* Checks whether the connections to replicas should be primed. In AWS, the
* firewall requires sometime to establish network connection for new nodes.
*
* @return true, if connections should be primed, false otherwise.
*/
boolean shouldPrimeAwsReplicaConnections();
/**
* Checks to see if the delta information can be served to client or not for
* remote regions.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return true if the delta information is allowed to be served, false
* otherwise.
*/
boolean shouldDisableDeltaForRemoteRegions();
/**
* Gets the timeout value for connecting to peer eureka nodes for remote
* regions.
*
* @return timeout value in milliseconds.
*/
int getRemoteRegionConnectTimeoutMs();
/**
* Gets the timeout value for reading information from peer eureka nodes for
* remote regions.
*
* @return timeout value in milliseconds.
*/
int getRemoteRegionReadTimeoutMs();
/**
* Gets the total number of <em>HTTP</em> connections allowed to peer eureka
* nodes for remote regions.
*
* @return total number of allowed <em>HTTP</em> connections.
*/
int getRemoteRegionTotalConnections();
/**
* Gets the total number of <em>HTTP</em> connections allowed to a
* particular peer eureka node for remote regions.
*
* @return total number of allowed <em>HTTP</em> connections for a peer
* node.
*/
int getRemoteRegionTotalConnectionsPerHost();
/**
* Gets the idle time after which the <em>HTTP</em> connection should be
* cleaned up for remote regions.
*
* @return idle time in seconds.
*/
int getRemoteRegionConnectionIdleTimeoutSeconds();
/**
* Indicates whether the content fetched from eureka server has to be
* compressed for remote regions whenever it is supported by the server. The
* registry information from the eureka server is compressed for optimum
* network traffic.
*
* @return true, if the content need to be compressed, false otherwise.
*/
boolean shouldGZipContentFromRemoteRegion();
/**
* Get a map of region name against remote region discovery url.
*
* @return - An unmodifiable map of remote region name against remote region discovery url. Empty map if no remote
* region url is defined.
*/
Map<String, String> getRemoteRegionUrlsWithName();
/**
* Get the list of remote region urls.
* @return - array of string representing {@link java.net.URL}s.
* @deprecated Use {@link #getRemoteRegionUrlsWithName()}
*/
String[] getRemoteRegionUrls();
/**
* Returns a list of applications that must be retrieved from the passed remote region. <br/>
* This list can be <code>null</code> which means that no filtering should be applied on the applications
* for this region i.e. all applications must be returned. <br/>
* A global whitelist can also be configured which can be used when no setting is available for a region, such a
* whitelist can be obtained by passing <code>null</code> to this method.
*
* @param regionName Name of the region for which the application whitelist is to be retrieved. If null a global
* setting is returned.
*
* @return A set of application names which must be retrieved from the passed region. If <code>null</code> all
* applications must be retrieved.
*/
@Nullable
Set<String> getRemoteRegionAppWhitelist(@Nullable String regionName);
/**
* Get the time interval for which the registry information need to be fetched from the remote region.
* @return time in seconds.
*/
int getRemoteRegionRegistryFetchInterval();
    /**
     * Size of a thread pool used to execute remote region registry fetch requests. Delegating these requests
     * to internal threads is necessary workaround to https://bugs.openjdk.java.net/browse/JDK-8049846 bug.
     *
     * @return the number of threads in the remote region fetch thread pool.
     */
    int getRemoteRegionFetchThreadPoolSize();
    /**
     * Gets the fully qualified trust store file that will be used for remote region registry fetches.
     *
     * @return path to the trust store file used for remote region fetches.
     */
    String getRemoteRegionTrustStore();

    /**
     * Get the remote region trust store's password.
     *
     * @return password for the remote region trust store.
     */
    String getRemoteRegionTrustStorePassword();
/**
* Old behavior of fallback to applications in the remote region (if configured) if there are no instances of that
* application in the local region, will be disabled.
*
* @return {@code true} if the old behavior is to be disabled.
*/
boolean disableTransparentFallbackToOtherRegion();
/**
* Indicates whether the replication between cluster nodes should be batched for network efficiency.
* @return {@code true} if the replication needs to be batched.
*/
boolean shouldBatchReplication();
/**
* Allows to configure URL which Eureka should treat as its own during replication. In some cases Eureka URLs don't
* match IP address or hostname (for example, when nodes are behind load balancers). Setting this parameter on each
* node to URLs of associated load balancers helps to avoid replication to the same node where event originally came
* to. Important: you need to configure the whole URL including scheme and path, like
* <code>http://eureka-node1.mydomain.com:8010/eureka/v2/</code>
* @return URL Eureka will treat as its own
*/
String getMyUrl();
/**
* Indicates whether the eureka server should log/metric clientAuthHeaders
* @return {@code true} if the clientAuthHeaders should be logged and/or emitted as metrics
*/
boolean shouldLogIdentityHeaders();
    /**
     * Indicates whether the rate limiter should be enabled or disabled.
     *
     * @return {@code true} if the rate limiter is enabled, {@code false} otherwise.
     */
    boolean isRateLimiterEnabled();

    /**
     * Indicates whether standard clients should be rate limited. If set to false, only non standard
     * clients will be rate limited.
     *
     * @return {@code true} if standard clients are subject to rate limiting.
     */
    boolean isRateLimiterThrottleStandardClients();

    /**
     * A list of certified clients. This is in addition to standard eureka Java clients.
     *
     * @return set of privileged client names exempt from (or certified for) rate limiting.
     */
    Set<String> getRateLimiterPrivilegedClients();
/**
* Rate limiter, token bucket algorithm property. See also {@link #getRateLimiterRegistryFetchAverageRate()}
* and {@link #getRateLimiterFullFetchAverageRate()}.
*/
int getRateLimiterBurstSize();
/**
* Rate limiter, token bucket algorithm property. Specifies the average enforced request rate.
* See also {@link #getRateLimiterBurstSize()}.
*/
int getRateLimiterRegistryFetchAverageRate();
/**
* Rate limiter, token bucket algorithm property. Specifies the average enforced request rate.
* See also {@link #getRateLimiterBurstSize()}.
*/
int getRateLimiterFullFetchAverageRate();
    /**
     * Name of the Role used to describe auto scaling groups from third party AWS accounts.
     *
     * @return the IAM role name used to list auto scaling groups in other accounts.
     */
    String getListAutoScalingGroupsRoleName();
/**
* @return the class name of the full json codec to use for the server. If none set a default codec will be used
*/
String getJsonCodecName();
/**
* @return the class name of the full xml codec to use for the server. If none set a default codec will be used
*/
String getXmlCodecName();
/**
* Get the configured binding strategy EIP or Route53.
* @return the configured binding strategy
*/
AwsBindingStrategy getBindingStrategy();
/**
*
* @return the ttl used to set up the route53 domain if new
*/
long getRoute53DomainTTL();
/**
* Gets the number of times the server should try to bind to the candidate
* Route53 domain.
*
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the number of times the server should try to bind to the
* candidate Route53 domain.
*/
int getRoute53BindRebindRetries();
/**
* Gets the interval with which the server should check if the Route53 domain is bound
* and should try to bind in the case if it is already not bound.
* <p>
* <em>The changes are effective at runtime.</em>
* </p>
*
* @return the time in milliseconds.
*/
int getRoute53BindingRetryIntervalMs();
/**
* To avoid configuration API pollution when trying new/experimental or features or for the migration process,
* the corresponding configuration can be put into experimental configuration section.
*
* @return a property of experimental feature
*/
String getExperimental(String name);
/**
* Get the capacity of responseCache, default value is 1000.
*
* @return the capacity of responseCache.
*/
int getInitialCapacityOfResponseCache();
}
|
brharrington/eureka
|
eureka-core/src/main/java/com/netflix/eureka/EurekaServerConfig.java
|
Java
|
apache-2.0
| 23,232 |
/*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.idea.eclipse.importer;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.psi.codeStyle.CommonCodeStyleSettings;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Field;
import java.util.Properties;
/**
* @author Rustam Vishnyakov
*/
/**
 * Maps Eclipse code style option names to IDEA code style settings fields. Mappings are
 * loaded from the {@code EclipseImportMap.properties} resource. Each property value is
 * either a plain field name, {@code language:field} or
 * {@code language:indentOptions:field}.
 *
 * @author Rustam Vishnyakov
 */
public class EclipseImportMap {
  private final Properties myProperties;
  private final static String MAP_PROPERTIES = "EclipseImportMap.properties";
  private static final Logger LOG = Logger.getInstance(EclipseImportMap.class);

  public EclipseImportMap() {
    myProperties = new Properties();
  }

  /**
   * Loads the mapping from the bundled properties resource. Failures (including a missing
   * resource) are logged and leave the map empty instead of propagating.
   */
  public void load() {
    InputStream sourceStream = getClass().getResourceAsStream(MAP_PROPERTIES);
    if (sourceStream == null) {
      // getResourceAsStream() returns null when the resource is absent; the previous
      // code would have thrown a NullPointerException here.
      LOG.error("Resource not found: " + MAP_PROPERTIES);
      return;
    }
    try {
      try {
        myProperties.load(sourceStream);
      }
      finally {
        sourceStream.close();
      }
    }
    catch (IOException e) {
      LOG.error(e);
    }
  }

  /**
   * Returns the import descriptor for the given Eclipse option name, or null if the
   * option is unknown or its mapping value is malformed.
   *
   * @param name the Eclipse option name to look up
   * @return the parsed descriptor or null
   */
  @Nullable
  public ImportDescriptor getImportDescriptor(String name) {
    String rawData = myProperties.getProperty(name);
    if (rawData != null && !rawData.trim().isEmpty()) {
      if (rawData.contains(":")) {
        String[] parameters = rawData.split(":");
        if (parameters.length == 2) {
          return new ImportDescriptor(parameters[0].trim(), parameters[1].trim());
        }
        else if (parameters.length == 3) {
          boolean indentOptions = "indentOptions".equalsIgnoreCase(parameters[1].trim());
          // Trim the field name for consistency with the two-parameter form above.
          return new ImportDescriptor(parameters[0].trim(), parameters[2].trim(), indentOptions);
        }
      }
      else {
        return new ImportDescriptor(rawData.trim());
      }
    }
    return null;
  }

  /**
   * Describes where an imported Eclipse value should be applied: an optional language,
   * a target field name, and whether the field belongs to indent options.
   */
  public static class ImportDescriptor {
    private final String myLanguage;
    private final String myFieldName;
    private final boolean myIndentOptions;
    private final boolean myIsCustomField;

    public ImportDescriptor(String language, String fieldName, boolean indentOptions) {
      myLanguage = language;
      myFieldName = fieldName;
      myIsCustomField = isCustomField(fieldName);
      myIndentOptions = indentOptions;
    }

    public ImportDescriptor(String language, String fieldName) {
      this(language, fieldName, false);
    }

    public ImportDescriptor(String fieldName) {
      this(null, fieldName);
    }

    public String getLanguage() {
      return myLanguage;
    }

    public String getFieldName() {
      return myFieldName;
    }

    public boolean isIndentOptions() {
      return myIndentOptions;
    }

    /** @return true if the descriptor is bound to a specific language */
    public boolean isLanguageSpecific() {
      return myLanguage != null;
    }

    public boolean isCustomField() {
      return myIsCustomField;
    }

    /**
     * A field is "custom" when it is neither the programmatic-import marker nor a
     * declared field of CommonCodeStyleSettings or CommonCodeStyleSettings.IndentOptions.
     */
    private static boolean isCustomField(@NotNull String fieldName) {
      if (EclipseCodeStyleImportWorker.PROGRAMMATIC_IMPORT_KEY.equals(fieldName)) {
        return false;
      }
      for (Field field : CommonCodeStyleSettings.class.getFields()) {
        if (fieldName.equals(field.getName())) return false;
      }
      for (Field field : CommonCodeStyleSettings.IndentOptions.class.getFields()) {
        if (fieldName.equals(field.getName())) return false;
      }
      return true;
    }
  }
}
|
vvv1559/intellij-community
|
plugins/eclipse/src/org/jetbrains/idea/eclipse/importer/EclipseImportMap.java
|
Java
|
apache-2.0
| 3,840 |
/*
* Copyright 2016 MovingBlocks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.terasology.rendering.nui.layers.mainMenu;
import org.terasology.assets.ResourceUrn;
import org.terasology.config.Config;
import org.terasology.config.PlayerConfig;
import org.terasology.registry.In;
import org.terasology.rendering.nui.CoreScreenLayer;
import org.terasology.rendering.nui.WidgetUtil;
import org.terasology.rendering.nui.widgets.UIText;
public class EnterUsernamePopup extends CoreScreenLayer {
    public static final ResourceUrn ASSET_URI = new ResourceUrn("engine:enterUsernamePopup");

    @In
    private Config config;

    private UIText username;
    private PlayerConfig playerConfig;

    /**
     * Wires up the username field and the ok/cancel buttons. "ok" persists a non-empty
     * username; "cancel" keeps the current one. Both mark the username as entered and
     * close the popup.
     */
    @Override
    public void initialise() {
        playerConfig = config.getPlayer();
        username = find("username", UIText.class);
        // find() may return null if the UI asset doesn't define the widget; guard here to
        // stay consistent with the null check already performed in the "ok" handler.
        if (username != null) {
            username.setText(playerConfig.getName());
        }

        WidgetUtil.trySubscribe(this, "ok", button -> {
            if (username != null && !username.getText().isEmpty()) {
                playerConfig.setName(username.getText());
                playerConfig.setHasEnteredUsername(true);
                getManager().popScreen();
            }
        });

        WidgetUtil.trySubscribe(this, "cancel", button -> {
            playerConfig.setHasEnteredUsername(true);
            getManager().popScreen();
        });
    }
}
|
dannyzhou98/Terasology
|
engine/src/main/java/org/terasology/rendering/nui/layers/mainMenu/EnterUsernamePopup.java
|
Java
|
apache-2.0
| 1,894 |
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.engine.impl.delegate.invocation;
import org.flowable.common.engine.impl.javax.el.ELContext;
import org.flowable.common.engine.impl.javax.el.ValueExpression;
/**
* Class responsible for handling Expression.setValue() invocations.
*
* @author Daniel Meyer
*/
public class ExpressionSetInvocation extends ExpressionInvocation {

    /** Value to be written into the expression target. */
    protected final Object value;

    /** EL context used to resolve the expression when the invocation runs. */
    protected ELContext elContext;

    /**
     * @param valueExpression the expression whose value will be set
     * @param elContext the EL context used during evaluation
     * @param value the value assigned to the expression
     */
    public ExpressionSetInvocation(ValueExpression valueExpression, ELContext elContext, Object value) {
        super(valueExpression);
        this.value = value;
        this.elContext = elContext;
        this.invocationParameters = new Object[] { value };
    }

    /** Performs the actual setValue() call on the wrapped expression. */
    @Override
    protected void invoke() {
        valueExpression.setValue(elContext, value);
    }
}
|
lsmall/flowable-engine
|
modules/flowable-engine/src/main/java/org/flowable/engine/impl/delegate/invocation/ExpressionSetInvocation.java
|
Java
|
apache-2.0
| 1,399 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.cache.query;
/**
* Thrown if an exception is thrown when a method is invoked during query execution.
*
* @since GemFire 4.0
*/
public class QueryInvocationTargetException extends QueryException {
  private static final long serialVersionUID = 2978208305701582906L;

  /**
   * Construct an instance of QueryInvocationTargetException
   *
   * @param cause a Throwable cause of this exception
   */
  public QueryInvocationTargetException(Throwable cause) {
    super(cause);
  }

  /**
   * Construct an instance of QueryInvocationTargetException
   *
   * @param msg the error message
   */
  public QueryInvocationTargetException(String msg) {
    super(msg);
  }

  /**
   * Construct an instance of QueryInvocationTargetException
   *
   * @param msg the error message
   * @param cause a Throwable cause of this exception
   */
  public QueryInvocationTargetException(String msg, Throwable cause) {
    super(msg, cause);
  }
}
|
prasi-in/geode
|
geode-core/src/main/java/org/apache/geode/cache/query/QueryInvocationTargetException.java
|
Java
|
apache-2.0
| 1,731 |
package io.cattle.platform.extension.dynamic.process;
import static io.cattle.platform.core.model.tables.ExternalHandlerExternalHandlerProcessMapTable.EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP;
import static io.cattle.platform.core.model.tables.ExternalHandlerProcessTable.EXTERNAL_HANDLER_PROCESS;
import io.cattle.platform.core.constants.ExternalHandlerConstants;
import io.cattle.platform.core.dao.GenericMapDao;
import io.cattle.platform.core.model.ExternalHandler;
import io.cattle.platform.core.model.ExternalHandlerExternalHandlerProcessMap;
import io.cattle.platform.core.model.ExternalHandlerProcess;
import io.cattle.platform.engine.handler.HandlerResult;
import io.cattle.platform.engine.process.ProcessInstance;
import io.cattle.platform.engine.process.ProcessState;
import io.cattle.platform.extension.dynamic.api.addon.ExternalHandlerProcessConfig;
import io.cattle.platform.json.JsonMapper;
import io.cattle.platform.lock.LockCallback;
import io.cattle.platform.lock.LockManager;
import io.cattle.platform.object.process.StandardProcess;
import io.cattle.platform.object.util.DataAccessor;
import io.cattle.platform.process.base.AbstractDefaultProcessHandler;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import org.apache.commons.lang3.StringUtils;
/**
 * Activates an external handler: parses the handler's process configuration, creates any
 * missing ExternalHandlerProcess records plus the handler-to-process mappings, and then
 * kicks off the standard CREATE process for every non-removed mapping.
 */
public class ExternalHandlerActivate extends AbstractDefaultProcessHandler {

    @Inject
    LockManager lockManager;

    @Inject
    GenericMapDao mapDao;

    @Inject
    JsonMapper jsonMapper;

    @Override
    public HandlerResult handle(ProcessState state, ProcessInstance process) {
        ExternalHandler externalHandler = (ExternalHandler) state.getResource();

        Map<String, Config> processConfigs = new HashMap<String, Config>();
        DataAccessor accessor = DataAccessor.fields(externalHandler).withKey(ExternalHandlerConstants.FIELD_PROCESS_CONFIGS);
        List<? extends ExternalHandlerProcessConfig> list = accessor.asList(jsonMapper, ExternalHandlerProcessConfig.class);
        if (list != null) {
            for (ExternalHandlerProcessConfig config : list) {
                // A single config name may hold several comma-separated entries, each
                // optionally of the form "<processName>:<eventName>".
                for (String part : config.getName().toString().trim().split("\\s*,\\s*")) {
                    String[] moreParts = StringUtils.split(part, ":", 2);
                    String name = moreParts[0];
                    String eventName = null;
                    if (moreParts.length > 1) {
                        eventName = moreParts[1];
                    } else if (name.startsWith("environment.")) {
                        /* Handle migration from v1 to v2 api */
                        eventName = name;
                        name = part.replace("environment.", "stack.");
                    }
                    processConfigs.put(name, new Config(config.getOnError(), eventName));
                }
            }
        }

        // Drop process names already mapped to this handler so only new mappings are
        // created below.
        if (!processConfigs.isEmpty()) {
            for (ExternalHandlerExternalHandlerProcessMap map : getObjectManager().children(externalHandler, ExternalHandlerExternalHandlerProcessMap.class)) {
                ExternalHandlerProcess handlerProcess = getObjectManager().loadResource(ExternalHandlerProcess.class, map.getExternalHandlerProcessId());
                processConfigs.remove(handlerProcess.getName());
            }
        }

        if (!processConfigs.isEmpty()) {
            for (Iterator<String> iter = processConfigs.keySet().iterator(); iter.hasNext();) {
                final String processName = iter.next();
                // Lock per process name so concurrent activations don't create duplicate
                // ExternalHandlerProcess rows for the same name.
                ExternalHandlerProcess handlerProcess = lockManager.lock(new CreateExternalHandlerProcessLock(processName),
                        new LockCallback<ExternalHandlerProcess>() {
                            @Override
                            public ExternalHandlerProcess doWithLock() {
                                return getExternalHandlerProcess(processName);
                            }
                        });

                Config config = processConfigs.get(processName);
                String onError = null;
                String eventName = null;
                if (config != null) {
                    onError = config.onError;
                    eventName = config.eventName;
                }
                getObjectManager().create(ExternalHandlerExternalHandlerProcessMap.class,
                        EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP.EXTERNAL_HANDLER_ID, externalHandler.getId(),
                        EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP.EXTERNAL_HANDLER_PROCESS_ID, handlerProcess.getId(),
                        EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP.ON_ERROR, onError,
                        EXTERNAL_HANDLER_EXTERNAL_HANDLER_PROCESS_MAP.EVENT_NAME, eventName);
            }
        }

        // Run the standard CREATE process for every still-present mapping of this handler.
        for (ExternalHandlerExternalHandlerProcessMap map : mapDao.findNonRemoved(ExternalHandlerExternalHandlerProcessMap.class, ExternalHandler.class,
                externalHandler.getId())) {
            getObjectProcessManager().executeStandardProcess(StandardProcess.CREATE, map, state.getData());
        }

        // Clear the raw config field now that it has been translated into mappings.
        accessor.set(null);
        getObjectManager().persist(externalHandler);

        return null;
    }

    /**
     * Looks up the ExternalHandlerProcess with the given name, creating it if absent
     * (callers hold a name-scoped lock), then runs the standard CREATE process on it.
     */
    protected ExternalHandlerProcess getExternalHandlerProcess(String name) {
        List<ExternalHandlerProcess> processes = getObjectManager().find(ExternalHandlerProcess.class, EXTERNAL_HANDLER_PROCESS.NAME, name);
        ExternalHandlerProcess process = null;

        if (processes.size() == 0) {
            process = getObjectManager().create(ExternalHandlerProcess.class, EXTERNAL_HANDLER_PROCESS.NAME, name);
        } else {
            process = processes.get(0);
        }

        getObjectProcessManager().executeStandardProcess(StandardProcess.CREATE, process, null);

        return getObjectManager().reload(process);
    }

    /** Simple value holder pairing a process's onError policy with its event name. */
    private static class Config {
        String onError;
        String eventName;

        public Config(String onError, String eventName) {
            super();
            this.onError = onError;
            this.eventName = eventName;
        }
    }
}
|
wlan0/cattle
|
code/iaas/external-handler/src/main/java/io/cattle/platform/extension/dynamic/process/ExternalHandlerActivate.java
|
Java
|
apache-2.0
| 6,145 |
// Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.actions;
import static com.google.devtools.build.lib.profiler.AutoProfiler.profiled;
import com.google.common.annotations.VisibleForTesting;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ThreadSafe;
import com.google.devtools.build.lib.profiler.AutoProfiler;
import com.google.devtools.build.lib.profiler.ProfilerTask;
import com.google.devtools.build.lib.util.Pair;
import com.google.devtools.build.lib.util.Preconditions;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
/**
* Used to keep track of resources consumed by the Blaze action execution threads and throttle them
* when necessary.
*
* <p>Threads which are known to consume a significant amount of resources should call
* {@link #acquireResources} method. This method will check whether requested resources are
* available and will either mark them as used and allow the thread to proceed or will block the
* thread until requested resources will become available. When the thread completes its task, it
* must release allocated resources by calling {@link #releaseResources} method.
*
* <p>Available resources can be calculated using one of three ways:
* <ol>
* <li>They can be preset using {@link #setAvailableResources(ResourceSet)} method. This is used
* mainly by the unit tests (however it is possible to provide a future option that would
* artificially limit amount of CPU/RAM consumed by the Blaze).
* <li>They can be preset based on the /proc/cpuinfo and /proc/meminfo information. Blaze will
* calculate amount of available CPU cores (adjusting for hyperthreading logical cores) and
* amount of the total available memory and will limit itself to the number of effective cores
* and 2/3 of the available memory. For details, please look at the {@link
* LocalHostCapacity#getLocalHostCapacity} method.
* </ol>
*
* <p>The resource manager also allows a slight overallocation of the resources to account for the
* fact that requested resources are usually estimated using a pessimistic approximation. It also
* guarantees that at least one thread will always be able to acquire any amount of requested
* resources (even if it is greater than amount of available resources). Therefore, assuming that
* threads correctly release acquired resources, Blaze will never be fully blocked.
*/
@ThreadSafe
public class ResourceManager {

  /**
   * A handle returned by {@link #acquireResources(ActionExecutionMetadata, ResourceSet)} that must
   * be closed in order to free the resources again.
   */
  public static class ResourceHandle implements AutoCloseable {
    final ResourceManager rm;
    final ActionExecutionMetadata actionMetadata;
    final ResourceSet resourceSet;

    public ResourceHandle(ResourceManager rm, ActionExecutionMetadata actionMetadata,
        ResourceSet resources) {
      this.rm = rm;
      this.actionMetadata = actionMetadata;
      this.resourceSet = resources;
    }

    /**
     * Closing the ResourceHandle releases the resources associated with it.
     */
    @Override
    public void close() {
      rm.releaseResources(actionMetadata, resourceSet);
    }
  }

  // Per-thread flag recording whether the current thread holds a non-zero resource lock.
  private final ThreadLocal<Boolean> threadLocked = new ThreadLocal<Boolean>() {
    @Override
    protected Boolean initialValue() {
      return false;
    }
  };

  /**
   * Singleton reference defined in a separate class to ensure thread-safe lazy
   * initialization.
   */
  private static class Singleton {
    static ResourceManager instance = new ResourceManager();
  }

  /**
   * Returns singleton instance of the resource manager.
   */
  public static ResourceManager instance() {
    return Singleton.instance;
  }

  // Allocated resources are allowed to go "negative", but at least
  // MIN_AVAILABLE_CPU_RATIO portion of CPU and MIN_AVAILABLE_RAM_RATIO portion
  // of RAM should be available.
  // Please note that this value is purely empirical - we assume that generally
  // requested resources are somewhat pessimistic and thread would end up
  // using less than requested amount.
  private static final double MIN_NECESSARY_CPU_RATIO = 0.6;
  private static final double MIN_NECESSARY_RAM_RATIO = 1.0;
  private static final double MIN_NECESSARY_IO_RATIO = 1.0;

  // List of blocked threads. Associated CountDownLatch object will always
  // be initialized to 1 during creation in the acquire() method.
  private final List<Pair<ResourceSet, CountDownLatch>> requestList;

  // The total amount of resources on the local host. Must be set by
  // an explicit call to setAvailableResources(), often using
  // LocalHostCapacity.getLocalHostCapacity() as an argument.
  private ResourceSet staticResources = null;

  // staticResources with RAM scaled by ramUtilizationPercentage; this is what
  // acquisition requests are actually checked against.
  private ResourceSet availableResources = null;

  // Used amount of CPU capacity (where 1.0 corresponds to the one fully
  // occupied CPU core. Corresponds to the CPU resource definition in the
  // ResourceSet class.
  private double usedCpu;

  // Used amount of RAM capacity in MB. Corresponds to the RAM resource
  // definition in the ResourceSet class.
  private double usedRam;

  // Used amount of I/O resources. Corresponds to the I/O resource
  // definition in the ResourceSet class.
  private double usedIo;

  // Used local test count. Corresponds to the local test count definition in the ResourceSet class.
  private int usedLocalTestCount;

  // Specifies how much of the RAM in staticResources we should allow to be used.
  public static final int DEFAULT_RAM_UTILIZATION_PERCENTAGE = 67;
  private int ramUtilizationPercentage = DEFAULT_RAM_UTILIZATION_PERCENTAGE;

  private ResourceManager() {
    requestList = new LinkedList<>();
  }

  @VisibleForTesting public static ResourceManager instanceForTestingOnly() {
    return new ResourceManager();
  }

  /**
   * Resets resource manager state and releases all thread locks.
   * Note - it does not reset available resources. Use separate call to setAvailableResources().
   */
  public synchronized void resetResourceUsage() {
    usedCpu = 0;
    usedRam = 0;
    usedIo = 0;
    usedLocalTestCount = 0;
    for (Pair<ResourceSet, CountDownLatch> request : requestList) {
      // CountDownLatch can be set only to 0 or 1.
      request.second.countDown();
    }
    requestList.clear();
  }

  /**
   * Sets available resources using given resource set. Must be called
   * at least once before using resource manager.
   */
  public synchronized void setAvailableResources(ResourceSet resources) {
    Preconditions.checkNotNull(resources);
    staticResources = resources;
    availableResources = ResourceSet.create(
        staticResources.getMemoryMb() * this.ramUtilizationPercentage / 100.0,
        staticResources.getCpuUsage(),
        staticResources.getIoUsage(),
        staticResources.getLocalTestCount());
    processWaitingThreads();
  }

  /**
   * Specify how much of the available RAM we should allow to be used.
   */
  public synchronized void setRamUtilizationPercentage(int percentage) {
    ramUtilizationPercentage = percentage;
  }

  /**
   * Acquires requested resource set. Will block if resource is not available.
   * NB! This method must be thread-safe!
   */
  public ResourceHandle acquireResources(ActionExecutionMetadata owner, ResourceSet resources)
      throws InterruptedException {
    Preconditions.checkNotNull(
        resources, "acquireResources called with resources == NULL during %s", owner);
    Preconditions.checkState(
        !threadHasResources(), "acquireResources with existing resource lock during %s", owner);

    AutoProfiler p = profiled(owner, ProfilerTask.ACTION_LOCK);
    CountDownLatch latch = null;
    try {
      latch = acquire(resources);
      if (latch != null) {
        latch.await();
      }
    } catch (InterruptedException e) {
      // Synchronize on this to avoid any racing with #processWaitingThreads
      synchronized (this) {
        if (latch.getCount() == 0) {
          // Resources were already granted by the other side; give them back. Calling
          // release() here is safe even though we hold the monitor: release() is
          // synchronized on the same (reentrant) monitor, so no deadlock can occur.
          release(resources);
        } else {
          // Inform other side that resources shouldn't be acquired.
          latch.countDown();
        }
      }
      throw e;
    }

    threadLocked.set(true);

    // Profile acquisition only if it waited for resource to become available.
    if (latch != null) {
      p.complete();
    }

    return new ResourceHandle(this, owner, resources);
  }

  /**
   * Acquires the given resources if available immediately. Does not block.
   *
   * @return a ResourceHandle iff the given resources were locked (all or nothing), null otherwise.
   */
  @VisibleForTesting
  ResourceHandle tryAcquire(ActionExecutionMetadata owner, ResourceSet resources) {
    Preconditions.checkNotNull(
        resources, "tryAcquire called with resources == NULL during %s", owner);
    Preconditions.checkState(
        !threadHasResources(), "tryAcquire with existing resource lock during %s", owner);

    boolean acquired = false;
    synchronized (this) {
      if (areResourcesAvailable(resources)) {
        incrementResources(resources);
        acquired = true;
      }
    }

    if (acquired) {
      threadLocked.set(resources != ResourceSet.ZERO);
      return new ResourceHandle(this, owner, resources);
    }

    return null;
  }

  // Bookkeeping only; callers must hold the monitor.
  private void incrementResources(ResourceSet resources) {
    usedCpu += resources.getCpuUsage();
    usedRam += resources.getMemoryMb();
    usedIo += resources.getIoUsage();
    usedLocalTestCount += resources.getLocalTestCount();
  }

  /**
   * Return true if any resources have been claimed through this manager.
   */
  public synchronized boolean inUse() {
    return usedCpu != 0.0 || usedRam != 0.0 || usedIo != 0.0 || usedLocalTestCount != 0
        || !requestList.isEmpty();
  }

  /**
   * Return true iff this thread has a lock on non-zero resources.
   */
  public boolean threadHasResources() {
    return threadLocked.get();
  }

  /**
   * Releases previously requested resource =.
   *
   * <p>NB! This method must be thread-safe!
   */
  @VisibleForTesting
  void releaseResources(ActionExecutionMetadata owner, ResourceSet resources) {
    Preconditions.checkNotNull(
        resources, "releaseResources called with resources == NULL during %s", owner);
    Preconditions.checkState(
        threadHasResources(), "releaseResources without resource lock during %s", owner);

    boolean isConflict = false;
    AutoProfiler p = profiled(owner, ProfilerTask.ACTION_RELEASE);
    try {
      isConflict = release(resources);
    } finally {
      threadLocked.set(false);

      // Profile resource release only if it resolved at least one allocation request.
      if (isConflict) {
        p.complete();
      }
    }
  }

  // Returns null when the resources were granted immediately; otherwise returns a
  // latch (initialized to 1) the caller must await on.
  private synchronized CountDownLatch acquire(ResourceSet resources) {
    if (areResourcesAvailable(resources)) {
      incrementResources(resources);
      return null;
    }
    Pair<ResourceSet, CountDownLatch> request =
      new Pair<>(resources, new CountDownLatch(1));
    requestList.add(request);
    return request.second;
  }

  // Returns true if releasing these resources unblocked at least one waiting request.
  private synchronized boolean release(ResourceSet resources) {
    usedCpu -= resources.getCpuUsage();
    usedRam -= resources.getMemoryMb();
    usedIo -= resources.getIoUsage();
    usedLocalTestCount -= resources.getLocalTestCount();

    // TODO(bazel-team): (2010) rounding error can accumulate and value below can end up being
    // e.g. 1E-15. So if it is small enough, we set it to 0. But maybe there is a better solution.
    double epsilon = 0.0001;
    if (usedCpu < epsilon) {
      usedCpu = 0;
    }
    if (usedRam < epsilon) {
      usedRam = 0;
    }
    if (usedIo < epsilon) {
      usedIo = 0;
    }
    if (!requestList.isEmpty()) {
      processWaitingThreads();
      return true;
    }
    return false;
  }

  /**
   * Tries to unblock one or more waiting threads if there are sufficient resources available.
   */
  private synchronized void processWaitingThreads() {
    Iterator<Pair<ResourceSet, CountDownLatch>> iterator = requestList.iterator();
    while (iterator.hasNext()) {
      Pair<ResourceSet, CountDownLatch> request = iterator.next();
      if (request.second.getCount() != 0) {
        if (areResourcesAvailable(request.first)) {
          incrementResources(request.first);
          request.second.countDown();
          iterator.remove();
        }
      } else {
        // Cancelled by other side.
        iterator.remove();
      }
    }
  }

  // Method will return true if all requested resources are considered to be available.
  private boolean areResourcesAvailable(ResourceSet resources) {
    Preconditions.checkNotNull(availableResources);
    // Comparison below is robust, since any calculation errors will be fixed
    // by the release() method.
    if (usedCpu == 0.0 && usedRam == 0.0 && usedIo == 0.0 && usedLocalTestCount == 0) {
      return true;
    }
    // Use only MIN_NECESSARY_???_RATIO of the resource value to check for
    // allocation. This is necessary to account for the fact that most of the
    // requested resource sets use pessimistic estimations. Note that this
    // ratio is used only during comparison - for tracking we will actually
    // mark whole requested amount as used.
    double cpu = resources.getCpuUsage() * MIN_NECESSARY_CPU_RATIO;
    double ram = resources.getMemoryMb() * MIN_NECESSARY_RAM_RATIO;
    double io = resources.getIoUsage() * MIN_NECESSARY_IO_RATIO;
    int localTestCount = resources.getLocalTestCount();

    double availableCpu = availableResources.getCpuUsage();
    double availableRam = availableResources.getMemoryMb();
    double availableIo = availableResources.getIoUsage();
    int availableLocalTestCount = availableResources.getLocalTestCount();

    // Resources are considered available if any one of the conditions below is true:
    // 1) If resource is not requested at all, it is available.
    // 2) If resource is not used at the moment, it is considered to be
    // available regardless of how much is requested. This is necessary to
    // ensure that at any given time, at least one thread is able to acquire
    // resources even if it requests more than available.
    // 3) If used resource amount is less than total available resource amount.
    boolean cpuIsAvailable = cpu == 0.0 || usedCpu == 0.0 || usedCpu + cpu <= availableCpu;
    boolean ramIsAvailable = ram == 0.0 || usedRam == 0.0 || usedRam + ram <= availableRam;
    boolean ioIsAvailable = io == 0.0 || usedIo == 0.0 || usedIo + io <= availableIo;
    boolean localTestCountIsAvailable = localTestCount == 0 || usedLocalTestCount == 0
        || usedLocalTestCount + localTestCount <= availableLocalTestCount;
    return cpuIsAvailable && ramIsAvailable && ioIsAvailable && localTestCountIsAvailable;
  }

  @VisibleForTesting
  synchronized int getWaitCount() {
    return requestList.size();
  }

  @VisibleForTesting
  synchronized boolean isAvailable(double ram, double cpu, double io, int localTestCount) {
    return areResourcesAvailable(ResourceSet.create(ram, cpu, io, localTestCount));
  }
}
|
juhalindfors/bazel-patches
|
src/main/java/com/google/devtools/build/lib/actions/ResourceManager.java
|
Java
|
apache-2.0
| 15,858 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.giraph.bsp.checkpoints;
import org.apache.giraph.conf.GiraphConfiguration;
import org.apache.giraph.master.MasterCompute;
/**
* Disable all checkpoints.
*/
/**
 * A {@link CheckpointSupportedChecker} that turns checkpointing off
 * unconditionally, regardless of configuration or master state.
 */
public class DisabledCheckpointSupportedChecker implements CheckpointSupportedChecker {
  @Override
  public boolean isCheckpointSupported(
      GiraphConfiguration conf, MasterCompute masterCompute) {
    // Never allow a checkpoint to be taken.
    return false;
  }
}
|
KidEinstein/giraph
|
giraph-core/src/main/java/org/apache/giraph/bsp/checkpoints/DisabledCheckpointSupportedChecker.java
|
Java
|
apache-2.0
| 1,249 |
/*
* Copyright 2016-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.ospf.protocol.ospfpacket;
import org.jboss.netty.buffer.ChannelBuffer;
import org.onlab.packet.Ip4Address;
import org.onosproject.ospf.exceptions.OspfParseException;
import org.onosproject.ospf.protocol.util.OspfPacketType;
/**
* Representation of an OSPF message.
*/
/**
 * Representation of an OSPF message.
 */
public interface OspfMessage {

    /**
     * Returns the type of OSPF message.
     *
     * @return OSPF message type
     */
    OspfPacketType ospfMessageType();

    /**
     * Reads from ChannelBuffer and initializes the type of LSA.
     *
     * @param channelBuffer channel buffer instance
     * @throws OspfParseException if an error occurs while parsing the buffer
     */
    void readFrom(ChannelBuffer channelBuffer) throws OspfParseException;

    /**
     * Returns OSPF message as a byte array.
     *
     * @return OSPF message as bytes
     */
    byte[] asBytes();

    /**
     * Sets the source IP address.
     *
     * @param sourceIp IP address
     */
    void setSourceIp(Ip4Address sourceIp);

    /**
     * Returns the destination IP address.
     *
     * @return destination IP address
     */
    Ip4Address destinationIp();

    /**
     * Sets the destination IP address.
     *
     * @param destinationIp destination IP address
     */
    void setDestinationIp(Ip4Address destinationIp);
}
|
VinodKumarS-Huawei/ietf96yang
|
protocols/ospf/protocol/src/main/java/org/onosproject/ospf/protocol/ospfpacket/OspfMessage.java
|
Java
|
apache-2.0
| 1,954 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.script;
import java.io.File;
import java.io.FileInputStream;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import javax.script.Invocable;
import javax.script.ScriptEngine;
import javax.script.ScriptException;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.annotation.behavior.DynamicProperty;
import org.apache.nifi.annotation.behavior.Restricted;
import org.apache.nifi.annotation.behavior.Restriction;
import org.apache.nifi.annotation.behavior.Stateful;
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.SeeAlso;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.annotation.lifecycle.OnScheduled;
import org.apache.nifi.annotation.lifecycle.OnStopped;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.RequiredPermission;
import org.apache.nifi.components.ValidationContext;
import org.apache.nifi.components.ValidationResult;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.controller.ControllerServiceLookup;
import org.apache.nifi.controller.NodeTypeProvider;
import org.apache.nifi.expression.ExpressionLanguageScope;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.AbstractSessionFactoryProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSessionFactory;
import org.apache.nifi.processor.Processor;
import org.apache.nifi.processor.ProcessorInitializationContext;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.processor.util.StandardValidators;
import org.apache.nifi.script.ScriptingComponentHelper;
import org.apache.nifi.script.ScriptingComponentUtils;
import org.apache.nifi.script.impl.FilteredPropertiesValidationContextAdapter;
@Tags({"script", "invoke", "groovy", "python", "jython", "jruby", "ruby", "javascript", "js", "lua", "luaj"})
@CapabilityDescription("Experimental - Invokes a script engine for a Processor defined in the given script. The script must define "
+ "a valid class that implements the Processor interface, and it must set a variable 'processor' to an instance of "
+ "the class. Processor methods such as onTrigger() will be delegated to the scripted Processor instance. Also any "
+ "Relationships or PropertyDescriptors defined by the scripted processor will be added to the configuration dialog. The scripted processor can "
+ "implement public void setLogger(ComponentLog logger) to get access to the parent logger, as well as public void onScheduled(ProcessContext context) and "
+ "public void onStopped(ProcessContext context) methods to be invoked when the parent InvokeScriptedProcessor is scheduled or stopped, respectively. "
+ "Experimental: Impact of sustained usage not yet verified.")
@DynamicProperty(name = "A script engine property to update", value = "The value to set it to",
expressionLanguageScope = ExpressionLanguageScope.FLOWFILE_ATTRIBUTES,
description = "Updates a script engine property specified by the Dynamic Property's key with the value specified by the Dynamic Property's value")
@Stateful(scopes = {Scope.LOCAL, Scope.CLUSTER},
description = "Scripts can store and retrieve state using the State Management APIs. Consult the State Manager section of the Developer's Guide for more details.")
@SeeAlso({ExecuteScript.class})
@Restricted(
restrictions = {
@Restriction(
requiredPermission = RequiredPermission.EXECUTE_CODE,
explanation = "Provides operator the ability to execute arbitrary code assuming all permissions that NiFi has.")
}
)
public class InvokeScriptedProcessor extends AbstractSessionFactoryProcessor {

    // The Processor instance produced by evaluating the script; null until a script has loaded successfully.
    private final AtomicReference<Processor> processor = new AtomicReference<>();
    // Validation errors from the most recent script (re)load; an empty collection means the script loaded cleanly.
    private final AtomicReference<Collection<ValidationResult>> validationResults = new AtomicReference<>(new ArrayList<>());
    // Set when script- or engine-related properties change so setup() knows to re-evaluate the script.
    private final AtomicBoolean scriptNeedsReload = new AtomicBoolean(true);
    // Single engine instance reused by every task; reset to null (and later recreated) on engine/script property changes.
    private volatile ScriptEngine scriptEngine = null;
    // Kerberos details captured in init() and surfaced to the scripted Processor via its initialization context.
    private volatile String kerberosServicePrincipal = null;
    private volatile File kerberosConfigFile = null;
    private volatile File kerberosServiceKeytab = null;

    volatile ScriptingComponentHelper scriptingComponentHelper = new ScriptingComponentHelper();

    /**
     * Returns the valid relationships for this processor as supplied by the
     * script itself.
     *
     * @return a Set of Relationships supported by this processor
     */
    @Override
    public Set<Relationship> getRelationships() {
        final Set<Relationship> relationships = new HashSet<>();
        final Processor instance = processor.get();
        if (instance != null) {
            try {
                final Set<Relationship> rels = instance.getRelationships();
                if (rels != null && !rels.isEmpty()) {
                    relationships.addAll(rels);
                }
            } catch (final Throwable t) {
                // A faulty script must not break the framework; log and fall through to an empty set.
                final ComponentLog logger = getLogger();
                final String message = "Unable to get relationships from scripted Processor: " + t;
                logger.error(message);
                if (logger.isDebugEnabled()) {
                    logger.error(message, t);
                }
            }
        }
        return Collections.unmodifiableSet(relationships);
    }

    @Override
    protected void init(final ProcessorInitializationContext context) {
        // Capture Kerberos settings so they can be handed to the scripted Processor's init context later.
        kerberosServicePrincipal = context.getKerberosServicePrincipal();
        kerberosConfigFile = context.getKerberosConfigurationFile();
        kerberosServiceKeytab = context.getKerberosServiceKeytab();
    }

    /**
     * Returns a list of property descriptors supported by this processor. The
     * list always includes properties such as script engine name, script file
     * name, script body name, script arguments, and an external module path. If
     * the scripted processor also defines supported properties, those are added
     * to the list as well.
     *
     * @return a List of PropertyDescriptor objects supported by this processor
     */
    @Override
    protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
        synchronized (scriptingComponentHelper.isInitialized) {
            if (!scriptingComponentHelper.isInitialized.get()) {
                scriptingComponentHelper.createResources();
            }
        }
        List<PropertyDescriptor> supportedPropertyDescriptors = new ArrayList<>();
        supportedPropertyDescriptors.addAll(scriptingComponentHelper.getDescriptors());

        final Processor instance = processor.get();
        if (instance != null) {
            try {
                final List<PropertyDescriptor> instanceDescriptors = instance.getPropertyDescriptors();
                if (instanceDescriptors != null) {
                    supportedPropertyDescriptors.addAll(instanceDescriptors);
                }
            } catch (final Throwable t) {
                // Same defensive posture as getRelationships(): never let script errors escape.
                final ComponentLog logger = getLogger();
                final String message = "Unable to get property descriptors from Processor: " + t;
                logger.error(message);
                if (logger.isDebugEnabled()) {
                    logger.error(message, t);
                }
            }
        }
        return Collections.unmodifiableList(supportedPropertyDescriptors);
    }

    /**
     * Returns a PropertyDescriptor for the given name. This is for the user to
     * be able to define their own properties which will be available as
     * variables in the script
     *
     * @param propertyDescriptorName used to lookup if any property descriptors
     * exist for that name
     * @return a PropertyDescriptor object corresponding to the specified
     * dynamic property name
     */
    @Override
    protected PropertyDescriptor getSupportedDynamicPropertyDescriptor(final String propertyDescriptorName) {
        return new PropertyDescriptor.Builder()
                .name(propertyDescriptorName)
                .required(false)
                .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
                .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
                .dynamic(true)
                .build();
    }

    /**
     * Performs setup operations when the processor is scheduled to run. This
     * includes evaluating the processor's properties, as well as reloading the
     * script (from file or the "Script Body" property)
     *
     * @param context the context in which to perform the setup operations
     */
    @OnScheduled
    public void setup(final ProcessContext context) {
        scriptingComponentHelper.setupVariables(context);
        setup();
        // Forward the lifecycle event to the scripted Processor, if it defines onScheduled(context).
        invokeScriptedProcessorMethod("onScheduled", context);
    }

    public void setup() {
        // Create a single script engine, the Processor object is reused by each task
        if(scriptEngine == null) {
            scriptingComponentHelper.setup(1, getLogger());
            scriptEngine = scriptingComponentHelper.engineQ.poll();
        }

        if (scriptEngine == null) {
            throw new ProcessException("No script engine available!");
        }

        if (scriptNeedsReload.get() || processor.get() == null) {
            if (ScriptingComponentHelper.isFile(scriptingComponentHelper.getScriptPath())) {
                reloadScriptFile(scriptingComponentHelper.getScriptPath());
            } else {
                reloadScriptBody(scriptingComponentHelper.getScriptBody());
            }
            scriptNeedsReload.set(false);
        }
    }

    /**
     * Handles changes to this processor's properties. If changes are made to
     * script- or engine-related properties, the script will be reloaded.
     *
     * @param descriptor of the modified property
     * @param oldValue non-null property value (previous)
     * @param newValue the new property value, or null to indicate the property
     * was removed
     */
    @Override
    public void onPropertyModified(final PropertyDescriptor descriptor, final String oldValue, final String newValue) {
        validationResults.set(new HashSet<>());

        final ComponentLog logger = getLogger();
        final Processor instance = processor.get();

        if (ScriptingComponentUtils.SCRIPT_FILE.equals(descriptor)
                || ScriptingComponentUtils.SCRIPT_BODY.equals(descriptor)
                || ScriptingComponentUtils.MODULES.equals(descriptor)
                || scriptingComponentHelper.SCRIPT_ENGINE.equals(descriptor)) {
            scriptNeedsReload.set(true);
            scriptEngine = null; //reset engine. This happens only when a processor is stopped, so there won't be any performance impact in run-time.
        } else if (instance != null) {
            // If the script provides a Processor, call its onPropertyModified() method
            try {
                instance.onPropertyModified(descriptor, oldValue, newValue);
            } catch (final Exception e) {
                final String message = "Unable to invoke onPropertyModified from script Processor: " + e;
                logger.error(message, e);
            }
        }
    }

    /**
     * Reloads the script located at the given path
     *
     * @param scriptPath the path to the script file to be loaded
     * @return true if the script was loaded successfully; false otherwise
     */
    private boolean reloadScriptFile(final String scriptPath) {
        final Collection<ValidationResult> results = new HashSet<>();

        try (final FileInputStream scriptStream = new FileInputStream(scriptPath)) {
            return reloadScript(IOUtils.toString(scriptStream, Charset.defaultCharset()));

        } catch (final Exception e) {
            final ComponentLog logger = getLogger();
            final String message = "Unable to load script: " + e;

            logger.error(message, e);
            results.add(new ValidationResult.Builder()
                    .subject("ScriptValidation")
                    .valid(false)
                    .explanation("Unable to load script due to " + e)
                    .input(scriptPath)
                    .build());
        }

        // store the updated validation results
        validationResults.set(results);

        // return whether there was any issues loading the configured script
        return results.isEmpty();
    }

    /**
     * Reloads the script defined by the given string
     *
     * @param scriptBody the contents of the script to be loaded
     * @return true if the script was loaded successfully; false otherwise
     */
    private boolean reloadScriptBody(final String scriptBody) {
        final Collection<ValidationResult> results = new HashSet<>();
        try {
            return reloadScript(scriptBody);

        } catch (final Exception e) {
            final ComponentLog logger = getLogger();
            final String message = "Unable to load script: " + e;

            logger.error(message, e);
            results.add(new ValidationResult.Builder()
                    .subject("ScriptValidation")
                    .valid(false)
                    .explanation("Unable to load script due to " + e)
                    .input(scriptingComponentHelper.getScriptPath())
                    .build());
        }

        // store the updated validation results
        validationResults.set(results);

        // return whether there was any issues loading the configured script
        return results.isEmpty();
    }

    /**
     * Reloads the script Processor. This must be called within the lock.
     *
     * @param scriptBody An input stream associated with the script content
     * @return Whether the script was successfully reloaded
     */
    private boolean reloadScript(final String scriptBody) {
        // note we are starting here with a fresh listing of validation
        // results since we are (re)loading a new/updated script. any
        // existing validation results are not relevant
        final Collection<ValidationResult> results = new HashSet<>();

        try {
            // get the engine and ensure its invocable
            if (scriptEngine instanceof Invocable) {
                final Invocable invocable = (Invocable) scriptEngine;

                // Find a custom configurator and invoke their eval() method
                ScriptEngineConfigurator configurator = scriptingComponentHelper.scriptEngineConfiguratorMap.get(scriptingComponentHelper.getScriptEngineName().toLowerCase());
                if (configurator != null) {
                    configurator.eval(scriptEngine, scriptBody, scriptingComponentHelper.getModules());
                } else {
                    // evaluate the script
                    scriptEngine.eval(scriptBody);
                }

                // get configured processor from the script (if it exists)
                final Object obj = scriptEngine.get("processor");
                if (obj != null) {
                    final ComponentLog logger = getLogger();

                    try {
                        // set the logger if the processor wants it
                        invocable.invokeMethod(obj, "setLogger", logger);
                    } catch (final NoSuchMethodException nsme) {
                        // setLogger is optional on the scripted Processor; ignore its absence.
                        if (logger.isDebugEnabled()) {
                            logger.debug("Configured script Processor does not contain a setLogger method.");
                        }
                    }

                    // record the processor for use later
                    final Processor scriptProcessor = invocable.getInterface(obj, Processor.class);
                    processor.set(scriptProcessor);

                    if (scriptProcessor != null) {
                        try {
                            // Initialize the scripted Processor with a context that delegates to this
                            // host processor's identity, logger, service lookup, and Kerberos settings.
                            scriptProcessor.initialize(new ProcessorInitializationContext() {
                                @Override
                                public String getIdentifier() {
                                    return InvokeScriptedProcessor.this.getIdentifier();
                                }

                                @Override
                                public ComponentLog getLogger() {
                                    return logger;
                                }

                                @Override
                                public ControllerServiceLookup getControllerServiceLookup() {
                                    return InvokeScriptedProcessor.super.getControllerServiceLookup();
                                }

                                @Override
                                public NodeTypeProvider getNodeTypeProvider() {
                                    return InvokeScriptedProcessor.super.getNodeTypeProvider();
                                }

                                @Override
                                public String getKerberosServicePrincipal() {
                                    return InvokeScriptedProcessor.this.kerberosServicePrincipal;
                                }

                                @Override
                                public File getKerberosServiceKeytab() {
                                    return InvokeScriptedProcessor.this.kerberosServiceKeytab;
                                }

                                @Override
                                public File getKerberosConfigurationFile() {
                                    return InvokeScriptedProcessor.this.kerberosConfigFile;
                                }
                            });
                        } catch (final Exception e) {
                            logger.error("Unable to initialize scripted Processor: " + e.getLocalizedMessage(), e);
                            throw new ProcessException(e);
                        }
                    }
                } else {
                    throw new ScriptException("No processor was defined by the script.");
                }
            }

        } catch (final Exception ex) {
            final ComponentLog logger = getLogger();
            final String message = "Unable to load script: " + ex.getLocalizedMessage();

            logger.error(message, ex);
            results.add(new ValidationResult.Builder()
                    .subject("ScriptValidation")
                    .valid(false)
                    .explanation("Unable to load script due to " + ex.getLocalizedMessage())
                    .input(scriptingComponentHelper.getScriptPath())
                    .build());
        }

        // store the updated validation results
        validationResults.set(results);

        // return whether there was any issues loading the configured script
        return results.isEmpty();
    }

    /**
     * Invokes the validate() routine provided by the script, allowing for
     * custom validation code. This method assumes there is a valid Processor
     * defined in the script and it has been loaded by the
     * InvokeScriptedProcessor processor
     *
     * @param context The validation context to be passed into the custom
     * validate method
     * @return A collection of ValidationResults returned by the custom validate
     * method
     */
    @Override
    protected Collection<ValidationResult> customValidate(final ValidationContext context) {

        Collection<ValidationResult> commonValidationResults = super.customValidate(context);
        if (!commonValidationResults.isEmpty()) {
            return commonValidationResults;
        }

        // do not try to build processor/compile/etc until onPropertyModified clear the validation error/s
        // and don't print anything into log.
        if (!validationResults.get().isEmpty()) {
            return validationResults.get();
        }

        scriptingComponentHelper.setScriptEngineName(context.getProperty(scriptingComponentHelper.SCRIPT_ENGINE).getValue());
        scriptingComponentHelper.setScriptPath(context.getProperty(ScriptingComponentUtils.SCRIPT_FILE).evaluateAttributeExpressions().getValue());
        scriptingComponentHelper.setScriptBody(context.getProperty(ScriptingComponentUtils.SCRIPT_BODY).getValue());
        String modulePath = context.getProperty(ScriptingComponentUtils.MODULES).evaluateAttributeExpressions().getValue();
        if (!StringUtils.isEmpty(modulePath)) {
            scriptingComponentHelper.setModules(modulePath.split(","));
        } else {
            scriptingComponentHelper.setModules(new String[0]);
        }
        // NOTE(review): setup() may throw ProcessException (e.g. no engine available) from within
        // validation; presumably the framework surfaces that to the user - confirm.
        setup();

        // Now that InvokeScriptedProcessor is validated, we can call validate on the scripted processor
        final Processor instance = processor.get();
        final Collection<ValidationResult> currentValidationResults = validationResults.get();

        // if there was existing validation errors and the processor loaded successfully
        if (currentValidationResults.isEmpty() && instance != null) {
            try {
                // defer to the underlying processor for validation, without the
                // invokescriptedprocessor properties
                final Set<PropertyDescriptor> innerPropertyDescriptor = new HashSet<PropertyDescriptor>(scriptingComponentHelper.getDescriptors());

                ValidationContext innerValidationContext = new FilteredPropertiesValidationContextAdapter(context, innerPropertyDescriptor);

                final Collection<ValidationResult> instanceResults = instance.validate(innerValidationContext);
                if (instanceResults != null && instanceResults.size() > 0) {
                    // return the validation results from the underlying instance
                    return instanceResults;
                }
            } catch (final Exception e) {
                final ComponentLog logger = getLogger();
                final String message = "Unable to validate the script Processor: " + e;
                logger.error(message, e);

                // return a new validation message
                final Collection<ValidationResult> results = new HashSet<>();
                results.add(new ValidationResult.Builder()
                        .subject("Validation")
                        .valid(false)
                        .explanation("An error occurred calling validate in the configured script Processor.")
                        .input(context.getProperty(ScriptingComponentUtils.SCRIPT_FILE).getValue())
                        .build());
                return results;
            }
        }

        return currentValidationResults;
    }

    /**
     * Invokes the onTrigger() method of the scripted processor. If the script
     * failed to reload, the processor yields until the script can be reloaded
     * successfully. If the scripted processor's onTrigger() method throws an
     * exception, a ProcessException will be thrown. If no processor is defined
     * by the script, an error is logged with the system.
     *
     * @param context provides access to convenience methods for obtaining
     * property values, delaying the scheduling of the processor, provides
     * access to Controller Services, etc.
     * @param sessionFactory provides access to a {@link ProcessSessionFactory},
     * which can be used for accessing FlowFiles, etc.
     * @throws ProcessException if the scripted processor's onTrigger() method
     * throws an exception
     */
    @Override
    public void onTrigger(ProcessContext context, ProcessSessionFactory sessionFactory) throws ProcessException {

        // Initialize the rest of the processor resources if we have not already done so
        synchronized (scriptingComponentHelper.isInitialized) {
            if (!scriptingComponentHelper.isInitialized.get()) {
                scriptingComponentHelper.createResources();
            }
        }

        ComponentLog log = getLogger();

        // ensure the processor (if it exists) is loaded
        final Processor instance = processor.get();

        // ensure the processor did not fail to reload at some point
        final Collection<ValidationResult> results = validationResults.get();
        if (!results.isEmpty()) {
            log.error(String.format("Unable to run because the Processor is not valid: [%s]",
                    StringUtils.join(results, ", ")));
            context.yield();
            return;
        }
        if (instance != null) {
            try {
                // run the processor
                instance.onTrigger(context, sessionFactory);
            } catch (final ProcessException e) {
                final String message = String.format("An error occurred executing the configured Processor [%s]: %s",
                        context.getProperty(ScriptingComponentUtils.SCRIPT_FILE).getValue(), e);
                log.error(message);
                throw e;
            }
        } else {
            log.error("There is no processor defined by the script");
        }
    }

    @OnStopped
    public void stop(ProcessContext context) {
        // Forward the lifecycle event to the scripted Processor, then release engine resources
        // so the next schedule starts from a clean state.
        invokeScriptedProcessorMethod("onStopped", context);
        scriptingComponentHelper.stop();
        processor.set(null);
        scriptEngine = null;
    }

    // Invokes the named method on the scripted Processor object (if the engine and object exist).
    // Missing methods are ignored; other failures are logged and rethrown as ProcessException.
    private void invokeScriptedProcessorMethod(String methodName, Object... params) {
        // Run the scripted processor's method here, if it exists
        if (scriptEngine instanceof Invocable) {
            final Invocable invocable = (Invocable) scriptEngine;
            final Object obj = scriptEngine.get("processor");
            if (obj != null) {
                ComponentLog logger = getLogger();
                try {
                    invocable.invokeMethod(obj, methodName, params);
                } catch (final NoSuchMethodException nsme) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Configured script Processor does not contain the method " + methodName);
                    }
                } catch (final Exception e) {
                    // An error occurred during onScheduled, propagate it up
                    logger.error("Error while executing the scripted processor's method " + methodName, e);
                    if (e instanceof ProcessException) {
                        throw (ProcessException) e;
                    }
                    throw new ProcessException(e);
                }
            }
        }
    }
}
|
jskora/nifi
|
nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/src/main/java/org/apache/nifi/processors/script/InvokeScriptedProcessor.java
|
Java
|
apache-2.0
| 27,812 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.core.server.cluster.ha;
import java.util.Map;
import java.util.Objects;
import org.apache.activemq.artemis.core.config.ha.ReplicationBackupPolicyConfiguration;
import org.apache.activemq.artemis.core.config.ha.DistributedPrimitiveManagerConfiguration;
import org.apache.activemq.artemis.core.io.IOCriticalErrorListener;
import org.apache.activemq.artemis.core.server.impl.ActiveMQServerImpl;
import org.apache.activemq.artemis.core.server.impl.ReplicationBackupActivation;
import org.apache.activemq.artemis.quorum.DistributedPrimitiveManager;
public class ReplicationBackupPolicy implements HAPolicy<ReplicationBackupActivation> {

   // Companion live (primary) policy; for the failback flavor it is supplied by the caller,
   // otherwise a failover primary policy is derived from this backup's configuration.
   private final ReplicationPrimaryPolicy livePolicy;
   private final String groupName;
   private final String clusterName;
   private final int maxSavedReplicatedJournalsSize;
   private final long retryReplicationWait;
   private final DistributedPrimitiveManagerConfiguration managerConfiguration;
   // true when this policy was created as the failback companion of a natural-born primary.
   private final boolean tryFailback;

   /**
    * Builds the failback flavor: a backup policy paired with the given existing live policy.
    * The resulting broker will try to fail back (tryFailback == true).
    */
   private ReplicationBackupPolicy(ReplicationBackupPolicyConfiguration configuration,
                                   ReplicationPrimaryPolicy livePolicy) {
      Objects.requireNonNull(livePolicy);
      this.clusterName = configuration.getClusterName();
      this.maxSavedReplicatedJournalsSize = configuration.getMaxSavedReplicatedJournalsSize();
      this.groupName = configuration.getGroupName();
      this.retryReplicationWait = configuration.getRetryReplicationWait();
      this.managerConfiguration = configuration.getDistributedManagerConfiguration();
      this.tryFailback = true;
      this.livePolicy = livePolicy;
   }

   /**
    * Builds the natural-born-backup flavor: derives a failover live policy from the
    * configuration and does not attempt failback (tryFailback == false).
    */
   private ReplicationBackupPolicy(ReplicationBackupPolicyConfiguration configuration) {
      this.clusterName = configuration.getClusterName();
      this.maxSavedReplicatedJournalsSize = configuration.getMaxSavedReplicatedJournalsSize();
      this.groupName = configuration.getGroupName();
      this.retryReplicationWait = configuration.getRetryReplicationWait();
      this.managerConfiguration = configuration.getDistributedManagerConfiguration();
      this.tryFailback = false;
      livePolicy = ReplicationPrimaryPolicy.failoverPolicy(
         configuration.getInitialReplicationSyncTimeout(),
         configuration.getGroupName(),
         configuration.getClusterName(),
         this,
         configuration.isAllowFailBack(),
         configuration.getDistributedManagerConfiguration());
   }

   public boolean isTryFailback() {
      return tryFailback;
   }

   /**
    * It creates a policy which live policy won't cause to broker to try failback.
    */
   public static ReplicationBackupPolicy with(ReplicationBackupPolicyConfiguration configuration) {
      return new ReplicationBackupPolicy(configuration);
   }

   /**
    * It creates a companion backup policy for a natural-born primary: it would cause the broker to try failback.
    */
   static ReplicationBackupPolicy failback(long retryReplicationWait,
                                           String clusterName,
                                           String groupName,
                                           ReplicationPrimaryPolicy livePolicy,
                                           DistributedPrimitiveManagerConfiguration distributedManagerConfiguration) {
      return new ReplicationBackupPolicy(ReplicationBackupPolicyConfiguration.withDefault()
                                            .setRetryReplicationWait(retryReplicationWait)
                                            .setClusterName(clusterName)
                                            .setGroupName(groupName)
                                            .setDistributedManagerConfiguration(distributedManagerConfiguration),
                                         livePolicy);
   }

   /**
    * Creates the backup activation backed by a freshly instantiated distributed
    * primitive manager built from this policy's manager configuration.
    * Note: wasLive, activationParams and shutdownOnCriticalIO are not used here.
    */
   @Override
   public ReplicationBackupActivation createActivation(ActiveMQServerImpl server,
                                                       boolean wasLive,
                                                       Map<String, Object> activationParams,
                                                       IOCriticalErrorListener shutdownOnCriticalIO) throws Exception {
      return new ReplicationBackupActivation(server, DistributedPrimitiveManager.newInstanceOf(
         managerConfiguration.getClassName(), managerConfiguration.getProperties()), this);
   }

   @Override
   public boolean isSharedStore() {
      // Replication-based HA never uses a shared store.
      return false;
   }

   @Override
   public boolean isBackup() {
      return true;
   }

   @Override
   public boolean canScaleDown() {
      return false;
   }

   @Override
   public String getScaleDownGroupName() {
      // Scale-down is not supported by this policy.
      return null;
   }

   @Override
   public String getScaleDownClustername() {
      // Scale-down is not supported by this policy.
      return null;
   }

   public String getClusterName() {
      return clusterName;
   }

   @Override
   public String getBackupGroupName() {
      return groupName;
   }

   public String getGroupName() {
      return groupName;
   }

   public ReplicationPrimaryPolicy getLivePolicy() {
      return livePolicy;
   }

   public int getMaxSavedReplicatedJournalsSize() {
      return maxSavedReplicatedJournalsSize;
   }

   public long getRetryReplicationWait() {
      return retryReplicationWait;
   }

   @Override
   public boolean useQuorumManager() {
      // Coordination is delegated to the DistributedPrimitiveManager, not the legacy quorum manager.
      return false;
   }
}
|
tabish121/activemq-artemis
|
artemis-server/src/main/java/org/apache/activemq/artemis/core/server/cluster/ha/ReplicationBackupPolicy.java
|
Java
|
apache-2.0
| 6,136 |
/*
* Copyright 2019 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel.kqueue;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.socket.InternetProtocolFamily;
import io.netty.testsuite.transport.TestsuitePermutation;
import io.netty.testsuite.transport.socket.DatagramUnicastIPv6Test;
import io.netty.testsuite.transport.socket.DatagramUnicastTest;
import java.util.List;
/**
 * Runs the shared {@link DatagramUnicastIPv6Test} suite against the kqueue (BSD/macOS)
 * native datagram transport.
 */
public class KQueueDatagramUnicastIPv6Test extends DatagramUnicastIPv6Test {
    // Supplies kqueue-specific client/server bootstrap combinations for the configured
    // protocol family; the inherited test methods iterate over every combination.
    @Override
    protected List<TestsuitePermutation.BootstrapComboFactory<Bootstrap, Bootstrap>> newFactories() {
        return KQueueSocketTestPermutation.INSTANCE.datagram(internetProtocolFamily());
    }
}
|
doom369/netty
|
transport-native-kqueue/src/test/java/io/netty/channel/kqueue/KQueueDatagramUnicastIPv6Test.java
|
Java
|
apache-2.0
| 1,266 |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.core.rules.impl;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import com.facebook.buck.core.build.buildable.context.FakeBuildableContext;
import com.facebook.buck.core.build.context.BuildContext;
import com.facebook.buck.core.build.context.FakeBuildContext;
import com.facebook.buck.core.model.BuildTarget;
import com.facebook.buck.core.model.BuildTargetFactory;
import com.facebook.buck.core.model.impl.BuildTargetPaths;
import com.facebook.buck.core.rulekey.RuleKey;
import com.facebook.buck.core.rules.ActionGraphBuilder;
import com.facebook.buck.core.rules.BuildRuleResolver;
import com.facebook.buck.core.rules.SourcePathRuleFinder;
import com.facebook.buck.core.rules.resolver.impl.TestActionGraphBuilder;
import com.facebook.buck.core.sourcepath.FakeSourcePath;
import com.facebook.buck.core.sourcepath.PathSourcePath;
import com.facebook.buck.core.sourcepath.SourcePath;
import com.facebook.buck.core.sourcepath.resolver.SourcePathResolverAdapter;
import com.facebook.buck.io.BuildCellRelativePath;
import com.facebook.buck.io.file.MorePaths;
import com.facebook.buck.io.filesystem.ProjectFilesystem;
import com.facebook.buck.io.filesystem.TestProjectFilesystems;
import com.facebook.buck.rules.keys.InputBasedRuleKeyFactory;
import com.facebook.buck.rules.keys.TestDefaultRuleKeyFactory;
import com.facebook.buck.rules.keys.TestInputBasedRuleKeyFactory;
import com.facebook.buck.shell.Genrule;
import com.facebook.buck.shell.GenruleBuilder;
import com.facebook.buck.step.Step;
import com.facebook.buck.step.TestExecutionContext;
import com.facebook.buck.step.fs.MakeCleanDirectoryStep;
import com.facebook.buck.step.fs.SymlinkMapsPaths;
import com.facebook.buck.step.fs.SymlinkTreeMergeStep;
import com.facebook.buck.testutil.FakeFileHashCache;
import com.facebook.buck.testutil.TemporaryPaths;
import com.facebook.buck.util.cache.FileHashCacheMode;
import com.facebook.buck.util.cache.impl.DefaultFileHashCache;
import com.facebook.buck.util.cache.impl.StackedFileHashCache;
import com.facebook.buck.util.hashing.FileHashLoader;
import com.google.common.base.Charsets;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSortedSet;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.hamcrest.Matchers;
import org.hamcrest.junit.ExpectedException;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
/**
 * Tests for {@code SymlinkTree}: the build steps it emits, how its rule keys react to
 * changes in the link map vs. changes in link-target contents, verification of illegal
 * link keys, and the static helper that de-duplicates conflicting relative paths.
 */
public class SymlinkTreeTest {

  @Rule public final TemporaryPaths tmpDir = new TemporaryPaths();
  @Rule public final ExpectedException exception = ExpectedException.none();

  // Shared fixture, (re)built by setUp(): a SymlinkTree with two links into tmpDir.
  private ProjectFilesystem projectFilesystem;
  private BuildTarget buildTarget;
  private SymlinkTree symlinkTreeBuildRule;
  private ImmutableMap<Path, SourcePath> links;
  private Path outputPath;
  private SourcePathResolverAdapter pathResolver;
  private ActionGraphBuilder graphBuilder;

  @Before
  public void setUp() throws Exception {
    projectFilesystem = TestProjectFilesystems.createProjectFilesystem(tmpDir.getRoot());
    // Create a build target to use when building the symlink tree.
    buildTarget = BuildTargetFactory.newInstance("//test:test");
    // Get the first file we're symlinking
    Path link1 = Paths.get("file");
    Path file1 = tmpDir.newFile();
    Files.write(file1, "hello world".getBytes(Charsets.UTF_8));
    // Get the second file we're symlinking
    Path link2 = Paths.get("directory", "then", "file");
    Path file2 = tmpDir.newFile();
    Files.write(file2, "hello world".getBytes(Charsets.UTF_8));
    // Setup the map representing the link tree.
    links =
        ImmutableMap.of(
            link1,
            PathSourcePath.of(projectFilesystem, MorePaths.relativize(tmpDir.getRoot(), file1)),
            link2,
            PathSourcePath.of(projectFilesystem, MorePaths.relativize(tmpDir.getRoot(), file2)));
    // The output path used by the buildable for the link tree.
    outputPath =
        BuildTargetPaths.getGenPath(projectFilesystem, buildTarget, "%s/symlink-tree-root");
    graphBuilder = new TestActionGraphBuilder();
    pathResolver = graphBuilder.getSourcePathResolver();
    // Setup the symlink tree buildable.
    symlinkTreeBuildRule =
        new SymlinkTree("link_tree", buildTarget, projectFilesystem, outputPath, links);
  }

  @Test
  public void testSymlinkTreeBuildSteps() {
    // Create the fake build contexts.
    BuildContext buildContext = FakeBuildContext.withSourcePathResolver(pathResolver);
    FakeBuildableContext buildableContext = new FakeBuildableContext();
    // Verify the build steps are as expected.
    ImmutableList<Step> expectedBuildSteps =
        new ImmutableList.Builder<Step>()
            .addAll(
                MakeCleanDirectoryStep.of(
                    BuildCellRelativePath.fromCellRelativePath(
                        buildContext.getBuildCellRootPath(), projectFilesystem, outputPath)))
            .add(
                new SymlinkTreeMergeStep(
                    "link_tree",
                    projectFilesystem,
                    outputPath,
                    new SymlinkMapsPaths(pathResolver.getMappedPaths(links)),
                    (a, b) -> false))
            .build();
    ImmutableList<Step> actualBuildSteps =
        symlinkTreeBuildRule.getBuildSteps(buildContext, buildableContext);
    // NOTE(review): the first actual step is deliberately excluded from the comparison —
    // confirm it is an implementation detail not worth asserting on.
    assertEquals(expectedBuildSteps, actualBuildSteps.subList(1, actualBuildSteps.size()));
  }

  // Changing which paths are linked must change the (default) rule key.
  @Test
  public void testSymlinkTreeRuleKeyChangesIfLinkMapChanges() throws Exception {
    // Create a BuildRule wrapping the stock SymlinkTree buildable.
    // BuildRule rule1 = symlinkTreeBuildable;
    // Also create a new BuildRule based around a SymlinkTree buildable with a different
    // link map.
    Path aFile = tmpDir.newFile();
    Files.write(aFile, "hello world".getBytes(Charsets.UTF_8));
    SymlinkTree modifiedSymlinkTreeBuildRule =
        new SymlinkTree(
            "link_tree",
            buildTarget,
            projectFilesystem,
            outputPath,
            ImmutableMap.of(
                Paths.get("different/link"),
                PathSourcePath.of(
                    projectFilesystem, MorePaths.relativize(tmpDir.getRoot(), aFile))));
    SourcePathRuleFinder ruleFinder = new TestActionGraphBuilder();
    // Calculate their rule keys and verify they're different.
    DefaultFileHashCache hashCache =
        DefaultFileHashCache.createDefaultFileHashCache(
            TestProjectFilesystems.createProjectFilesystem(tmpDir.getRoot()),
            FileHashCacheMode.DEFAULT);
    FileHashLoader hashLoader = new StackedFileHashCache(ImmutableList.of(hashCache));
    RuleKey key1 =
        new TestDefaultRuleKeyFactory(hashLoader, ruleFinder).build(symlinkTreeBuildRule);
    RuleKey key2 =
        new TestDefaultRuleKeyFactory(hashLoader, ruleFinder).build(modifiedSymlinkTreeBuildRule);
    assertNotEquals(key1, key2);
  }

  // Changing only the *contents* of a link target must not change the input-based rule key:
  // the tree itself only depends on where the links point, not what they contain.
  @Test
  public void testSymlinkTreeRuleKeyDoesNotChangeIfLinkTargetsChangeOnUnix() throws IOException {
    graphBuilder.addToIndex(symlinkTreeBuildRule);
    InputBasedRuleKeyFactory ruleKeyFactory =
        new TestInputBasedRuleKeyFactory(
            FakeFileHashCache.createFromStrings(ImmutableMap.of()), graphBuilder);
    // Calculate the rule key
    RuleKey key1 = ruleKeyFactory.build(symlinkTreeBuildRule);
    // Change the contents of the target of the link.
    Path existingFile = pathResolver.getAbsolutePath(links.values().asList().get(0));
    Files.write(existingFile, "something new".getBytes(Charsets.UTF_8));
    // Re-calculate the rule key
    RuleKey key2 = ruleKeyFactory.build(symlinkTreeBuildRule);
    // Verify that the rules keys are the same.
    assertEquals(key1, key2);
  }

  @Test
  public void testSymlinkTreeDependentRuleKeyChangesWhenLinkSourceContentChanges()
      throws Exception {
    // If a dependent of a symlink tree uses the symlink tree's output as an input, that dependent's
    // rulekey must change when the link contents change.
    ActionGraphBuilder graphBuilder = new TestActionGraphBuilder();
    graphBuilder.addToIndex(symlinkTreeBuildRule);
    Genrule genrule =
        GenruleBuilder.newGenruleBuilder(BuildTargetFactory.newInstance("//:dep"))
            .setSrcs(ImmutableList.of(symlinkTreeBuildRule.getSourcePathToOutput()))
            .setOut("out")
            .build(graphBuilder);
    DefaultFileHashCache hashCache =
        DefaultFileHashCache.createDefaultFileHashCache(
            TestProjectFilesystems.createProjectFilesystem(tmpDir.getRoot()),
            FileHashCacheMode.DEFAULT);
    FileHashLoader hashLoader = new StackedFileHashCache(ImmutableList.of(hashCache));
    RuleKey ruleKey1 = new TestDefaultRuleKeyFactory(hashLoader, graphBuilder).build(genrule);
    Path existingFile =
        graphBuilder.getSourcePathResolver().getAbsolutePath(links.values().asList().get(0));
    Files.write(existingFile, "something new".getBytes(Charsets.UTF_8));
    // Invalidate so the changed content is re-hashed rather than served from cache.
    hashCache.invalidateAll();
    RuleKey ruleKey2 = new TestDefaultRuleKeyFactory(hashLoader, graphBuilder).build(genrule);
    // Verify that the rules keys are different.
    assertNotEquals(ruleKey1, ruleKey2);
  }

  // Input-based keys hash the *declared* inputs: swapping the hash of the link target
  // ("aaaa" -> "bbbb") must leave the tree's own input-based key unchanged.
  @Test
  public void testSymlinkTreeInputBasedRuleKeysAreImmuneToLinkSourceContentChanges() {
    Genrule dep =
        GenruleBuilder.newGenruleBuilder(BuildTargetFactory.newInstance("//:dep"))
            .setOut("out")
            .build(graphBuilder);
    symlinkTreeBuildRule =
        new SymlinkTree(
            "link_tree",
            buildTarget,
            projectFilesystem,
            outputPath,
            ImmutableMap.of(Paths.get("link"), dep.getSourcePathToOutput()));
    // Generate an input-based rule key for the symlink tree with the contents of the link
    // target hashing to "aaaa".
    FakeFileHashCache hashCache =
        FakeFileHashCache.createFromStrings(ImmutableMap.of("out", "aaaa"));
    InputBasedRuleKeyFactory inputBasedRuleKeyFactory =
        new TestInputBasedRuleKeyFactory(hashCache, graphBuilder);
    RuleKey ruleKey1 = inputBasedRuleKeyFactory.build(symlinkTreeBuildRule);
    // Generate an input-based rule key for the symlink tree with the contents of the link
    // target hashing to a different value: "bbbb".
    hashCache = FakeFileHashCache.createFromStrings(ImmutableMap.of("out", "bbbb"));
    inputBasedRuleKeyFactory = new TestInputBasedRuleKeyFactory(hashCache, graphBuilder);
    RuleKey ruleKey2 = inputBasedRuleKeyFactory.build(symlinkTreeBuildRule);
    // Verify that the rules keys are the same.
    assertEquals(ruleKey1, ruleKey2);
  }

  // A link key that escapes the tree root ("../something") must fail verification.
  @Test
  public void verifyStepFailsIfKeyContainsDotDot() throws Exception {
    BuildRuleResolver ruleResolver = new TestActionGraphBuilder();
    SymlinkTree symlinkTree =
        new SymlinkTree(
            "link_tree",
            buildTarget,
            projectFilesystem,
            outputPath,
            ImmutableMap.of(
                Paths.get("../something"),
                PathSourcePath.of(
                    projectFilesystem, MorePaths.relativize(tmpDir.getRoot(), tmpDir.newFile()))));
    int exitCode =
        symlinkTree
            .getVerifyStep(symlinkTree.getResolvedSymlinks(ruleResolver.getSourcePathResolver()))
            .execute(TestExecutionContext.newInstance())
            .getExitCode();
    assertThat(exitCode, Matchers.not(Matchers.equalTo(0)));
  }

  // With no colliding relative paths, de-duplication must map each source path to itself.
  @Test
  public void resolveDuplicateRelativePathsIsNoopWhenThereAreNoDuplicates() {
    BuildRuleResolver ruleResolver = new TestActionGraphBuilder();
    ImmutableSortedSet<SourcePath> sourcePaths =
        ImmutableSortedSet.of(
            FakeSourcePath.of("one"), FakeSourcePath.of("two/two"), FakeSourcePath.of("three"));
    ImmutableBiMap<SourcePath, Path> resolvedDuplicates =
        SymlinkTree.resolveDuplicateRelativePaths(
            sourcePaths, ruleResolver.getSourcePathResolver());
    assertThat(
        resolvedDuplicates.inverse(),
        Matchers.equalTo(
            FluentIterable.from(sourcePaths)
                .uniqueIndex(ruleResolver.getSourcePathResolver()::getRelativePath)));
  }

  @Rule public TemporaryPaths tmp = new TemporaryPaths();

  // Same relative path in two filesystems: the second occurrence gets a "-1" suffix
  // (inserted before the extension, if any).
  @Test
  public void resolveDuplicateRelativePaths() {
    BuildRuleResolver ruleResolver = new TestActionGraphBuilder();
    tmp.getRoot().resolve("one").toFile().mkdir();
    tmp.getRoot().resolve("two").toFile().mkdir();
    ProjectFilesystem fsOne =
        TestProjectFilesystems.createProjectFilesystem(tmp.getRoot().resolve("one"));
    ProjectFilesystem fsTwo =
        TestProjectFilesystems.createProjectFilesystem(tmp.getRoot().resolve("two"));
    ImmutableBiMap<SourcePath, Path> expected =
        ImmutableBiMap.of(
            FakeSourcePath.of(fsOne, "a/one.a"), Paths.get("a/one.a"),
            FakeSourcePath.of(fsOne, "a/two"), Paths.get("a/two"),
            FakeSourcePath.of(fsTwo, "a/one.a"), Paths.get("a/one-1.a"),
            FakeSourcePath.of(fsTwo, "a/two"), Paths.get("a/two-1"));
    ImmutableBiMap<SourcePath, Path> resolvedDuplicates =
        SymlinkTree.resolveDuplicateRelativePaths(
            ImmutableSortedSet.copyOf(expected.keySet()), ruleResolver.getSourcePathResolver());
    assertThat(resolvedDuplicates, Matchers.equalTo(expected));
  }

  // When a generated suffix ("a/two-1") collides with a real input path, the counter keeps
  // incrementing until a free name is found ("a/two-2", "a/two-3").
  @Test
  public void resolveDuplicateRelativePathsWithConflicts() {
    BuildRuleResolver ruleResolver = new TestActionGraphBuilder();
    tmp.getRoot().resolve("a-fs").toFile().mkdir();
    tmp.getRoot().resolve("b-fs").toFile().mkdir();
    tmp.getRoot().resolve("c-fs").toFile().mkdir();
    ProjectFilesystem fsOne =
        TestProjectFilesystems.createProjectFilesystem(tmp.getRoot().resolve("a-fs"));
    ProjectFilesystem fsTwo =
        TestProjectFilesystems.createProjectFilesystem(tmp.getRoot().resolve("b-fs"));
    ProjectFilesystem fsThree =
        TestProjectFilesystems.createProjectFilesystem(tmp.getRoot().resolve("c-fs"));
    ImmutableBiMap<SourcePath, Path> expected =
        ImmutableBiMap.<SourcePath, Path>builder()
            .put(FakeSourcePath.of(fsOne, "a/one.a"), Paths.get("a/one.a"))
            .put(FakeSourcePath.of(fsOne, "a/two"), Paths.get("a/two"))
            .put(FakeSourcePath.of(fsOne, "a/two-1"), Paths.get("a/two-1"))
            .put(FakeSourcePath.of(fsTwo, "a/one.a"), Paths.get("a/one-1.a"))
            .put(FakeSourcePath.of(fsTwo, "a/two"), Paths.get("a/two-2"))
            .put(FakeSourcePath.of(fsThree, "a/two"), Paths.get("a/two-3"))
            .build();
    ImmutableBiMap<SourcePath, Path> resolvedDuplicates =
        SymlinkTree.resolveDuplicateRelativePaths(
            ImmutableSortedSet.copyOf(expected.keySet()), ruleResolver.getSourcePathResolver());
    assertThat(resolvedDuplicates, Matchers.equalTo(expected));
  }

  // A symlink tree has no build deps of its own (link targets are runtime inputs only).
  @Test
  public void getsCorrectCompileTimeDeps() {
    assertEquals(ImmutableSortedSet.of(), symlinkTreeBuildRule.getBuildDeps());
  }
}
|
facebook/buck
|
test/com/facebook/buck/core/rules/impl/SymlinkTreeTest.java
|
Java
|
apache-2.0
| 15,830 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.core.metamodel.facets.collections.collection.typeof;
import org.apache.isis.applib.annotation.TypeOf;
import org.apache.isis.core.metamodel.facetapi.FacetHolder;
import org.apache.isis.core.metamodel.facets.FacetedMethod;
import org.apache.isis.core.metamodel.facets.actcoll.typeof.TypeOfFacetAbstract;
import org.apache.isis.core.metamodel.specloader.SpecificationLoader;
/**
 * Derives the element-type facet for a collection from the deprecated {@link TypeOf} annotation.
 *
 * @deprecated
 */
@Deprecated
public class TypeOfFacetOnCollectionFromTypeOfAnnotation extends TypeOfFacetAbstract {

    /**
     * Factory method: yields {@code null} when no annotation is present, otherwise a facet
     * carrying the annotated element type for the given holder.
     */
    public static TypeOfFacetOnCollectionFromTypeOfAnnotation create(final TypeOf annotation, final FacetedMethod facetHolder, final SpecificationLoader specificationLoader) {
        return annotation == null
                ? null
                : new TypeOfFacetOnCollectionFromTypeOfAnnotation(annotation.value(), facetHolder, specificationLoader);
    }

    private TypeOfFacetOnCollectionFromTypeOfAnnotation(final Class<?> elementType, final FacetHolder holder, final SpecificationLoader specificationLookup) {
        super(elementType, holder, specificationLookup);
    }
}
|
niv0/isis
|
core/metamodel/src/main/java/org/apache/isis/core/metamodel/facets/collections/collection/typeof/TypeOfFacetOnCollectionFromTypeOfAnnotation.java
|
Java
|
apache-2.0
| 1,929 |
/*
* Copyright 2013 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.optaplanner.core.api.score.buildin.hardsoftbigdecimal;
import java.math.BigDecimal;
import java.math.RoundingMode;
import org.optaplanner.core.api.score.AbstractScore;
import org.optaplanner.core.api.score.FeasibilityScore;
import org.optaplanner.core.api.score.Score;
/**
* This {@link Score} is based on 2 levels of {@link BigDecimal} constraints: hard and soft.
* Hard constraints have priority over soft constraints.
* <p>
* This class is immutable.
* @see Score
*/
public final class HardSoftBigDecimalScore extends AbstractScore<HardSoftBigDecimalScore>
        implements FeasibilityScore<HardSoftBigDecimalScore> {

    private static final String HARD_LABEL = "hard";
    private static final String SOFT_LABEL = "soft";

    /**
     * Parses a score string of the form produced by {@link #toString()},
     * for example {@code "-5.0hard/-20.0soft"}.
     *
     * @param scoreString never null
     * @return never null
     */
    public static HardSoftBigDecimalScore parseScore(String scoreString) {
        String[] levelStrings = parseLevelStrings(HardSoftBigDecimalScore.class, scoreString, HARD_LABEL, SOFT_LABEL);
        BigDecimal hardScore = parseLevelAsBigDecimal(HardSoftBigDecimalScore.class, scoreString, levelStrings[0]);
        BigDecimal softScore = parseLevelAsBigDecimal(HardSoftBigDecimalScore.class, scoreString, levelStrings[1]);
        return valueOf(hardScore, softScore);
    }

    /** Static factory; the constructor is intentionally private. */
    public static HardSoftBigDecimalScore valueOf(BigDecimal hardScore, BigDecimal softScore) {
        return new HardSoftBigDecimalScore(hardScore, softScore);
    }

    // ************************************************************************
    // Fields
    // ************************************************************************

    private final BigDecimal hardScore;
    private final BigDecimal softScore;

    private HardSoftBigDecimalScore(BigDecimal hardScore, BigDecimal softScore) {
        this.hardScore = hardScore;
        this.softScore = softScore;
    }

    /**
     * The total of the broken negative hard constraints and fulfilled positive hard constraints.
     * Their weight is included in the total.
     * The hard score is usually a negative number because most use cases only have negative constraints.
     * @return higher is better, usually negative, 0 if no hard constraints are broken/fulfilled
     */
    public BigDecimal getHardScore() {
        return hardScore;
    }

    /**
     * The total of the broken negative soft constraints and fulfilled positive soft constraints.
     * Their weight is included in the total.
     * The soft score is usually a negative number because most use cases only have negative constraints.
     * <p>
     * In a normal score comparison, the soft score is irrelevant if the 2 scores don't have the same hard score.
     * @return higher is better, usually negative, 0 if no soft constraints are broken/fulfilled
     */
    public BigDecimal getSoftScore() {
        return softScore;
    }

    // ************************************************************************
    // Worker methods
    // ************************************************************************

    /** A solution is feasible when no hard constraints are broken (hard score &gt;= 0). */
    public boolean isFeasible() {
        return getHardScore().compareTo(BigDecimal.ZERO) >= 0;
    }

    public HardSoftBigDecimalScore add(HardSoftBigDecimalScore augment) {
        return new HardSoftBigDecimalScore(hardScore.add(augment.getHardScore()),
                softScore.add(augment.getSoftScore()));
    }

    public HardSoftBigDecimalScore subtract(HardSoftBigDecimalScore subtrahend) {
        return new HardSoftBigDecimalScore(hardScore.subtract(subtrahend.getHardScore()),
                softScore.subtract(subtrahend.getSoftScore()));
    }

    public HardSoftBigDecimalScore multiply(double multiplicand) {
        // Intentionally not taken "new BigDecimal(multiplicand, MathContext.UNLIMITED)"
        // because together with the floor rounding it gives unwanted behaviour
        BigDecimal multiplicandBigDecimal = BigDecimal.valueOf(multiplicand);
        // The (unspecified) scale/precision of the multiplicand should have no impact on the returned scale/precision
        return new HardSoftBigDecimalScore(
                hardScore.multiply(multiplicandBigDecimal).setScale(hardScore.scale(), RoundingMode.FLOOR),
                softScore.multiply(multiplicandBigDecimal).setScale(softScore.scale(), RoundingMode.FLOOR));
    }

    public HardSoftBigDecimalScore divide(double divisor) {
        // Intentionally not taken "new BigDecimal(divisor, MathContext.UNLIMITED)"
        // because together with the floor rounding it gives unwanted behaviour
        BigDecimal divisorBigDecimal = BigDecimal.valueOf(divisor);
        // The (unspecified) scale/precision of the divisor should have no impact on the returned scale/precision
        return new HardSoftBigDecimalScore(
                hardScore.divide(divisorBigDecimal, hardScore.scale(), RoundingMode.FLOOR),
                softScore.divide(divisorBigDecimal, softScore.scale(), RoundingMode.FLOOR));
    }

    public HardSoftBigDecimalScore power(double exponent) {
        // Intentionally not taken "new BigDecimal(exponent, MathContext.UNLIMITED)"
        // because together with the floor rounding it gives unwanted behaviour
        BigDecimal exponentBigDecimal = BigDecimal.valueOf(exponent);
        // The (unspecified) scale/precision of the exponent should have no impact on the returned scale/precision
        // TODO FIXME remove .intValue() so non-integer exponents produce correct results
        // None of the normal Java libraries support BigDecimal.pow(BigDecimal)
        // BigDecimal.pow(n) multiplies the scale by n, so the result must be rescaled back.
        // Use FLOOR explicitly: the rounding-free setScale(int) implies ROUND_UNNECESSARY and
        // throws ArithmeticException whenever rescaling needs to drop digits. FLOOR also keeps
        // this method consistent with multiply() and divide() above.
        return new HardSoftBigDecimalScore(
                hardScore.pow(exponentBigDecimal.intValue()).setScale(hardScore.scale(), RoundingMode.FLOOR),
                softScore.pow(exponentBigDecimal.intValue()).setScale(softScore.scale(), RoundingMode.FLOOR));
    }

    public HardSoftBigDecimalScore negate() {
        return new HardSoftBigDecimalScore(hardScore.negate(), softScore.negate());
    }

    /** Levels are exposed as doubles; note this can lose precision versus the BigDecimal fields. */
    public Number[] toLevelNumbers() {
        return new Number[]{hardScore.doubleValue(), softScore.doubleValue()};
    }

    public boolean equals(Object o) {
        // A direct implementation (instead of EqualsBuilder) to avoid dependencies
        if (this == o) {
            return true;
        } else if (o instanceof HardSoftBigDecimalScore) {
            HardSoftBigDecimalScore other = (HardSoftBigDecimalScore) o;
            // BigDecimal.equals() is scale-sensitive: 1.0 and 1.00 are NOT equal here.
            return hardScore.equals(other.getHardScore())
                    && softScore.equals(other.getSoftScore());
        } else {
            return false;
        }
    }

    public int hashCode() {
        // A direct implementation (instead of HashCodeBuilder) to avoid dependencies
        return (((17 * 37) + hardScore.hashCode())) * 37 + softScore.hashCode();
    }

    /**
     * Compares by hard score first; the soft score only breaks ties.
     * Normalizes the result to -1, 0 or 1.
     */
    public int compareTo(HardSoftBigDecimalScore other) {
        // A direct implementation (instead of CompareToBuilder) to avoid dependencies
        int hardScoreComparison = hardScore.compareTo(other.getHardScore());
        if (hardScoreComparison != 0) {
            return hardScoreComparison < 0 ? -1 : 1;
        }
        int softScoreComparison = softScore.compareTo(other.getSoftScore());
        if (softScoreComparison < 0) {
            return -1;
        } else if (softScoreComparison > 0) {
            return 1;
        } else {
            return 0;
        }
    }

    public String toString() {
        return hardScore + HARD_LABEL + "/" + softScore + SOFT_LABEL;
    }

}
|
codeaudit/optaplanner
|
optaplanner-core/src/main/java/org/optaplanner/core/api/score/buildin/hardsoftbigdecimal/HardSoftBigDecimalScore.java
|
Java
|
apache-2.0
| 8,024 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.spring.processor.onexception;
import org.apache.camel.CamelContext;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.component.mock.MockEndpoint;
import org.junit.Test;
import static org.apache.camel.spring.processor.SpringTestHelper.createSpringCamelContext;
/**
* Unit test for onException with the spring DSL.
*/
public class SpringContextScopeOnExceptionTest extends ContextTestSupport {

    // Happy path: the order succeeds, so neither the onException route ("mock:error")
    // nor the dead letter channel ("mock:dead") receives anything.
    @Test
    public void testOrderOk() throws Exception {
        MockEndpoint result = getMockEndpoint("mock:result");
        result.expectedBodiesReceived("Order OK");
        result.expectedHeaderReceived("orderid", "123");

        MockEndpoint error = getMockEndpoint("mock:error");
        error.expectedMessageCount(0);

        MockEndpoint dead = getMockEndpoint("mock:dead");
        dead.expectedMessageCount(0);

        Object out = template.requestBodyAndHeader("direct:start", "Order: MacBook Pro", "customerid", "444");
        assertEquals("Order OK", out);

        assertMockEndpointsSatisfied();
    }

    // Failure path: a "kaboom" order triggers the context-scoped onException handler,
    // so the message lands on "mock:error" instead of "mock:result".
    @Test
    public void testOrderError() throws Exception {
        MockEndpoint error = getMockEndpoint("mock:error");
        error.expectedBodiesReceived("Order ERROR");
        error.expectedHeaderReceived("orderid", "failed");

        MockEndpoint result = getMockEndpoint("mock:result");
        result.expectedMessageCount(0);

        MockEndpoint dead = getMockEndpoint("mock:dead");
        dead.expectedMessageCount(0);

        Object out = template.requestBodyAndHeader("direct:start", "Order: kaboom", "customerid", "555");
        assertEquals("Order ERROR", out);

        assertMockEndpointsSatisfied();
    }

    // Routes under test live in the Spring XML file, not in Java DSL.
    @Override
    protected CamelContext createCamelContext() throws Exception {
        return createSpringCamelContext(this, "/org/apache/camel/spring/processor/onexception/SpringContextScopeOnExceptionTest.xml");
    }
}
|
objectiser/camel
|
components/camel-spring/src/test/java/org/apache/camel/spring/processor/onexception/SpringContextScopeOnExceptionTest.java
|
Java
|
apache-2.0
| 2,725 |
/*
* Copyright 2000-2013 JetBrains s.r.o.
* Copyright 2014-2015 AS3Boyan
* Copyright 2014-2014 Elias Ku
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// This is a generated file. Not intended for manual editing.
package com.intellij.plugins.haxe.hxml.psi;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.PsiElement;
import com.intellij.lang.ASTNode;
import com.intellij.plugins.haxe.hxml.psi.impl.*;
// NOTE: generated by Grammar-Kit (see the header above) — edit the grammar, not this file.
public interface HXMLTypes {

  // Composite (parsed) element types.
  IElementType CLASSPATH = new HXMLElementType("CLASSPATH");
  IElementType DEFINE = new HXMLElementType("DEFINE");
  IElementType LIB = new HXMLElementType("LIB");
  IElementType MAIN = new HXMLElementType("MAIN");
  IElementType PROPERTY = new HXMLElementType("PROPERTY");
  IElementType QUALIFIED_NAME = new HXMLElementType("QUALIFIED_NAME");

  // Leaf token types produced by the lexer.
  IElementType COMMENT = new HXMLTokenType("COMMENT");
  IElementType CRLF = new HXMLTokenType("CRLF");
  IElementType KEY = new HXMLTokenType("KEY");
  IElementType QUALIFIEDCLASSNAME = new HXMLTokenType("QUALIFIEDCLASSNAME");
  IElementType SEPARATOR = new HXMLTokenType("SEPARATOR");
  IElementType VALUE = new HXMLTokenType("VALUE");

  /** Maps each composite element type to its PSI implementation class. */
  class Factory {
    public static PsiElement createElement(ASTNode node) {
      IElementType type = node.getElementType();
      if (type == CLASSPATH) {
        return new HXMLClasspathImpl(node);
      }
      else if (type == DEFINE) {
        return new HXMLDefineImpl(node);
      }
      else if (type == LIB) {
        return new HXMLLibImpl(node);
      }
      else if (type == MAIN) {
        return new HXMLMainImpl(node);
      }
      else if (type == PROPERTY) {
        return new HXMLPropertyImpl(node);
      }
      else if (type == QUALIFIED_NAME) {
        return new HXMLQualifiedNameImpl(node);
      }
      // Token types never reach here; any other element type is a grammar/codegen bug.
      throw new AssertionError("Unknown element type: " + type);
    }
  }
}
|
TiVo/intellij-haxe-nightly-builds
|
gen/com/intellij/plugins/haxe/hxml/psi/HXMLTypes.java
|
Java
|
apache-2.0
| 2,359 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doThrow;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.UnsupportedEncodingException;
import java.io.Writer;
import java.util.Arrays;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.TestContainerId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter;
import org.apache.hadoop.yarn.util.Times;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
public class TestAggregatedLogFormat {
  // Scratch directory under target/ shared by all tests; wiped before and after each test.
  private static final File testWorkDir = new File("target",
      "TestAggregatedLogFormat");
  private static final Configuration conf = new Configuration();
  private static final FileSystem fs;
  // Character used to fill generated source log files (used by helpers later in this class).
  private static final char filler = 'x';
  private static final Log LOG = LogFactory
      .getLog(TestAggregatedLogFormat.class);

  // Resolve the local FileSystem once for the whole class; fail class loading loudly
  // if it cannot be obtained, since every test depends on it.
  static {
    try {
      fs = FileSystem.get(conf);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
@Before
@After
public void cleanupTestDir() throws Exception {
Path workDirPath = new Path(testWorkDir.getAbsolutePath());
LOG.info("Cleaning test directory [" + workDirPath + "]");
fs.delete(workDirPath, true);
}
//Test for Corrupted AggregatedLogs. The Logs should not write more data
//if Logvalue.write() is called and the application is still
//appending to logs
@Test
public void testForCorruptedAggregatedLogs() throws Exception {
Configuration conf = new Configuration();
File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
Path remoteAppLogFile =
new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
Path t =
new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
.getApplicationId().toString());
Path srcFilePath = new Path(t, testContainerId.toString());
long numChars = 950000;
writeSrcFileAndALog(srcFilePath, "stdout", numChars, remoteAppLogFile,
srcFileRoot, testContainerId);
LogReader logReader = new LogReader(conf, remoteAppLogFile);
LogKey rLogKey = new LogKey();
DataInputStream dis = logReader.next(rLogKey);
Writer writer = new StringWriter();
try {
LogReader.readAcontainerLogs(dis, writer);
} catch (Exception e) {
if(e.toString().contains("NumberFormatException")) {
Assert.fail("Aggregated logs are corrupted.");
}
}
}
private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long length,
Path remoteAppLogFile, Path srcFileRoot, ContainerId testContainerId)
throws Exception {
File dir = new File(srcFilePath.toString());
if (!dir.exists()) {
if (!dir.mkdirs()) {
throw new IOException("Unable to create directory : " + dir);
}
}
File outputFile = new File(new File(srcFilePath.toString()), fileName);
FileOutputStream os = new FileOutputStream(outputFile);
final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
final int ch = filler;
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
LogWriter logWriter = new LogWriter(new Configuration(), remoteAppLogFile,
ugi);
LogKey logKey = new LogKey(testContainerId);
LogValue logValue =
spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
testContainerId, ugi.getShortUserName()));
final CountDownLatch latch = new CountDownLatch(1);
Thread t = new Thread() {
public void run() {
try {
for(int i=0; i < length/3; i++) {
osw.write(ch);
}
latch.countDown();
for(int i=0; i < (2*length)/3; i++) {
osw.write(ch);
}
osw.close();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
};
t.start();
//Wait till the osw is partially written
//aggregation starts once the ows has completed 1/3rd of its work
latch.await();
//Aggregate The Logs
logWriter.append(logKey, logValue);
logWriter.close();
}
@Test
public void testReadAcontainerLogs1() throws Exception {
//Verify the output generated by readAContainerLogs(DataInputStream, Writer, logUploadedTime)
testReadAcontainerLog(true);
//Verify the output generated by readAContainerLogs(DataInputStream, Writer)
testReadAcontainerLog(false);
}
private void testReadAcontainerLog(boolean logUploadedTime) throws Exception {
Configuration conf = new Configuration();
File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
Path remoteAppLogFile =
new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
Path t =
new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
.getApplicationId().toString());
Path srcFilePath = new Path(t, testContainerId.toString());
int numChars = 80000;
// create a sub-folder under srcFilePath
// and create file logs in this sub-folder.
// We only aggregate top level files.
// So, this log file should be ignored.
Path subDir = new Path(srcFilePath, "subDir");
fs.mkdirs(subDir);
writeSrcFile(subDir, "logs", numChars);
// create file stderr and stdout in containerLogDir
writeSrcFile(srcFilePath, "stderr", numChars);
writeSrcFile(srcFilePath, "stdout", numChars);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
LogKey logKey = new LogKey(testContainerId);
LogValue logValue =
new LogValue(Collections.singletonList(srcFileRoot.toString()),
testContainerId, ugi.getShortUserName());
// When we try to open FileInputStream for stderr, it will throw out an IOException.
// Skip the log aggregation for stderr.
LogValue spyLogValue = spy(logValue);
File errorFile = new File((new Path(srcFilePath, "stderr")).toString());
doThrow(new IOException("Mock can not open FileInputStream")).when(
spyLogValue).secureOpenFile(errorFile);
logWriter.append(logKey, spyLogValue);
logWriter.close();
// make sure permission are correct on the file
FileStatus fsStatus = fs.getFileStatus(remoteAppLogFile);
Assert.assertEquals("permissions on log aggregation file are wrong",
FsPermission.createImmutable((short) 0640), fsStatus.getPermission());
LogReader logReader = new LogReader(conf, remoteAppLogFile);
LogKey rLogKey = new LogKey();
DataInputStream dis = logReader.next(rLogKey);
Writer writer = new StringWriter();
if (logUploadedTime) {
LogReader.readAcontainerLogs(dis, writer, System.currentTimeMillis());
} else {
LogReader.readAcontainerLogs(dis, writer);
}
// We should only do the log aggregation for stdout.
// Since we could not open the fileInputStream for stderr, this file is not
// aggregated.
String s = writer.toString();
int expectedLength =
"LogType:stdout".length()
+ (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
.currentTimeMillis())).length() : 0)
+ ("\nLogLength:" + numChars).length()
+ "\nLog Contents:\n".length() + numChars + "\n".length()
+ "\nEnd of LogType:stdout\n".length();
Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
Assert.assertTrue("log file:stderr should not be aggregated.", !s.contains("LogType:stderr"));
Assert.assertTrue("log file:logs should not be aggregated.", !s.contains("LogType:logs"));
Assert.assertTrue("LogLength not matched", s.contains("LogLength:" + numChars));
Assert.assertTrue("Log Contents not matched", s.contains("Log Contents"));
StringBuilder sb = new StringBuilder();
for (int i = 0 ; i < numChars ; i++) {
sb.append(filler);
}
String expectedContent = sb.toString();
Assert.assertTrue("Log content incorrect", s.contains(expectedContent));
Assert.assertEquals(expectedLength, s.length());
}
@Test(timeout=10000)
public void testContainerLogsFileAccess() throws IOException {
// This test will run only if NativeIO is enabled as SecureIOUtils
// require it to be enabled.
Assume.assumeTrue(NativeIO.isAvailable());
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
File workDir = new File(testWorkDir, "testContainerLogsFileAccess1");
Path remoteAppLogFile =
new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
String data = "Log File content for container : ";
// Creating files for container1. Log aggregator will try to read log files
// with illegal user.
ApplicationId applicationId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
ContainerId testContainerId1 =
ContainerId.newContainerId(applicationAttemptId, 1);
Path appDir =
new Path(srcFileRoot, testContainerId1.getApplicationAttemptId()
.getApplicationId().toString());
Path srcFilePath1 = new Path(appDir, testContainerId1.toString());
String stdout = "stdout";
String stderr = "stderr";
writeSrcFile(srcFilePath1, stdout, data + testContainerId1.toString()
+ stdout);
writeSrcFile(srcFilePath1, stderr, data + testContainerId1.toString()
+ stderr);
UserGroupInformation ugi =
UserGroupInformation.getCurrentUser();
LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
LogKey logKey = new LogKey(testContainerId1);
String randomUser = "randomUser";
LogValue logValue =
spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
testContainerId1, randomUser));
// It is trying simulate a situation where first log file is owned by
// different user (probably symlink) and second one by the user itself.
// The first file should not be aggregated. Because this log file has the invalid
// user name.
when(logValue.getUser()).thenReturn(randomUser).thenReturn(
ugi.getShortUserName());
logWriter.append(logKey, logValue);
logWriter.close();
BufferedReader in =
new BufferedReader(new FileReader(new File(remoteAppLogFile
.toUri().getRawPath())));
String line;
StringBuffer sb = new StringBuffer("");
while ((line = in.readLine()) != null) {
LOG.info(line);
sb.append(line);
}
line = sb.toString();
String expectedOwner = ugi.getShortUserName();
if (Path.WINDOWS) {
final String adminsGroupString = "Administrators";
if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
expectedOwner = adminsGroupString;
}
}
// This file: stderr should not be aggregated.
// And we will not aggregate the log message.
String stdoutFile1 =
StringUtils.join(
File.separator,
Arrays.asList(new String[] {
workDir.getAbsolutePath(), "srcFiles",
testContainerId1.getApplicationAttemptId().getApplicationId()
.toString(), testContainerId1.toString(), stderr }));
// The file: stdout is expected to be aggregated.
String stdoutFile2 =
StringUtils.join(
File.separator,
Arrays.asList(new String[] {
workDir.getAbsolutePath(), "srcFiles",
testContainerId1.getApplicationAttemptId().getApplicationId()
.toString(), testContainerId1.toString(), stdout }));
String message2 =
"Owner '" + expectedOwner + "' for path "
+ stdoutFile2 + " did not match expected owner '"
+ ugi.getShortUserName() + "'";
Assert.assertFalse(line.contains(message2));
Assert.assertFalse(line.contains(data + testContainerId1.toString()
+ stderr));
Assert.assertTrue(line.contains(data + testContainerId1.toString()
+ stdout));
}
private void writeSrcFile(Path srcFilePath, String fileName, long length)
throws IOException {
OutputStreamWriter osw = getOutputStreamWriter(srcFilePath, fileName);
int ch = filler;
for (int i = 0; i < length; i++) {
osw.write(ch);
}
osw.close();
}
private void writeSrcFile(Path srcFilePath, String fileName, String data)
throws IOException {
OutputStreamWriter osw = getOutputStreamWriter(srcFilePath, fileName);
osw.write(data);
osw.close();
}
private OutputStreamWriter getOutputStreamWriter(Path srcFilePath,
String fileName) throws IOException, FileNotFoundException,
UnsupportedEncodingException {
File dir = new File(srcFilePath.toString());
if (!dir.exists()) {
if (!dir.mkdirs()) {
throw new IOException("Unable to create directory : " + dir);
}
}
File outputFile = new File(new File(srcFilePath.toString()), fileName);
FileOutputStream os = new FileOutputStream(outputFile);
OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
return osw;
}
}
|
WIgor/hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
|
Java
|
apache-2.0
| 15,816 |
package org.mifos.platform.rest.controller;
import java.util.List;
import org.codehaus.jackson.annotate.JsonCreator;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.annotate.JsonProperty;
import org.joda.time.LocalDate;
import org.mifos.application.servicefacade.CreateClientNameDetailDto;
import org.mifos.application.servicefacade.CreatePersonalDetailDto;
import org.mifos.application.servicefacade.CreationAccountPenaltyDto;
import org.mifos.application.servicefacade.CreationAddresDto;
import org.mifos.application.servicefacade.CreationFeeDto;
import org.mifos.application.servicefacade.CreationGLIMAccountsDto;
import org.mifos.application.servicefacade.CreationMeetingDto;
/**
 * Helper types for the REST API controllers: validation error messages and
 * Jackson mix-in classes that map JSON properties onto the constructors of
 * the application-service DTOs (which have no Jackson annotations of their
 * own).
 */
public class RESTAPIHelper {

    /** Validation error messages returned to REST clients. */
    public static class ErrorMessage {
        public static final String INVALID_AMOUNT = "please specify correct amount";
        // Fixed typo: "grater" -> "greater".
        public static final String NON_NEGATIVE_AMOUNT = "amount must be greater than 0";
        public static final String NOT_ACTIVE_ACCOUNT = "account is not in active state.";
        public static final String INVALID_NOTE = "note is not specified";
        public static final String INVALID_FEE_ID = "invalid fee Id";
        public static final String INVALID_DATE_STRING = "string is not valid date";
        public static final String FUTURE_DATE = "Date can not be a future date.";
        public static final String INVALID_PAYMENT_TYPE_ID = "invalid payment type Id";
        public static final String INVALID_PRODUCT_ID = "invalid product Id";
        // NOTE(review): the constant name is misspelled ("GLOABAL") but is
        // part of the public API, so it is kept for source compatibility.
        public static final String INVALID_GLOABAL_CUSTOMER_NUM = "invalid global customer number";
        public static final String INVALID_MEETING = "meeting can not be null";
        public static final String INVALID_DISPLAY_NAME = "invalid customer name";
        public static final String INVALID_LOAN_OFFICER_ID = "invalid loan officer id";
        public static final String INVALID_OFFICE_ID = "invalid office id";
        public static final String INVALID_MFI_DATE = "mfiJoiningDate is mandatory";
        // Fixed typo: "amont" -> "amount".
        public static final String INVALID_GLIM_AMOUNT = "individual amount must be equal glim amount";
        public static final String INVALID_CUSTOMER_STATUS = "invalid customer status";
    }

    /** Mix-in binding JSON properties to {@code CreationAddresDto}'s constructor. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreationAddresDtoMixIn {
        @JsonCreator
        public CreationAddresDtoMixIn(@JsonProperty("address1") String address1,
                @JsonProperty("address2") String address2, @JsonProperty("address3") String address3,
                @JsonProperty("city") String city, @JsonProperty("state") String state,
                @JsonProperty("country") String country, @JsonProperty("zip") String zip,
                @JsonProperty("phoneNumber") String phoneNumber) {
        }
    }

    /** Mix-in binding JSON properties to {@code CreationFeeDto}'s constructor. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreationFeeDtoMixIn {
        @JsonCreator
        public CreationFeeDtoMixIn(@JsonProperty("feeId") Integer feeId, @JsonProperty("amount") String amount) {
        }
    }

    /** Mix-in binding JSON properties to {@code CreationMeetingDto}'s constructor. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreationMeetingDtoMixIn {
        @JsonCreator
        public CreationMeetingDtoMixIn(@JsonProperty("meetingStartDate") LocalDate meetingStartDate,
                @JsonProperty("meetingPlace") String meetingPlace,
                @JsonProperty("recurrenceType") Short recurrenceType, @JsonProperty("dayNumber") Short dayNumber,
                @JsonProperty("weekDay") Short weekDay, @JsonProperty("rankOfDay") Short rankOfDay,
                @JsonProperty("recurAfter") Short recurAfter) {
        }
    }

    /** Mix-in for the center-creation payload (address, fees and meeting are nested DTOs). */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CenterCreationDetailMixIn {
        @JsonCreator
        public CenterCreationDetailMixIn(@JsonProperty("mfiJoiningDate") LocalDate mfiJoiningDate,
                @JsonProperty("displayName") String displayName, @JsonProperty("externalId") String externalId,
                @JsonProperty("loanOfficerId") Integer loanOfficerId, @JsonProperty("officeId") Integer officeId,
                @JsonProperty("address") CreationAddresDto creationAddresDto,
                @JsonProperty("accountFees") List<CreationFeeDto> creationFeeDto,
                @JsonProperty("meeting") CreationMeetingDto meeting) {
        }
    }

    /** Mix-in for the group-creation payload. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreateGroupCreationDetailDtoMixIn {
        @JsonCreator
        public CreateGroupCreationDetailDtoMixIn(@JsonProperty("centerId") Short centerId,
                @JsonProperty("officeId") Short officeId, @JsonProperty("loanOfficerId") Short loanOfficerId,
                @JsonProperty("displayName") String displayName, @JsonProperty("externalId") String externalId,
                @JsonProperty("address") CreationAddresDto creationAddresDto,
                @JsonProperty("accountFees") List<CreationFeeDto> creationFeeDto,
                @JsonProperty("customerStatus") Short customerStatus, @JsonProperty("trained") Boolean trained,
                @JsonProperty("trainedDate") String trainedOn, @JsonProperty("mfiJoiningDate") String mfiJoiningDate,
                @JsonProperty("activationDate") String activationDate,
                @JsonProperty("parentSystemId") String parentSystemId,
                @JsonProperty("meeting") CreationMeetingDto meeting) {
        }
    }

    /** Mix-in for the client-creation payload. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreateClientCreationDetailMixIn {
        @JsonCreator
        public CreateClientCreationDetailMixIn(@JsonProperty("officeId") Short officeId,
                @JsonProperty("loanOfficerId") Short loanOfficerId, @JsonProperty("formedBy") Short formedBy,
                @JsonProperty("parentGroupId") String parentGroupId, @JsonProperty("externalId") String externalId,
                @JsonProperty("groupFlag") Short groupFlag, @JsonProperty("governmentId") String governmentId,
                @JsonProperty("trained") boolean trained, @JsonProperty("trainedDate") LocalDate trainedDate,
                @JsonProperty("dateOfBirth") LocalDate dateOfBirth,
                @JsonProperty("activationDate") LocalDate activationDate,
                @JsonProperty("mfiJoiningDate") LocalDate mfiJoiningDate,
                @JsonProperty("customerStatus") Short customerStatus,
                @JsonProperty("personalDetail") CreatePersonalDetailDto personalDetail,
                @JsonProperty("clientNameDetail") CreateClientNameDetailDto clientNameDetail,
                @JsonProperty("address") CreationAddresDto address,
                @JsonProperty("accountFees") List<CreationFeeDto> accountFees,
                @JsonProperty("meeting") CreationMeetingDto meeting) {
        }
    }

    /** Mix-in binding JSON properties to {@code CreateClientNameDetailDto}'s constructor. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreateClientNameDetailDtoMixIn {
        @JsonCreator
        public CreateClientNameDetailDtoMixIn(@JsonProperty("salutationId") Integer salutation,
                @JsonProperty("firstName") String firstName, @JsonProperty("middleName") String middleName,
                @JsonProperty("lastName") String lastName, @JsonProperty("secondLastName") String secondLastName) {
        }
    }

    /** Mix-in binding JSON properties to {@code CreatePersonalDetailDto}'s constructor. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreatePersonalDetailDtoMixIn {
        @JsonCreator
        public CreatePersonalDetailDtoMixIn(@JsonProperty("ethnicity") Integer ethnicity,
                @JsonProperty("citizenship") Integer citizenship, @JsonProperty("handicapped") Integer handicapped,
                @JsonProperty("businessActivities") Integer businessActivities,
                @JsonProperty("maritalStatus") Integer maritalStatus,
                @JsonProperty("educationLevel") Integer educationLevel,
                @JsonProperty("numberOfChildren") Short numberOfChildren, @JsonProperty("gender") Short genderId,
                @JsonProperty("povertyStatus") Short povertyStatus) {
        }
    }

    /** Mix-in for the loan-account-creation payload, including optional GLIM sub-accounts. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreationLoanAccountDtoMixIn {
        @JsonCreator
        public CreationLoanAccountDtoMixIn(@JsonProperty("customerId") Integer customerId,
                @JsonProperty("glim") Boolean glim, @JsonProperty("productId") Integer productId,
                @JsonProperty("accountState") Integer accountState, @JsonProperty("loanAmount") Double loanAmount,
                @JsonProperty("interesRate") Double interesRate,
                @JsonProperty("disbusmentDate") LocalDate disbursnebtDate,
                @JsonProperty("disbursalPaymentTypeId") Short disbursalPaymentTypeId,
                @JsonProperty("numberofInstallments") Integer numberOfInstallments,
                @JsonProperty("graceDuration") Integer graceDuration,
                @JsonProperty("sourceOfFoundId") Integer sourceOfFoundId,
                @JsonProperty("loanPurposeId") Integer loanPurposeId,
                @JsonProperty("collateralTypeId") Integer collateralTypeId,
                @JsonProperty("collateralNotes") String collateralNotes, @JsonProperty("externalId") String externalId,
                @JsonProperty("accountFees") List<CreationFeeDto> accountFees,
                @JsonProperty("accountPenalties") List<CreationAccountPenaltyDto> accountPenalties,
                @JsonProperty("minNumOfInstallments") Integer minNumOfInstallments,
                @JsonProperty("maxNumOfInstallments") Integer maxNumOfInstallments,
                @JsonProperty("minAllowedLoanAmount") Double minAllowedLoanAmount,
                @JsonProperty("maxAllowedLoanAmount") Double maxAllowedLoanAmount,
                @JsonProperty("predefinedAccountNumber") String predefinedAccountNumber,
                @JsonProperty("flagId") Short flagId,
                @JsonProperty("glimAccounts") List<CreationGLIMAccountsDto> glimAccounts) {
        }
    }

    /** Mix-in binding JSON properties to {@code CreationGLIMAccountsDto}'s constructor. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreationGLIMAccountsDtoMixIn {
        @JsonCreator
        public CreationGLIMAccountsDtoMixIn(@JsonProperty("globalId") String globalId,
                @JsonProperty("ammount") Double ammount, @JsonProperty("loanPurposeId") Integer loanPurposeId) {
        }
    }

    /** Mix-in binding JSON properties to {@code CreationAccountPenaltyDto}'s constructor. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static abstract class CreationAccountPenaltyDtoMixIn {
        @JsonCreator
        public CreationAccountPenaltyDtoMixIn(@JsonProperty("penaltyId") Integer penaltyId,
                @JsonProperty("amount") String amount) {
        }
    }
}
|
madhav123/gkmaster
|
rest/src/main/java/org/mifos/platform/rest/controller/RESTAPIHelper.java
|
Java
|
apache-2.0
| 10,631 |
// Copyright 2012 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.cloudera.impala.analysis;
import java.util.EnumSet;
import com.cloudera.impala.authorization.Privilege;
import com.cloudera.impala.catalog.Db;
import com.cloudera.impala.catalog.HdfsTable;
import com.cloudera.impala.catalog.MetaStoreClientPool.MetaStoreClient;
import com.cloudera.impala.catalog.Table;
import com.cloudera.impala.catalog.TableId;
import com.cloudera.impala.catalog.TableLoadingException;
import com.cloudera.impala.common.AnalysisException;
import com.cloudera.impala.service.CatalogOpExecutor;
import com.cloudera.impala.thrift.THdfsFileFormat;
import com.google.common.base.Preconditions;
/**
 * Represents a CREATE TABLE AS SELECT (CTAS) statement.
 *
 * Wraps a {@link CreateTableStmt} and an {@link InsertStmt} whose target is
 * the not-yet-existing table. Analysis runs in two phases: the query portion
 * is validated against a throw-away analyzer first, then the full INSERT is
 * analyzed against a temporary in-memory table that mirrors the table the
 * CREATE statement would produce.
 */
public class CreateTableAsSelectStmt extends StatementBase {
  private final CreateTableStmt createStmt_;
  private final InsertStmt insertStmt_;
  // File formats a CTAS is allowed to write into; enforced in analyze().
  private final static EnumSet<THdfsFileFormat> SUPPORTED_INSERT_FORMATS =
      EnumSet.of(THdfsFileFormat.PARQUET, THdfsFileFormat.TEXT);

  /**
   * Builds a CREATE TABLE AS SELECT statement
   */
  public CreateTableAsSelectStmt(CreateTableStmt createStmt, QueryStmt queryStmt) {
    Preconditions.checkNotNull(queryStmt);
    Preconditions.checkNotNull(createStmt);
    this.createStmt_ = createStmt;
    // Target the INSERT at the table the CREATE statement will produce.
    this.insertStmt_ = new InsertStmt(null, createStmt.getTblName(), false,
        null, null, queryStmt, null);
  }

  /** Returns the SELECT portion of this CTAS. */
  public QueryStmt getQueryStmt() { return insertStmt_.getQueryStmt(); }
  /** Returns the INSERT statement that will populate the new table. */
  public InsertStmt getInsertStmt() { return insertStmt_; }
  /** Returns the CREATE TABLE portion of this CTAS. */
  public CreateTableStmt getCreateStmt() { return createStmt_; }
  @Override
  public String toSql() { return createStmt_.toSql() + " AS " + getQueryStmt().toSql(); }

  @Override
  public void analyze(Analyzer analyzer) throws AnalysisException {
    super.analyze(analyzer);

    // The analysis for CTAS happens in two phases - the first phase happens before
    // the target table exists and we want to validate the CREATE statement and the
    // query portion of the insert statement. If this passes, analysis will be run
    // over the full INSERT statement. To avoid duplicate registrations of table/colRefs,
    // create a new root analyzer and clone the query statement for this initial pass.
    Analyzer dummyRootAnalyzer = new Analyzer(analyzer.getCatalog(),
        analyzer.getQueryCtx(), analyzer.getAuthzConfig());
    QueryStmt tmpQueryStmt = insertStmt_.getQueryStmt().clone();
    try {
      Analyzer tmpAnalyzer = new Analyzer(dummyRootAnalyzer);
      tmpAnalyzer.setUseHiveColLabels(true);
      tmpQueryStmt.analyze(tmpAnalyzer);
      if (analyzer.containsSubquery()) {
        // The select statement of this CTAS is nested. Rewrite the
        // statement to unnest all subqueries and re-analyze using a new analyzer.
        StmtRewriter.rewriteQueryStatement(tmpQueryStmt, tmpAnalyzer);
        // Update the insert statement with the unanalyzed rewritten select stmt.
        insertStmt_.setQueryStmt(tmpQueryStmt.clone());

        // Re-analyze the select statement of the CTAS.
        tmpQueryStmt = insertStmt_.getQueryStmt().clone();
        tmpAnalyzer = new Analyzer(dummyRootAnalyzer);
        tmpAnalyzer.setUseHiveColLabels(true);
        tmpQueryStmt.analyze(tmpAnalyzer);
      }
    } finally {
      // Record missing tables in the original analyzer.
      analyzer.getMissingTbls().addAll(dummyRootAnalyzer.getMissingTbls());
    }

    // Add the columns from the select statement to the create statement.
    int colCnt = tmpQueryStmt.getColLabels().size();
    for (int i = 0; i < colCnt; ++i) {
      createStmt_.getColumnDefs().add(new ColumnDesc(
          tmpQueryStmt.getColLabels().get(i),
          tmpQueryStmt.getBaseTblResultExprs().get(i).getType(), null));
    }
    createStmt_.analyze(analyzer);

    if (!SUPPORTED_INSERT_FORMATS.contains(createStmt_.getFileFormat())) {
      throw new AnalysisException(String.format("CREATE TABLE AS SELECT " +
          "does not support (%s) file format. Supported formats are: (%s)",
          createStmt_.getFileFormat().toString().replace("_", ""),
          "PARQUET, TEXTFILE"));
    }

    // The full privilege check for the database will be done as part of the INSERT
    // analysis.
    Db db = analyzer.getDb(createStmt_.getDb(), Privilege.ANY);
    if (db == null) {
      throw new AnalysisException(
          Analyzer.DB_DOES_NOT_EXIST_ERROR_MSG + createStmt_.getDb());
    }

    // Running analysis on the INSERT portion of the CTAS requires the target INSERT
    // table to "exist". For CTAS the table does not exist yet, so create a "temp"
    // table to run analysis against. The schema of this temp table should exactly
    // match the schema of the table that will be created by running the CREATE
    // statement.
    org.apache.hadoop.hive.metastore.api.Table msTbl =
        CatalogOpExecutor.createMetaStoreTable(createStmt_.toThrift());
    MetaStoreClient client = analyzer.getCatalog().getMetaStoreClient();
    try {
      // Set a valid location of this table using the same rules as the metastore. If the
      // user specified a location for the table this will be a no-op.
      msTbl.getSd().setLocation(analyzer.getCatalog().getTablePath(msTbl).toString());

      // If the user didn't specify a table location for the CREATE statement, inject the
      // location that was calculated in the getTablePath() call. Since this will be the
      // target location for the INSERT statement, it is important the two match.
      if (createStmt_.getLocation() == null) {
        createStmt_.setLocation(new HdfsUri(msTbl.getSd().getLocation()));
      }

      // Create a "temp" table based off the given metastore.api.Table object. Normally,
      // the CatalogService assigns all table IDs, but in this case we need to assign the
      // "temp" table an ID locally. This table ID cannot conflict with any table in the
      // SelectStmt (or the BE will be very confused). To ensure the ID is unique within
      // this query, just assign it the invalid table ID. The CatalogServer will assign
      // this table a proper ID once it is created there as part of the CTAS execution.
      Table table = Table.fromMetastoreTable(TableId.createInvalidId(), db, msTbl);
      Preconditions.checkState(table != null && table instanceof HdfsTable);

      HdfsTable hdfsTable = (HdfsTable) table;
      hdfsTable.load(hdfsTable, client.getHiveClient(), msTbl);
      insertStmt_.setTargetTable(table);
    } catch (TableLoadingException e) {
      // NOTE(review): identical to the generic handler below; kept for the
      // explicit checked type, but the two clauses could be merged.
      throw new AnalysisException(e.getMessage(), e);
    } catch (Exception e) {
      throw new AnalysisException(e.getMessage(), e);
    } finally {
      client.release();
    }

    // Finally, run analysis on the insert statement.
    insertStmt_.analyze(analyzer);
  }
}
|
scalingdata/Impala
|
fe/src/main/java/com/cloudera/impala/analysis/CreateTableAsSelectStmt.java
|
Java
|
apache-2.0
| 7,343 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.ml.genetic;
import java.util.ArrayList;
import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.ml.genetic.parameter.GAGridConstants;
import org.apache.ignite.resources.IgniteInstanceResource;
import org.apache.ignite.resources.LoggerResource;
import org.apache.ignite.transactions.Transaction;
/**
 * Responsible for performing fitness evaluation on an individual chromosome.
 *
 * <p>Reads the chromosome and its genes from the local partitions of the
 * population and gene caches, evaluates the configured
 * {@link IFitnessFunction}, stores the score back on the chromosome, and
 * persists it transactionally.</p>
 */
public class FitnessJob extends ComputeJobAdapter {
    /** Chromosome primary key. */
    private final Long key;

    /** Ignite instance (injected by the compute framework). */
    @IgniteInstanceResource
    private Ignite ignite = null;

    /** Ignite logger (injected). */
    @LoggerResource
    private IgniteLogger log = null;

    /** Fitness function supplied by the developer. */
    private final IFitnessFunction fitnessFunction;

    /**
     * @param key Chromosome primary Key
     * @param fitnessFunction Fitness function defined by developer
     */
    public FitnessJob(Long key, IFitnessFunction fitnessFunction) {
        this.key = key;
        this.fitnessFunction = fitnessFunction;
    }

    /**
     * Perform fitness operation utilizing IFitnessFunction.
     *
     * Updates the chromosome's fitness value in the population cache.
     *
     * @return Fitness score
     */
    public Double execute() throws IgniteException {
        IgniteCache<Long, Chromosome> populationCache = ignite.cache(GAGridConstants.POPULATION_CACHE);
        IgniteCache<Long, Gene> geneCache = ignite.cache(GAGridConstants.GENE_CACHE);

        // Resolve the chromosome and its genes from the local cache partitions.
        Chromosome chromosome = populationCache.localPeek(key);
        long[] geneKeys = chromosome.getGenes();

        List<Gene> genes = new ArrayList<>(geneKeys.length);
        for (long geneKey : geneKeys)
            genes.add(geneCache.localPeek(geneKey));

        Double val = fitnessFunction.evaluate(genes);
        chromosome.setFitnessScore(val);

        // try-with-resources: Transaction is AutoCloseable and an uncommitted
        // transaction is rolled back on close, so a failure in put() no longer
        // leaks an open transaction (the original leaked it on exception).
        try (Transaction tx = ignite.transactions().txStart()) {
            populationCache.put(chromosome.id(), chromosome);
            tx.commit();
        }

        return val;
    }
}
|
alexzaitzev/ignite
|
modules/ml/src/main/java/org/apache/ignite/ml/genetic/FitnessJob.java
|
Java
|
apache-2.0
| 3,066 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.cdi.test;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import org.apache.camel.CamelContext;
import org.apache.camel.ProducerTemplate;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.cdi.CdiCamelExtension;
import org.apache.camel.cdi.Uri;
import org.apache.camel.cdi.bean.ManualStartupCamelContext;
import org.apache.camel.component.mock.MockEndpoint;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.arquillian.junit.InSequence;
import org.jboss.shrinkwrap.api.Archive;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.apache.camel.component.mock.MockEndpoint.assertIsSatisfied;
/**
 * Arquillian test verifying that a route advised with
 * {@code interceptSendToEndpoint(...).skipSendToOriginalEndpoint()} diverts
 * messages away from the original mock endpoint and into the intercepting
 * one. Test methods are ordered with {@code @InSequence}: the routes are
 * added and started first, then a message is sent.
 */
@RunWith(Arquillian.class)
public class AdvisedMockEndpointProducerTest {

    // Producer for the route entry point.
    @Inject
    @Uri("direct:inbound")
    private ProducerTemplate inbound;

    // Original target endpoint; the interceptor skips sending here.
    @Inject
    @Uri("mock:outbound")
    private MockEndpoint outbound;

    // Endpoint that receives the intercepted messages instead.
    @Inject
    @Uri("mock:intercepted")
    private MockEndpoint intercepted;

    /** Builds the deployment archive: Camel CDI extension plus the test context bean. */
    @Deployment
    public static Archive<?> deployment() {
        return ShrinkWrap.create(JavaArchive.class)
            // Camel CDI
            .addPackage(CdiCamelExtension.class.getPackage())
            // Test class
            .addClass(ManualStartupCamelContext.class)
            // Bean archive deployment descriptor
            .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml");
    }

    /**
     * Adds the advised route (intercept mock:outbound, skip the original,
     * forward to mock:intercepted) and starts all routes. Must run first.
     */
    @Test
    @InSequence(1)
    public void startCamelContext(CamelContext context) throws Exception {
        context.addRoutes(new RouteBuilder() {
            @Override
            public void configure() {
                interceptSendToEndpoint("mock:outbound")
                    .skipSendToOriginalEndpoint()
                    .log("Intercepting message [${body}] from mock endpoint")
                    .to("mock:intercepted");

                from("direct:inbound").to("mock:outbound");
            }
        });
        context.getRouteController().startAllRoutes();
    }

    /**
     * Sends one message and asserts it reaches only the intercepting
     * endpoint: zero messages on the original, one (with the sent body)
     * on mock:intercepted.
     */
    @Test
    @InSequence(2)
    public void sendMessageToInbound() throws InterruptedException {
        outbound.expectedMessageCount(0);

        intercepted.expectedMessageCount(1);
        intercepted.expectedBodiesReceived("test");

        inbound.sendBody("test");

        assertIsSatisfied(2L, TimeUnit.SECONDS, outbound, intercepted);
    }
}
|
DariusX/camel
|
components/camel-cdi/src/test/java/org/apache/camel/cdi/test/AdvisedMockEndpointProducerTest.java
|
Java
|
apache-2.0
| 3,356 |
package egovframework.com.cop.cmy.service;
import java.util.Map;
/**
 * Service interface for managing approval (confirmation) information.
 *
 * @author Common Service Development Team - Sam-Seob Lee
 * @since 2009.06.01
 * @version 1.0
 * @see
 *
 * <pre>
 * << Modification History >>
 *
 *   Date        Author         Description
 *  ----------  -------------  ---------------------------
 *  2009.4.7    Sam-Seob Lee   Initial creation
 *
 * </pre>
 */
public interface EgovConfirmManageService {

    /**
     * Registers an approval (or withdrawal) request.
     *
     * @param history the confirmation history entry to record
     * @throws Exception if the registration fails
     */
    public void insertConfirmRequest(ConfirmHistory history) throws Exception;

    /**
     * Retrieves the list of approval (or withdrawal) requests.
     *
     * @param historyVO search conditions for the lookup
     * @return result map (map keys are not documented here — confirm against the implementation)
     * @throws Exception if the lookup fails
     */
    public Map<String, Object> selectConfirmRequest(ConfirmHistoryVO historyVO) throws Exception;

    /**
     * Confirms (processes) an approval (or withdrawal) request.
     *
     * @param history the confirmation history entry to update
     * @throws Exception if the update fails
     */
    public void updateConfirmRequest(ConfirmHistory history) throws Exception;

    /**
     * Retrieves the details of a single approval (or withdrawal) request.
     *
     * @param historyVO identifies the request to look up
     * @return the matching confirmation history details
     * @throws Exception if the lookup fails
     */
    public ConfirmHistoryVO selectSingleConfirmRequest(ConfirmHistoryVO historyVO) throws Exception;

    /**
     * Counts the approval requests currently pending.
     *
     * @param history filter conditions for the count
     * @return the number of pending approval requests
     * @throws Exception if the count fails
     */
    public int countConfirmRequest(ConfirmHistory history) throws Exception;
}
|
dasomel/egovframework
|
common-component/v3.7.3/src/main/java/egovframework/com/cop/cmy/service/EgovConfirmManageService.java
|
Java
|
apache-2.0
| 1,667 |
/*
* Artificial Intelligence for Humans
* Volume 2: Nature Inspired Algorithms
* Java Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
*
* Copyright 2014 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.evolutionary.score;
import com.heatonresearch.aifh.learning.MLMethod;
import com.heatonresearch.aifh.learning.score.ScoreFunction;
import java.io.Serializable;
/**
 * A no-op score function: every phenotype receives a score of zero.
 * Useful as a placeholder where a {@code ScoreFunction} is required but
 * no actual scoring should take place.
 */
public class EmptyScoreFunction implements ScoreFunction, Serializable {

    /**
     * Serialization version identifier.
     */
    private static final long serialVersionUID = 1L;

    /**
     * Report a score for the supplied phenotype.
     *
     * @param method the method being evaluated; ignored by this implementation
     * @return always {@code 0}
     */
    @Override
    public double calculateScore(final MLMethod method) {
        return 0.0;
    }

    /**
     * @return always {@code true}
     */
    @Override
    public boolean shouldMinimize() {
        return true;
    }
}
|
PeterLauris/aifh
|
vol2/vol2-java-examples/src/main/java/com/heatonresearch/aifh/evolutionary/score/EmptyScoreFunction.java
|
Java
|
apache-2.0
| 1,636 |
/*
* Copyright 2004-2009 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.compass.core.test.dynamicproperty.map.compound.array;
/**
* @author kimchy
*/
public class DynaKey {
String key1;
String key2;
public DynaKey(String key1, String key2) {
this.key1 = key1;
this.key2 = key2;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DynaKey dynaKey = (DynaKey) o;
if (key1 != null ? !key1.equals(dynaKey.key1) : dynaKey.key1 != null) return false;
if (key2 != null ? !key2.equals(dynaKey.key2) : dynaKey.key2 != null) return false;
return true;
}
@Override
public int hashCode() {
int result = key1 != null ? key1.hashCode() : 0;
result = 31 * result + (key2 != null ? key2.hashCode() : 0);
return result;
}
}
|
baboune/compass
|
src/main/test/org/compass/core/test/dynamicproperty/map/compound/array/DynaKey.java
|
Java
|
apache-2.0
| 1,488 |
/**
* Copyright 2005-2014 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package io.fabric8.process.manager.commands.support;
import io.fabric8.process.manager.Installation;
import io.fabric8.process.manager.ProcessManager;
import org.apache.felix.gogo.commands.Argument;
import java.util.Map;
/**
 * Base class for shell commands that operate on one or more managed process
 * installations identified by id. Subclasses implement the per-installation
 * control action.
 */
public abstract class ProcessControlCommandSupport extends ProcessCommandSupport {

    @Argument(index = 0, required = true, multiValued = true, name = "id", description = "The id of the managed processes to control")
    protected String[] ids;

    protected ProcessControlCommandSupport(ProcessManager processManager) {
        super(processManager);
    }

    /**
     * Looks up each requested id in the installation map and applies the
     * control action; unknown ids are reported on stdout and skipped.
     */
    @Override
    protected Object doExecute() throws Exception {
        final Map<String, Installation> installations = getProcessManager().listInstallationMap();
        for (final String id : ids) {
            final Installation installation = installations.get(id);
            if (installation != null) {
                doControlCommand(installation);
            } else {
                System.out.println("No such process number: " + id);
            }
        }
        return null;
    }

    /** Performs the concrete control action for a single installation. */
    protected abstract void doControlCommand(Installation installation) throws Exception;
}
|
hekonsek/fabric8
|
sandbox/process/process-manager/src/main/java/io/fabric8/process/manager/commands/support/ProcessControlCommandSupport.java
|
Java
|
apache-2.0
| 1,789 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tinkerpop.gremlin;
import org.apache.commons.configuration.Configuration;
import org.apache.tinkerpop.gremlin.process.computer.GraphComputer;
import org.apache.tinkerpop.gremlin.process.traversal.Step;
import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.process.traversal.step.TraversalParent;
import org.apache.tinkerpop.gremlin.structure.Edge;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.apache.tinkerpop.gremlin.structure.VertexProperty;
import org.apache.tinkerpop.gremlin.util.iterator.IteratorUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TestName;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeThat;
/**
 * Sets up g based on the current graph configuration and checks required features for the test.
 *
 * @author Stephen Mallette (http://stephen.genoprime.com)
 */
public abstract class AbstractGremlinTest {
    // Graph instance under test, opened fresh for every test method.
    protected Graph graph;
    // Traversal source bound to the graph above.
    protected GraphTraversalSource g;
    // The GraphComputer class in use when the traversal source runs on one; empty for OLTP runs.
    protected Optional<Class<? extends GraphComputer>> graphComputerClass;
    // Configuration the current graph was opened with.
    protected Configuration config;
    // Provider supplying graph instances and configuration for the environment under test.
    protected GraphProvider graphProvider;

    @Rule
    public TestName name = new TestName();

    /**
     * Opens a fresh graph for the test method, skips the test (via JUnit assumptions)
     * when the graph does not support every declared feature requirement, and loads
     * sample data when a {@link LoadGraphWith} annotation is present.
     */
    @Before
    public void setup() throws Exception {
        // the test method is located by reflection, so parameterized-name suffixes must be stripped
        final Method testMethod = this.getClass().getMethod(cleanMethodName(name.getMethodName()));
        final LoadGraphWith[] loadGraphWiths = testMethod.getAnnotationsByType(LoadGraphWith.class);
        final LoadGraphWith loadGraphWith = loadGraphWiths.length == 0 ? null : loadGraphWiths[0];
        final LoadGraphWith.GraphData loadGraphWithData = null == loadGraphWith ? null : loadGraphWith.value();
        graphProvider = GraphManager.getGraphProvider();
        config = graphProvider.standardGraphConfiguration(this.getClass(), name.getMethodName(), loadGraphWithData);
        // this should clear state from a previously unfinished test. since the graph does not yet exist,
        // persisted graphs will likely just have their directories removed
        graphProvider.clear(config);
        graph = graphProvider.openTestGraph(config);
        g = graphProvider.traversal(graph);
        graphComputerClass = g.getGraphComputer().isPresent() ? Optional.of(g.getGraphComputer().get().getClass()) : Optional.empty();
        // get feature requirements on the test method and add them to the list of ones to check
        final FeatureRequirement[] featureRequirement = testMethod.getAnnotationsByType(FeatureRequirement.class);
        final List<FeatureRequirement> frs = new ArrayList<>(Arrays.asList(featureRequirement));
        // if the graph is loading data then it will come with it's own requirements
        if (loadGraphWiths.length > 0) frs.addAll(loadGraphWiths[0].value().featuresRequired());
        // if the graph has a set of feature requirements bundled together then add those
        final FeatureRequirementSet[] featureRequirementSets = testMethod.getAnnotationsByType(FeatureRequirementSet.class);
        if (featureRequirementSets.length > 0)
            frs.addAll(Arrays.stream(featureRequirementSets)
                    .flatMap(f -> f.value().featuresRequired().stream()).collect(Collectors.toList()));
        // process the unique set of feature requirements
        final Set<FeatureRequirement> featureRequirementSet = new HashSet<>(frs);
        for (FeatureRequirement fr : featureRequirementSet) {
            try {
                //System.out.println(String.format("Assume that %s meets Feature Requirement - %s - with %s", fr.featureClass().getSimpleName(), fr.feature(), fr.supported()));
                assumeThat(String.format("%s does not support all of the features required by this test so it will be ignored: %s.%s=%s",
                        graph.getClass().getSimpleName(), fr.featureClass().getSimpleName(), fr.feature(), fr.supported()),
                        graph.features().supports(fr.featureClass(), fr.feature()), is(fr.supported()));
            } catch (NoSuchMethodException nsme) {
                throw new NoSuchMethodException(String.format("[supports%s] is not a valid feature on %s", fr.feature(), fr.featureClass()));
            }
        }
        beforeLoadGraphWith(graph);
        // load a graph with sample data if the annotation is present on the test
        graphProvider.loadGraphData(graph, loadGraphWith, this.getClass(), name.getMethodName());
        afterLoadGraphWith(graph);
    }

    /** Hook invoked before sample data is loaded; subclasses may override. */
    protected void beforeLoadGraphWith(final Graph g) throws Exception {
        // do nothing
    }

    /** Hook invoked after sample data is loaded; subclasses may override. */
    protected void afterLoadGraphWith(final Graph g) throws Exception {
        // do nothing
    }

    /** Releases graph resources allocated in {@link #setup()}. */
    @After
    public void tearDown() throws Exception {
        if (null != graphProvider) {
            graphProvider.clear(graph, config);
            g = null;
            config = null;
            graphProvider = null;
        }
    }

    /**
     * Looks up the identifier as generated by the current source graph being tested.
     *
     * @param vertexName a unique string that will identify a graph element within a graph
     * @return the id as generated by the graph
     */
    public Object convertToVertexId(final String vertexName) {
        return convertToVertexId(graph, vertexName);
    }

    /**
     * Looks up the identifier as generated by the current source graph being tested.
     *
     * @param graph      the graph to get the element id from
     * @param vertexName a unique string that will identify a graph element within a graph
     * @return the id as generated by the graph
     */
    public Object convertToVertexId(final Graph graph, final String vertexName) {
        return convertToVertex(graph, vertexName).id();
    }

    /** Resolves a vertex by its unique "name" property value. */
    public Vertex convertToVertex(final Graph graph, final String vertexName) {
        // all test graphs have "name" as a unique id which makes it easy to hardcode this...works for now
        return graph.traversal().V().has("name", vertexName).next();
    }

    /** Resolves the id of a named vertex's property using the default graph. */
    public GraphTraversal<Vertex, Object> convertToVertexPropertyId(final String vertexName, final String vertexPropertyKey) {
        return convertToVertexPropertyId(graph, vertexName, vertexPropertyKey);
    }

    /** Resolves the id of a named vertex's property on the supplied graph. */
    public GraphTraversal<Vertex, Object> convertToVertexPropertyId(final Graph graph, final String vertexName, final String vertexPropertyKey) {
        return convertToVertexProperty(graph, vertexName, vertexPropertyKey).id();
    }

    /** Resolves a vertex property by vertex "name" value and property key. */
    public GraphTraversal<Vertex, VertexProperty<Object>> convertToVertexProperty(final Graph graph, final String vertexName, final String vertexPropertyKey) {
        // all test graphs have "name" as a unique id which makes it easy to hardcode this...works for now
        return (GraphTraversal<Vertex, VertexProperty<Object>>) graph.traversal().V().has("name", vertexName).properties(vertexPropertyKey);
    }

    /** Resolves an edge id by the "name" of its out/in vertices and its label, using the default graph. */
    public Object convertToEdgeId(final String outVertexName, String edgeLabel, final String inVertexName) {
        return convertToEdgeId(graph, outVertexName, edgeLabel, inVertexName);
    }

    /** Resolves an edge id by the "name" of its out/in vertices and its label. */
    public Object convertToEdgeId(final Graph graph, final String outVertexName, String edgeLabel, final String inVertexName) {
        return graph.traversal().V().has("name", outVertexName).outE(edgeLabel).as("e").inV().has("name", inVertexName).<Edge>select("e").next().id();
    }

    /**
     * Utility method that commits if the graph supports transactions.
     */
    public void tryCommit(final Graph graph) {
        if (graph.features().graph().supportsTransactions())
            graph.tx().commit();
    }

    /** Commits with 50% probability when transactions are supported — used to shake out commit-timing issues. */
    public void tryRandomCommit(final Graph graph) {
        if (graph.features().graph().supportsTransactions() && new Random().nextBoolean())
            graph.tx().commit();
    }

    /**
     * Utility method that commits if the graph supports transactions and executes an assertion function before and
     * after the commit. It assumes that the assertion should be true before and after the commit.
     */
    public void tryCommit(final Graph graph, final Consumer<Graph> assertFunction) {
        assertFunction.accept(graph);
        if (graph.features().graph().supportsTransactions()) {
            graph.tx().commit();
            assertFunction.accept(graph);
        }
    }

    /**
     * Utility method that rollsback if the graph supports transactions.
     */
    public void tryRollback(final Graph graph) {
        if (graph.features().graph().supportsTransactions())
            graph.tx().rollback();
    }

    /**
     * If using "parameterized test" junit will append an identifier to the end of the method name which prevents it
     * from being found via reflection. This method removes that suffix.
     */
    private static String cleanMethodName(final String methodName) {
        if (methodName.endsWith("]")) {
            return methodName.substring(0, methodName.indexOf("["));
        }
        return methodName;
    }

    /**
     * Prints the traversal before and after strategy application (unless the
     * "muteTestLogs" system property is set), triggers compilation via hasNext(),
     * and then verifies that every step id in the compiled traversal is unique.
     */
    public void printTraversalForm(final Traversal traversal) {
        final boolean muted = Boolean.parseBoolean(System.getProperty("muteTestLogs", "false"));
        if (!muted) System.out.println(String.format("Testing: %s", name.getMethodName()));
        if (!muted) System.out.println("   pre-strategy:" + traversal);
        traversal.hasNext();
        if (!muted) System.out.println("  post-strategy:" + traversal);
        verifyUniqueStepIds(traversal.asAdmin());
    }

    /** @return true when the traversal source is backed by a GraphComputer (OLAP execution). */
    public boolean isComputerTest() {
        return this.graphComputerClass.isPresent();
    }

    /** Builds an assertion that the graph contains exactly the given vertex and edge counts. */
    public static Consumer<Graph> assertVertexEdgeCounts(final int expectedVertexCount, final int expectedEdgeCount) {
        return (g) -> {
            assertEquals(expectedVertexCount, IteratorUtils.count(g.vertices()));
            assertEquals(expectedEdgeCount, IteratorUtils.count(g.edges()));
        };
    }

    /** Asserts that the actual throwable is an instance of the expected throwable's class. */
    public static void validateException(final Throwable expected, final Throwable actual) {
        assertThat(actual, instanceOf(expected.getClass()));
    }

    /** Fails the test if any step id is duplicated anywhere in the traversal tree. */
    public static void verifyUniqueStepIds(final Traversal.Admin<?, ?> traversal) {
        AbstractGremlinTest.verifyUniqueStepIds(traversal, 0, new HashSet<>());
    }

    // Recursive worker: walks the steps of this traversal and descends into both
    // global and local child traversals of any TraversalParent step.
    private static void verifyUniqueStepIds(final Traversal.Admin<?, ?> traversal, final int depth, final Set<String> ids) {
        for (final Step step : traversal.asAdmin().getSteps()) {
            /*for (int i = 0; i < depth; i++) System.out.print("\t");
            System.out.println(step.getId() + " --> " + step);*/
            if (!ids.add(step.getId())) {
                fail("The following step id already exists: " + step.getId() + "---" + step);
            }
            if (step instanceof TraversalParent) {
                for (final Traversal.Admin<?, ?> globalTraversal : ((TraversalParent) step).getGlobalChildren()) {
                    verifyUniqueStepIds(globalTraversal, depth + 1, ids);
                }
                for (final Traversal.Admin<?, ?> localTraversal : ((TraversalParent) step).getLocalChildren()) {
                    verifyUniqueStepIds(localTraversal, depth + 1, ids);
                }
            }
        }
    }
}
|
PommeVerte/incubator-tinkerpop
|
gremlin-test/src/main/java/org/apache/tinkerpop/gremlin/AbstractGremlinTest.java
|
Java
|
apache-2.0
| 12,621 |
/*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.groovy.lang.psi.impl.auxiliary;
import com.intellij.lang.ASTNode;
import com.intellij.openapi.util.Pair;
import com.intellij.psi.*;
import com.intellij.psi.impl.source.tree.LeafPsiElement;
import com.intellij.psi.search.GlobalSearchScope;
import com.intellij.psi.tree.TokenSet;
import com.intellij.psi.util.InheritanceUtil;
import com.intellij.util.Function;
import com.intellij.util.ReflectionCache;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.plugins.groovy.findUsages.LiteralConstructorReference;
import org.jetbrains.plugins.groovy.lang.lexer.GroovyTokenTypes;
import org.jetbrains.plugins.groovy.lang.parser.GroovyElementTypes;
import org.jetbrains.plugins.groovy.lang.psi.GroovyElementVisitor;
import org.jetbrains.plugins.groovy.lang.psi.api.auxiliary.GrListOrMap;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.GrVariableDeclaration;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.arguments.GrArgumentLabel;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.arguments.GrNamedArgument;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.expressions.GrExpression;
import org.jetbrains.plugins.groovy.lang.psi.api.types.GrTypeElement;
import org.jetbrains.plugins.groovy.lang.psi.dataFlow.types.TypeInferenceHelper;
import org.jetbrains.plugins.groovy.lang.psi.impl.GrMapType;
import org.jetbrains.plugins.groovy.lang.psi.impl.GrTupleType;
import org.jetbrains.plugins.groovy.lang.psi.impl.PsiImplUtil;
import org.jetbrains.plugins.groovy.lang.psi.impl.statements.expressions.GrExpressionImpl;
import org.jetbrains.plugins.groovy.lang.psi.impl.statements.expressions.TypesUtil;
import org.jetbrains.plugins.groovy.lang.psi.util.GroovyCommonClassNames;
import org.jetbrains.plugins.groovy.lang.psi.util.PsiUtil;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import static org.jetbrains.plugins.groovy.lang.lexer.GroovyTokenTypes.mCOMMA;
/**
 * PSI implementation of a Groovy list or map literal ({@code [a, b]} / {@code [k: v]}).
 *
 * @author ilyas
 */
public class GrListOrMapImpl extends GrExpressionImpl implements GrListOrMap {
  // A literal is a map when it contains a named argument or a colon
  // (the colon alone covers the empty-map literal [:]).
  private static final TokenSet MAP_LITERAL_TOKEN_SET = TokenSet.create(GroovyElementTypes.NAMED_ARGUMENT, GroovyTokenTypes.mCOLON);
  private static final Function<GrListOrMapImpl, PsiType> TYPES_CALCULATOR = new MyTypesCalculator();

  public GrListOrMapImpl(@NotNull ASTNode node) {
    super(node);
  }

  public void accept(GroovyElementVisitor visitor) {
    visitor.visitListOrMap(this);
  }

  public String toString() {
    return "Generalized list";
  }

  // Appends a child element, inserting a separating comma first when the
  // literal already contains initializers.
  @Override
  public ASTNode addInternal(ASTNode first, ASTNode last, ASTNode anchor, Boolean before) {
    if (getInitializers().length == 0) {
      return super.addInternal(first, last, getNode().getFirstChildNode(), false);
    }
    final ASTNode lastChild = getNode().getLastChildNode();
    getNode().addLeaf(mCOMMA, ",", lastChild);
    return super.addInternal(first, last, lastChild.getTreePrev(), false);
  }

  // When an element or named argument is deleted, also removes the comma
  // adjacent to it (preceding comma preferred, otherwise the following one).
  @Override
  public void deleteChildInternal(@NotNull ASTNode child) {
    final PsiElement psi = child.getPsi();
    if (psi instanceof GrExpression || psi instanceof GrNamedArgument) {
      PsiElement prev = PsiUtil.getPrevNonSpace(psi);
      PsiElement next = PsiUtil.getNextNonSpace(psi);
      if (prev != null && prev.getNode() != null && prev.getNode().getElementType() == mCOMMA) {
        super.deleteChildInternal(prev.getNode());
      }
      else if (next instanceof LeafPsiElement && next.getNode() != null && next.getNode().getElementType() == mCOMMA) {
        super.deleteChildInternal(next.getNode());
      }
    }
    super.deleteChildInternal(child);
  }

  // Type is computed by TYPES_CALCULATOR and cached by the inference context.
  public PsiType getType() {
    return TypeInferenceHelper.getCurrentContext().getExpressionType(this, TYPES_CALCULATOR);
  }

  public boolean isMap() {
    return findChildByType(MAP_LITERAL_TOKEN_SET) != null;
  }

  @Override
  public PsiElement getLBrack() {
    return findChildByType(GroovyTokenTypes.mLBRACK);
  }

  @Override
  public PsiElement getRBrack() {
    return findChildByType(GroovyTokenTypes.mRBRACK);
  }

  // Collects the direct-child expressions of a list literal.
  @NotNull
  public GrExpression[] getInitializers() {
    List<GrExpression> result = new ArrayList<GrExpression>();
    for (PsiElement cur = getFirstChild(); cur != null; cur = cur.getNextSibling()) {
      if (ReflectionCache.isInstance(cur, GrExpression.class)) result.add((GrExpression)cur);
    }
    return result.toArray((GrExpression[]) Array.newInstance(GrExpression.class, result.size()));
  }

  // Collects the direct-child named arguments (map entries) of a map literal.
  @NotNull
  public GrNamedArgument[] getNamedArguments() {
    List<GrNamedArgument> result = new ArrayList<GrNamedArgument>();
    for (PsiElement cur = getFirstChild(); cur != null; cur = cur.getNextSibling()) {
      if (cur instanceof GrNamedArgument) result.add((GrNamedArgument)cur);
    }
    return result.toArray(new GrNamedArgument[result.size()]);
  }

  @Override
  public GrNamedArgument findNamedArgument(@NotNull String label) {
    return PsiImplUtil.findNamedArgument(this, label);
  }

  // A list/map literal assigned to a non-collection type acts as a constructor
  // call in Groovy; this returns a LiteralConstructorReference in that case and
  // null when no conversion is needed (assignable, or target is a List/Set).
  @Override
  public PsiReference getReference() {
    final PsiClassType conversionType = LiteralConstructorReference.getTargetConversionType(this);
    if (conversionType == null) return null;
    PsiType ownType = getType();
    if (ownType instanceof PsiClassType) {
      ownType = ((PsiClassType)ownType).rawType();
    }
    if (ownType != null && TypesUtil.isAssignableWithoutConversions(conversionType.rawType(), ownType, this)) return null;
    final PsiClass resolved = conversionType.resolve();
    if (resolved != null) {
      if (InheritanceUtil.isInheritor(resolved, CommonClassNames.JAVA_UTIL_SET)) return null;
      if (InheritanceUtil.isInheritor(resolved, CommonClassNames.JAVA_UTIL_LIST)) return null;
    }
    return new LiteralConstructorReference(this, conversionType);
  }

  // Computes the PSI type of the literal: a map type for map literals, the
  // declared array type when the literal directly initializes an array-typed
  // variable, and a tuple type otherwise.
  private static class MyTypesCalculator implements Function<GrListOrMapImpl, PsiType> {
    @Nullable
    public PsiType fun(GrListOrMapImpl listOrMap) {
      final GlobalSearchScope scope = listOrMap.getResolveScope();
      if (listOrMap.isMap()) {
        JavaPsiFacade facade = JavaPsiFacade.getInstance(listOrMap.getProject());
        return inferMapInitializerType(listOrMap, facade, scope);
      }
      PsiElement parent = listOrMap.getParent();
      if (parent.getParent() instanceof GrVariableDeclaration) {
        GrTypeElement typeElement = ((GrVariableDeclaration)parent.getParent()).getTypeElementGroovy();
        if (typeElement != null) {
          PsiType declaredType = typeElement.getType();
          if (declaredType instanceof PsiArrayType) return declaredType;
        }
      }
      return getTupleType(listOrMap.getInitializers(), listOrMap);
    }

    // Infers the type of a map literal. An empty literal with a Map-typed
    // expected type gets diamond-style inference (LinkedHashMap with the
    // expected key/value arguments); otherwise entry types are collected,
    // string-keyed entries separately from other keys.
    @Nullable
    private static PsiClassType inferMapInitializerType(GrListOrMapImpl listOrMap, JavaPsiFacade facade, GlobalSearchScope scope) {
      final HashMap<String, PsiType> stringEntries = new HashMap<String, PsiType>();
      final ArrayList<Pair<PsiType, PsiType>> otherEntries = new ArrayList<Pair<PsiType, PsiType>>();
      GrNamedArgument[] namedArgs = listOrMap.getNamedArguments();
      if (namedArgs.length == 0) {
        PsiType lType = PsiImplUtil.inferExpectedTypeForDiamond(listOrMap);
        if (lType instanceof PsiClassType && InheritanceUtil.isInheritor(lType, CommonClassNames.JAVA_UTIL_MAP)) {
          PsiClass hashMap = facade.findClass(GroovyCommonClassNames.JAVA_UTIL_LINKED_HASH_MAP, scope);
          if (hashMap == null) hashMap = facade.findClass(CommonClassNames.JAVA_UTIL_MAP, scope);
          if (hashMap != null) {
            PsiSubstitutor mapSubstitutor = PsiSubstitutor.EMPTY.
              put(hashMap.getTypeParameters()[0], com.intellij.psi.util.PsiUtil.substituteTypeParameter(lType, CommonClassNames.JAVA_UTIL_MAP, 0, false)).
              put(hashMap.getTypeParameters()[1], com.intellij.psi.util.PsiUtil.substituteTypeParameter(lType, CommonClassNames.JAVA_UTIL_MAP, 1, false));
            return facade.getElementFactory().createType(hashMap, mapSubstitutor);
          }
        }
      }
      for (GrNamedArgument namedArg : namedArgs) {
        final GrArgumentLabel label = namedArg.getLabel();
        final GrExpression expression = namedArg.getExpression();
        if (label == null || expression == null) {
          continue;
        }
        final String name = label.getName();
        if (name != null) {
          stringEntries.put(name, expression.getType());
        } else {
          otherEntries.add(Pair.create(label.getLabelType(), expression.getType()));
        }
      }
      return GrMapType.create(facade, scope, stringEntries, otherEntries);
    }

    // Infers the type of a list literal. An empty literal with a List-typed
    // expected type gets diamond-style inference (ArrayList with the expected
    // element argument); otherwise a tuple type of the element types is built.
    private static PsiClassType getTupleType(GrExpression[] initializers, GrListOrMap listOrMap) {
      JavaPsiFacade facade = JavaPsiFacade.getInstance(listOrMap.getProject());
      GlobalSearchScope scope = listOrMap.getResolveScope();
      if (initializers.length == 0) {
        PsiType lType = PsiImplUtil.inferExpectedTypeForDiamond(listOrMap);
        if (lType instanceof PsiClassType && InheritanceUtil.isInheritor(lType, CommonClassNames.JAVA_UTIL_LIST)) {
          PsiClass arrayList = facade.findClass(CommonClassNames.JAVA_UTIL_ARRAY_LIST, scope);
          if (arrayList == null) arrayList = facade.findClass(CommonClassNames.JAVA_UTIL_LIST, scope);
          if (arrayList != null) {
            PsiSubstitutor arrayListSubstitutor = PsiSubstitutor.EMPTY.
              put(arrayList.getTypeParameters()[0], com.intellij.psi.util.PsiUtil.substituteTypeParameter(lType, CommonClassNames.JAVA_UTIL_LIST, 0, false));
            return facade.getElementFactory().createType(arrayList, arrayListSubstitutor);
          }
        }
      }
      PsiType[] result = new PsiType[initializers.length];
      for (int i = 0; i < result.length; i++) {
        result[i] = initializers[i].getType();
      }
      return new GrTupleType(result, facade, scope);
    }
  }
}
|
android-ia/platform_tools_idea
|
plugins/groovy/src/org/jetbrains/plugins/groovy/lang/psi/impl/auxiliary/GrListOrMapImpl.java
|
Java
|
apache-2.0
| 10,579 |
/*
* Copyright 2013 Red Hat Inc. and/or its affiliates and other contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.switchyard.component.camel;
import org.jboss.logging.Logger;
import org.jboss.logging.annotations.MessageLogger;
/**
 * Message logger interface for the SwitchYard Camel component. A concrete
 * implementation is generated from this interface by the JBoss Logging
 * annotation processor.
 * <p/>
 * This file is using the subset 33400-33799 for logger messages.
 * <p/>
 *
 */
@MessageLogger(projectCode = "SWITCHYARD")
public interface CamelComponentLogger {
    /**
     * A root logger with the category of the package name.
     */
    CamelComponentLogger ROOT_LOGGER = Logger.getMessageLogger(CamelComponentLogger.class, CamelComponentLogger.class.getPackage().getName());
}
|
tadayosi/switchyard
|
components/camel/component/src/main/java/org/switchyard/component/camel/CamelComponentLogger.java
|
Java
|
apache-2.0
| 1,161 |
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.rage;
import com.facebook.buck.io.ProjectFilesystem;
import com.facebook.buck.util.Console;
import com.facebook.buck.util.environment.BuildEnvironmentDescription;
import com.facebook.buck.util.versioncontrol.VersionControlCommandFailedException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableSet;
import java.io.IOException;
import java.util.Optional;
/**
 * Responsible for gathering logs and other interesting information from buck
 * without user interaction: all build logs are selected automatically and no
 * user report is collected.
 */
public class AutomatedReport extends AbstractReport {
  private final BuildLogHelper buildLogHelper;
  private final Optional<VcsInfoCollector> vcsInfoCollector;
  private final Console console;

  public AutomatedReport(
      DefectReporter defectReporter,
      ProjectFilesystem filesystem,
      ObjectMapper objectMapper,
      Console console,
      BuildEnvironmentDescription buildEnvironmentDescription,
      Optional<VcsInfoCollector> vcsInfoCollector,
      RageConfig rageConfig,
      ExtraInfoCollector extraInfoCollector) {
    super(
        filesystem,
        defectReporter,
        buildEnvironmentDescription,
        console,
        rageConfig,
        extraInfoCollector,
        Optional.empty());
    this.console = console;
    this.vcsInfoCollector = vcsInfoCollector;
    this.buildLogHelper = new BuildLogHelper(filesystem, objectMapper);
  }

  /** Selects every available build log — no interactive prompt. */
  @Override
  public ImmutableSet<BuildLogEntry> promptForBuildSelection() throws IOException {
    return ImmutableSet.copyOf(buildLogHelper.getBuildLogs());
  }

  /**
   * Gathers source control information when a collector is available. A failed
   * version-control command is reported on the console and treated as absent
   * information rather than aborting the report.
   */
  @Override
  public Optional<SourceControlInfo> getSourceControlInfo()
      throws IOException, InterruptedException {
    if (!vcsInfoCollector.isPresent()) {
      return Optional.empty();
    }
    try {
      return Optional.of(vcsInfoCollector.get().gatherScmInformation());
    } catch (VersionControlCommandFailedException e) {
      console.printErrorText(
          "Failed to get source control information: %s, proceeding regardless.", e);
      return Optional.empty();
    }
  }

  /** Automated runs collect no ignored-file-changes report. */
  @Override
  protected Optional<FileChangesIgnoredReport> getFileChangesIgnoredReport() {
    return Optional.empty();
  }

  /** Automated runs collect no user-authored report. */
  @Override
  protected Optional<UserReport> getUserReport() throws IOException {
    return Optional.empty();
  }
}
|
daedric/buck
|
src/com/facebook/buck/rage/AutomatedReport.java
|
Java
|
apache-2.0
| 2,892 |
/**
*
* Copyright (c) Microsoft and contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
package com.microsoft.azure.management.storage.models;
/**
 * The account type of the storage account, i.e. the redundancy/replication
 * level applied to the data it stores.
 */
public enum AccountType {
    /**
     * Standard locally-redundant storage (Standard_LRS).
     */
    STANDARDLRS,
    /**
     * Standard zone-redundant storage (Standard_ZRS).
     */
    STANDARDZRS,
    /**
     * Standard geo-redundant storage (Standard_GRS).
     */
    STANDARDGRS,
    /**
     * Standard read-access geo-redundant storage (Standard_RAGRS).
     */
    STANDARDRAGRS,
    /**
     * Premium locally-redundant storage (Premium_LRS).
     */
    PREMIUMLRS,
}
|
flydream2046/azure-sdk-for-java
|
resource-management/azure-mgmt-storage/src/main/java/com/microsoft/azure/management/storage/models/AccountType.java
|
Java
|
apache-2.0
| 1,285 |
package com.mossle.car.persistence.manager;
import com.mossle.car.persistence.domain.CarInfo;
import com.mossle.core.hibernate.HibernateEntityDao;
import org.springframework.stereotype.Service;
/**
 * Hibernate-backed DAO for {@code CarInfo} entities. All generic CRUD
 * operations are inherited from {@code HibernateEntityDao}; this subclass
 * only binds the entity type and registers the bean with Spring.
 */
@Service
public class CarInfoManager extends HibernateEntityDao<CarInfo> {
}
|
NJU-STP/STP
|
src/main/java/com/mossle/car/persistence/manager/CarInfoManager.java
|
Java
|
apache-2.0
| 275 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.distributed;
import org.apache.geode.distributed.internal.locks.*;
import org.apache.geode.distributed.internal.*;
/**
 * <p>
 * A named instance of DistributedLockService defines a space for locking arbitrary names across the
 * distributed system defined by a specified distribution manager. Any number of
 * DistributedLockService instances can be created with different service names. For all processes
 * in the distributed system that have created an instance of DistributedLockService with the same
 * name, no more than one thread is permitted to own the lock on a given name in that instance at
 * any point in time. Additionally, a thread can lock the entire service, preventing any other
 * threads in the system from locking the service or any names in the service.
 * </p>
 */
public abstract class DistributedLockService {

  /**
   * Create a DistributedLockService with the given serviceName for the given DistributedSystem.
   * This DistributedLockService will continue to manage locks until <code>{@link #destroy}</code>
   * is called, or <code>ds</code> is disconnected, at which point any locks that were held by this
   * instance are released.
   *
   * @param serviceName the name of the DistributedLockService to create.
   *
   * @param ds the <code>DistributedSystem</code> for the new service instance to use for
   *        distributed lock messaging.
   *
   * @throws IllegalArgumentException if serviceName is an illegal name or this process has already
   *         created a DistributedLockService with the given <code>serviceName</code>.
   *
   * @throws IllegalStateException if this process is in the middle of disconnecting from the
   *         <code>DistributedSystem</code>
   */
  public static DistributedLockService create(String serviceName, DistributedSystem ds)
      throws IllegalArgumentException {
    DLockService.validateServiceName(serviceName);
    return DLockService.create(serviceName, (InternalDistributedSystem) ds, true /* distributed */,
        true /* destroyOnDisconnect */, false /* automateFreeResources */);
  }

  /**
   * Look up and return the DistributedLockService with the given name, if it has been created in
   * this VM. If it has not been created, return null.
   *
   * @param serviceName the name of the DistributedLockService to look up
   *
   * @return the DistributedLockService with the given name, or null if it hasn't been created in
   *         this VM.
   */
  public static DistributedLockService getServiceNamed(String serviceName) {
    return DLockService.getServiceNamed(serviceName);
  }

  /**
   * Destroy a previously created DistributedLockService with the given <code>serviceName</code>.
   * Any locks currently held in this DistributedLockService by this process are released. Attempts
   * to access a destroyed lock service will result in a {@link LockServiceDestroyedException} being
   * thrown.
   *
   * @param serviceName the name of the instance to destroy, previously supplied in the
   *        <code>create(String, DistributedSystem)</code> invocation.
   *
   * @throws IllegalArgumentException if this process hasn't created a DistributedLockService with
   *         the given <code>serviceName</code> and <code>dm</code>.
   */
  public static void destroy(String serviceName) throws IllegalArgumentException {
    DLockService.destroyServiceNamed(serviceName);
  }

  /**
   * Public instance creation is prohibited - use {@link #create(String, DistributedSystem)}
   */
  protected DistributedLockService() {}

  /**
   * <p>
   * Attempts to acquire a lock named <code>name</code>. Returns <code>true</code> as soon as the
   * lock is acquired. If the lock is currently held by another thread in this or any other process
   * in the distributed system, or another thread in the system has locked the entire service, this
   * method keeps trying to acquire the lock for up to <code>waitTimeMillis</code> before giving up
   * and returning <code>false</code>. If the lock is acquired, it is held until
   * <code>unlock(Object name)</code> is invoked, or until <code>leaseTimeMillis</code> milliseconds
   * have passed since the lock was granted - whichever comes first.
   * </p>
   *
   * <p>
   * Locks are reentrant. If a thread invokes this method n times on the same instance, specifying
   * the same <code>name</code>, without an intervening release or lease expiration on
   * the lock, the thread must invoke <code>unlock(name)</code> the same number of times before the
   * lock is released (unless the lease expires). When this method is invoked for a lock that is
   * already acquired, the lease time will be set to the maximum of the remaining lease time from
   * the previous invocation, or <code>leaseTimeMillis</code>
   * </p>
   *
   * @param name the name of the lock to acquire in this service. This object must conform to the
   *        general contract of <code>equals(Object)</code> and <code>hashCode()</code> as described
   *        in {@link java.lang.Object#hashCode()}.
   *
   * @param waitTimeMillis the number of milliseconds to try to acquire the lock before giving up
   *        and returning false. A value of -1 causes this method to block until the lock is
   *        acquired. A value of 0 causes this method to return false without waiting for the lock
   *        if the lock is held by another member or thread.
   *
   * @param leaseTimeMillis the number of milliseconds to hold the lock after granting it, before
   *        automatically releasing it if it hasn't already been released by invoking
   *        {@link #unlock(Object)}. If <code>leaseTimeMillis</code> is -1, hold the lock until
   *        explicitly unlocked.
   *
   * @return true if the lock was acquired, false if the timeout <code>waitTimeMillis</code> passed
   *         without acquiring the lock.
   *
   * @throws LockServiceDestroyedException if this lock service has been destroyed
   */
  public abstract boolean lock(Object name, long waitTimeMillis, long leaseTimeMillis);

  /**
   * <p>
   * Attempts to acquire a lock named <code>name</code>. Returns <code>true</code> as soon as the
   * lock is acquired. If the lock is currently held by another thread in this or any other process
   * in the distributed system, or another thread in the system has locked the entire service, this
   * method keeps trying to acquire the lock for up to <code>waitTimeMillis</code> before giving up
   * and returning <code>false</code>. If the lock is acquired, it is held until
   * <code>unlock(Object name)</code> is invoked, or until <code>leaseTimeMillis</code> milliseconds
   * have passed since the lock was granted - whichever comes first.
   * </p>
   *
   * <p>
   * Locks are reentrant. If a thread invokes this method n times on the same instance, specifying
   * the same <code>name</code>, without an intervening release or lease expiration on
   * the lock, the thread must invoke <code>unlock(name)</code> the same number of times before the
   * lock is released (unless the lease expires). When this method is invoked for a lock that is
   * already acquired, the lease time will be set to the maximum of the remaining lease time from
   * the previous invocation, or <code>leaseTimeMillis</code>
   * </p>
   *
   * @param name the name of the lock to acquire in this service. This object must conform to the
   *        general contract of <code>equals(Object)</code> and <code>hashCode()</code> as described
   *        in {@link java.lang.Object#hashCode()}.
   *
   * @param waitTimeMillis the number of milliseconds to try to acquire the lock before giving up
   *        and returning false. A value of -1 causes this method to block until the lock is
   *        acquired.
   *
   * @param leaseTimeMillis the number of milliseconds to hold the lock after granting it, before
   *        automatically releasing it if it hasn't already been released by invoking
   *        {@link #unlock(Object)}. If <code>leaseTimeMillis</code> is -1, hold the lock until
   *        explicitly unlocked.
   *
   * @return true if the lock was acquired, false if the timeout <code>waitTimeMillis</code> passed
   *         without acquiring the lock.
   *
   * @throws InterruptedException if the thread is interrupted before or during this method.
   *
   * @throws LockServiceDestroyedException if this lock service has been destroyed
   *
   * @deprecated as of GemFire 5.1, use {@link #lock(Object, long, long)} with waitTimeMillis
   *             instead
   */
  @Deprecated
  public abstract boolean lockInterruptibly(Object name, long waitTimeMillis, long leaseTimeMillis)
      throws InterruptedException;

  /**
   * Release the lock previously granted for the given <code>name</code>.
   *
   * @param name the object to unlock in this service.
   *
   * @throws LockNotHeldException if the current thread is not the owner of this lock
   *
   * @throws LeaseExpiredException if the current thread was the owner of this lock, but its lease
   *         has expired.
   *
   * @throws LockServiceDestroyedException if the service has been destroyed
   */
  public abstract void unlock(Object name) throws LeaseExpiredException;

  /**
   * Determine whether the current thread owns the lock on the given object.
   *
   * @return true if the current thread owns the lock for <code>name</code>.
   *
   * @throws LockServiceDestroyedException if this service has been destroyed
   */
  public abstract boolean isHeldByCurrentThread(Object name);

  /**
   * Suspend granting of locks in this service. When locking has been suspended, no other thread in
   * the distributed system will be granted a lock for any new or existing name in that service
   * until locking is resumed by the thread that suspended it. Only one thread at a time in a
   * distributed system is permitted to suspend locking on a given DistributedLockService instance.
   * This method blocks until lock suspension can be granted to the current thread, and all
   * outstanding locks on names in this service held by other threads in the distributed system have
   * been released, or until <code>waitTimeMillis</code> milliseconds have passed without
   * successfully granting suspension.
   *
   * @param waitTimeMillis the number of milliseconds to try to acquire suspension before giving up
   *        and returning false. A value of -1 causes this method to block until suspension is
   *        granted.
   *
   * @return true if suspension was granted, false if the timeout <code>waitTimeMillis</code> passed
   *         before it could be granted.
   *
   * @throws IllegalStateException if the current thread already has suspended locking on this
   *         instance.
   *
   * @throws InterruptedException if the current thread is interrupted.
   *
   * @throws LockServiceDestroyedException if the service has been destroyed
   *
   * @deprecated as of GemFire 5.1, use {@link #suspendLocking(long)} with waitTimeMillis instead
   */
  @Deprecated
  public abstract boolean suspendLockingInterruptibly(long waitTimeMillis)
      throws InterruptedException;

  /**
   * Suspend granting of locks in this service. When locking has been suspended, no other thread in
   * the distributed system will be granted a lock for any new or existing name in that service
   * until locking is resumed by the thread that suspended it. Only one thread at a time in a
   * distributed system is permitted to suspend locking on a given DistributedLockService instance.
   * This method blocks until lock suspension can be granted to the current thread, and all
   * outstanding locks on names in this service held by other threads in the distributed system have
   * been released, or until <code>waitTimeMillis</code> milliseconds have passed without
   * successfully granting suspension.
   *
   * @param waitTimeMillis the number of milliseconds to try to acquire suspension before giving up
   *        and returning false. A value of -1 causes this method to block until suspension is
   *        granted. A value of 0 causes this method to return false without waiting for the lock if
   *        the lock is held by another member or thread.
   *
   * @return true if suspension was granted, false if the timeout <code>waitTimeMillis</code> passed
   *         before it could be granted.
   *
   * @throws IllegalStateException if the current thread already has suspended locking on this
   *         instance
   *
   * @throws LockServiceDestroyedException if the service has been destroyed
   */
  public abstract boolean suspendLocking(long waitTimeMillis);

  /**
   * Allow locking to resume in this DistributedLockService instance.
   *
   * @throws IllegalStateException if the current thread didn't previously suspend locking
   *
   * @throws LockServiceDestroyedException if the service has been destroyed
   */
  public abstract void resumeLocking();

  /**
   * Determine whether the current thread has suspended locking in this DistributedLockService.
   *
   * @return true if locking is suspended by the current thread.
   *
   * @throws LockServiceDestroyedException if this service has been destroyed
   */
  public abstract boolean isLockingSuspendedByCurrentThread();

  /**
   * Free internal resources associated with the given <code>name</code>. This may reduce this VM's
   * memory use, but may also prohibit performance optimizations if <code>name</code> is
   * subsequently locked in this VM.
   *
   * @throws LockServiceDestroyedException if this service has been destroyed
   */
  public abstract void freeResources(Object name);

  /**
   * Specifies this member to become the grantor for this lock service. The grantor will be the lock
   * authority which is responsible for handling all lock requests for this service. Other members
   * will request locks from this member. Locking for this member is optimized as it will not
   * require messaging to acquire a given lock.
   * <p>
   * Calls to this method will block until grantor authority has been transferred to this member.
   * <p>
   * If another member calls <code>becomeLockGrantor</code> after this member, that member will
   * transfer grantor authority from this member to itself.
   * <p>
   * This operation should not be invoked repeatedly in an application. It is possible to create a
   * lock service and have two or more members endlessly calling becomeLockGrantor to transfer
   * grantorship back and forth.
   *
   * @throws LockServiceDestroyedException if this service has been destroyed
   */
  public abstract void becomeLockGrantor();

  /**
   * Specifies that this member should become the grantor for the named locking service.
   *
   * @param serviceName the name of the locking service
   *
   * @throws IllegalArgumentException if <code>serviceName</code> does not refer to any registered
   *         locking service in this process
   *
   * @see org.apache.geode.distributed.DistributedLockService#becomeLockGrantor()
   */
  public static void becomeLockGrantor(String serviceName) throws IllegalArgumentException {
    DLockService.becomeLockGrantor(serviceName);
  }

  /**
   * Returns true if this member is currently the lock authority responsible for granting locks for
   * this service. This can be explicitly requested by calling
   * {@link org.apache.geode.distributed.DistributedLockService#becomeLockGrantor()}. If no member
   * has explicitly requested grantor authority, then one member participating in the service will
   * be implicitly selected. In either case, this method returns true if the calling member is the
   * grantor.
   *
   * @return true if this member is the grantor for this service
   *
   * @throws LockServiceDestroyedException if lock service has been destroyed
   */
  public abstract boolean isLockGrantor();

  /**
   * Returns true if this member is the grantor for the named service.
   *
   * @param serviceName the name of the locking service
   *
   * @return true if this member is the grantor for this service
   *
   * @throws IllegalArgumentException if <code>serviceName</code> does not refer to any registered
   *         locking service in this process
   *
   * @see org.apache.geode.distributed.DistributedLockService#isLockGrantor()
   */
  public static boolean isLockGrantor(String serviceName) throws IllegalArgumentException {
    return DLockService.isLockGrantor(serviceName);
  }
}
|
pivotal-amurmann/geode
|
geode-core/src/main/java/org/apache/geode/distributed/DistributedLockService.java
|
Java
|
apache-2.0
| 17,286 |
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.launcher.cli;
import org.gradle.StartParameter;
import org.gradle.initialization.BuildAction;
import org.gradle.initialization.BuildController;
import java.io.Serializable;
public class ExecuteBuildAction implements BuildAction<Void>, Serializable {
private final StartParameter startParameter;
public ExecuteBuildAction(StartParameter startParameter) {
this.startParameter = startParameter;
}
public Void run(BuildController buildController) {
buildController.setStartParameter(startParameter);
buildController.run();
return null;
}
}
|
Pushjet/Pushjet-Android
|
gradle/wrapper/dists/gradle-2.2.1-all/c64ydeuardnfqctvr1gm30w53/gradle-2.2.1/src/launcher/org/gradle/launcher/cli/ExecuteBuildAction.java
|
Java
|
bsd-2-clause
| 1,228 |
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.Random;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.OSDWriteResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.DirectoryEntries;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.DirectoryEntry;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Setattrs;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Stat;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.XAttr;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.listxattrResponse;
import org.xtreemfs.test.SetupUtils;
import org.xtreemfs.test.TestHelper;
/**
*
* <br>
* Sep 30, 2011
*/
public class MetadataCacheTest {
@Rule
public final TestRule testLog = TestHelper.testLog;
private class MetadataCacheSmasherThread extends Thread {
private final MetadataCache mdCache;
private final String[] paths;
private final Stat[] stats;
private final DirectoryEntries[] dirs;
private boolean failed;
/**
*
*/
public MetadataCacheSmasherThread(MetadataCache cache, String[] paths, Stat[] stats,
DirectoryEntries[] dirs) {
this.mdCache = cache;
this.paths = paths;
this.stats = stats;
this.dirs = dirs;
this.failed = false;
}
/*
* (non-Javadoc)
*
* @see java.lang.Runnable#run()
*/
@Override
public void run() {
while (true) {
int operation = new Random().nextInt(10) + 1;
int object = new Random().nextInt(10);
try {
switch (operation) {
case 1:
mdCache.updateStat(paths[object], stats[object]);
break;
case 2:
long time = System.currentTimeMillis() / 1000;
mdCache.updateStatTime(paths[object], time, Setattrs.SETATTR_ATIME.getNumber());
break;
case 3:
int i = 1;
i = i << new Random().nextInt(7);
mdCache.updateStatAttributes(paths[object], stats[object], i);
break;
case 4:
mdCache.getStat(paths[object]);
break;
case 5:
mdCache.size();
break;
case 6:
mdCache.invalidate(paths[object]);
break;
case 7:
mdCache.updateDirEntries(paths[object], dirs[object]);
break;
case 8:
mdCache.getDirEntries(paths[object], 0, 1024);
break;
case 9:
mdCache.invalidatePrefix(paths[object]);
break;
case 10:
mdCache.renamePrefix(paths[object], paths[new Random().nextInt(10)]);
break;
}
sleep(10);
} catch (Exception e) {
e.printStackTrace();
failed = true;
}
}
}
public boolean getFailed() {
return failed;
}
}
    // Cache under test; recreated for every test case in setUp().
    private MetadataCache metadataCache;

    /**
     * Starts logging and creates a fresh cache with room for at most two
     * entries and a one-hour timeout before each test.
     */
    @Before
    public void setUp() throws Exception {
        Logging.start(SetupUtils.DEBUG_LEVEL, SetupUtils.DEBUG_CATEGORIES);
        // Max 2 entries, 1 hour
        metadataCache = new MetadataCache(2, 3600);
    }
/**
* If a Stat entry gets updated through UpdateStatTime(), the new timeout must be respected in case of an
* eviction.
*
**/
@Test
public void testUpdateStatTimeKeepsSequentialTimeoutOrder() throws Exception {
Stat.Builder a, b, c;
a = getIntializedStatBuilder();
b = getIntializedStatBuilder();
c = getIntializedStatBuilder();
a.setIno(0);
b.setIno(1);
c.setIno(2);
metadataCache.updateStat("/a", a.build());
metadataCache.updateStat("/b", b.build());
// Cache is full now. a would should be first item to get evicted.
metadataCache.updateStatTime("/a", 0, Setattrs.SETATTR_MTIME.getNumber());
// "b" should be the oldest Stat element now and get evicted.
metadataCache.updateStat("/c", c.build());
// Was "a" found or did "b" survive?
Stat aStat = metadataCache.getStat("/a");
assertNotNull(aStat);
assertEquals(0, aStat.getIno());
// "c" ist also still there?!
Stat cStat = metadataCache.getStat("/c");
assertNotNull(cStat);
assertEquals(2, cStat.getIno());
}
/**
* If a Stat entry gets updated through UpdateStat(), the new timeout must be respected in case of an
* eviction.
**/
@Test
public void testUpdateStatKeepsSequentialTimeoutOrder() throws Exception {
Stat.Builder a, b, c;
a = getIntializedStatBuilder();
b = getIntializedStatBuilder();
c = getIntializedStatBuilder();
a.setIno(0);
b.setIno(1);
c.setIno(2);
Stat aa = a.build();
metadataCache.updateStat("/a", aa);
metadataCache.updateStat("/b", b.build());
// Cache is full now. a would should be first item to get evicted.
metadataCache.updateStat("/a", aa);
// "b" should be the oldest Stat element now and get evicted.
metadataCache.updateStat("/c", c.build());
// Was "a" found or did "b" survive?
Stat aStat = metadataCache.getStat("/a");
assertNotNull(aStat);
assertEquals(0, aStat.getIno());
// "c" ist also still there?!
Stat cStat = metadataCache.getStat("/c");
assertNotNull(cStat);
assertEquals(2, cStat.getIno());
}
/**
* Test if Size is updated correctly after UpdateStat() or Invalidate().
**/
@Test
public void testCheckSizeAfterUpdateAndInvalidate() throws Exception {
Stat.Builder a, b, c;
a = getIntializedStatBuilder();
b = getIntializedStatBuilder();
c = getIntializedStatBuilder();
assertEquals(0l, metadataCache.size());
metadataCache.updateStat("/a", a.build());
assertEquals(1l, metadataCache.size());
metadataCache.updateStat("/b", b.build());
assertEquals(2l, metadataCache.size());
metadataCache.updateStat("/c", c.build());
// metadatacache has only room for two entries.
assertEquals(2l, metadataCache.size());
metadataCache.invalidate("/b");
assertEquals(1l, metadataCache.size());
metadataCache.invalidate("/c");
assertEquals(0l, metadataCache.size());
}
@Test
public void testGetUpdateDirEntries() throws Exception {
DirectoryEntries.Builder dirEntriesBuilder = DirectoryEntries.newBuilder();
int chunkSize = 1024;
int entryCount = chunkSize;
String dir = "/";
// Fill dirEntriesBuilder
for (int i = 0; i < entryCount; i++) {
// create new Stat object for a new entry object
Stat.Builder a = getIntializedStatBuilder();
a.setIno(i);
DirectoryEntry entry = DirectoryEntry.newBuilder().setName(dir + i).setStbuf(a.build()).build();
dirEntriesBuilder.addEntries(entry);
}
metadataCache.updateDirEntries(dir, dirEntriesBuilder.build());
// Read all dir entries.
DirectoryEntries dirEntriesRead = metadataCache.getDirEntries(dir, 0, entryCount);
assertEquals(entryCount, dirEntriesRead.getEntriesCount());
for (int i = 0; i < dirEntriesRead.getEntriesCount(); i++) {
String pathToStat = dir + i;
assertEquals(i, dirEntriesRead.getEntries(i).getStbuf().getIno());
assertEquals(pathToStat, dirEntriesRead.getEntries(i).getName());
}
// Read a subset.
int offset = entryCount / 2;
dirEntriesRead = metadataCache.getDirEntries(dir, offset, entryCount / 2 - 1);
assertEquals(entryCount / 2 - 1, dirEntriesRead.getEntriesCount());
for (int i = 0; i < dirEntriesRead.getEntriesCount(); i++) {
String pathToStat = dir + (offset + i);
assertEquals(pathToStat, dirEntriesRead.getEntries(i).getName());
assertEquals(offset + i, dirEntriesRead.getEntries(i).getStbuf().getIno());
}
dirEntriesBuilder = DirectoryEntries.newBuilder();
// Fill dirEntriesBuilder with other entries for this dir
for (int i = 10; i < entryCount + 10; i++) {
// create new Stat object for a new entry object
Stat.Builder a = getIntializedStatBuilder();
a.setIno(i);
DirectoryEntry entry = DirectoryEntry.newBuilder().setName(dir + i).setStbuf(a.build()).build();
dirEntriesBuilder.addEntries(entry);
}
metadataCache.updateDirEntries(dir, dirEntriesBuilder.build());
// Read all dir entries.
dirEntriesRead = metadataCache.getDirEntries(dir, 0, entryCount);
assertEquals(entryCount, dirEntriesRead.getEntriesCount());
for (int i = 0; i < dirEntriesRead.getEntriesCount(); i++) {
String pathToStat = dir + (i + 10);
assertEquals(i + 10, dirEntriesRead.getEntries(i).getStbuf().getIno());
assertEquals(pathToStat, dirEntriesRead.getEntries(i).getName());
}
}
/**
* If a Stat entry gets updated through UpdateStat(), the new timeout must be respected in case of an
* eviction.
*/
@Test
public void testInvalidatePrefix() throws Exception {
// create new metadataCache with 1024 entries.
metadataCache = new MetadataCache(1024, 3600);
Stat.Builder a, b, c, d;
a = getIntializedStatBuilder();
b = getIntializedStatBuilder();
c = getIntializedStatBuilder();
d = getIntializedStatBuilder();
a.setIno(0);
b.setIno(1);
c.setIno(2);
d.setIno(3);
String dir = "/dir";
metadataCache.updateStat(dir, a.build());
metadataCache.updateStat(dir + "/file1", b.build());
metadataCache.updateStat(dir + ".file1", c.build());
metadataCache.updateStat(dir + "Zfile1", d.build());
metadataCache.invalidatePrefix(dir);
// invalidation of all matching entries successful?
assertNull(metadataCache.getStat(dir));
assertNull(metadataCache.getStat(dir + "/file1"));
// Similiar entries which do not match the prefix "/dir/" have not been
// invalidated.
Stat statC = metadataCache.getStat(dir + ".file1");
assertNotNull(statC);
assertEquals(2, statC.getIno());
Stat statD = metadataCache.getStat(dir + "Zfile1");
assertNotNull(statD);
assertEquals(3, statD.getIno());
}
/**
* If a Stat entry gets updated through UpdateStat(), the new timeout must be respected in case of an
* eviction.
*/
@Test
public void testRenamePrefix() throws Exception {
// create new metadataCache with 1024 entries.
metadataCache = new MetadataCache(1024, 3600);
Stat.Builder a, b, c, d;
a = getIntializedStatBuilder();
b = getIntializedStatBuilder();
c = getIntializedStatBuilder();
d = getIntializedStatBuilder();
a.setIno(0);
b.setIno(1);
c.setIno(2);
d.setIno(3);
String dir = "/dir";
metadataCache.updateStat(dir, a.build());
metadataCache.updateStat(dir + "/file1", b.build());
metadataCache.updateStat(dir + ".file1", c.build());
metadataCache.updateStat(dir + "Zfile1", d.build());
assertEquals(4l, metadataCache.size());
metadataCache.renamePrefix(dir, "/newDir");
assertEquals(4l, metadataCache.size());
// Renaming of all matching entries was successful?
Stat statA = metadataCache.getStat("/newDir");
assertNotNull(statA);
assertEquals(0, statA.getIno());
Stat statB = metadataCache.getStat("/newDir" + "/file1");
assertNotNull(statB);
assertEquals(1, statB.getIno());
// Similiar entries which do not match the prefix "/dir/" hat not been renamed
Stat statC = metadataCache.getStat(dir + ".file1");
assertNotNull(statC);
assertEquals(2, statC.getIno());
Stat statD = metadataCache.getStat(dir + "Zfile1");
assertNotNull(statD);
assertEquals(3, statD.getIno());
}
/**
* Are large nanoseconds values correctly updated by UpdateStatAttributes?
*/
@Test
public void testUpdateStatAttributes() throws Exception {
// create new metadataCache with 1024 entries.
metadataCache = new MetadataCache(1024, 3600);
String path = "/file";
Stat.Builder stat = getIntializedStatBuilder();
Stat.Builder newStat = getIntializedStatBuilder();
stat.setIno(0);
newStat.setIno(1);
metadataCache.updateStat(path, stat.build());
assertEquals(1l, metadataCache.size());
Stat statA = metadataCache.getStat(path);
assertNotNull(statA);
assertEquals(0, statA.getIno());
assertEquals(0, statA.getMtimeNs());
long time = 1234567890;
time *= 1000000000;
newStat.setAtimeNs(time);
newStat.setMtimeNs(time);
metadataCache.updateStatAttributes(path, newStat.build(), Setattrs.SETATTR_ATIME.getNumber()
| Setattrs.SETATTR_MTIME.getNumber());
assertEquals(1l, metadataCache.size());
Stat statB = metadataCache.getStat(path);
assertNotNull(statB);
assertEquals(0, statB.getIno());
assertEquals(time, statB.getAtimeNs());
assertEquals(time, statB.getMtimeNs());
}
/**
* Changing the file access mode may only modify the last 12 bits (3 bits for sticky bit, set GID and set
* UID and 3 * 3 bits for the file access mode).
*/
@Test
public void testUpdateStatAttributesPreservesModeBits() throws Exception {
String path = "/file";
Stat.Builder stat = getIntializedStatBuilder();
Stat.Builder cachedStat = getIntializedStatBuilder();
stat.setIno(0);
stat.setMode(33188); // Octal: 100644 ( regular file + 644).
metadataCache.updateStat(path, stat.build());
assertEquals(1l, metadataCache.size());
Stat statA = metadataCache.getStat(path);
assertNotNull(statA);
assertEquals(0, statA.getIno());
assertEquals(33188, statA.getMode());
stat = getIntializedStatBuilder();
stat.setMode(420); // Octal: 644
metadataCache.updateStatAttributes(path, stat.build(), Setattrs.SETATTR_MODE.getNumber());
assertEquals(1l, metadataCache.size());
statA = metadataCache.getStat(path);
assertNotNull(statA);
assertEquals(0, cachedStat.getIno());
assertEquals(33188, statA.getMode());
stat = getIntializedStatBuilder();
stat.setMode(263076); // Octal : 1001644 (regular file + sticky bit + 644).
metadataCache.updateStat(path, stat.build());
assertEquals(1l, metadataCache.size());
statA = metadataCache.getStat(path);
assertNotNull(statA);
assertEquals(0, statA.getIno());
assertEquals(263076, statA.getMode());
stat = getIntializedStatBuilder();
stat.setMode(511); // Octal: 0777 (no sticky bit + 777).
metadataCache.updateStatAttributes(path, stat.build(), Setattrs.SETATTR_MODE.getNumber());
assertEquals(1l, metadataCache.size());
statA = metadataCache.getStat(path);
assertNotNull(statA);
assertEquals(0, statA.getIno());
assertEquals(262655, statA.getMode()); // Octal: 1000777
}
  /**
   * Stress test: several threads hammer the cache concurrently for ten seconds;
   * none of them may observe an inconsistency (see MetadataCacheSmasherThread).
   */
  @Test
  public void testConcurrentModifications() throws Exception {
    final int DATA_SIZE = 10;
    final java.lang.String FILENAME = "/foobarfile";
    final int DIR_COUNT = 64;
    final int THREAD_COUNT = 10;
    // generate Data
    String[] paths = new String[DATA_SIZE];
    Stat[] stats = new Stat[DATA_SIZE];
    DirectoryEntries[] dirs = new DirectoryEntries[DATA_SIZE];
    for (int i = 0; i < DATA_SIZE; i++) {
      paths[i] = new String(FILENAME + i + '/');
      stats[i] = getIntializedStatBuilder().setIno(i).build();
      DirectoryEntries.Builder dirBuilder = DirectoryEntries.newBuilder();
      for (int j = 0; j < DIR_COUNT; j++) {
        // Inode numbers are made unique per (path, entry) pair.
        Stat a = getIntializedStatBuilder().setIno(i * 10000 + j).build();
        dirBuilder
            .addEntries(DirectoryEntry.newBuilder().setName(FILENAME + i + '/' + j).setStbuf(a));
      }
      dirs[i] = dirBuilder.build();
    }
    // All threads share the same cache and the same input data.
    MetadataCacheSmasherThread[] threads = new MetadataCacheSmasherThread[THREAD_COUNT];
    for (int i = 0; i < THREAD_COUNT; i++) {
      threads[i] = new MetadataCacheSmasherThread(metadataCache, paths, stats, dirs);
      threads[i].start();
    }
    Thread.sleep(10000); // sleep 10 seconds and let the other threads work
    for (int i = 0; i < THREAD_COUNT; i++) {
      assertEquals(false, threads[i].getFailed());
    }
  }
  /**
   * A cache constructed with capacity 0 is disabled: every lookup must miss
   * and every mutator must be a silent no-op (in particular, none may throw).
   */
  @Test
  public void testUnenabledMdCache() {
    metadataCache = new MetadataCache(0, 10000);
    assertEquals(0, metadataCache.size());
    assertEquals(0, metadataCache.capacity());
    // All getters report a miss.
    assertNull(metadataCache.getDirEntries("/", 10, 100));
    assertNull(metadataCache.getStat("fewf"));
    assertNull(metadataCache.getXAttr("tttgreg", "wefwe").getFirst());
    assertFalse(metadataCache.getXAttr("tttgreg", "wefwe").getSecond());
    assertNull(metadataCache.getXAttrs("asdf"));
    assertEquals(0, metadataCache.getXAttrSize("zxcv", "naste").getFirst().intValue());
    assertFalse(metadataCache.getXAttrSize("zxcv", "naste").getSecond());
    // Invalidations and updates must all be tolerated without effect.
    metadataCache.invalidate("bla");
    metadataCache.invalidateDirEntries("blub");
    metadataCache.invalidateDirEntry("puuuh", "noooooo");
    metadataCache.invalidatePrefix("praefeix");
    metadataCache.invalidateStat("stat");
    metadataCache.invalidateXAttr("Xattr", "dont care");
    metadataCache.invalidateXAttrs("jea, finished");
    metadataCache.updateDirEntries("fsa", DirectoryEntries.getDefaultInstance());
    metadataCache.updateStat("fsa", Stat.getDefaultInstance());
    metadataCache.updateStatAttributes("fwef", Stat.getDefaultInstance(), 100);
    metadataCache.updateStatFromOSDWriteResponse("fefew", OSDWriteResponse.getDefaultInstance());
    metadataCache.updateStatTime("fsa", 1034l, 100);
    metadataCache.updateXAttr("fsa", "wefwe", "dfd");
    metadataCache.updateXAttrs("sdfs", listxattrResponse.getDefaultInstance());
    metadataCache.renamePrefix("foo", "bar");
  }
private Stat.Builder getIntializedStatBuilder() {
Stat.Builder statBuilder = Stat.newBuilder();
statBuilder.setDev(0);
statBuilder.setIno(0);
statBuilder.setMode(0);
// if not set to 1 an exception in the metadatacache is triggered
statBuilder.setNlink(1);
statBuilder.setUserId("");
statBuilder.setGroupId("");
statBuilder.setSize(0);
statBuilder.setAtimeNs(0);
statBuilder.setMtimeNs(0);
statBuilder.setCtimeNs(0);
statBuilder.setBlksize(0);
statBuilder.setTruncateEpoch(0);
return statBuilder;
}
  /**
   * An OSDWriteResponse with a higher truncate epoch must overwrite the cached
   * file size; paths without a cached Stat must remain without one.
   */
  @Test
  public void testUpdateStatFromOSDWriteResponse() throws Exception {
    OSDWriteResponse osdWriteResponse = OSDWriteResponse.newBuilder().setSizeInBytes(1337)
        .setTruncateEpoch(1338).build();
    metadataCache.updateStat("foobar", getIntializedStatBuilder().setSize(100).build());
    assertEquals(100, metadataCache.getStat("foobar").getSize());
    metadataCache.updateStatFromOSDWriteResponse("foobar", osdWriteResponse);
    assertEquals(1337, metadataCache.getStat("foobar").getSize());
    // Test with entry but no Stat
    DirectoryEntries entries = getDummyDirEntries();
    metadataCache.updateDirEntries("hasDirEntriesButNoStat", entries);
    metadataCache.updateStatFromOSDWriteResponse("hasDirEntriesButNoStat", osdWriteResponse);
    assertNull(metadataCache.getStat("hasDirEntriesButNoStat"));
    // Test with equal truncate epoch and higher size
    osdWriteResponse = OSDWriteResponse.newBuilder().setSizeInBytes(20000).setTruncateEpoch(20000)
        .build();
    metadataCache.updateStatFromOSDWriteResponse("foobar", osdWriteResponse);
    assertEquals(20000, metadataCache.getStat("foobar").getSize());
    // Applying the identical response again (equal epoch, equal size) must be idempotent.
    metadataCache.updateStatFromOSDWriteResponse("foobar", osdWriteResponse);
    assertEquals(20000, metadataCache.getStat("foobar").getSize());
  }
@Test
public void testGetStatExpired() throws Exception {
metadataCache = new MetadataCache(100, 1);
Stat aStat = getIntializedStatBuilder().setSize(333).build();
metadataCache.updateStat("foobar", aStat);
assertEquals(aStat, metadataCache.getStat("foobar"));
Thread.sleep(2000);
assertNull(metadataCache.getStat("foobar"));
}
@Test
public void testGetDirEntriesExpired() throws Exception {
metadataCache = new MetadataCache(100, 1);
DirectoryEntries entries = getDummyDirEntries();
metadataCache.updateDirEntries("foobar", entries);
assertEquals(entries, metadataCache.getDirEntries("foobar", 0, 1));
Thread.sleep(2000);
assertNull(metadataCache.getDirEntries("foobar", 0, 1));
}
  /** Looking up directory entries for an unknown path must return null. */
  @Test
  public void testGetNonExistingDirEntries() throws Exception {
    assertNull(metadataCache.getDirEntries("do not exist", 0, 100));
  }
@Test
public void testInvalidateStat() throws Exception {
Stat aStat = getIntializedStatBuilder().build();
metadataCache.updateStat("foobar", aStat);
assertEquals(aStat, metadataCache.getStat("foobar"));
metadataCache.invalidateStat("foobar");
assertNull(metadataCache.getStat("foobar"));
}
@Test
public void testInvalidateXattrs() throws Exception {
listxattrResponse xattrs = getDummyXattrs();
metadataCache.updateXAttrs("foobar", xattrs);
assertEquals(xattrs, metadataCache.getXAttrs("foobar"));
metadataCache.invalidateXAttrs("foobar");
assertNull(metadataCache.getXAttrs("foobar"));
}
  /**
   * Invalidating a single xattr must behave correctly whether the path is
   * unknown, known without xattrs, or known with the attribute cached.
   */
  @Test
  public void testInvalidateXattr() throws Exception {
    // Unknown path: no-op.
    metadataCache.invalidateXAttr("foobar", "where are you?");
    assertNull(metadataCache.getXAttr("foobar", "where are you?").getFirst());
    // Path cached without an xattr list: still a no-op.
    metadataCache.updateStat("foobar", getIntializedStatBuilder().build());
    metadataCache.invalidateXAttr("foobar", "still not there?");
    assertNull(metadataCache.getXAttr("foobar", "still not there?").getFirst());
    // Cached attribute: must be gone after invalidation.
    XAttr attr = XAttr.newBuilder().setName("deleteme").setValue("bla").build();
    listxattrResponse xattrs = getDummyXattrs().toBuilder().addXattrs(attr).build();
    metadataCache.updateXAttrs("foobar", xattrs);
    assertEquals("bla", metadataCache.getXAttr("foobar", "deleteme").getFirst());
    metadataCache.invalidateXAttr("foobar", "deleteme");
    assertNull(metadataCache.getXAttr("foobar", "deleteme").getFirst());
  }
  /**
   * updateXAttr only takes effect once the path has a cached xattr list; it
   * updates existing attributes in place and appends unknown ones.
   */
  @Test
  public void testUpdateXattr() throws Exception {
    // Unknown path: the update is dropped.
    metadataCache.updateXAttr("foobar", "do not exist", "nothing to do");
    assertNull(metadataCache.getXAttr("foobar", "do not exist").getFirst());
    // Path cached without an xattr list: the update is still dropped.
    metadataCache.updateStat("foobar", getIntializedStatBuilder().build());
    metadataCache.updateXAttr("foobar", "do not exist", "nothing to do");
    assertNull(metadataCache.getXAttr("foobar", "do not exist").getFirst());
    // update existing xattr
    XAttr attr = XAttr.newBuilder().setName("newAttr").setValue("bla").build();
    listxattrResponse xattrs = getDummyXattrs().toBuilder().addXattrs(attr).build();
    metadataCache.updateXAttrs("foobar", xattrs);
    assertEquals("bla", metadataCache.getXAttr("foobar", "newAttr").getFirst());
    metadataCache.updateXAttr("foobar", "newAttr", "blub");
    assertEquals("blub", metadataCache.getXAttr("foobar", "newAttr").getFirst());
    // update non-existing xattr
    metadataCache.updateXAttr("foobar", "nonExistingXattr", "bar");
    assertEquals("bar", metadataCache.getXAttr("foobar", "nonExistingXattr").getFirst());
  }
@Test
public void testUpdateStatTime() throws Exception {
metadataCache.updateStatTime("do not exists", 10000, Setattrs.SETATTR_ATIME.getNumber());
metadataCache.updateXAttrs("foobar", getDummyXattrs());
metadataCache.updateStatTime("foobar", 10000, Setattrs.SETATTR_ATIME.getNumber());
metadataCache.updateStat("foobar", getIntializedStatBuilder().build());
int toSet = Setattrs.SETATTR_ATIME.getNumber() | Setattrs.SETATTR_CTIME.getNumber()
| Setattrs.SETATTR_MTIME.getNumber();
metadataCache.updateStatTime("foobar", 13337, toSet);
Stat aStat = metadataCache.getStat("foobar");
assertEquals(13337l * 1000 * 1000 * 1000, aStat.getCtimeNs());
assertEquals(13337l * 1000 * 1000 * 1000, aStat.getMtimeNs());
assertEquals(13337l * 1000 * 1000 * 1000, aStat.getAtimeNs());
}
  /**
   * updateStatAttributes must copy only the selected fields, and its size
   * update must respect the truncate-epoch ordering (greater epoch wins;
   * equal epoch with larger size also wins).
   */
  @Test
  public void testUpdateAttributes() throws Exception {
    // Unknown path and path without a cached Stat: both are no-ops.
    metadataCache.updateStatAttributes("do not exist", getIntializedStatBuilder().build(),
        Setattrs.SETATTR_GID.getNumber());
    metadataCache.updateXAttrs("foobar", getDummyXattrs());
    metadataCache.updateStatAttributes("foobar", getIntializedStatBuilder().build(),
        Setattrs.SETATTR_GID.getNumber());
    metadataCache.updateStat("foobar", getIntializedStatBuilder().setTruncateEpoch(10).build());
    // Update stat with greater Truncate Epoch
    Stat updateStat = getIntializedStatBuilder().setUserId("TESTUSER").setGroupId("TESTGROUP")
        .setAtimeNs(13337).setCtimeNs(13337).setMtimeNs(13337).setSize(11111).setTruncateEpoch(11)
        .setAttributes(1000).build();
    int toSet = Setattrs.SETATTR_ATIME.getNumber() | Setattrs.SETATTR_CTIME.getNumber()
        | Setattrs.SETATTR_MTIME.getNumber() | Setattrs.SETATTR_GID.getNumber()
        | Setattrs.SETATTR_UID.getNumber() | Setattrs.SETATTR_SIZE.getNumber()
        | Setattrs.SETATTR_ATTRIBUTES.getNumber();
    metadataCache.updateStatAttributes("foobar", updateStat, toSet);
    Stat getStat = metadataCache.getStat("foobar");
    assertEquals(13337l, getStat.getCtimeNs());
    assertEquals(13337l, getStat.getMtimeNs());
    assertEquals(13337l, getStat.getAtimeNs());
    assertEquals("TESTUSER", getStat.getUserId());
    assertEquals("TESTGROUP", getStat.getGroupId());
    assertEquals(1000, getStat.getAttributes());
    assertEquals(11, getStat.getTruncateEpoch());
    assertEquals(11111, getStat.getSize());
    // Update stat with equal Truncate Epoch.
    Stat secondStat = getIntializedStatBuilder().setTruncateEpoch(11).setSize(22222).build();
    metadataCache.updateStatAttributes("foobar", secondStat, toSet);
    getStat = metadataCache.getStat("foobar");
    assertEquals(secondStat.getCtimeNs(), getStat.getCtimeNs());
    assertEquals(secondStat.getMtimeNs(), getStat.getMtimeNs());
    assertEquals(secondStat.getAtimeNs(), getStat.getAtimeNs());
    assertEquals(secondStat.getUserId(), getStat.getUserId());
    assertEquals(secondStat.getGroupId(), getStat.getGroupId());
    assertEquals(secondStat.getAttributes(), getStat.getAttributes());
    assertEquals(11, getStat.getTruncateEpoch());
    assertEquals(22222, getStat.getSize());
  }
  /**
   * invalidateDirEntry must remove exactly the named entry from a cached
   * directory listing and tolerate unknown paths/entries.
   */
  @Test
  public void testInvalidateDirEntry() throws Exception {
    // Unknown path and path without cached dir entries: both are no-ops.
    metadataCache.invalidateDirEntry("not exists", "no name");
    metadataCache.updateStat("foobar", getIntializedStatBuilder().build());
    metadataCache.invalidateDirEntry("foobar", "no name");
    // Cache a listing with three entries ("foo" from the dummy plus dir1/dir2).
    DirectoryEntries entries = getDummyDirEntries();
    DirectoryEntry entry = DirectoryEntry.newBuilder().setName("dir1")
        .setStbuf(getIntializedStatBuilder()).build();
    DirectoryEntry entry2 = DirectoryEntry.newBuilder().setName("dir2")
        .setStbuf(getIntializedStatBuilder()).build();
    entries = entries.toBuilder().addEntries(entry).addEntries(entry2).build();
    metadataCache.updateDirEntries("foobar", entries);
    DirectoryEntries getEntries = metadataCache.getDirEntries("foobar", 0, 10);
    assertEquals(entries, getEntries);
    // Removing "dir2" must leave the other two entries untouched.
    metadataCache.invalidateDirEntry("foobar", "dir2");
    getEntries = metadataCache.getDirEntries("foobar", 0, 10);
    assertEquals(2, getEntries.getEntriesCount());
    for (DirectoryEntry dirEntry : getEntries.getEntriesList()) {
      assertNotSame("dir2", dirEntry.getName());
    }
  }
@Test
public void testInvalidateDirEntries() throws Exception {
metadataCache.updateDirEntries("foobar", getDummyDirEntries());
assertEquals(getDummyDirEntries(), metadataCache.getDirEntries("foobar", 0, 100));
metadataCache.invalidateDirEntries("foobar");
assertNull(metadataCache.getDirEntries("foobar", 0, 100));
}
@Test
public void testGetXAttrSize() throws Exception {
metadataCache.updateXAttrs("foobar", getDummyXattrs());
metadataCache.updateXAttr("foobar", "aNewXattr", "0123456789");
Tupel<Integer, Boolean> tupel = metadataCache.getXAttrSize("foobar", "aNewXattr");
assertEquals(10, tupel.getFirst().intValue());
assertTrue(tupel.getSecond());
}
private listxattrResponse getDummyXattrs() {
return listxattrResponse.newBuilder()
.addXattrs(XAttr.newBuilder().setName("foo").setValue("bar").build()).build();
}
private DirectoryEntries getDummyDirEntries() {
DirectoryEntry entry = DirectoryEntry.newBuilder().setName("foo")
.setStbuf(getIntializedStatBuilder()).build();
DirectoryEntries entries = DirectoryEntries.newBuilder().addEntries(entry).build();
return entries;
}
}
|
kleingeist/xtreemfs
|
java/servers/test/org/xtreemfs/common/libxtreemfs/MetadataCacheTest.java
|
Java
|
bsd-3-clause
| 31,522 |
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.components.autofill_assistant;
import android.app.Activity;
import androidx.annotation.Nullable;
import androidx.annotation.VisibleForTesting;
import org.chromium.base.UserData;
import org.chromium.base.annotations.CalledByNative;
import org.chromium.base.annotations.JNINamespace;
import org.chromium.base.annotations.NativeMethods;
import org.chromium.base.supplier.Supplier;
import org.chromium.components.autofill_assistant.metrics.FeatureModuleInstallation;
import org.chromium.content_public.browser.WebContents;
import org.chromium.ui.base.WindowAndroid;
import java.util.HashMap;
import java.util.Map;
/**
 * Connects to a native starter for which it acts as a platform delegate, providing the necessary
 * dependencies to start autofill-assistant flows.
 */
@JNINamespace("autofill_assistant")
public class Starter implements AssistantTabObserver, UserData {
    /** A supplier for the activity of the tab that this starter tracks. */
    private final Supplier<Activity> mActivitySupplier;
    private final AssistantStaticDependencies mStaticDependencies;
    private final AssistantIsGsaFunction mIsGsaFunction;
    private final AssistantIsMsbbEnabledFunction mIsMsbbEnabledFunction;
    private final AssistantModuleInstallUi.Provider mModuleInstallUiProvider;
    /**
     * The WebContents associated with the tab which this starter is monitoring, unless detached.
     */
    private @Nullable WebContents mWebContents;
    /**
     * The pointer to the native C++ starter. Can be 0 while waiting for the web contents to be
     * available.
     */
    private long mNativeStarter;
    /** The dependencies required to start a flow. */
    @Nullable
    private AssistantDependencies mDependencies;
    /** A helper to show and hide the onboarding. */
    @Nullable
    private AssistantOnboardingHelper mOnboardingHelper;
    /**
     * A field to temporarily hold a startup request's trigger context while the tab is
     * being initialized.
     */
    @Nullable
    private TriggerContext mPendingTriggerContext;
    /**
     * Constructs a java-side starter.
     *
     * This will wait for dependencies to become available and then create the native-side starter.
     * NOTE: The caller must register the Starter as a {@link AssistantTabObserver} so it can keep
     * track of changes.
     */
    public Starter(Supplier<Activity> activitySupplier, @Nullable WebContents webContents,
            AssistantStaticDependencies staticDependencies, AssistantIsGsaFunction isGsaFunction,
            AssistantIsMsbbEnabledFunction isMsbbEnabledFunction,
            AssistantModuleInstallUi.Provider moduleInstallUiProvider) {
        mActivitySupplier = activitySupplier;
        mStaticDependencies = staticDependencies;
        mIsGsaFunction = isGsaFunction;
        mIsMsbbEnabledFunction = isMsbbEnabledFunction;
        mModuleInstallUiProvider = moduleInstallUiProvider;
        // Attaches to the native starter if webContents is already available.
        detectWebContentsChange(webContents);
    }

    @Override
    public void destroy() {
        // UserData teardown: detach so native can no longer call back into this instance.
        safeNativeDetach();
    }

    /**
     * Attempts to start a new flow for {@code triggerContext}. This will wait for the necessary
     * dependencies (such as the web-contents) to be available before attempting the startup. New
     * calls to this method will supersede earlier invocations, potentially cancelling the previous
     * flow (as there can be only one flow maximum per tab).
     */
    public void start(TriggerContext triggerContext) {
        // Starter is not yet ready, we need to wait for the web-contents to be available.
        if (mNativeStarter == 0) {
            mPendingTriggerContext = triggerContext;
            return;
        }
        // Flatten the trigger context into parallel String arrays for the JNI boundary.
        StarterJni.get().start(mNativeStarter, Starter.this, triggerContext.getExperimentIds(),
                triggerContext.getParameters().keySet().toArray(new String[0]),
                triggerContext.getParameters().values().toArray(new String[0]),
                triggerContext.getDeviceOnlyParameters().keySet().toArray(new String[0]),
                triggerContext.getDeviceOnlyParameters().values().toArray(new String[0]),
                triggerContext.getInitialUrl());
    }

    /**
     * Should be called whenever the Tab's WebContents may have changed. Disconnects from the
     * existing WebContents, if necessary, and then connects to the new WebContents.
     */
    private void detectWebContentsChange(@Nullable WebContents webContents) {
        if (mWebContents != webContents) {
            mWebContents = webContents;
            safeNativeDetach();
            if (mWebContents != null) {
                // Some dependencies are tied to the web-contents and need to be fetched again.
                mDependencies = null;
                mNativeStarter =
                        StarterJni.get().fromWebContents(mWebContents, mStaticDependencies);
                // Note: This is intentionally split into two methods (fromWebContents, attach).
                // It ensures that at the time of attach, the native pointer is already set and
                // this instance is ready to serve requests from native.
                StarterJni.get().attach(mNativeStarter, Starter.this);
                if (mPendingTriggerContext != null) {
                    // Replay the startup request that arrived before the tab was ready.
                    start(mPendingTriggerContext);
                    mPendingTriggerContext = null;
                }
            }
        }
    }

    @Override
    public void onContentChanged(@Nullable WebContents webContents) {
        detectWebContentsChange(webContents);
    }

    @Override
    public void onWebContentsSwapped(
            @Nullable WebContents webContents, boolean didStartLoad, boolean didFinishLoad) {
        detectWebContentsChange(webContents);
    }

    @Override
    public void onDestroyed(@Nullable WebContents webContents) {
        safeNativeDetach();
    }

    @Override
    public void onActivityAttachmentChanged(
            @Nullable WebContents webContents, @Nullable WindowAndroid window) {
        detectWebContentsChange(webContents);
        safeNativeOnActivityAttachmentChanged();
    }

    @Override
    public void onInteractabilityChanged(
            @Nullable WebContents webContents, boolean isInteractable) {
        safeNativeOnInteractabilityChanged(isInteractable);
    }

    /**
     * Forces native to re-evaluate the Chrome settings. Integration tests may need to call this to
     * ensure that programmatic updates to the Chrome settings are received by the native starter.
     */
    @VisibleForTesting
    public void forceSettingsChangeNotificationForTesting() {
        safeNativeOnInteractabilityChanged(true);
    }

    // The safeNative* helpers below are no-ops while no native starter is attached
    // (mNativeStarter == 0), e.g. after detach or before the web contents exist.
    private void safeNativeDetach() {
        if (mNativeStarter == 0) {
            return;
        }
        StarterJni.get().detach(mNativeStarter, Starter.this);
        mNativeStarter = 0;
    }

    private void safeNativeOnFeatureModuleInstalled(int result) {
        if (mNativeStarter == 0) {
            return;
        }
        StarterJni.get().onFeatureModuleInstalled(mNativeStarter, Starter.this, result);
    }

    private void safeNativeOnInteractabilityChanged(boolean isInteractable) {
        if (mNativeStarter == 0) {
            return;
        }
        StarterJni.get().onInteractabilityChanged(mNativeStarter, Starter.this, isInteractable);
    }

    private void safeNativeOnActivityAttachmentChanged() {
        if (mNativeStarter == 0) {
            return;
        }
        StarterJni.get().onActivityAttachmentChanged(mNativeStarter, Starter.this);
    }

    /** Returns whether the autofill-assistant feature module (DFM) is installed. */
    @CalledByNative
    static boolean getFeatureModuleInstalled() {
        return AutofillAssistantModuleEntryProvider.INSTANCE.isInstalled();
    }

    /**
     * Triggers installation of the feature module and reports the outcome to native,
     * optionally showing install UI.
     */
    @CalledByNative
    private void installFeatureModule(boolean showUi) {
        if (getFeatureModuleInstalled()) {
            safeNativeOnFeatureModuleInstalled(FeatureModuleInstallation.DFM_ALREADY_INSTALLED);
            return;
        }
        AutofillAssistantModuleEntryProvider.INSTANCE.getModuleEntry(
                (moduleEntry)
                        -> safeNativeOnFeatureModuleInstalled(moduleEntry != null
                                        ? FeatureModuleInstallation
                                                  .DFM_FOREGROUND_INSTALLATION_SUCCEEDED
                                        : FeatureModuleInstallation
                                                  .DFM_FOREGROUND_INSTALLATION_FAILED),
                mModuleInstallUiProvider, showUi);
    }

    @CalledByNative
    private static boolean getIsFirstTimeUser() {
        return AutofillAssistantPreferencesUtil.isAutofillAssistantFirstTimeTriggerScriptUser();
    }

    @CalledByNative
    private static void setIsFirstTimeUser(boolean firstTimeUser) {
        AutofillAssistantPreferencesUtil.setFirstTimeTriggerScriptUserPreference(firstTimeUser);
    }

    @CalledByNative
    private static boolean getOnboardingAccepted() {
        return !AutofillAssistantPreferencesUtil.getShowOnboarding();
    }

    @CalledByNative
    private static void setOnboardingAccepted(boolean accepted) {
        AutofillAssistantPreferencesUtil.setInitialPreferences(accepted);
    }

    /**
     * Shows the onboarding unless it was already accepted, reporting the result to native.
     */
    @CalledByNative
    private void showOnboarding(AssistantOnboardingHelper onboardingHelper,
            boolean useDialogOnboarding, String experimentIds, String[] parameterKeys,
            String[] parameterValues) {
        if (!AutofillAssistantPreferencesUtil.getShowOnboarding()) {
            safeNativeOnOnboardingFinished(
                    /* shown = */ false, 3 /* AssistantOnboardingResult.ACCEPTED*/);
            return;
        }
        // Keys and values arrive as parallel arrays across JNI; rebuild the map.
        assert parameterKeys.length == parameterValues.length;
        Map<String, String> parameters = new HashMap<>();
        for (int i = 0; i < parameterKeys.length; i++) {
            parameters.put(parameterKeys[i], parameterValues[i]);
        }
        onboardingHelper.showOnboarding(useDialogOnboarding, experimentIds, parameters,
                result -> safeNativeOnOnboardingFinished(true, result));
    }

    @CalledByNative
    private void hideOnboarding(AssistantOnboardingHelper onboardingHelper) {
        onboardingHelper.hideOnboarding();
    }

    private void safeNativeOnOnboardingFinished(boolean shown, int result) {
        if (mNativeStarter == 0) {
            return;
        }
        StarterJni.get().onOnboardingFinished(mNativeStarter, Starter.this, shown, result);
    }

    @CalledByNative
    static boolean getProactiveHelpSettingEnabled() {
        return AutofillAssistantPreferencesUtil.isProactiveHelpOn();
    }

    @CalledByNative
    private static void setProactiveHelpSettingEnabled(boolean enabled) {
        AutofillAssistantPreferencesUtil.setProactiveHelpPreference(enabled);
    }

    @CalledByNative
    private boolean getMakeSearchesAndBrowsingBetterSettingEnabled() {
        return mIsMsbbEnabledFunction.getAsBoolean();
    }

    /** Returns the installed feature module entry or throws if it is not installed. */
    private AutofillAssistantModuleEntry getModuleOrThrow() {
        if (!getFeatureModuleInstalled()) {
            throw new RuntimeException(
                    "Failed to create dependencies: Feature module not installed");
        }
        return AutofillAssistantModuleEntryProvider.INSTANCE.getModuleEntryIfInstalled();
    }

    /**
     * Returns and optionally refreshes the dependencies and the onboarding helper. Since the
     * onboarding helper gets invalidated when the dependencies are invalidated we use the same
     * method to refresh them.
     * */
    @CalledByNative
    private Object[] getOrCreateDependenciesAndOnboardingHelper() {
        if (mDependencies == null) {
            // mDependencies is reset to null whenever the web contents change.
            AutofillAssistantModuleEntry module = getModuleOrThrow();
            mDependencies = mStaticDependencies.createDependencies(mActivitySupplier.get());
            mOnboardingHelper = module.createOnboardingHelper(mWebContents, mDependencies);
        }
        return new Object[] {mDependencies, mOnboardingHelper};
    }

    @CalledByNative
    private boolean getIsTabCreatedByGSA() {
        return mIsGsaFunction.apply(mActivitySupplier.get());
    }

    /** JNI bridge to the native StarterDelegateAndroid. */
    @NativeMethods
    interface Natives {
        long fromWebContents(
                WebContents webContents, AssistantStaticDependencies staticDependencies);
        void attach(long nativeStarterDelegateAndroid, Starter caller);
        void detach(long nativeStarterDelegateAndroid, Starter caller);
        void onFeatureModuleInstalled(
                long nativeStarterDelegateAndroid, Starter caller, int result);
        void onOnboardingFinished(
                long nativeStarterDelegateAndroid, Starter caller, boolean shown, int result);
        void onInteractabilityChanged(
                long nativeStarterDelegateAndroid, Starter caller, boolean isInteractable);
        void onActivityAttachmentChanged(long nativeStarterDelegateAndroid, Starter caller);
        void start(long nativeStarterDelegateAndroid, Starter caller, String experimentIds,
                String[] parameterNames, String[] parameterValues,
                String[] deviceOnlyParameterNames, String[] deviceOnlyParameterValues,
                String initialUrl);
    }
}
|
chromium/chromium
|
components/autofill_assistant/android/public/java/src/org/chromium/components/autofill_assistant/Starter.java
|
Java
|
bsd-3-clause
| 13,350 |
package org.activiti.web.simple.webapp.model.example;
import java.util.Date;
import org.springframework.format.annotation.DateTimeFormat;
/**
 * Form-backing bean for the example web form, holding a user's name and
 * date of birth.
 */
public class User {

    private String firstName;

    private String lastName;

    /** Bound from form input using the "yyyy-MM-dd HH:mm" pattern. */
    @DateTimeFormat(pattern="yyyy-MM-dd HH:mm")
    private Date birthday;

    public String getFirstName() {
        return firstName;
    }

    public void setFirstName(String firstName) {
        this.firstName = firstName;
    }

    public String getLastName() {
        return lastName;
    }

    public void setLastName(String lastName) {
        this.lastName = lastName;
    }

    public Date getBirthday() {
        return birthday;
    }

    public void setBirthday(Date birthday) {
        this.birthday = birthday;
    }
}
|
feiyue/maven-framework-project
|
workflow-activiti-example/spring-activiti-webapp/src/main/java/org/activiti/web/simple/webapp/model/example/User.java
|
Java
|
mit
| 752 |
/*
* This file is part of SpongeAPI, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://www.spongepowered.org>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.api.data;
import com.google.common.base.Optional;
import org.spongepowered.api.service.persistence.DataBuilder;
import org.spongepowered.api.service.persistence.InvalidDataException;
import java.util.List;
class SimpleDataBuilder implements DataBuilder<SimpleData> {

    @Override
    public Optional<SimpleData> build(final DataView container) {
        // Pull every required field out of the container, failing fast on the
        // first missing one.
        Integer myInt = require(container.getInt(new DataQuery("myInt")), "myInt");
        Double myDouble = require(container.getDouble(new DataQuery("myDouble")), "myDouble");
        String myString = require(container.getString(new DataQuery("myString")), "myString");
        List<String> myStringList =
                require(container.getStringList(new DataQuery("myStringList")), "myStringList");
        return Optional.of(new SimpleData(myInt, myDouble, myString, myStringList));
    }

    /**
     * Unwraps {@code value} or throws an {@link InvalidDataException} naming
     * the missing query.
     */
    private static <T> T require(Optional<T> value, String queryName) {
        if (!value.isPresent()) {
            throw new InvalidDataException("Missing important data: {" + queryName + "}");
        }
        return value.get();
    }
}
|
joshgarde/SpongeAPI
|
src/test/java/org/spongepowered/api/data/SimpleDataBuilder.java
|
Java
|
mit
| 2,779 |
/*
* GeoServer-Manager - Simple Manager Library for GeoServer
*
* Copyright (C) 2007,2011 GeoSolutions S.A.S.
* http://www.geo-solutions.it
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package it.geosolutions.geoserver.rest.decoder;
import it.geosolutions.geoserver.rest.decoder.utils.JDOMBuilder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import org.jdom.Element;
/**
* This decode turns index format for a GeoServer StructuredGridCoverageReader into something
* useful, giving access to the definition of the single attributes.
*
* <P>This is the XML REST representation:
* <PRE>
{@code
<Schema>
<attributes>
<Attribute>
<name>the_geom</name>
<minOccurs>0</minOccurs>
<maxOccurs>1</maxOccurs>
<nillable>true</nillable>
<binding>com.vividsolutions.jts.geom.Polygon</binding>
</Attribute>
<Attribute>
<name>location</name>
<minOccurs>0</minOccurs>
<maxOccurs>1</maxOccurs>
<nillable>true</nillable>
<binding>java.lang.String</binding>
</Attribute>
<Attribute>
<name>imageindex</name>
<minOccurs>0</minOccurs>
<maxOccurs>1</maxOccurs>
<nillable>true</nillable>
<binding>java.lang.Integer</binding>
</Attribute>
<Attribute>
<name>time</name>
<minOccurs>0</minOccurs>
<maxOccurs>1</maxOccurs>
<nillable>true</nillable>
<binding>java.sql.Timestamp</binding>
</Attribute>
<Attribute>
<name>elevation</name>
<minOccurs>0</minOccurs>
<maxOccurs>1</maxOccurs>
<nillable>true</nillable>
<binding>java.lang.Double</binding>
</Attribute>
<Attribute>
<name>fileDate</name>
<minOccurs>0</minOccurs>
<maxOccurs>1</maxOccurs>
<nillable>true</nillable>
<binding>java.sql.Timestamp</binding>
</Attribute>
<Attribute>
<name>updated</name>
<minOccurs>0</minOccurs>
<maxOccurs>1</maxOccurs>
<nillable>true</nillable>
<binding>java.sql.Timestamp</binding>
</Attribute>
</attributes>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" rel="alternate" href="http://localhost:8080/geoserver/rest/workspaces/it.geosolutions/coveragestores/polyphemus/coverages/V/index/granules.xml" type="application/xml"/>
</Schema>
}</PRE>
* @author Simone Giannecchini, GeoSolutions SAS
*
*/
public class RESTStructuredCoverageIndexSchema implements Iterable<RESTStructuredCoverageIndexSchema.RESTStructuredCoverageIndexAttribute> {

    /** Immutable view of the attributes declared by the index schema, in document order. */
    private final List<RESTStructuredCoverageIndexAttribute> attributeList;

    /**
     * Parses the given {@code <Schema>} element into an attribute list.
     *
     * @param schema the root element of the REST response; must be named {@code Schema}
     *               and contain an {@code attributes} child
     * @throws IllegalStateException if the element is not a well-formed schema document
     */
    @SuppressWarnings("unchecked")
    protected RESTStructuredCoverageIndexSchema(Element schema) {
        // check ordering of elements
        if(!schema.getName().equals("Schema")){
            throw new IllegalStateException("Root element should be Schema");
        }
        Element attributes = schema.getChild("attributes");
        if(attributes==null){
            // FIX: the original message wrongly repeated the root-element check text
            throw new IllegalStateException("Schema element should contain an attributes child");
        }
        List<RESTStructuredCoverageIndexAttribute> tmpList = new ArrayList<RESTStructuredCoverageIndexAttribute>();
        for(Element el : (List<Element>)attributes.getChildren()){
            tmpList.add(new RESTStructuredCoverageIndexAttribute(el));
        }
        attributeList = Collections.unmodifiableList(tmpList);
    }

    /**
     * Builds a schema from the raw XML response body.
     *
     * @param response the XML text returned by GeoServer, may be {@code null}
     * @return the parsed schema, or {@code null} if the response is {@code null}
     *         or cannot be parsed into an element
     */
    public static RESTStructuredCoverageIndexSchema build(String response) {
        if(response == null)
            return null;

        Element pb = JDOMBuilder.buildElement(response);
        if(pb != null){
            return new RESTStructuredCoverageIndexSchema(pb);
        } else {
            return null;
        }
    }

    /** @return the number of attributes in the schema */
    public int size() {
        return attributeList.size();
    }

    /** @return {@code true} if the schema declares no attributes */
    public boolean isEmpty() {
        return attributeList.isEmpty();
    }

    /**
     * @param index position of the attribute in document order
     * @return the attribute at the given position
     */
    public RESTStructuredCoverageIndexAttribute get(int index) {
        return attributeList.get(index);
    }

    /* (non-Javadoc)
     * @see java.lang.Iterable#iterator()
     */
    @Override
    public Iterator<RESTStructuredCoverageIndexAttribute> iterator() {
        return attributeList.iterator();
    }

    /**
     * Generic attribute of the index schema.
     *
     * <P>This is the XML REST representation:
     * <PRE>
     {@code
      <Attribute>
        <name>the_geom</name>
        <minOccurs>0</minOccurs>
        <maxOccurs>1</maxOccurs>
        <nillable>true</nillable>
        <binding>com.vividsolutions.jts.geom.Polygon</binding>
      </Attribute>
     * @author Simone Giannecchini, GeoSolutions SAS
     *
     */
    public static class RESTStructuredCoverageIndexAttribute {

        /** Backing XML element; all getters read straight from it. */
        protected final Element attribute;

        public RESTStructuredCoverageIndexAttribute(Element elem) {
            this.attribute = elem;
        }

        public String getName() {
            return attribute.getChildTextTrim("name");
        }

        public String getMinOccurs() {
            return attribute.getChildTextTrim("minOccurs");
        }

        public String getMaxOccurs() {
            return attribute.getChildTextTrim("maxOccurs");
        }

        public String getNillable() {
            return attribute.getChildTextTrim("nillable");
        }

        public String getBinding() {
            return attribute.getChildTextTrim("binding");
        }

        @Override
        public String toString() {
            // FIX: report this class's actual name instead of the unrelated
            // "RESTStructuredCoverageGranule"
            return "RESTStructuredCoverageIndexAttribute [getName()=" + getName()
                    + ", getMinOccurs()=" + getMinOccurs() + ", getMaxOccurs()=" + getMaxOccurs()
                    + ", getNillable()=" + getNillable() + ", getBinding()=" + getBinding() + "]";
        }
    }
}
|
oscarfonts/geoserver-manager
|
src/main/java/it/geosolutions/geoserver/rest/decoder/RESTStructuredCoverageIndexSchema.java
|
Java
|
mit
| 6,869 |
/*******************************************************************************
* Copyright (c) 2000, 2008 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* IBM Corporation - initial API and implementation
*******************************************************************************/
package org.eclipse.jdt.internal.corext.refactoring.nls.changes;
import java.io.IOException;
import java.io.InputStream;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.core.resources.IFile;
import org.eclipse.jdt.core.IJavaModelStatusConstants;
import org.eclipse.jdt.core.JavaModelException;
import org.eclipse.jdt.internal.corext.refactoring.nls.NLSUtil;
public class CreateTextFileChange extends CreateFileChange {

	/** Content-type id of the file being created (e.g. a text file type). */
	private final String fTextType;

	public CreateTextFileChange(IPath path, String source, String encoding, String textType) {
		super(path, source, encoding);
		fTextType= textType;
	}

	/**
	 * Returns the content-type id this change was created with.
	 */
	public String getTextType() {
		return fTextType;
	}

	/**
	 * Reads the current content of the target file from the workspace.
	 * Returns the empty string when the file does not exist or is empty.
	 */
	public String getCurrentContent() throws JavaModelException {
		IFile file= getOldFile(new NullProgressMonitor());
		if (!file.exists())
			return ""; //$NON-NLS-1$
		InputStream contents= null;
		try {
			contents= file.getContents();
			String charset= file.getCharset();
			String result= NLSUtil.readString(contents, charset);
			return result != null ? result : ""; //$NON-NLS-1$
		} catch (CoreException e) {
			throw new JavaModelException(e, IJavaModelStatusConstants.CORE_EXCEPTION);
		} finally {
			if (contents != null) {
				try {
					contents.close();
				} catch (IOException ignored) {
					// best effort: a failure to close must not mask the result
				}
			}
		}
	}

	/**
	 * Returns the source that will be written to the new file.
	 */
	public String getPreview() {
		return getSource();
	}
}
|
brunyuriy/quick-fix-scout
|
org.eclipse.jdt.ui_3.7.1.r371_v20110824-0800/src/org/eclipse/jdt/internal/corext/refactoring/nls/changes/CreateTextFileChange.java
|
Java
|
mit
| 1,961 |
// MESSAGE LOG_REQUEST_DATA PACKING
package com.MAVLink.common;
import com.MAVLink.MAVLinkPacket;
import com.MAVLink.Messages.MAVLinkMessage;
import com.MAVLink.Messages.MAVLinkPayload;
//import android.util.Log;
/**
* Request a chunk of a log
*/
public class msg_log_request_data extends MAVLinkMessage{

    // NOTE: this file is MAVLink generator output; keep edits in sync with the
    // message definition (LOG_REQUEST_DATA, id 119) rather than hand-editing.
    public static final int MAVLINK_MSG_ID_LOG_REQUEST_DATA = 119;
    public static final int MAVLINK_MSG_LENGTH = 12;
    private static final long serialVersionUID = MAVLINK_MSG_ID_LOG_REQUEST_DATA;


    /**
     * Offset into the log
     */
    public int ofs;

    /**
     * Number of bytes
     */
    public int count;

    /**
     * Log id (from LOG_ENTRY reply)
     */
    public short id;

    /**
     * System ID
     */
    public byte target_system;

    /**
     * Component ID
     */
    public byte target_component;


    /**
     * Generates the payload for a mavlink message for a message of this type.
     * Field order here defines the wire layout and must mirror unpack().
     * @return the packed MAVLink packet
     */
    public MAVLinkPacket pack(){
        MAVLinkPacket packet = new MAVLinkPacket();
        packet.len = MAVLINK_MSG_LENGTH;
        // presumably the GCS defaults baked in by the generator — confirm
        // against the generator configuration before changing
        packet.sysid = 255;
        packet.compid = 190;
        packet.msgid = MAVLINK_MSG_ID_LOG_REQUEST_DATA;
        packet.payload.putInt(ofs);
        packet.payload.putInt(count);
        packet.payload.putShort(id);
        packet.payload.putByte(target_system);
        packet.payload.putByte(target_component);
        return packet;
    }

    /**
     * Decode a log_request_data message into this class fields.
     * Reads fields in the same order pack() writes them.
     *
     * @param payload The message to decode
     */
    public void unpack(MAVLinkPayload payload) {
        payload.resetIndex();
        this.ofs = payload.getInt();
        this.count = payload.getInt();
        this.id = payload.getShort();
        this.target_system = payload.getByte();
        this.target_component = payload.getByte();
    }

    /**
     * Constructor for a new message, just initializes the msgid
     */
    public msg_log_request_data(){
        msgid = MAVLINK_MSG_ID_LOG_REQUEST_DATA;
    }

    /**
     * Constructor for a new message, initializes the message with the payload
     * from a mavlink packet
     *
     */
    public msg_log_request_data(MAVLinkPacket mavLinkPacket){
        this.sysid = mavLinkPacket.sysid;
        this.compid = mavLinkPacket.compid;
        this.msgid = MAVLINK_MSG_ID_LOG_REQUEST_DATA;
        unpack(mavLinkPacket.payload);
        //Log.d("MAVLink", "LOG_REQUEST_DATA");
        //Log.d("MAVLINK_MSG_ID_LOG_REQUEST_DATA", toString());
    }

    /**
     * Returns a string with the MSG name and data
     */
    public String toString(){
        return "MAVLINK_MSG_ID_LOG_REQUEST_DATA -"+" ofs:"+ofs+" count:"+count+" id:"+id+" target_system:"+target_system+" target_component:"+target_component+"";
    }
}
|
Yndal/ArduPilot-SensorPlatform
|
Tower_with_3drservices/dependencyLibs/Mavlink/src/com/MAVLink/common/msg_log_request_data.java
|
Java
|
mit
| 3,163 |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* ChangeDateFormat.java
* Copyright (C) 2004 University of Waikato, Hamilton, New Zealand
*
*/
package weka.filters.unsupervised.attribute;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SingleIndex;
import weka.core.UnsupportedAttributeTypeException;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.filters.Filter;
import weka.filters.StreamableFilter;
import weka.filters.UnsupervisedFilter;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Enumeration;
import java.util.Vector;
/**
<!-- globalinfo-start -->
* Changes the date format used by a date attribute. This is most useful for converting to a format with less precision, for example, from an absolute date to day of year, etc. This changes the format string, and changes the date values to those that would be parsed by the new format.
* <p/>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -C <col>
* Sets the attribute index (default last).</pre>
*
* <pre> -F <value index>
* Sets the output date format string (default corresponds to ISO-8601).</pre>
*
<!-- options-end -->
*
* @author <a href="mailto:len@reeltwo.com">Len Trigg</a>
* @version $Revision: 5543 $
*/
public class ChangeDateFormat
  extends Filter
  implements UnsupervisedFilter, StreamableFilter, OptionHandler {

  /** for serialization */
  static final long serialVersionUID = -1609344074013448737L;

  /** The default output date format. Corresponds to ISO-8601 format. */
  private static final SimpleDateFormat DEFAULT_FORMAT = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");

  /** The attribute's index setting. */
  private SingleIndex m_AttIndex = new SingleIndex("last");

  /** The output date format. */
  private SimpleDateFormat m_DateFormat = DEFAULT_FORMAT;

  /** The output attribute (the date attribute carrying the new format). */
  private Attribute m_OutputAttribute;

  /**
   * Returns a string describing this filter
   *
   * @return a description of the filter suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Changes the date format used by a date attribute. This is most "
      + "useful for converting to a format with less precision, for example, "
      + "from an absolute date to day of year, etc. This changes the format "
      + "string, and changes the date values to those that would be parsed "
      + "by the new format.";
  }

  /**
   * Returns the Capabilities of this filter.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enableAllAttributes();
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enableAllClasses();
    result.enable(Capability.MISSING_CLASS_VALUES);
    result.enable(Capability.NO_CLASS);

    return result;
  }

  /**
   * Sets the format of the input instances.
   *
   * @param instanceInfo an Instances object containing the input
   * instance structure (any instances contained in the object are
   * ignored - only the structure is required).
   * @return true if the outputFormat may be collected immediately
   * @throws Exception if the input format can't be set
   * successfully
   */
  public boolean setInputFormat(Instances instanceInfo)
       throws Exception {

    super.setInputFormat(instanceInfo);
    m_AttIndex.setUpper(instanceInfo.numAttributes() - 1);
    if (!instanceInfo.attribute(m_AttIndex.getIndex()).isDate()) {
      throw new UnsupportedAttributeTypeException("Chosen attribute not date.");
    }

    setOutputFormat();
    return true;
  }

  /**
   * Input an instance for filtering.
   *
   * @param instance the input instance
   * @return true if the filtered instance may now be
   * collected with output().
   * @throws Exception if the input format was not set or the date format cannot
   * be parsed
   */
  public boolean input(Instance instance) throws Exception {

    if (getInputFormat() == null) {
      throw new IllegalStateException("No input instance format defined");
    }
    if (m_NewBatch) {
      resetQueue();
      m_NewBatch = false;
    }
    Instance newInstance = (Instance)instance.copy();
    int index = m_AttIndex.getIndex();
    if (!newInstance.isMissing(index)) {
      double value = instance.value(index);
      try {
        // Format and parse under the new format to force any required
        // loss in precision.
        value = m_OutputAttribute.parseDate(m_OutputAttribute.formatDate(value));
      } catch (ParseException pe) {
        // FIX: preserve the ParseException as the cause instead of dropping it,
        // so the original parse position/message survives for diagnosis.
        throw new RuntimeException("Output date format couldn't parse its own output!!", pe);
      }
      newInstance.setValue(index, value);
    }
    push(newInstance);
    return true;
  }

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {

    Vector newVector = new Vector(2);

    newVector.addElement(new Option(
              "\tSets the attribute index (default last).",
              "C", 1, "-C <col>"));

    newVector.addElement(new Option(
              "\tSets the output date format string (default corresponds to ISO-8601).",
              "F", 1, "-F <value index>"));

    return newVector.elements();
  }


  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -C &lt;col&gt;
   *  Sets the attribute index (default last).</pre>
   *
   * <pre> -F &lt;value index&gt;
   *  Sets the output date format string (default corresponds to ISO-8601).</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String attIndex = Utils.getOption('C', options);
    if (attIndex.length() != 0) {
      setAttributeIndex(attIndex);
    } else {
      setAttributeIndex("last");
    }

    String formatString = Utils.getOption('F', options);
    if (formatString.length() != 0) {
      setDateFormat(formatString);
    } else {
      setDateFormat(DEFAULT_FORMAT);
    }

    if (getInputFormat() != null) {
      setInputFormat(getInputFormat());
    }
  }

  /**
   * Gets the current settings of the filter.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    String [] options = new String [4];
    int current = 0;

    options[current++] = "-C";
    options[current++] = "" + getAttributeIndex();
    options[current++] = "-F";
    options[current++] = "" + getDateFormat().toPattern();

    // pad the remainder with empty strings, as required by the weka contract
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  }

  /**
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String attributeIndexTipText() {

    return "Sets which attribute to process. This "
      + "attribute must be of type date (\"first\" and \"last\" are valid values)";
  }

  /**
   * Gets the index of the attribute converted.
   *
   * @return the index of the attribute
   */
  public String getAttributeIndex() {

    return m_AttIndex.getSingleIndex();
  }

  /**
   * Sets the index of the attribute used.
   *
   * @param attIndex the index of the attribute
   */
  public void setAttributeIndex(String attIndex) {

    m_AttIndex.setSingleIndex(attIndex);
  }

  /**
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String dateFormatTipText() {

    return "The date format to change to. This should be a "
      + "format understood by Java's SimpleDateFormat class.";
  }

  /**
   * Get the date format used in output.
   *
   * @return the output date format.
   */
  public SimpleDateFormat getDateFormat() {

    return m_DateFormat;
  }

  /**
   * Sets the output date format.
   *
   * @param dateFormat the output date format.
   */
  public void setDateFormat(String dateFormat) {

    setDateFormat(new SimpleDateFormat(dateFormat));
  }

  /**
   * Sets the output date format.
   *
   * @param dateFormat the output date format.
   */
  public void setDateFormat(SimpleDateFormat dateFormat) {
    if (dateFormat == null) {
      throw new NullPointerException();
    }
    m_DateFormat = dateFormat;
  }

  /**
   * Set the output format. Changes the format of the specified date
   * attribute.
   */
  private void setOutputFormat() {

    // Create new attributes
    FastVector newAtts = new FastVector(getInputFormat().numAttributes());
    for (int j = 0; j < getInputFormat().numAttributes(); j++) {
      Attribute att = getInputFormat().attribute(j);
      if (j == m_AttIndex.getIndex()) {
        newAtts.addElement(new Attribute(att.name(), getDateFormat().toPattern()));
      } else {
        newAtts.addElement(att.copy());
      }
    }

    // Create new header
    Instances newData = new Instances(getInputFormat().relationName(), newAtts, 0);
    newData.setClassIndex(getInputFormat().classIndex());
    m_OutputAttribute = newData.attribute(m_AttIndex.getIndex());
    setOutputFormat(newData);
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5543 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv should contain arguments to the filter:
   * use -h for help
   */
  public static void main(String [] argv) {
    runFilter(new ChangeDateFormat(), argv);
  }
}
|
williamClanton/singularity
|
weka/src/main/java/weka/filters/unsupervised/attribute/ChangeDateFormat.java
|
Java
|
mit
| 10,765 |
/*******************************************************************************
* Copyright (c) 2011, 2015 Oracle and/or its affiliates. All rights reserved.
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0
* which accompanies this distribution.
* The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Oracle - initial API and implementation
*
******************************************************************************/
package org.eclipse.persistence.jpa.tests.jpql.tools;
import org.eclipse.persistence.jpa.jpql.tools.DefaultRefactoringTool;
import org.eclipse.persistence.jpa.jpql.tools.RefactoringTool;
import org.eclipse.persistence.jpa.tests.jpql.UniqueSignature;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* The abstract definition of a unit-test that tests {@link org.eclipse.persistence.jpa.jpql.
* RefactoringTool RefactoringTool} when the JPA version is 1.0.
*
* @version 2.5
* @since 2.4
* @author Pascal Filion
*/
@UniqueSignature
@SuppressWarnings("nls")
public final class RefactoringToolTest1_0 extends AbstractRefactoringToolTest {

	/**
	 * Unit-tests for {@link org.eclipse.persistence.jpa.jpql.tools.RefactoringTool
	 * RefactoringTool} against JPA 1.0 grammar: renaming classes, entities,
	 * enum constants, fields and identification variables inside JPQL queries.
	 */

	private RefactoringTool buildRefactoringTool(String query) throws Exception {
		return new DefaultRefactoringTool(getPersistenceUnit(), getJPQLQueryBuilder(), query);
	}

	@Test
	public void test_RenameClassName_1() throws Exception {

		String query = "SELECT NEW java.util.Vector(a.employees) FROM Address A";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameClassName("java.util.Vector", "java.util.ArrayList");

		assertEquals("SELECT NEW java.util.ArrayList(a.employees) FROM Address A", tool.toActualText());
	}

	@Test
	public void test_RenameClassName_2() throws Exception {

		String query = "SELECT NEW org.eclipse.persistence.Collection.Vector(a.employees) FROM Address A";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameClassName("org.eclipse.persistence.Collection", "org.eclipse.persistence.Type");

		assertEquals("SELECT NEW org.eclipse.persistence.Type.Vector(a.employees) FROM Address A", tool.toActualText());
	}

	@Test
	public void test_RenameClassName_3() throws Exception {

		// Renaming a class that never appears must leave the query untouched.
		String query = "SELECT NEW org.eclipse.persistence.Vector(a.employees) FROM Address A";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameClassName("org.eclipse.persistence.AbstractSession", "org.eclipse.persistence.session.Session");

		assertEquals(query, tool.toActualText());
	}

	@Test
	public void test_RenameClassName_4() throws Exception {

		String query = "SELECT p FROM Product p WHERE p.enumType = jpql.query.EnumType.FIRST_NAME";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameClassName("jpql.query.EnumType", "org.eclipse.persistence.Type");

		assertEquals("SELECT p FROM Product p WHERE p.enumType = org.eclipse.persistence.Type.FIRST_NAME", tool.toActualText());
	}

	@Test
	public void test_RenameClassName_5() throws Exception {

		// A trailing dot (incomplete path) is still renamed.
		String query = "SELECT p FROM Product p WHERE p.enumType = jpql.query.EnumType.";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameClassName("jpql.query.EnumType", "org.eclipse.persistence.Type");

		assertEquals("SELECT p FROM Product p WHERE p.enumType = org.eclipse.persistence.Type.", tool.toActualText());
	}

	@Test
	public void test_RenameClassName_6() throws Exception {

		// Only the matching prefix is replaced; the nested segment survives.
		String query = "SELECT p FROM Product p WHERE p.enumType = jpql.query.Employee.EnumType.FIRST_NAME";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameClassName("jpql.query.Employee", "org.eclipse.persistence.Type");

		assertEquals("SELECT p FROM Product p WHERE p.enumType = org.eclipse.persistence.Type.EnumType.FIRST_NAME", tool.toActualText());
	}

	@Test
	public void test_RenameClassName_7() throws Exception {

		String query = "SELECT p FROM Product p WHERE p.enumType = jpql.query.EnumType.FIRST_NAME";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameClassName("jpql.query.Employee", "org.eclipse.persistence.Type");

		assertEquals(query, tool.toActualText());
	}

	@Test
	public void test_RenameEntityName_1() throws Exception {

		String query = "SELECT a FROM Address A";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameEntityName("Address", "Employee");

		assertEquals("SELECT a FROM Employee A", tool.toActualText());
	}

	@Test
	public void test_RenameEntityName_2() throws Exception {

		// Entity names inside subqueries are renamed as well.
		String query = "SELECT a FROM Address A WHERE EXISTS(SELECT e FROM Employee e)";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameEntityName("Employee", "Manager");

		assertEquals("SELECT a FROM Address A WHERE EXISTS(SELECT e FROM Manager e)", tool.toActualText());
	}

	@Test
	public void test_RenameEnumConstant_1() throws Exception {

		String query = "UPDATE Employee SET name = javax.persistence.AccessType.FIELD";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameEnumConstant("javax.persistence.AccessType.FIELD", "javax.persistence.AccessType.PROPERTY");

		assertEquals("UPDATE Employee SET name = javax.persistence.AccessType.PROPERTY", tool.toActualText());
	}

	@Test
	public void test_RenameEnumConstant_2() throws Exception {

		String query = "UPDATE Employee e SET e.name = e.lName";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameEnumConstant("javax.persistence.AccessType.FIELD", "javax.persistence.AccessType.PROPERTY");

		assertEquals(query, tool.toActualText());
	}

	@Test
	public void test_RenameFieldName_1() throws Exception {

		String query = "SELECT e.address.zip FROM Employee e";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameField("jpql.query.Employee", "address", "addr");

		assertEquals("SELECT e.addr.zip FROM Employee e", tool.toActualText());
	}

	@Test
	public void test_RenameFieldName_2() throws Exception {

		// The field does not resolve against the given type: nothing changes.
		String query = "SELECT e.address.zipcode FROM Employee e";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameField("jpql.query.Address", "zip", "zipcode");

		assertEquals(query, tool.toActualText());
	}

	@Test
	public void test_RenameVariable_1() throws Exception {

		String query = "SELECT a FROM Address A";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameVariable("a", "addr");

		assertEquals("SELECT addr FROM Address addr", tool.toActualText());
	}

	@Test
	public void test_RenameVariable_2() throws Exception {

		String query = "SELECT a FROM Address a JOIN a.employees ad";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameVariable("a", "addr");

		assertEquals("SELECT addr FROM Address addr JOIN addr.employees ad", tool.toActualText());
	}

	@Test
	public void test_RenameVariable_3() throws Exception {

		// Occurrences inside a subquery's FROM clause are renamed too.
		String query = "SELECT a FROM Address a WHERE EXISTS(SELECT e FROM a.employee)";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameVariable("a", "addr");

		assertEquals("SELECT addr FROM Address addr WHERE EXISTS(SELECT e FROM addr.employee)", tool.toActualText());
	}

	@Test
	public void test_RenameVariable_4() throws Exception {

		// No declaration of "a" exists: the query must be left untouched.
		String query = "SELECT FROM Address";
		RefactoringTool tool = buildRefactoringTool(query);
		tool.renameVariable("a", "addr");

		assertEquals(query, tool.toActualText());
	}
}
|
RallySoftware/eclipselink.runtime
|
jpa/org.eclipse.persistence.jpa.jpql.test/src/org/eclipse/persistence/jpa/tests/jpql/tools/RefactoringToolTest1_0.java
|
Java
|
epl-1.0
| 9,223 |
/*******************************************************************************
* Copyright (c) 2000, 2009 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* IBM Corporation - initial API and implementation
*******************************************************************************/
package org.eclipse.jdt.internal.compiler;
/**
* A callback interface for receiving compilation results.
*/
public interface ICompilerRequestor {
	
	/**
	 * Accept a compilation result. Called by the compiler once per compilation
	 * unit, after that unit has been processed (whether or not it has errors).
	 *
	 * @param result the outcome of compiling one unit: generated class files
	 *               and any problems that were reported
	 */
	public void acceptResult(CompilationResult result);
}
|
elucash/eclipse-oxygen
|
org.eclipse.jdt.core/src/org/eclipse/jdt/internal/compiler/ICompilerRequestor.java
|
Java
|
epl-1.0
| 789 |
/*******************************************************************************
* Copyright (c) 2011, 2015 Oracle. All rights reserved.
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0
* which accompanies this distribution.
*
* The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* - rbarkhouse - 04 October 2011 - 2.4 - Initial implementation
******************************************************************************/
package org.eclipse.persistence.testing.jaxb.sun.xmllocation;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import org.eclipse.persistence.jaxb.JAXBContextFactory;
import junit.framework.TestCase;
public class XmlLocationErrorTestCases extends TestCase {
public void testInvalidXmlLocationType() {
Exception caughtEx = null;
try {
JAXBContext c = JAXBContextFactory.createContext(new Class[] { DataError.class }, null);
} catch (Exception e) {
caughtEx = e;
}
assertNotNull("Exception was not thrown as expected.", caughtEx);
assertTrue("Unexpected exception type was thrown.", caughtEx instanceof JAXBException);
JAXBException jEx = (JAXBException) caughtEx;
assertTrue("Unexpected exception type was thrown.", jEx.getLinkedException() instanceof org.eclipse.persistence.exceptions.JAXBException);
org.eclipse.persistence.exceptions.JAXBException elEx = (org.eclipse.persistence.exceptions.JAXBException) jEx.getLinkedException();
assertEquals("", org.eclipse.persistence.exceptions.JAXBException.INVALID_XMLLOCATION, elEx.getErrorCode());
}
}
|
RallySoftware/eclipselink.runtime
|
moxy/eclipselink.moxy.test/src/org/eclipse/persistence/testing/jaxb/sun/xmllocation/XmlLocationErrorTestCases.java
|
Java
|
epl-1.0
| 1,898 |
package info.novatec.testit.livingdoc.samples.application.mortgage;
public final class MortgageBalanceCalculator {

    /** Notary fees covered by the bank are capped at this amount. */
    public static final float MAX_NOTARY_FEE = 1200;
    /** Minimum fraction of the allowance that must be paid in cash. */
    public static final float MIN_CASH_DOWN_RATIO = 0.25f;
    /** Penalty applied to the balance when the cash down is insufficient. */
    public static final float MIN_CASH_DOWN_FEE_RATIO = 0.025f;
    /** Largest fraction of the commercial value a mortgage may cover. */
    public static final float MAX_MORTGAGE_RATIO = 0.75f;

    /** Static utility class; never instantiated. */
    private MortgageBalanceCalculator() {
    }

    /**
     * Tells whether the cash down falls below the minimum required fraction
     * ({@link #MIN_CASH_DOWN_RATIO}) of the mortgage allowance.
     */
    public static boolean isInsufficient(float morgageAllowance, float cashDown) {
        float cashDownRatio = cashDown / morgageAllowance;
        return cashDownRatio < MIN_CASH_DOWN_RATIO;
    }

    /**
     * Computes the remaining balance after the cash down, adding the
     * insufficient-cash-down penalty fee when it applies.
     */
    public static float evaluate(float mortgageAllowance, float cashDown) {
        float balance = mortgageAllowance - cashDown;
        return isInsufficient(mortgageAllowance, cashDown)
                ? balance * (1 + MIN_CASH_DOWN_FEE_RATIO)
                : balance;
    }

    /**
     * Re-evaluates the allowance under the "bank pays your bills" option:
     * the bank adds the (capped) notary fees and the bienvenue tax on top.
     */
    public static float reevaluateBankPayYourBillsOption(float mortgageAllowance, float notaryFees, float bienvenueTax) {
        float cappedNotaryFee = getPayedNotaryFee(notaryFees);
        return mortgageAllowance + cappedNotaryFee + bienvenueTax;
    }

    /**
     * The mortgage covers the purchase price, bounded above by
     * {@link #MAX_MORTGAGE_RATIO} of the commercial value.
     */
    public static float getMortgageAllowance(float commercialValue, float purchasedPrice) {
        float valueCap = commercialValue * MAX_MORTGAGE_RATIO;
        return Math.min(valueCap, purchasedPrice);
    }

    /**
     * Notary fees actually paid by the bank, capped at {@link #MAX_NOTARY_FEE}.
     */
    public static float getPayedNotaryFee(float bigNotaryFee) {
        return Math.min(MAX_NOTARY_FEE, bigNotaryFee);
    }
}
|
mruiz84/livingdoc-core
|
livingdoc-samples/src/main/java/info/novatec/testit/livingdoc/samples/application/mortgage/MortgageBalanceCalculator.java
|
Java
|
gpl-3.0
| 1,403 |
package fr.xephi.authme.data.auth;
import org.junit.Test;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.fail;
/**
* Test for {@link PlayerAuth} and its builder.
*/
public class PlayerAuthTest {

    @Test
    public void shouldRemoveDatabaseDefaults() {
        // given / when
        PlayerAuth playerAuth = PlayerAuth.builder()
            .name("Bobby")
            .lastLogin(0L)
            .lastIp("127.0.0.1")
            .email("your@email.com")
            .build();

        // then
        assertThat(playerAuth.getNickname(), equalTo("bobby"));
        assertThat(playerAuth.getLastLogin(), nullValue());
        // 127.0.0.1 is the database default, but it may also be a player's
        // genuinely resolved IP, so the builder must keep it as-is
        assertThat(playerAuth.getLastIp(), equalTo("127.0.0.1"));
        assertThat(playerAuth.getEmail(), nullValue());
    }

    @Test
    public void shouldThrowForMissingName() {
        try {
            // given / when
            PlayerAuth.builder()
                .email("test@example.org")
                .groupId(3)
                .build();

            // then
            fail("Expected exception to be thrown");
        } catch (NullPointerException e) {
            // expected: the name is mandatory
        }
    }

    @Test
    public void shouldCreatePlayerAuthWithNullValues() {
        // given / when
        PlayerAuth playerAuth = PlayerAuth.builder()
            .name("Charlie")
            .email(null)
            .lastLogin(null)
            .lastIp(null)
            .groupId(19)
            .locPitch(123.004f)
            .build();

        // then
        assertThat(playerAuth.getEmail(), nullValue());
        assertThat(playerAuth.getLastLogin(), nullValue());
        assertThat(playerAuth.getLastIp(), nullValue());
        assertThat(playerAuth.getGroupId(), equalTo(19));
        assertThat(playerAuth.getPitch(), equalTo(123.004f));
    }
}
|
Xephi/AuthMeReloaded
|
src/test/java/fr/xephi/authme/data/auth/PlayerAuthTest.java
|
Java
|
gpl-3.0
| 2,017 |
package eu.excitementproject.eop.common.representation.partofspeech;
import java.util.HashSet;
import java.util.Set;
import eu.excitementproject.eop.common.datastructures.SimpleValueSetMap;
import eu.excitementproject.eop.common.datastructures.immutable.ImmutableSet;
import eu.excitementproject.eop.common.utilities.Utils;
/**
* Class that sets the Part of Speech tags for the DKPro tagset
* Maps each DKPro tag onto the corresponding
* {@link CanonicalPosTag}.
*
* @author Roberto Zanoli
*
*/
public class DKProPartOfSpeech extends PartOfSpeech
{
    private static final long serialVersionUID = 6238674224249428653L;

    /**
     * All DKPro pos tags
     */
    public enum DKProPosTag {
        ADJ, ADV, CONJ, PR, PP, O, CARD, ART, NN, NP, PUNC, V, POS
    };

    // Tag strings checked before the enum lookup in setDKProPos().
    // NOTE(review): PUNCTUATION is initialised to an EMPTY set, so the punctuation
    // branch in setDKProPos() can never match — confirm whether entries are missing.
    public static final Set<String> PUNCTUATION;
    // NOTE(review): SYMBOLS ("#", "$") is populated but never consulted; the branch
    // that would use it is commented out in setDKProPos().
    public static final Set<String> SYMBOLS;
    static
    {
        PUNCTUATION = Utils.arrayToCollection(
                new String[]{},
                new HashSet<String>());
        SYMBOLS = Utils.arrayToCollection(new String[]{"#", "$"}, new HashSet<String>());
    }

    /**
     * a bidirectional map between canonical POSs and their corresponding DKPro POSs
     */
    private static final SimpleValueSetMap<CanonicalPosTag, DKProPosTag> DKPRO_TO_CANONICAL_MAP = new SimpleValueSetMap<CanonicalPosTag, DKProPosTag>();
    static
    {
        // map between all canonical POSs and DKPro POSs
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.V, DKProPosTag.V);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.N, DKProPosTag.NN);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.N, DKProPosTag.NP);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.ADJ, DKProPosTag.ADJ);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.ART, DKProPosTag.ART);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.PP, DKProPosTag.PR);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.PP, DKProPosTag.PP);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.ADV, DKProPosTag.ADV);
        // NOTE(review): the conjunction tag CONJ is mapped to CanonicalPosTag.PP —
        // this looks like a copy/paste slip; verify whether a conjunction canonical
        // tag was intended instead.
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.PP, DKProPosTag.CONJ);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.PUNC, DKProPosTag.PUNC);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.OTHER, DKProPosTag.CARD);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.OTHER, DKProPosTag.O);
        DKPRO_TO_CANONICAL_MAP.put(CanonicalPosTag.OTHER, DKProPosTag.POS);
    }

    // -----------------------------------------------------------------

    /**
     * A constructor receiving a string and converts it to DKPro pos tag.
     * When possible, use the constructor that accepts <code>DKProPosTag</code> to identify errors at compilation time
     * @param posTagString - a string that conforms with the DKPro pos tag set
     * @throws UnsupportedPosTagStringException - in case a non valid string is sent
     */
    public DKProPartOfSpeech(String posTagString) throws UnsupportedPosTagStringException {
        super(posTagString);
    }

    /**
     * A constructor receiving a DKPro pos tag. This is a safer version than the one receiving string. When possible use this one.
     *
     * @param dkproPos - one of the values of the DKPro tag set
     * @throws UnsupportedPosTagStringException never thrown here: the enum name is always valid
     */
    public DKProPartOfSpeech(DKProPosTag dkproPos) throws UnsupportedPosTagStringException {
        super(dkproPos.name());
        this.posTagString = dkproPos.name().toUpperCase();
    }

    /**
     * Factory method required by {@link PartOfSpeech}: builds a new instance of this
    * concrete subclass for the given tag string.
     */
    @Override
    public PartOfSpeech createNewPartOfSpeech(String posTagString) throws UnsupportedPosTagStringException
    {
        DKProPartOfSpeech ret = new DKProPartOfSpeech(posTagString);
        return ret;
    }

    /**
     * get the canonical Pos corresponding to the given DKPro pos
     * (each DKPro tag appears under exactly one canonical key in the map,
     * so taking the first key of the key-set is deterministic).
     * NOTE(review): method name misspells "Canonical"; it is public API, so it is
     * left unchanged — consider adding a correctly-spelled delegate.
     *
     * @param dkproPos a DKPro tag
     * @return the canonical tag the DKPro tag is mapped to
     */
    public static CanonicalPosTag dkproPosToCannonical(DKProPosTag dkproPos)
    {
        return DKPRO_TO_CANONICAL_MAP.getKeysOf(dkproPos).iterator().next();
    }

    /**
     * get the set of DKPro POSs corresponding to the given canonical Pos
     *
     * @param canonPos a canonical tag
     * @return all DKPro tags mapped to that canonical tag (may be empty)
     */
    public static ImmutableSet<DKProPosTag> canonicalPosToDKPro(CanonicalPosTag canonPos)
    {
        return DKPRO_TO_CANONICAL_MAP.get(canonPos);
    }

    /* (non-Javadoc)
     * @see ac.biu.nlp.nlp.representation.PartOfSpeech#setCanonicalPosTag()
     */
    @Override
    protected void setCanonicalPosTag()
    {
        // Resolve the raw tag string to a DKProPosTag first (lazy, cached in _dkproPos).
        setDKProPos(posTagString);
        if (null==_dkproPos)
            this.canonicalPosTag = CanonicalPosTag.OTHER;
        else
        {
            // look it up in the DKPro to Canonicals map
            this.canonicalPosTag = DKPRO_TO_CANONICAL_MAP.containsValue(_dkproPos) ? DKPRO_TO_CANONICAL_MAP.getKeysOf(_dkproPos).iterator().next()
                    : CanonicalPosTag.OTHER; // default
        }
    }

    /**
     * Rejects any string that does not resolve to a {@link DKProPosTag}.
     */
    @Override
    protected void validatePosTagString(String posTagString) throws UnsupportedPosTagStringException
    {
        setDKProPos(posTagString);
        if (_dkproPos==null)
            throw new UnsupportedPosTagStringException("pos tag '" + posTagString + "' is unsupported DKPro pos tag");
    }

    /**
     * Lazily resolves {@code posTagString} into {@link #_dkproPos}; once a non-null
     * value is cached, subsequent calls are no-ops.
     */
    protected void setDKProPos(String posTagString)
    {
        if (_dkproPos==null)
        {
            _dkproPos = null; // NOTE(review): redundant (already null); kept as-is
            if (PUNCTUATION.contains(posTagString)){ // never true while PUNCTUATION is empty — see note above
                _dkproPos = DKProPosTag.PUNC;
            }
            //else if(SYMBOLS.contains(posTagString)){
                //_dkproPos = DKProPosTag.SYM1;
            //}
            else{
                try
                {
                    _dkproPos = DKProPosTag.valueOf(posTagString);
                } catch (IllegalArgumentException e)
                {
                    _dkproPos = null; // unknown tag: leave unresolved; callers fall back to OTHER
                }
            }
        }
    }

    // Cached enum value resolved from posTagString; null until resolved (or unresolvable).
    private DKProPosTag _dkproPos = null;
}
|
madhumita-dfki/Excitement-Open-Platform
|
common/src/main/java/eu/excitementproject/eop/common/representation/partofspeech/DKProPartOfSpeech.java
|
Java
|
gpl-3.0
| 5,179 |
//#############################################################################
//# #
//# Copyright (C) <2014> <IMS MAXIMS> #
//# #
//# This program is free software: you can redistribute it and/or modify #
//# it under the terms of the GNU Affero General Public License as #
//# published by the Free Software Foundation, either version 3 of the #
//# License, or (at your option) any later version. #
//# #
//# This program is distributed in the hope that it will be useful, #
//# but WITHOUT ANY WARRANTY; without even the implied warranty of #
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
//# GNU Affero General Public License for more details. #
//# #
//# You should have received a copy of the GNU Affero General Public License #
//# along with this program. If not, see <http://www.gnu.org/licenses/>. #
//# #
//#############################################################################
//#EOH
// This code was generated by Barbara Worwood using IMS Development Environment (version 1.80 build 5007.25751)
// Copyright (C) 1995-2014 IMS MAXIMS. All rights reserved.
// WARNING: DO NOT MODIFY the content of this file
package ims.clinical.vo.beans;
/**
 * Generated serialization bean mirroring {@code ims.clinical.vo.ClinicalCorrespondenceVo}.
 * Each nested value object is flattened into its corresponding bean; collection-typed
 * properties become bean arrays, and back-references are carried as lightweight
 * {@code RefVoBean} id/version pairs. Do not edit by hand: this file is produced by the
 * IMS Development Environment code generator.
 */
public class ClinicalCorrespondenceVoBean extends ims.vo.ValueObjectBean
{
    // No-arg constructor required for bean (de)serialization.
    public ClinicalCorrespondenceVoBean()
    {
    }
    // Copies every property from the value object WITHOUT an identity map; any shared
    // sub-objects are duplicated rather than de-duplicated (contrast with populate()).
    public ClinicalCorrespondenceVoBean(ims.clinical.vo.ClinicalCorrespondenceVo vo)
    {
        this.id = vo.getBoId();
        this.version = vo.getBoVersion();
        this.clinicalcontact = vo.getClinicalContact() == null ? null : (ims.core.vo.beans.ClinicalContactShortVoBean)vo.getClinicalContact().getBean();
        this.carecontext = vo.getCareContext() == null ? null : (ims.core.vo.beans.CareContextListVoBean)vo.getCareContext().getBean();
        this.verifyinghcp = vo.getVerifyingHCP() == null ? null : (ims.core.vo.beans.HcpLiteVoBean)vo.getVerifyingHCP().getBean();
        this.verifyingdatetime = vo.getVerifyingDateTime() == null ? null : (ims.framework.utils.beans.DateTimeBean)vo.getVerifyingDateTime().getBean();
        this.currentstatus = vo.getCurrentStatus() == null ? null : (ims.clinical.vo.beans.CorrespondenceStatusVoBean)vo.getCurrentStatus().getBean();
        this.opdnotes = vo.getOPDNotes() == null ? null : (ims.clinical.vo.beans.OutpatientNotesOutcomeVoBean)vo.getOPDNotes().getBean();
        this.hasobjectivenote = vo.getHasObjectiveNote();
        this.hasplannote = vo.getHasPlanNote();
        this.hasinstructionsnote = vo.getHasInstructionsNote();
        this.inpatientdischargesummary = vo.getInpatientDischargeSummary() == null ? null : (ims.clinical.vo.beans.InpatientDischargeNoteFollowupVoBean)vo.getInpatientDischargeSummary().getBean();
        this.type = vo.getType() == null ? null : (ims.vo.LookupInstanceBean)vo.getType().getBean();
        this.episodeofcare = vo.getEpisodeOfCare() == null ? null : (ims.core.vo.beans.EpisodeofCareShortVoBean)vo.getEpisodeOfCare().getBean();
        this.authoringinformation = vo.getAuthoringInformation() == null ? null : (ims.core.vo.beans.AuthoringInformationVoBean)vo.getAuthoringInformation().getBean();
        this.recipients = vo.getRecipients() == null ? null : vo.getRecipients().getBeanCollection();
        this.statushistory = vo.getStatusHistory() == null ? null : vo.getStatusHistory().getBeanCollection();
        this.problems = vo.getProblems() == null ? null : vo.getProblems().getBeanCollection();
        this.diagnosescomplications = vo.getDiagnosesComplications() == null ? null : vo.getDiagnosesComplications().getBeanCollection();
        this.procedures = vo.getProcedures() == null ? null : vo.getProcedures().getBeanCollection();
        this.medication = vo.getMedication() == null ? null : new ims.vo.RefVoBean(vo.getMedication().getBoId(), vo.getMedication().getBoVersion());
        this.primaryrecipient = vo.getPrimaryRecipient() == null ? null : (ims.clinical.vo.beans.ClinicalCorrespondenceRecipientsVoBean)vo.getPrimaryRecipient().getBean();
        this.ruserverifyingdatetime = vo.getRUserVerifyingDateTime() == null ? null : (ims.framework.utils.beans.DateTimeBean)vo.getRUserVerifyingDateTime().getBean();
        this.ruserverifyinghcp = vo.getRUserVerifyingHCP() == null ? null : (ims.core.vo.beans.MemberOfStaffLiteVoBean)vo.getRUserVerifyingHCP().getBean();
    }
    // Same copy as the VO constructor, but threads a bean map through nested getBean(map)
    // calls so shared/cyclic sub-objects are converted exactly once. (Simple beans such as
    // DateTimeBean and LookupInstanceBean take no map — they cannot participate in cycles.)
    public void populate(ims.vo.ValueObjectBeanMap map, ims.clinical.vo.ClinicalCorrespondenceVo vo)
    {
        this.id = vo.getBoId();
        this.version = vo.getBoVersion();
        this.clinicalcontact = vo.getClinicalContact() == null ? null : (ims.core.vo.beans.ClinicalContactShortVoBean)vo.getClinicalContact().getBean(map);
        this.carecontext = vo.getCareContext() == null ? null : (ims.core.vo.beans.CareContextListVoBean)vo.getCareContext().getBean(map);
        this.verifyinghcp = vo.getVerifyingHCP() == null ? null : (ims.core.vo.beans.HcpLiteVoBean)vo.getVerifyingHCP().getBean(map);
        this.verifyingdatetime = vo.getVerifyingDateTime() == null ? null : (ims.framework.utils.beans.DateTimeBean)vo.getVerifyingDateTime().getBean();
        this.currentstatus = vo.getCurrentStatus() == null ? null : (ims.clinical.vo.beans.CorrespondenceStatusVoBean)vo.getCurrentStatus().getBean(map);
        this.opdnotes = vo.getOPDNotes() == null ? null : (ims.clinical.vo.beans.OutpatientNotesOutcomeVoBean)vo.getOPDNotes().getBean(map);
        this.hasobjectivenote = vo.getHasObjectiveNote();
        this.hasplannote = vo.getHasPlanNote();
        this.hasinstructionsnote = vo.getHasInstructionsNote();
        this.inpatientdischargesummary = vo.getInpatientDischargeSummary() == null ? null : (ims.clinical.vo.beans.InpatientDischargeNoteFollowupVoBean)vo.getInpatientDischargeSummary().getBean(map);
        this.type = vo.getType() == null ? null : (ims.vo.LookupInstanceBean)vo.getType().getBean();
        this.episodeofcare = vo.getEpisodeOfCare() == null ? null : (ims.core.vo.beans.EpisodeofCareShortVoBean)vo.getEpisodeOfCare().getBean(map);
        this.authoringinformation = vo.getAuthoringInformation() == null ? null : (ims.core.vo.beans.AuthoringInformationVoBean)vo.getAuthoringInformation().getBean(map);
        this.recipients = vo.getRecipients() == null ? null : vo.getRecipients().getBeanCollection();
        this.statushistory = vo.getStatusHistory() == null ? null : vo.getStatusHistory().getBeanCollection();
        this.problems = vo.getProblems() == null ? null : vo.getProblems().getBeanCollection();
        this.diagnosescomplications = vo.getDiagnosesComplications() == null ? null : vo.getDiagnosesComplications().getBeanCollection();
        this.procedures = vo.getProcedures() == null ? null : vo.getProcedures().getBeanCollection();
        this.medication = vo.getMedication() == null ? null : new ims.vo.RefVoBean(vo.getMedication().getBoId(), vo.getMedication().getBoVersion());
        this.primaryrecipient = vo.getPrimaryRecipient() == null ? null : (ims.clinical.vo.beans.ClinicalCorrespondenceRecipientsVoBean)vo.getPrimaryRecipient().getBean(map);
        this.ruserverifyingdatetime = vo.getRUserVerifyingDateTime() == null ? null : (ims.framework.utils.beans.DateTimeBean)vo.getRUserVerifyingDateTime().getBean();
        this.ruserverifyinghcp = vo.getRUserVerifyingHCP() == null ? null : (ims.core.vo.beans.MemberOfStaffLiteVoBean)vo.getRUserVerifyingHCP().getBean(map);
    }
    // Convenience overload: converts this bean back to a VO with a fresh identity map.
    public ims.clinical.vo.ClinicalCorrespondenceVo buildVo()
    {
        return this.buildVo(new ims.vo.ValueObjectBeanMap());
    }
    // Bean-to-VO conversion; the map guarantees one VO instance per bean even with
    // shared references. NOTE(review): a null map would NPE at addValueObject despite
    // the "map != null" guard above it — callers are expected to pass a non-null map.
    public ims.clinical.vo.ClinicalCorrespondenceVo buildVo(ims.vo.ValueObjectBeanMap map)
    {
        ims.clinical.vo.ClinicalCorrespondenceVo vo = null;
        if(map != null)
            vo = (ims.clinical.vo.ClinicalCorrespondenceVo)map.getValueObject(this);
        if(vo == null)
        {
            vo = new ims.clinical.vo.ClinicalCorrespondenceVo();
            map.addValueObject(this, vo);
            vo.populate(map, this);
        }
        return vo;
    }
    // --- Generated property accessors (business-object id/version first, then data) ---
    public Integer getId()
    {
        return this.id;
    }
    public void setId(Integer value)
    {
        this.id = value;
    }
    public int getVersion()
    {
        return this.version;
    }
    public void setVersion(int value)
    {
        this.version = value;
    }
    public ims.core.vo.beans.ClinicalContactShortVoBean getClinicalContact()
    {
        return this.clinicalcontact;
    }
    public void setClinicalContact(ims.core.vo.beans.ClinicalContactShortVoBean value)
    {
        this.clinicalcontact = value;
    }
    public ims.core.vo.beans.CareContextListVoBean getCareContext()
    {
        return this.carecontext;
    }
    public void setCareContext(ims.core.vo.beans.CareContextListVoBean value)
    {
        this.carecontext = value;
    }
    public ims.core.vo.beans.HcpLiteVoBean getVerifyingHCP()
    {
        return this.verifyinghcp;
    }
    public void setVerifyingHCP(ims.core.vo.beans.HcpLiteVoBean value)
    {
        this.verifyinghcp = value;
    }
    public ims.framework.utils.beans.DateTimeBean getVerifyingDateTime()
    {
        return this.verifyingdatetime;
    }
    public void setVerifyingDateTime(ims.framework.utils.beans.DateTimeBean value)
    {
        this.verifyingdatetime = value;
    }
    public ims.clinical.vo.beans.CorrespondenceStatusVoBean getCurrentStatus()
    {
        return this.currentstatus;
    }
    public void setCurrentStatus(ims.clinical.vo.beans.CorrespondenceStatusVoBean value)
    {
        this.currentstatus = value;
    }
    public ims.clinical.vo.beans.OutpatientNotesOutcomeVoBean getOPDNotes()
    {
        return this.opdnotes;
    }
    public void setOPDNotes(ims.clinical.vo.beans.OutpatientNotesOutcomeVoBean value)
    {
        this.opdnotes = value;
    }
    public Boolean getHasObjectiveNote()
    {
        return this.hasobjectivenote;
    }
    public void setHasObjectiveNote(Boolean value)
    {
        this.hasobjectivenote = value;
    }
    public Boolean getHasPlanNote()
    {
        return this.hasplannote;
    }
    public void setHasPlanNote(Boolean value)
    {
        this.hasplannote = value;
    }
    public Boolean getHasInstructionsNote()
    {
        return this.hasinstructionsnote;
    }
    public void setHasInstructionsNote(Boolean value)
    {
        this.hasinstructionsnote = value;
    }
    public ims.clinical.vo.beans.InpatientDischargeNoteFollowupVoBean getInpatientDischargeSummary()
    {
        return this.inpatientdischargesummary;
    }
    public void setInpatientDischargeSummary(ims.clinical.vo.beans.InpatientDischargeNoteFollowupVoBean value)
    {
        this.inpatientdischargesummary = value;
    }
    public ims.vo.LookupInstanceBean getType()
    {
        return this.type;
    }
    public void setType(ims.vo.LookupInstanceBean value)
    {
        this.type = value;
    }
    public ims.core.vo.beans.EpisodeofCareShortVoBean getEpisodeOfCare()
    {
        return this.episodeofcare;
    }
    public void setEpisodeOfCare(ims.core.vo.beans.EpisodeofCareShortVoBean value)
    {
        this.episodeofcare = value;
    }
    public ims.core.vo.beans.AuthoringInformationVoBean getAuthoringInformation()
    {
        return this.authoringinformation;
    }
    public void setAuthoringInformation(ims.core.vo.beans.AuthoringInformationVoBean value)
    {
        this.authoringinformation = value;
    }
    public ims.clinical.vo.beans.ClinicalCorrespondenceRecipientsVoBean[] getRecipients()
    {
        return this.recipients;
    }
    public void setRecipients(ims.clinical.vo.beans.ClinicalCorrespondenceRecipientsVoBean[] value)
    {
        this.recipients = value;
    }
    public ims.clinical.vo.beans.CorrespondenceStatusVoBean[] getStatusHistory()
    {
        return this.statushistory;
    }
    public void setStatusHistory(ims.clinical.vo.beans.CorrespondenceStatusVoBean[] value)
    {
        this.statushistory = value;
    }
    public ims.clinical.vo.beans.CorrespondenceProblemVoBean[] getProblems()
    {
        return this.problems;
    }
    public void setProblems(ims.clinical.vo.beans.CorrespondenceProblemVoBean[] value)
    {
        this.problems = value;
    }
    public ims.clinical.vo.beans.CorrespondenceDiagnosisCompVoBean[] getDiagnosesComplications()
    {
        return this.diagnosescomplications;
    }
    public void setDiagnosesComplications(ims.clinical.vo.beans.CorrespondenceDiagnosisCompVoBean[] value)
    {
        this.diagnosescomplications = value;
    }
    public ims.clinical.vo.beans.CorrespondenceProcedureVoBean[] getProcedures()
    {
        return this.procedures;
    }
    public void setProcedures(ims.clinical.vo.beans.CorrespondenceProcedureVoBean[] value)
    {
        this.procedures = value;
    }
    public ims.vo.RefVoBean getMedication()
    {
        return this.medication;
    }
    public void setMedication(ims.vo.RefVoBean value)
    {
        this.medication = value;
    }
    public ims.clinical.vo.beans.ClinicalCorrespondenceRecipientsVoBean getPrimaryRecipient()
    {
        return this.primaryrecipient;
    }
    public void setPrimaryRecipient(ims.clinical.vo.beans.ClinicalCorrespondenceRecipientsVoBean value)
    {
        this.primaryrecipient = value;
    }
    public ims.framework.utils.beans.DateTimeBean getRUserVerifyingDateTime()
    {
        return this.ruserverifyingdatetime;
    }
    public void setRUserVerifyingDateTime(ims.framework.utils.beans.DateTimeBean value)
    {
        this.ruserverifyingdatetime = value;
    }
    public ims.core.vo.beans.MemberOfStaffLiteVoBean getRUserVerifyingHCP()
    {
        return this.ruserverifyinghcp;
    }
    public void setRUserVerifyingHCP(ims.core.vo.beans.MemberOfStaffLiteVoBean value)
    {
        this.ruserverifyinghcp = value;
    }
    // --- Backing fields (names intentionally all-lowercase, generator convention) ---
    private Integer id;
    private int version;
    private ims.core.vo.beans.ClinicalContactShortVoBean clinicalcontact;
    private ims.core.vo.beans.CareContextListVoBean carecontext;
    private ims.core.vo.beans.HcpLiteVoBean verifyinghcp;
    private ims.framework.utils.beans.DateTimeBean verifyingdatetime;
    private ims.clinical.vo.beans.CorrespondenceStatusVoBean currentstatus;
    private ims.clinical.vo.beans.OutpatientNotesOutcomeVoBean opdnotes;
    private Boolean hasobjectivenote;
    private Boolean hasplannote;
    private Boolean hasinstructionsnote;
    private ims.clinical.vo.beans.InpatientDischargeNoteFollowupVoBean inpatientdischargesummary;
    private ims.vo.LookupInstanceBean type;
    private ims.core.vo.beans.EpisodeofCareShortVoBean episodeofcare;
    private ims.core.vo.beans.AuthoringInformationVoBean authoringinformation;
    private ims.clinical.vo.beans.ClinicalCorrespondenceRecipientsVoBean[] recipients;
    private ims.clinical.vo.beans.CorrespondenceStatusVoBean[] statushistory;
    private ims.clinical.vo.beans.CorrespondenceProblemVoBean[] problems;
    private ims.clinical.vo.beans.CorrespondenceDiagnosisCompVoBean[] diagnosescomplications;
    private ims.clinical.vo.beans.CorrespondenceProcedureVoBean[] procedures;
    private ims.vo.RefVoBean medication;
    private ims.clinical.vo.beans.ClinicalCorrespondenceRecipientsVoBean primaryrecipient;
    private ims.framework.utils.beans.DateTimeBean ruserverifyingdatetime;
    private ims.core.vo.beans.MemberOfStaffLiteVoBean ruserverifyinghcp;
}
|
open-health-hub/openMAXIMS
|
openmaxims_workspace/ValueObjects/src/ims/clinical/vo/beans/ClinicalCorrespondenceVoBean.java
|
Java
|
agpl-3.0
| 14,671 |
//#############################################################################
//# #
//# Copyright (C) <2014> <IMS MAXIMS> #
//# #
//# This program is free software: you can redistribute it and/or modify #
//# it under the terms of the GNU Affero General Public License as #
//# published by the Free Software Foundation, either version 3 of the #
//# License, or (at your option) any later version. #
//# #
//# This program is distributed in the hope that it will be useful, #
//# but WITHOUT ANY WARRANTY; without even the implied warranty of #
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
//# GNU Affero General Public License for more details. #
//# #
//# You should have received a copy of the GNU Affero General Public License #
//# along with this program. If not, see <http://www.gnu.org/licenses/>. #
//# #
//#############################################################################
//#EOH
// This code was generated by Barbara Worwood using IMS Development Environment (version 1.80 build 5007.25751)
// Copyright (C) 1995-2014 IMS MAXIMS. All rights reserved.
// WARNING: DO NOT MODIFY the content of this file
package ims.icp.vo.beans;
/**
 * Generated serialization bean mirroring {@code ims.icp.vo.PatientICPPhase_PresentationVo}.
 * Nested VOs are converted to their bean counterparts; the ICP back-reference is carried
 * as a lightweight {@code RefVoBean} (id + version only). Generated by the IMS
 * Development Environment — do not edit by hand.
 */
public class PatientICPPhase_PresentationVoBean extends ims.vo.ValueObjectBean
{
    // No-arg constructor required for bean (de)serialization.
    public PatientICPPhase_PresentationVoBean()
    {
    }
    // Copies all properties WITHOUT an identity map (shared sub-objects get duplicated).
    public PatientICPPhase_PresentationVoBean(ims.icp.vo.PatientICPPhase_PresentationVo vo)
    {
        this.id = vo.getBoId();
        this.version = vo.getBoVersion();
        this.icp = vo.getICP() == null ? null : new ims.vo.RefVoBean(vo.getICP().getBoId(), vo.getICP().getBoVersion());
        this.phase = vo.getPhase() == null ? null : (ims.icp.vo.beans.ICPPhaseLiteVoBean)vo.getPhase().getBean();
        this.currentstatus = vo.getCurrentStatus() == null ? null : (ims.icp.vo.beans.PatientICPPhaseStatusVoBean)vo.getCurrentStatus().getBean();
        this.actions = vo.getActions() == null ? null : vo.getActions().getBeanCollection();
    }
    // Same copy, but shares the bean map so nested objects are converted once per identity.
    public void populate(ims.vo.ValueObjectBeanMap map, ims.icp.vo.PatientICPPhase_PresentationVo vo)
    {
        this.id = vo.getBoId();
        this.version = vo.getBoVersion();
        this.icp = vo.getICP() == null ? null : new ims.vo.RefVoBean(vo.getICP().getBoId(), vo.getICP().getBoVersion());
        this.phase = vo.getPhase() == null ? null : (ims.icp.vo.beans.ICPPhaseLiteVoBean)vo.getPhase().getBean(map);
        this.currentstatus = vo.getCurrentStatus() == null ? null : (ims.icp.vo.beans.PatientICPPhaseStatusVoBean)vo.getCurrentStatus().getBean(map);
        this.actions = vo.getActions() == null ? null : vo.getActions().getBeanCollection();
    }
    // Convenience overload: bean-to-VO conversion with a fresh identity map.
    public ims.icp.vo.PatientICPPhase_PresentationVo buildVo()
    {
        return this.buildVo(new ims.vo.ValueObjectBeanMap());
    }
    // Bean-to-VO conversion; the map ensures one VO per bean even with shared references.
    // NOTE(review): a null map would NPE at addValueObject — callers must pass a non-null map.
    public ims.icp.vo.PatientICPPhase_PresentationVo buildVo(ims.vo.ValueObjectBeanMap map)
    {
        ims.icp.vo.PatientICPPhase_PresentationVo vo = null;
        if(map != null)
            vo = (ims.icp.vo.PatientICPPhase_PresentationVo)map.getValueObject(this);
        if(vo == null)
        {
            vo = new ims.icp.vo.PatientICPPhase_PresentationVo();
            map.addValueObject(this, vo);
            vo.populate(map, this);
        }
        return vo;
    }
    // --- Generated property accessors ---
    public Integer getId()
    {
        return this.id;
    }
    public void setId(Integer value)
    {
        this.id = value;
    }
    public int getVersion()
    {
        return this.version;
    }
    public void setVersion(int value)
    {
        this.version = value;
    }
    public ims.vo.RefVoBean getICP()
    {
        return this.icp;
    }
    public void setICP(ims.vo.RefVoBean value)
    {
        this.icp = value;
    }
    public ims.icp.vo.beans.ICPPhaseLiteVoBean getPhase()
    {
        return this.phase;
    }
    public void setPhase(ims.icp.vo.beans.ICPPhaseLiteVoBean value)
    {
        this.phase = value;
    }
    public ims.icp.vo.beans.PatientICPPhaseStatusVoBean getCurrentStatus()
    {
        return this.currentstatus;
    }
    public void setCurrentStatus(ims.icp.vo.beans.PatientICPPhaseStatusVoBean value)
    {
        this.currentstatus = value;
    }
    public ims.icp.vo.beans.PatientICPAction_PresentationVoBean[] getActions()
    {
        return this.actions;
    }
    public void setActions(ims.icp.vo.beans.PatientICPAction_PresentationVoBean[] value)
    {
        this.actions = value;
    }
    // --- Backing fields ---
    private Integer id;
    private int version;
    private ims.vo.RefVoBean icp;
    private ims.icp.vo.beans.ICPPhaseLiteVoBean phase;
    private ims.icp.vo.beans.PatientICPPhaseStatusVoBean currentstatus;
    private ims.icp.vo.beans.PatientICPAction_PresentationVoBean[] actions;
}
|
open-health-hub/openMAXIMS
|
openmaxims_workspace/ValueObjects/src/ims/icp/vo/beans/PatientICPPhase_PresentationVoBean.java
|
Java
|
agpl-3.0
| 4,836 |
//#############################################################################
//# #
//# Copyright (C) <2014> <IMS MAXIMS> #
//# #
//# This program is free software: you can redistribute it and/or modify #
//# it under the terms of the GNU Affero General Public License as #
//# published by the Free Software Foundation, either version 3 of the #
//# License, or (at your option) any later version. #
//# #
//# This program is distributed in the hope that it will be useful, #
//# but WITHOUT ANY WARRANTY; without even the implied warranty of #
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
//# GNU Affero General Public License for more details. #
//# #
//# You should have received a copy of the GNU Affero General Public License #
//# along with this program. If not, see <http://www.gnu.org/licenses/>. #
//# #
//#############################################################################
//#EOH
// This code was generated by Barbara Worwood using IMS Development Environment (version 1.80 build 5007.25751)
// Copyright (C) 1995-2014 IMS MAXIMS. All rights reserved.
// WARNING: DO NOT MODIFY the content of this file
package ims.ocrr.domain.base.impl;
import ims.domain.impl.DomainImpl;
/**
 * Generated base class for the ImportResultsDialog domain implementation.
 * Provides empty pre-save/pre-call validation hooks that concrete subclasses may
 * override; by default every argument is accepted without checks. Generated by the
 * IMS Development Environment — do not edit by hand.
 */
public abstract class BaseImportResultsDialogImpl extends DomainImpl implements ims.ocrr.domain.ImportResultsDialog, ims.domain.impl.Transactional
{
    private static final long serialVersionUID = 1L;

    // Validation hook for saveReferralAppointmentDetailsOrderInvestigationVo; no-op by default.
    @SuppressWarnings("unused")
    public void validatesaveReferralAppointmentDetailsOrderInvestigationVo(ims.ocrr.vo.ReferralAppointmentDetailsOrderInvestigationVo referralAppointmentDetailsOrderInvestigationVo)
    {
    }
    // Validation hook for listInvestigations; no-op by default.
    @SuppressWarnings("unused")
    public void validatelistInvestigations(ims.RefMan.vo.CatsReferralRefVo refCatsReferralRefVo)
    {
    }
}
|
open-health-hub/openMAXIMS
|
openmaxims_workspace/OCRR/src/ims/ocrr/domain/base/impl/BaseImportResultsDialogImpl.java
|
Java
|
agpl-3.0
| 2,290 |
//#############################################################################
//# #
//# Copyright (C) <2014> <IMS MAXIMS> #
//# #
//# This program is free software: you can redistribute it and/or modify #
//# it under the terms of the GNU Affero General Public License as #
//# published by the Free Software Foundation, either version 3 of the #
//# License, or (at your option) any later version. #
//# #
//# This program is distributed in the hope that it will be useful, #
//# but WITHOUT ANY WARRANTY; without even the implied warranty of #
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
//# GNU Affero General Public License for more details. #
//# #
//# You should have received a copy of the GNU Affero General Public License #
//# along with this program. If not, see <http://www.gnu.org/licenses/>. #
//# #
//#############################################################################
//#EOH
// This code was generated by Barbara Worwood using IMS Development Environment (version 1.80 build 5007.25751)
// Copyright (C) 1995-2014 IMS MAXIMS. All rights reserved.
// WARNING: DO NOT MODIFY the content of this file
package ims.ocrr.forms.orderinvstatushistory;
import java.io.Serializable;
import ims.framework.Context;
import ims.framework.FormName;
import ims.framework.FormAccessLogic;
/**
 * Generated access-logic base for the order-investigation status history form.
 * Wires the form's global context and engine handle, and decides whether the form is
 * reachable (only when an investigation selection exists in the OCRR global context).
 * Generated by the IMS Development Environment — do not edit by hand.
 */
public class BaseAccessLogic extends FormAccessLogic implements Serializable
{
    private static final long serialVersionUID = 1L;

    // Called by the framework to bind this access logic to its form context.
    public final void setContext(Context context, FormName formName)
    {
        form = new CurrentForm(new GlobalContext(context), new CurrentForms());
        engine = new CurrentEngine(formName);
    }
    // The form is accessible only when an investigation has been selected upstream.
    public boolean isAccessible()
    {
        if(!form.getGlobalContext().OCRR.getSelectedInvsIsNotNull())
            return false;
        return true;
    }
    // This form is never opened read-only.
    public boolean isReadOnly()
    {
        return false;
    }
    public CurrentEngine engine;
    public CurrentForm form;

    // Immutable holder pairing the form's global context with its child-form registry.
    public final static class CurrentForm implements Serializable
    {
        private static final long serialVersionUID = 1L;
        CurrentForm(GlobalContext globalcontext, CurrentForms forms)
        {
            this.globalcontext = globalcontext;
            this.forms = forms;
        }
        public final GlobalContext getGlobalContext()
        {
            return globalcontext;
        }
        public final CurrentForms getForms()
        {
            return forms;
        }
        private GlobalContext globalcontext;
        private CurrentForms forms;
    }
    // Thin wrapper exposing the framework form name to generated logic code.
    public final static class CurrentEngine implements Serializable
    {
        private static final long serialVersionUID = 1L;
        CurrentEngine(FormName formName)
        {
            this.formName = formName;
        }
        public final FormName getFormName()
        {
            return formName;
        }
        private FormName formName;
    }
    // Registry of related form names; empty for this form (no navigable child forms).
    public static final class CurrentForms implements Serializable
    {
        private static final long serialVersionUID = 1L;
        protected final class LocalFormName extends FormName
        {
            private static final long serialVersionUID = 1L;
            protected LocalFormName(int value)
            {
                super(value);
            }
        }
        private CurrentForms()
        {
        }
    }
}
|
open-health-hub/openMAXIMS
|
openmaxims_workspace/OCRR/src/ims/ocrr/forms/orderinvstatushistory/BaseAccessLogic.java
|
Java
|
agpl-3.0
| 3,507 |
/**
* ProcedimentoResumido.java
*
* This file was auto-generated from WSDL
* by the Apache Axis 1.4 Apr 22, 2006 (06:55:48 PDT) WSDL2Java emitter.
*/
package br.gov.mj.sislegis.app.seiws;
public class ProcedimentoResumido implements java.io.Serializable {
private java.lang.String idProcedimento;
private java.lang.String procedimentoFormatado;
private br.gov.mj.sislegis.app.seiws.TipoProcedimento tipoProcedimento;
public ProcedimentoResumido() {
}
public ProcedimentoResumido(
java.lang.String idProcedimento,
java.lang.String procedimentoFormatado,
br.gov.mj.sislegis.app.seiws.TipoProcedimento tipoProcedimento) {
this.idProcedimento = idProcedimento;
this.procedimentoFormatado = procedimentoFormatado;
this.tipoProcedimento = tipoProcedimento;
}
/**
* Gets the idProcedimento value for this ProcedimentoResumido.
*
* @return idProcedimento
*/
public java.lang.String getIdProcedimento() {
return idProcedimento;
}
/**
* Sets the idProcedimento value for this ProcedimentoResumido.
*
* @param idProcedimento
*/
public void setIdProcedimento(java.lang.String idProcedimento) {
this.idProcedimento = idProcedimento;
}
/**
* Gets the procedimentoFormatado value for this ProcedimentoResumido.
*
* @return procedimentoFormatado
*/
public java.lang.String getProcedimentoFormatado() {
return procedimentoFormatado;
}
/**
* Sets the procedimentoFormatado value for this ProcedimentoResumido.
*
* @param procedimentoFormatado
*/
public void setProcedimentoFormatado(java.lang.String procedimentoFormatado) {
this.procedimentoFormatado = procedimentoFormatado;
}
/**
* Gets the tipoProcedimento value for this ProcedimentoResumido.
*
* @return tipoProcedimento
*/
public br.gov.mj.sislegis.app.seiws.TipoProcedimento getTipoProcedimento() {
return tipoProcedimento;
}
/**
* Sets the tipoProcedimento value for this ProcedimentoResumido.
*
* @param tipoProcedimento
*/
public void setTipoProcedimento(br.gov.mj.sislegis.app.seiws.TipoProcedimento tipoProcedimento) {
this.tipoProcedimento = tipoProcedimento;
}
private java.lang.Object __equalsCalc = null;
public synchronized boolean equals(java.lang.Object obj) {
if (!(obj instanceof ProcedimentoResumido)) return false;
ProcedimentoResumido other = (ProcedimentoResumido) obj;
if (obj == null) return false;
if (this == obj) return true;
if (__equalsCalc != null) {
return (__equalsCalc == obj);
}
__equalsCalc = obj;
boolean _equals;
_equals = true &&
((this.idProcedimento==null && other.getIdProcedimento()==null) ||
(this.idProcedimento!=null &&
this.idProcedimento.equals(other.getIdProcedimento()))) &&
((this.procedimentoFormatado==null && other.getProcedimentoFormatado()==null) ||
(this.procedimentoFormatado!=null &&
this.procedimentoFormatado.equals(other.getProcedimentoFormatado()))) &&
((this.tipoProcedimento==null && other.getTipoProcedimento()==null) ||
(this.tipoProcedimento!=null &&
this.tipoProcedimento.equals(other.getTipoProcedimento())));
__equalsCalc = null;
return _equals;
}
private boolean __hashCodeCalc = false;
public synchronized int hashCode() {
if (__hashCodeCalc) {
return 0;
}
__hashCodeCalc = true;
int _hashCode = 1;
if (getIdProcedimento() != null) {
_hashCode += getIdProcedimento().hashCode();
}
if (getProcedimentoFormatado() != null) {
_hashCode += getProcedimentoFormatado().hashCode();
}
if (getTipoProcedimento() != null) {
_hashCode += getTipoProcedimento().hashCode();
}
__hashCodeCalc = false;
return _hashCode;
}
// Type metadata
private static org.apache.axis.description.TypeDesc typeDesc =
new org.apache.axis.description.TypeDesc(ProcedimentoResumido.class, true);
static {
typeDesc.setXmlType(new javax.xml.namespace.QName("Sei", "ProcedimentoResumido"));
org.apache.axis.description.ElementDesc elemField = new org.apache.axis.description.ElementDesc();
elemField.setFieldName("idProcedimento");
elemField.setXmlName(new javax.xml.namespace.QName("", "IdProcedimento"));
elemField.setXmlType(new javax.xml.namespace.QName("http://www.w3.org/2001/XMLSchema", "string"));
elemField.setNillable(false);
typeDesc.addFieldDesc(elemField);
elemField = new org.apache.axis.description.ElementDesc();
elemField.setFieldName("procedimentoFormatado");
elemField.setXmlName(new javax.xml.namespace.QName("", "ProcedimentoFormatado"));
elemField.setXmlType(new javax.xml.namespace.QName("http://www.w3.org/2001/XMLSchema", "string"));
elemField.setNillable(false);
typeDesc.addFieldDesc(elemField);
elemField = new org.apache.axis.description.ElementDesc();
elemField.setFieldName("tipoProcedimento");
elemField.setXmlName(new javax.xml.namespace.QName("", "TipoProcedimento"));
elemField.setXmlType(new javax.xml.namespace.QName("Sei", "TipoProcedimento"));
elemField.setNillable(false);
typeDesc.addFieldDesc(elemField);
}
/**
 * Return type metadata object.
 *
 * @return the shared Axis {@code TypeDesc} describing how this bean maps
 *         to its XML schema type
 */
public static org.apache.axis.description.TypeDesc getTypeDesc() {
    return typeDesc;
}
/**
 * Get Custom Serializer.
 *
 * Factory invoked by the Axis runtime; {@code mechType} is accepted for
 * the factory signature but not used — this bean always serializes
 * through a {@code BeanSerializer} driven by the shared type metadata.
 */
public static org.apache.axis.encoding.Serializer getSerializer(
        java.lang.String mechType,
        java.lang.Class _javaType,
        javax.xml.namespace.QName _xmlType) {
    return new org.apache.axis.encoding.ser.BeanSerializer(_javaType, _xmlType, typeDesc);
}
/**
 * Get Custom Deserializer.
 *
 * Factory invoked by the Axis runtime; {@code mechType} is accepted for
 * the factory signature but not used — this bean always deserializes
 * through a {@code BeanDeserializer} driven by the shared type metadata.
 */
public static org.apache.axis.encoding.Deserializer getDeserializer(
        java.lang.String mechType,
        java.lang.Class _javaType,
        javax.xml.namespace.QName _xmlType) {
    return new org.apache.axis.encoding.ser.BeanDeserializer(_javaType, _xmlType, typeDesc);
}
}
|
paulojeronimo/sislegis-app
|
src/main/java/br/gov/mj/sislegis/app/seiws/ProcedimentoResumido.java
|
Java
|
agpl-3.0
| 6,557 |
//#############################################################################
//# #
//# Copyright (C) <2014> <IMS MAXIMS> #
//# #
//# This program is free software: you can redistribute it and/or modify #
//# it under the terms of the GNU Affero General Public License as #
//# published by the Free Software Foundation, either version 3 of the #
//# License, or (at your option) any later version. #
//# #
//# This program is distributed in the hope that it will be useful, #
//# but WITHOUT ANY WARRANTY; without even the implied warranty of #
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
//# GNU Affero General Public License for more details. #
//# #
//# You should have received a copy of the GNU Affero General Public License #
//# along with this program. If not, see <http://www.gnu.org/licenses/>. #
//# #
//#############################################################################
//#EOH
// This code was generated by Barbara Worwood using IMS Development Environment (version 1.80 build 5007.25751)
// Copyright (C) 1995-2014 IMS MAXIMS. All rights reserved.
// WARNING: DO NOT MODIFY the content of this file
package ims.core.vo;
/**
 * Value object holding one risk-assessment row for the patient summary
 * overview: the assessment type, the date it was performed, the recording
 * HCP and the resulting score.
 * <p>
 * Generated by the IMS Development Environment (see file header: do not
 * hand-modify). Follows the standard generated-VO pattern: bean
 * round-tripping, re-entrancy-guarded validation/cloning, and
 * compareTo ordering.
 */
public class RiskAssessmentForSummaryOverviewVo extends ims.vo.ValueObject implements ims.vo.ImsCloneable, Comparable
{
private static final long serialVersionUID = 1L;
/** Default constructor: all fields start as null. */
public RiskAssessmentForSummaryOverviewVo()
{
}
/** Builds the VO from its transport bean without a shared instance map. */
public RiskAssessmentForSummaryOverviewVo(ims.core.vo.beans.RiskAssessmentForSummaryOverviewVoBean bean)
{
this.assessmenttype = bean.getAssessmentType();
this.date = bean.getDate() == null ? null : bean.getDate().buildDate();
this.hcp = bean.getHcp() == null ? null : bean.getHcp().buildVo();
this.score = bean.getScore();
}
/**
 * Re-populates this VO from a bean; nested VOs are resolved through the
 * shared map so repeated references map to the same instance.
 */
public void populate(ims.vo.ValueObjectBeanMap map, ims.core.vo.beans.RiskAssessmentForSummaryOverviewVoBean bean)
{
this.assessmenttype = bean.getAssessmentType();
this.date = bean.getDate() == null ? null : bean.getDate().buildDate();
this.hcp = bean.getHcp() == null ? null : bean.getHcp().buildVo(map);
this.score = bean.getScore();
}
/** Converts this VO to its bean form using a fresh bean map. */
public ims.vo.ValueObjectBean getBean()
{
return this.getBean(new ims.vo.ValueObjectBeanMap());
}
/**
 * Converts this VO to its bean form. The map caches beans per VO so a VO
 * appearing more than once in the object graph yields one bean instance.
 */
public ims.vo.ValueObjectBean getBean(ims.vo.ValueObjectBeanMap map)
{
ims.core.vo.beans.RiskAssessmentForSummaryOverviewVoBean bean = null;
if(map != null)
bean = (ims.core.vo.beans.RiskAssessmentForSummaryOverviewVoBean)map.getValueObjectBean(this);
if (bean == null)
{
bean = new ims.core.vo.beans.RiskAssessmentForSummaryOverviewVoBean();
map.addValueObjectBean(this, bean);
bean.populate(map, this);
}
return bean;
}
/** True when an assessment type has been set. */
public boolean getAssessmentTypeIsNotNull()
{
return this.assessmenttype != null;
}
public Integer getAssessmentType()
{
return this.assessmenttype;
}
/** Setting any field invalidates the cached validation state. */
public void setAssessmentType(Integer value)
{
this.isValidated = false;
this.assessmenttype = value;
}
/** True when an assessment date has been set. */
public boolean getDateIsNotNull()
{
return this.date != null;
}
public ims.framework.utils.Date getDate()
{
return this.date;
}
public void setDate(ims.framework.utils.Date value)
{
this.isValidated = false;
this.date = value;
}
/** True when a recording HCP has been set. */
public boolean getHcpIsNotNull()
{
return this.hcp != null;
}
public ims.core.vo.HcpLiteVo getHcp()
{
return this.hcp;
}
public void setHcp(ims.core.vo.HcpLiteVo value)
{
this.isValidated = false;
this.hcp = value;
}
/** True when a score has been set. */
public boolean getScoreIsNotNull()
{
return this.score != null;
}
public String getScore()
{
return this.score;
}
/** Maximum persisted length of the score field. */
public static int getScoreMaxLength()
{
return 255;
}
public void setScore(String value)
{
this.isValidated = false;
this.score = value;
}
/** Display text for list items; delegates to toString(). */
public final String getIItemText()
{
return toString();
}
/** Not backed by a persistent business object, hence no BO id. */
public final Integer getBoId()
{
return null;
}
/** Not backed by a persistent business object, hence no BO class. */
public final String getBoClassName()
{
return null;
}
/**
 * Returns the cached validation state. The isBusy flag short-circuits
 * re-entrant calls (cyclic VO graphs) by reporting "valid".
 */
public boolean isValidated()
{
if(this.isBusy)
return true;
this.isBusy = true;
if(!this.isValidated)
{
this.isBusy = false;
return false;
}
this.isBusy = false;
return true;
}
public String[] validate()
{
return validate(null);
}
/**
 * Validates this VO, appending to any pre-existing errors. This VO has
 * no mandatory-field rules of its own, so the result is null (and the
 * VO is marked validated) unless existingErrors were passed in.
 */
public String[] validate(String[] existingErrors)
{
if(this.isBusy)
return null;
this.isBusy = true;
java.util.ArrayList<String> listOfErrors = new java.util.ArrayList<String>();
if(existingErrors != null)
{
for(int x = 0; x < existingErrors.length; x++)
{
listOfErrors.add(existingErrors[x]);
}
}
int errorCount = listOfErrors.size();
if(errorCount == 0)
{
this.isBusy = false;
this.isValidated = true;
return null;
}
String[] result = new String[errorCount];
for(int x = 0; x < errorCount; x++)
result[x] = (String)listOfErrors.get(x);
this.isBusy = false;
this.isValidated = false;
return result;
}
/**
 * Deep copy: date and hcp are cloned, assessmenttype/score are shared
 * (immutable). The isBusy flag breaks cycles by returning this instance
 * on re-entrant calls.
 */
public Object clone()
{
if(this.isBusy)
return this;
this.isBusy = true;
RiskAssessmentForSummaryOverviewVo clone = new RiskAssessmentForSummaryOverviewVo();
clone.assessmenttype = this.assessmenttype;
if(this.date == null)
clone.date = null;
else
clone.date = (ims.framework.utils.Date)this.date.clone();
if(this.hcp == null)
clone.hcp = null;
else
clone.hcp = (ims.core.vo.HcpLiteVo)this.hcp.clone();
clone.score = this.score;
clone.isValidated = this.isValidated;
this.isBusy = false;
return clone;
}
public int compareTo(Object obj)
{
return compareTo(obj, true);
}
/**
 * Orders by assessment type only (nulls sort after non-nulls). The
 * caseInsensitive flag is accepted for interface uniformity but unused
 * here since the compared field is an Integer.
 */
public int compareTo(Object obj, boolean caseInsensitive)
{
if (obj == null)
{
return -1;
}
if(caseInsensitive); // this is to avoid eclipse warning only.
if (!(RiskAssessmentForSummaryOverviewVo.class.isAssignableFrom(obj.getClass())))
{
throw new ClassCastException("A RiskAssessmentForSummaryOverviewVo object cannot be compared an Object of type " + obj.getClass().getName());
}
RiskAssessmentForSummaryOverviewVo compareObj = (RiskAssessmentForSummaryOverviewVo)obj;
int retVal = 0;
if (retVal == 0)
{
if(this.getAssessmentType() == null && compareObj.getAssessmentType() != null)
return -1;
if(this.getAssessmentType() != null && compareObj.getAssessmentType() == null)
return 1;
if(this.getAssessmentType() != null && compareObj.getAssessmentType() != null)
retVal = this.getAssessmentType().compareTo(compareObj.getAssessmentType());
}
return retVal;
}
public synchronized static int generateValueObjectUniqueID()
{
return ims.vo.ValueObject.generateUniqueID();
}
/** Number of the four fields that currently hold a value. */
public int countFieldsWithValue()
{
int count = 0;
if(this.assessmenttype != null)
count++;
if(this.date != null)
count++;
if(this.hcp != null)
count++;
if(this.score != null)
count++;
return count;
}
/** Total number of fields declared on this VO. */
public int countValueObjectFields()
{
return 4;
}
// Lookup id of the assessment type (enumeration value).
protected Integer assessmenttype;
// Date the assessment was performed.
protected ims.framework.utils.Date date;
// Health-care professional who recorded the assessment.
protected ims.core.vo.HcpLiteVo hcp;
// Free-text score, max 255 characters.
protected String score;
// Cached validation state; reset by every setter.
private boolean isValidated = false;
// Re-entrancy guard for isValidated()/validate()/clone() on cyclic graphs.
private boolean isBusy = false;
}
|
open-health-hub/openMAXIMS
|
openmaxims_workspace/ValueObjects/src/ims/core/vo/RiskAssessmentForSummaryOverviewVo.java
|
Java
|
agpl-3.0
| 7,442 |
/*************** <auto-copyright.pl BEGIN do not edit this line> **************
*
* VR Juggler is (C) Copyright 1998-2011 by Iowa State University
*
* Original Authors:
* Allen Bierbaum, Christopher Just,
* Patrick Hartling, Kevin Meinert,
* Carolina Cruz-Neira, Albert Baker
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*************** <auto-copyright.pl END do not edit this line> ***************/
package org.vrjuggler.tweek.services;
import org.vrjuggler.tweek.beans.BeanRegistrationEvent;
import org.vrjuggler.tweek.beans.BeanRegistrationListener;
/**
 * This interface provides access to the global preferences for the entire
 * base system. Access is granted through a singleton interface.
 */
public interface GlobalPreferencesService
extends BeanRegistrationListener
{
/** Version of the preferences file format written by save(). */
public static final double PREFS_VERSION_VALUE = 1.2;
/**
 * Returns the name of the directory where Tweek-specific data files and
 * preferences should be stored. This will be rooted under the
 * platform-specific application data directory, as returned by
 * EnvironmentService.getAppDataDir().
 *
 * @see EnvironmentService
 */
public String getPrefsDir();
/**
 * Called by the BeanRegistry singleton whenever a new bean is registered
 * with it.
 *
 * @param evt the event describing the bean that got registered
 */
public void beanRegistered(BeanRegistrationEvent evt);
/** Inclusive lower bound for the user-level preference. */
public static final int MIN_USER_LEVEL = 1;
/** Inclusive upper bound for the user-level preference. */
public static final int MAX_USER_LEVEL = 10;
/** Token meaning "start file choosers in the current working directory". */
public static final String CWD_START = "<cwd>";
/** Token meaning "start file choosers in the user's home directory". */
public static final String HOME_START = "<home>";
/** Default chooser start-directory token. */
public static final String DEFAULT_START = CWD_START;
/**
 * Returns a list of the basic starting directories from which the user may
 * choose. This does not necessarily include the user's current preference.
 * A separate check should be done to determine how to handle the user's
 * preferred start directory versus the default possible choices.
 *
 * @see #getChooserStartDir()
 * @see #setChooserStartDir(String)
 */
public java.util.List getStartDirList();
/** Sets the user level (expected range MIN_USER_LEVEL..MAX_USER_LEVEL). */
public void setUserLevel(int level);
public int getUserLevel();
/** Sets the Swing look-and-feel class name preference. */
public void setLookAndFeel(String laf);
public String getLookAndFeel();
/** Returns the known Bean viewer names. */
public java.util.Vector getBeanViewers();
/** Sets the preferred Bean viewer by name. */
public void setBeanViewer(String v);
public String getBeanViewer();
/** Sets the preferred main-window width in pixels. */
public void setWindowWidth(int width);
public int getWindowWidth();
/** Sets the preferred main-window height in pixels. */
public void setWindowHeight(int height);
public int getWindowHeight();
/**
 * Sets the preferred start directory for file choosers. It is up to the
 * code that opens file choosers to act on this preference. The given
 * directory may be one of the default possible choices, or it may be a
 * user-entered string, possibly containing one or more environment
 * variables.
 */
public void setChooserStartDir(String d);
/**
 * Returns the user's current preferred starting directory for file
 * choosers. The string returned is the actual preferred path rather than
 * the internal preference setting.
 *
 * @return A string that represents a path on the local system. This can
 *         be passed directly to the java.io.File constructor, for example.
 */
public String getChooserStartDir();
/**
 * Returns the "raw" version of the user's current preferred starting
 * directory for file choosers. This may be one of the default list, or it
 * may be a user-defined string (an actual path).
 */
public String getRawChooserStartDir();
/**
 * Sets the user's preference for lazy Panel Bean instantiation. This
 * defines whether Panel Beans are instantiated upon discovery or upon
 * first interaction.
 */
public void setLazyPanelBeanInstantiation(boolean enabled);
public boolean getLazyPanelBeanInstantiation();
/**
 * Sets the default CORBA naming service host.
 * This method was renamed from setDefaultCorbaHost() in version 1.3.4.
 *
 * @since 1.3.4
 */
public void setDefaultNamingServiceHost(String host);
/**
 * Returns the default CORBA naming service host.
 * This method was renamed from getDefaultCorbaHost() in version 1.3.4.
 *
 * @since 1.3.4
 */
public String getDefaultNamingServiceHost();
/**
 * Sets the default CORBA naming service port.
 * This method was renamed from setDefaultCorbaPort() in version 1.3.4.
 *
 * @since 1.3.4
 */
public void setDefaultNamingServicePort(int port);
/**
 * Returns the default CORBA naming service port.
 * This method was renamed from getDefaultCorbaPort() in version 1.3.4.
 *
 * @since 1.3.4
 */
public int getDefaultNamingServicePort();
/** Sets the default IIOP protocol version string. */
public void setDefaultIiopVersion(String version);
public String getDefaultIiopVersion();
/**
 * Sets the default GIOP end point host address.
 *
 * @since 1.3.4
 */
public void setDefaultOrbAddress(String addr);
/**
 * Returns the default GIOP end point host address.
 *
 * @since 1.3.4
 */
public String getDefaultOrbAddress();
/**
 * Sets the default GIOP end point port number.
 *
 * @since 1.3.4
 */
public void setDefaultOrbPort(int port);
/**
 * Returns the default GIOP end point port number.
 *
 * @since 1.3.4
 */
public int getDefaultOrbPort();
/**
 * Changes the default preferences file name to be the given name.
 */
public void setFileName(String name);
/**
 * Changes the default preferences file object to use the given object.
 */
public void setFile(java.io.File prefsFile);
/**
 * Loads the user's preferences file if one exists. If the user has not
 * defined a preferences file (i.e., the named preferences file does not
 * exist), one is created.
 */
public void load();
/**
 * Saves the current preferences document.
 */
public void save();
}
|
MichaelMcDonnell/vrjuggler
|
modules/tweek/java/org/vrjuggler/tweek/services/GlobalPreferencesService.java
|
Java
|
lgpl-2.1
| 6,487 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.