code stringlengths 3 1.04M | repo_name stringlengths 5 109 | path stringlengths 6 306 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.04M |
---|---|---|---|---|---|
/******************************************************************************
* Copyright 刘大磊 2013-07-01 *
* Author: 刘大磊 *
* Tel: 13336390671 *
* email: ldlqdsd@126.com *
*****************************************************************************/
package com.delmar.core.service;
import com.delmar.core.model.CorePage;
import com.delmar.core.service.CoreService;
/**
* @author 刘大磊 2016-08-26 17:08:24
*/
public interface CorePageService extends CoreService<CorePage> {
/**
* @param ids the ids of the CorePage records to delete
*/
void deleteCorePageList(Integer[] ids);
} | ldlqdsdcn/wms | core/src/main/java/com/delmar/core/service/CorePageService.java | Java | apache-2.0 | 729 |
package de.hsmainz.pubapp.geocoder.controller;
import com.google.gson.Gson;
import de.hsmainz.pubapp.geocoder.model.ClientInputJson;
import de.hsmainz.pubapp.geocoder.model.ErrorJson;
import de.hsmainz.pubapp.geocoder.model.geojson.GeoJsonCollection;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Locale;
import java.util.ResourceBundle;
/**
* Abstract base class for requests to the different geocoder APIs
*
* @author Arno
* @since 15.12.2016
*/
public abstract class HttpAPIRequest {
//****************************************
// CONSTANTS
//****************************************
static final ResourceBundle lables = ResourceBundle.getBundle("lable", Locale.getDefault());
static final Logger logger = LogManager.getLogger(HttpAPIRequest.class);
//****************************************
// VARIABLES
//****************************************
Gson gson = new Gson();
//****************************************
// INIT/CONSTRUCTOR
//****************************************
//****************************************
// GETTER/SETTER
//****************************************
//****************************************
// PUBLIC METHODS
//****************************************
/**
* Executes request to geocoder API and creates GeoJSON. Custom ClientJson is used for the input
*
* @param inputJson the request parameters combined in a custom ClientJson
* @return API response converted to a String
*/
public String requestGeocoder(ClientInputJson inputJson) {
String returnString;
if (!validateInput(inputJson)) {
returnString = gson.toJson(new ErrorJson(lables.getString("message_Input_Empty")));
} else {
returnString = requestGeocoder(inputJson.getQueryString(), inputJson.getLocale());
}
return returnString;
}
/**
* Executes request to geocoder API and creates GeoJSON
*
* @param queryString the string containing the address
* @param locale the string defining the used language
* @return API response converted to a String
*/
public String requestGeocoder(String queryString, String locale) {
String returnString;
if (!validateInput(queryString)) {
returnString = gson.toJson(new ErrorJson(lables.getString("message_Input_Empty")));
} else {
try {
URI uri = buildUri(queryString, locale);
returnString = request(uri);
} catch (URISyntaxException e) {
logger.catching(e);
returnString = gson.toJson(new ErrorJson(lables.getString("error_incorrect_URI")));
}
}
return returnString;
}
//****************************************
// PRIVATE METHODS
//****************************************
/**
* Creates the URI for API request
*
* @param queryString the string containing the address
* @param locale the string defining the used language
* @return URI for the geocoder request to the concrete provider API
*/
abstract URI buildUri(String queryString, String locale) throws URISyntaxException;
/**
* Executes the request to the API
*
* @param uri the geocoder URL
* @return the requested geoJSON
* @throws IOException if the request fails
*/
abstract GeoJsonCollection doHttpGet(URI uri) throws IOException;
/**
* Executes the request to the API, catching exceptions and converting them into ErrorJSONs
*
* @param uri the geocoder request URI
* @return returns the GeoJSON or ErrorJSON as a String
*/
String request(URI uri) {
String returnString;
try {
GeoJsonCollection geoJsonCollection = doHttpGet(uri);
if (validateOutput(geoJsonCollection)) {
returnString = gson.toJson(geoJsonCollection);
} else {
returnString = gson.toJson(new ErrorJson(lables.getString("message_no_location")));
}
} catch (IOException e) {
logger.catching(e);
returnString = gson.toJson(new ErrorJson(lables.getString("error_API_request_Faild")));
}
return returnString;
}
/**
* Validates the input to reduce unnecessary requests to the API
*
* @param inputJson the InputJSON to be validated
* @return returns true if InputJSON is valid
*/
boolean validateInput(ClientInputJson inputJson) {
boolean returnValue = true;
if (inputJson.getQueryString() == null || inputJson.getQueryString().isEmpty()) {
returnValue = false;
}
if (inputJson.getLocale() == null || inputJson.getLocale().isEmpty()) {
returnValue = false;
}
return returnValue;
}
/**
* Validates the input to reduce unnecessary requests to the API
*
* @param inputString the Input String to be validated
* @return true if Input String is not Empty
*/
boolean validateInput(String inputString) {
boolean returnValue = true;
if (inputString == null || inputString.isEmpty()) {
returnValue = false;
}
return returnValue;
}
/**
* validates the output from the API
*
* @param geoJsonCollection the API outputJSON to be validated
* @return returns true if the outputJSON is not empty
*/
private boolean validateOutput(GeoJsonCollection geoJsonCollection) {
return !geoJsonCollection.getFeatures().isEmpty();
}
//****************************************
// INNER CLASSES
//****************************************
}
| ArnoHeid/PubApp | geocoder/src/main/java/de/hsmainz/pubapp/geocoder/controller/HttpAPIRequest.java | Java | apache-2.0 | 5,824 |
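The abstract class above leaves buildUri and doHttpGet to concrete providers. As a rough illustration of that extension point, the following hypothetical subclass (not part of the repository; the class name and example host are invented) shows how a provider could assemble its query URI while reusing the shared requestGeocoder validation and error handling. The doHttpGet body is deliberately left unimplemented because it depends on the provider's response format.

package de.hsmainz.pubapp.geocoder.controller;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import de.hsmainz.pubapp.geocoder.model.geojson.GeoJsonCollection;

/** Hypothetical provider used only to illustrate the abstract contract. */
public class HttpExampleGeocoderRequest extends HttpAPIRequest {

    @Override
    URI buildUri(String queryString, String locale) throws URISyntaxException {
        // Assumed endpoint; a real provider would use its documented URL and parameters.
        return new URI("https", "geocoder.example.org", "/api",
                "q=" + queryString + "&locale=" + locale, null);
    }

    @Override
    GeoJsonCollection doHttpGet(URI uri) throws IOException {
        // A real implementation would execute the GET request and map the JSON
        // response onto a GeoJsonCollection; omitted in this sketch.
        throw new IOException("not implemented in this sketch");
    }
}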
package com.txtr.hibernatedelta.model;
import static javax.xml.bind.annotation.XmlAccessType.FIELD;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlElementWrapper;
import javax.xml.bind.annotation.XmlType;
import org.apache.commons.lang3.StringUtils;
@XmlAccessorType(FIELD)
@XmlType(propOrder = {"name", "columns", "explicitIndexes"})
public class HibernateTable implements IHibernateDatabaseObject {
@XmlAttribute
private String name;
@XmlElementWrapper(name = "columns")
@XmlElement(name = "column")
private List<HibernateColumn> columns = new ArrayList<HibernateColumn>();
@XmlElementWrapper(name = "indexes")
@XmlElement(name = "index")
private List<ExplicitHibernateIndex> explicitIndexes = new ArrayList<ExplicitHibernateIndex>();
@XmlAttribute
private String sequenceName;
@XmlAttribute
private boolean virtualRootTable;
public HibernateTable(String name, String sequenceName, boolean virtualRootTable) {
this.sequenceName = sequenceName;
this.virtualRootTable = virtualRootTable;
this.name = name;
}
@SuppressWarnings("UnusedDeclaration")
public HibernateTable() {
}
@Override
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<HibernateColumn> getColumns() {
return columns;
}
public List<ExplicitHibernateIndex> getExplicitIndexes() {
return explicitIndexes;
}
public void addColumn(HibernateColumn column) {
columns.add(column);
}
public HibernateColumn getColumn(String name) {
for (HibernateColumn column : columns) {
if (column.getName().equalsIgnoreCase(name)) {
return column;
}
}
throw new IllegalArgumentException("column not found: " + name);
}
public void addExplicitIndex(ExplicitHibernateIndex hibernateIndex) {
explicitIndexes.add(hibernateIndex);
}
public String getIndexPrefix() {
return StringUtils.left(name, 28);
}
public List<HibernateColumn> getPrimaryKeyColumns() {
List<HibernateColumn> result = new ArrayList<HibernateColumn>();
for (HibernateColumn column : columns) {
if (column.isPrimaryKey()) {
result.add(column);
}
}
return result;
}
public String getSequenceName() {
return sequenceName;
}
public boolean isVirtualRootTable() {
return virtualRootTable;
}
}
| storecast/hibernate-delta | src/main/java/com/txtr/hibernatedelta/model/HibernateTable.java | Java | apache-2.0 | 2,751 |
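A minimal usage sketch for the JAXB model class above, using only methods defined in this file; the table and sequence names are invented for illustration.

import com.txtr.hibernatedelta.model.HibernateTable;

public class HibernateTableSketch {
    public static void main(String[] args) {
        // name, sequence name, and whether this is a virtual root table
        HibernateTable table = new HibernateTable("CUSTOMER_ADDRESS_HISTORY_TABLE", "SEQ_CUSTOMER_ADDRESS", false);

        // Index names derived from this table are prefixed with at most 28 characters of the table name.
        System.out.println(table.getIndexPrefix()); // CUSTOMER_ADDRESS_HISTORY_TAB

        // Columns and explicit indexes are added via addColumn(...) and addExplicitIndex(...);
        // getColumn(...) throws IllegalArgumentException for unknown column names.
        System.out.println(table.getColumns().isEmpty()); // true
    }
}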
/**
*
*/
package me.learn.personal.month5;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* Title :
*
* Date : Dec 23, 2020
*
* @author bramanarayan
*
*/
public class WordBreakable {
/**
* @param args
*/
public static void main(String[] args) {
// TODO Auto-generated method stub
}
public boolean wordBreak(String s, List<String> wordDict) {
Set<String> wordDictSet = new HashSet<>(wordDict);
// dp[i] is true when the prefix s[0, i) can be segmented into dictionary words
boolean[] dp = new boolean[s.length() + 1];
dp[0] = true;
for (int i = 1; i <= s.length(); i++) {
for (int j = 0; j < i; j++) {
if (dp[j] && wordDictSet.contains(s.substring(j, i))) {
dp[i] = true;
break;
}
}
}
return dp[s.length()];
}
}
| balajiboggaram/algorithms | src/me/learn/personal/month5/WordBreakable.java | Java | apache-2.0 | 724 |
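A short, hypothetical driver for the dynamic-programming wordBreak method above (the sample words are invented); it simply shows the expected true/false results for a segmentable and a non-segmentable string.

package me.learn.personal.month5;

import java.util.Arrays;
import java.util.List;

public class WordBreakableDemo {
    public static void main(String[] args) {
        WordBreakable solver = new WordBreakable();
        List<String> dict = Arrays.asList("leet", "code");

        System.out.println(solver.wordBreak("leetcode", dict));  // true: "leet" + "code"
        System.out.println(solver.wordBreak("leetcodes", dict)); // false: trailing "s" is not covered
    }
}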
package web;
import graphUtil.CycleChainDecomposition;
import graphUtil.EdgeChain;
import ilog.concert.IloException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import kepLib.KepInstance;
import kepLib.KepProblemData;
import kepModeler.ChainsForcedRemainOpenOptions;
import kepModeler.KepModeler;
import kepModeler.ModelerInputs;
import kepModeler.ObjectiveMode;
import replicator.DonorEdge;
import threading.FixedThreadPool;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import data.Donor;
import data.ExchangeUnit;
import database.KidneyDataBase;
import exchangeGraph.CycleChainPackingSubtourElimination;
import exchangeGraph.SolverOption;
public class KidneyServerSolver {
private KidneyDataBase database;
private Map<String, ModelerInputs<ExchangeUnit, DonorEdge>> dataCache = new HashMap<String, ModelerInputs<ExchangeUnit, DonorEdge>>();
private Optional<FixedThreadPool> threadPool;
Optional<Double> maxSolveTimeMs = Optional.of(100.0);
public KidneyServerSolver(KidneyDataBase database,
Optional<FixedThreadPool> threadPool) {
this.database = database;
this.threadPool = threadPool;
}
public ImmutableList<String> availableDatasets() {
return database.availableDatasets();
}
public Map<Object, Object> getInputs(String databaseName) {
return flattenModelerInputs(getModelerInputs(databaseName));
}
public Map<Object, Object> getSolution(String databaseName)
throws IloException {
ModelerInputs<ExchangeUnit, DonorEdge> inputs = getModelerInputs(databaseName);
KepModeler modeler = new KepModeler(3, Integer.MAX_VALUE,
ChainsForcedRemainOpenOptions.none,
new ObjectiveMode.MaximumCardinalityMode());
KepInstance<ExchangeUnit, DonorEdge> instance = modeler.makeKepInstance(
inputs, null);
CycleChainPackingSubtourElimination<ExchangeUnit, DonorEdge> solver = new CycleChainPackingSubtourElimination<ExchangeUnit, DonorEdge>(
instance, true, maxSolveTimeMs, threadPool,
SolverOption.makeCheckedOptions(SolverOption.cutsetMode,
SolverOption.lazyConstraintCallback, SolverOption.userCutCallback));
solver.solve();
CycleChainDecomposition<ExchangeUnit, DonorEdge> solution = solver
.getSolution();
solver.cleanUp();
return flattenSolution(inputs.getKepProblemData(), solution);
}
private ModelerInputs<ExchangeUnit, DonorEdge> getModelerInputs(
String databaseName) {
if (this.dataCache.containsKey(databaseName)) {
return this.dataCache.get(databaseName);
} else {
ModelerInputs<ExchangeUnit, DonorEdge> inputs = database
.loadInputs(databaseName);
this.dataCache.put(databaseName, inputs);
return inputs;
}
}
public static Map<Object, Object> flattenModelerInputs(
ModelerInputs<ExchangeUnit, DonorEdge> inputs) {
Map<Object, Object> ans = new HashMap<Object, Object>();
List<Map<Object, Object>> flatUnits = Lists.newArrayList();
List<Map<Object, Object>> flatEdges = Lists.newArrayList();
for (ExchangeUnit unit : inputs.getKepProblemData().getGraph()
.getVertices()) {
flatUnits.add(flattenExchangeUnit(inputs, unit));
}
for (DonorEdge edge : inputs.getKepProblemData().getGraph().getEdges()) {
flatEdges.add(flattenDonorEdge(inputs.getKepProblemData(), edge));
}
ans.put("nodes", flatUnits);
ans.put("links", flatEdges);
return ans;
}
public static Map<Object, Object> flattenSolution(
KepProblemData<ExchangeUnit, DonorEdge> problemData,
CycleChainDecomposition<ExchangeUnit, DonorEdge> solution) {
Map<Object, Object> ans = new HashMap<Object, Object>();
List<Map<Object, Object>> flatEdges = Lists.newArrayList();
for (EdgeChain<DonorEdge> edgeChain : solution.getEdgeChains()) {
for (DonorEdge edge : edgeChain) {
flatEdges.add(flattenDonorEdge(problemData, edge));
}
}
ans.put("links", flatEdges);
return ans;
}
private static Map<Object, Object> flattenDonorEdge(
KepProblemData<ExchangeUnit, DonorEdge> kepProblemData, DonorEdge edge) {
Map<Object, Object> ans = new HashMap<Object, Object>();
ExchangeUnit source = kepProblemData.getGraph().getSource(edge);
ExchangeUnit dest = kepProblemData.getGraph().getDest(edge);
String sourceId = makeNodeId(kepProblemData, source);
String destId = makeNodeId(kepProblemData, dest);
ans.put("sourceId", sourceId);
ans.put("targetId", destId);
ans.put("id", sourceId + destId);
return ans;
}
private static Map<Object, Object> flattenExchangeUnit(
ModelerInputs<ExchangeUnit, DonorEdge> inputs, ExchangeUnit unit) {
Map<Object, Object> ans = new HashMap<Object, Object>();
ans.put("id", makeNodeId(inputs.getKepProblemData(), unit));
ans.put("type", makeType(inputs.getKepProblemData(), unit));
ans.put("reachable", true);
ans.put("sensitized", computeSensitization(inputs, unit));
return ans;
}
private static String makeNodeId(
KepProblemData<ExchangeUnit, DonorEdge> kepProblemData, ExchangeUnit unit) {
if (kepProblemData.getRootNodes().contains(unit)) {
return unit.getDonor().get(0).getId();
} else {
return unit.getReceiver().getId();
}
}
private static String makeType(
KepProblemData<ExchangeUnit, DonorEdge> kepProblemData, ExchangeUnit unit) {
if (kepProblemData.getRootNodes().contains(unit)) {
return "root";
} else if (kepProblemData.getPairedNodes().contains(unit)) {
return "paired";
} else if (kepProblemData.getTerminalNodes().contains(unit)) {
return "terminal";
} else {
throw new RuntimeException();
}
}
private static int computeSensitization(
ModelerInputs<ExchangeUnit, DonorEdge> inputs, ExchangeUnit unit) {
Map<ExchangeUnit, Double> donorPower = inputs.getAuxiliaryInputStatistics()
.getDonorPowerPostPreference();
Map<ExchangeUnit, Double> receiverPower = inputs
.getAuxiliaryInputStatistics().getReceiverPowerPostPreference();
// System.out.println(donorPower);
// System.out.println(receiverPower);
if (inputs.getKepProblemData().getRootNodes().contains(unit)) {
if (donorPower.containsKey(unit.getDonor().get(0))) {
return singlePersonSensitization(donorPower.get(unit.getDonor().get(0)));
} else {
// System.err.println("missing donor power data for: " + unit);
return 0;
}
} else if (inputs.getKepProblemData().getPairedNodes().contains(unit)) {
double unitDonorPower = 0;
for (Donor donor : unit.getDonor()) {
if (donorPower.containsKey(donor)) {
unitDonorPower += donorPower.get(donor);
} else {
// System.err.println("missing donor power data for: " + unit);
return 0;
}
}
if (receiverPower.containsKey(unit.getReceiver())) {
return twoPersonSensitization(unitDonorPower,
receiverPower.get(unit.getReceiver()));
} else {
// System.err.println("missing receiver power for: " + unit);
return 0;
}
} else if (inputs.getKepProblemData().getTerminalNodes().contains(unit)) {
if (receiverPower.containsKey(unit.getReceiver())) {
return singlePersonSensitization(receiverPower.get(unit.getReceiver()));
} else {
// System.err.println("missing receiver power for: " + unit);
return 0;
}
} else {
throw new RuntimeException();
}
}
private static int singlePersonSensitization(double matchPower) {
if (matchPower < .01) {
return 3;
} else if (matchPower < .08) {
return 2;
} else if (matchPower < .2) {
return 1;
} else {
return 0;
}
}
private static int twoPersonSensitization(double donorMatchPower,
double receiverMatchPower) {
double pmp = 10000 * donorMatchPower * receiverMatchPower;
if (pmp < .1) {
return 4;
} else if (pmp < 5) {
return 3;
} else if (pmp < 20) {
return 2;
} else if (pmp < 60) {
return 1;
} else {
return 0;
}
}
}
| rma350/kidneyExchange | kidneyMatching/src/web/KidneyServerSolver.java | Java | apache-2.0 | 8,249 |
package com.jaivox.ui.appmaker;
import java.io.*;
import java.util.*;
import bitpix.list.*;
public class Rule2Fsm {
static String dir = "./";
basicTree tree;
TreeMap <String, String> states;
TreeMap <String, String> tags;
static String name = "data/road1.tree";
static String yes = "yes";
String startState = "def";
static String casedefault = "(default) (def)";
static basicNode casedefaultnode;
Vector <String> store;
public Rule2Fsm () {
String filename = dir + name;
tree = new basicTree (filename);
// tree.WriteTree ();
states = new TreeMap <String, String> ();
tags = new TreeMap <String, String> ();
Vector <bitpix.list.basicNode> list = tree.Root.ListChild;
casedefaultnode = new basicNode (casedefault);
store = new Vector <String> ();
store.add ("\n#include errors.dlg\n");
for (int i=0; i<list.size (); i++) {
basicNode child = list.elementAt (i);
gt (child, startState);
}
int pos = filename.lastIndexOf (".");
String outfile = filename.substring (0, pos+1) + "dlg";
// writefile (outfile, store);
}
void Debug (String s) {
System.out.println ("[Rule2Fsm]" + s);
}
void gt (basicNode node, String sofar) {
Vector <bitpix.list.basicNode> list = node.ListChild;
if (list == null || list.size () == 0) {
// emit a state with def
emit (node, sofar, "def");
}
else {
String nextstate = createNextState (node);
String morefar = sofar + " " + nextstate;
emit (node, sofar, nextstate);
list.add (casedefaultnode);
for (int i=0; i<list.size (); i++) {
basicNode child = list.elementAt (i);
gt (child, morefar);
}
}
}
void emit (basicNode node, String sofar, String next) {
int pos = sofar.lastIndexOf (" ");
pos++;
String last = sofar.substring (pos);
String tag = sofar.replaceAll (" ", "_");
tag = tag + "_" + next;
tag = getuniquetag (tag);
StringBuffer sb = new StringBuffer ();
sb.append ("{\n["+tag+"]\n");
String t = (String)node.Tag;
if (t.trim ().length () == 0) return;
StringTokenizer st = new StringTokenizer (t, "()");
if (st.countTokens () < 2) {
Debug ("Don't have two tokens from "+t);
return;
}
String input = filter (st.nextToken ()).trim ();
String output = filter (st.nextToken ()).trim ();
while (output.length () == 0)
output = filter (st.nextToken ()).trim ();
// Debug ("tag="+t+" / input="+input+" output="+output);
// sb.append ("\t"+sofar+" ;\n");
// with Gui2Gram, convert input and output to use dotted head tag form
String indot = input.replaceAll (" ", ".");
String outdot = output.replaceAll (" ", ".");
sb.append ("\t"+last+" ;\n");
// sb.append ("\t"+input+" ;\n");
// sb.append ("\t"+output+" ;\n");
sb.append ("\t"+indot+" ;\n");
sb.append ("\t"+outdot+" ;\n");
sb.append ("\t"+next+" ;\n");
sb.append ("}\n");
String all = new String (sb);
store.add (all);
// System.out.println (all);
}
static String filter (String line) {
return Gui2Gram.filter (line);
}
String createNextState (basicNode node) {
String tag = (String)(node.Tag);
StringTokenizer st = new StringTokenizer (tag, "()");
if (st.countTokens () < 2) {
Debug ("don't have two tokens in "+tag);
return "def";
}
String input = st.nextToken ().trim ();
String output = st.nextToken ().trim ();
while (output.length () == 0)
output = st.nextToken ().trim ();
StringTokenizer tt = new StringTokenizer (output);
int n = tt.countTokens ();
StringBuffer sb = new StringBuffer ();
for (int i=0; i<Math.min (n, 3); i++) {
String token = tt.nextToken ();
sb.append (token.charAt (0));
}
if (n < 3) {
for (int j=n; j<3; j++) {
sb.append ('x');
}
}
String s = new String (sb);
String test = states.get (s);
if (test != null) {
for (int i=1; i<10; i++) {
String next = s + i;
if (states.get (next) == null) {
s = next;
break;
}
}
}
states.put (s, yes);
return s;
}
String getuniquetag (String in) {
if (tags.get (in) == null) {
tags.put (in, yes);
return in;
}
else {
for (int i=1; i<99; i++) {
String next = in+"_"+i;
if (tags.get (next) != null) {
continue;
}
tags.put (next, yes);
return next;
}
Debug ("More than 99 tags starting with "+in);
return "error";
}
}
void writeRules (PrintWriter out) {
try {
for (int i=0; i<store.size (); i++) {
out.println (store.elementAt (i));
}
}
catch (Exception e) {
e.printStackTrace ();
}
}
}
| jaivox/tools | v2/com/jaivox/ui/appmaker/Rule2Fsm.java | Java | apache-2.0 | 4,520 |
/*
* Copyright (C) 2012 Jason Gedge <http://www.gedge.ca>
*
* This file is part of the OpGraph project.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Provides commands for the Application API.
*/
package ca.phon.opgraph.app.commands;
| ghedlund/opgraph | app/src/main/java/ca/phon/opgraph/app/commands/package-info.java | Java | apache-2.0 | 855 |
/*
* Copyright 2013 JCertifLab.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jcertif.android.fragments;
import android.app.Activity;
import android.content.Intent;
import android.content.res.Configuration;
import android.net.Uri;
import android.os.Bundle;
import android.provider.CalendarContract;
import android.provider.CalendarContract.Events;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.ListView;
import android.widget.Toast;
import com.actionbarsherlock.view.ActionMode;
import com.actionbarsherlock.view.Menu;
import com.actionbarsherlock.view.MenuInflater;
import com.actionbarsherlock.view.MenuItem;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.jcertif.android.JcertifApplication;
import com.jcertif.android.MainActivity;
import com.jcertif.android.R;
import com.jcertif.android.adapters.SessionAdapter;
import com.jcertif.android.adapters.SpeedScrollListener;
import com.jcertif.android.dao.SessionProvider;
import com.jcertif.android.dao.SpeakerProvider;
import com.jcertif.android.model.Session;
import com.jcertif.android.model.Speaker;
import com.jcertif.android.service.RESTService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.List;
import uk.co.senab.actionbarpulltorefresh.extras.actionbarsherlock.PullToRefreshAttacher;
/**
*
* @author Patrick Bashizi
*
*/
public class SessionListFragment extends RESTResponderFragment implements PullToRefreshAttacher.OnRefreshListener{
public static final String SESSIONS_LIST_URI = JcertifApplication.BASE_URL
+ "/session/list";
public static final String CATEGORY_LIST_URI = JcertifApplication.BASE_URL
+ "/ref/category/list";
private static String TAG = SessionListFragment.class.getName();
private List<Session> mSessions = new ArrayList<Session>();
private ListView mLvSessions;
private SessionAdapter mAdapter;
private SessionProvider mProvider;
private SpeedScrollListener mListener;
private ActionMode mActionMode;
private Session mSelectedSession;
private PullToRefreshAttacher mPullToRefreshAttacher ;
public SessionListFragment() {
// Empty constructor required for fragment subclasses
}
public interface OnSessionUpdatedListener {
void onSessionUpdated(Session session);
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
// setRetainInstance(true);
View rootView = inflater.inflate(R.layout.fragment_session, container,
false);
mLvSessions = (ListView) rootView.findViewById(R.id.lv_session);
String session = getResources().getStringArray(R.array.menu_array)[0];
setHasOptionsMenu(true);
getActivity().setTitle(session);
mPullToRefreshAttacher=((MainActivity)getSherlockActivity()).getmPullToRefreshAttacher();
mPullToRefreshAttacher.addRefreshableView(mLvSessions, this);
mLvSessions.setOnItemClickListener(new OnItemClickListener() {
@Override
public void onItemClick(AdapterView<?> parent, View view, int pos,
long position) {
mAdapter.setSelectedIndex(pos);
mSelectedSession = ((Session) parent
.getItemAtPosition((int) position));
updateSession(mSelectedSession);
}
});
mLvSessions
.setOnItemLongClickListener(new AdapterView.OnItemLongClickListener() {
@Override
public boolean onItemLongClick(AdapterView<?> arg0,
View arg1, int pos, long arg3) {
if (mActionMode != null) {
return false;
}
mActionMode = getSherlockActivity().startActionMode(
mActionModeCallback);
mSelectedSession = ((Session) arg0
.getItemAtPosition((int) pos));
mAdapter.setSelectedIndex(pos);
return true;
}
});
return rootView;
}
private ActionMode.Callback mActionModeCallback = new ActionMode.Callback() {
@Override
public boolean onCreateActionMode(ActionMode mode, Menu menu) {
MenuInflater inflater = mode.getMenuInflater();
inflater.inflate(R.menu.context_menu_session, menu);
return true;
}
@Override
public boolean onPrepareActionMode(ActionMode mode, Menu menu) {
return false;
}
@Override
public boolean onActionItemClicked(ActionMode mode, MenuItem item) {
switch (item.getItemId()) {
case R.id.menu_share:
shareSessionItem();
mode.finish(); // Action picked, so close the CAB
break;
case R.id.menu_add_to_schedule:
addSessionItemToSchedule();
mode.finish(); // Action picked, so close the CAB
break;
default:
return false;
}
return true;
}
public void onDestroyActionMode(ActionMode mode) {
mActionMode = null;
}
};
private void addSessionItemToSchedule() {
if (android.os.Build.VERSION.SDK_INT >= 14){
Intent intent = new Intent(Intent.ACTION_INSERT);
intent.setType("vnd.android.cursor.item/event");
intent.putExtra(Events.TITLE, mSelectedSession.getTitle());
intent.putExtra(Events.EVENT_LOCATION,"Room"+ mSelectedSession.getSalle());
intent.putExtra(Events.DESCRIPTION, mSelectedSession.getDescription());
Date evStartDate= mSelectedSession.getStart();
Date evEndDate= mSelectedSession.getStart();
// Setting dates
GregorianCalendar startcalDate = new GregorianCalendar();
startcalDate.setTime(evStartDate);
// Setting dates
GregorianCalendar endCalDate = new GregorianCalendar();
endCalDate.setTime(evEndDate);
intent.putExtra(CalendarContract.EXTRA_EVENT_BEGIN_TIME,startcalDate.getTimeInMillis());
intent.putExtra(CalendarContract.EXTRA_EVENT_END_TIME,endCalDate.getTimeInMillis());
// Make it a full day event
intent.putExtra(CalendarContract.EXTRA_EVENT_ALL_DAY, true);
// Make it a recurring Event
// intent.putExtra(Events.RRULE, "WKST=SU");
// Making it private and shown as busy
intent.putExtra(Events.ACCESS_LEVEL, Events.ACCESS_PRIVATE);
intent.putExtra(Events.AVAILABILITY, Events.AVAILABILITY_BUSY);
//intent.putExtra(Events.DISPLAY_COLOR, Events.EVENT_COLOR);
startActivity(intent);
}else{
Toast.makeText(this.getSherlockActivity(),
"Not supported for your device :(", Toast.LENGTH_SHORT).show();
}
}
private void shareSessionItem() {
Speaker sp = new SpeakerProvider(this.getSherlockActivity())
.getByEmail(mSelectedSession.getSpeakers()[0]);
Intent intent = new Intent(android.content.Intent.ACTION_SEND);
intent.setType("text/plain");
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_WHEN_TASK_RESET);
intent.putExtra(Intent.EXTRA_SUBJECT, "Share Session");
intent.putExtra(
Intent.EXTRA_TEXT,
"Checking out this #Jcertif2013 session : "
+ mSelectedSession.getTitle() + " by "
+ sp.getFirstname() + " " + sp.getLastname());
startActivity(intent);
}
protected void updateSession(Session s) {
if(onTablet()){
((OnSessionUpdatedListener) getParentFragment()).onSessionUpdated(s);
}else{
Intent intent = new Intent(this.getActivity().getApplicationContext(),
SessionDetailFragmentActivity.class);
String sessionJson= new Gson().toJson(s);
intent.putExtra("session",sessionJson);
startActivity(intent);
getSherlockActivity().overridePendingTransition ( 0 , R.anim.slide_up_left);
}
}
public SessionProvider getProvider() {
if (mProvider == null)
mProvider = new SessionProvider(this.getSherlockActivity());
return mProvider;
}
@Override
public void onActivityCreated(Bundle savedInstanceState) {
super.onActivityCreated(savedInstanceState);
// This gets called each time our Activity has finished creating itself.
// First check the local cache, if it's empty data will be fetched from
// web
mSessions = loadSessionsFromCache();
setSessions();
}
@Override
public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
super.onCreateOptionsMenu(menu, inflater);
}
/**
* We cache our stored sessions here so that we can return right away on
* multiple calls to setSessions() during the Activity lifecycle events (such
* as when the user rotates their device).
*/
private void setSessions() {
MainActivity activity = (MainActivity) getActivity();
setLoading(true);
if (mSessions.isEmpty() && activity != null) {
// This is where we make our REST call to the service. We also pass
// in our ResultReceiver
// defined in the RESTResponderFragment super class.
// We will explicitly call our Service since we probably want to
// keep it as a private component in our app.
Intent intent = new Intent(activity, RESTService.class);
intent.setData(Uri.parse(SESSIONS_LIST_URI));
// Here we are going to place our REST call parameters.
Bundle params = new Bundle();
params.putString(RESTService.KEY_JSON_PLAYLOAD, null);
intent.putExtra(RESTService.EXTRA_PARAMS, params);
intent.putExtra(RESTService.EXTRA_RESULT_RECEIVER,getResultReceiver());
// Here we send our Intent to our RESTService.
activity.startService(intent);
} else if (activity != null) {
// Here we check to see if our activity is null or not.
// We only want to update our views if our activity exists.
// Load our list adapter with our session.
updateList();
setLoading(false);
}
}
void updateList() {
mListener = new SpeedScrollListener();
mLvSessions.setOnScrollListener(mListener);
mAdapter = new SessionAdapter(this.getActivity(), mListener, mSessions);
mLvSessions.setAdapter(mAdapter);
if(refreshing){
refreshing=false;
mPullToRefreshAttacher.setRefreshComplete();
}
}
private boolean onTablet() {
return ((getResources().getConfiguration().screenLayout & Configuration.SCREENLAYOUT_SIZE_MASK) >= Configuration.SCREENLAYOUT_SIZE_LARGE);
}
public void updateList(String cat) {
if (cat.equals("All") || cat.equals("Tous")) {
mSessions = loadSessionsFromCache();
} else {
mSessions = getProvider().getSessionsByCategory(cat);
}
updateList();
}
@Override
public void onRESTResult(int code, Bundle resultData) {
// Here is where we handle our REST response.
// Check to see if we got an HTTP 200 code and have some data.
String result = null;
if (resultData != null) {
result = resultData.getString(RESTService.REST_RESULT);
} else {
return;
}
if (code == 200 && result != null) {
mSessions = parseSessionJson(result);
Log.d(TAG, result);
setSessions();
saveToCache(mSessions);
} else {
Activity activity = getActivity();
if (activity != null) {
Toast.makeText(
activity,
"Failed to load Session data. Check your internet settings.",
Toast.LENGTH_SHORT).show();
}
}
setLoading(false);
}
private List<Session> parseSessionJson(String result) {
Gson gson = new GsonBuilder().setDateFormat("dd/MM/yyyy hh:mm")
.create();
Session[] sessions = gson.fromJson(result, Session[].class);
return Arrays.asList(sessions);
}
protected void saveToCache(final List<Session> sessions) {
new Thread(new Runnable() {
@Override
public void run() {
for (Session session : sessions)
mProvider.store(session);
}
}).start();
}
private List<Session> loadSessionsFromCache() {
List<Session> list = getProvider().getAll(Session.class);
return list;
}
@Override
public void onPause() {
super.onPause();
}
@Override
public void onDestroy() {
super.onDestroy();
}
@Override
public void onRefreshStarted(View view) {
mProvider.deleteAll(Session.class);
//mLvSessions.setAdapter(null);
mSessions = loadSessionsFromCache();
setSessions();
refreshing=true;
}
}
| JCERTIFLab/jcertif-android-2013 | src/main/java/com/jcertif/android/fragments/SessionListFragment.java | Java | apache-2.0 | 12,340 |
/**
* Copyright 2014 Jordan Zimmerman
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.soabase.core.features.attributes;
import io.soabase.core.listening.Listenable;
import java.util.Collection;
/**
* Gives access to dynamic attributes. The various get methods return
* the current value for the given key after applying overrides and scopes, etc.
* Always call the methods to get the current value as it may change during runtime.
*/
public interface DynamicAttributes
{
public String getAttribute(String key);
public String getAttribute(String key, String defaultValue);
public boolean getAttributeBoolean(String key);
public boolean getAttributeBoolean(String key, boolean defaultValue);
public int getAttributeInt(String key);
public int getAttributeInt(String key, int defaultValue);
public long getAttributeLong(String key);
public long getAttributeLong(String key, long defaultValue);
public double getAttributeDouble(String key);
public double getAttributeDouble(String key, double defaultValue);
public void temporaryOverride(String key, boolean value);
public void temporaryOverride(String key, int value);
public void temporaryOverride(String key, long value);
public void temporaryOverride(String key, double value);
public void temporaryOverride(String key, String value);
public boolean removeOverride(String key);
public Collection<String> getKeys();
public Listenable<DynamicAttributeListener> getListenable();
}
| soabase/soabase | soabase-core/src/main/java/io/soabase/core/features/attributes/DynamicAttributes.java | Java | apache-2.0 | 2,045 |
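The interface above only defines the contract; the sketch below is a hypothetical consumer (not part of Soabase, and the "batch.size" key is invented) showing how calling code is expected to re-read values on every use rather than caching them, and how temporary overrides can be applied and withdrawn.

import io.soabase.core.features.attributes.DynamicAttributes;

public class FeatureGate {
    private final DynamicAttributes attributes;

    public FeatureGate(DynamicAttributes attributes) {
        this.attributes = attributes;
    }

    public int currentBatchSize() {
        // Always read through the attributes object - the value may change at runtime.
        return attributes.getAttributeInt("batch.size", 100);
    }

    public void forceSmallBatchesForTesting() {
        // Overrides are temporary and can be withdrawn again with removeOverride().
        attributes.temporaryOverride("batch.size", 10);
    }

    public void restore() {
        attributes.removeOverride("batch.size");
    }
}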
/*
* Copyright © 2009 HotPads (admin@hotpads.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.datarouter.instrumentation.trace;
import java.time.Instant;
import java.util.Objects;
import java.util.Optional;
import java.util.Random;
import java.util.regex.Pattern;
public class Traceparent{
private static final Pattern TRACEPARENT_PATTERN = Pattern.compile(
"^[0-9a-f]{2}-[0-9a-f]{32}-[0-9a-f]{16}-[0-9a-f]{2}$");
private static final String TRACEPARENT_DELIMITER = "-";
private static final Integer MIN_CHARS_TRACEPARENT = 55;
private static final String CURRENT_VERSION = "00";
public static final int TRACE_ID_HEX_SIZE = 32;
public static final int PARENT_ID_HEX_SIZE = 16;
public final String version = CURRENT_VERSION;
public final String traceId;
public final String parentId;
private String traceFlags;
public Traceparent(String traceId, String parentId, String traceFlags){
this.traceId = traceId;
this.parentId = parentId;
this.traceFlags = traceFlags;
}
public Traceparent(String traceId){
this(traceId, createNewParentId());
}
public Traceparent(String traceId, String parentId){
this(traceId, parentId, createDefaultTraceFlag());
}
public static Traceparent generateNew(long createdTimestamp){
return new Traceparent(createNewTraceId(createdTimestamp), createNewParentId(),
createDefaultTraceFlag());
}
public static Traceparent generateNewWithCurrentTimeInNs(){
return new Traceparent(createNewTraceId(Trace2Dto.getCurrentTimeInNs()), createNewParentId(),
createDefaultTraceFlag());
}
public Traceparent updateParentId(){
return new Traceparent(traceId, createNewParentId(), traceFlags);
}
/*
* TraceId is a 32-hex-digit String. We convert the root request's creation unix time into lowercase base16
* and append the lowercase base16 representation of a randomly generated long.
* */
private static String createNewTraceId(long createdTimestamp){
return String.format("%016x", createdTimestamp) + String.format("%016x", new Random().nextLong());
}
/*
* ParentId is a 16 hex digit String. We use a randomly generated long and convert it into lowercase base16
* representation.
* */
public static String createNewParentId(){
return String.format("%016x", new Random().nextLong());
}
public long getTimestampInMs(){
return Long.parseLong(traceId.substring(0, 16), 16);
}
public Instant getInstant(){
return Instant.ofEpochMilli(getTimestampInMs());
}
/*----------- trace flags ------------*/
private static String createDefaultTraceFlag(){
return TraceContextFlagMask.DEFAULT.toHexCode();
}
public void enableSample(){
this.traceFlags = TraceContextFlagMask.enableTrace(traceFlags);
}
public void enableLog(){
this.traceFlags = TraceContextFlagMask.enableLog(traceFlags);
}
public boolean shouldSample(){
return TraceContextFlagMask.isTraceEnabled(traceFlags);
}
public boolean shouldLog(){
return TraceContextFlagMask.isLogEnabled(traceFlags);
}
@Override
public String toString(){
return String.join(TRACEPARENT_DELIMITER, version, traceId, parentId, traceFlags);
}
@Override
public boolean equals(Object obj){
if(!(obj instanceof Traceparent)){
return false;
}
Traceparent other = (Traceparent)obj;
return Objects.equals(version, other.version)
&& Objects.equals(traceId, other.traceId)
&& Objects.equals(parentId, other.parentId)
&& Objects.equals(traceFlags, other.traceFlags);
}
@Override
public int hashCode(){
return Objects.hash(version, traceId, parentId, traceFlags);
}
public static Optional<Traceparent> parse(String traceparentStr){
if(traceparentStr == null || traceparentStr.isEmpty()){
return Optional.empty();
}else if(traceparentStr.length() < MIN_CHARS_TRACEPARENT){
return Optional.empty();
}else if(!TRACEPARENT_PATTERN.matcher(traceparentStr).matches()){
return Optional.empty();
}
String[] tokens = traceparentStr.split(Traceparent.TRACEPARENT_DELIMITER);
if(!Traceparent.CURRENT_VERSION.equals(tokens[0])){
return Optional.empty();
}
return Optional.of(new Traceparent(tokens[1], tokens[2], tokens[3]));
}
}
| hotpads/datarouter | datarouter-instrumentation/src/main/java/io/datarouter/instrumentation/trace/Traceparent.java | Java | apache-2.0 | 4,663 |
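A small sketch exercising the class above: generating a new traceparent, deriving a child span that keeps the trace id but gets a fresh parent id, and round-tripping through the serialized "version-traceId-parentId-flags" header form. The printed values are random apart from the timestamp prefix.

import java.util.Optional;

import io.datarouter.instrumentation.trace.Traceparent;

public class TraceparentSketch {
    public static void main(String[] args) {
        // 32-hex-digit traceId = 16 hex digits of the creation timestamp + 16 random hex digits
        Traceparent root = Traceparent.generateNew(System.currentTimeMillis());
        System.out.println(root); // e.g. 00-<traceId>-<parentId>-00

        // Same trace, new span: only the parentId changes.
        Traceparent child = root.updateParentId();

        // Parse the serialized form back into an object; the trace id is preserved.
        Optional<Traceparent> parsed = Traceparent.parse(child.toString());
        System.out.println(parsed.map(tp -> tp.traceId.equals(root.traceId)).orElse(false)); // true
    }
}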
package com.bzu.yhd.pocketcampus.bottomnav.user.view;
import android.content.Context;
import android.util.AttributeSet;
import com.facebook.rebound.SimpleSpringListener;
import com.facebook.rebound.Spring;
import com.facebook.rebound.SpringSystem;
import de.hdodenhof.circleimageview.CircleImageView;
/**
* Created by xmuSistone.
*/
public class AnimateImageView extends CircleImageView {
private Spring springX, springY;
private SimpleSpringListener followerListenerX, followerListenerY; // Follower callbacks: when the view in front moves, the view behind updates its endValue to follow it
public AnimateImageView(Context context) {
this(context, null);
}
public AnimateImageView(Context context, AttributeSet attrs) {
this(context, attrs, 0);
}
public AnimateImageView(Context context, AttributeSet attrs, int defStyleAttr) {
super(context, attrs, defStyleAttr);
SpringSystem mSpringSystem = SpringSystem.create();
springX = mSpringSystem.createSpring();
springY = mSpringSystem.createSpring();
springX.addListener(new SimpleSpringListener() {
@Override
public void onSpringUpdate(Spring spring) {
int xPos = (int) spring.getCurrentValue();
setScreenX(xPos);
}
});
springY.addListener(new SimpleSpringListener() {
@Override
public void onSpringUpdate(Spring spring) {
int yPos = (int) spring.getCurrentValue();
setScreenY(yPos);
}
});
followerListenerX = new SimpleSpringListener() {
@Override
public void onSpringUpdate(Spring spring) {
int xPos = (int) spring.getCurrentValue();
springX.setEndValue(xPos);
}
};
followerListenerY = new SimpleSpringListener() {
@Override
public void onSpringUpdate(Spring spring) {
int yPos = (int) spring.getCurrentValue();
springY.setEndValue(yPos);
}
};
}
private void setScreenX(int screenX) {
this.offsetLeftAndRight(screenX - getLeft());
}
private void setScreenY(int screenY) {
this.offsetTopAndBottom(screenY - getTop());
}
public void animTo(int xPos, int yPos) {
springX.setEndValue(xPos);
springY.setEndValue(yPos);
}
/**
* Force-stops the animation of the top ImageView
*/
public void stopAnimation() {
springX.setAtRest();
springY.setAtRest();
}
/**
* Called only for the topmost view; after the touch point is released it returns to the origin
*/
public void onRelease(int xPos, int yPos) {
setCurrentSpringPos(getLeft(), getTop());
animTo(xPos, yPos);
}
/**
* Sets the current spring position
*/
public void setCurrentSpringPos(int xPos, int yPos) {
springX.setCurrentValue(xPos);
springY.setCurrentValue(yPos);
}
public Spring getSpringX() {
return springX;
}
public Spring getSpringY() {
return springY;
}
public SimpleSpringListener getFollowerListenerX() {
return followerListenerX;
}
public SimpleSpringListener getFollowerListenerY() {
return followerListenerY;
}
}
| GolvenH/PocketCampus | app/src/main/java/com/bzu/yhd/pocketcampus/bottomnav/user/view/AnimateImageView.java | Java | apache-2.0 | 3,350 |
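A rough wiring sketch for the view above. It is assumed to run inside an Android Activity's onCreate() after setContentView(...); the view ids are invented for illustration. The follower listeners exposed by one view are registered on another view's springs, so animating the leader pulls the follower along with a spring lag.

// Hypothetical wiring inside an Activity, after the layout has been inflated.
AnimateImageView leader = (AnimateImageView) findViewById(R.id.avatar_top);      // invented id
AnimateImageView follower = (AnimateImageView) findViewById(R.id.avatar_behind); // invented id

// When the leader's springs move, the follower's springs chase the same end values.
leader.getSpringX().addListener(follower.getFollowerListenerX());
leader.getSpringY().addListener(follower.getFollowerListenerY());

// Animate the leader to a new on-screen position; the follower trails behind it.
leader.animTo(300, 500);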
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import static com.amazonaws.util.StringUtils.UTF8;
import static com.amazonaws.util.StringUtils.COMMA_SEPARATOR;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.List;
import java.util.regex.Pattern;
import com.amazonaws.AmazonClientException;
import com.amazonaws.Request;
import com.amazonaws.DefaultRequest;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.util.BinaryUtils;
import com.amazonaws.util.StringUtils;
import com.amazonaws.util.StringInputStream;
import com.amazonaws.util.json.*;
/**
* DeleteAssociationRequest Marshaller
*/
public class DeleteAssociationRequestMarshaller implements
Marshaller<Request<DeleteAssociationRequest>, DeleteAssociationRequest> {
public Request<DeleteAssociationRequest> marshall(
DeleteAssociationRequest deleteAssociationRequest) {
if (deleteAssociationRequest == null) {
throw new AmazonClientException(
"Invalid argument passed to marshall(...)");
}
Request<DeleteAssociationRequest> request = new DefaultRequest<DeleteAssociationRequest>(
deleteAssociationRequest, "AWSSimpleSystemsManagement");
request.addHeader("X-Amz-Target", "AmazonSSM.DeleteAssociation");
request.setHttpMethod(HttpMethodName.POST);
request.setResourcePath("");
try {
StringWriter stringWriter = new StringWriter();
JSONWriter jsonWriter = new JSONWriter(stringWriter);
jsonWriter.object();
if (deleteAssociationRequest.getName() != null) {
jsonWriter.key("Name")
.value(deleteAssociationRequest.getName());
}
if (deleteAssociationRequest.getInstanceId() != null) {
jsonWriter.key("InstanceId").value(
deleteAssociationRequest.getInstanceId());
}
jsonWriter.endObject();
String snippet = stringWriter.toString();
byte[] content = snippet.getBytes(UTF8);
request.setContent(new StringInputStream(snippet));
request.addHeader("Content-Length",
Integer.toString(content.length));
request.addHeader("Content-Type", "application/x-amz-json-1.1");
} catch (Throwable t) {
throw new AmazonClientException(
"Unable to marshall request to JSON: " + t.getMessage(), t);
}
return request;
}
}
| trasa/aws-sdk-java | aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/DeleteAssociationRequestMarshaller.java | Java | apache-2.0 | 3,452 |
package com.qmx.wxmp.common.web;
/**
* MediaType constants defined with an explicit UTF-8 charset.
*
* The Jax-RS and Spring MediaType classes have no UTF-8 variants, and Google's MediaType
* must be converted via toString() instead of being a constant, so it cannot be used in RESTful method annotations.
*
* @author free lance
*/
public class MediaTypes {
public static final String APPLICATION_XML = "application/xml";
public static final String APPLICATION_XML_UTF_8 = "application/xml; charset=UTF-8";
public static final String JSON = "application/json";
public static final String JSON_UTF_8 = "application/json; charset=UTF-8";
public static final String JAVASCRIPT = "application/javascript";
public static final String JAVASCRIPT_UTF_8 = "application/javascript; charset=UTF-8";
public static final String APPLICATION_XHTML_XML = "application/xhtml+xml";
public static final String APPLICATION_XHTML_XML_UTF_8 = "application/xhtml+xml; charset=UTF-8";
public static final String TEXT_PLAIN = "text/plain";
public static final String TEXT_PLAIN_UTF_8 = "text/plain; charset=UTF-8";
public static final String TEXT_XML = "text/xml";
public static final String TEXT_XML_UTF_8 = "text/xml; charset=UTF-8";
public static final String TEXT_HTML = "text/html";
public static final String TEXT_HTML_UTF_8 = "text/html; charset=UTF-8";
}
| lingyi2017/wxmp | src/main/java/com/qmx/wxmp/common/web/MediaTypes.java | Java | apache-2.0 | 1,306 |
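A typical use of these constants is in annotations whose values must be compile-time constants, which is exactly why Google's MediaType objects cannot be used there. A hypothetical Spring MVC handler (controller name and path invented) might look like the following sketch.

import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;

import com.qmx.wxmp.common.web.MediaTypes;

@Controller
public class PingController {

    // The constant is usable here because annotation values must be compile-time constants.
    @RequestMapping(value = "/ping", produces = MediaTypes.JSON_UTF_8)
    @ResponseBody
    public String ping() {
        return "{\"status\":\"ok\"}";
    }
}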
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.directory.fortress.core.samples;
import org.apache.directory.fortress.core.DelAdminMgr;
import org.apache.directory.fortress.core.DelAdminMgrFactory;
import org.apache.directory.fortress.core.SecurityException;
import org.apache.directory.fortress.core.model.OrgUnit;
import org.apache.directory.fortress.core.impl.TestUtils;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* CreateUserOrgHierarchySample JUnit Test. This test program will show how to build a simple User OrgUnit hierarchy which is
* used to enable administrators to group Users by organizational structure. This system supports multiple
* inheritance between OrgUnits and there are no limits on how deep a hierarchy can be. The OrgUnits require name and type. Optionally they can
* include a description. The User OrgUnits must be associated with Users and are used to provide Administrative RBAC control
* over who may perform User Role assigns and deassigns in the directory.
* @author <a href="mailto:dev@directory.apache.org">Apache Directory Project</a>
*/
public class CreateUserOrgHierarchySample extends TestCase
{
private static final String CLS_NM = CreateUserOrgHierarchySample.class.getName();
private static final Logger LOG = LoggerFactory.getLogger( CLS_NM );
// This constant will be added to index for creation of multiple nodes in directory.
public static final String TEST_HIER_USERORG_PREFIX = "sampleHierUserOrg";
public static final String TEST_HIER_BASE_USERORG = "sampleHierUserOrg1";
public static final int TEST_NUMBER = 6;
public static final String TEST_HIER_DESC_USERORG_PREFIX = "sampleHierUserOrgD";
public static final String TEST_HIER_ASC_USERORG_PREFIX = "sampleHierUserOrgA";
/**
* Simple constructor kicks off JUnit test suite.
* @param name
*/
public CreateUserOrgHierarchySample(String name)
{
super(name);
}
/**
* Run the User OrgUnit test cases.
*
* @return Test
*/
public static Test suite()
{
TestSuite suite = new TestSuite();
if(!AllSamplesJUnitTest.isFirstRun())
{
suite.addTest(new CreateUserOrgHierarchySample("testDeleteHierUserOrgs"));
suite.addTest(new CreateUserOrgHierarchySample("testDeleteDescendantUserOrgs"));
suite.addTest(new CreateUserOrgHierarchySample("testDeleteAscendantUserOrgs"));
}
suite.addTest(new CreateUserOrgHierarchySample("testCreateHierUserOrgs"));
suite.addTest(new CreateUserOrgHierarchySample("testCreateDescendantUserOrgs"));
suite.addTest(new CreateUserOrgHierarchySample("testCreateAscendantUserOrgs"));
/*
suite.addTest(new CreateUserOrgHierarchySample("testDeleteHierUserOrgs"));
suite.addTest(new CreateUserOrgHierarchySample("testCreateHierUserOrgs"));
suite.addTest(new CreateUserOrgHierarchySample("testDeleteDescendantUserOrgs"));
suite.addTest(new CreateUserOrgHierarchySample("testCreateDescendantUserOrgs"));
suite.addTest(new CreateUserOrgHierarchySample("testDeleteAscendantUserOrgs"));
suite.addTest(new CreateUserOrgHierarchySample("testCreateAscendantUserOrgs"));
*/
return suite;
}
/**
* Remove the simple hierarchical OrgUnits from the directory. Before removal call the API to remove the relationship
* between the parent and child OrgUnits. Once the relationship is removed the parent OrgUnit can be removed.
* User OrgUnit removal is not allowed (SecurityException will be thrown) if ou is assigned to Users in ldap.
* <p>
* <img src="./doc-files/HierUserOrgSimple.png" alt="">
*/
public static void testDeleteHierUserOrgs()
{
String szLocation = ".testDeleteHierUserOrgs";
if(AllSamplesJUnitTest.isFirstRun())
{
return;
}
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
for (int i = 1; i < TEST_NUMBER; i++)
{
// The key that must be set to locate any OrgUnit is simply the name and type.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + i, OrgUnit.Type.USER);
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + (i + 1), OrgUnit.Type.USER);
// Remove the relationship from the parent and child OrgUnit:
delAdminMgr.deleteInheritance(parentOrgUnit, childOrgUnit);
// Remove the parent OrgUnit from directory:
delAdminMgr.delete(parentOrgUnit);
}
// Remove the child OrgUnit from directory:
delAdminMgr.delete(new OrgUnit(TEST_HIER_USERORG_PREFIX + TEST_NUMBER, OrgUnit.Type.USER));
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
/**
* Add a simple OrgUnit hierarchy to ldap. The OrgUnits will named to include a name,'sampleHierUserOrg', appended with the
* sequence of 1 - 6. 'sampleHierUserOrg1' is the root or highest level OrgUnit in the structure while sampleHierUserOrg6 is the lowest
* most child. Fortress OrgUnits may have multiple parents which is demonstrated in testCreateAscendantUserOrgs sample.
* <p>
* <img src="./doc-files/HierUserOrgSimple.png" alt="">
*/
public static void testCreateHierUserOrgs()
{
String szLocation = ".testCreateHierUserOrgs";
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// Instantiate the root OrgUnit entity. OrgUnit requires name and type before addition.
OrgUnit baseOrgUnit = new OrgUnit(TEST_HIER_BASE_USERORG, OrgUnit.Type.USER);
// Add the root OrgUnit entity to the directory.
delAdminMgr.add(baseOrgUnit);
// Create User OrgUnits, 'sampleHierUserOrg2' - 'sampleHierUserOrg6'.
for (int i = 2; i < TEST_NUMBER + 1; i++)
{
// Instantiate the OrgUnit entity.
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + i, OrgUnit.Type.USER);
// Add the OrgUnit entity to the directory.
delAdminMgr.add(childOrgUnit);
// Instantiate the parent OrgUnit. The key is the name and type.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_USERORG_PREFIX + (i - 1), OrgUnit.Type.USER);
// Add a relationship between the parent and child OrgUnits:
delAdminMgr.addInheritance(parentOrgUnit, childOrgUnit);
}
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
/**
* Demonstrate teardown of a parent to child relationship of one-to-many. Each child must first remove the inheritance
* relationship with parent before being removed from ldap. The parent OrgUnit will be removed from ldap last.
* User OrgUnit removal is not allowed (SecurityException will be thrown) if ou is assigned to Users in ldap.
* <p>
* <img src="./doc-files/HierUserOrgDescendants.png" alt="">
*/
public static void testDeleteDescendantUserOrgs()
{
String szLocation = ".testDeleteDescendantUserOrgs";
if(AllSamplesJUnitTest.isFirstRun())
{
return;
}
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// This parent has many children. They must be deleted before parent itself can.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + 1, OrgUnit.Type.USER);
// There are N User OrgUnits to process:
for (int i = 2; i < TEST_NUMBER + 1; i++)
{
// Instantiate the child OrgUnit entity. The key is the name and type.
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + i, OrgUnit.Type.USER);
// Remove the relationship from the parent and child OrgUnit:
delAdminMgr.deleteInheritance(parentOrgUnit, childOrgUnit);
// Remove the child OrgUnit from directory:
delAdminMgr.delete(childOrgUnit);
}
// Remove the parent OrgUnit from directory:
delAdminMgr.delete(parentOrgUnit);
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
/**
* Demonstrate a parent to child OrgUnit structure of one-to-many. The parent OrgUnit must be created before
* the call to addDescendant which will Add a new OrgUnit node and set a OrgUnit relationship with parent node.
* <p>
* <img src="./doc-files/HierUserOrgDescendants.png" alt="">
*/
public static void testCreateDescendantUserOrgs()
{
String szLocation = ".testCreateDescendantUserOrgs";
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// Instantiate the parent User OrgUnit entity. This needs a name and type before it can be added to ldap.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + 1, OrgUnit.Type.USER);
// This parent will have many children:
delAdminMgr.add(parentOrgUnit);
// Create User OrgUnits, 'sampleHierUserOrgD2' - 'sampleHierUserOrgD6'.
for (int i = 1; i < TEST_NUMBER; i++)
{
// Now add relationship to the directory between parent and child User OrgUnits.
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_DESC_USERORG_PREFIX + (i + 1), OrgUnit.Type.USER);
// Now add child OrgUnit entity to directory and add relationship with existing parent OrgUnit.
delAdminMgr.addDescendant(parentOrgUnit, childOrgUnit);
}
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
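    // Illustrative equivalence: a single addDescendant(parent, child) call above plays roughly the
    // same role as the two-step sequence used in testCreateHierUserOrgs(), i.e.
    //   delAdminMgr.add(childOrgUnit);
    //   delAdminMgr.addInheritance(parentOrgUnit, childOrgUnit);
    // with the parent OrgUnit already present in the directory.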
/**
     * This example demonstrates teardown of a child to parent relationship represented as one-to-many. The parents must all
     * be removed from the child before the child itself can be removed.
     * User OrgUnit removal is not allowed (a SecurityException will be thrown) if the ou is assigned to Users in ldap.
* <p>
* <img src="./doc-files/HierUserOrgAscendants.png" alt="">
*/
public static void testDeleteAscendantUserOrgs()
{
String szLocation = ".testDeleteAscendantUserOrgs";
if(AllSamplesJUnitTest.isFirstRun())
{
return;
}
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// This child OrgUnit has many parents:
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + 1, OrgUnit.Type.USER);
for (int i = 2; i < TEST_NUMBER + 1; i++)
{
// Instantiate the parent. This needs a name and type before it can be used in operation.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + i, OrgUnit.Type.USER);
// Remove the relationship between parent and child OrgUnits:
delAdminMgr.deleteInheritance(parentOrgUnit, childOrgUnit);
// Remove the parent OrgUnit from directory:
delAdminMgr.delete(parentOrgUnit);
}
// Remove the child OrgUnit from directory:
delAdminMgr.delete(childOrgUnit);
LOG.info(szLocation + " success");
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
/**
     * Demonstrate a one-to-many child to parent OrgUnit structure. To use this API, the child OrgUnit must be created before
     * the call to addAscendant, which will add a new OrgUnit node and set an OrgUnit relationship with the child node.
* <p>
* <img src="./doc-files/HierUserOrgAscendants.png" alt="">
*/
public static void testCreateAscendantUserOrgs()
{
String szLocation = ".testCreateAscendantUserOrgs";
try
{
// Instantiate the DelAdminMgr implementation which is used to provision ARBAC policies.
DelAdminMgr delAdminMgr = DelAdminMgrFactory.createInstance(TestUtils.getContext());
// Instantiate the child OrgUnit. This needs a name and type.
OrgUnit childOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + 1, OrgUnit.Type.USER);
// This child will have many parents:
delAdminMgr.add(childOrgUnit);
// Create OrgUnits, 'sampleHierUserOrgA2' - 'sampleHierUserOrgA6'.
for (int i = 1; i < TEST_NUMBER; i++)
{
// Instantiate the parent OrgUnit. This needs a name and type before it can be added to ldap.
OrgUnit parentOrgUnit = new OrgUnit(TEST_HIER_ASC_USERORG_PREFIX + (i + 1), OrgUnit.Type.USER);
// Now add parent OrgUnit entity to directory and add relationship with existing child OrgUnit.
delAdminMgr.addAscendant(childOrgUnit, parentOrgUnit);
}
}
catch (SecurityException ex)
{
LOG.error(szLocation + " caught SecurityException rc=" + ex.getErrorId() + ", msg=" + ex.getMessage(), ex);
fail(ex.getMessage());
}
}
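    // Illustrative note: addAscendant(child, parent) is the mirror image of addDescendant; the child
    // already exists in the directory, and each call adds a new parent OrgUnit and links it to that child.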
}
| PennState/directory-fortress-core-1 | src/test/java/org/apache/directory/fortress/core/samples/CreateUserOrgHierarchySample.java | Java | apache-2.0 | 15,806 |
//
// Copyright (c) 2014 Limit Point Systems, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package tools.viewer.user;
import tools.viewer.common.*;
import tools.viewer.render.*;
import tools.common.gui.*;
import java.awt.*;
import java.awt.event.*;
import java.util.*;
import javax.swing.*;
import javax.swing.event.*;
import javax.swing.border.*;
import java.text.*;
import vtk.*;
/**
* Implementation of <code>G3DFieldActorPropertiesPanel</code> for editing the
* values of a <code>HedgeHogFieldActorDescriptor</code>.
*/
public class HedgeHogFieldActorPropertiesPanel
extends G3DFieldActorPropertiesPanel
{
// CONSTANTS FACET
protected static final String[] VECTOR_MODES =
{ ViewerConstants.VECTOR_MAGNITUDE,
ViewerConstants.VECTOR_NORMAL };
// GUI FACET
protected JPanel hedgeHogPanel;
protected JSpinner scaleFactorSpinner;
protected JComboBox vectorModeComboBox;
// CONSTRUCTORS
/**
* Constructor
*/
public HedgeHogFieldActorPropertiesPanel(G3DViewer xviewer,
FieldActorDescriptor[] xdescriptors)
{
super(xviewer, xdescriptors);
hedgeHogPanel = createHedgeHogPanel();
tabbedPane.addTab("Hedge Hog", hedgeHogPanel);
initValues();
}
// CREATE FACET
/**
* Create hedge hog panel
*/
protected JPanel createHedgeHogPanel()
{
JPanel result = new JPanel();
result.setLayout(new BoxLayout(result, BoxLayout.PAGE_AXIS));
result.setBorder(
BorderFactory.createCompoundBorder(
BorderFactory.createEmptyBorder(6, 12, 6, 12),
BorderFactory.createTitledBorder("Hedge Hog:")));
//=====
result.add(Box.createVerticalGlue());
JPanel panel = new JPanel();
JLabel scaleFactorLabel = new JLabel("Scale Factor: ", JLabel.RIGHT);
scaleFactorLabel.setAlignmentX(Component.CENTER_ALIGNMENT);
SpinnerModel scaleFactorModel = new SpinnerNumberModel(1.0, 0.0,
10000000.0, 0.01);
scaleFactorSpinner = new JSpinner(scaleFactorModel);
panel.add(scaleFactorLabel);
panel.add(scaleFactorSpinner);
result.add(panel);
result.add(Box.createVerticalGlue());
//=====
panel = new JPanel();
JLabel vectorModeLabel = new JLabel("Vector Mode:", JLabel.RIGHT);
vectorModeLabel.setAlignmentX(Component.CENTER_ALIGNMENT);
vectorModeComboBox = new JComboBox(VECTOR_MODES);
panel.add(vectorModeLabel);
panel.add(vectorModeComboBox);
result.add(panel);
result.add(Box.createVerticalGlue());
//=====
return result;
}
// INITIALIZE FACET
/**
*
*/
public void initValues()
{
super.initValues();
// Use the first actor in the list to initialize the
// user interface.
HedgeHogFieldActorDescriptor actor =
(HedgeHogFieldActorDescriptor) descriptors[0];
initHedgeHogPanel(actor);
}
/**
*
*/
protected void initHedgeHogPanel(HedgeHogFieldActorDescriptor actor)
{
scaleFactorSpinner.setValue(actor.scaleFactor);
vectorModeComboBox.setSelectedItem(actor.vectorMode);
}
// APPLY FACET
/**
*
*/
public void doApply()
{
// Set the wait state to true, it is restored by
// UpdatePropertiesPanelEvent.
setWaitState(true);
synchronized (viewer.getScript())
{
synchronized (viewer.getScene())
{
// Apply the changed to the descriptors
HedgeHogFieldActorDescriptor actor;
for(int i=0; i<descriptors.length; i++)
{
actor = (HedgeHogFieldActorDescriptor) descriptors[i];
applyHedgeHog(actor);
}
}
}
super.doApply(false);
}
/**
*
*/
public void applyHedgeHog(HedgeHogFieldActorDescriptor actor)
{
actor.scaleFactor = ((SpinnerNumberModel)scaleFactorSpinner.getModel()).getNumber().doubleValue();
actor.vectorMode = (String) vectorModeComboBox.getSelectedItem();
}
}
| LimitPointSystems/SheafSystem | tools/viewer/user/HedgeHogFieldActorPropertiesPanel.java | Java | apache-2.0 | 4,423 |
package cn.oeaom.CoolWeather;
import android.content.Intent;
import android.content.SharedPreferences;
import android.graphics.Typeface;
import android.media.Image;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.support.design.widget.FloatingActionButton;
import android.support.design.widget.Snackbar;
import android.support.v4.view.GravityCompat;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import java.io.IOException;
import cn.oeaom.CoolWeather.GSON.Weather;
import cn.oeaom.CoolWeather.Util.Utility;
import okhttp3.Call;
import okhttp3.Callback;
import cn.oeaom.CoolWeather.Util.HttpUtil;
import okhttp3.Response;
public class WeatherActivity extends AppCompatActivity {
private static final String TAG = "WeatherActivity";
private static final String API_KEY = "bc0418b57b2d4918819d3974ac1285d9";
    // API key used to authenticate against the weather service
    // Views shown on the weather information panel
    public DrawerLayout drawerLayout; // drawer revealed by swiping from the left or tapping the home button
    //public TextView tvTitle; // title *deprecated
    private TextView weatherTime; // update time of the weather info
    private TextView weatherDegree; // temperature value
    private TextView measure2; // temperature unit
    private TextView weatherPlace; // location shown with the weather info
    private TextView weatherType; // weather condition text
    private String mWeatherId; // city id
private ImageView weatherStat;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_weather);
//Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
//setSupportActionBar(toolbar);
// FloatingActionButton fab = (FloatingActionButton) findViewById(R.id.fab);
// fab.setOnClickListener(new View.OnClickListener() {
// @Override
// public void onClick(View view) {
// Snackbar.make(view, "Replace with your own action", Snackbar.LENGTH_LONG)
// .setAction("Action", null).show();
// }
// });
Typeface fontFace = Typeface.createFromAsset(getAssets(), "fonts/AndroidClock.ttf");
        // The font file must be in TrueType (ttf) format.
        // If an external font does not take effect (Droid Sans is shown instead), it is usually
        // because Android does not support that font, not because of an error in your program.
weatherTime = (TextView)findViewById(R.id.weather_info_time);
weatherTime.setTypeface(fontFace);
//
weatherDegree = (TextView)findViewById(R.id.degree_value);
weatherDegree.setTypeface(fontFace);
TextView measure = (TextView)findViewById(R.id.degree_measure);
// measure.setTypeface(fontFace);
measure2 = (TextView)findViewById(R.id.degree_measure2);
//measure2.setTypeface(fontFace);
weatherPlace = (TextView)findViewById(R.id.weather_info_place);
//weatherPlace.setTypeface(fontFace);
weatherType = (TextView)findViewById(R.id.weather_info_text);
//weatherType.setTypeface(fontFace);
weatherStat = (ImageView)findViewById(R.id.weatherIcon);
//
// TextView weatherInfo = (TextView)findViewById(R.id.weather_info_text);
//
// weatherInfo.setTypeface(fontFace);
//
//text.setTextSize(50);
Intent intent=getIntent();
        // Read the values for the corresponding keys from this Intent's extras
String weatherId=intent.getStringExtra("weather_id");
String CountryName = intent.getStringExtra("CountryName");
// tvTitle = (TextView)findViewById(R.id.title_text_weather);
// //tvTitle.setText(weatherId);
// tvTitle.setText(CountryName);
// // tvTitle.setTextSize(60);
// tvTitle.setTypeface(fontFace);
drawerLayout = (DrawerLayout)findViewById(R.id.drawer_layout);
Button btnBack = (Button)findViewById(R.id.btn_home);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
//Intent intent = new Intent(WeatherActivity.this,MainActivity.class);
//startActivity(intent);
// WeatherActivity.this.finish();
drawerLayout.openDrawer(GravityCompat.START);
Log.v(TAG,"Clicked nav btn");
}
});
SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(this);
String weatherString = prefs.getString("weather", null);
if (weatherString != null) {
            // Parse the cached weather data directly when a cache exists
Weather weather = Utility.handleWeatherResponse(weatherString);
mWeatherId = weather.basic.weatherId;
showWeatherInfo(weather);
} else {
            // No cache available: query the server for the weather
mWeatherId = getIntent().getStringExtra("weather_id");
// weatherLayout.setVisibility(View.INVISIBLE);
requestWeather(mWeatherId);
}
// swipeRefresh.setOnRefreshListener(new SwipeRefreshLayout.OnRefreshListener() {
// @Override
// public void onRefresh() {
// requestWeather(mWeatherId);
// }
// });
}
// public void requestWeather(final String weatherId){
// tvTitle.setText(weatherId);
// }
public void requestWeather(final String weatherId) {
String weatherUrl = "http://guolin.tech/api/weather?cityid=" + weatherId + "&key="+API_KEY;
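        // e.g. (illustrative, with a made-up city id):
        // http://guolin.tech/api/weather?cityid=CN101010100&key=<API_KEY>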
HttpUtil.sendOkHttpRequest(weatherUrl, new Callback() {
@Override
public void onResponse(Call call, Response response) throws IOException {
final String responseText = response.body().string();
Log.v(TAG,"=======================================================================");
Log.v(TAG,responseText);
Log.v(TAG,"=======================================================================");
final Weather weather = Utility.handleWeatherResponse(responseText);
runOnUiThread(new Runnable() {
@Override
public void run() {
if (weather != null && "ok".equals(weather.status)) {
SharedPreferences.Editor editor = PreferenceManager.getDefaultSharedPreferences(WeatherActivity.this).edit();
editor.putString("weather", responseText);
editor.apply();
//mWeatherId = weather.basic.weatherId;
showWeatherInfo(weather);
} else {
Toast.makeText(WeatherActivity.this, "获取天气信息失败", Toast.LENGTH_SHORT).show();
}
//swipeRefresh.setRefreshing(false);
}
});
}
@Override
public void onFailure(Call call, IOException e) {
e.printStackTrace();
runOnUiThread(new Runnable() {
@Override
public void run() {
Toast.makeText(WeatherActivity.this, "获取天气信息失败", Toast.LENGTH_SHORT).show();
//swipeRefresh.setRefreshing(false);
}
});
}
});
//loadBingPic();
}
private int findWeatherIconByName(String weatherName)
{
switch(weatherName)
{
case "晴":return R.drawable.a044;
case "多云":return R.drawable.a045;
case "少云":return R.drawable.a046;
case "晴间多云":return R.drawable.a047;
case "阴":return R.drawable.a048;
case "有风":return R.drawable.a049;
case "平静":return R.drawable.a050;
case "微风":return R.drawable.a000;
case "和风":return R.drawable.a001;
case "清风":return R.drawable.a002;
case "强风":return R.drawable.a003;
case "劲风":return R.drawable.a003;
case "大风":return R.drawable.a004;
case "烈风":return R.drawable.a005;
case "风暴":return R.drawable.a006;
case "狂爆风":return R.drawable.a007;
case "龙卷风":return R.drawable.a008;
case "热带风暴":return R.drawable.a009;
case "阵雨":return R.drawable.a012;
case "强阵雨":return R.drawable.a013;
case "雷阵雨":return R.drawable.a014;
case "强雷阵雨":return R.drawable.a015;
case "雷阵雨伴有冰雹":return R.drawable.a016;
case "小雨":return R.drawable.a017;
case "中雨":return R.drawable.a018;
case "大雨":return R.drawable.a019;
case "极端降雨":return R.drawable.a020;
case "毛毛雨":return R.drawable.a021;
case "细雨":return R.drawable.a021;
case "暴雨":return R.drawable.a022;
case "大暴雨":return R.drawable.a023;
case "特大暴雨":return R.drawable.a024;
case "冻雨":return R.drawable.a025;
case "小雪":return R.drawable.a026;
case "中雪":return R.drawable.a027;
case "大雪":return R.drawable.a028;
case "暴雪":return R.drawable.a029;
case "雨夹雪":return R.drawable.a030;
case "雨雪天气":return R.drawable.a031;
case "阵雨夹雪":return R.drawable.a032;
case "阵雪":return R.drawable.a033;
case "薄雾":return R.drawable.a034;
case "雾":return R.drawable.a035;
case "霾":return R.drawable.a036;
case "扬沙":return R.drawable.a037;
case "浮尘":return R.drawable.a038;
case "沙尘暴":return R.drawable.a039;
case "热":return R.drawable.a041;
case "冷":return R.drawable.a042;
case "强沙尘暴":return R.drawable.a040;
case "未知":return R.drawable.a043;
default:{
break;
}
}
return -1;
}
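    // Descriptive note: findWeatherIconByName() returns -1 when no weather name matches; the value is
    // passed straight to setImageResource() in showWeatherInfo(), so only "未知" maps to an explicit
    // fallback icon (a043).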
private void showWeatherInfo(Weather weather) {
String cityName = weather.basic.cityName;
String updateTime = weather.basic.update.updateTime.split(" ")[1];
String degree = weather.now.temperature;
String weatherInfo = weather.now.more.info;
weatherPlace.setText(cityName);
weatherTime.setText(updateTime);
weatherDegree.setText(degree);
weatherType.setText(weatherInfo);
weatherStat.setImageResource(findWeatherIconByName(weatherInfo));
// forecastLayout.removeAllViews();
// for (Forecast forecast : weather.forecastList) {
// View view = LayoutInflater.from(this).inflate(R.layout.forecast_item, forecastLayout, false);
// TextView dateText = (TextView) view.findViewById(R.id.date_text);
// TextView infoText = (TextView) view.findViewById(R.id.info_text);
// TextView maxText = (TextView) view.findViewById(R.id.max_text);
// TextView minText = (TextView) view.findViewById(R.id.min_text);
// dateText.setText(forecast.date);
// infoText.setText(forecast.more.info);
// maxText.setText(forecast.temperature.max);
// minText.setText(forecast.temperature.min);
// forecastLayout.addView(view);
// }
// if (weather.aqi != null) {
// aqiText.setText(weather.aqi.city.aqi);
// pm25Text.setText(weather.aqi.city.pm25);
// }
// String comfort = "舒适度:" + weather.suggestion.comfort.info;
// String carWash = "洗车指数:" + weather.suggestion.carWash.info;
// String sport = "运行建议:" + weather.suggestion.sport.info;
// comfortText.setText(comfort);
// carWashText.setText(carWash);
// sportText.setText(sport);
// weatherLayout.setVisibility(View.VISIBLE);
// Intent intent = new Intent(this, AutoUpdateService.class);
// startService(intent);
}
}
| applicationsky/MyCoolWeather | app/src/main/java/cn/oeaom/CoolWeather/WeatherActivity.java | Java | apache-2.0 | 12,783 |
package com.structurizr.view;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.structurizr.model.*;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.stream.Collectors;
@JsonIgnoreProperties(ignoreUnknown=true)
public abstract class View implements Comparable<View> {
private SoftwareSystem softwareSystem;
private String softwareSystemId;
private String description = "";
private PaperSize paperSize = PaperSize.A4_Portrait;
private Set<ElementView> elementViews = new LinkedHashSet<>();
View() {
}
public View(SoftwareSystem softwareSystem) {
this.softwareSystem = softwareSystem;
}
@JsonIgnore
public Model getModel() {
return softwareSystem.getModel();
}
@JsonIgnore
public SoftwareSystem getSoftwareSystem() {
return softwareSystem;
}
public void setSoftwareSystem(SoftwareSystem softwareSystem) {
this.softwareSystem = softwareSystem;
}
public String getSoftwareSystemId() {
if (this.softwareSystem != null) {
return this.softwareSystem.getId();
} else {
return this.softwareSystemId;
}
}
void setSoftwareSystemId(String softwareSystemId) {
this.softwareSystemId = softwareSystemId;
}
public abstract ViewType getType();
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public PaperSize getPaperSize() {
return paperSize;
}
public void setPaperSize(PaperSize paperSize) {
this.paperSize = paperSize;
}
/**
* Adds all software systems in the model to this view.
*/
public void addAllSoftwareSystems() {
getModel().getSoftwareSystems().forEach(this::addElement);
}
/**
* Adds the given software system to this view.
*
* @param softwareSystem the SoftwareSystem to add
*/
public void addSoftwareSystem(SoftwareSystem softwareSystem) {
addElement(softwareSystem);
}
/**
     * Adds all people in the model to this view.
*/
public void addAllPeople() {
getModel().getPeople().forEach(this::addElement);
}
/**
* Adds the given person to this view.
*
* @param person the Person to add
*/
public void addPerson(Person person) {
addElement(person);
}
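    // Typical usage sketch (illustrative only; a concrete subclass supplies getType() and getName()):
    //   View view = ...;                          // e.g. a context view created for a software system
    //   view.addAllSoftwareSystems();
    //   view.addAllPeople();
    //   view.removeElementsWithNoRelationships();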
protected void addElement(Element element) {
if (softwareSystem.getModel().contains(element)) {
elementViews.add(new ElementView(element));
}
}
protected void removeElement(Element element) {
ElementView elementView = new ElementView(element);
elementViews.remove(elementView);
}
/**
* Gets the set of elements in this view.
*
* @return a Set of ElementView objects
*/
public Set<ElementView> getElements() {
return elementViews;
}
public Set<RelationshipView> getRelationships() {
Set<Relationship> relationships = new HashSet<>();
Set<Element> elements = getElements().stream()
.map(ElementView::getElement)
.collect(Collectors.toSet());
elements.forEach(b -> relationships.addAll(b.getRelationships()));
return relationships.stream()
.filter(r -> elements.contains(r.getSource()) && elements.contains(r.getDestination()))
.map(RelationshipView::new)
.collect(Collectors.toSet());
}
public void setRelationships(Set<RelationshipView> relationships) {
        // do nothing ... these are determined automatically
}
/**
* Removes all elements that have no relationships
* to other elements in this view.
*/
public void removeElementsWithNoRelationships() {
Set<RelationshipView> relationships = getRelationships();
Set<String> elementIds = new HashSet<>();
relationships.forEach(rv -> elementIds.add(rv.getRelationship().getSourceId()));
relationships.forEach(rv -> elementIds.add(rv.getRelationship().getDestinationId()));
elementViews.removeIf(ev -> !elementIds.contains(ev.getId()));
}
public void removeElementsThatCantBeReachedFrom(Element element) {
Set<String> elementIdsToShow = new HashSet<>();
findElementsToShow(element, elementIdsToShow, 1);
elementViews.removeIf(ev -> !elementIdsToShow.contains(ev.getId()));
}
private void findElementsToShow(Element element, Set<String> elementIds, int depth) {
if (elementViews.contains(new ElementView(element))) {
elementIds.add(element.getId());
if (depth < 100) {
element.getRelationships().forEach(r -> findElementsToShow(r.getDestination(), elementIds, depth + 1));
}
}
}
public abstract String getName();
@Override
public int compareTo(View view) {
return getTitle().compareTo(view.getTitle());
}
private String getTitle() {
return getName() + " - " + getDescription();
}
ElementView findElementView(Element element) {
for (ElementView elementView : getElements()) {
if (elementView.getElement().equals(element)) {
return elementView;
}
}
return null;
}
public void copyLayoutInformationFrom(View source) {
this.setPaperSize(source.getPaperSize());
for (ElementView sourceElementView : source.getElements()) {
ElementView destinationElementView = findElementView(sourceElementView.getElement());
if (destinationElementView != null) {
destinationElementView.copyLayoutInformationFrom(sourceElementView);
}
}
}
} | JDriven/structurizr-java | structurizr-core/src/com/structurizr/view/View.java | Java | apache-2.0 | 5,965 |
/*
* Copyright (c) 2016 The original author or authors
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Apache License v2.0 which accompanies this distribution.
*
* The Eclipse Public License is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* The Apache License v2.0 is available at
* http://www.opensource.org/licenses/apache2.0.php
*
* You may elect to redistribute this code under either of these licenses.
*/
package io.vertx.ext.consul;
import io.vertx.codegen.annotations.DataObject;
import io.vertx.core.json.JsonObject;
import java.util.List;
/**
* Holds network coordinates of node
*
* @author <a href="mailto:ruslan.sennov@gmail.com">Ruslan Sennov</a>
* @see <a href="https://www.consul.io/docs/internals/coordinates.html">Network coordinates</a>
*/
@DataObject(generateConverter = true)
public class Coordinate {
private String node;
private float adj;
private float err;
private float height;
private List<Float> vec;
/**
* Default constructor
*/
public Coordinate() {}
/**
* Copy constructor
*
* @param coordinate the one to copy
*/
public Coordinate(Coordinate coordinate) {
this.node = coordinate.node;
this.adj = coordinate.adj;
this.err = coordinate.err;
this.height = coordinate.height;
this.vec = coordinate.vec;
}
/**
* Constructor from JSON
*
* @param coordinate the JSON
*/
public Coordinate(JsonObject coordinate) {
CoordinateConverter.fromJson(coordinate, this);
}
/**
* Convert to JSON
*
* @return the JSON
*/
public JsonObject toJson() {
JsonObject jsonObject = new JsonObject();
CoordinateConverter.toJson(this, jsonObject);
return jsonObject;
}
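  // Illustrative round-trip, assuming the generated CoordinateConverter covers all fields:
  //   Coordinate original = new Coordinate().setNode("n1").setAdj(0.1f);
  //   Coordinate copy = new Coordinate(original.toJson());
  //   // original.equals(copy) should then hold for the fields that were set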
/**
* Get name of node
*
* @return name of node
*/
public String getNode() {
return node;
}
/**
* Get adjustment
*
* @return adjustment
*/
public float getAdj() {
return adj;
}
/**
* Get error
*
* @return error
*/
public float getErr() {
return err;
}
/**
* Get height
*
* @return height
*/
public float getHeight() {
return height;
}
/**
* Get vector
*
* @return vector
*/
public List<Float> getVec() {
return vec;
}
/**
* Set name of node
*
* @param node name of node
* @return reference to this, for fluency
*/
public Coordinate setNode(String node) {
this.node = node;
return this;
}
/**
* Set adjustment
*
* @param adj adjustment
* @return reference to this, for fluency
*/
public Coordinate setAdj(float adj) {
this.adj = adj;
return this;
}
/**
* Set error
*
* @param err error
* @return reference to this, for fluency
*/
public Coordinate setErr(float err) {
this.err = err;
return this;
}
/**
* Set height
*
* @param height height
* @return reference to this, for fluency
*/
public Coordinate setHeight(float height) {
this.height = height;
return this;
}
/**
* Set vector
*
* @param vec vector
* @return reference to this, for fluency
*/
public Coordinate setVec(List<Float> vec) {
this.vec = vec;
return this;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Coordinate that = (Coordinate) o;
if (Float.compare(that.adj, adj) != 0) return false;
if (Float.compare(that.err, err) != 0) return false;
if (Float.compare(that.height, height) != 0) return false;
if (node != null ? !node.equals(that.node) : that.node != null) return false;
return vec != null ? vec.equals(that.vec) : that.vec == null;
}
@Override
public int hashCode() {
int result = node != null ? node.hashCode() : 0;
result = 31 * result + (adj != +0.0f ? Float.floatToIntBits(adj) : 0);
result = 31 * result + (err != +0.0f ? Float.floatToIntBits(err) : 0);
result = 31 * result + (height != +0.0f ? Float.floatToIntBits(height) : 0);
result = 31 * result + (vec != null ? vec.hashCode() : 0);
return result;
}
}
| ruslansennov/vertx-consul-client | src/main/java/io/vertx/ext/consul/Coordinate.java | Java | apache-2.0 | 4,253 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.xml.security.test.dom.transforms.implementations;
import java.io.ByteArrayInputStream;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathFactory;
import org.apache.xml.security.signature.XMLSignatureInput;
import org.apache.xml.security.test.dom.DSNamespaceContext;
import org.apache.xml.security.transforms.Transforms;
import org.apache.xml.security.transforms.implementations.TransformBase64Decode;
import org.apache.xml.security.utils.XMLUtils;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
/**
* Unit test for {@link org.apache.xml.security.transforms.implementations.TransformBase64Decode}
*
* @author Christian Geuer-Pollmann
*/
public class TransformBase64DecodeTest extends org.junit.Assert {
static org.slf4j.Logger log =
org.slf4j.LoggerFactory.getLogger(TransformBase64DecodeTest.class);
static {
org.apache.xml.security.Init.init();
}
@org.junit.Test
public void test1() throws Exception {
// base64 encoded
String s1 =
"VGhlIFVSSSBvZiB0aGUgdHJhbnNmb3JtIGlzIGh0dHA6Ly93d3cudzMub3JnLzIwMDAvMDkveG1s\n"
+ "ZHNpZyNiYXNlNjQ=";
Document doc = TransformBase64DecodeTest.createDocument();
Transforms t = new Transforms(doc);
doc.appendChild(t.getElement());
t.addTransform(TransformBase64Decode.implementedTransformURI);
XMLSignatureInput in =
new XMLSignatureInput(new ByteArrayInputStream(s1.getBytes()));
XMLSignatureInput out = t.performTransforms(in);
String result = new String(out.getBytes());
assertTrue(
result.equals("The URI of the transform is http://www.w3.org/2000/09/xmldsig#base64")
);
}
@org.junit.Test
public void test2() throws Exception {
// base64 encoded twice
String s2 =
"VkdobElGVlNTU0J2WmlCMGFHVWdkSEpoYm5ObWIzSnRJR2x6SUdoMGRIQTZMeTkzZDNjdWR6TXVi\n"
+ "M0puTHpJd01EQXZNRGt2ZUcxcwpaSE5wWnlOaVlYTmxOalE9";
Document doc = TransformBase64DecodeTest.createDocument();
Transforms t = new Transforms(doc);
doc.appendChild(t.getElement());
t.addTransform(TransformBase64Decode.implementedTransformURI);
XMLSignatureInput in =
new XMLSignatureInput(new ByteArrayInputStream(s2.getBytes()));
XMLSignatureInput out = t.performTransforms(t.performTransforms(in));
String result = new String(out.getBytes());
assertTrue(
result.equals("The URI of the transform is http://www.w3.org/2000/09/xmldsig#base64")
);
}
@org.junit.Test
public void test3() throws Exception {
//J-
String input = ""
+ "<Object xmlns:signature='http://www.w3.org/2000/09/xmldsig#'>\n"
+ "<signature:Base64>\n"
+ "VGhlIFVSSSBvZiB0aGU gdHJhbn<RealText>Nmb 3JtIGlzIG<test/>h0dHA6</RealText>Ly93d3cudzMub3JnLzIwMDAvMDkveG1s\n"
+ "ZHNpZyNiYXNlNjQ=\n"
+ "</signature:Base64>\n"
+ "</Object>\n"
;
//J+
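        // The base64 transform works on the text content of the selected node set, so the element
        // boundaries and whitespace inside signature:Base64 above should be ignored before decoding;
        // the assertion below relies on that behaviour.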
DocumentBuilder db = XMLUtils.createDocumentBuilder(false);
db.setErrorHandler(new org.apache.xml.security.utils.IgnoreAllErrorHandler());
Document doc = db.parse(new ByteArrayInputStream(input.getBytes()));
//XMLUtils.circumventBug2650(doc);
XPathFactory xpf = XPathFactory.newInstance();
XPath xpath = xpf.newXPath();
xpath.setNamespaceContext(new DSNamespaceContext());
String expression = "//ds:Base64";
Node base64Node =
(Node) xpath.evaluate(expression, doc, XPathConstants.NODE);
XMLSignatureInput xmlinput = new XMLSignatureInput(base64Node);
Document doc2 = TransformBase64DecodeTest.createDocument();
Transforms t = new Transforms(doc2);
doc2.appendChild(t.getElement());
t.addTransform(Transforms.TRANSFORM_BASE64_DECODE);
XMLSignatureInput out = t.performTransforms(xmlinput);
String result = new String(out.getBytes());
assertTrue(
"\"" + result + "\"",
result.equals("The URI of the transform is http://www.w3.org/2000/09/xmldsig#base64")
);
}
private static Document createDocument() throws ParserConfigurationException {
DocumentBuilder db = XMLUtils.createDocumentBuilder(false);
Document doc = db.newDocument();
if (doc == null) {
throw new RuntimeException("Could not create a Document");
} else {
log.debug("I could create the Document");
}
return doc;
}
}
| Legostaev/xmlsec-gost | src/test/java/org/apache/xml/security/test/dom/transforms/implementations/TransformBase64DecodeTest.java | Java | apache-2.0 | 5,574 |
/*
* Copyright (C) 2010 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.zzn.aeassistant.zxing.decoding;
import android.app.Activity;
import android.content.DialogInterface;
/**
* Simple listener used to exit the app in a few cases.
*
* @author Sean Owen
*/
public final class FinishListener
implements DialogInterface.OnClickListener, DialogInterface.OnCancelListener, Runnable {
private final Activity activityToFinish;
public FinishListener(Activity activityToFinish) {
this.activityToFinish = activityToFinish;
}
@Override
public void onCancel(DialogInterface dialogInterface) {
run();
}
@Override
public void onClick(DialogInterface dialogInterface, int i) {
run();
}
@Override
public void run() {
activityToFinish.finish();
}
}
| ShawnDongAi/AEASSISTANT | AEAssistant/src/com/zzn/aeassistant/zxing/decoding/FinishListener.java | Java | apache-2.0 | 1,311 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* $Header:$
*/
package org.apache.beehive.netui.util;
import java.util.Map;
import java.util.List;
import java.lang.reflect.Array;
import org.apache.beehive.netui.util.logging.Logger;
/**
* This class is used by NetUI tags that use parameters.
*/
public class ParamHelper
{
private static final Logger logger = Logger.getInstance(ParamHelper.class);
/**
* Add a new parameter or update an existing parameter's list of values.
* <p/>
* <em>Implementation Note:</em> in the case that a Map was provided for
     * the <code>value</code> parameter, this method returns without doing
     * anything; in any other case, params is updated (even if
     * <code>value</code> is null).
* </p>
* <p/>
* If value is some object (not an array or list), the string
* representation of that object is added as a value for name. If the
* value is a list (or array) of objects, then the string representation
* of each element is added as a value for name. When there are multiple
* values for a name, then an array of Strings is used in Map.
* </p>
*
* @param params an existing Map of names and values to update
* @param name the name of the parameter to add or update
* @param value an item or list of items to put into the map
     * @throws IllegalArgumentException in the case that either the params
     *                                  or name given was null
*/
public static void addParam(Map params, String name, Object value)
{
if (params == null)
throw new IllegalArgumentException("Parameter map cannot be null");
if (name == null)
throw new IllegalArgumentException("Parameter name cannot be null");
if (value instanceof Map) {
logger.warn(Bundle.getString("Tags_BadParameterType", name));
return;
}
if (value == null)
value = "";
// check to see if we are adding a new element
// or if this is an existing element
Object o = params.get(name);
int length = 0;
if (o != null) {
assert (o instanceof String ||
o instanceof String[]);
if (o.getClass().isArray()) {
length = Array.getLength(o);
}
else {
length++;
}
}
// check how much size the output needs to be
if (value.getClass().isArray()) {
length += Array.getLength(value);
}
else if (value instanceof List) {
length += ((List) value).size();
}
else {
length++;
}
if (length == 0)
return;
//System.err.println("Number of vaues:" + length);
// if there is only a single value push it to the parameter table
if (length == 1) {
if (value.getClass().isArray()) {
Object val = Array.get(value, 0);
if (val != null)
params.put(name,val.toString());
else
params.put(name,"");
}
else if (value instanceof List) {
List list = (List) value;
Object val = list.get(0);
if (val != null)
params.put(name,val.toString());
else
params.put(name,"");
}
else
params.put(name,value.toString());
return;
}
// allocate the string for the multiple values
String[] values = new String[length];
int offset = 0;
// if we had old values, push them to the new array
if (o != null) {
if (o.getClass().isArray()) {
String[] obs = (String[]) o;
for (;offset<obs.length;offset++) {
values[offset] = obs[offset];
}
}
else {
values[0] = o.toString();
offset = 1;
}
}
// now move the new values to the array starting at the offset
// position
if (value.getClass().isArray())
{
//need to convert this array into a String[]
int size = Array.getLength(value);
for (int i=0; i < size; i++)
{
Object val = Array.get(value, i);
if (val != null)
values[i+offset] = val.toString();
else
values[i+offset] = "";
}
}
else if (value instanceof List)
{
List list = (List) value;
int size = list.size();
for (int i=0; i < size; i++)
{
if (list.get(i) != null)
values[i+offset] = list.get(i).toString();
else
values[i+offset] = "";
}
}
else {
values[offset] = value.toString();
}
// store the new values array
params.put(name, values);
}
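    // Illustrative behaviour of addParam:
    //   Map params = new HashMap();
    //   ParamHelper.addParam(params, "id", "1");                     // params.get("id") -> "1"
    //   ParamHelper.addParam(params, "id", new String[]{"2", "3"});  // params.get("id") -> {"1", "2", "3"}
    // A single value is stored as a String; accumulated multiple values are stored as a String[].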
}
| moparisthebest/beehive | beehive-netui-core/src/main/java/org/apache/beehive/netui/util/ParamHelper.java | Java | apache-2.0 | 5,988 |
package org.jboss.resteasy.spi;
import org.jboss.resteasy.specimpl.MultivaluedMapImpl;
import org.jboss.resteasy.specimpl.PathSegmentImpl;
import org.jboss.resteasy.specimpl.ResteasyUriBuilder;
import org.jboss.resteasy.util.Encode;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.PathSegment;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.UriInfo;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.List;
/**
* UriInfo implementation with some added extra methods to help process requests
*
* @author <a href="mailto:bill@burkecentral.com">Bill Burke</a>
* @version $Revision: 1 $
*/
public class ResteasyUriInfo implements UriInfo
{
private String path;
private String encodedPath;
private String matchingPath;
private MultivaluedMap<String, String> queryParameters;
private MultivaluedMap<String, String> encodedQueryParameters;
private MultivaluedMap<String, String> pathParameters;
private MultivaluedMap<String, String> encodedPathParameters;
private MultivaluedMap<String, PathSegment[]> pathParameterPathSegments;
private MultivaluedMap<String, PathSegment[]> encodedPathParameterPathSegments;
private List<PathSegment> pathSegments;
private List<PathSegment> encodedPathSegments;
private URI absolutePath;
private URI requestURI;
private URI baseURI;
private List<String> matchedUris;
private List<String> encodedMatchedUris;
private List<String> encodedMatchedPaths = new ArrayList<String>();
private List<Object> ancestors;
public ResteasyUriInfo(URI base, URI relative)
{
String b = base.toString();
if (!b.endsWith("/")) b += "/";
String r = relative.getRawPath();
if (r.startsWith("/"))
{
encodedPath = r;
path = relative.getPath();
}
else
{
encodedPath = "/" + r;
path = "/" + relative.getPath();
}
requestURI = UriBuilder.fromUri(base).path(relative.getRawPath()).replaceQuery(relative.getRawQuery()).build();
baseURI = base;
encodedPathSegments = PathSegmentImpl.parseSegments(encodedPath, false);
this.pathSegments = new ArrayList<PathSegment>(encodedPathSegments.size());
for (PathSegment segment : encodedPathSegments)
{
pathSegments.add(new PathSegmentImpl(((PathSegmentImpl) segment).getOriginal(), true));
}
extractParameters(requestURI.getRawQuery());
extractMatchingPath(encodedPathSegments);
absolutePath = UriBuilder.fromUri(requestURI).replaceQuery(null).build();
}
public ResteasyUriInfo(URI requestURI)
{
String r = requestURI.getRawPath();
if (r.startsWith("/"))
{
encodedPath = r;
path = requestURI.getPath();
}
else
{
encodedPath = "/" + r;
path = "/" + requestURI.getPath();
}
this.requestURI = requestURI;
baseURI = UriBuilder.fromUri(requestURI).replacePath("").build();
encodedPathSegments = PathSegmentImpl.parseSegments(encodedPath, false);
this.pathSegments = new ArrayList<PathSegment>(encodedPathSegments.size());
for (PathSegment segment : encodedPathSegments)
{
pathSegments.add(new PathSegmentImpl(((PathSegmentImpl) segment).getOriginal(), true));
}
extractParameters(requestURI.getRawQuery());
extractMatchingPath(encodedPathSegments);
absolutePath = UriBuilder.fromUri(requestURI).replaceQuery(null).build();
}
/**
    * Builds the matching path (without matrix parameters) from the given segments.
    *
    * @param encodedPathSegments the encoded path segments to join
*/
protected void extractMatchingPath(List<PathSegment> encodedPathSegments)
{
StringBuilder preprocessedPath = new StringBuilder();
for (PathSegment pathSegment : encodedPathSegments)
{
preprocessedPath.append("/").append(pathSegment.getPath());
}
matchingPath = preprocessedPath.toString();
}
/**
* Encoded path without matrix parameters
*
    * @return the encoded path without matrix parameters
*/
public String getMatchingPath()
{
return matchingPath;
}
/**
* Create a UriInfo from the baseURI
*
    * @param relative the request URI, absolute or relative to the base URI
    * @return a new ResteasyUriInfo for the given request URI
*/
public ResteasyUriInfo setRequestUri(URI relative)
{
String rel = relative.toString();
if (rel.startsWith(baseURI.toString()))
{
relative = URI.create(rel.substring(baseURI.toString().length()));
}
return new ResteasyUriInfo(baseURI, relative);
}
public String getPath()
{
return path;
}
public String getPath(boolean decode)
{
if (decode) return getPath();
return encodedPath;
}
public List<PathSegment> getPathSegments()
{
return pathSegments;
}
public List<PathSegment> getPathSegments(boolean decode)
{
if (decode) return getPathSegments();
return encodedPathSegments;
}
public URI getRequestUri()
{
return requestURI;
}
public UriBuilder getRequestUriBuilder()
{
return UriBuilder.fromUri(requestURI);
}
public URI getAbsolutePath()
{
return absolutePath;
}
public UriBuilder getAbsolutePathBuilder()
{
return UriBuilder.fromUri(absolutePath);
}
public URI getBaseUri()
{
return baseURI;
}
public UriBuilder getBaseUriBuilder()
{
return UriBuilder.fromUri(baseURI);
}
public MultivaluedMap<String, String> getPathParameters()
{
if (pathParameters == null)
{
pathParameters = new MultivaluedMapImpl<String, String>();
}
return pathParameters;
}
public void addEncodedPathParameter(String name, String value)
{
getEncodedPathParameters().add(name, value);
String value1 = Encode.decodePath(value);
getPathParameters().add(name, value1);
}
private MultivaluedMap<String, String> getEncodedPathParameters()
{
if (encodedPathParameters == null)
{
encodedPathParameters = new MultivaluedMapImpl<String, String>();
}
return encodedPathParameters;
}
public MultivaluedMap<String, PathSegment[]> getEncodedPathParameterPathSegments()
{
if (encodedPathParameterPathSegments == null)
{
encodedPathParameterPathSegments = new MultivaluedMapImpl<String, PathSegment[]>();
}
return encodedPathParameterPathSegments;
}
public MultivaluedMap<String, PathSegment[]> getPathParameterPathSegments()
{
if (pathParameterPathSegments == null)
{
pathParameterPathSegments = new MultivaluedMapImpl<String, PathSegment[]>();
}
return pathParameterPathSegments;
}
public MultivaluedMap<String, String> getPathParameters(boolean decode)
{
if (decode) return getPathParameters();
return getEncodedPathParameters();
}
public MultivaluedMap<String, String> getQueryParameters()
{
if (queryParameters == null)
{
queryParameters = new MultivaluedMapImpl<String, String>();
}
return queryParameters;
}
protected MultivaluedMap<String, String> getEncodedQueryParameters()
{
if (encodedQueryParameters == null)
{
this.encodedQueryParameters = new MultivaluedMapImpl<String, String>();
}
return encodedQueryParameters;
}
public MultivaluedMap<String, String> getQueryParameters(boolean decode)
{
if (decode) return getQueryParameters();
else return getEncodedQueryParameters();
}
protected void extractParameters(String queryString)
{
if (queryString == null || queryString.equals("")) return;
String[] params = queryString.split("&");
for (String param : params)
{
if (param.indexOf('=') >= 0)
{
String[] nv = param.split("=", 2);
try
{
String name = URLDecoder.decode(nv[0], "UTF-8");
String val = nv.length > 1 ? nv[1] : "";
getEncodedQueryParameters().add(name, val);
getQueryParameters().add(name, URLDecoder.decode(val, "UTF-8"));
}
catch (UnsupportedEncodingException e)
{
throw new RuntimeException(e);
}
}
else
{
try
{
String name = URLDecoder.decode(param, "UTF-8");
getEncodedQueryParameters().add(name, "");
getQueryParameters().add(name, "");
}
catch (UnsupportedEncodingException e)
{
throw new RuntimeException(e);
}
}
}
}
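   // Example of the parsing above (illustrative): a raw query string of "a=1&b=%20&c" yields
   // encoded parameters {a=[1], b=[%20], c=[""]} and decoded parameters {a=[1], b=[" "], c=[""]}.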
public List<String> getMatchedURIs(boolean decode)
{
if (decode)
{
if (matchedUris == null) matchedUris = new ArrayList<String>();
return matchedUris;
}
else
{
if (encodedMatchedUris == null) encodedMatchedUris = new ArrayList<String>();
return encodedMatchedUris;
}
}
public List<String> getMatchedURIs()
{
return getMatchedURIs(true);
}
public List<Object> getMatchedResources()
{
if (ancestors == null) ancestors = new ArrayList<Object>();
return ancestors;
}
public void pushCurrentResource(Object resource)
{
if (ancestors == null) ancestors = new ArrayList<Object>();
ancestors.add(0, resource);
}
public void pushMatchedPath(String encoded)
{
encodedMatchedPaths.add(0, encoded);
}
public List<String> getEncodedMatchedPaths()
{
return encodedMatchedPaths;
}
public void popMatchedPath()
{
encodedMatchedPaths.remove(0);
}
public void pushMatchedURI(String encoded)
{
if (encoded.endsWith("/")) encoded = encoded.substring(0, encoded.length() - 1);
if (encoded.startsWith("/")) encoded = encoded.substring(1);
String decoded = Encode.decode(encoded);
if (encodedMatchedUris == null) encodedMatchedUris = new ArrayList<String>();
encodedMatchedUris.add(0, encoded);
if (matchedUris == null) matchedUris = new ArrayList<String>();
matchedUris.add(0, decoded);
}
@Override
public URI resolve(URI uri)
{
return getBaseUri().resolve(uri);
}
@Override
public URI relativize(URI uri)
{
URI from = getRequestUri();
URI to = uri;
if (uri.getScheme() == null && uri.getHost() == null)
{
to = getBaseUriBuilder().replaceQuery(null).path(uri.getPath()).replaceQuery(uri.getQuery()).fragment(uri.getFragment()).build();
}
return ResteasyUriBuilder.relativize(from, to);
}
}
| raphaelning/resteasy-client-android | jaxrs/resteasy-jaxrs/src/main/java/org/jboss/resteasy/spi/ResteasyUriInfo.java | Java | apache-2.0 | 10,699 |
package com.cabinetms.client;
import java.util.List;
import com.google.common.collect.Lists;
public class TacticMediaCommand {
    private String command; // command to execute
    private String clientIp; // terminal IP address
    private String destination; // terminal queue destination
    private Integer startDate;// tactic start date
    private Integer endDate;// tactic end date
private List<TacticDetailMediaCommand> detailList = Lists.newLinkedList();
public List<TacticDetailMediaCommand> getDetailList() {
return detailList;
}
public void setDetailList(List<TacticDetailMediaCommand> detailList) {
this.detailList = detailList;
}
public String getCommand() {
return command;
}
public void setCommand(String command) {
this.command = command;
}
public String getClientIp() {
return clientIp;
}
public void setClientIp(String clientIp) {
this.clientIp = clientIp;
}
public String getDestination() {
return destination;
}
public void setDestination(String destination) {
this.destination = destination;
}
public Integer getStartDate() {
return startDate;
}
public void setStartDate(Integer startDate) {
this.startDate = startDate;
}
public Integer getEndDate() {
return endDate;
}
public void setEndDate(Integer endDate) {
this.endDate = endDate;
}
}
| wangning82/CabinetMS | src/main/java/com/cabinetms/client/TacticMediaCommand.java | Java | apache-2.0 | 1,292 |
package com.jason.showcase.lambdas;
/**
* Created by Qinjianf on 2016/7/19.
*/
public class Lambda {
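    // Note: the Action type is not defined in this file; it is presumably a functional interface
    // along the lines of
    //   interface Action { void run(String message); }
    // which is what makes the System.out::println method reference in test() valid.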
public void execute(Action action) {
action.run("Hello Lambda!");
}
public void test() {
execute(System.out::println);
}
public static void main(String[] args) {
new Lambda().test();
}
}
| fuyongde/jason | showcase/src/main/java/com/jason/showcase/lambdas/Lambda.java | Java | apache-2.0 | 342 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.oozie.action.hadoop;
import com.google.common.annotations.VisibleForTesting;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang.StringUtils;
import org.apache.directory.api.util.Strings;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.StringReader;
import java.io.Writer;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Pattern;
import static org.apache.oozie.action.hadoop.SparkActionExecutor.SPARK_DEFAULT_OPTS;
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "Properties file should be specified by user")
class SparkArgsExtractor {
private static final Pattern SPARK_DEFAULTS_FILE_PATTERN = Pattern.compile("spark-defaults.conf");
private static final String FILES_OPTION = "--files";
private static final String ARCHIVES_OPTION = "--archives";
private static final String LOG4J_CONFIGURATION_JAVA_OPTION = "-Dlog4j.configuration=";
private static final String SECURITY_TOKENS_HADOOPFS = "spark.yarn.security.tokens.hadoopfs.enabled";
private static final String SECURITY_TOKENS_HIVE = "spark.yarn.security.tokens.hive.enabled";
private static final String SECURITY_TOKENS_HBASE = "spark.yarn.security.tokens.hbase.enabled";
private static final String SECURITY_CREDENTIALS_HADOOPFS = "spark.yarn.security.credentials.hadoopfs.enabled";
private static final String SECURITY_CREDENTIALS_HIVE = "spark.yarn.security.credentials.hive.enabled";
private static final String SECURITY_CREDENTIALS_HBASE = "spark.yarn.security.credentials.hbase.enabled";
private static final String PWD = "$PWD" + File.separator + "*";
private static final String MASTER_OPTION = "--master";
private static final String MODE_OPTION = "--deploy-mode";
private static final String JOB_NAME_OPTION = "--name";
private static final String CLASS_NAME_OPTION = "--class";
private static final String VERBOSE_OPTION = "--verbose";
private static final String DRIVER_CLASSPATH_OPTION = "--driver-class-path";
private static final String EXECUTOR_CLASSPATH = "spark.executor.extraClassPath=";
private static final String DRIVER_CLASSPATH = "spark.driver.extraClassPath=";
private static final String EXECUTOR_EXTRA_JAVA_OPTIONS = "spark.executor.extraJavaOptions=";
private static final String DRIVER_EXTRA_JAVA_OPTIONS = "spark.driver.extraJavaOptions=";
private static final Pattern SPARK_VERSION_1 = Pattern.compile("^1.*");
private static final String SPARK_YARN_JAR = "spark.yarn.jar";
private static final String SPARK_YARN_JARS = "spark.yarn.jars";
private static final String OPT_SEPARATOR = "=";
private static final String OPT_VALUE_SEPARATOR = ",";
private static final String CONF_OPTION = "--conf";
private static final String MASTER_OPTION_YARN_CLUSTER = "yarn-cluster";
private static final String MASTER_OPTION_YARN_CLIENT = "yarn-client";
private static final String MASTER_OPTION_YARN = "yarn";
private static final String DEPLOY_MODE_CLUSTER = "cluster";
private static final String DEPLOY_MODE_CLIENT = "client";
private static final String SPARK_YARN_TAGS = "spark.yarn.tags";
private static final String OPT_PROPERTIES_FILE = "--properties-file";
public static final String SPARK_DEFAULTS_GENERATED_PROPERTIES = "spark-defaults-oozie-generated.properties";
private boolean pySpark = false;
private final Configuration actionConf;
SparkArgsExtractor(final Configuration actionConf) {
this.actionConf = actionConf;
}
boolean isPySpark() {
return pySpark;
}
List<String> extract(final String[] mainArgs) throws OozieActionConfiguratorException, IOException, URISyntaxException {
final List<String> sparkArgs = new ArrayList<>();
sparkArgs.add(MASTER_OPTION);
final String master = actionConf.get(SparkActionExecutor.SPARK_MASTER);
sparkArgs.add(master);
// In local mode, everything runs here in the Launcher Job.
// In yarn-client mode, the driver runs here in the Launcher Job and the
// executor in Yarn.
// In yarn-cluster mode, the driver and executor run in Yarn.
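        // For example (illustrative): master=yarn with deploy-mode=cluster is treated like the legacy
        // yarn-cluster master, and master=yarn with deploy-mode=client like yarn-client; see the
        // yarnClusterMode / yarnClientMode flags computed below.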
final String sparkDeployMode = actionConf.get(SparkActionExecutor.SPARK_MODE);
if (sparkDeployMode != null) {
sparkArgs.add(MODE_OPTION);
sparkArgs.add(sparkDeployMode);
}
final boolean yarnClusterMode = master.equals(MASTER_OPTION_YARN_CLUSTER)
|| (master.equals(MASTER_OPTION_YARN) && sparkDeployMode != null && sparkDeployMode.equals(DEPLOY_MODE_CLUSTER));
final boolean yarnClientMode = master.equals(MASTER_OPTION_YARN_CLIENT)
|| (master.equals(MASTER_OPTION_YARN) && sparkDeployMode != null && sparkDeployMode.equals(DEPLOY_MODE_CLIENT));
sparkArgs.add(JOB_NAME_OPTION);
sparkArgs.add(actionConf.get(SparkActionExecutor.SPARK_JOB_NAME));
final String className = actionConf.get(SparkActionExecutor.SPARK_CLASS);
if (className != null) {
sparkArgs.add(CLASS_NAME_OPTION);
sparkArgs.add(className);
}
appendOoziePropertiesToSparkConf(sparkArgs);
String jarPath = actionConf.get(SparkActionExecutor.SPARK_JAR);
if (jarPath != null && jarPath.endsWith(".py")) {
pySpark = true;
}
boolean addedSecurityTokensHadoopFS = false;
boolean addedSecurityTokensHive = false;
boolean addedSecurityTokensHBase = false;
boolean addedSecurityCredentialsHadoopFS = false;
boolean addedSecurityCredentialsHive = false;
boolean addedSecurityCredentialsHBase = false;
boolean addedLog4jDriverSettings = false;
boolean addedLog4jExecutorSettings = false;
final StringBuilder driverClassPath = new StringBuilder();
final StringBuilder executorClassPath = new StringBuilder();
final StringBuilder userFiles = new StringBuilder();
final StringBuilder userArchives = new StringBuilder();
final String sparkOpts = actionConf.get(SparkActionExecutor.SPARK_OPTS);
String propertiesFile = null;
if (StringUtils.isNotEmpty(sparkOpts)) {
final List<String> sparkOptions = SparkOptionsSplitter.splitSparkOpts(sparkOpts);
for (int i = 0; i < sparkOptions.size(); i++) {
String opt = sparkOptions.get(i);
boolean addToSparkArgs = true;
if (yarnClusterMode || yarnClientMode) {
if (opt.startsWith(EXECUTOR_CLASSPATH)) {
appendWithPathSeparator(opt.substring(EXECUTOR_CLASSPATH.length()), executorClassPath);
addToSparkArgs = false;
}
if (opt.startsWith(DRIVER_CLASSPATH)) {
appendWithPathSeparator(opt.substring(DRIVER_CLASSPATH.length()), driverClassPath);
addToSparkArgs = false;
}
if (opt.equals(DRIVER_CLASSPATH_OPTION)) {
// we need the next element after this option
appendWithPathSeparator(sparkOptions.get(i + 1), driverClassPath);
// increase i to skip the next element.
i++;
addToSparkArgs = false;
}
}
if (opt.startsWith(SECURITY_TOKENS_HADOOPFS)) {
addedSecurityTokensHadoopFS = true;
}
if (opt.startsWith(SECURITY_TOKENS_HIVE)) {
addedSecurityTokensHive = true;
}
if (opt.startsWith(SECURITY_TOKENS_HBASE)) {
addedSecurityTokensHBase = true;
}
if (opt.startsWith(SECURITY_CREDENTIALS_HADOOPFS)) {
addedSecurityCredentialsHadoopFS = true;
}
if (opt.startsWith(SECURITY_CREDENTIALS_HIVE)) {
addedSecurityCredentialsHive = true;
}
if (opt.startsWith(SECURITY_CREDENTIALS_HBASE)) {
addedSecurityCredentialsHBase = true;
}
if (opt.startsWith(OPT_PROPERTIES_FILE)){
i++;
propertiesFile = sparkOptions.get(i);
addToSparkArgs = false;
}
if (opt.startsWith(EXECUTOR_EXTRA_JAVA_OPTIONS) || opt.startsWith(DRIVER_EXTRA_JAVA_OPTIONS)) {
if (!opt.contains(LOG4J_CONFIGURATION_JAVA_OPTION)) {
opt += " " + LOG4J_CONFIGURATION_JAVA_OPTION + SparkMain.SPARK_LOG4J_PROPS;
} else {
System.out.println("Warning: Spark Log4J settings are overwritten." +
" Child job IDs may not be available");
}
if (opt.startsWith(EXECUTOR_EXTRA_JAVA_OPTIONS)) {
addedLog4jExecutorSettings = true;
} else {
addedLog4jDriverSettings = true;
}
}
if (opt.startsWith(FILES_OPTION)) {
final String userFile;
if (opt.contains(OPT_SEPARATOR)) {
userFile = opt.substring(opt.indexOf(OPT_SEPARATOR) + OPT_SEPARATOR.length());
}
else {
userFile = sparkOptions.get(i + 1);
i++;
}
if (userFiles.length() > 0) {
userFiles.append(OPT_VALUE_SEPARATOR);
}
userFiles.append(userFile);
addToSparkArgs = false;
}
if (opt.startsWith(ARCHIVES_OPTION)) {
final String userArchive;
if (opt.contains(OPT_SEPARATOR)) {
userArchive = opt.substring(opt.indexOf(OPT_SEPARATOR) + OPT_SEPARATOR.length());
}
else {
userArchive = sparkOptions.get(i + 1);
i++;
}
if (userArchives.length() > 0) {
userArchives.append(OPT_VALUE_SEPARATOR);
}
userArchives.append(userArchive);
addToSparkArgs = false;
}
if (addToSparkArgs) {
sparkArgs.add(opt);
}
else if (sparkArgs.get(sparkArgs.size() - 1).equals(CONF_OPTION)) {
sparkArgs.remove(sparkArgs.size() - 1);
}
}
}
if ((yarnClusterMode || yarnClientMode)) {
// Include the current working directory (of executor container)
// in executor classpath, because it will contain localized
// files
appendWithPathSeparator(PWD, executorClassPath);
appendWithPathSeparator(PWD, driverClassPath);
sparkArgs.add(CONF_OPTION);
sparkArgs.add(EXECUTOR_CLASSPATH + executorClassPath.toString());
sparkArgs.add(CONF_OPTION);
sparkArgs.add(DRIVER_CLASSPATH + driverClassPath.toString());
}
if (actionConf.get(LauncherMain.MAPREDUCE_JOB_TAGS) != null) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SPARK_YARN_TAGS + OPT_SEPARATOR + actionConf.get(LauncherMain.MAPREDUCE_JOB_TAGS));
}
if (!addedSecurityTokensHadoopFS) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_TOKENS_HADOOPFS + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityTokensHive) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_TOKENS_HIVE + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityTokensHBase) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_TOKENS_HBASE + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityCredentialsHadoopFS) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_CREDENTIALS_HADOOPFS + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityCredentialsHive) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_CREDENTIALS_HIVE + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedSecurityCredentialsHBase) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SECURITY_CREDENTIALS_HBASE + OPT_SEPARATOR + Boolean.toString(false));
}
if (!addedLog4jExecutorSettings) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(EXECUTOR_EXTRA_JAVA_OPTIONS + LOG4J_CONFIGURATION_JAVA_OPTION + SparkMain.SPARK_LOG4J_PROPS);
}
if (!addedLog4jDriverSettings) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(DRIVER_EXTRA_JAVA_OPTIONS + LOG4J_CONFIGURATION_JAVA_OPTION + SparkMain.SPARK_LOG4J_PROPS);
}
mergeAndAddPropertiesFile(sparkArgs, propertiesFile);
if ((yarnClusterMode || yarnClientMode)) {
final Map<String, URI> fixedFileUrisMap =
SparkMain.fixFsDefaultUrisAndFilterDuplicates(DistributedCache.getCacheFiles(actionConf));
fixedFileUrisMap.put(SparkMain.SPARK_LOG4J_PROPS, new Path(SparkMain.SPARK_LOG4J_PROPS).toUri());
fixedFileUrisMap.put(SparkMain.HIVE_SITE_CONF, new Path(SparkMain.HIVE_SITE_CONF).toUri());
addUserDefined(userFiles.toString(), fixedFileUrisMap);
final Collection<URI> fixedFileUris = fixedFileUrisMap.values();
final JarFilter jarFilter = new JarFilter(fixedFileUris, jarPath);
jarFilter.filter();
jarPath = jarFilter.getApplicationJar();
final String cachedFiles = StringUtils.join(fixedFileUris, OPT_VALUE_SEPARATOR);
if (cachedFiles != null && !cachedFiles.isEmpty()) {
sparkArgs.add(FILES_OPTION);
sparkArgs.add(cachedFiles);
}
final Map<String, URI> fixedArchiveUrisMap = SparkMain.fixFsDefaultUrisAndFilterDuplicates(DistributedCache.
getCacheArchives(actionConf));
addUserDefined(userArchives.toString(), fixedArchiveUrisMap);
final String cachedArchives = StringUtils.join(fixedArchiveUrisMap.values(), OPT_VALUE_SEPARATOR);
if (cachedArchives != null && !cachedArchives.isEmpty()) {
sparkArgs.add(ARCHIVES_OPTION);
sparkArgs.add(cachedArchives);
}
setSparkYarnJarsConf(sparkArgs, jarFilter.getSparkYarnJar(), jarFilter.getSparkVersion());
}
if (!sparkArgs.contains(VERBOSE_OPTION)) {
sparkArgs.add(VERBOSE_OPTION);
}
sparkArgs.add(jarPath);
sparkArgs.addAll(Arrays.asList(mainArgs));
return sparkArgs;
}
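/**
 * Merges Spark properties from up to three sources and, if any were found, persists them to a generated
 * properties file and appends the properties-file option pointing at it to the Spark arguments. Properties
 * are loaded in this order, with later sources overwriting earlier keys: defaults propagated from the Oozie
 * server, a localized spark-defaults file, and finally the user-defined properties file captured from the
 * Spark options (if one was supplied).
 */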
private void mergeAndAddPropertiesFile(final List<String> sparkArgs, final String userDefinedPropertiesFile)
throws IOException {
final Properties properties = new Properties();
loadServerDefaultProperties(properties);
loadLocalizedDefaultPropertiesFile(properties);
loadUserDefinedPropertiesFile(userDefinedPropertiesFile, properties);
final boolean persisted = persistMergedProperties(properties);
if (persisted) {
sparkArgs.add(OPT_PROPERTIES_FILE);
sparkArgs.add(SPARK_DEFAULTS_GENERATED_PROPERTIES);
}
}
private boolean persistMergedProperties(final Properties properties) throws IOException {
if (!properties.isEmpty()) {
try (final Writer writer = new OutputStreamWriter(
new FileOutputStream(new File(SPARK_DEFAULTS_GENERATED_PROPERTIES)),
StandardCharsets.UTF_8.name())) {
properties.store(writer, "Properties file generated by Oozie");
System.out.println(String.format("Persisted merged Spark configs in file %s. Merged properties are: %s",
SPARK_DEFAULTS_GENERATED_PROPERTIES, Arrays.toString(properties.stringPropertyNames().toArray())));
return true;
} catch (IOException e) {
System.err.println(String.format("Could not persist derived Spark config file. Reason: %s", e.getMessage()));
throw e;
}
}
return false;
}
private void loadUserDefinedPropertiesFile(final String userDefinedPropertiesFile, final Properties properties) {
if (userDefinedPropertiesFile != null) {
System.out.println(String.format("Reading Spark config from %s %s...", OPT_PROPERTIES_FILE, userDefinedPropertiesFile));
loadProperties(new File(userDefinedPropertiesFile), properties);
}
}
private void loadLocalizedDefaultPropertiesFile(final Properties properties) {
final File localizedDefaultConfFile = SparkMain.getMatchingFile(SPARK_DEFAULTS_FILE_PATTERN);
if (localizedDefaultConfFile != null) {
System.out.println(String.format("Reading Spark config from file %s...", localizedDefaultConfFile.getName()));
loadProperties(localizedDefaultConfFile, properties);
}
}
private void loadServerDefaultProperties(final Properties properties) {
final String sparkDefaultsFromServer = actionConf.get(SPARK_DEFAULT_OPTS, "");
if (!sparkDefaultsFromServer.isEmpty()) {
System.out.println("Reading Spark config propagated from Oozie server...");
try (final StringReader reader = new StringReader(sparkDefaultsFromServer)) {
properties.load(reader);
} catch (IOException e) {
System.err.println(String.format("Could not read propagated Spark config! Reason: %s", e.getMessage()));
}
}
}
private void loadProperties(final File file, final Properties target) {
try (final Reader reader = new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8.name())) {
final Properties properties = new Properties();
properties.load(reader);
for (String key : properties.stringPropertyNames()) {
Object prevProperty = target.setProperty(key, properties.getProperty(key));
if (prevProperty != null) {
System.out.println(String.format("Value of %s was overwritten from %s", key, file.getName()));
}
}
} catch (IOException e) {
System.err.println(String.format("Could not read Spark configs from file %s. Reason: %s", file.getName(),
e.getMessage()));
}
}
private void appendWithPathSeparator(final String what, final StringBuilder to) {
if (to.length() > 0) {
to.append(File.pathSeparator);
}
to.append(what);
}
private void addUserDefined(final String userList, final Map<String, URI> urisMap) {
if (userList != null) {
for (final String file : userList.split(OPT_VALUE_SEPARATOR)) {
if (!Strings.isEmpty(file)) {
final Path p = new Path(file);
urisMap.put(p.getName(), p.toUri());
}
}
}
}
/*
* Get properties that need to be passed to Spark as Spark configuration from actionConf.
*/
@VisibleForTesting
void appendOoziePropertiesToSparkConf(final List<String> sparkArgs) {
for (final Map.Entry<String, String> oozieConfig : actionConf
.getValByRegex("^oozie\\.(?!launcher|spark).+").entrySet()) {
sparkArgs.add(CONF_OPTION);
sparkArgs.add(String.format("spark.%s=%s", oozieConfig.getKey(), oozieConfig.getValue()));
}
}
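// Illustrative example with a hypothetical value: an actionConf entry such as oozie.job.id=0000123-oozie-W
// matches the regex above (it starts with "oozie." but not with "oozie.launcher" or "oozie.spark") and is
// forwarded to Spark as a configuration entry of the form spark.oozie.job.id=0000123-oozie-W, preceded by
// the CONF_OPTION flag.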
/**
* Sets spark.yarn.jars for Spark 2.X. Sets spark.yarn.jar for Spark 1.X.
*
* @param sparkArgs the Spark argument list to append the configuration to
* @param sparkYarnJar the jar (or jars) value to set for the corresponding property
* @param sparkVersion the Spark version used to decide which property to set
*/
private void setSparkYarnJarsConf(final List<String> sparkArgs, final String sparkYarnJar, final String sparkVersion) {
if (SPARK_VERSION_1.matcher(sparkVersion).find()) {
// In Spark 1.X.X, set spark.yarn.jar to avoid
// multiple distribution
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SPARK_YARN_JAR + OPT_SEPARATOR + sparkYarnJar);
} else {
// In Spark 2.X.X, set spark.yarn.jars
sparkArgs.add(CONF_OPTION);
sparkArgs.add(SPARK_YARN_JARS + OPT_SEPARATOR + sparkYarnJar);
}
}
}
| cbaenziger/oozie | sharelib/spark/src/main/java/org/apache/oozie/action/hadoop/SparkArgsExtractor.java | Java | apache-2.0 | 22,247 |
package com.common.dao;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.logging.Logger;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import javax.persistence.PersistenceException;
/**
 * The BaseDAO class implements the basic DAO data access operations
 * used by the DAO classes of the transaction execution module.
*
* @author Gestorinc S.A.
* @version $Rev $
*/
public class BaseDAO {
/**
 * Constant representing the character '%'.
*/
public static final String SYMBOLO_LIKE = "%";
/**
 * Constant representing the string "'".
*/
public static final String SYMBOLO_APOSTROFE = "'";
/**
 * Audit logger for the class.
*/
protected static final Logger LOGGER = Logger.getLogger(BaseDAO.class.getName());
/**
 * Object that handles the persistence operations.
*/
@PersistenceContext(name = "punit")
private EntityManager em;
/**
 * Default constructor.
*/
public BaseDAO() {
}
/**
 * Returns a reference to the object that handles the persistence
 * operations defined by JPA.
 *
 * @return Reference to the object that handles the persistence operations.
 * If the object has not been initialized, an exception is thrown.
* @see java.lang.IllegalStateException
*/
protected EntityManager getEntityManager() {
if (em == null) {
throw new IllegalStateException(
"EntityManager no ha sido asignado a DAO antes del uso.");
} else {
return em;
}
}
/**
 * Executes a SQL statement, obtaining a database connection referenced
 * by the persistence unit: <b>punit</b>.<br/>
 * Do not use this method to execute SELECT statements.
 *
 * @param sentencia SQL statement to be executed.
*/
public void ejecutarNativo(String sentencia) {
try {
java.sql.Connection connection = em.unwrap(java.sql.Connection.class);
PreparedStatement ps = connection.prepareStatement(sentencia);
ps.execute();
ps.close();
} catch (PersistenceException e) {
LOGGER.info("Error al ejecutar sentencia"+ e.getMessage());
} catch (SQLException e) {
LOGGER.info("Error al ejecutar sentencia"+ e.getMessage());
}
}
/**
 * Wraps a character string in apostrophes.
 *
 * @param cadena the string
 * @return the string wrapped in apostrophes
*/
protected String comillar(String cadena) {
return SYMBOLO_APOSTROFE + cadena + SYMBOLO_APOSTROFE;
}
}
| ServicioReparaciones/ServicioReparaciones | ServicioReparaciones-ejb/src/main/java/com/common/dao/BaseDAO.java | Java | apache-2.0 | 2,856 |
/*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.exec.planner.sql.parser;
import java.util.List;
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlLiteral;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.SqlSpecialOperator;
import org.apache.calcite.sql.SqlWriter;
import org.apache.calcite.sql.parser.SqlParserPos;
import com.dremio.service.namespace.NamespaceKey;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
public class SqlTruncateTable extends SqlCall {
public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("TRUNCATE_TABLE", SqlKind.OTHER_DDL) {
@Override
public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
Preconditions.checkArgument(operands.length == 3, "SqlTruncateTable.createCall() " +
"has to get 3 operands!");
return new SqlTruncateTable(pos, (SqlIdentifier) operands[0], (SqlLiteral) operands[1], (SqlLiteral) operands[2]);
}
};
private SqlIdentifier tableName;
private boolean tableExistenceCheck;
private boolean tableKeywordPresent;
public SqlTruncateTable(SqlParserPos pos, SqlIdentifier tableName, SqlLiteral tableExistenceCheck,
SqlLiteral tableKeywordPresent) {
this(pos, tableName, tableExistenceCheck.booleanValue(), tableKeywordPresent.booleanValue());
}
public SqlTruncateTable(SqlParserPos pos, SqlIdentifier tableName, boolean tableExistenceCheck,
boolean tableKeywordPresent) {
super(pos);
this.tableName = tableName;
this.tableExistenceCheck = tableExistenceCheck;
this.tableKeywordPresent = tableKeywordPresent;
}
@Override
public SqlOperator getOperator() {
return OPERATOR;
}
@Override
public List<SqlNode> getOperandList() {
return ImmutableList.of(
tableName,
SqlLiteral.createBoolean(tableExistenceCheck, SqlParserPos.ZERO),
SqlLiteral.createBoolean(tableKeywordPresent, SqlParserPos.ZERO)
);
}
@Override
public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
writer.keyword("TRUNCATE");
if (tableKeywordPresent) {
writer.keyword("TABLE");
}
if (tableExistenceCheck) {
writer.keyword("IF");
writer.keyword("EXISTS");
}
tableName.unparse(writer, leftPrec, rightPrec);
}
public NamespaceKey getPath() {
return new NamespaceKey(tableName.names);
}
public boolean checkTableExistence() {
return tableExistenceCheck;
}
}
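// Illustrative rendering with a hypothetical table name: for an instance constructed with
// tableKeywordPresent=true and tableExistenceCheck=true for table "events", unparse() emits the keywords
// TRUNCATE, TABLE, IF and EXISTS followed by the table identifier, i.e.:
//   TRUNCATE TABLE IF EXISTS events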
| dremio/dremio-oss | sabot/kernel/src/main/java/com/dremio/exec/planner/sql/parser/SqlTruncateTable.java | Java | apache-2.0 | 3,280 |
/*
* The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
* (the "License"). You may not use this work except in compliance with the License, which is
* available at www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied, as more fully set forth in the License.
*
* See the NOTICE file distributed with this work for information regarding copyright ownership.
*/
package alluxio;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Unit tests for {@link StorageTierAssoc}.
*/
public class StorageTierAssocTest {
private void checkStorageTierAssoc(StorageTierAssoc assoc, PropertyKey levelsProperty,
PropertyKeyFormat aliasFormat) {
int size = Configuration.getInt(levelsProperty);
Assert.assertEquals(size, assoc.size());
List<String> expectedOrderedAliases = new ArrayList<>();
for (int i = 0; i < size; i++) {
String alias = Configuration.get(aliasFormat.format(i));
Assert.assertEquals(i, assoc.getOrdinal(alias));
Assert.assertEquals(alias, assoc.getAlias(i));
expectedOrderedAliases.add(alias);
}
Assert.assertEquals(expectedOrderedAliases, assoc.getOrderedStorageAliases());
}
/**
* Tests the constructors of the {@link MasterStorageTierAssoc} and {@link WorkerStorageTierAssoc}
* classes with a {@link Configuration}.
*/
@Test
public void masterWorkerConfConstructor() {
Configuration.set(PropertyKey.MASTER_TIERED_STORE_GLOBAL_LEVELS, "3");
Configuration.set(
PropertyKeyFormat.MASTER_TIERED_STORE_GLOBAL_LEVEL_ALIAS_FORMAT.format(2), "BOTTOM");
Configuration.set(PropertyKey.WORKER_TIERED_STORE_LEVELS, "2");
Configuration.set(
PropertyKeyFormat.WORKER_TIERED_STORE_LEVEL_ALIAS_FORMAT.format(1), "BOTTOM");
checkStorageTierAssoc(new MasterStorageTierAssoc(),
PropertyKey.MASTER_TIERED_STORE_GLOBAL_LEVELS,
PropertyKeyFormat.MASTER_TIERED_STORE_GLOBAL_LEVEL_ALIAS_FORMAT);
checkStorageTierAssoc(new WorkerStorageTierAssoc(), PropertyKey.WORKER_TIERED_STORE_LEVELS,
PropertyKeyFormat.WORKER_TIERED_STORE_LEVEL_ALIAS_FORMAT);
ConfigurationTestUtils.resetConfiguration();
}
/**
* Tests the constructors of the {@link MasterStorageTierAssoc} and {@link WorkerStorageTierAssoc}
* classes with different storage alias.
*/
@Test
public void storageAliasListConstructor() {
List<String> orderedAliases = Arrays.asList("MEM", "HDD", "SOMETHINGELSE", "SSD");
MasterStorageTierAssoc masterAssoc = new MasterStorageTierAssoc(orderedAliases);
WorkerStorageTierAssoc workerAssoc = new WorkerStorageTierAssoc(orderedAliases);
Assert.assertEquals(orderedAliases.size(), masterAssoc.size());
Assert.assertEquals(orderedAliases.size(), workerAssoc.size());
for (int i = 0; i < orderedAliases.size(); i++) {
String alias = orderedAliases.get(i);
Assert.assertEquals(alias, masterAssoc.getAlias(i));
Assert.assertEquals(i, masterAssoc.getOrdinal(alias));
Assert.assertEquals(alias, workerAssoc.getAlias(i));
Assert.assertEquals(i, workerAssoc.getOrdinal(alias));
}
Assert.assertEquals(orderedAliases, masterAssoc.getOrderedStorageAliases());
Assert.assertEquals(orderedAliases, workerAssoc.getOrderedStorageAliases());
}
}
| bit-zyl/Alluxio-Nvdimm | core/server/src/test/java/alluxio/StorageTierAssocTest.java | Java | apache-2.0 | 3,480 |
/*
* Copyright 2009-2013 by The Regents of the University of California
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* you may obtain a copy of the License from
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.uci.ics.asterix.lexergenerator.rules;
public class RuleAnythingUntil implements Rule {
private char expected;
public RuleAnythingUntil clone() {
return new RuleAnythingUntil(expected);
}
public RuleAnythingUntil(char expected) {
this.expected = expected;
}
@Override
public String toString() {
return " .* " + String.valueOf(expected);
}
@Override
public int hashCode() {
return 10 * (int) expected;
}
@Override
public boolean equals(Object o) {
if (o == null)
return false;
if (o instanceof RuleAnythingUntil) {
if (((RuleAnythingUntil) o).expected == this.expected) {
return true;
}
}
return false;
}
@Override
public String javaAction() {
return "currentChar = readNextChar();";
}
@Override
public String javaMatch(String action) {
return "boolean escaped = false;\n" + "while (currentChar != '" + expected + "' || escaped) {\n"
+ "if(!escaped && currentChar == '\\\\\\\\') {\n" + "escaped = true;\n" + "containsEscapes = true;\n"
+ "} else {\n" + "escaped = false;\n" + "}\n" + "currentChar = readNextChar();\n" + "}\n"
+ "if (currentChar == '" + expected + "') {" + action + "}\n";
}
}
| parshimers/incubator-asterixdb | asterix-maven-plugins/lexer-generator-maven-plugin/src/main/java/edu/uci/ics/asterix/lexergenerator/rules/RuleAnythingUntil.java | Java | apache-2.0 | 2,007 |
package com.netwebx.hackerrank.rpc.client;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetSocketAddress;
import java.net.Socket;
/**
* Created by apple on 2017/2/26.
*/
public class RpcImporter<S> {
public S importer(final Class<?> serviceClass, final InetSocketAddress addr) {
return (S) Proxy.newProxyInstance(
serviceClass.getClassLoader(),
new Class<?>[]{serviceClass.getInterfaces()[0]},
new InvocationHandler() {
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
Socket socket = null;
ObjectOutputStream output = null;
ObjectInputStream input = null;
try {
socket = new Socket();
socket.connect(addr);
output = new ObjectOutputStream(socket.getOutputStream());
output.writeUTF(serviceClass.getName());
output.writeUTF(method.getName());
output.writeObject(method.getParameterTypes());
output.writeObject(args);
input = new ObjectInputStream(socket.getInputStream());
return input.readObject();
} finally {
if (socket != null) {
socket.close();
}
if (output != null) {
output.close();
}
if (input != null) {
input.close();
}
}
}
}
);
}
}
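// A minimal usage sketch, assuming a matching server is listening on localhost:8080 and speaks the same wire
// protocol (class name, method name, parameter types and arguments written via ObjectOutputStream). The
// service interface and implementation below are hypothetical:
//
//   public interface EchoService { String echo(String msg); }
//   public class EchoServiceImpl implements EchoService {
//       public String echo(String msg) { return msg; }
//   }
//
//   RpcImporter<EchoService> importer = new RpcImporter<>();
//   EchoService echo = importer.importer(EchoServiceImpl.class,
//           new InetSocketAddress("localhost", 8080));
//   System.out.println(echo.echo("hello"));
//
// Note that importer() proxies the first interface of the class it receives, which is why the implementation
// class (EchoServiceImpl) rather than the interface itself is passed in.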
| WengJunFeng/hackerrank_java | src/main/java/com/netwebx/hackerrank/rpc/client/RpcImporter.java | Java | apache-2.0 | 2,053 |
package com.oath.cyclops.internal.stream.spliterators.push;
import com.oath.cyclops.types.persistent.PersistentCollection;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
/**
* Created by johnmcclean on 12/01/2017.
*/
public class GroupedByTimeOperator<T,C extends PersistentCollection<? super T>,R> extends BaseOperator<T,R> {
private final Supplier<? extends C> factory;
private final Function<? super C, ? extends R> finalizer;
private final long time;
private final TimeUnit t;
public GroupedByTimeOperator(Operator<T> source, Supplier<? extends C> factory,
Function<? super C, ? extends R> finalizer,long time,
TimeUnit t){
super(source);
this.factory = factory;
this.finalizer = finalizer;
this.time = time;
this.t = t;
}
@Override
public StreamSubscription subscribe(Consumer<? super R> onNext, Consumer<? super Throwable> onError, Runnable onComplete) {
long toRun = t.toNanos(time);
PersistentCollection[] next = {factory.get()};
long[] start ={System.nanoTime()};
StreamSubscription[] upstream = {null};
StreamSubscription sub = new StreamSubscription(){
@Override
public void request(long n) {
if(n<=0) {
onError.accept(new IllegalArgumentException("3.9 While the Subscription is not cancelled, Subscription.request(long n) MUST throw a java.lang.IllegalArgumentException if the argument is <= 0."));
return;
}
if(!isOpen)
return;
super.request(n);
upstream[0].request(n);
}
@Override
public void cancel() {
upstream[0].cancel();
super.cancel();
}
};
upstream[0] = source.subscribe(e-> {
try {
next[0] = next[0].plus(e);
if(System.nanoTime()-start[0] > toRun){
onNext.accept(finalizer.apply((C)next[0]));
sub.requested.decrementAndGet();
next[0] = factory.get();
start[0] = System.nanoTime();
}
else{
request( upstream,1l);
}
} catch (Throwable t) {
onError.accept(t);
}
}
,t->{onError.accept(t);
sub.requested.decrementAndGet();
if(sub.isActive())
request( upstream,1);
},()->{
if(next[0].size()>0) {
try {
onNext.accept(finalizer.apply((C) next[0]));
} catch(Throwable t){
onError.accept(t);
}
sub.requested.decrementAndGet();
}
sub.cancel();
onComplete.run();
});
return sub;
}
@Override
public void subscribeAll(Consumer<? super R> onNext, Consumer<? super Throwable> onError, Runnable onCompleteDs) {
long toRun = t.toNanos(time);
PersistentCollection[] next = {factory.get()};
long[] start ={System.nanoTime()};
source.subscribeAll(e-> {
try {
next[0] = next[0].plus(e);
if(System.nanoTime()-start[0] > toRun){
onNext.accept(finalizer.apply((C)next[0]));
next[0] = factory.get();
start[0] = System.nanoTime();
}
} catch (Throwable t) {
onError.accept(t);
}
}
,onError,()->{
if(next[0].size()>0) {
try {
onNext.accept(finalizer.apply((C) next[0]));
} catch(Throwable t){
onError.accept(t);
}
}
onCompleteDs.run();
});
}
}
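// Behavior note: elements from the source are accumulated into a fresh collection obtained from the factory;
// once more than the configured time (in units of t) has elapsed since the current group was started, the
// finalizer is applied to the group, the result is emitted downstream and a new group is begun. Any non-empty
// trailing group is flushed when the source completes.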
| aol/cyclops | cyclops/src/main/java/com/oath/cyclops/internal/stream/spliterators/push/GroupedByTimeOperator.java | Java | apache-2.0 | 4,567 |
package net.tcp.socket;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
/**
 * The server must be started before clients connect: 1. create the server on a given port with ServerSocket(int port); 2. accept client connections; 3. send and receive data.
*
*/
public class Server {
/**
* @param args
* @throws IOException
*/
public static void main(String[] args) throws IOException {
// 1. Create the server on the given port: ServerSocket(int port)
ServerSocket server = new ServerSocket(8888);
// 2. Accept client connections (blocking call)
while (true) {
Socket socket = server.accept();
System.out.println("一个客户端建立连接");
// 3、发送数据
String msg = "欢迎使用";
// 输出流
/*
* BufferedWriter bw = new BufferedWriter( new OutputStreamWriter(
* socket.getOutputStream()));
*
* bw.write(msg); bw.newLine(); bw.flush();
*/
DataOutputStream dos = new DataOutputStream(socket.getOutputStream());
dos.writeUTF(msg);
dos.flush();
}
}
}
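/**
 * A minimal client sketch, assuming the server above is running locally on port 8888: it connects and reads
 * the UTF welcome message written by Server.main.
 */
class Client {
public static void main(String[] args) throws IOException {
// 1. Connect to the server
Socket socket = new Socket("localhost", 8888);
// 2. Read the message written with DataOutputStream.writeUTF on the server side
java.io.DataInputStream dis = new java.io.DataInputStream(socket.getInputStream());
System.out.println(dis.readUTF());
dis.close();
socket.close();
}
}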
| zhangxx0/Java_Topic_prictice | src/net/tcp/socket/Server.java | Java | apache-2.0 | 1,059 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.management.impl;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Hashtable;
import java.util.List;
import java.util.NoSuchElementException;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import org.neo4j.jmx.ManagementInterface;
/**
* Does not have any public methods - since the public interface of
* {@link org.neo4j.management.Neo4jManager} should be defined completely in
* that class.
*
* Does not have any (direct or transitive) dependencies on any part of the jmx
* component - since this class is used in
* {@link org.neo4j.management.impl.jconsole.Neo4jPlugin the JConsole plugin},
* and the jmx component is not on the class path in JConsole.
*
* @author Tobias Ivarsson <tobias.ivarsson@neotechnology.com>
*/
public abstract class KernelProxy
{
static final String KERNEL_BEAN_TYPE = "org.neo4j.jmx.Kernel";
protected static final String KERNEL_BEAN_NAME = "Kernel";
static final String MBEAN_QUERY = "MBeanQuery";
protected final MBeanServerConnection server;
protected final ObjectName kernel;
protected KernelProxy( MBeanServerConnection server, ObjectName kernel )
{
String className = null;
try
{
className = server.getMBeanInfo( kernel ).getClassName();
}
catch ( Exception e )
{
// fall through
}
if ( !KERNEL_BEAN_TYPE.equals( className ) )
{
throw new IllegalArgumentException(
"The specified ObjectName does not represent a Neo4j Kernel bean in the specified MBean server." );
}
this.server = server;
this.kernel = kernel;
}
protected List<Object> allBeans()
{
List<Object> beans = new ArrayList<Object>();
Iterable<ObjectInstance> mbeans;
try
{
mbeans = server.queryMBeans( mbeanQuery(), null );
}
catch ( IOException handled )
{
return beans;
}
for ( ObjectInstance instance : mbeans )
{
String className = instance.getClassName();
Class<?> beanType = null;
try
{
if ( className != null ) beanType = Class.forName( className );
}
catch ( Exception ignored )
{
// fall through
}
catch ( LinkageError ignored )
{
// fall through
}
if ( beanType != null )
{
try
{
beans.add( BeanProxy.load( server, beanType, instance.getObjectName() ) );
}
catch ( Exception ignored )
{
// fall through
}
}
}
return beans;
}
private ObjectName assertExists( ObjectName name )
{
try
{
if ( !server.queryNames( name, null ).isEmpty() )
{
return name;
}
}
catch ( IOException handled )
{
// fall through
}
throw new NoSuchElementException( "No MBeans matching " + name );
}
protected <T> T getBean( Class<T> beanInterface )
{
return BeanProxy.load( server, beanInterface, createObjectName( beanInterface ) );
}
protected <T> Collection<T> getBeans( Class<T> beanInterface )
{
return BeanProxy.loadAll( server, beanInterface, createObjectNameQuery( beanInterface ) );
}
private ObjectName createObjectNameQuery( Class<?> beanInterface )
{
return createObjectNameQuery( mbeanQuery(), beanInterface );
}
private ObjectName createObjectName( Class<?> beanInterface )
{
return assertExists( createObjectName( mbeanQuery(), beanInterface ) );
}
protected ObjectName createObjectName( String beanName )
{
return assertExists( createObjectName( mbeanQuery(), beanName, false ) );
}
protected ObjectName mbeanQuery()
{
try
{
return (ObjectName) server.getAttribute( kernel, MBEAN_QUERY );
}
catch ( Exception cause )
{
throw new IllegalStateException( "Could not get MBean query.", cause );
}
}
protected static ObjectName createObjectName( String kernelIdentifier, Class<?> beanInterface )
{
return createObjectName( kernelIdentifier, beanName( beanInterface ) );
}
protected static ObjectName createObjectName( String kernelIdentifier, String beanName, String... extraNaming )
{
Hashtable<String, String> properties = new Hashtable<String, String>();
properties.put( "instance", "kernel#" + kernelIdentifier );
return createObjectName( "org.neo4j", properties, beanName, false, extraNaming );
}
static ObjectName createObjectNameQuery( String kernelIdentifier, String beanName, String... extraNaming )
{
Hashtable<String, String> properties = new Hashtable<String, String>();
properties.put( "instance", "kernel#" + kernelIdentifier );
return createObjectName( "org.neo4j", properties, beanName, true, extraNaming );
}
static ObjectName createObjectName( ObjectName query, Class<?> beanInterface )
{
return createObjectName( query, beanName( beanInterface ), false );
}
static ObjectName createObjectNameQuery( ObjectName query, Class<?> beanInterface )
{
return createObjectName( query, beanName( beanInterface ), true );
}
private static ObjectName createObjectName( ObjectName query, String beanName, boolean isQuery )
{
Hashtable<String, String> properties = new Hashtable<String, String>(query.getKeyPropertyList());
return createObjectName( query.getDomain(), properties, beanName, isQuery );
}
static String beanName( Class<?> beanInterface )
{
if ( beanInterface.isInterface() )
{
ManagementInterface management = beanInterface.getAnnotation( ManagementInterface.class );
if ( management != null )
{
return management.name();
}
}
throw new IllegalArgumentException( beanInterface + " is not a Neo4j Management Bean interface" );
}
private static ObjectName createObjectName( String domain, Hashtable<String, String> properties, String beanName,
boolean query, String... extraNaming )
{
properties.put( "name", beanName );
for ( int i = 0; i < extraNaming.length; i++ )
{
properties.put( "name" + i, extraNaming[i] );
}
ObjectName result;
try
{
result = new ObjectName( domain, properties );
if ( query ) result = ObjectName.getInstance( result.toString() + ",*" );
}
catch ( MalformedObjectNameException e )
{
return null;
}
return result;
}
}
| HuangLS/neo4j | advanced/management/src/main/java/org/neo4j/management/impl/KernelProxy.java | Java | apache-2.0 | 8,016 |
package de.mhus.cha.cao.action;
import java.io.File;
import de.mhus.lib.cao.CaoElement;
import de.mhus.lib.cao.CaoException;
import de.mhus.lib.cao.CaoList;
import de.mhus.lib.cao.CaoMonitor;
import de.mhus.lib.cao.CaoOperation;
import de.mhus.cap.core.Access;
import de.mhus.cha.cao.ChaConnection;
import de.mhus.cha.cao.ChaElement;
import de.mhus.lib.MFile;
import de.mhus.lib.form.MForm;
import de.mhus.lib.form.annotations.FormElement;
import de.mhus.lib.form.annotations.FormSortId;
@FormElement("name='cha_copy_to_folder' title='Copy'")
public class CopyToOperation extends CaoOperation implements MForm {
private CaoList<Access> sources;
private ChaElement target;
private ChaConnection connection;
public CopyToOperation(ChaElement element) {
target = element;
}
@Override
public void dispose() throws CaoException {
}
@Override
public void execute() throws CaoException {
connection = (ChaConnection)target.getConnection();
//collect all affected entries
monitor.beginTask("count", CaoMonitor.UNKNOWN);
int cnt = 0;
for (CaoElement<Access> element : sources.getElements()) {
cnt = count( ((ChaElement)element).getFile(), cnt );
}
monitor.beginTask("copy", cnt);
cnt = 0;
for (CaoElement<Access> element : sources.getElements()) {
cnt = copy( target.getFile(), ((ChaElement)element).getFile(), cnt );
}
}
private int copy(File target, File file, int cnt) {
// validate action
if (monitor.isCanceled()) return cnt;
if ( !file.isDirectory()) return cnt; // for secure
// new path
File newTarget = null;
cnt++;
monitor.worked(cnt);
newTarget = new File(target,connection.createUID());
monitor.log().debug("Create Dir: " + newTarget.getAbsolutePath());
monitor.subTask(file.getAbsolutePath());
// validate path
if ( newTarget.exists() ) {
monitor.log().warn("Folder already exists: " + newTarget.getAbsolutePath());
return cnt;
}
// create
if ( ! newTarget.mkdir() ) {
newTarget = null;
monitor.log().warn("Can't create folder: " + target.getAbsolutePath() + "/" + file.getName());
return cnt;
}
// set id
connection.addIdPath(newTarget.getName(), newTarget.getAbsolutePath());
// events
connection.fireElementCreated(newTarget.getName());
connection.fireElementLink(target.getName(), newTarget.getName());
// copy files
for ( File sub : file.listFiles()) {
if (sub.isFile()) {
monitor.log().debug("Copy File: " + file.getAbsolutePath());
File targetFile = new File(target,file.getName());
if (targetFile.exists()) {
monitor.log().warn("Can't overwrite file: " + file.getAbsolutePath());
} else
if ( !MFile.copyFile(file, targetFile) ) {
monitor.log().warn("Can't copy file: " + file.getAbsolutePath());
}
}
}
// copy sub folders
for ( File sub : file.listFiles(connection.getDefaultFileFilter())) {
cnt = copy(newTarget, sub,cnt);
}
return cnt;
}
private int count(File file, int cnt) {
if (monitor.isCanceled()) return cnt;
if ( file.isDirectory() ) cnt++;
if (!file.isDirectory()) return cnt; // for secure
for ( File sub : file.listFiles(connection.getDefaultFileFilter())) {
cnt = count(sub,cnt);
}
return cnt;
}
@Override
public void initialize() throws CaoException {
}
public void setSources(CaoList<Access> list) {
sources = list;
}
}
| mhus/mhus-inka | de.mhus.hair/hair3/de.mhus.cha.app/src/de/mhus/cha/cao/action/CopyToOperation.java | Java | apache-2.0 | 3,411 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.util.csv;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.Base64;
import java.util.List;
import java.util.Properties;
import javax.annotation.Nullable;
import org.apache.commons.csv.CSVRecord;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.expression.function.EncodeFormat;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.IllegalDataException;
import org.apache.phoenix.schema.types.PBinary;
import org.apache.phoenix.schema.types.PBoolean;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PDataType.PDataCodec;
import org.apache.phoenix.schema.types.PTimestamp;
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.util.ColumnInfo;
import org.apache.phoenix.util.DateUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.UpsertExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
/** {@link UpsertExecutor} over {@link CSVRecord}s. */
public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {
private static final Logger LOG = LoggerFactory.getLogger(CsvUpsertExecutor.class);
protected final String arrayElementSeparator;
/** Testing constructor. Do not use in prod. */
@VisibleForTesting
protected CsvUpsertExecutor(Connection conn, List<ColumnInfo> columnInfoList,
PreparedStatement stmt, UpsertListener<CSVRecord> upsertListener,
String arrayElementSeparator) {
super(conn, columnInfoList, stmt, upsertListener);
this.arrayElementSeparator = arrayElementSeparator;
finishInit();
}
public CsvUpsertExecutor(Connection conn, String tableName,
List<ColumnInfo> columnInfoList, UpsertListener<CSVRecord> upsertListener,
String arrayElementSeparator) {
super(conn, tableName, columnInfoList, upsertListener);
this.arrayElementSeparator = arrayElementSeparator;
finishInit();
}
@Override
protected void execute(CSVRecord csvRecord) {
try {
if (csvRecord.size() < conversionFunctions.size()) {
String message = String.format("CSV record does not have enough values (has %d, but needs %d)",
csvRecord.size(), conversionFunctions.size());
throw new IllegalArgumentException(message);
}
for (int fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) {
Object sqlValue = conversionFunctions.get(fieldIndex).apply(csvRecord.get(fieldIndex));
if (sqlValue != null) {
preparedStatement.setObject(fieldIndex + 1, sqlValue);
} else {
preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType());
}
}
preparedStatement.execute();
upsertListener.upsertDone(++upsertCount);
} catch (Exception e) {
if (LOG.isDebugEnabled()) {
// Even though this is an error we only log it with debug logging because we're notifying the
// listener, and it can do its own logging if needed
LOG.debug("Error on CSVRecord " + csvRecord, e);
}
upsertListener.errorOnRecord(csvRecord, e);
}
}
@Override
protected Function<String, Object> createConversionFunction(PDataType dataType) {
if (dataType.isArrayType()) {
return new ArrayDatatypeConversionFunction(
new StringToArrayConverter(
conn,
arrayElementSeparator,
PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE)));
} else {
return new SimpleDatatypeConversionFunction(dataType, this.conn);
}
}
/**
* Performs typed conversion from String values to a given column value type.
*/
static class SimpleDatatypeConversionFunction implements Function<String, Object> {
private final PDataType dataType;
private final PDataCodec codec;
private final DateUtil.DateTimeParser dateTimeParser;
private final String binaryEncoding;
SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
ReadOnlyProps props;
try {
props = conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
} catch (SQLException e) {
throw new RuntimeException(e);
}
this.dataType = dataType;
PDataCodec codec = dataType.getCodec();
if(dataType.isCoercibleTo(PTimestamp.INSTANCE)) {
codec = DateUtil.getCodecFor(dataType);
// TODO: move to DateUtil
String dateFormat;
int dateSqlType = dataType.getResultSetSqlType();
if (dateSqlType == Types.DATE) {
dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB,
DateUtil.DEFAULT_DATE_FORMAT);
} else if (dateSqlType == Types.TIME) {
dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB,
DateUtil.DEFAULT_TIME_FORMAT);
} else {
dateFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
DateUtil.DEFAULT_TIMESTAMP_FORMAT);
}
String timeZoneId = props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, dataType, timeZoneId);
} else {
this.dateTimeParser = null;
}
this.codec = codec;
this.binaryEncoding = props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
QueryServicesOptions.DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING);
}
@Nullable
@Override
public Object apply(@Nullable String input) {
if (input == null || input.isEmpty()) {
return null;
}
if (dataType == PTimestamp.INSTANCE) {
return DateUtil.parseTimestamp(input);
}
if (dateTimeParser != null) {
long epochTime = dateTimeParser.parseDateTime(input);
byte[] byteValue = new byte[dataType.getByteSize()];
codec.encodeLong(epochTime, byteValue, 0);
return dataType.toObject(byteValue);
} else if (dataType == PBoolean.INSTANCE) {
switch (input.toLowerCase()) {
case "true":
case "t":
case "1":
return Boolean.TRUE;
case "false":
case "f":
case "0":
return Boolean.FALSE;
default:
throw new RuntimeException("Invalid boolean value: '" + input
+ "', must be one of ['true','t','1','false','f','0']");
}
}else if (dataType == PVarbinary.INSTANCE || dataType == PBinary.INSTANCE){
EncodeFormat format = EncodeFormat.valueOf(binaryEncoding.toUpperCase());
Object object = null;
switch (format) {
case BASE64:
object = Base64.getDecoder().decode(input);
if (object == null) { throw new IllegalDataException(
"Input: [" + input + "] is not base64 encoded"); }
break;
case ASCII:
object = Bytes.toBytes(input);
break;
default:
throw new IllegalDataException("Unsupported encoding \"" + binaryEncoding + "\"");
}
return object;
}
return dataType.toObject(input);
}
}
/**
* Converts string representations of arrays into Phoenix arrays of the correct type.
*/
private static class ArrayDatatypeConversionFunction implements Function<String, Object> {
private final StringToArrayConverter arrayConverter;
private ArrayDatatypeConversionFunction(StringToArrayConverter arrayConverter) {
this.arrayConverter = arrayConverter;
}
@Nullable
@Override
public Object apply(@Nullable String input) {
try {
return arrayConverter.toArray(input);
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
}
}
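// Illustrative conversion behavior with a hypothetical column layout: for columns typed VARCHAR, INTEGER,
// BOOLEAN and a date array, with arrayElementSeparator ":", a CSV record such as
//   john,42,t,2020-01-01:2020-02-01
// is converted field by field before the upsert. Boolean fields accept true/t/1 and false/f/0
// (case-insensitive), and BINARY/VARBINARY fields are decoded according to the
// QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING setting (BASE64 or ASCII).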
| ohadshacham/phoenix | phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java | Java | apache-2.0 | 10,075 |
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.reteoo.common;
import org.drools.core.SessionConfiguration;
import org.drools.core.WorkingMemoryEntryPoint;
import org.drools.core.base.DroolsQuery;
import org.drools.core.common.BaseNode;
import org.drools.core.common.InternalAgenda;
import org.drools.core.common.InternalFactHandle;
import org.drools.core.common.InternalWorkingMemory;
import org.drools.core.common.WorkingMemoryAction;
import org.drools.core.event.AgendaEventSupport;
import org.drools.core.event.RuleEventListenerSupport;
import org.drools.core.event.RuleRuntimeEventSupport;
import org.drools.core.impl.InternalKnowledgeBase;
import org.drools.core.impl.StatefulKnowledgeSessionImpl;
import org.drools.core.phreak.PropagationEntry;
import org.drools.core.reteoo.LIANodePropagation;
import org.drools.core.spi.FactHandleFactory;
import org.drools.core.spi.PropagationContext;
import org.kie.api.runtime.Environment;
import org.kie.api.runtime.rule.AgendaFilter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
public class ReteWorkingMemory extends StatefulKnowledgeSessionImpl {
private List<LIANodePropagation> liaPropagations;
private Queue<WorkingMemoryAction> actionQueue;
private AtomicBoolean evaluatingActionQueue = new AtomicBoolean(false);
/** Flag to determine if a rule is currently being fired. */
private volatile AtomicBoolean firing = new AtomicBoolean(false);
public ReteWorkingMemory() {
}
public ReteWorkingMemory(long id, InternalKnowledgeBase kBase) {
super(id, kBase);
}
public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, boolean initInitFactHandle, SessionConfiguration config, Environment environment) {
super(id, kBase, initInitFactHandle, config, environment);
}
public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, FactHandleFactory handleFactory, long propagationContext, SessionConfiguration config, InternalAgenda agenda, Environment environment) {
super(id, kBase, handleFactory, propagationContext, config, agenda, environment);
}
public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, FactHandleFactory handleFactory, InternalFactHandle initialFactHandle, long propagationContext, SessionConfiguration config, Environment environment, RuleRuntimeEventSupport workingMemoryEventSupport, AgendaEventSupport agendaEventSupport, RuleEventListenerSupport ruleEventListenerSupport, InternalAgenda agenda) {
super(id, kBase, handleFactory, false, propagationContext, config, environment, workingMemoryEventSupport, agendaEventSupport, ruleEventListenerSupport, agenda);
}
@Override
protected void init() {
this.actionQueue = new ConcurrentLinkedQueue<WorkingMemoryAction>();
this.propagationList = new RetePropagationList(this);
}
@Override
public void reset() {
super.reset();
actionQueue.clear();
}
@Override
public void reset(int handleId,
long handleCounter,
long propagationCounter) {
super.reset(handleId, handleCounter, propagationCounter );
if (liaPropagations != null) liaPropagations.clear();
actionQueue.clear();
}
@Override
public WorkingMemoryEntryPoint getWorkingMemoryEntryPoint(String name) {
WorkingMemoryEntryPoint ep = this.entryPoints.get(name);
return ep != null ? new ReteWorkingMemoryEntryPoint( this, ep ) : null;
}
public void addLIANodePropagation(LIANodePropagation liaNodePropagation) {
if (liaPropagations == null) liaPropagations = new ArrayList<LIANodePropagation>();
liaPropagations.add( liaNodePropagation );
}
private final Object syncLock = new Object();
public void initInitialFact() {
if ( initialFactHandle == null ) {
synchronized ( syncLock ) {
if ( initialFactHandle == null ) {
// double-check inside the sync point in case some other thread beat us to it.
initInitialFact(kBase, null);
}
}
}
}
@Override
public void fireUntilHalt(final AgendaFilter agendaFilter) {
initInitialFact();
super.fireUntilHalt( agendaFilter );
}
@Override
public int fireAllRules(final AgendaFilter agendaFilter,
int fireLimit) {
checkAlive();
if ( this.firing.compareAndSet( false,
true ) ) {
initInitialFact();
try {
startOperation();
return internalFireAllRules(agendaFilter, fireLimit);
} finally {
endOperation();
this.firing.set( false );
}
}
return 0;
}
private int internalFireAllRules(AgendaFilter agendaFilter, int fireLimit) {
int fireCount = 0;
try {
kBase.readLock();
// If we're already firing a rule, then it'll pick up the firing for any other assertObject(..) that gets
// nested inside, avoiding concurrent-modification exceptions, depending on code paths of the actions.
if ( liaPropagations != null && isSequential() ) {
for ( LIANodePropagation liaPropagation : liaPropagations ) {
( liaPropagation ).doPropagation( this );
}
}
// do we need to call this in advance?
executeQueuedActionsForRete();
fireCount = this.agenda.fireAllRules( agendaFilter,
fireLimit );
} finally {
kBase.readUnlock();
if (kBase.flushModifications()) {
fireCount += internalFireAllRules(agendaFilter, fireLimit);
}
}
return fireCount;
}
@Override
public void closeLiveQuery(final InternalFactHandle factHandle) {
try {
startOperation();
this.kBase.readLock();
this.lock.lock();
final PropagationContext pCtx = pctxFactory.createPropagationContext(getNextPropagationIdCounter(), PropagationContext.INSERTION,
null, null, factHandle, getEntryPoint());
getEntryPointNode().retractQuery( factHandle,
pCtx,
this );
pCtx.evaluateActionQueue(this);
getFactHandleFactory().destroyFactHandle( factHandle );
} finally {
this.lock.unlock();
this.kBase.readUnlock();
endOperation();
}
}
@Override
protected BaseNode[] evalQuery(String queryName, DroolsQuery queryObject, InternalFactHandle handle, PropagationContext pCtx) {
initInitialFact();
BaseNode[] tnodes = kBase.getReteooBuilder().getTerminalNodesForQuery( queryName );
// no need to call retract, as no leftmemory used.
getEntryPointNode().assertQuery( handle,
pCtx,
this );
pCtx.evaluateActionQueue( this );
return tnodes;
}
public Collection<WorkingMemoryAction> getActionQueue() {
return actionQueue;
}
@Override
public void queueWorkingMemoryAction(final WorkingMemoryAction action) {
try {
startOperation();
actionQueue.add(action);
notifyWaitOnRest();
} finally {
endOperation();
}
}
public void addPropagation(PropagationEntry propagationEntry) {
if (propagationEntry instanceof WorkingMemoryAction) {
actionQueue.add((WorkingMemoryAction) propagationEntry);
} else {
super.addPropagation(propagationEntry);
}
}
@Override
public void executeQueuedActionsForRete() {
try {
startOperation();
if ( evaluatingActionQueue.compareAndSet( false,
true ) ) {
try {
if ( actionQueue!= null && !actionQueue.isEmpty() ) {
WorkingMemoryAction action;
while ( (action = actionQueue.poll()) != null ) {
try {
action.execute( (InternalWorkingMemory) this );
} catch ( Exception e ) {
throw new RuntimeException( "Unexpected exception executing action " + action.toString(),
e );
}
}
}
} finally {
evaluatingActionQueue.compareAndSet( true,
false );
}
}
} finally {
endOperation();
}
}
@Override
public Iterator<? extends PropagationEntry> getActionsIterator() {
return actionQueue.iterator();
}
}
| mrietveld/drools | drools-reteoo/src/main/java/org/drools/reteoo/common/ReteWorkingMemory.java | Java | apache-2.0 | 9,985 |
package jp.hashiwa.elasticsearch.authplugin;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.rest.*;
import java.util.*;
import java.util.regex.Pattern;
import java.util.stream.Stream;
public class AuthRestHandler implements RestHandler {
private final Logger logger = Loggers.getLogger(AuthRestHandler.class);
private final RestHandler originalHandler;
private final RestResponse unauthorizedResponse = new RestResponse() {
@Override
public String contentType() {
return "application/json";
}
@Override
public BytesReference content() {
return new BytesArray("");
}
@Override
public RestStatus status() {
return RestStatus.UNAUTHORIZED;
}
};
private final Map<RestRequest.Method, Stream<Pattern>> authPatterns = new HashMap<RestRequest.Method, Stream<Pattern>>() {
{
this.put(RestRequest.Method.POST, Stream.of(
Pattern.compile("^/testindex(/.*)?$")
));
this.put(RestRequest.Method.PUT, Stream.of(
Pattern.compile("^/testindex(/.*)?$")
));
// all methods
this.put(null, Stream.of(
Pattern.compile("^/adminindex(/.*)?$")
));
}
};
AuthRestHandler(RestHandler restHandler) {
this.originalHandler = restHandler;
}
@Override
public void handleRequest(RestRequest restRequest, RestChannel restChannel, NodeClient nodeClient) throws Exception {
this.logger.debug(restRequest.path());
this.logger.debug(restRequest.rawPath());
if (isOk(restRequest)) {
this.originalHandler.handleRequest(restRequest, restChannel, nodeClient);
} else {
restChannel.sendResponse(unauthorizedResponse);
}
}
private boolean needAuth(RestRequest.Method method, String path) {
if (authPatterns.containsKey(method)) {
Stream<Pattern> patterns = authPatterns.get(method);
boolean match = patterns.anyMatch(
p -> p.matcher(path).matches()
);
return match;
}
return false;
}
private boolean isOk(RestRequest restRequest) {
RestRequest.Method method = restRequest.method();
String path = restRequest.path(); // use rawpath() ?
boolean needAuth = needAuth(method, path)
|| needAuth(null, path);
if (! needAuth) {
return true;
}
for (java.util.Map.Entry<String, String> entry: restRequest.headers()) {
String key = entry.getKey();
String value = entry.getValue();
if (key.equals("user") && value.equals("admin")) {
return true;
}
}
return false;
// ES 5.4
// return restRequest.getHeaders().get("user").equals("admin");
}
}
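// Illustrative behavior with hypothetical requests: with the patterns above, POST and PUT requests to
// /testindex (and its sub-paths) and requests with any method to /adminindex require a "user: admin" header;
// all other requests pass straight through to the wrapped handler. For example:
//   PUT /testindex/doc/1 without the header -> 401 UNAUTHORIZED
//   PUT /testindex/doc/1 with header user: admin -> delegated to the original handler
//   GET /otherindex/search -> delegated (no auth pattern matches)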
| hashiwa000/Elasticsearch-Auth-Plugin | src/jp/hashiwa/elasticsearch/authplugin/AuthRestHandler.java | Java | apache-2.0 | 2,877 |
/*
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License. See accompanying LICENSE file.
*/
package io.s4.persist;
import io.s4.util.clock.Clock;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.log4j.Logger;
public class ConMapPersister implements Persister {
private AtomicInteger persistCount = new AtomicInteger(0);
private boolean selfClean = false;
private int cleanWaitTime = 40; // 40 seconds by default
private String loggerName = "s4";
ConcurrentHashMap<String, CacheEntry> cache;
Clock s4Clock;
private int startCapacity = 5000;
public void setStartCapacity(int startCapacity) {
this.startCapacity = startCapacity;
}
public int getStartCapacity() {
return startCapacity;
}
public void setSelfClean(boolean selfClean) {
this.selfClean = selfClean;
}
public void setCleanWaitTime(int cleanWaitTime) {
this.cleanWaitTime = cleanWaitTime;
}
public void setLoggerName(String loggerName) {
this.loggerName = loggerName;
}
public ConMapPersister(Clock s4Clock) {
this.s4Clock = s4Clock;
}
public void setS4Clock(Clock s4Clock) {
this.s4Clock = s4Clock;
}
public ConMapPersister() {
}
public void init() {
cache = new ConcurrentHashMap<String, CacheEntry>(this.getStartCapacity());
if (selfClean) {
Runnable r = new Runnable() {
public void run() {
while (!Thread.interrupted()) {
int cleanCount = ConMapPersister.this.cleanOutGarbage();
Logger.getLogger(loggerName).info("Cleaned out "
+ cleanCount + " entries; Persister has "
+ cache.size() + " entries");
try {
Thread.sleep(cleanWaitTime * 1000);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
}
};
Thread t = new Thread(r);
t.start();
t.setPriority(Thread.MIN_PRIORITY);
}
}
public int getQueueSize() {
return 0;
}
public int getPersistCount() {
return persistCount.get();
}
public int getCacheEntryCount() {
return cache.size();
}
public void setAsynch(String key, Object value, int period) {
// there really is no asynch for the local cache
set(key, value, period);
}
public void set(String key, Object value, int period) {
persistCount.getAndIncrement();
CacheEntry ce = new CacheEntry();
ce.value = value;
ce.period = period;
ce.addTime = s4Clock.getCurrentTime();
cache.put(key, ce);
}
public Object get(String key) {
CacheEntry ce = cache.get(key);
if (ce == null) {
return null;
}
if (ce.isExpired()) {
return null;
}
return ce.value;
}
public Map<String, Object> getBulk(String[] keys) {
Map<String, Object> map = new HashMap<String, Object>();
for (String key : keys) {
Object value = get(key);
if (value != null) {
map.put(key, value);
}
}
return map;
}
public Object getObject(String key) {
return get(key);
}
public Map<String, Object> getBulkObjects(String[] keys) {
return getBulk(keys);
}
public void remove(String key) {
cache.remove(key);
}
public int cleanOutGarbage() {
int count = 0;
        for (Enumeration<String> en = cache.keys(); en.hasMoreElements();) {
            String key = en.nextElement();
CacheEntry ce = cache.get(key);
if (ce != null && ce.isExpired()) {
count++;
cache.remove(key);
}
}
return count;
}
public Set<String> keySet() {
return cache.keySet();
}
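    // Value holder pairing the cached object with its insertion time and
    // time-to-live in seconds.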
public class CacheEntry {
Object value;
long addTime;
int period;
public boolean isExpired() {
if (period > 0) {
if ((addTime + (1000 * (long) period)) <= s4Clock.getCurrentTime()) {
return true;
}
}
return false;
}
}
}
| s4/core | src/main/java/io/s4/persist/ConMapPersister.java | Java | apache-2.0 | 5,403 |
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/orchestration/airflow/service/v1/environments.proto
package com.google.cloud.orchestration.airflow.service.v1;
/**
*
*
* <pre>
* The configuration of Cloud SQL instance that is used by the Apache Airflow
* software.
* </pre>
*
* Protobuf type {@code google.cloud.orchestration.airflow.service.v1.DatabaseConfig}
*/
public final class DatabaseConfig extends com.google.protobuf.GeneratedMessageV3
implements
// @@protoc_insertion_point(message_implements:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
DatabaseConfigOrBuilder {
private static final long serialVersionUID = 0L;
// Use DatabaseConfig.newBuilder() to construct.
private DatabaseConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DatabaseConfig() {
machineType_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
return new DatabaseConfig();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
return this.unknownFields;
}
private DatabaseConfig(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10:
{
java.lang.String s = input.readStringRequireUtf8();
machineType_ = s;
break;
}
default:
{
if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.class,
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.Builder.class);
}
public static final int MACHINE_TYPE_FIELD_NUMBER = 1;
private volatile java.lang.Object machineType_;
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The machineType.
*/
@java.lang.Override
public java.lang.String getMachineType() {
java.lang.Object ref = machineType_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
machineType_ = s;
return s;
}
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The bytes for machineType.
*/
@java.lang.Override
public com.google.protobuf.ByteString getMachineTypeBytes() {
java.lang.Object ref = machineType_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
machineType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, machineType_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, machineType_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig)) {
return super.equals(obj);
}
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig other =
(com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) obj;
if (!getMachineType().equals(other.getMachineType())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + MACHINE_TYPE_FIELD_NUMBER;
hash = (53 * hash) + getMachineType().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseDelimitedFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseDelimitedFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.CodedInputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() {
return newBuilder();
}
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
*
* <pre>
* The configuration of Cloud SQL instance that is used by the Apache Airflow
* software.
* </pre>
*
* Protobuf type {@code google.cloud.orchestration.airflow.service.v1.DatabaseConfig}
*/
public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
implements
// @@protoc_insertion_point(builder_implements:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfigOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.class,
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.Builder.class);
}
// Construct using com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
}
@java.lang.Override
public Builder clear() {
super.clear();
machineType_ = "";
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
.internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
getDefaultInstanceForType() {
return com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.getDefaultInstance();
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig build() {
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig buildPartial() {
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig result =
new com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig(this);
result.machineType_ = machineType_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) {
return mergeFrom((com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig other) {
if (other
== com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.getDefaultInstance())
return this;
if (!other.getMachineType().isEmpty()) {
machineType_ = other.machineType_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage =
(com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private java.lang.Object machineType_ = "";
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The machineType.
*/
public java.lang.String getMachineType() {
java.lang.Object ref = machineType_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
machineType_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return The bytes for machineType.
*/
public com.google.protobuf.ByteString getMachineTypeBytes() {
java.lang.Object ref = machineType_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
machineType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @param value The machineType to set.
* @return This builder for chaining.
*/
public Builder setMachineType(java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
machineType_ = value;
onChanged();
return this;
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @return This builder for chaining.
*/
public Builder clearMachineType() {
machineType_ = getDefaultInstance().getMachineType();
onChanged();
return this;
}
/**
*
*
* <pre>
* Optional. Cloud SQL machine type used by Airflow database.
* It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
* or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
* </pre>
*
* <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
*
* @param value The bytes for machineType to set.
* @return This builder for chaining.
*/
public Builder setMachineTypeBytes(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
machineType_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
}
// @@protoc_insertion_point(class_scope:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
private static final com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig();
}
public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<DatabaseConfig> PARSER =
new com.google.protobuf.AbstractParser<DatabaseConfig>() {
@java.lang.Override
public DatabaseConfig parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatabaseConfig(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<DatabaseConfig> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<DatabaseConfig> getParserForType() {
return PARSER;
}
@java.lang.Override
public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
| googleapis/java-orchestration-airflow | proto-google-cloud-orchestration-airflow-v1/src/main/java/com/google/cloud/orchestration/airflow/service/v1/DatabaseConfig.java | Java | apache-2.0 | 23,250 |
package com.bagri.server.hazelcast.task.schema;
import static com.bagri.core.Constants.pn_schema_password;
import static com.bagri.server.hazelcast.serialize.TaskSerializationFactory.cli_UpdateSchemaTask;
import static com.bagri.support.security.Encryptor.encrypt;
import java.io.IOException;
import java.util.Properties;
import java.util.Map.Entry;
import com.bagri.core.system.Schema;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
public class SchemaUpdater extends SchemaProcessor implements IdentifiedDataSerializable {
private boolean override;
private Properties properties;
public SchemaUpdater() {
		// no-arg constructor required for Hazelcast deserialization
}
public SchemaUpdater(int version, String admin, boolean override, Properties properties) {
super(version, admin);
this.override = override;
this.properties = properties;
}
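	// Updates the stored schema only when its version matches the expected one:
	// either replaces the whole property set or merges individual properties,
	// encrypting the schema password, then bumps the version and audits the change.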
@Override
public Object process(Entry<String, Schema> entry) {
logger.debug("process.enter; entry: {}", entry);
if (entry.getValue() != null) {
Schema schema = entry.getValue();
if (schema.getVersion() == getVersion()) {
//if (schema.isActive()) {
// if (denitSchemaInCluster(schema) > 0) {
// don't go further
// return null;
// }
//}
if (override) {
String pwd = properties.getProperty(pn_schema_password);
if (pwd != null) {
properties.setProperty(pn_schema_password, encrypt(pwd));
}
schema.setProperties(properties);
} else {
for (String name: properties.stringPropertyNames()) {
String value = properties.getProperty(name);
if (pn_schema_password.equals(name)) {
value = encrypt(value);
}
schema.setProperty(name, value);
}
}
//if (schema.isActive()) {
// if (initSchemaInCluster(schema) == 0) {
// schema.setActive(false);
// }
//}
schema.updateVersion(getAdmin());
entry.setValue(schema);
auditEntity(AuditType.update, schema);
return schema;
}
}
return null;
}
@Override
public int getId() {
return cli_UpdateSchemaTask;
}
@Override
public void readData(ObjectDataInput in) throws IOException {
super.readData(in);
override = in.readBoolean();
properties = in.readObject();
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
super.writeData(out);
out.writeBoolean(override);
out.writeObject(properties);
}
}
| dsukhoroslov/bagri | bagri-server/bagri-server-hazelcast/src/main/java/com/bagri/server/hazelcast/task/schema/SchemaUpdater.java | Java | apache-2.0 | 2,552 |
package uk.co.bluegecko.core.swing.table.rendering;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.awt.Color;
import java.awt.Font;
import org.junit.Before;
import org.junit.Test;
public class RenderingHintTest
{
private Font font;
private Color color;
@Before
public final void setUp()
{
font = Font.decode( "Monospaced-12" );
color = new Color( 0x808080 );
}
@Test
public final void testWeightExceeds()
{
final FontHint min = new FontHint( HintWeight.MIN_WEIGHT );
final FontHint low = new FontHint( HintWeight.LOW_WEIGHT );
final FontHint def = new FontHint( HintWeight.DEFAULT_WEIGHT );
final FontHint selected = new FontHint( HintWeight.SELECTED_WEIGHT );
final FontHint high = new FontHint( HintWeight.HIGH_WEIGHT );
final FontHint focused = new FontHint( HintWeight.FOCUSED_WEIGHT );
final FontHint max = new FontHint( HintWeight.MAX_WEIGHT );
assertFalse( "min-min", min.exceeds( min ) );
assertFalse( "min-low", min.exceeds( low ) );
assertTrue( "low-min", low.exceeds( min ) );
assertTrue( "default-low", def.exceeds( low ) );
assertTrue( "selected-default", selected.exceeds( def ) );
assertTrue( "high-selected", high.exceeds( selected ) );
assertTrue( "focused-high", focused.exceeds( high ) );
assertTrue( "max-focused", max.exceeds( focused ) );
}
@Test
public final void testGetValueNone()
{
assertEquals( font, new FontHint( HintWeight.MAX_WEIGHT ).getValue( font ) );
assertNull( new FontHint( HintWeight.MAX_WEIGHT ).getValue() );
}
@Test
public final void testGetValueNonDerived()
{
final Font value = Font.decode( "Monospaced-BOLD-14" );
assertEquals( value, new FontHint( HintWeight.MAX_WEIGHT, value ).getValue( font ) );
assertEquals( value, new FontHint( HintWeight.MAX_WEIGHT, value ).getValue() );
}
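	// An anonymous FontHint that overrides derive() computes its value from the
	// original font, so getValue() with no argument still returns null.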
@Test
public final void testGetValueDerived()
{
final Font value = Font.decode( "Monospaced-14" );
final FontHint fontHint = new FontHint( HintWeight.MAX_WEIGHT )
{
private static final long serialVersionUID = 1L;
@Override
protected Font derive( final Font original )
{
return original.deriveFont( 14.0f );
}
};
assertEquals( value, fontHint.getValue( font ) );
assertNull( fontHint.getValue() );
}
@Test
public final void testFontHintSize()
{
final Font value = Font.decode( "Monospaced-14" );
assertEquals( value, FontHint.size( HintWeight.MAX_WEIGHT, 14 )
.getValue( font ) );
}
@Test
public final void testFontHintLarger()
{
final Font value = Font.decode( "Monospaced-14" );
assertEquals( value, FontHint.larger( HintWeight.MAX_WEIGHT, 2 )
.getValue( font ) );
}
@Test
public final void testFontHintSmaller()
{
final Font value = Font.decode( "Monospaced-10" );
assertEquals( value, FontHint.smaller( HintWeight.MAX_WEIGHT, 2 )
.getValue( font ) );
}
@Test
public final void testFontHintScaled()
{
final Font value = Font.decode( "Monospaced-6" );
assertEquals( value, FontHint.scaled( HintWeight.MAX_WEIGHT, 0.5f )
.getValue( font ) );
}
@Test
public final void testFontHintStyle()
{
final Font value = Font.decode( "Monospaced-BOLD-12" );
assertEquals( value, FontHint.style( HintWeight.MAX_WEIGHT, Font.BOLD )
.getValue( font ) );
}
@Test
public final void testFontHintStyleAndSize()
{
final Font value = Font.decode( "Monospaced-BOLD-14" );
assertEquals( value, FontHint.style( HintWeight.MAX_WEIGHT, Font.BOLD, 14 )
.getValue( font ) );
}
@Test
public final void testForegroundHintDarker()
{
final Color value = new Color( 0x595959 );
assertEquals( value, ForegroundHint.darker( HintWeight.MAX_WEIGHT )
.getValue( color ) );
}
@Test
public final void testForegroundHintBrighter()
{
final Color value = new Color( 0xB6B6B6 );
assertEquals( value, ForegroundHint.brighter( HintWeight.MAX_WEIGHT )
.getValue( color ) );
}
@Test
public final void testBackgroundHintDarker()
{
final Color value = new Color( 0x595959 );
assertEquals( value, BackgroundHint.darker( HintWeight.MAX_WEIGHT )
.getValue( color ) );
}
@Test
public final void testBackgroundHintBrighter()
{
final Color value = new Color( 0xB6B6B6 );
assertEquals( value, BackgroundHint.brighter( HintWeight.MAX_WEIGHT )
.getValue( color ) );
}
}
| caveman-frak/java-core | core-swing/src/test/java/uk/co/bluegecko/core/swing/table/rendering/RenderingHintTest.java | Java | apache-2.0 | 4,586 |
/*
* Copyright 2015-2016 DevCon5 GmbH, info@devcon5.ch
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.devcon5.cli;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNot.not;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import org.junit.Test;
/**
 * Example test showing how command-line arguments are parsed into annotated
 * fields and nested option groups, with a PostInject-annotated hook running
 * after injection.
 */
public class CLIExample {
@CliOption(value = "x",
hasArg = true)
private String example;
@CliOptionGroup
private Structured credentials;
private String postProcessed;
@PostInject
private void init(){
postProcessed = "an " + example;
}
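    // The test parses the argument array into the annotated fields (including the
    // nested option group); the @PostInject method then builds postProcessed.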
@Test
public void example() {
//arrange
String[] exampleArgs = {"-u", "hans", "-p", "wurst", "-x", "example"};
//act
CLI.parse(exampleArgs).into(this);
run();
//assert
assertEquals("an example", postProcessed);
}
public void run() {
assertThat(example, is(not(nullValue())));
assertThat(credentials.user, is(not(nullValue())));
assertThat(credentials.password, is(not(nullValue())));
}
static class Structured {
@CliOption(value = "u",
hasArg = true)
private String user;
@CliOption(value = "p",
hasArg = true)
private String password;
}
}
| devcon5io/common | cli/src/test/java/io/devcon5/cli/CLIExample.java | Java | apache-2.0 | 1,919 |
/*
* Copyright 2015-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.docksidestage.app.web.product;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.docksidestage.dbflute.allcommon.CDef;
import org.hibernate.validator.constraints.Length;
/**
* @author jflute
*/
public class ProductSearchForm {
    @Length(max = 10) // #simple_for_example just for validation example
public String productName;
public CDef.ProductStatus productStatus;
    @Length(max = 5) // #simple_for_example just for validation example
public String purchaseMemberName;
@Override
public String toString() {
return ToStringBuilder.reflectionToString(this);
}
}
| dbflute-session/lastaflute-test-catalog | src/main/java/org/docksidestage/app/web/product/ProductSearchForm.java | Java | apache-2.0 | 1,252 |
package com.flora.support;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.util.Map;
import org.apache.velocity.VelocityContext;
import org.apache.velocity.app.VelocityEngine;
import org.apache.velocity.context.Context;
import com.flora.Config;
public class VelocityTemplate {
private VelocityEngine velocityEngine;
private Config config;
public VelocityTemplate(){
}
public String parseTemplate(String template, Map model){
model.putAll(Config.getPageTools());
Context context = new VelocityContext(model);
Writer writer = new StringWriter();
try {
velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
} catch (Exception e) {
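            // merge failures are swallowed; whatever was written so far is returned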
}
return writer.toString();
}
public void parseTemplate(String template, Map model, Writer writer){
model.putAll(Config.getPageTools());
Context context = new VelocityContext(model);
try {
velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
} catch (Exception e) {
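            // merge failures are swallowed; the caller's writer may hold partial output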
}
}
    public void parseTemplate(String template, Map model, OutputStream os){
        model.putAll(Config.getPageTools());
        Context context = new VelocityContext(model);
        Writer writer = new OutputStreamWriter(os);
        try {
            velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
            // flush so the buffered content actually reaches the underlying stream
            writer.flush();
        } catch (Exception e) {
            // merge failures are swallowed; the stream may contain partial output
        }
    }
public void setVelocityEngine(VelocityEngine velocityEngine) {
this.velocityEngine = velocityEngine;
}
public Config getConfig() {
return config;
}
public void setConfig(Config config) {
this.config = config;
}
}
| liqilun/flora | src/main/java/com/flora/support/VelocityTemplate.java | Java | apache-2.0 | 1,644 |
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
/**
* Subclass of FilteredTermEnum for enumerating all terms that match the
* specified wildcard filter term.
* <p>
* Term enumerations are always ordered by Term.compareTo(). Each term in
* the enumeration is greater than all that precede it.
*
* @version $Id: WildcardTermEnum.java 329859 2005-10-31 17:05:36Z bmesser $
*/
public class WildcardTermEnum extends FilteredTermEnum {
Term searchTerm;
String field = "";
String text = "";
String pre = "";
int preLen = 0;
boolean endEnum = false;
/**
* Creates a new <code>WildcardTermEnum</code>. Passing in a
* {@link org.apache.lucene.index.Term Term} that does not contain a
* <code>WILDCARD_CHAR</code> will cause an exception to be thrown.
* <p>
* After calling the constructor the enumeration is already pointing to the first
* valid term if such a term exists.
*/
public WildcardTermEnum(IndexReader reader, Term term) throws IOException {
super();
searchTerm = term;
field = searchTerm.field();
text = searchTerm.text();
int sidx = text.indexOf(WILDCARD_STRING);
int cidx = text.indexOf(WILDCARD_CHAR);
int idx = sidx;
if (idx == -1) {
idx = cidx;
}
else if (cidx >= 0) {
idx = Math.min(idx, cidx);
}
pre = searchTerm.text().substring(0,idx);
preLen = pre.length();
text = text.substring(preLen);
setEnum(reader.terms(new Term(searchTerm.field(), pre)));
}
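  // A term matches only if it is in the same field, starts with the constant
  // prefix, and the remainder satisfies the wildcard pattern; once the
  // enumeration moves past that range, endEnum is set so iteration stops.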
protected final boolean termCompare(Term term) {
if (field == term.field()) {
String searchText = term.text();
if (searchText.startsWith(pre)) {
return wildcardEquals(text, 0, searchText, preLen);
}
}
endEnum = true;
return false;
}
public final float difference() {
return 1.0f;
}
public final boolean endEnum() {
return endEnum;
}
/********************************************
* String equality with support for wildcards
********************************************/
public static final char WILDCARD_STRING = '*';
public static final char WILDCARD_CHAR = '?';
  /**
   * Determines if a word matches a wildcard pattern, starting the comparison
   * at the given offsets; for example, wildcardEquals("te?t", 0, "test", 0)
   * returns true.
   * <small>Work released by Granta Design Ltd after originally being done on
   * company time.</small>
   */
public static final boolean wildcardEquals(String pattern, int patternIdx,
String string, int stringIdx)
{
int p = patternIdx;
for (int s = stringIdx; ; ++p, ++s)
{
// End of string yet?
boolean sEnd = (s >= string.length());
// End of pattern yet?
boolean pEnd = (p >= pattern.length());
// If we're looking at the end of the string...
if (sEnd)
{
// Assume the only thing left on the pattern is/are wildcards
boolean justWildcardsLeft = true;
// Current wildcard position
int wildcardSearchPos = p;
// While we haven't found the end of the pattern,
// and haven't encountered any non-wildcard characters
while (wildcardSearchPos < pattern.length() && justWildcardsLeft)
{
// Check the character at the current position
char wildchar = pattern.charAt(wildcardSearchPos);
// If it's not a wildcard character, then there is more
// pattern information after this/these wildcards.
if (wildchar != WILDCARD_CHAR && wildchar != WILDCARD_STRING)
{
justWildcardsLeft = false;
}
else
{
            // to prevent "cat" from matching "ca??"
if (wildchar == WILDCARD_CHAR) {
return false;
}
// Look at the next character
wildcardSearchPos++;
}
}
// This was a prefix wildcard search, and we've matched, so
// return true.
if (justWildcardsLeft)
{
return true;
}
}
// If we've gone past the end of the string, or the pattern,
// return false.
if (sEnd || pEnd)
{
break;
}
// Match a single character, so continue.
if (pattern.charAt(p) == WILDCARD_CHAR)
{
continue;
}
      // '*' wildcard: try matching the rest of the pattern against every possible tail of the string
if (pattern.charAt(p) == WILDCARD_STRING)
{
// Look at the character beyond the '*'.
++p;
// Examine the string, starting at the last character.
for (int i = string.length(); i >= s; --i)
{
if (wildcardEquals(pattern, p, string, i))
{
return true;
}
}
break;
}
if (pattern.charAt(p) != string.charAt(s))
{
break;
}
}
return false;
}
public void close() throws IOException
{
super.close();
searchTerm = null;
field = null;
text = null;
}
}
| lpxz/grail-lucene358684 | src/java/org/apache/lucene/search/WildcardTermEnum.java | Java | apache-2.0 | 5,708 |
package com.github.ayltai.foscam.client;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import android.support.annotation.NonNull;
import android.support.annotation.VisibleForTesting;
import android.support.v4.util.Pair;
import rx.Subscriber;
import rx.Subscription;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
public /* final */ class RxBus {
private static final ThreadLocal<RxBus> INSTANCE = new ThreadLocal<>();
private final Map<Pair<Class, Subscriber>, Subscription> subscriptions = new HashMap<>();
private final Subject<Object, ?> bus = new SerializedSubject<>(PublishSubject.create());
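    // Note: the singleton is held in a ThreadLocal, so each thread calling
    // getInstance() gets its own RxBus with its own subscriptions.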
public static RxBus getInstance() {
final RxBus instance = RxBus.INSTANCE.get();
if (instance == null) {
RxBus.INSTANCE.set(new RxBus());
return RxBus.INSTANCE.get();
}
return instance;
}
@VisibleForTesting
RxBus() {
}
public <T> void register(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) {
final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber);
if (this.subscriptions.containsKey(key)) throw new IllegalArgumentException("The given subscriber is already registered");
this.subscriptions.put(key, this.bus.filter(event -> event != null && event.getClass().equals(eventType)).subscribe(value -> subscriber.onNext((T)value)));
}
public <T> void unregister(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) {
final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber);
if (this.subscriptions.containsKey(key)) this.subscriptions.remove(key).unsubscribe();
}
public void unregisterAll() {
for (final Pair<Class, Subscriber> pair : new HashSet<>(this.subscriptions.keySet())) {
this.unregister(pair.first, pair.second);
}
}
public <T> void send(@NonNull final T event) {
if (!this.subscriptions.isEmpty()) this.bus.onNext(event);
}
}
| ayltai/Foscam-CGI-Client | app/src/main/java/com/github/ayltai/foscam/client/RxBus.java | Java | apache-2.0 | 2,141 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.retail.v2;
/**
* Available OAuth 2.0 scopes for use with the Retail API.
*
* @since 1.4
*/
public class CloudRetailScopes {
  /** See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account. */
public static final String CLOUD_PLATFORM = "https://www.googleapis.com/auth/cloud-platform";
/**
* Returns an unmodifiable set that contains all scopes declared by this class.
*
* @since 1.16
*/
public static java.util.Set<String> all() {
java.util.Set<String> set = new java.util.HashSet<String>();
set.add(CLOUD_PLATFORM);
return java.util.Collections.unmodifiableSet(set);
}
private CloudRetailScopes() {
}
}
| googleapis/google-api-java-client-services | clients/google-api-services-retail/v2/1.31.0/com/google/api/services/retail/v2/CloudRetailScopes.java | Java | apache-2.0 | 1,411 |
package com.github.andriell.collection;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* Created by Andrey on 13.02.2016
*/
public class HashThreeTest {
public static void main(String[] args) {
HashThreeTest test = new HashThreeTest();
test.test1();
}
@Test
public void test1() {
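        // hash codes with only high bits set (0x50000000..0x70000000) and only low
        // bits set (5..7), presumably to exercise different branches of the three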
ObjectTest test1 = new ObjectTest(0x50000000);
ObjectTest test2 = new ObjectTest(0x60000000);
ObjectTest test3 = new ObjectTest(0x70000000);
ObjectTest test4 = new ObjectTest(0x00000005);
ObjectTest test5 = new ObjectTest(0x00000006);
ObjectTest test6 = new ObjectTest(0x00000007);
HashThree<ObjectTest> three = new HashThree<ObjectTest>();
assertEquals(0, three.getSize());
assertEquals(false, three.remove(test1));
assertEquals(true, three.add(test1));
assertEquals(1, three.getSize());
assertEquals(true, three.add(test2));
assertEquals(2, three.getSize());
assertEquals(true, three.add(test3));
assertEquals(3, three.getSize());
assertEquals(true, three.add(test4));
assertEquals(4, three.getSize());
assertEquals(true, three.add(test5));
assertEquals(5, three.getSize());
assertEquals(true, three.add(test6));
assertEquals(6, three.getSize());
assertEquals(false, three.add(test1));
assertEquals(false, three.add(test2));
assertEquals(false, three.add(test3));
assertEquals(false, three.add(test4));
assertEquals(true, three.replace(test1));
assertEquals(true, three.replace(test2));
assertEquals(true, three.replace(test3));
assertEquals(true, three.replace(test4));
System.out.println(three);
assertEquals(true, three.exist(test2));
assertEquals(true, three.remove(test2));
//assertEquals(false, three.remove(test2));
//assertEquals(true, three.exist(test1));
//assertEquals(false, three.exist(test2));
//assertEquals(true, three.exist(test3));
//assertEquals(true, three.exist(test4));
System.out.println(three);
}
private class ObjectTest {
private int hashCode;
public ObjectTest(int hashCode) {
this.hashCode = hashCode;
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public String toString() {
return Integer.toString(hashCode);
}
}
}
| andriell/craftyfox | src/test/java/com/github/andriell/collection/HashThreeTest.java | Java | apache-2.0 | 2,540 |
package com.ryanharter.auto.value.moshi.example;
import com.google.auto.value.AutoValue;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
import java.lang.reflect.Type;
@AutoValue public abstract class GenericsExample<A, B, C> {
public abstract A a();
public abstract B b();
public abstract C c();
@AutoValue.Builder
public interface Builder<A, B, C> {
Builder<A, B, C> a(A a);
Builder<A, B, C> b(B b);
Builder<A, B, C> c(C c);
GenericsExample<A, B, C> build();
}
public static <A, B, C> Builder<A, B, C> builder() {
return new AutoValue_GenericsExample.Builder<A, B, C>();
}
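  // The generated MoshiJsonAdapter for a generic class needs the concrete type
  // arguments at runtime, which Moshi supplies through the types array.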
public static <A, B, C> JsonAdapter<GenericsExample<A, B, C>> jsonAdapter(Moshi moshi, Type[] types) {
return new AutoValue_GenericsExample.MoshiJsonAdapter(moshi, types);
}
}
| rharter/auto-value-moshi | example/src/main/java/com/ryanharter/auto/value/moshi/example/GenericsExample.java | Java | apache-2.0 | 870 |
package web.magic.jvm;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
class MBeanTyper {
static final boolean DEBUG = Boolean.getBoolean("jboss.jmx.debug");
	/**
	 * Creates a typed proxy for an MBean: the returned object implements the given
	 * interface (and all interfaces it extends) and forwards every call to the
	 * MBeanServer for the supplied ObjectName.
	 */
public static final Object typeMBean(MBeanServer server, ObjectName mbean, Class<?> mainInterface) throws Exception {
List<Class<?>> interfaces = new ArrayList<Class<?>>();
if (mainInterface.isInterface()) {
interfaces.add(mainInterface);
}
addInterfaces(mainInterface.getInterfaces(), interfaces);
Class<?> cl[] = (Class[]) interfaces.toArray(new Class[interfaces.size()]);
if (DEBUG) {
System.err.println("typeMean->server=" + server + ",mbean=" + mbean + ",mainInterface=" + mainInterface);
for (int c = 0; c < cl.length; c++) {
System.err.println(" :" + cl[c]);
}
}
return Proxy.newProxyInstance(Thread.currentThread().getContextClassLoader(), cl, new MBeanTyperInvoker(server,
mbean));
}
private static final void addInterfaces(Class<?> cl[], List<Class<?>> list) {
if (cl == null)
return;
for (int c = 0; c < cl.length; c++) {
list.add(cl[c]);
addInterfaces(cl[c].getInterfaces(), list);
}
}
}
/**
* MBeanTyperInvoker handles method invocations against the MBeanTyper target
* object and forwards them to the MBeanServer and ObjectName for invocation.
*
* @author <a href="mailto:jhaynie@vocalocity.net">Jeff Haynie</a>
*/
final class MBeanTyperInvoker implements java.lang.reflect.InvocationHandler {
private final MBeanServer server;
private final ObjectName mbean;
private final Map<Method, String[]> signatureCache = Collections.synchronizedMap(new HashMap<Method, String[]>());
MBeanTyperInvoker(MBeanServer server, ObjectName mbean) {
this.server = server;
this.mbean = mbean;
}
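	// Treats any method whose name starts with "get" as a JMX attribute read;
	// boolean "isXxx" accessors fall through to a regular MBeanServer.invoke call.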
private boolean isJMXAttribute(Method m) {
String name = m.getName();
return (name.startsWith("get"));
}
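	// Dispatch: java.lang.Object methods are answered locally, no-arg getXxx calls
	// map to MBeanServer.getAttribute, and everything else goes through
	// MBeanServer.invoke using a cached String[] signature for the method.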
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (MBeanTyper.DEBUG) {
System.err.println(" ++ method=" + method.getName() + ",args=" + args);
}
try {
if (method.getDeclaringClass() == Object.class) {
String name = method.getName();
if (name.equals("hashCode")) {
return new Integer(this.hashCode());
} else if (name.equals("toString")) {
return this.toString();
} else if (name.equals("equals")) {
// FIXME: this needs to be reviewed - we should be
// smarter about this ...
return new Boolean(equals(args[0]));
}
} else if (isJMXAttribute(method) && (args == null || args.length <= 0)) {
String name = method.getName().substring(3);
return server.getAttribute(mbean, name);
}
String sig[] = (String[]) signatureCache.get(method);
if (sig == null) {
// get the method signature from the method argument directly
// vs. the arguments passed, since there may be primitives that
// are wrapped as objects in the arguments
Class<?> _args[] = method.getParameterTypes();
if (_args != null && _args.length > 0) {
sig = new String[_args.length];
for (int c = 0; c < sig.length; c++) {
if (_args[c] != null) {
sig[c] = _args[c].getName();
}
}
} else {
sig = new String[0];
}
signatureCache.put(method, sig);
}
return server.invoke(mbean, method.getName(), args, sig);
} catch (Throwable t) {
if (MBeanTyper.DEBUG) {
t.printStackTrace();
}
if (t instanceof UndeclaredThrowableException) {
UndeclaredThrowableException ut = (UndeclaredThrowableException) t;
throw ut.getUndeclaredThrowable();
} else if (t instanceof InvocationTargetException) {
InvocationTargetException it = (InvocationTargetException) t;
throw it.getTargetException();
} else if (t instanceof MBeanException) {
MBeanException me = (MBeanException) t;
throw me.getTargetException();
} else {
throw t;
}
}
}
} | liufeiit/WebMagic | WebMagic/src/test/java/web/magic/jvm/MBeanTyper.java | Java | apache-2.0 | 4,284 |
package eu.dowsing.kolla.widget.brick.facade;
import javafx.scene.layout.Pane;
import javafx.scene.paint.Color;
import javafx.scene.shape.Circle;
import javafx.scene.shape.CircleBuilder;
import javafx.scene.shape.Rectangle;
import javafx.scene.shape.RectangleBuilder;
import com.leapmotion.leap.Hand;
import eu.dowsing.kolla.widget.brick.model.BrickModel;
import eu.dowsing.kolla.widget.brick.model.BrickModel.Position;
/**
* Represents a complete hand including its fingers.
*
* @author richardg
*
*/
public class BrickView {
// port(left hand:red) and starboard(right hand:green)
public enum Importance {
PRIMARY, SECONDARY
}
private Rectangle horizontal;
private Rectangle vertical;
private Rectangle[] fingerRects;
private Circle hint;
/** Hints at where the gesture started. **/
private Circle startHint;
public BrickView(Pane p, int rectHeight, int rectWidth, int rectX, int rectY, int miniRectHeight, int miniRectWidth) {
drawIndicator(p, rectHeight, rectWidth, rectX, rectY, miniRectHeight, miniRectWidth);
}
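    // Builds the static scenery: a hint circle and a gesture-start circle, a
    // horizontal bar for the hand, a vertical bar for an upright hand, and one
    // small rectangle per finger (five in total).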
private void drawIndicator(Pane p, int hHeight, int hWidth, int rectX, int rectY, int mHeight, int mWidth) {
final int fingerCount = 5;
fingerRects = new Rectangle[fingerCount];
final int rectMargin = 10;
final int hRealWidth = hWidth - (2 * rectMargin);
// create the measure for the mini finger rectangles
int miniRectMargin = rectMargin / 2;
int mRealWidth = mWidth - miniRectMargin;
int mRectX = rectX + (miniRectMargin / 2);
int mRectY = rectY;
// create measures for the vertical rectangle
final int vWidth = hHeight;
final int vHeight = hWidth / 2;
// create the circle indicating where the hand can be
this.hint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - (hHeight / 2))
.centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build();
p.getChildren().add(hint);
// create the circle indicating where the gesture started
this.startHint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - (hHeight / 2))
.centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build();
p.getChildren().add(startHint);
// create the rectangle indicating position of the hand
horizontal = RectangleBuilder.create().height(hHeight).width(hRealWidth).arcHeight(0).arcWidth(0)
.stroke(Color.RED).fill(Color.web("blue", 0.1)).translateX(rectX).translateY(rectY).build();
p.getChildren().add(horizontal);
// create rectangle indicating if the hand is vertical
vertical = RectangleBuilder.create().height(vHeight).width(vWidth).arcHeight(0).arcWidth(0).stroke(Color.RED)
.fill(Color.web("blue", 0.1)).translateX(rectX + (vWidth / 2)).translateY(rectY - (vHeight / 2))
.build();
p.getChildren().add(vertical);
// now create the rectangles indicating fingers found
for (int i = 0; i < fingerRects.length; i++) {
Rectangle mini = RectangleBuilder.create().height(mHeight).width(mRealWidth).arcHeight(0).arcWidth(0)
.stroke(Color.GREEN).fill(Color.web("blue", 0.1)).translateX(mRectX + (i * mWidth))
.translateY(mRectY).build();
fingerRects[i] = mini;
p.getChildren().add(mini);
}
}
public Color getPitchColor(Hand h) {
double direction = Math.toDegrees(h.direction().pitch());
if (direction < 10 && direction > -10) {
return Color.web("blue", 0.1);
} else if (direction < 100 && direction > 80) {
return Color.web("green", 0.1);
} else if (direction < -80 && direction > -100) {
return Color.web("yellow", 0.1);
} else {
return Color.web("red", 0.1);
}
}
public Color getHandColor(Importance importance) {
// port(left hand/secondary:red) and starboard(right hand/primary:green)
if (importance == Importance.PRIMARY) {
return Color.web("green", 1);
} else if (importance == Importance.SECONDARY) {
return Color.web("red", 1);
} else {
return Color.web("yellow", 1);
}
}
public void setShowGestureStart(Importance importance) {
Color fill = getHandColor(importance);
this.startHint.setVisible(true);
this.startHint.setFill(fill);
}
    /**
     * Show the hand indicator.
     *
     * @param importance whether this is the primary or secondary hand (controls the colour)
     * @param pos orientation of the hand; hides the vertical or horizontal bar accordingly
     * @param fingerCount number of detected fingers; the remaining finger rectangles are hidden
     * @param handledGesture whether a gesture was handled, which highlights the hand in yellow
     */
public void showHand(Importance importance, Position pos, int fingerCount, boolean handledGesture) {
// first all rectangles visible
setVisible(true);
// hide vertical or horizontal position
Color fill = getHandColor(importance);
if (pos == Position.HORIZONTAL) {
vertical.setVisible(false);
} else if (pos == Position.VERTICAL) {
horizontal.setVisible(false);
}
// notify the user that the gesture was handled
if (handledGesture) {
fill = Color.web("yellow", 1);
}
// color the rectangles
horizontal.setFill(fill);
vertical.setFill(fill);
// then we hide invisible fingers
for (int i = fingerCount; i < fingerRects.length; i++) {
fingerRects[i].setVisible(false);
}
}
/**
* Show or hide the complete hand with all indicators
*
* @param visible
*/
public void setVisible(boolean visible) {
hint.setVisible(visible);
startHint.setVisible(visible);
horizontal.setVisible(visible);
vertical.setVisible(visible);
for (Rectangle rect : this.fingerRects) {
rect.setVisible(visible);
}
}
/**
* Show or hide only the hand hint.
*
* @param visible
*/
public void setHintVisible(boolean visible) {
this.hint.setVisible(visible);
}
}
| N0rp/Snabb | src/main/java/eu/dowsing/kolla/widget/brick/facade/BrickView.java | Java | apache-2.0 | 6,215 |
/*
* Copyright (c) 2016, WSO2 Inc. (http://wso2.com) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wso2.msf4j;
import io.netty.buffer.ByteBuf;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* A responder for sending chunk-encoded response.
*/
public interface ChunkResponder extends Closeable {
/**
* Adds a chunk of data to the response. The content will be sent to the client asynchronously.
*
* @param chunk content to send
* @throws IOException if the connection is already closed
*/
void sendChunk(ByteBuffer chunk) throws IOException;
/**
* Adds a chunk of data to the response. The content will be sent to the client asynchronously.
*
* @param chunk content to send
* @throws IOException if this {@link ChunkResponder} already closed or the connection is closed
*/
void sendChunk(ByteBuf chunk) throws IOException;
/**
* Closes this responder which signals the end of the chunk response.
*/
@Override
void close() throws IOException;
}
| taniamahanama/product-msf4j | core/src/main/java/org/wso2/msf4j/ChunkResponder.java | Java | apache-2.0 | 1,625 |
/*
* Copyright 2016-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implementation of YANG node bgpVrfAf's children nodes.
*/
package org.onosproject.yang.gen.v1.ne.bgpcomm.rev20141225.nebgpcomm.bgpcomm.bgpvrfs.bgpvrf.bgpvrfafs.bgpvrfaf; | mengmoya/onos | apps/l3vpn/nel3vpn/nemgr/src/main/java/org/onosproject/yang/gen/v1/ne/bgpcomm/rev20141225/nebgpcomm/bgpcomm/bgpvrfs/bgpvrf/bgpvrfafs/bgpvrfaf/package-info.java | Java | apache-2.0 | 796 |
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.regionserver.SplitTransaction;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedHashMultimap;
/**
* Manages and performs region assignment.
* <p>
* Monitors ZooKeeper for events related to regions in transition.
* <p>
* Handles existing regions in transition during master failover.
*/
@InterfaceAudience.Private
public class AssignmentManager extends ZooKeeperListener {
private static final Log LOG = LogFactory.getLog(AssignmentManager.class);
public static final ServerName HBCK_CODE_SERVERNAME = ServerName.valueOf(HConstants.HBCK_CODE_NAME,
-1, -1L);
public static final String ASSIGNMENT_TIMEOUT = "hbase.master.assignment.timeoutmonitor.timeout";
public static final int DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT = 600000;
public static final String ASSIGNMENT_TIMEOUT_MANAGEMENT = "hbase.assignment.timeout.management";
public static final boolean DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT = false;
public static final String ALREADY_IN_TRANSITION_WAITTIME
= "hbase.assignment.already.intransition.waittime";
public static final int DEFAULT_ALREADY_IN_TRANSITION_WAITTIME = 60000; // 1 minute
protected final Server server;
private ServerManager serverManager;
private boolean shouldAssignRegionsWithFavoredNodes;
private CatalogTracker catalogTracker;
protected final TimeoutMonitor timeoutMonitor;
private final TimerUpdater timerUpdater;
private LoadBalancer balancer;
private final MetricsAssignmentManager metricsAssignmentManager;
private final TableLockManager tableLockManager;
private AtomicInteger numRegionsOpened = new AtomicInteger(0);
final private KeyLocker<String> locker = new KeyLocker<String>();
/**
* Map of regions to reopen after the schema of a table is changed. Key -
* encoded region name, value - HRegionInfo
*/
private final Map <String, HRegionInfo> regionsToReopen;
/*
* Maximum times we recurse an assignment/unassignment.
* See below in {@link #assign()} and {@link #unassign()}.
*/
private final int maximumAttempts;
/**
* Map of two merging regions from the region to be created.
*/
private final Map<String, PairOfSameType<HRegionInfo>> mergingRegions
= new HashMap<String, PairOfSameType<HRegionInfo>>();
/**
* The sleep time for which the assignment will wait before retrying in case of hbase:meta assignment
* failure due to lack of availability of region plan
*/
private final long sleepTimeBeforeRetryingMetaAssignment;
/** Plans for region movement. Key is the encoded version of a region name*/
// TODO: When do plans get cleaned out? Ever? In server open and in server
// shutdown processing -- St.Ack
// All access to this Map must be synchronized.
final NavigableMap<String, RegionPlan> regionPlans =
new TreeMap<String, RegionPlan>();
private final ZKTable zkTable;
/**
* Contains the server which need to update timer, these servers will be
* handled by {@link TimerUpdater}
*/
private final ConcurrentSkipListSet<ServerName> serversInUpdatingTimer;
private final ExecutorService executorService;
// For unit tests, keep track of calls to ClosedRegionHandler
private Map<HRegionInfo, AtomicBoolean> closedRegionHandlerCalled = null;
// For unit tests, keep track of calls to OpenedRegionHandler
private Map<HRegionInfo, AtomicBoolean> openedRegionHandlerCalled = null;
//Thread pool executor service for timeout monitor
private java.util.concurrent.ExecutorService threadPoolExecutorService;
// A bunch of ZK events workers. Each is a single thread executor service
private final java.util.concurrent.ExecutorService zkEventWorkers;
private List<EventType> ignoreStatesRSOffline = Arrays.asList(
EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED);
private final RegionStates regionStates;
// The threshold to use bulk assigning. Using bulk assignment
// only if assigning at least this many regions to at least this
// many servers. If assigning fewer regions to fewer servers,
// bulk assigning may not be as efficient.
private final int bulkAssignThresholdRegions;
private final int bulkAssignThresholdServers;
// Should bulk assignment wait till all regions are assigned,
// or it is timed out? This is useful to measure bulk assignment
// performance, but not needed in most use cases.
private final boolean bulkAssignWaitTillAllAssigned;
/**
* Indicator that AssignmentManager has recovered the region states so
* that ServerShutdownHandler can be fully enabled and re-assign regions
* of dead servers. So that when re-assignment happens, AssignmentManager
* has proper region states.
*
* Protected to ease testing.
*/
protected final AtomicBoolean failoverCleanupDone = new AtomicBoolean(false);
/** Is the TimeOutManagement activated **/
private final boolean tomActivated;
/**
* A map to track the count a region fails to open in a row.
* So that we don't try to open a region forever if the failure is
* unrecoverable. We don't put this information in region states
* because we don't expect this to happen frequently; we don't
* want to copy this information over during each state transition either.
*/
private final ConcurrentHashMap<String, AtomicInteger>
failedOpenTracker = new ConcurrentHashMap<String, AtomicInteger>();
// A flag to indicate if we are using ZK for region assignment
private final boolean useZKForAssignment;
// In case not using ZK for region assignment, region states
// are persisted in meta with a state store
private final RegionStateStore regionStateStore;
/**
* For testing only! Set to true to skip handling of split.
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL")
public static boolean TEST_SKIP_SPLIT_HANDLING = false;
/** Listeners that are called on assignment events. */
private List<AssignmentListener> listeners = new CopyOnWriteArrayList<AssignmentListener>();
/**
* Constructs a new assignment manager.
*
* @param server
* @param serverManager
* @param catalogTracker
* @param service
* @throws KeeperException
* @throws IOException
*/
public AssignmentManager(Server server, ServerManager serverManager,
CatalogTracker catalogTracker, final LoadBalancer balancer,
final ExecutorService service, MetricsMaster metricsMaster,
final TableLockManager tableLockManager) throws KeeperException, IOException {
super(server.getZooKeeper());
this.server = server;
this.serverManager = serverManager;
this.catalogTracker = catalogTracker;
this.executorService = service;
this.regionStateStore = new RegionStateStore(server);
this.regionsToReopen = Collections.synchronizedMap
(new HashMap<String, HRegionInfo> ());
Configuration conf = server.getConfiguration();
// Only read favored nodes if using the favored nodes load balancer.
this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
FavoredNodeLoadBalancer.class);
this.tomActivated = conf.getBoolean(
ASSIGNMENT_TIMEOUT_MANAGEMENT, DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT);
if (tomActivated){
this.serversInUpdatingTimer = new ConcurrentSkipListSet<ServerName>();
this.timeoutMonitor = new TimeoutMonitor(
conf.getInt("hbase.master.assignment.timeoutmonitor.period", 30000),
server, serverManager,
conf.getInt(ASSIGNMENT_TIMEOUT, DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT));
this.timerUpdater = new TimerUpdater(conf.getInt(
"hbase.master.assignment.timerupdater.period", 10000), server);
Threads.setDaemonThreadRunning(timerUpdater.getThread(),
server.getServerName() + ".timerUpdater");
} else {
this.serversInUpdatingTimer = null;
this.timeoutMonitor = null;
this.timerUpdater = null;
}
this.zkTable = new ZKTable(this.watcher);
// This is the max attempts, not retries, so it should be at least 1.
this.maximumAttempts = Math.max(1,
this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
this.sleepTimeBeforeRetryingMetaAssignment = this.server.getConfiguration().getLong(
"hbase.meta.assignment.retry.sleeptime", 1000l);
this.balancer = balancer;
int maxThreads = conf.getInt("hbase.assignment.threads.max", 30);
this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool(
maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("AM."));
this.regionStates = new RegionStates(server, serverManager, regionStateStore);
this.bulkAssignWaitTillAllAssigned =
conf.getBoolean("hbase.bulk.assignment.waittillallassigned", false);
this.bulkAssignThresholdRegions = conf.getInt("hbase.bulk.assignment.threshold.regions", 7);
this.bulkAssignThresholdServers = conf.getInt("hbase.bulk.assignment.threshold.servers", 3);
int workers = conf.getInt("hbase.assignment.zkevent.workers", 20);
ThreadFactory threadFactory = Threads.newDaemonThreadFactory("AM.ZK.Worker");
zkEventWorkers = Threads.getBoundedCachedThreadPool(workers, 60L,
TimeUnit.SECONDS, threadFactory);
this.tableLockManager = tableLockManager;
this.metricsAssignmentManager = new MetricsAssignmentManager();
useZKForAssignment = ConfigUtil.useZKForAssignment(conf);
}
void startTimeOutMonitor() {
if (tomActivated) {
Threads.setDaemonThreadRunning(timeoutMonitor.getThread(), server.getServerName()
+ ".timeoutMonitor");
}
}
/**
* Add the listener to the notification list.
* @param listener The AssignmentListener to register
*/
public void registerListener(final AssignmentListener listener) {
this.listeners.add(listener);
}
/**
* Remove the listener from the notification list.
* @param listener The AssignmentListener to unregister
*/
public boolean unregisterListener(final AssignmentListener listener) {
return this.listeners.remove(listener);
}
/**
* @return Instance of ZKTable.
*/
public ZKTable getZKTable() {
// These are 'expensive' to make, involving a trip to the zk ensemble, so allow
// sharing.
return this.zkTable;
}
/**
* This SHOULD not be public. It is public now
* because of some unit tests.
*
* TODO: make it package private and keep RegionStates in the master package
*/
public RegionStates getRegionStates() {
return regionStates;
}
/**
* Used in some tests to mock up region state in meta
*/
@VisibleForTesting
RegionStateStore getRegionStateStore() {
return regionStateStore;
}
public RegionPlan getRegionReopenPlan(HRegionInfo hri) {
return new RegionPlan(hri, null, regionStates.getRegionServerOfRegion(hri));
}
/**
* Add a regionPlan for the specified region.
* @param encodedName
* @param plan
*/
public void addPlan(String encodedName, RegionPlan plan) {
synchronized (regionPlans) {
regionPlans.put(encodedName, plan);
}
}
/**
* Add a map of region plans.
*/
public void addPlans(Map<String, RegionPlan> plans) {
synchronized (regionPlans) {
regionPlans.putAll(plans);
}
}
/**
* Set the list of regions that will be reopened
* because of an update in table schema
*
* @param regions
* list of regions that should be tracked for reopen
*/
public void setRegionsToReopen(List <HRegionInfo> regions) {
for(HRegionInfo hri : regions) {
regionsToReopen.put(hri.getEncodedName(), hri);
}
}
/**
* Used by the client to identify if all regions have the schema updates
*
* @param tableName
* @return Pair indicating the status of the alter command
* @throws IOException
*/
public Pair<Integer, Integer> getReopenStatus(TableName tableName)
throws IOException {
List <HRegionInfo> hris =
MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
Integer pending = 0;
for (HRegionInfo hri : hris) {
String name = hri.getEncodedName();
// no lock concurrent access ok: sequential consistency respected.
if (regionsToReopen.containsKey(name)
|| regionStates.isRegionInTransition(name)) {
pending++;
}
}
return new Pair<Integer, Integer>(pending, hris.size());
}
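  // Illustrative sketch only -- not part of HBase. It shows how a caller could turn the
  // (pending, total) pair returned by getReopenStatus above into a completion percentage
  // for an alter-table status report. The helper name is an assumption for the example.
  private static int reopenProgressPercent(final Pair<Integer, Integer> reopenStatus) {
    int pending = reopenStatus.getFirst();
    int total = reopenStatus.getSecond();
    // A table with no regions has nothing left to reopen, so report it as complete.
    return total == 0 ? 100 : (100 * (total - pending)) / total;
  }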
/**
* Used by ServerShutdownHandler to make sure AssignmentManager has completed
* the failover cleanup before re-assigning regions of dead servers. So that
* when re-assignment happens, AssignmentManager has proper region states.
*/
public boolean isFailoverCleanupDone() {
return failoverCleanupDone.get();
}
/**
* To avoid racing with AM, external entities may need to lock a region,
* for example, when SSH checks what regions to skip re-assigning.
*/
public Lock acquireRegionLock(final String encodedName) {
return locker.acquireLock(encodedName);
}
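  // Illustrative sketch only -- not part of HBase. It shows the intended usage pattern for
  // acquireRegionLock: an external caller (such as ServerShutdownHandler) holds the
  // per-region lock around its check-and-act sequence so it cannot race with the
  // AssignmentManager. The helper name is an assumption for the example.
  private void runWithRegionLock(final String encodedName, final Runnable action) {
    Lock lock = acquireRegionLock(encodedName);
    try {
      // e.g. deciding whether to skip re-assigning the region for a dead server
      action.run();
    } finally {
      lock.unlock();
    }
  }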
/**
* Now, failover cleanup is completed. Notify server manager to
* process queued up dead servers processing, if any.
*/
void failoverCleanupDone() {
failoverCleanupDone.set(true);
serverManager.processQueuedDeadServers();
}
/**
* Called on startup.
 * Figures out whether this is a fresh cluster start or we are joining an extant running cluster.
* @throws IOException
* @throws KeeperException
* @throws InterruptedException
*/
void joinCluster() throws IOException,
KeeperException, InterruptedException {
long startTime = System.currentTimeMillis();
// Concurrency note: In the below the accesses on regionsInTransition are
// outside of a synchronization block where usually all accesses to RIT are
// synchronized. The presumption is that in this case it is safe since this
// method is being played by a single thread on startup.
// TODO: Regions that have a null location and are not in regionsInTransitions
// need to be handled.
// Scan hbase:meta to build list of existing regions, servers, and assignment
// Returns servers who have not checked in (assumed dead) and their regions
Map<ServerName, List<HRegionInfo>> deadServers = rebuildUserRegions();
// This method will assign all user regions if a clean server startup or
// it will reconstruct master state and cleanup any leftovers from
// previous master process.
boolean failover = processDeadServersAndRegionsInTransition(deadServers);
if (!useZKForAssignment) {
// Not use ZK for assignment any more, remove the ZNode
ZKUtil.deleteNodeRecursively(watcher, watcher.assignmentZNode);
}
recoverTableInDisablingState();
recoverTableInEnablingState();
LOG.info("Joined the cluster in " + (System.currentTimeMillis()
- startTime) + "ms, failover=" + failover);
}
/**
 * Processes all regions that are in transition in zookeeper and also
 * processes the list of dead servers by scanning the META.
 * Used by a master joining a cluster. If we figure this is a clean cluster
* startup, will assign all user regions.
* @param deadServers
* Map of dead servers and their regions. Can be null.
* @throws KeeperException
* @throws IOException
* @throws InterruptedException
*/
boolean processDeadServersAndRegionsInTransition(
final Map<ServerName, List<HRegionInfo>> deadServers)
throws KeeperException, IOException, InterruptedException {
List<String> nodes = ZKUtil.listChildrenNoWatch(watcher,
watcher.assignmentZNode);
if (nodes == null && useZKForAssignment) {
String errorMessage = "Failed to get the children from ZK";
server.abort(errorMessage, new IOException(errorMessage));
return true; // Doesn't matter in this case
}
boolean failover = !serverManager.getDeadServers().isEmpty();
if (failover) {
// This may not be a failover actually, especially if meta is on this master.
if (LOG.isDebugEnabled()) {
LOG.debug("Found dead servers out on cluster " + serverManager.getDeadServers());
}
} else {
// If any one region except meta is assigned, it's a failover.
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
for (Map.Entry<HRegionInfo, ServerName> en : regionStates.getRegionAssignments().entrySet()) {
HRegionInfo hri = en.getKey();
if (!hri.isMetaTable() && onlineServers.contains(en.getValue())) {
LOG.debug("Found " + hri + " out on cluster");
failover = true;
break;
}
}
}
if (!failover && nodes != null) {
// If any one region except meta is in transition, it's a failover.
for (String encodedName : nodes) {
RegionState regionState = regionStates.getRegionState(encodedName);
if (regionState != null && !regionState.getRegion().isMetaRegion()) {
LOG.debug("Found " + regionState + " in RITs");
failover = true;
break;
}
}
}
if (!failover && !useZKForAssignment) {
// If any region except meta is in transition on a live server, it's a failover.
Map<String, RegionState> regionsInTransition = regionStates.getRegionsInTransition();
if (!regionsInTransition.isEmpty()) {
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
for (RegionState regionState : regionsInTransition.values()) {
if (!regionState.getRegion().isMetaRegion()
&& onlineServers.contains(regionState.getServerName())) {
LOG.debug("Found " + regionState + " in RITs");
failover = true;
break;
}
}
}
}
if (!failover) {
// If we get here, we have a full cluster restart. It is a failover only
// if there are some HLogs that are not split yet. For meta HLogs, they should have
// been split already, if any. We can walk through those queued dead servers;
// if they don't have any HLogs, this restart should be considered a clean one
Set<ServerName> queuedDeadServers = serverManager.getRequeuedDeadServers().keySet();
if (!queuedDeadServers.isEmpty()) {
Configuration conf = server.getConfiguration();
Path rootdir = FSUtils.getRootDir(conf);
FileSystem fs = rootdir.getFileSystem(conf);
for (ServerName serverName : queuedDeadServers) {
Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
if (fs.exists(logDir) || fs.exists(splitDir)) {
LOG.debug("Found queued dead server " + serverName);
failover = true;
break;
}
}
if (!failover) {
// We figured that it's not a failover, so no need to
// work on these re-queued dead servers any more.
LOG.info("AM figured that it's not a failover and cleaned up " + queuedDeadServers.size()
+ " queued dead servers");
serverManager.removeRequeuedDeadServers();
}
}
}
Set<TableName> disabledOrDisablingOrEnabling = null;
if (!failover) {
disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher);
disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher));
// Clean re/start, mark all user regions closed before reassignment
// TODO -Hbase-11319
regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling);
}
// Now region states are restored
regionStateStore.start();
// If we found user regions out on the cluster, it's a failover.
if (failover) {
LOG.info("Found regions out on cluster or in RIT; presuming failover");
// Process list of dead servers and regions in RIT.
// See HBASE-4580 for more information.
processDeadServersAndRecoverLostRegions(deadServers);
}
if (!failover && useZKForAssignment) {
// Cleanup any existing ZK nodes and start watching
ZKAssign.deleteAllNodes(watcher);
ZKUtil.listChildrenAndWatchForNewChildren(this.watcher, this.watcher.assignmentZNode);
}
// Now we can safely claim failover cleanup completed and enable
// ServerShutdownHandler for further processing. The nodes (below)
// in transition, if any, are for regions not related to those
// dead servers at all, and can be done in parallel to SSH.
failoverCleanupDone();
if (!failover) {
// Fresh cluster startup.
LOG.info("Clean cluster startup. Assigning user regions");
assignAllUserRegions(disabledOrDisablingOrEnabling);
}
return failover;
}
/**
* If region is up in zk in transition, then do fixup and block and wait until
* the region is assigned and out of transition. Used on startup for
* catalog regions.
* @param hri Region to look for.
* @return True if we processed a region in transition else false if region
* was not up in zk in transition.
* @throws InterruptedException
* @throws KeeperException
* @throws IOException
*/
boolean processRegionInTransitionAndBlockUntilAssigned(final HRegionInfo hri)
throws InterruptedException, KeeperException, IOException {
String encodedRegionName = hri.getEncodedName();
if (!processRegionInTransition(encodedRegionName, hri)) {
return false; // The region is not in transition
}
LOG.debug("Waiting on " + HRegionInfo.prettyPrint(encodedRegionName));
while (!this.server.isStopped() &&
this.regionStates.isRegionInTransition(encodedRegionName)) {
RegionState state = this.regionStates.getRegionTransitionState(encodedRegionName);
if (state == null || !serverManager.isServerOnline(state.getServerName())) {
// The region is not in transition, or not in transition on an online
// server. Doesn't help to block here any more. Caller needs to
// verify the region is actually assigned.
break;
}
this.regionStates.waitForUpdate(100);
}
return true;
}
/**
* Process failover of new master for region <code>encodedRegionName</code>
* up in zookeeper.
* @param encodedRegionName Region to process failover for.
* @param regionInfo If null we'll go get it from meta table.
* @return True if we processed <code>regionInfo</code> as a RIT.
* @throws KeeperException
* @throws IOException
*/
boolean processRegionInTransition(final String encodedRegionName,
final HRegionInfo regionInfo) throws KeeperException, IOException {
// We need a lock here to ensure that we will not put the same region twice
// It has no reason to be a lock shared with the other operations.
// We can do the lock on the region only, instead of a global lock: what we want to ensure
// is that we don't have two threads working on the same region.
Lock lock = locker.acquireLock(encodedRegionName);
try {
Stat stat = new Stat();
byte [] data = ZKAssign.getDataAndWatch(watcher, encodedRegionName, stat);
if (data == null) return false;
RegionTransition rt;
try {
rt = RegionTransition.parseFrom(data);
} catch (DeserializationException e) {
LOG.warn("Failed parse znode data", e);
return false;
}
HRegionInfo hri = regionInfo;
if (hri == null) {
// The region info is not passed in. We will try to find the region
// from region states map/meta based on the encoded region name. But we
// may not be able to find it. This is valid for online merge, where
// the region may not have been created yet if the merge is not completed.
// Therefore, it is not in meta at master recovery time.
hri = regionStates.getRegionInfo(rt.getRegionName());
EventType et = rt.getEventType();
if (hri == null && et != EventType.RS_ZK_REGION_MERGING
&& et != EventType.RS_ZK_REQUEST_REGION_MERGE) {
LOG.warn("Couldn't find the region in recovering " + rt);
return false;
}
}
return processRegionsInTransition(
rt, hri, stat.getVersion());
} finally {
lock.unlock();
}
}
/**
 * This call is invoked only when (1) the master assigns meta, or
 * (2) during failover-mode startup, while processing zk assignment nodes.
* The locker is set in the caller. It returns true if the region
* is in transition for sure, false otherwise.
*
 * It should be private but it is used by some tests too.
*/
boolean processRegionsInTransition(
final RegionTransition rt, final HRegionInfo regionInfo,
final int expectedVersion) throws KeeperException {
EventType et = rt.getEventType();
// Get ServerName. Could not be null.
final ServerName sn = rt.getServerName();
final byte[] regionName = rt.getRegionName();
final String encodedName = HRegionInfo.encodeRegionName(regionName);
final String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
LOG.info("Processing " + prettyPrintedRegionName + " in state: " + et);
if (regionStates.isRegionInTransition(encodedName)
&& (regionInfo.isMetaRegion() || !useZKForAssignment)) {
LOG.info("Processed region " + prettyPrintedRegionName + " in state: "
+ et + ", does nothing since the region is already in transition "
+ regionStates.getRegionTransitionState(encodedName));
// Just return
return true;
}
if (!serverManager.isServerOnline(sn)) {
// It was transitioning on a dead server, so it's closed now.
// Force to OFFLINE and put it in transition, but not assign it
// since log splitting for the dead server is not done yet.
LOG.debug("RIT " + encodedName + " in state=" + rt.getEventType() +
" was on deadserver; forcing offline");
if (regionStates.isRegionOnline(regionInfo)) {
// Meta could still show the region is assigned to the previous
// server. If that server is online, when we reload the meta, the
// region is put back to online, we need to offline it.
regionStates.regionOffline(regionInfo);
sendRegionClosedNotification(regionInfo);
}
// Put it back in transition so that SSH can re-assign it
regionStates.updateRegionState(regionInfo, State.OFFLINE, sn);
if (regionInfo.isMetaRegion()) {
// If it's meta region, reset the meta location.
// So that master knows the right meta region server.
MetaRegionTracker.setMetaLocation(watcher, sn);
} else {
// No matter the previous server is online or offline,
// we need to reset the last region server of the region.
regionStates.setLastRegionServerOfRegion(sn, encodedName);
// Make sure we know the server is dead.
if (!serverManager.isServerDead(sn)) {
serverManager.expireServer(sn);
}
}
return false;
}
switch (et) {
case M_ZK_REGION_CLOSING:
// Insert into RIT & resend the query to the region server: maybe the previous master
// died before sending the query the first time.
final RegionState rsClosing = regionStates.updateRegionState(rt, State.CLOSING);
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
try {
unassign(regionInfo, rsClosing, expectedVersion, null, useZKForAssignment, null);
if (regionStates.isRegionOffline(regionInfo)) {
assign(regionInfo, true);
}
} finally {
lock.unlock();
}
}
});
break;
case RS_ZK_REGION_CLOSED:
case RS_ZK_REGION_FAILED_OPEN:
// Region is closed, insert into RIT and handle it
regionStates.updateRegionState(regionInfo, State.CLOSED, sn);
invokeAssign(regionInfo);
break;
case M_ZK_REGION_OFFLINE:
// Insert in RIT and resend to the regionserver
regionStates.updateRegionState(rt, State.PENDING_OPEN);
final RegionState rsOffline = regionStates.getRegionState(regionInfo);
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
try {
RegionPlan plan = new RegionPlan(regionInfo, null, sn);
addPlan(encodedName, plan);
assign(rsOffline, false, false);
} finally {
lock.unlock();
}
}
});
break;
case RS_ZK_REGION_OPENING:
regionStates.updateRegionState(rt, State.OPENING);
break;
case RS_ZK_REGION_OPENED:
// Region is opened, insert into RIT and handle it
// This could be done asynchronously, we would need then to acquire the lock in the
// handler.
regionStates.updateRegionState(rt, State.OPEN);
new OpenedRegionHandler(server, this, regionInfo, sn, expectedVersion).process();
break;
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REGION_SPLIT:
// Splitting region should be online. We could have skipped it during
// user region rebuilding since we may consider the split is completed.
// Put it in SPLITTING state to avoid complications.
regionStates.regionOnline(regionInfo, sn);
regionStates.updateRegionState(rt, State.SPLITTING);
if (!handleRegionSplitting(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteSplittingNode(encodedName, sn);
}
break;
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGING:
case RS_ZK_REGION_MERGED:
if (!handleRegionMerging(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteMergingNode(encodedName, sn);
}
break;
default:
throw new IllegalStateException("Received region in state:" + et + " is not valid.");
}
LOG.info("Processed region " + prettyPrintedRegionName + " in state "
+ et + ", on " + (serverManager.isServerOnline(sn) ? "" : "dead ")
+ "server: " + sn);
return true;
}
/**
* When a region is closed, it should be removed from the regionsToReopen
* @param hri HRegionInfo of the region which was closed
*/
public void removeClosedRegion(HRegionInfo hri) {
if (regionsToReopen.remove(hri.getEncodedName()) != null) {
LOG.debug("Removed region from reopening regions because it was closed");
}
}
/**
* Handles various states an unassigned node can be in.
* <p>
* Method is called when a state change is suspected for an unassigned node.
* <p>
* This deals with skipped transitions (we got a CLOSED but didn't see CLOSING
* yet).
* @param rt
* @param expectedVersion
*/
void handleRegion(final RegionTransition rt, int expectedVersion) {
if (rt == null) {
LOG.warn("Unexpected NULL input for RegionTransition rt");
return;
}
final ServerName sn = rt.getServerName();
// Check if this is a special HBCK transition
if (sn.equals(HBCK_CODE_SERVERNAME)) {
handleHBCK(rt);
return;
}
final long createTime = rt.getCreateTime();
final byte[] regionName = rt.getRegionName();
String encodedName = HRegionInfo.encodeRegionName(regionName);
String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
// Verify this is a known server
if (!serverManager.isServerOnline(sn)
&& !ignoreStatesRSOffline.contains(rt.getEventType())) {
LOG.warn("Attempted to handle region transition for server but " +
"it is not online: " + prettyPrintedRegionName + ", " + rt);
return;
}
RegionState regionState =
regionStates.getRegionState(encodedName);
long startTime = System.currentTimeMillis();
if (LOG.isDebugEnabled()) {
boolean lateEvent = createTime < (startTime - 15000);
LOG.debug("Handling " + rt.getEventType() +
", server=" + sn + ", region=" +
(prettyPrintedRegionName == null ? "null" : prettyPrintedRegionName) +
(lateEvent ? ", which is more than 15 seconds late" : "") +
", current_state=" + regionState);
}
// We don't do anything for this event,
// so separate it out, no need to lock/unlock anything
if (rt.getEventType() == EventType.M_ZK_REGION_OFFLINE) {
return;
}
// We need a lock on the region as we could update it
Lock lock = locker.acquireLock(encodedName);
try {
RegionState latestState =
regionStates.getRegionState(encodedName);
if ((regionState == null && latestState != null)
|| (regionState != null && latestState == null)
|| (regionState != null && latestState != null
&& latestState.getState() != regionState.getState())) {
LOG.warn("Region state changed from " + regionState + " to "
+ latestState + ", while acquiring lock");
}
long waitedTime = System.currentTimeMillis() - startTime;
if (waitedTime > 5000) {
LOG.warn("Took " + waitedTime + "ms to acquire the lock");
}
regionState = latestState;
switch (rt.getEventType()) {
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REGION_SPLIT:
if (!handleRegionSplitting(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteSplittingNode(encodedName, sn);
}
break;
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGING:
case RS_ZK_REGION_MERGED:
// Merged region is a new region, we can't find it in the region states now.
// However, the two merging regions are not new. They should be in state for merging.
if (!handleRegionMerging(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteMergingNode(encodedName, sn);
}
break;
case M_ZK_REGION_CLOSING:
// Should see CLOSING after we have asked it to CLOSE or additional
// times after already being in state of CLOSING
if (regionState == null
|| !regionState.isPendingCloseOrClosingOnServer(sn)) {
LOG.warn("Received CLOSING for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Transition to CLOSING (or update stamp if already CLOSING)
regionStates.updateRegionState(rt, State.CLOSING);
break;
case RS_ZK_REGION_CLOSED:
// Should see CLOSED after CLOSING but possible after PENDING_CLOSE
if (regionState == null
|| !regionState.isPendingCloseOrClosingOnServer(sn)) {
LOG.warn("Received CLOSED for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Handle CLOSED by assigning elsewhere or stopping if a disable
// If we got here all is good. Need to update RegionState -- else
// what follows will fail because not in expected state.
new ClosedRegionHandler(server, this, regionState.getRegion()).process();
updateClosedRegionHandlerTracker(regionState.getRegion());
break;
case RS_ZK_REGION_FAILED_OPEN:
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received FAILED_OPEN for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
return;
}
AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
if (failedOpenCount == null) {
failedOpenCount = new AtomicInteger();
// No need to use putIfAbsent, or extra synchronization since
// this whole handleRegion block is locked on the encoded region
// name, and failedOpenTracker is updated only in this block
failedOpenTracker.put(encodedName, failedOpenCount);
}
if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
regionStates.updateRegionState(rt, State.FAILED_OPEN);
// remove the tracking info to save memory, also reset
// the count for next open initiative
failedOpenTracker.remove(encodedName);
} else {
// Handle this the same as if it were opened and then closed.
regionState = regionStates.updateRegionState(rt, State.CLOSED);
if (regionState != null) {
// When there are more than one region server a new RS is selected as the
// destination and the same is updated in the regionplan. (HBASE-5546)
try {
getRegionPlan(regionState.getRegion(), sn, true);
new ClosedRegionHandler(server, this, regionState.getRegion()).process();
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
}
}
break;
case RS_ZK_REGION_OPENING:
// Should see OPENING after we have asked it to OPEN or additional
// times after already being in state of OPENING
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received OPENING for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Transition to OPENING (or update stamp if already OPENING)
regionStates.updateRegionState(rt, State.OPENING);
break;
case RS_ZK_REGION_OPENED:
// Should see OPENED after OPENING but possible after PENDING_OPEN.
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received OPENED for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
if (regionState != null) {
// Close it without updating the internal region states,
// so as not to create double assignments in unlucky scenarios
// mentioned in OpenRegionHandler#process
unassign(regionState.getRegion(), null, -1, null, false, sn);
}
return;
}
// Handle OPENED by removing from transition and deleting the zk node
regionState = regionStates.updateRegionState(rt, State.OPEN);
if (regionState != null) {
failedOpenTracker.remove(encodedName); // reset the count, if any
new OpenedRegionHandler(
server, this, regionState.getRegion(), sn, expectedVersion).process();
updateOpenedRegionHandlerTracker(regionState.getRegion());
}
break;
default:
throw new IllegalStateException("Received event is not valid.");
}
} finally {
lock.unlock();
}
}
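  // Illustrative sketch only -- not part of HBase. It isolates the bounded-retry bookkeeping
  // used for RS_ZK_REGION_FAILED_OPEN above: count consecutive failed opens per region and
  // give up once the configured maximum is reached. The method name is an assumption; the
  // map mirrors the failedOpenTracker field. Skipping putIfAbsent is only safe because
  // callers serialize on the per-region lock, as handleRegion does.
  private static boolean recordFailedOpenAndCheckLimit(
      final ConcurrentHashMap<String, AtomicInteger> tracker,
      final String encodedName, final int maxAttempts) {
    AtomicInteger failures = tracker.get(encodedName);
    if (failures == null) {
      failures = new AtomicInteger();
      tracker.put(encodedName, failures);
    }
    if (failures.incrementAndGet() >= maxAttempts) {
      // Remove the entry so a later, fresh assignment attempt starts counting from zero.
      tracker.remove(encodedName);
      return true;
    }
    return false;
  }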
//For unit tests only
boolean wasClosedHandlerCalled(HRegionInfo hri) {
AtomicBoolean b = closedRegionHandlerCalled.get(hri);
//compareAndSet to be sure that unit tests don't see stale values. This means
//we will return true exactly once unless the handler code resets this value
//to true again.
return b == null ? false : b.compareAndSet(true, false);
}
//For unit tests only
boolean wasOpenedHandlerCalled(HRegionInfo hri) {
AtomicBoolean b = openedRegionHandlerCalled.get(hri);
//compareAndSet to be sure that unit tests don't see stale values. This means
//we will return true exactly once unless the handler code resets this value
//to true again.
return b == null ? false : b.compareAndSet(true, false);
}
//For unit tests only
void initializeHandlerTrackers() {
closedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
openedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
}
void updateClosedRegionHandlerTracker(HRegionInfo hri) {
if (closedRegionHandlerCalled != null) { //only for unit tests this is true
closedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
}
void updateOpenedRegionHandlerTracker(HRegionInfo hri) {
if (openedRegionHandlerCalled != null) { //only for unit tests this is true
openedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
}
// TODO: processFavoredNodes might throw an exception, e.g., if the
// meta could not be contacted/updated. We need to see how seriously to treat
// this problem. Should we fail the current assignment? We should be able
// to recover from this problem eventually (if the meta couldn't be updated
// things should work normally and eventually get fixed up).
void processFavoredNodes(List<HRegionInfo> regions) throws IOException {
if (!shouldAssignRegionsWithFavoredNodes) return;
// The AM gets the favored nodes info for each region and updates the meta
// table with that info
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes =
new HashMap<HRegionInfo, List<ServerName>>();
for (HRegionInfo region : regions) {
regionToFavoredNodes.put(region,
((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
}
FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, catalogTracker);
}
/**
* Handle a ZK unassigned node transition triggered by HBCK repair tool.
* <p>
* This is handled in a separate code path because it breaks the normal rules.
* @param rt
*/
private void handleHBCK(RegionTransition rt) {
String encodedName = HRegionInfo.encodeRegionName(rt.getRegionName());
LOG.info("Handling HBCK triggered transition=" + rt.getEventType() +
", server=" + rt.getServerName() + ", region=" +
HRegionInfo.prettyPrint(encodedName));
RegionState regionState = regionStates.getRegionTransitionState(encodedName);
switch (rt.getEventType()) {
case M_ZK_REGION_OFFLINE:
HRegionInfo regionInfo;
if (regionState != null) {
regionInfo = regionState.getRegion();
} else {
try {
byte [] name = rt.getRegionName();
Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(catalogTracker, name);
regionInfo = p.getFirst();
} catch (IOException e) {
LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
return;
}
}
LOG.info("HBCK repair is triggering assignment of region=" +
regionInfo.getRegionNameAsString());
// trigger assign, node is already in OFFLINE so don't need to update ZK
assign(regionInfo, false);
break;
default:
LOG.warn("Received unexpected region state from HBCK: " + rt.toString());
break;
}
}
// ZooKeeper events
/**
* New unassigned node has been created.
*
* <p>This happens when an RS begins the OPENING or CLOSING of a region by
* creating an unassigned node.
*
* <p>When this happens we must:
* <ol>
* <li>Watch the node for further events</li>
* <li>Read and handle the state in the node</li>
* </ol>
*/
@Override
public void nodeCreated(String path) {
handleAssignmentEvent(path);
}
/**
* Existing unassigned node has had data changed.
*
* <p>This happens when an RS transitions from OFFLINE to OPENING, or between
* OPENING/OPENED and CLOSING/CLOSED.
*
* <p>When this happens we must:
* <ol>
* <li>Watch the node for further events</li>
* <li>Read and handle the state in the node</li>
* </ol>
*/
@Override
public void nodeDataChanged(String path) {
handleAssignmentEvent(path);
}
// We don't want to have two events on the same region managed simultaneously.
// For this reason, we need to wait if an event on the same region is currently in progress.
// So we track the region names of the events in progress, and we keep a waiting list.
private final Set<String> regionsInProgress = new HashSet<String>();
// In a LinkedHashMultimap, the put order is kept when we retrieve the collection back. We need
// this as we want the events to be managed in the same order as we received them.
private final LinkedHashMultimap <String, RegionRunnable>
zkEventWorkerWaitingList = LinkedHashMultimap.create();
/**
* A specific runnable that works only on a region.
*/
private interface RegionRunnable extends Runnable{
/**
* @return - the name of the region it works on.
*/
String getRegionName();
}
/**
 * Submit a task, ensuring that there is only one task at a time working on a given region.
* Order is respected.
*/
protected void zkEventWorkersSubmit(final RegionRunnable regRunnable) {
synchronized (regionsInProgress) {
// If there is already a task for this region, we add it to the
// waiting list and return.
if (regionsInProgress.contains(regRunnable.getRegionName())) {
synchronized (zkEventWorkerWaitingList){
zkEventWorkerWaitingList.put(regRunnable.getRegionName(), regRunnable);
}
return;
}
// No event in progress on this region => we can submit a new task immediately.
regionsInProgress.add(regRunnable.getRegionName());
zkEventWorkers.submit(new Runnable() {
@Override
public void run() {
try {
regRunnable.run();
} finally {
// now that we have finished, let's see if there is an event for the same region in the
// waiting list. If it's the case, we can now submit it to the pool.
synchronized (regionsInProgress) {
regionsInProgress.remove(regRunnable.getRegionName());
synchronized (zkEventWorkerWaitingList) {
java.util.Set<RegionRunnable> waiting = zkEventWorkerWaitingList.get(
regRunnable.getRegionName());
if (!waiting.isEmpty()) {
// We want the first object only. The only way to get it is through an iterator.
RegionRunnable toSubmit = waiting.iterator().next();
zkEventWorkerWaitingList.remove(toSubmit.getRegionName(), toSubmit);
zkEventWorkersSubmit(toSubmit);
}
}
}
}
}
});
}
}
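  // Illustrative sketch only -- not part of HBase. It shows how a caller submits work through
  // zkEventWorkersSubmit above: wrap the task as a RegionRunnable keyed by the region name so
  // that events for the same region are processed strictly in arrival order, while events for
  // different regions may run in parallel. The helper name is an assumption for the example.
  private void submitRegionTask(final String regionName, final Runnable task) {
    zkEventWorkersSubmit(new RegionRunnable() {
      @Override
      public String getRegionName() {
        return regionName;
      }
      @Override
      public void run() {
        task.run();
      }
    });
  }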
@Override
public void nodeDeleted(final String path) {
if (path.startsWith(watcher.assignmentZNode)) {
final String regionName = ZKAssign.getRegionName(watcher, path);
zkEventWorkersSubmit(new RegionRunnable() {
@Override
public String getRegionName() {
return regionName;
}
@Override
public void run() {
Lock lock = locker.acquireLock(regionName);
try {
RegionState rs = regionStates.getRegionTransitionState(regionName);
if (rs == null) {
rs = regionStates.getRegionState(regionName);
if (rs == null || !rs.isMergingNew()) {
// MergingNew is an offline state
return;
}
}
HRegionInfo regionInfo = rs.getRegion();
String regionNameStr = regionInfo.getRegionNameAsString();
LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
boolean disabled = getZKTable().isDisablingOrDisabledTable(regionInfo.getTable());
ServerName serverName = rs.getServerName();
if (serverManager.isServerOnline(serverName)) {
if (rs.isOnServer(serverName)
&& (rs.isOpened() || rs.isSplitting())) {
regionOnline(regionInfo, serverName);
if (disabled) {
// if server is offline, no hurt to unassign again
LOG.info("Opened " + regionNameStr
+ "but this table is disabled, triggering close of region");
unassign(regionInfo);
}
} else if (rs.isMergingNew()) {
synchronized (regionStates) {
String p = regionInfo.getEncodedName();
PairOfSameType<HRegionInfo> regions = mergingRegions.get(p);
if (regions != null) {
onlineMergingRegion(disabled, regions.getFirst(), serverName);
onlineMergingRegion(disabled, regions.getSecond(), serverName);
}
}
}
}
} finally {
lock.unlock();
}
}
private void onlineMergingRegion(boolean disabled,
final HRegionInfo hri, final ServerName serverName) {
RegionState regionState = regionStates.getRegionState(hri);
if (regionState != null && regionState.isMerging()
&& regionState.isOnServer(serverName)) {
regionOnline(regionState.getRegion(), serverName);
if (disabled) {
unassign(hri);
}
}
}
});
}
}
/**
* New unassigned node has been created.
*
* <p>This happens when an RS begins the OPENING, SPLITTING or CLOSING of a
* region by creating a znode.
*
* <p>When this happens we must:
* <ol>
* <li>Watch the node for further children changed events</li>
* <li>Watch all new children for changed events</li>
* </ol>
*/
@Override
public void nodeChildrenChanged(String path) {
if (path.equals(watcher.assignmentZNode)) {
zkEventWorkers.submit(new Runnable() {
@Override
public void run() {
try {
// Just make sure we see the changes for the new znodes
List<String> children =
ZKUtil.listChildrenAndWatchForNewChildren(
watcher, watcher.assignmentZNode);
if (children != null) {
Stat stat = new Stat();
for (String child : children) {
// if region is in transition, we already have a watch
// on it, so no need to watch it again. So, as far as I know for now,
// this is needed to watch splitting nodes only.
if (!regionStates.isRegionInTransition(child)) {
ZKAssign.getDataAndWatch(watcher, child, stat);
}
}
}
} catch (KeeperException e) {
server.abort("Unexpected ZK exception reading unassigned children", e);
}
}
});
}
}
/**
* Marks the region as online. Removes it from regions in transition and
* updates the in-memory assignment information.
* <p>
* Used when a region has been successfully opened on a region server.
* @param regionInfo
* @param sn
*/
void regionOnline(HRegionInfo regionInfo, ServerName sn) {
regionOnline(regionInfo, sn, HConstants.NO_SEQNUM);
}
void regionOnline(HRegionInfo regionInfo, ServerName sn, long openSeqNum) {
numRegionsOpened.incrementAndGet();
regionStates.regionOnline(regionInfo, sn, openSeqNum);
// Remove plan if one.
clearRegionPlan(regionInfo);
// Add the server to serversInUpdatingTimer
addToServersInUpdatingTimer(sn);
balancer.regionOnline(regionInfo, sn);
// Tell our listeners that a region was opened
sendRegionOpenedNotification(regionInfo, sn);
}
/**
* Pass the assignment event to a worker for processing.
* Each worker is a single thread executor service. The reason
* for just one thread is to make sure all events for a given
* region are processed in order.
*
* @param path
*/
private void handleAssignmentEvent(final String path) {
if (path.startsWith(watcher.assignmentZNode)) {
final String regionName = ZKAssign.getRegionName(watcher, path);
zkEventWorkersSubmit(new RegionRunnable() {
@Override
public String getRegionName() {
return regionName;
}
@Override
public void run() {
try {
Stat stat = new Stat();
byte [] data = ZKAssign.getDataAndWatch(watcher, path, stat);
if (data == null) return;
RegionTransition rt = RegionTransition.parseFrom(data);
handleRegion(rt, stat.getVersion());
} catch (KeeperException e) {
server.abort("Unexpected ZK exception reading unassigned node data", e);
} catch (DeserializationException e) {
server.abort("Unexpected exception deserializing node data", e);
}
}
});
}
}
/**
* Add the server to the set serversInUpdatingTimer, then {@link TimerUpdater}
* will update timers for this server in background
* @param sn
*/
private void addToServersInUpdatingTimer(final ServerName sn) {
if (tomActivated){
this.serversInUpdatingTimer.add(sn);
}
}
/**
* Touch timers for all regions in transition that have the passed
* <code>sn</code> in common.
* Call this method whenever a server checks in. Doing so helps the case where
 * a new regionserver has joined the cluster and it's been given 1k regions to
 * open. If this method is tickled every time a region reports a
 * successful open then the 1k-th region won't be timed out just because it's
 * sitting behind the open of 999 other regions. This method is NOT used
 * as part of bulk assign -- there we have a different mechanism for extending
 * the regions in transition timer (we turn it off temporarily -- because
 * there is no regionplan involved when bulk assigning).
* @param sn
*/
private void updateTimers(final ServerName sn) {
Preconditions.checkState(tomActivated);
if (sn == null) return;
// This loop could be expensive.
// First make a copy of current regionPlan rather than hold sync while
// looping because holding sync can cause deadlock. It's ok in this loop
// if the Map we're going against is a little stale
List<Map.Entry<String, RegionPlan>> rps;
synchronized(this.regionPlans) {
rps = new ArrayList<Map.Entry<String, RegionPlan>>(regionPlans.entrySet());
}
for (Map.Entry<String, RegionPlan> e : rps) {
if (e.getValue() != null && e.getKey() != null && sn.equals(e.getValue().getDestination())) {
RegionState regionState = regionStates.getRegionTransitionState(e.getKey());
if (regionState != null) {
regionState.updateTimestampToNow();
}
}
}
}
/**
* Marks the region as offline. Removes it from regions in transition and
* removes in-memory assignment information.
* <p>
* Used when a region has been closed and should remain closed.
* @param regionInfo
*/
public void regionOffline(final HRegionInfo regionInfo) {
regionOffline(regionInfo, null);
}
public void offlineDisabledRegion(HRegionInfo regionInfo) {
if (useZKForAssignment) {
// Disabling so should not be reassigned, just delete the CLOSED node
LOG.debug("Table being disabled so deleting ZK node and removing from " +
"regions in transition, skipping assignment of region " +
regionInfo.getRegionNameAsString());
String encodedName = regionInfo.getEncodedName();
deleteNodeInStates(encodedName, "closed", null,
EventType.RS_ZK_REGION_CLOSED, EventType.M_ZK_REGION_OFFLINE);
}
regionOffline(regionInfo);
}
// Assignment methods
/**
* Assigns the specified region.
* <p>
* If a RegionPlan is available with a valid destination then it will be used
* to determine what server region is assigned to. If no RegionPlan is
* available, region will be assigned to a random available server.
* <p>
* Updates the RegionState and sends the OPEN RPC.
* <p>
* This will only succeed if the region is in transition and in a CLOSED or
* OFFLINE state or not in transition (in-memory not zk), and of course, the
* chosen server is up and running (It may have just crashed!). If the
* in-memory checks pass, the zk node is forced to OFFLINE before assigning.
*
 * @param region region to be assigned
* @param setOfflineInZK whether ZK node should be created/transitioned to an
* OFFLINE state before assigning the region
*/
public void assign(HRegionInfo region, boolean setOfflineInZK) {
assign(region, setOfflineInZK, false);
}
/**
* Use care with forceNewPlan. It could cause double assignment.
*/
public void assign(HRegionInfo region,
boolean setOfflineInZK, boolean forceNewPlan) {
if (isDisabledorDisablingRegionInRIT(region)) {
return;
}
if (this.serverManager.isClusterShutdown()) {
LOG.info("Cluster shutdown is set; skipping assign of " +
region.getRegionNameAsString());
return;
}
String encodedName = region.getEncodedName();
Lock lock = locker.acquireLock(encodedName);
try {
RegionState state = forceRegionStateToOffline(region, forceNewPlan);
if (state != null) {
if (regionStates.wasRegionOnDeadServer(encodedName)) {
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
+ " is dead but not processed yet");
return;
}
assign(state, setOfflineInZK && useZKForAssignment, forceNewPlan);
}
} finally {
lock.unlock();
}
}
/**
* Bulk assign regions to <code>destination</code>.
* @param destination
* @param regions Regions to assign.
* @return true if successful
*/
boolean assign(final ServerName destination, final List<HRegionInfo> regions) {
long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
int regionCount = regions.size();
if (regionCount == 0) {
return true;
}
LOG.debug("Assigning " + regionCount + " region(s) to " + destination.toString());
Set<String> encodedNames = new HashSet<String>(regionCount);
for (HRegionInfo region : regions) {
encodedNames.add(region.getEncodedName());
}
List<HRegionInfo> failedToOpenRegions = new ArrayList<HRegionInfo>();
Map<String, Lock> locks = locker.acquireLocks(encodedNames);
try {
AtomicInteger counter = new AtomicInteger(0);
Map<String, Integer> offlineNodesVersions = new ConcurrentHashMap<String, Integer>();
OfflineCallback cb = new OfflineCallback(
watcher, destination, counter, offlineNodesVersions);
Map<String, RegionPlan> plans = new HashMap<String, RegionPlan>(regions.size());
List<RegionState> states = new ArrayList<RegionState>(regions.size());
for (HRegionInfo region : regions) {
String encodedName = region.getEncodedName();
if (!isDisabledorDisablingRegionInRIT(region)) {
RegionState state = forceRegionStateToOffline(region, false);
boolean onDeadServer = false;
if (state != null) {
if (regionStates.wasRegionOnDeadServer(encodedName)) {
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
+ " is dead but not processed yet");
onDeadServer = true;
} else if (!useZKForAssignment
|| asyncSetOfflineInZooKeeper(state, cb, destination)) {
RegionPlan plan = new RegionPlan(region, state.getServerName(), destination);
plans.put(encodedName, plan);
states.add(state);
continue;
}
}
// Reassign if the region wasn't on a dead server
if (!onDeadServer) {
LOG.info("failed to force region state to offline or "
+ "failed to set it offline in ZK, will reassign later: " + region);
failedToOpenRegions.add(region); // assign individually later
}
}
// Release the lock, this region is excluded from bulk assign because
// we can't update its state, or set its znode to offline.
Lock lock = locks.remove(encodedName);
lock.unlock();
}
if (useZKForAssignment) {
// Wait until all unassigned nodes have been put up and watchers set.
int total = states.size();
for (int oldCounter = 0; !server.isStopped();) {
int count = counter.get();
if (oldCounter != count) {
LOG.info(destination.toString() + " unassigned znodes=" + count + " of total="
+ total);
oldCounter = count;
}
if (count >= total) break;
Threads.sleep(5);
}
}
if (server.isStopped()) {
return false;
}
// Add region plans, so we can updateTimers when a region is opened,
// reducing unnecessary timeouts on RIT.
this.addPlans(plans);
List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos =
new ArrayList<Triple<HRegionInfo, Integer, List<ServerName>>>(states.size());
for (RegionState state: states) {
HRegionInfo region = state.getRegion();
String encodedRegionName = region.getEncodedName();
Integer nodeVersion = offlineNodesVersions.get(encodedRegionName);
if (useZKForAssignment && (nodeVersion == null || nodeVersion == -1)) {
LOG.warn("failed to offline in zookeeper: " + region);
failedToOpenRegions.add(region); // assign individually later
Lock lock = locks.remove(encodedRegionName);
lock.unlock();
} else {
regionStates.updateRegionState(
region, State.PENDING_OPEN, destination);
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (this.shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
}
regionOpenInfos.add(new Triple<HRegionInfo, Integer, List<ServerName>>(
region, nodeVersion, favoredNodes));
}
}
// Move on to open regions.
try {
// Send OPEN RPC. If it fails on a IOE or RemoteException,
// regions will be assigned individually.
long maxWaitTime = System.currentTimeMillis() +
this.server.getConfiguration().
getLong("hbase.regionserver.rpc.startup.waittime", 60000);
for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) {
try {
// regionOpenInfos is empty if all regions are in failedToOpenRegions list
if (regionOpenInfos.isEmpty()) {
break;
}
List<RegionOpeningState> regionOpeningStateList = serverManager
.sendRegionOpen(destination, regionOpenInfos);
if (regionOpeningStateList == null) {
// Failed getting RPC connection to this server
return false;
}
for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) {
RegionOpeningState openingState = regionOpeningStateList.get(k);
if (openingState != RegionOpeningState.OPENED) {
HRegionInfo region = regionOpenInfos.get(k).getFirst();
if (openingState == RegionOpeningState.ALREADY_OPENED) {
processAlreadyOpenedRegion(region, destination);
} else if (openingState == RegionOpeningState.FAILED_OPENING) {
// Failed opening this region, reassign it later
failedToOpenRegions.add(region);
} else {
LOG.warn("THIS SHOULD NOT HAPPEN: unknown opening state "
+ openingState + " in assigning region " + region);
}
}
}
break;
} catch (IOException e) {
if (e instanceof RemoteException) {
e = ((RemoteException)e).unwrapRemoteException();
}
if (e instanceof RegionServerStoppedException) {
LOG.warn("The region server was shut down, ", e);
// No need to retry, the region server is a goner.
return false;
} else if (e instanceof ServerNotRunningYetException) {
long now = System.currentTimeMillis();
if (now < maxWaitTime) {
LOG.debug("Server is not yet up; waiting up to " +
(maxWaitTime - now) + "ms", e);
Thread.sleep(100);
i--; // reset the try count
continue;
}
} else if (e instanceof java.net.SocketTimeoutException
&& this.serverManager.isServerOnline(destination)) {
// In case socket is timed out and the region server is still online,
// the openRegion RPC could have been accepted by the server and
// just the response didn't go through. So we will retry to
// open the region on the same server.
if (LOG.isDebugEnabled()) {
LOG.debug("Bulk assigner openRegion() to " + destination
+ " has timed out, but the regions might"
+ " already be opened on it.", e);
}
// wait and reset the re-try count, server might be just busy.
Thread.sleep(100);
i--;
continue;
}
throw e;
}
}
} catch (IOException e) {
// Can be a socket timeout, EOF, NoRouteToHost, etc
LOG.info("Unable to communicate with " + destination
+ " in order to assign regions, ", e);
return false;
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} finally {
for (Lock lock : locks.values()) {
lock.unlock();
}
}
if (!failedToOpenRegions.isEmpty()) {
for (HRegionInfo region : failedToOpenRegions) {
if (!regionStates.isRegionOnline(region)) {
invokeAssign(region);
}
}
}
LOG.debug("Bulk assigning done for " + destination);
return true;
} finally {
metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
}
}
/**
* Send CLOSE RPC if the server is online, otherwise, offline the region.
*
 * The RPC will be sent only to the region server found in the region state
 * if it is passed in, otherwise, to the src server specified. If region
 * state is not specified, we don't update region state at all, instead
 * we just send the RPC call. This is useful for some cleanup without
 * messing around with the region states (see handleRegion, on region opened
 * on an unexpected server scenario, for an example)
*/
private void unassign(final HRegionInfo region,
final RegionState state, final int versionOfClosingNode,
final ServerName dest, final boolean transitionInZK,
final ServerName src) {
ServerName server = src;
if (state != null) {
server = state.getServerName();
}
long maxWaitTime = -1;
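// Retry loop for the CLOSE RPC: give up early if the hosting server is dead or the RPC
// reports the region is no longer there; otherwise keep retrying up to maximumAttempts.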
for (int i = 1; i <= this.maximumAttempts; i++) {
if (this.server.isStopped() || this.server.isAborted()) {
LOG.debug("Server stopped/aborted; skipping unassign of " + region);
return;
}
// ClosedRegionHandler can remove the server from this.regions
if (!serverManager.isServerOnline(server)) {
LOG.debug("Offline " + region.getRegionNameAsString()
+ ", no need to unassign since it's on a dead server: " + server);
if (transitionInZK) {
// delete the node. if no node exists need not bother.
deleteClosingOrClosedNode(region, server);
}
if (state != null) {
regionOffline(region);
}
return;
}
try {
// Send CLOSE RPC
if (serverManager.sendRegionClose(server, region,
versionOfClosingNode, dest, transitionInZK)) {
LOG.debug("Sent CLOSE to " + server + " for region " +
region.getRegionNameAsString());
if (useZKForAssignment && !transitionInZK && state != null) {
// Retry to make sure the region is
// closed so as to avoid double assignment.
unassign(region, state, versionOfClosingNode,
dest, transitionInZK, src);
}
return;
}
// This should never happen. Currently the regionserver CLOSE RPC always returns true.
// TODO: this can now happen (0.96) if there is an exception in a coprocessor
LOG.warn("Server " + server + " region CLOSE RPC returned false for " +
region.getRegionNameAsString());
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException)t).unwrapRemoteException();
}
boolean logRetries = true;
if (t instanceof NotServingRegionException
|| t instanceof RegionServerStoppedException
|| t instanceof ServerNotRunningYetException) {
LOG.debug("Offline " + region.getRegionNameAsString()
+ ", it's not any more on " + server, t);
if (transitionInZK) {
deleteClosingOrClosedNode(region, server);
}
if (state != null) {
regionOffline(region);
}
return;
} else if ((t instanceof FailedServerException) || (state != null &&
t instanceof RegionAlreadyInTransitionException)) {
long sleepTime = 0;
Configuration conf = this.server.getConfiguration();
if(t instanceof FailedServerException) {
sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
RpcClient.FAILED_SERVER_EXPIRY_DEFAULT);
} else {
// RS is already processing this region, only need to update the timestamp
LOG.debug("update " + state + " the timestamp.");
state.updateTimestampToNow();
if (maxWaitTime < 0) {
maxWaitTime =
EnvironmentEdgeManager.currentTimeMillis()
+ conf.getLong(ALREADY_IN_TRANSITION_WAITTIME,
DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
}
long now = EnvironmentEdgeManager.currentTimeMillis();
if (now < maxWaitTime) {
LOG.debug("Region is already in transition; "
+ "waiting up to " + (maxWaitTime - now) + "ms", t);
sleepTime = 100;
i--; // reset the try count
logRetries = false;
}
}
try {
if (sleepTime > 0) {
Thread.sleep(sleepTime);
}
} catch (InterruptedException ie) {
LOG.warn("Failed to unassign "
+ region.getRegionNameAsString() + " since interrupted", ie);
Thread.currentThread().interrupt();
if (!tomActivated && state != null) {
regionStates.updateRegionState(region, State.FAILED_CLOSE);
}
return;
}
}
if (logRetries) {
LOG.info("Server " + server + " returned " + t + " for "
+ region.getRegionNameAsString() + ", try=" + i
+ " of " + this.maximumAttempts, t);
// Presume retry or server will expire.
}
}
}
// Ran out of attempts
if (!tomActivated && state != null) {
regionStates.updateRegionState(region, State.FAILED_CLOSE);
}
}
/**
* Set region to OFFLINE unless it is opening and forceNewPlan is false.
*/
private RegionState forceRegionStateToOffline(
final HRegionInfo region, final boolean forceNewPlan) {
RegionState state = regionStates.getRegionState(region);
if (state == null) {
LOG.warn("Assigning a region not in region states: " + region);
state = regionStates.createRegionState(region);
}
ServerName sn = state.getServerName();
if (forceNewPlan && LOG.isDebugEnabled()) {
LOG.debug("Force region state offline " + state);
}
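// Note: the switch below relies on intentional fall-through. Regions in
// OPEN/OPENING/PENDING_OPEN/CLOSING/PENDING_CLOSE reach the FAILED_* handling only when
// forceNewPlan is set; FAILED_* then falls through to OFFLINE and CLOSED once the region
// has been unassigned.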
switch (state.getState()) {
case OPEN:
case OPENING:
case PENDING_OPEN:
case CLOSING:
case PENDING_CLOSE:
if (!forceNewPlan) {
LOG.debug("Skip assigning " +
region + ", it is already " + state);
return null;
}
case FAILED_CLOSE:
case FAILED_OPEN:
unassign(region, state, -1, null, false, null);
state = regionStates.getRegionState(region);
if (state.isFailedClose()) {
// If we can't close the region, we can't re-assign
// it so as to avoid possible double assignment/data loss.
LOG.info("Skip assigning " +
region + ", we couldn't close it: " + state);
return null;
}
case OFFLINE:
// This region could have been open on this server
// for a while. If the server is dead and not processed
// yet, we can move on only if hbase:meta shows the
// region is actually not on this server, or is on a server
// that is not dead, or is on a dead server already processed.
// When not using ZK, we don't need this check because
// we have the latest info in memory, and the caller
// will do another round of checking anyway.
if (useZKForAssignment
&& regionStates.isServerDeadAndNotProcessed(sn)
&& wasRegionOnDeadServerByMeta(region, sn)) {
if (!regionStates.isRegionInTransition(region)) {
LOG.info("Updating the state to " + State.OFFLINE + " to allow to be reassigned by SSH");
regionStates.updateRegionState(region, State.OFFLINE);
}
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", it is on a dead but not processed yet server: " + sn);
return null;
}
case CLOSED:
break;
default:
LOG.error("Trying to assign region " + region
+ ", which is " + state);
return null;
}
return state;
}
private boolean wasRegionOnDeadServerByMeta(
final HRegionInfo region, final ServerName sn) {
try {
if (region.isMetaRegion()) {
ServerName server = catalogTracker.getMetaLocation();
return regionStates.isServerDeadAndNotProcessed(server);
}
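// Below, 'server' in the while condition is this manager's master Server field, while the
// local ServerName 'server' assigned inside the loop is the region's last hosting server
// as read from hbase:meta.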
while (!server.isStopped()) {
try {
catalogTracker.waitForMeta();
Result r = MetaReader.getRegionResult(catalogTracker, region.getRegionName());
if (r == null || r.isEmpty()) return false;
ServerName server = HRegionInfo.getServerName(r);
return regionStates.isServerDeadAndNotProcessed(server);
} catch (IOException ioe) {
LOG.info("Received exception accessing hbase:meta during force assign "
+ region.getRegionNameAsString() + ", retrying", ioe);
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOG.info("Interrupted accessing hbase:meta", e);
}
// Call is interrupted or server is stopped.
return regionStates.isServerDeadAndNotProcessed(sn);
}
/**
* Caller must hold lock on the passed <code>state</code> object.
* @param state
* @param setOfflineInZK
* @param forceNewPlan
*/
private void assign(RegionState state,
final boolean setOfflineInZK, final boolean forceNewPlan) {
long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
Configuration conf = server.getConfiguration();
RegionState currentState = state;
int versionOfOfflineNode = -1;
RegionPlan plan = null;
long maxWaitTime = -1;
HRegionInfo region = state.getRegion();
RegionOpeningState regionOpenState;
Throwable previousException = null;
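// Assignment retry loop: each iteration obtains (or reuses) a RegionPlan, optionally
// forces the znode to OFFLINE, sends the OPEN RPC, and then decides whether to retry on
// the same server, pick a new plan, or give up.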
for (int i = 1; i <= maximumAttempts; i++) {
if (server.isStopped() || server.isAborted()) {
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", the server is stopped/aborted");
return;
}
if (plan == null) { // Get a server for the region at first
try {
plan = getRegionPlan(region, forceNewPlan);
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
}
if (plan == null) {
LOG.warn("Unable to determine a plan to assign " + region);
if (tomActivated) {
this.timeoutMonitor.setAllRegionServersOffline(true);
} else {
if (region.isMetaRegion()) {
try {
Thread.sleep(this.sleepTimeBeforeRetryingMetaAssignment);
if (i == maximumAttempts) i = 1;
continue;
} catch (InterruptedException e) {
LOG.error("Got exception while waiting for hbase:meta assignment");
Thread.currentThread().interrupt();
}
}
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
return;
}
if (setOfflineInZK && versionOfOfflineNode == -1) {
// get the version of the znode after setting it to OFFLINE.
// versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
versionOfOfflineNode = setOfflineInZooKeeper(currentState, plan.getDestination());
if (versionOfOfflineNode != -1) {
if (isDisabledorDisablingRegionInRIT(region)) {
return;
}
// In case of assignment from EnableTableHandler, the table state is ENABLING; the handler
// itself will set ENABLED after assigning all the table regions. If we
// set ENABLED directly here, the client API may think the table is already enabled.
// However, when all the regions are added directly into hbase:meta and we call
// assignRegion, we do need to mark the table ENABLED. In that case the table
// will be in neither ENABLING nor ENABLED state, hence the check below.
TableName tableName = region.getTable();
if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) {
LOG.debug("Setting table " + tableName + " to ENABLED state.");
setEnabledTable(tableName);
}
}
}
if (setOfflineInZK && versionOfOfflineNode == -1) {
LOG.info("Unable to set offline in ZooKeeper to assign " + region);
// Setting offline in ZK must have failed due to ZK racing or some
// exception which may make the server abort. If it is ZK racing,
// we should retry since we already reset the region state;
// the existing (re)assignment will fail anyway.
if (!server.isAborted()) {
continue;
}
}
LOG.info("Assigning " + region.getRegionNameAsString() +
" to " + plan.getDestination().toString());
// Transition RegionState to PENDING_OPEN
currentState = regionStates.updateRegionState(region,
State.PENDING_OPEN, plan.getDestination());
boolean needNewPlan;
final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() +
" to " + plan.getDestination();
try {
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (this.shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
}
regionOpenState = serverManager.sendRegionOpen(
plan.getDestination(), region, versionOfOfflineNode, favoredNodes);
if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
// Failed opening this region, looping again on a new server.
needNewPlan = true;
LOG.warn(assignMsg + ", regionserver says 'FAILED_OPENING', " +
" trying to assign elsewhere instead; " +
"try=" + i + " of " + this.maximumAttempts);
} else {
// we're done
if (regionOpenState == RegionOpeningState.ALREADY_OPENED) {
processAlreadyOpenedRegion(region, plan.getDestination());
}
return;
}
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException) t).unwrapRemoteException();
}
previousException = t;
// Should we wait a little before retrying? If the server is starting, the answer is yes.
// If the region is already in transition, it's yes as well: we want to be sure that
// the region will get opened but we don't want a double assignment.
boolean hold = (t instanceof RegionAlreadyInTransitionException ||
t instanceof ServerNotRunningYetException);
// In case socket is timed out and the region server is still online,
// the openRegion RPC could have been accepted by the server and
// just the response didn't go through. So we will retry to
// open the region on the same server to avoid possible
// double assignment.
boolean retry = !hold && (t instanceof java.net.SocketTimeoutException
&& this.serverManager.isServerOnline(plan.getDestination()));
if (hold) {
LOG.warn(assignMsg + ", waiting a little before trying on the same region server " +
"try=" + i + " of " + this.maximumAttempts, t);
if (maxWaitTime < 0) {
if (t instanceof RegionAlreadyInTransitionException) {
maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
+ this.server.getConfiguration().getLong(ALREADY_IN_TRANSITION_WAITTIME,
DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
} else {
maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
+ this.server.getConfiguration().getLong(
"hbase.regionserver.rpc.startup.waittime", 60000);
}
}
try {
needNewPlan = false;
long now = EnvironmentEdgeManager.currentTimeMillis();
if (now < maxWaitTime) {
LOG.debug("Server is not yet up or region is already in transition; "
+ "waiting up to " + (maxWaitTime - now) + "ms", t);
Thread.sleep(100);
i--; // reset the try count
} else if (!(t instanceof RegionAlreadyInTransitionException)) {
LOG.debug("Server is not up for a while; try a new one", t);
needNewPlan = true;
}
} catch (InterruptedException ie) {
LOG.warn("Failed to assign "
+ region.getRegionNameAsString() + " since interrupted", ie);
Thread.currentThread().interrupt();
if (!tomActivated) {
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
return;
}
} else if (retry) {
needNewPlan = false;
i--; // we want to retry as many times as needed as long as the RS is not dead.
LOG.warn(assignMsg + ", trying to assign to the same region server due ", t);
} else {
needNewPlan = true;
LOG.warn(assignMsg + ", trying to assign elsewhere instead;" +
" try=" + i + " of " + this.maximumAttempts, t);
}
}
if (i == this.maximumAttempts) {
// Don't reset the region state or get a new plan any more.
// This is the last try.
continue;
}
// If region opened on destination of present plan, reassigning to new
// RS may cause double assignments. In case of RegionAlreadyInTransitionException
// reassigning to same RS.
if (needNewPlan) {
// Force a new plan and reassign. Will return null if no servers.
// The new plan could be the same as the existing plan since we don't
// exclude the server of the original plan, which should not be
// excluded since it could be the only server up now.
RegionPlan newPlan = null;
try {
newPlan = getRegionPlan(region, true);
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
if (newPlan == null) {
if (tomActivated) {
this.timeoutMonitor.setAllRegionServersOffline(true);
} else {
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
LOG.warn("Unable to find a viable location to assign region " +
region.getRegionNameAsString());
return;
}
if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) {
// Clean out the plan we failed to execute, one that doesn't look like it'll
// succeed anyway; we need a new plan!
// Transition back to OFFLINE
currentState = regionStates.updateRegionState(region, State.OFFLINE);
versionOfOfflineNode = -1;
plan = newPlan;
} else if(plan.getDestination().equals(newPlan.getDestination()) &&
previousException instanceof FailedServerException) {
try {
LOG.info("Trying to re-assign " + region.getRegionNameAsString() +
" to the same failed server.");
Thread.sleep(1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
RpcClient.FAILED_SERVER_EXPIRY_DEFAULT));
} catch (InterruptedException ie) {
LOG.warn("Failed to assign "
+ region.getRegionNameAsString() + " since interrupted", ie);
Thread.currentThread().interrupt();
if (!tomActivated) {
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
return;
}
}
}
}
// Ran out of attempts
if (!tomActivated) {
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
} finally {
metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
}
}
private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn) {
// Remove the region from in-memory transition and its unassigned node from ZK.
// This happens, for example, while enabling a table whose regions were
// already assigned and open.
LOG.debug("ALREADY_OPENED " + region.getRegionNameAsString()
+ " to " + sn);
String encodedName = region.getEncodedName();
deleteNodeInStates(encodedName, "offline", sn, EventType.M_ZK_REGION_OFFLINE);
regionStates.regionOnline(region, sn);
}
private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
TableName tableName = region.getTable();
boolean disabled = this.zkTable.isDisabledTable(tableName);
if (disabled || this.zkTable.isDisablingTable(tableName)) {
LOG.info("Table " + tableName + (disabled ? " disabled;" : " disabling;") +
" skipping assign of " + region.getRegionNameAsString());
offlineDisabledRegion(region);
return true;
}
return false;
}
/**
* Set region as OFFLINED up in zookeeper
*
* @param state
* @return the version of the offline node if setting of the OFFLINE node was
* successful, -1 otherwise.
*/
private int setOfflineInZooKeeper(final RegionState state, final ServerName destination) {
if (!state.isClosed() && !state.isOffline()) {
String msg = "Unexpected state : " + state + " .. Cannot transit it to OFFLINE.";
this.server.abort(msg, new IllegalStateException(msg));
return -1;
}
regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
int versionOfOfflineNode;
try {
// get the version after setting the znode to OFFLINE
versionOfOfflineNode = ZKAssign.createOrForceNodeOffline(watcher,
state.getRegion(), destination);
if (versionOfOfflineNode == -1) {
LOG.warn("Attempted to create/force node into OFFLINE state before "
+ "completing assignment but failed to do so for " + state);
return -1;
}
} catch (KeeperException e) {
server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
return -1;
}
return versionOfOfflineNode;
}
/**
* @param region the region to assign
 * @return Plan for the passed <code>region</code> (if none exists, one is created; if there
 *         are no servers to assign to, null is returned).
*/
private RegionPlan getRegionPlan(final HRegionInfo region,
final boolean forceNewPlan) throws HBaseIOException {
return getRegionPlan(region, null, forceNewPlan);
}
/**
* @param region the region to assign
 * @param serverToExclude Server to exclude (we know it's bad). Pass null if
* all servers are thought to be assignable.
* @param forceNewPlan If true, then if an existing plan exists, a new plan
* will be generated.
 * @return Plan for the passed <code>region</code> (if none exists, one is created; if there
 *         are no servers to assign to, null is returned).
*/
private RegionPlan getRegionPlan(final HRegionInfo region,
final ServerName serverToExclude, final boolean forceNewPlan) throws HBaseIOException {
// Pickup existing plan or make a new one
final String encodedName = region.getEncodedName();
final List<ServerName> destServers =
serverManager.createDestinationServersList(serverToExclude);
if (destServers.isEmpty()){
LOG.warn("Can't move " + encodedName +
", there is no destination server available.");
return null;
}
RegionPlan randomPlan = null;
boolean newPlan = false;
RegionPlan existingPlan;
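// Under the regionPlans lock: reuse an existing plan if its destination is still a valid
// candidate; otherwise ask the balancer for a random assignment and remember that plan.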
synchronized (this.regionPlans) {
existingPlan = this.regionPlans.get(encodedName);
if (existingPlan != null && existingPlan.getDestination() != null) {
LOG.debug("Found an existing plan for " + region.getRegionNameAsString()
+ " destination server is " + existingPlan.getDestination() +
" accepted as a dest server = " + destServers.contains(existingPlan.getDestination()));
}
if (forceNewPlan
|| existingPlan == null
|| existingPlan.getDestination() == null
|| !destServers.contains(existingPlan.getDestination())) {
newPlan = true;
randomPlan = new RegionPlan(region, null,
balancer.randomAssignment(region, destServers));
if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) {
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
regions.add(region);
try {
processFavoredNodes(regions);
} catch (IOException ie) {
LOG.warn("Ignoring exception in processFavoredNodes " + ie);
}
}
this.regionPlans.put(encodedName, randomPlan);
}
}
if (newPlan) {
if (randomPlan.getDestination() == null) {
LOG.warn("Can't find a destination for " + encodedName);
return null;
}
LOG.debug("No previous transition plan found (or ignoring " +
"an existing plan) for " + region.getRegionNameAsString() +
"; generated random plan=" + randomPlan + "; " +
serverManager.countOfRegionServers() +
" (online=" + serverManager.getOnlineServers().size() +
", available=" + destServers.size() + ") available servers" +
", forceNewPlan=" + forceNewPlan);
return randomPlan;
}
LOG.debug("Using pre-existing plan for " +
region.getRegionNameAsString() + "; plan=" + existingPlan);
return existingPlan;
}
/**
* Unassigns the specified region.
* <p>
* Updates the RegionState and sends the CLOSE RPC unless region is being
* split by regionserver; then the unassign fails (silently) because we
 * presume the region being unassigned no longer exists (it has been split out
* of existence). TODO: What to do if split fails and is rolled back and
* parent is revivified?
* <p>
* If a RegionPlan is already set, it will remain.
*
 * @param region region to be unassigned
*/
public void unassign(HRegionInfo region) {
unassign(region, false);
}
/**
* Unassigns the specified region.
* <p>
* Updates the RegionState and sends the CLOSE RPC unless region is being
* split by regionserver; then the unassign fails (silently) because we
 * presume the region being unassigned no longer exists (it has been split out
* of existence). TODO: What to do if split fails and is rolled back and
* parent is revivified?
* <p>
* If a RegionPlan is already set, it will remain.
*
 * @param region region to be unassigned
* @param force if region should be closed even if already closing
*/
public void unassign(HRegionInfo region, boolean force, ServerName dest) {
// TODO: Method needs refactoring. Ugly buried returns throughout. Beware!
LOG.debug("Starting unassign of " + region.getRegionNameAsString()
+ " (offlining), current state: " + regionStates.getRegionState(region));
String encodedName = region.getEncodedName();
// Grab the state of this region and synchronize on it
int versionOfClosingNode = -1;
// We need a lock here as we're going to do a put later and we don't want
// multiple states to be created concurrently.
ReentrantLock lock = locker.acquireLock(encodedName);
RegionState state = regionStates.getRegionTransitionState(encodedName);
boolean reassign = true;
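// Overall flow: if the region is not in transition, move it to PENDING_CLOSE (creating the
// CLOSING znode when ZK-based assignment is used) and send the CLOSE RPC; if it is already
// closing, only force another CLOSE when 'force' is set. On exit, an offline region is
// reassigned unless 'reassign' has been cleared (split/merge in progress, ZK trouble, etc.).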
try {
if (state == null) {
// Region is not in transition.
// We can unassign it only if it's not SPLIT/MERGED.
state = regionStates.getRegionState(encodedName);
if (state != null && state.isUnassignable()) {
LOG.info("Attempting to unassign " + state + ", ignored");
// Offline region will be reassigned below
return;
}
// Create the znode in CLOSING state
try {
if (state == null || state.getServerName() == null) {
// We don't know where the region is, offline it.
// No need to send CLOSE RPC
LOG.warn("Attempting to unassign a region not in RegionStates"
+ region.getRegionNameAsString() + ", offlined");
regionOffline(region);
return;
}
if (useZKForAssignment) {
versionOfClosingNode = ZKAssign.createNodeClosing(
watcher, region, state.getServerName());
if (versionOfClosingNode == -1) {
LOG.info("Attempting to unassign " +
region.getRegionNameAsString() + " but ZK closing node "
+ "can't be created.");
reassign = false; // not unassigned at all
return;
}
}
} catch (KeeperException e) {
if (e instanceof NodeExistsException) {
// Handle race between master initiated close and regionserver
// orchestrated splitting. See if existing node is in a
// SPLITTING or SPLIT state. If so, the regionserver started
// an op on node before we could get our CLOSING in. Deal.
NodeExistsException nee = (NodeExistsException)e;
String path = nee.getPath();
try {
if (isSplitOrSplittingOrMergedOrMerging(path)) {
LOG.debug(path + " is SPLIT or SPLITTING or MERGED or MERGING; " +
"skipping unassign because region no longer exists -- its split or merge");
reassign = false; // no need to reassign for split/merged region
return;
}
} catch (KeeperException.NoNodeException ke) {
LOG.warn("Failed getData on SPLITTING/SPLIT at " + path +
"; presuming split and that the region to unassign, " +
encodedName + ", no longer exists -- confirm", ke);
return;
} catch (KeeperException ke) {
LOG.error("Unexpected zk state", ke);
} catch (DeserializationException de) {
LOG.error("Failed parse", de);
}
}
// If we get here, we don't understand what's going on -- abort.
server.abort("Unexpected ZK exception creating node CLOSING", e);
reassign = false; // heading out already
return;
}
state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
} else if (state.isFailedOpen()) {
// The region is not open yet
regionOffline(region);
return;
} else if (force && state.isPendingCloseOrClosing()) {
LOG.debug("Attempting to unassign " + region.getRegionNameAsString() +
" which is already " + state.getState() +
" but forcing to send a CLOSE RPC again ");
if (state.isFailedClose()) {
state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
}
state.updateTimestampToNow();
} else {
LOG.debug("Attempting to unassign " +
region.getRegionNameAsString() + " but it is " +
"already in transition (" + state.getState() + ", force=" + force + ")");
return;
}
unassign(region, state, versionOfClosingNode, dest, useZKForAssignment, null);
} finally {
lock.unlock();
// Region is expected to be reassigned afterwards
if (reassign && regionStates.isRegionOffline(region)) {
assign(region, true);
}
}
}
public void unassign(HRegionInfo region, boolean force){
unassign(region, force, null);
}
/**
 * @param region region whose closing/closed znode is to be deleted.
*/
public void deleteClosingOrClosedNode(HRegionInfo region, ServerName sn) {
String encodedName = region.getEncodedName();
deleteNodeInStates(encodedName, "closing", sn, EventType.M_ZK_REGION_CLOSING,
EventType.RS_ZK_REGION_CLOSED);
}
/**
* @param path
* @return True if znode is in SPLIT or SPLITTING or MERGED or MERGING state.
 * @throws KeeperException Can happen if the znode went away in the meantime.
* @throws DeserializationException
*/
private boolean isSplitOrSplittingOrMergedOrMerging(final String path)
throws KeeperException, DeserializationException {
boolean result = false;
// This may fail if the SPLIT or SPLITTING or MERGED or MERGING znode gets
// cleaned up before we can get data from it.
byte [] data = ZKAssign.getData(watcher, path);
if (data == null) {
LOG.info("Node " + path + " is gone");
return false;
}
RegionTransition rt = RegionTransition.parseFrom(data);
switch (rt.getEventType()) {
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGED:
case RS_ZK_REGION_MERGING:
result = true;
break;
default:
LOG.info("Node " + path + " is in " + rt.getEventType());
break;
}
return result;
}
/**
* Used by unit tests. Return the number of regions opened so far in the life
* of the master. Increases by one every time the master opens a region
* @return the counter value of the number of regions opened so far
*/
public int getNumRegionsOpened() {
return numRegionsOpened.get();
}
/**
* Waits until the specified region has completed assignment.
* <p>
* If the region is already assigned, returns immediately. Otherwise, method
* blocks until the region is assigned.
* @param regionInfo region to wait on assignment for
* @throws InterruptedException
*/
public boolean waitForAssignment(HRegionInfo regionInfo)
throws InterruptedException {
while (!regionStates.isRegionOnline(regionInfo)) {
if (regionStates.isRegionInState(regionInfo, State.FAILED_OPEN)
|| this.server.isStopped()) {
return false;
}
// We should receive a notification, but it's
// better to have a timeout to recheck the condition here:
// it lowers the impact of a race condition if any
regionStates.waitForUpdate(100);
}
return true;
}
/**
* Assigns the hbase:meta region.
* <p>
* Assumes that hbase:meta is currently closed and is not being actively served by
* any RegionServer.
* <p>
* Forcibly unsets the current meta region location in ZooKeeper and assigns
* hbase:meta to a random RegionServer.
* @throws KeeperException
*/
public void assignMeta() throws KeeperException {
MetaRegionTracker.deleteMetaLocation(this.watcher);
assign(HRegionInfo.FIRST_META_REGIONINFO, true);
}
/**
* Assigns specified regions retaining assignments, if any.
* <p>
* This is a synchronous call and will return once every region has been
* assigned. If anything fails, an exception is thrown
* @throws InterruptedException
* @throws IOException
*/
public void assign(Map<HRegionInfo, ServerName> regions)
throws IOException, InterruptedException {
if (regions == null || regions.isEmpty()) {
return;
}
List<ServerName> servers = serverManager.createDestinationServersList();
if (servers == null || servers.isEmpty()) {
throw new IOException("Found no destination server to assign region(s)");
}
// Reuse existing assignment info
Map<ServerName, List<HRegionInfo>> bulkPlan =
balancer.retainAssignment(regions, servers);
assign(regions.size(), servers.size(),
"retainAssignment=true", bulkPlan);
}
/**
* Assigns specified regions round robin, if any.
* <p>
* This is a synchronous call and will return once every region has been
* assigned. If anything fails, an exception is thrown
* @throws InterruptedException
* @throws IOException
*/
public void assign(List<HRegionInfo> regions)
throws IOException, InterruptedException {
if (regions == null || regions.isEmpty()) {
return;
}
List<ServerName> servers = serverManager.createDestinationServersList();
if (servers == null || servers.isEmpty()) {
throw new IOException("Found no destination server to assign region(s)");
}
// Generate a round-robin bulk assignment plan
Map<ServerName, List<HRegionInfo>> bulkPlan
= balancer.roundRobinAssignment(regions, servers);
processFavoredNodes(regions);
assign(regions.size(), servers.size(),
"round-robin=true", bulkPlan);
}
private void assign(int regions, int totalServers,
String message, Map<ServerName, List<HRegionInfo>> bulkPlan)
throws InterruptedException, IOException {
int servers = bulkPlan.size();
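// Bulk assignment is skipped when there is only a single target server or the plan is
// below the configured thresholds (bulkAssignThresholdRegions / bulkAssignThresholdServers);
// in that case each server's regions are assigned directly.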
if (servers == 1 || (regions < bulkAssignThresholdRegions
&& servers < bulkAssignThresholdServers)) {
// Don't use bulk assignment. This can be more efficient in a small
// cluster, especially a mini cluster for testing, so that tests won't time out.
if (LOG.isTraceEnabled()) {
LOG.trace("Not using bulk assignment since we are assigning only " + regions +
" region(s) to " + servers + " server(s)");
}
for (Map.Entry<ServerName, List<HRegionInfo>> plan: bulkPlan.entrySet()) {
if (!assign(plan.getKey(), plan.getValue())) {
for (HRegionInfo region: plan.getValue()) {
if (!regionStates.isRegionOnline(region)) {
invokeAssign(region);
}
}
}
}
} else {
LOG.info("Bulk assigning " + regions + " region(s) across "
+ totalServers + " server(s), " + message);
// Use fixed count thread pool assigning.
BulkAssigner ba = new GeneralBulkAssigner(
this.server, bulkPlan, this, bulkAssignWaitTillAllAssigned);
ba.bulkAssign();
LOG.info("Bulk assigning done");
}
}
/**
* Assigns all user regions, if any exist. Used during cluster startup.
* <p>
* This is a synchronous call and will return once every region has been
* assigned. If anything fails, an exception is thrown and the cluster
* should be shutdown.
* @throws InterruptedException
* @throws IOException
* @throws KeeperException
*/
private void assignAllUserRegions(Set<TableName> disabledOrDisablingOrEnabling)
throws IOException, InterruptedException, KeeperException {
// Skip assignment for regions of tables in DISABLING state because during clean cluster startup
// no RS is alive and regions map also doesn't have any information about the regions.
// See HBASE-6281.
// Scan hbase:meta for all user regions, skipping any disabled tables
Map<HRegionInfo, ServerName> allRegions;
SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
new SnapshotOfRegionAssignmentFromMeta(catalogTracker, disabledOrDisablingOrEnabling, true);
snapshotOfRegionAssignment.initialize();
allRegions = snapshotOfRegionAssignment.getRegionToRegionServerMap();
if (allRegions == null || allRegions.isEmpty()) {
return;
}
// Determine what type of assignment to do on startup
boolean retainAssignment = server.getConfiguration().
getBoolean("hbase.master.startup.retainassign", true);
if (retainAssignment) {
assign(allRegions);
} else {
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(allRegions.keySet());
assign(regions);
}
for (HRegionInfo hri : allRegions.keySet()) {
TableName tableName = hri.getTable();
if (!zkTable.isEnabledTable(tableName)) {
setEnabledTable(tableName);
}
}
}
/**
* Wait until no regions in transition.
* @param timeout How long to wait.
* @return True if nothing in regions in transition.
* @throws InterruptedException
*/
boolean waitUntilNoRegionsInTransition(final long timeout)
throws InterruptedException {
// Blocks until there are no regions in transition. It is possible that there
// are regions in transition immediately after this returns, but it guarantees
// that if it returns without an exception there was a period of time
// with no regions in transition from the point-of-view of the in-memory
// state of the Master.
final long endTime = System.currentTimeMillis() + timeout;
while (!this.server.isStopped() && regionStates.isRegionsInTransition()
&& endTime > System.currentTimeMillis()) {
regionStates.waitForUpdate(100);
}
return !regionStates.isRegionsInTransition();
}
/**
* Rebuild the list of user regions and assignment information.
* <p>
* Returns a map of servers that are not found to be online and the regions
* they were hosting.
* @return map of servers not online to their assigned regions, as stored
* in META
* @throws IOException
*/
Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws IOException, KeeperException {
Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
Set<TableName> disabledOrEnablingTables = ZKTable.getDisabledTables(watcher);
disabledOrEnablingTables.addAll(enablingTables);
Set<TableName> disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher);
disabledOrDisablingOrEnabling.addAll(disabledOrEnablingTables);
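// At this point disabledOrEnablingTables holds DISABLED + ENABLING tables, and
// disabledOrDisablingOrEnabling additionally includes DISABLING tables; these sets decide
// which regions are brought online in memory and which tables get re-enabled below.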
// Region assignment from META
List<Result> results = MetaReader.fullScan(this.catalogTracker);
// Get any new but slow-to-check-in region server that joined the cluster
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
// Map of offline servers and their regions to be returned
Map<ServerName, List<HRegionInfo>> offlineServers =
new TreeMap<ServerName, List<HRegionInfo>>();
// Iterate regions in META
for (Result result : results) {
HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(result);
if (regionInfo == null) continue;
State state = RegionStateStore.getRegionState(result);
ServerName regionLocation = RegionStateStore.getRegionServer(result);
regionStates.createRegionState(regionInfo, state, regionLocation);
if (!regionStates.isRegionInState(regionInfo, State.OPEN)) {
// Region is not open (either offline or in transition), skip
continue;
}
TableName tableName = regionInfo.getTable();
if (!onlineServers.contains(regionLocation)) {
// Region is located on a server that isn't online
List<HRegionInfo> offlineRegions = offlineServers.get(regionLocation);
if (offlineRegions == null) {
offlineRegions = new ArrayList<HRegionInfo>(1);
offlineServers.put(regionLocation, offlineRegions);
}
if (useZKForAssignment) {
regionStates.regionOffline(regionInfo);
}
offlineRegions.add(regionInfo);
} else if (!disabledOrEnablingTables.contains(tableName)) {
// Region is being served and on an active server
// add only if region not in disabled or enabling table
regionStates.updateRegionState(regionInfo, State.OPEN, regionLocation);
regionStates.regionOnline(regionInfo, regionLocation);
balancer.regionOnline(regionInfo, regionLocation);
} else if (useZKForAssignment) {
regionStates.regionOffline(regionInfo);
}
// need to enable the table if not disabled or disabling or enabling
// this will be used in rolling restarts
if (!disabledOrDisablingOrEnabling.contains(tableName)
&& !getZKTable().isEnabledTable(tableName)) {
setEnabledTable(tableName);
}
}
return offlineServers;
}
/**
* Recover the tables that were not fully moved to DISABLED state. These
* tables are in DISABLING state when the master restarted/switched.
*
* @throws KeeperException
* @throws TableNotFoundException
* @throws IOException
*/
private void recoverTableInDisablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<TableName> disablingTables = ZKTable.getDisablingTables(watcher);
if (disablingTables.size() != 0) {
for (TableName tableName : disablingTables) {
// Recover by calling DisableTableHandler
LOG.info("The table " + tableName
+ " is in DISABLING state. Hence recovering by moving the table"
+ " to DISABLED state.");
new DisableTableHandler(this.server, tableName, catalogTracker,
this, tableLockManager, true).prepare().process();
}
}
}
/**
* Recover the tables that are not fully moved to ENABLED state. These tables
* are in ENABLING state when the master restarted/switched
*
* @throws KeeperException
* @throws org.apache.hadoop.hbase.TableNotFoundException
* @throws IOException
*/
private void recoverTableInEnablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
if (enablingTables.size() != 0) {
for (TableName tableName : enablingTables) {
// Recover by calling EnableTableHandler
LOG.info("The table " + tableName
+ " is in ENABLING state. Hence recovering by moving the table"
+ " to ENABLED state.");
// enableTable in a synchronous way during master startup;
// no need to invoke coprocessors
EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
catalogTracker, this, tableLockManager, true);
try {
eth.prepare();
} catch (TableNotFoundException e) {
LOG.warn("Table " + tableName + " not found in hbase:meta to recover.");
continue;
}
eth.process();
}
}
}
/**
* Processes list of dead servers from result of hbase:meta scan and regions in RIT
* <p>
* This is used for failover to recover the lost regions that belonged to
* RegionServers which failed while there was no active master or regions
* that were in RIT.
* <p>
*
*
* @param deadServers
* The list of dead servers which failed while there was no active
* master. Can be null.
* @throws IOException
* @throws KeeperException
*/
private void processDeadServersAndRecoverLostRegions(
Map<ServerName, List<HRegionInfo>> deadServers)
throws IOException, KeeperException {
if (deadServers != null) {
for (Map.Entry<ServerName, List<HRegionInfo>> server: deadServers.entrySet()) {
ServerName serverName = server.getKey();
// We need to keep such info even if the server is known dead
regionStates.setLastRegionServerOfRegions(serverName, server.getValue());
if (!serverManager.isServerDead(serverName)) {
serverManager.expireServer(serverName); // Let SSH do region re-assign
}
}
}
List<String> nodes = useZKForAssignment ?
ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.assignmentZNode)
: ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
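// Any region-in-transition znodes found are replayed; otherwise, when ZK is not used for
// assignment, pending OPEN/CLOSE RPCs are re-sent from the in-memory RIT states below.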
if (nodes != null && !nodes.isEmpty()) {
for (String encodedRegionName : nodes) {
processRegionInTransition(encodedRegionName, null);
}
} else if (!useZKForAssignment) {
// We need to send RPC call again for PENDING_OPEN/PENDING_CLOSE regions
// in case the RPC call is not sent out yet before the master was shut down
// since we update the state before we send the RPC call. We can't update
// the state after the RPC call. Otherwise, we don't know what's happened
// to the region if the master dies right after the RPC call is out.
Map<String, RegionState> rits = regionStates.getRegionsInTransition();
for (RegionState regionState: rits.values()) {
if (!serverManager.isServerOnline(regionState.getServerName())) {
continue; // SSH will handle it
}
State state = regionState.getState();
LOG.info("Processing " + regionState);
switch (state) {
case CLOSED:
invokeAssign(regionState.getRegion());
break;
case PENDING_OPEN:
retrySendRegionOpen(regionState);
break;
case PENDING_CLOSE:
retrySendRegionClose(regionState);
break;
default:
// No process for other states
}
}
}
}
/**
 * At master failover, for a region in PENDING_OPEN state, make sure the
 * sendRegionOpen RPC call is sent to the target regionserver
*/
private void retrySendRegionOpen(final RegionState regionState) {
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
HRegionInfo hri = regionState.getRegion();
ServerName serverName = regionState.getServerName();
ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
try {
while (serverManager.isServerOnline(serverName)
&& !server.isStopped() && !server.isAborted()) {
try {
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(hri);
}
RegionOpeningState regionOpenState = serverManager.sendRegionOpen(
serverName, hri, -1, favoredNodes);
if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
// Failed opening this region, this means the target server didn't get
// the original region open RPC, so re-assign it with a new plan
LOG.debug("Got failed_opening in retry sendRegionOpen for "
+ regionState + ", re-assign it");
invokeAssign(hri, true);
}
return; // Done.
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException) t).unwrapRemoteException();
}
// In case SocketTimeoutException/FailedServerException, we will retry
if (t instanceof java.net.SocketTimeoutException
|| t instanceof FailedServerException) {
Threads.sleep(100);
continue;
}
// For other exceptions, re-assign it
LOG.debug("Got exception in retry sendRegionOpen for "
+ regionState + ", re-assign it", t);
invokeAssign(hri);
return; // Done.
}
}
} finally {
lock.unlock();
}
}
});
}
/**
 * At master failover, for a region in PENDING_CLOSE state, make sure the
 * sendRegionClose RPC call is sent to the target regionserver
*/
private void retrySendRegionClose(final RegionState regionState) {
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
HRegionInfo hri = regionState.getRegion();
ServerName serverName = regionState.getServerName();
ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
try {
while (serverManager.isServerOnline(serverName)
&& !server.isStopped() && !server.isAborted()) {
try {
if (!serverManager.sendRegionClose(serverName, hri, -1, null, false)) {
// This means the region is still on the target server
LOG.debug("Got false in retry sendRegionClose for "
+ regionState + ", re-close it");
invokeUnAssign(hri);
}
return; // Done.
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException) t).unwrapRemoteException();
}
// In case SocketTimeoutException/FailedServerException, we will retry
if (t instanceof java.net.SocketTimeoutException
|| t instanceof FailedServerException) {
Threads.sleep(100);
continue;
}
if (!(t instanceof NotServingRegionException
|| t instanceof RegionAlreadyInTransitionException)) {
// NotServingRegionException/RegionAlreadyInTransitionException
// means the target server got the original region close request.
// For other exceptions, re-close it
LOG.debug("Got exception in retry sendRegionClose for "
+ regionState + ", re-close it", t);
invokeUnAssign(hri);
}
return; // Done.
}
}
} finally {
lock.unlock();
}
}
});
}
/**
* Set Regions in transitions metrics.
* This takes an iterator on the RegionInTransition map (CLSM), and is not synchronized.
 * This iterator is not fail-fast, which may lead to stale reads; but that's better than
 * creating a copy of the map for metrics computation, as this method will be invoked
 * at a frequent interval.
*/
public void updateRegionsInTransitionMetrics() {
long currentTime = System.currentTimeMillis();
int totalRITs = 0;
int totalRITsOverThreshold = 0;
long oldestRITTime = 0;
int ritThreshold = this.server.getConfiguration().
getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000);
for (RegionState state: regionStates.getRegionsInTransition().values()) {
totalRITs++;
long ritTime = currentTime - state.getStamp();
if (ritTime > ritThreshold) { // more than the threshold
totalRITsOverThreshold++;
}
if (oldestRITTime < ritTime) {
oldestRITTime = ritTime;
}
}
if (this.metricsAssignmentManager != null) {
this.metricsAssignmentManager.updateRITOldestAge(oldestRITTime);
this.metricsAssignmentManager.updateRITCount(totalRITs);
this.metricsAssignmentManager.updateRITCountOverThreshold(totalRITsOverThreshold);
}
}
/**
* @param region Region whose plan we are to clear.
*/
void clearRegionPlan(final HRegionInfo region) {
synchronized (this.regionPlans) {
this.regionPlans.remove(region.getEncodedName());
}
}
/**
* Wait on region to clear regions-in-transition.
* @param hri Region to wait on.
* @throws IOException
*/
public void waitOnRegionToClearRegionsInTransition(final HRegionInfo hri)
throws IOException, InterruptedException {
waitOnRegionToClearRegionsInTransition(hri, -1L);
}
/**
* Wait on region to clear regions-in-transition or time out
* @param hri
* @param timeOut Milliseconds to wait for current region to be out of transition state.
* @return True when a region clears regions-in-transition before timeout otherwise false
* @throws InterruptedException
*/
public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut)
throws InterruptedException {
if (!regionStates.isRegionInTransition(hri)) return true;
long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTimeMillis()
+ timeOut;
// There is already a timeout monitor on regions in transition so I
// should not have to have one here too?
LOG.info("Waiting for " + hri.getEncodedName() +
" to leave regions-in-transition, timeOut=" + timeOut + " ms.");
while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) {
regionStates.waitForUpdate(100);
if (EnvironmentEdgeManager.currentTimeMillis() > end) {
LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned.");
return false;
}
}
if (this.server.isStopped()) {
LOG.info("Giving up wait on regions in transition because stoppable.isStopped is set");
return false;
}
return true;
}
/**
* Update timers for all regions in transition going against the server in the
* serversInUpdatingTimer.
*/
public class TimerUpdater extends Chore {
public TimerUpdater(final int period, final Stoppable stopper) {
super("AssignmentTimerUpdater", period, stopper);
}
@Override
protected void chore() {
Preconditions.checkState(tomActivated);
ServerName serverToUpdateTimer = null;
while (!serversInUpdatingTimer.isEmpty() && !stopper.isStopped()) {
if (serverToUpdateTimer == null) {
serverToUpdateTimer = serversInUpdatingTimer.first();
} else {
serverToUpdateTimer = serversInUpdatingTimer
.higher(serverToUpdateTimer);
}
if (serverToUpdateTimer == null) {
break;
}
updateTimers(serverToUpdateTimer);
serversInUpdatingTimer.remove(serverToUpdateTimer);
}
}
}
/**
* Monitor to check for time outs on region transition operations
*/
public class TimeoutMonitor extends Chore {
private boolean allRegionServersOffline = false;
private ServerManager serverManager;
private final int timeout;
/**
* Creates a periodic monitor to check for time outs on region transition
* operations. This will deal with retries if for some reason something
* doesn't happen within the specified timeout.
* @param period
* @param stopper When {@link Stoppable#isStopped()} is true, this thread will
* cleanup and exit cleanly.
* @param timeout
*/
public TimeoutMonitor(final int period, final Stoppable stopper,
ServerManager serverManager,
final int timeout) {
super("AssignmentTimeoutMonitor", period, stopper);
this.timeout = timeout;
this.serverManager = serverManager;
}
private synchronized void setAllRegionServersOffline(
boolean allRegionServersOffline) {
this.allRegionServersOffline = allRegionServersOffline;
}
@Override
protected void chore() {
Preconditions.checkState(tomActivated);
boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty();
// Iterate all regions in transition checking for time outs
long now = System.currentTimeMillis();
// No lock; concurrent access is OK: we will be working on a copy, and it's valid in Java
// to copy the map while another thread is adding/removing items.
for (String regionName : regionStates.getRegionsInTransition().keySet()) {
RegionState regionState = regionStates.getRegionTransitionState(regionName);
if (regionState == null) continue;
if (regionState.getStamp() + timeout <= now) {
// decide on action upon timeout
actOnTimeOut(regionState);
} else if (this.allRegionServersOffline && !noRSAvailable) {
RegionPlan existingPlan = regionPlans.get(regionName);
if (existingPlan == null
|| !this.serverManager.isServerOnline(existingPlan
.getDestination())) {
// if some RSs just came back online, we can start the assignment
// right away
actOnTimeOut(regionState);
}
}
}
setAllRegionServersOffline(noRSAvailable);
}
private void actOnTimeOut(RegionState regionState) {
HRegionInfo regionInfo = regionState.getRegion();
LOG.info("Regions in transition timed out: " + regionState);
// Expired! Do a retry.
switch (regionState.getState()) {
case CLOSED:
LOG.info("Region " + regionInfo.getEncodedName()
+ " has been CLOSED for too long, waiting on queued "
+ "ClosedRegionHandler to run or server shutdown");
// Update our timestamp.
regionState.updateTimestampToNow();
break;
case OFFLINE:
LOG.info("Region has been OFFLINE for too long, " + "reassigning "
+ regionInfo.getRegionNameAsString() + " to a random server");
invokeAssign(regionInfo);
break;
case PENDING_OPEN:
LOG.info("Region has been PENDING_OPEN for too "
+ "long, reassigning region=" + regionInfo.getRegionNameAsString());
invokeAssign(regionInfo);
break;
case OPENING:
processOpeningState(regionInfo);
break;
case OPEN:
LOG.error("Region has been OPEN for too long, " +
"we don't know where region was opened so can't do anything");
regionState.updateTimestampToNow();
break;
case PENDING_CLOSE:
LOG.info("Region has been PENDING_CLOSE for too "
+ "long, running forced unassign again on region="
+ regionInfo.getRegionNameAsString());
invokeUnassign(regionInfo);
break;
case CLOSING:
LOG.info("Region has been CLOSING for too " +
"long, this should eventually complete or the server will " +
"expire, send RPC again");
invokeUnassign(regionInfo);
break;
case SPLIT:
case SPLITTING:
case FAILED_OPEN:
case FAILED_CLOSE:
case MERGING:
break;
default:
throw new IllegalStateException("Received event is not valid.");
}
}
}
private void processOpeningState(HRegionInfo regionInfo) {
LOG.info("Region has been OPENING for too long, reassigning region="
+ regionInfo.getRegionNameAsString());
// Should have a ZK node in OPENING state
try {
String node = ZKAssign.getNodeName(watcher, regionInfo.getEncodedName());
Stat stat = new Stat();
byte [] data = ZKAssign.getDataNoWatch(watcher, node, stat);
if (data == null) {
LOG.warn("Data is null, node " + node + " no longer exists");
return;
}
RegionTransition rt = RegionTransition.parseFrom(data);
EventType et = rt.getEventType();
if (et == EventType.RS_ZK_REGION_OPENED) {
LOG.debug("Region has transitioned to OPENED, allowing "
+ "watched event handlers to process");
return;
} else if (et != EventType.RS_ZK_REGION_OPENING && et != EventType.RS_ZK_REGION_FAILED_OPEN ) {
LOG.warn("While timing out a region, found ZK node in unexpected state: " + et);
return;
}
invokeAssign(regionInfo);
} catch (KeeperException ke) {
LOG.error("Unexpected ZK exception timing out CLOSING region", ke);
} catch (DeserializationException e) {
LOG.error("Unexpected exception parsing CLOSING region", e);
}
}
void invokeAssign(HRegionInfo regionInfo) {
invokeAssign(regionInfo, true);
}
void invokeAssign(HRegionInfo regionInfo, boolean newPlan) {
threadPoolExecutorService.submit(new AssignCallable(this, regionInfo, newPlan));
}
void invokeUnAssign(HRegionInfo regionInfo) {
threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
}
private void invokeUnassign(HRegionInfo regionInfo) {
threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
}
public boolean isCarryingMeta(ServerName serverName) {
return isCarryingRegion(serverName, HRegionInfo.FIRST_META_REGIONINFO);
}
/**
* Check if the shutdown server carries the specific region.
* We have a bunch of places that store region location
* Those values aren't consistent. There is a delay of notification.
* The location from zookeeper unassigned node has the most recent data;
* but the node could be deleted after the region is opened by AM.
* The AM's info could be old when OpenedRegionHandler
* processing hasn't finished yet when server shutdown occurs.
* @return whether the serverName currently hosts the region
*/
private boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) {
RegionTransition rt = null;
try {
byte [] data = ZKAssign.getData(watcher, hri.getEncodedName());
      // This call can legitimately return null
rt = data == null? null: RegionTransition.parseFrom(data);
} catch (KeeperException e) {
server.abort("Exception reading unassigned node for region=" + hri.getEncodedName(), e);
} catch (DeserializationException e) {
server.abort("Exception parsing unassigned node for region=" + hri.getEncodedName(), e);
}
ServerName addressFromZK = rt != null? rt.getServerName(): null;
if (addressFromZK != null) {
// if we get something from ZK, we will use the data
boolean matchZK = addressFromZK.equals(serverName);
LOG.debug("Checking region=" + hri.getRegionNameAsString() + ", zk server=" + addressFromZK +
" current=" + serverName + ", matches=" + matchZK);
return matchZK;
}
ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
boolean matchAM = (addressFromAM != null &&
addressFromAM.equals(serverName));
LOG.debug("based on AM, current region=" + hri.getRegionNameAsString() +
" is on server=" + (addressFromAM != null ? addressFromAM : "null") +
" server being checked: " + serverName);
return matchAM;
}
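  // Illustrative sketch of a caller (the real call sites live in the server-shutdown
  // handling code, which is not part of this excerpt):
  //
  //   if (assignmentManager.isCarryingMeta(deadServer)) {
  //     // recover hbase:meta first, then deal with the remaining regions of deadServer
  //   }
  //
  // isCarryingMeta(sn) delegates to isCarryingRegion(sn, HRegionInfo.FIRST_META_REGIONINFO),
  // so the ZK-first / in-memory-fallback lookup documented above applies to it as well.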
/**
* Process shutdown server removing any assignments.
* @param sn Server that went down.
* @return list of regions in transition on this server
*/
public List<HRegionInfo> processServerShutdown(final ServerName sn) {
// Clean out any existing assignment plans for this server
synchronized (this.regionPlans) {
for (Iterator <Map.Entry<String, RegionPlan>> i =
this.regionPlans.entrySet().iterator(); i.hasNext();) {
Map.Entry<String, RegionPlan> e = i.next();
ServerName otherSn = e.getValue().getDestination();
// The name will be null if the region is planned for a random assign.
if (otherSn != null && otherSn.equals(sn)) {
// Use iterator's remove else we'll get CME
i.remove();
}
}
}
List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext(); ) {
HRegionInfo hri = it.next();
String encodedName = hri.getEncodedName();
// We need a lock on the region as we could update it
Lock lock = locker.acquireLock(encodedName);
try {
RegionState regionState =
regionStates.getRegionTransitionState(encodedName);
if (regionState == null
|| (regionState.getServerName() != null && !regionState.isOnServer(sn))
|| !(regionState.isFailedClose() || regionState.isOffline()
|| regionState.isPendingOpenOrOpening())) {
LOG.info("Skip " + regionState + " since it is not opening/failed_close"
+ " on the dead server any more: " + sn);
it.remove();
} else {
try {
// Delete the ZNode if exists
ZKAssign.deleteNodeFailSilent(watcher, hri);
} catch (KeeperException ke) {
server.abort("Unexpected ZK exception deleting node " + hri, ke);
}
if (zkTable.isDisablingOrDisabledTable(hri.getTable())) {
regionStates.regionOffline(hri);
it.remove();
continue;
}
// Mark the region offline and assign it again by SSH
regionStates.updateRegionState(hri, State.OFFLINE);
}
} finally {
lock.unlock();
}
}
return regions;
}
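  // The returned list only contains regions that were in transition on the dead server and
  // still need to be re-assigned; everything else is filtered out in the loop above.
  // Illustrative follow-up by a shutdown handler (sketch only -- the exact assign call used
  // by the real handler is an assumption here):
  //
  //   List<HRegionInfo> toAssign = assignmentManager.processServerShutdown(deadServer);
  //   for (HRegionInfo hri : toAssign) {
  //     assignmentManager.assign(hri, true);
  //   }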
/**
* @param plan Plan to execute.
*/
public void balance(final RegionPlan plan) {
HRegionInfo hri = plan.getRegionInfo();
TableName tableName = hri.getTable();
if (zkTable.isDisablingOrDisabledTable(tableName)) {
LOG.info("Ignored moving region of disabling/disabled table "
+ tableName);
return;
}
// Move the region only if it's assigned
String encodedName = hri.getEncodedName();
ReentrantLock lock = locker.acquireLock(encodedName);
try {
if (!regionStates.isRegionOnline(hri)) {
RegionState state = regionStates.getRegionState(encodedName);
LOG.info("Ignored moving region not assigned: " + hri + ", "
+ (state == null ? "not in region states" : state));
return;
}
synchronized (this.regionPlans) {
this.regionPlans.put(plan.getRegionName(), plan);
}
unassign(hri, false, plan.getDestination());
} finally {
lock.unlock();
}
}
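  // Illustrative usage (sketch): the master's balancer produces RegionPlans and hands each
  // one to balance(), which records the plan and unassigns the region from its current
  // server; the recorded destination is then honoured when the region is assigned again.
  //
  //   for (RegionPlan plan : plansFromBalancer) {   // plansFromBalancer is hypothetical
  //     assignmentManager.balance(plan);
  //   }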
public void stop() {
shutdown(); // Stop executor service, etc
if (tomActivated){
this.timeoutMonitor.interrupt();
this.timerUpdater.interrupt();
}
}
/**
* Shutdown the threadpool executor service
*/
public void shutdown() {
// It's an immediate shutdown, so we're clearing the remaining tasks.
synchronized (zkEventWorkerWaitingList){
zkEventWorkerWaitingList.clear();
}
threadPoolExecutorService.shutdownNow();
zkEventWorkers.shutdownNow();
regionStateStore.stop();
}
protected void setEnabledTable(TableName tableName) {
try {
this.zkTable.setEnabledTable(tableName);
} catch (KeeperException e) {
// here we can abort as it is the start up flow
String errorMsg = "Unable to ensure that the table " + tableName
+ " will be" + " enabled because of a ZooKeeper issue";
LOG.error(errorMsg);
this.server.abort(errorMsg, e);
}
}
/**
* Set region as OFFLINED up in zookeeper asynchronously.
* @param state
* @return True if we succeeded, false otherwise (State was incorrect or failed
* updating zk).
*/
private boolean asyncSetOfflineInZooKeeper(final RegionState state,
final AsyncCallback.StringCallback cb, final ServerName destination) {
if (!state.isClosed() && !state.isOffline()) {
this.server.abort("Unexpected state trying to OFFLINE; " + state,
new IllegalStateException());
return false;
}
regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
try {
ZKAssign.asyncCreateNodeOffline(watcher, state.getRegion(),
destination, cb, state);
} catch (KeeperException e) {
if (e instanceof NodeExistsException) {
LOG.warn("Node for " + state.getRegion() + " already exists");
} else {
server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
}
return false;
}
return true;
}
private boolean deleteNodeInStates(String encodedName,
String desc, ServerName sn, EventType... types) {
try {
for (EventType et: types) {
if (ZKAssign.deleteNode(watcher, encodedName, et, sn)) {
return true;
}
}
LOG.info("Failed to delete the " + desc + " node for "
+ encodedName + ". The node type may not match");
} catch (NoNodeException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("The " + desc + " node for " + encodedName + " already deleted");
}
} catch (KeeperException ke) {
server.abort("Unexpected ZK exception deleting " + desc
+ " node for the region " + encodedName, ke);
}
return false;
}
private void deleteMergingNode(String encodedName, ServerName sn) {
deleteNodeInStates(encodedName, "merging", sn, EventType.RS_ZK_REGION_MERGING,
EventType.RS_ZK_REQUEST_REGION_MERGE, EventType.RS_ZK_REGION_MERGED);
}
private void deleteSplittingNode(String encodedName, ServerName sn) {
deleteNodeInStates(encodedName, "splitting", sn, EventType.RS_ZK_REGION_SPLITTING,
EventType.RS_ZK_REQUEST_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT);
}
private void onRegionFailedOpen(
final HRegionInfo hri, final ServerName sn) {
String encodedName = hri.getEncodedName();
AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
if (failedOpenCount == null) {
failedOpenCount = new AtomicInteger();
// No need to use putIfAbsent, or extra synchronization since
// this whole handleRegion block is locked on the encoded region
// name, and failedOpenTracker is updated only in this block
failedOpenTracker.put(encodedName, failedOpenCount);
}
if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
regionStates.updateRegionState(hri, State.FAILED_OPEN);
// remove the tracking info to save memory, also reset
// the count for next open initiative
failedOpenTracker.remove(encodedName);
} else {
// Handle this the same as if it were opened and then closed.
RegionState regionState = regionStates.updateRegionState(hri, State.CLOSED);
if (regionState != null) {
        // When there is more than one region server, a new RS is selected as the
        // destination and the region plan is updated accordingly. (HBASE-5546)
Set<TableName> disablingOrDisabled = null;
try {
disablingOrDisabled = ZKTable.getDisablingTables(watcher);
disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher));
} catch (KeeperException e) {
server.abort("Cannot retrieve info about disabling or disabled tables ", e);
}
if (disablingOrDisabled.contains(hri.getTable())) {
offlineDisabledRegion(hri);
return;
}
// ZK Node is in CLOSED state, assign it.
regionStates.updateRegionState(hri, RegionState.State.CLOSED);
// This below has to do w/ online enable/disable of a table
removeClosedRegion(hri);
try {
getRegionPlan(hri, sn, true);
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
invokeAssign(hri, false);
}
}
}
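  // Worked example: with maximumAttempts = 3, the first two FAILED_OPEN reports for a region
  // put it back to CLOSED, obtain a fresh region plan and call invokeAssign(hri, false); the
  // third consecutive failure marks the region FAILED_OPEN and clears its failedOpenTracker
  // entry, so a later manual assign starts counting from zero again. (3 is only an example
  // value; maximumAttempts is configured elsewhere in this class.)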
private void onRegionOpen(
final HRegionInfo hri, final ServerName sn, long openSeqNum) {
regionOnline(hri, sn, openSeqNum);
if (useZKForAssignment) {
try {
// Delete the ZNode if exists
ZKAssign.deleteNodeFailSilent(watcher, hri);
} catch (KeeperException ke) {
server.abort("Unexpected ZK exception deleting node " + hri, ke);
}
}
// reset the count, if any
failedOpenTracker.remove(hri.getEncodedName());
if (isTableDisabledOrDisabling(hri.getTable())) {
invokeUnAssign(hri);
}
}
private void onRegionClosed(final HRegionInfo hri) {
if (isTableDisabledOrDisabling(hri.getTable())) {
offlineDisabledRegion(hri);
return;
}
regionStates.updateRegionState(hri, RegionState.State.CLOSED);
// This below has to do w/ online enable/disable of a table
removeClosedRegion(hri);
invokeAssign(hri, false);
}
private String onRegionSplit(ServerName sn, TransitionCode code,
HRegionInfo p, HRegionInfo a, HRegionInfo b) {
RegionState rs_p = regionStates.getRegionState(p);
RegionState rs_a = regionStates.getRegionState(a);
RegionState rs_b = regionStates.getRegionState(b);
if (!(rs_p.isOpenOrSplittingOnServer(sn)
&& (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
&& (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
return "Not in state good for split";
}
regionStates.updateRegionState(a, State.SPLITTING_NEW, sn);
regionStates.updateRegionState(b, State.SPLITTING_NEW, sn);
regionStates.updateRegionState(p, State.SPLITTING);
if (code == TransitionCode.SPLIT) {
if (TEST_SKIP_SPLIT_HANDLING) {
return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set";
}
regionOffline(p, State.SPLIT);
regionOnline(a, sn, 1);
regionOnline(b, sn, 1);
// User could disable the table before master knows the new region.
if (isTableDisabledOrDisabling(p.getTable())) {
invokeUnAssign(a);
invokeUnAssign(b);
}
} else if (code == TransitionCode.SPLIT_PONR) {
try {
regionStateStore.splitRegion(p, a, b, sn);
} catch (IOException ioe) {
LOG.info("Failed to record split region " + p.getShortNameToLog());
return "Failed to record the splitting in meta";
}
} else if (code == TransitionCode.SPLIT_REVERTED) {
regionOnline(p, sn);
regionOffline(a);
regionOffline(b);
if (isTableDisabledOrDisabling(p.getTable())) {
invokeUnAssign(p);
}
}
return null;
}
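  // Transition-code walkthrough for a split, as handled above:
  //   READY_TO_SPLIT -> parent marked SPLITTING, daughters marked SPLITTING_NEW
  //   SPLIT_PONR     -> regionStateStore.splitRegion() records the split in meta
  //   SPLIT          -> parent offlined as SPLIT, both daughters brought online on the server
  //   SPLIT_REVERTED -> parent brought back online, daughters offlined again
  // Any precondition failure returns a non-null error string, which onRegionTransition
  // logs and passes back to the reporting region server.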
private boolean isTableDisabledOrDisabling(TableName t) {
Set<TableName> disablingOrDisabled = null;
try {
disablingOrDisabled = ZKTable.getDisablingTables(watcher);
disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher));
} catch (KeeperException e) {
server.abort("Cannot retrieve info about disabling or disabled tables ", e);
}
    return disablingOrDisabled.contains(t);
}
private String onRegionMerge(ServerName sn, TransitionCode code,
HRegionInfo p, HRegionInfo a, HRegionInfo b) {
RegionState rs_p = regionStates.getRegionState(p);
RegionState rs_a = regionStates.getRegionState(a);
RegionState rs_b = regionStates.getRegionState(b);
if (!(rs_a.isOpenOrMergingOnServer(sn) && rs_b.isOpenOrMergingOnServer(sn)
&& (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
return "Not in state good for merge";
}
regionStates.updateRegionState(a, State.MERGING);
regionStates.updateRegionState(b, State.MERGING);
regionStates.updateRegionState(p, State.MERGING_NEW, sn);
String encodedName = p.getEncodedName();
if (code == TransitionCode.READY_TO_MERGE) {
mergingRegions.put(encodedName,
new PairOfSameType<HRegionInfo>(a, b));
} else if (code == TransitionCode.MERGED) {
mergingRegions.remove(encodedName);
regionOffline(a, State.MERGED);
regionOffline(b, State.MERGED);
regionOnline(p, sn, 1);
// User could disable the table before master knows the new region.
if (isTableDisabledOrDisabling(p.getTable())) {
invokeUnAssign(p);
}
} else if (code == TransitionCode.MERGE_PONR) {
try {
regionStateStore.mergeRegions(p, a, b, sn);
} catch (IOException ioe) {
LOG.info("Failed to record merged region " + p.getShortNameToLog());
return "Failed to record the merging in meta";
}
} else {
mergingRegions.remove(encodedName);
regionOnline(a, sn);
regionOnline(b, sn);
regionOffline(p);
if (isTableDisabledOrDisabling(p.getTable())) {
invokeUnAssign(a);
invokeUnAssign(b);
}
}
return null;
}
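  // Transition-code walkthrough for a merge, as handled above:
  //   READY_TO_MERGE -> pair remembered in mergingRegions, parents marked MERGING,
  //                     merged region marked MERGING_NEW
  //   MERGE_PONR     -> regionStateStore.mergeRegions() records the merge in meta
  //   MERGED         -> both parents offlined as MERGED, merged region brought online
  //   MERGE_REVERTED -> parents brought back online, the would-be merged region offlined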
/**
* A helper to handle region merging transition event.
* It transitions merging regions to MERGING state.
*/
private boolean handleRegionMerging(final RegionTransition rt, final String encodedName,
final String prettyPrintedRegionName, final ServerName sn) {
if (!serverManager.isServerOnline(sn)) {
LOG.warn("Dropped merging! ServerName=" + sn + " unknown.");
return false;
}
byte [] payloadOfMerging = rt.getPayload();
List<HRegionInfo> mergingRegions;
try {
mergingRegions = HRegionInfo.parseDelimitedFrom(
payloadOfMerging, 0, payloadOfMerging.length);
} catch (IOException e) {
LOG.error("Dropped merging! Failed reading " + rt.getEventType()
+ " payload for " + prettyPrintedRegionName);
return false;
}
assert mergingRegions.size() == 3;
HRegionInfo p = mergingRegions.get(0);
HRegionInfo hri_a = mergingRegions.get(1);
HRegionInfo hri_b = mergingRegions.get(2);
RegionState rs_p = regionStates.getRegionState(p);
RegionState rs_a = regionStates.getRegionState(hri_a);
RegionState rs_b = regionStates.getRegionState(hri_b);
if (!((rs_a == null || rs_a.isOpenOrMergingOnServer(sn))
&& (rs_b == null || rs_b.isOpenOrMergingOnServer(sn))
&& (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
LOG.warn("Dropped merging! Not in state good for MERGING; rs_p="
+ rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
return false;
}
EventType et = rt.getEventType();
if (et == EventType.RS_ZK_REQUEST_REGION_MERGE) {
try {
if (RegionMergeTransaction.transitionMergingNode(watcher, p,
hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_MERGE,
EventType.RS_ZK_REGION_MERGING) == -1) {
byte[] data = ZKAssign.getData(watcher, encodedName);
EventType currentType = null;
if (data != null) {
RegionTransition newRt = RegionTransition.parseFrom(data);
currentType = newRt.getEventType();
}
if (currentType == null || (currentType != EventType.RS_ZK_REGION_MERGED
&& currentType != EventType.RS_ZK_REGION_MERGING)) {
LOG.warn("Failed to transition pending_merge node "
+ encodedName + " to merging, it's now " + currentType);
return false;
}
}
} catch (Exception e) {
LOG.warn("Failed to transition pending_merge node "
+ encodedName + " to merging", e);
return false;
}
}
synchronized (regionStates) {
regionStates.updateRegionState(hri_a, State.MERGING);
regionStates.updateRegionState(hri_b, State.MERGING);
regionStates.updateRegionState(p, State.MERGING_NEW, sn);
if (et != EventType.RS_ZK_REGION_MERGED) {
this.mergingRegions.put(encodedName,
new PairOfSameType<HRegionInfo>(hri_a, hri_b));
} else {
this.mergingRegions.remove(encodedName);
regionOffline(hri_a, State.MERGED);
regionOffline(hri_b, State.MERGED);
regionOnline(p, sn);
}
}
if (et == EventType.RS_ZK_REGION_MERGED) {
LOG.debug("Handling MERGED event for " + encodedName + "; deleting node");
// Remove region from ZK
try {
boolean successful = false;
while (!successful) {
// It's possible that the RS tickles in between the reading of the
// znode and the deleting, so it's safe to retry.
successful = ZKAssign.deleteNode(watcher, encodedName,
EventType.RS_ZK_REGION_MERGED, sn);
}
} catch (KeeperException e) {
if (e instanceof NoNodeException) {
String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
LOG.debug("The znode " + znodePath + " does not exist. May be deleted already.");
} else {
server.abort("Error deleting MERGED node " + encodedName, e);
}
}
LOG.info("Handled MERGED event; merged=" + p.getRegionNameAsString()
+ ", region_a=" + hri_a.getRegionNameAsString() + ", region_b="
+ hri_b.getRegionNameAsString() + ", on " + sn);
// User could disable the table before master knows the new region.
if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
unassign(p);
}
}
return true;
}
/**
* A helper to handle region splitting transition event.
*/
private boolean handleRegionSplitting(final RegionTransition rt, final String encodedName,
final String prettyPrintedRegionName, final ServerName sn) {
if (!serverManager.isServerOnline(sn)) {
LOG.warn("Dropped splitting! ServerName=" + sn + " unknown.");
return false;
}
byte [] payloadOfSplitting = rt.getPayload();
List<HRegionInfo> splittingRegions;
try {
splittingRegions = HRegionInfo.parseDelimitedFrom(
payloadOfSplitting, 0, payloadOfSplitting.length);
} catch (IOException e) {
LOG.error("Dropped splitting! Failed reading " + rt.getEventType()
+ " payload for " + prettyPrintedRegionName);
return false;
}
assert splittingRegions.size() == 2;
HRegionInfo hri_a = splittingRegions.get(0);
HRegionInfo hri_b = splittingRegions.get(1);
RegionState rs_p = regionStates.getRegionState(encodedName);
RegionState rs_a = regionStates.getRegionState(hri_a);
RegionState rs_b = regionStates.getRegionState(hri_b);
if (!((rs_p == null || rs_p.isOpenOrSplittingOnServer(sn))
&& (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
&& (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
LOG.warn("Dropped splitting! Not in state good for SPLITTING; rs_p="
+ rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
return false;
}
if (rs_p == null) {
// Splitting region should be online
rs_p = regionStates.updateRegionState(rt, State.OPEN);
if (rs_p == null) {
LOG.warn("Received splitting for region " + prettyPrintedRegionName
+ " from server " + sn + " but it doesn't exist anymore,"
+ " probably already processed its split");
return false;
}
regionStates.regionOnline(rs_p.getRegion(), sn);
}
HRegionInfo p = rs_p.getRegion();
EventType et = rt.getEventType();
if (et == EventType.RS_ZK_REQUEST_REGION_SPLIT) {
try {
if (SplitTransaction.transitionSplittingNode(watcher, p,
hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_SPLIT,
EventType.RS_ZK_REGION_SPLITTING) == -1) {
byte[] data = ZKAssign.getData(watcher, encodedName);
EventType currentType = null;
if (data != null) {
RegionTransition newRt = RegionTransition.parseFrom(data);
currentType = newRt.getEventType();
}
if (currentType == null || (currentType != EventType.RS_ZK_REGION_SPLIT
&& currentType != EventType.RS_ZK_REGION_SPLITTING)) {
LOG.warn("Failed to transition pending_split node "
+ encodedName + " to splitting, it's now " + currentType);
return false;
}
}
} catch (Exception e) {
LOG.warn("Failed to transition pending_split node "
+ encodedName + " to splitting", e);
return false;
}
}
synchronized (regionStates) {
regionStates.updateRegionState(hri_a, State.SPLITTING_NEW, sn);
regionStates.updateRegionState(hri_b, State.SPLITTING_NEW, sn);
regionStates.updateRegionState(rt, State.SPLITTING);
      // The below is for testing ONLY! We can't do fault injection easily, so
      // resort to this kind of ugliness -- St.Ack 02/25/2011.
if (TEST_SKIP_SPLIT_HANDLING) {
LOG.warn("Skipping split message, TEST_SKIP_SPLIT_HANDLING is set");
return true; // return true so that the splitting node stays
}
if (et == EventType.RS_ZK_REGION_SPLIT) {
regionOffline(p, State.SPLIT);
regionOnline(hri_a, sn);
regionOnline(hri_b, sn);
}
}
if (et == EventType.RS_ZK_REGION_SPLIT) {
LOG.debug("Handling SPLIT event for " + encodedName + "; deleting node");
// Remove region from ZK
try {
boolean successful = false;
while (!successful) {
// It's possible that the RS tickles in between the reading of the
// znode and the deleting, so it's safe to retry.
successful = ZKAssign.deleteNode(watcher, encodedName,
EventType.RS_ZK_REGION_SPLIT, sn);
}
} catch (KeeperException e) {
if (e instanceof NoNodeException) {
String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
LOG.debug("The znode " + znodePath + " does not exist. May be deleted already.");
} else {
server.abort("Error deleting SPLIT node " + encodedName, e);
}
}
LOG.info("Handled SPLIT event; parent=" + p.getRegionNameAsString()
+ ", daughter a=" + hri_a.getRegionNameAsString() + ", daughter b="
+ hri_b.getRegionNameAsString() + ", on " + sn);
// User could disable the table before master knows the new region.
if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
unassign(hri_a);
unassign(hri_b);
}
}
return true;
}
/**
* A region is offline. The new state should be the specified one,
* if not null. If the specified state is null, the new state is Offline.
* The specified state can be Split/Merged/Offline/null only.
*/
private void regionOffline(final HRegionInfo regionInfo, final State state) {
regionStates.regionOffline(regionInfo, state);
removeClosedRegion(regionInfo);
// remove the region plan as well just in case.
clearRegionPlan(regionInfo);
balancer.regionOffline(regionInfo);
// Tell our listeners that a region was closed
sendRegionClosedNotification(regionInfo);
}
private void sendRegionOpenedNotification(final HRegionInfo regionInfo,
final ServerName serverName) {
if (!this.listeners.isEmpty()) {
for (AssignmentListener listener : this.listeners) {
listener.regionOpened(regionInfo, serverName);
}
}
}
private void sendRegionClosedNotification(final HRegionInfo regionInfo) {
if (!this.listeners.isEmpty()) {
for (AssignmentListener listener : this.listeners) {
listener.regionClosed(regionInfo);
}
}
}
/**
* Try to update some region states. If the state machine prevents
* such update, an error message is returned to explain the reason.
*
* It's expected that in each transition there should have just one
* region for opening/closing, 3 regions for splitting/merging.
* These regions should be on the server that requested the change.
*
* Region state machine. Only these transitions
* are expected to be triggered by a region server.
*
* On the state transition:
* (1) Open/Close should be initiated by master
* (a) Master sets the region to pending_open/pending_close
* in memory and hbase:meta after sending the request
* to the region server
* (b) Region server reports back to the master
* after open/close is done (either success/failure)
* (c) If region server has problem to report the status
* to master, it must be because the master is down or some
* temporary network issue. Otherwise, the region server should
* abort since it must be a bug. If the master is not accessible,
* the region server should keep trying until the server is
* stopped or till the status is reported to the (new) master
* (d) If region server dies in the middle of opening/closing
* a region, SSH picks it up and finishes it
* (e) If master dies in the middle, the new master recovers
* the state during initialization from hbase:meta. Region server
* can report any transition that has not been reported to
* the previous active master yet
* (2) Split/merge is initiated by region servers
* (a) To split a region, a region server sends a request
* to master to try to set a region to splitting, together with
* two daughters (to be created) to splitting new. If approved
* by the master, the splitting can then move ahead
* (b) To merge two regions, a region server sends a request to
* master to try to set the new merged region (to be created) to
* merging_new, together with two regions (to be merged) to merging.
* If it is ok with the master, the merge can then move ahead
* (c) Once the splitting/merging is done, the region server
* reports the status back to the master either success/failure.
* (d) Other scenarios should be handled similarly as for
* region open/close
*/
protected String onRegionTransition(final ServerName serverName,
final RegionStateTransition transition) {
TransitionCode code = transition.getTransitionCode();
HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0));
RegionState current = regionStates.getRegionState(hri);
if (LOG.isDebugEnabled()) {
LOG.debug("Got transition " + code + " for "
+ (current != null ? current.toString() : hri.getShortNameToLog())
+ " from " + serverName);
}
String errorMsg = null;
switch (code) {
case OPENED:
if (current != null && current.isOpened() && current.isOnServer(serverName)) {
LOG.info("Region " + hri.getShortNameToLog() + " is already " + current.getState() + " on "
+ serverName);
break;
}
case FAILED_OPEN:
if (current == null
|| !current.isPendingOpenOrOpeningOnServer(serverName)) {
errorMsg = hri.getShortNameToLog()
+ " is not pending open on " + serverName;
} else if (code == TransitionCode.FAILED_OPEN) {
onRegionFailedOpen(hri, serverName);
} else {
long openSeqNum = HConstants.NO_SEQNUM;
if (transition.hasOpenSeqNum()) {
openSeqNum = transition.getOpenSeqNum();
}
if (openSeqNum < 0) {
errorMsg = "Newly opened region has invalid open seq num " + openSeqNum;
} else {
onRegionOpen(hri, serverName, openSeqNum);
}
}
break;
case CLOSED:
if (current == null
|| !current.isPendingCloseOrClosingOnServer(serverName)) {
errorMsg = hri.getShortNameToLog()
+ " is not pending close on " + serverName;
} else {
onRegionClosed(hri);
}
break;
case READY_TO_SPLIT:
case SPLIT_PONR:
case SPLIT:
case SPLIT_REVERTED:
errorMsg = onRegionSplit(serverName, code, hri,
HRegionInfo.convert(transition.getRegionInfo(1)),
HRegionInfo.convert(transition.getRegionInfo(2)));
break;
case READY_TO_MERGE:
case MERGE_PONR:
case MERGED:
case MERGE_REVERTED:
errorMsg = onRegionMerge(serverName, code, hri,
HRegionInfo.convert(transition.getRegionInfo(1)),
HRegionInfo.convert(transition.getRegionInfo(2)));
break;
default:
errorMsg = "Unexpected transition code " + code;
}
if (errorMsg != null) {
LOG.error("Failed to transtion region from " + current + " to "
+ code + " by " + serverName + ": " + errorMsg);
}
return errorMsg;
}
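  // Illustrative sketch of a reported transition (the builder calls below follow the usual
  // protobuf-generated API for RegionStateTransition and are an assumption of this sketch):
  //
  //   RegionStateTransition transition = RegionStateTransition.newBuilder()
  //       .setTransitionCode(TransitionCode.OPENED)
  //       .addRegionInfo(HRegionInfo.convert(hri))
  //       .setOpenSeqNum(42)                       // example sequence number
  //       .build();
  //   String error = assignmentManager.onRegionTransition(serverName, transition);
  //   // a null return value means the master accepted the OPENED report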
/**
* @return Instance of load balancer
*/
public LoadBalancer getBalancer() {
return this.balancer;
}
}
| Jackygq1982/hbase_src | hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java | Java | apache-2.0 | 167,470 |
package io.dropwizard.jetty;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jetty9.InstrumentedConnectionFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.dropwizard.configuration.ResourceConfigurationSourceProvider;
import io.dropwizard.configuration.YamlConfigurationFactory;
import io.dropwizard.jackson.DiscoverableSubtypeResolver;
import io.dropwizard.jackson.Jackson;
import io.dropwizard.logging.ConsoleAppenderFactory;
import io.dropwizard.logging.FileAppenderFactory;
import io.dropwizard.logging.SyslogAppenderFactory;
import io.dropwizard.util.DataSize;
import io.dropwizard.util.Duration;
import io.dropwizard.validation.BaseValidator;
import org.assertj.core.api.InstanceOfAssertFactories;
import org.eclipse.jetty.http.CookieCompliance;
import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.server.ForwardedRequestCustomizer;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.ProxyConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
import org.eclipse.jetty.util.thread.ThreadPool;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import javax.validation.Validator;
import java.util.Optional;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
class HttpConnectorFactoryTest {
private final ObjectMapper objectMapper = Jackson.newObjectMapper();
private final Validator validator = BaseValidator.newValidator();
@BeforeEach
void setUp() {
objectMapper.getSubtypeResolver().registerSubtypes(ConsoleAppenderFactory.class,
FileAppenderFactory.class, SyslogAppenderFactory.class, HttpConnectorFactory.class);
}
@Test
void isDiscoverable() {
assertThat(new DiscoverableSubtypeResolver().getDiscoveredSubtypes())
.contains(HttpConnectorFactory.class);
}
@Test
void testParseMinimalConfiguration() throws Exception {
HttpConnectorFactory http =
new YamlConfigurationFactory<>(HttpConnectorFactory.class, validator, objectMapper, "dw")
.build(new ResourceConfigurationSourceProvider(), "yaml/http-connector-minimal.yml");
assertThat(http.getPort()).isEqualTo(8080);
assertThat(http.getBindHost()).isNull();
assertThat(http.isInheritChannel()).isFalse();
assertThat(http.getHeaderCacheSize()).isEqualTo(DataSize.bytes(512));
assertThat(http.getOutputBufferSize()).isEqualTo(DataSize.kibibytes(32));
assertThat(http.getMaxRequestHeaderSize()).isEqualTo(DataSize.kibibytes(8));
assertThat(http.getMaxResponseHeaderSize()).isEqualTo(DataSize.kibibytes(8));
assertThat(http.getInputBufferSize()).isEqualTo(DataSize.kibibytes(8));
assertThat(http.getIdleTimeout()).isEqualTo(Duration.seconds(30));
assertThat(http.getMinBufferPoolSize()).isEqualTo(DataSize.bytes(64));
assertThat(http.getBufferPoolIncrement()).isEqualTo(DataSize.bytes(1024));
assertThat(http.getMaxBufferPoolSize()).isEqualTo(DataSize.kibibytes(64));
assertThat(http.getMinRequestDataPerSecond()).isEqualTo(DataSize.bytes(0));
assertThat(http.getMinResponseDataPerSecond()).isEqualTo(DataSize.bytes(0));
assertThat(http.getAcceptorThreads()).isEmpty();
assertThat(http.getSelectorThreads()).isEmpty();
assertThat(http.getAcceptQueueSize()).isNull();
assertThat(http.isReuseAddress()).isTrue();
assertThat(http.isUseServerHeader()).isFalse();
assertThat(http.isUseDateHeader()).isTrue();
assertThat(http.isUseForwardedHeaders()).isFalse();
assertThat(http.getHttpCompliance()).isEqualTo(HttpCompliance.RFC7230);
assertThat(http.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
assertThat(http.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
}
@Test
void testParseFullConfiguration() throws Exception {
HttpConnectorFactory http =
new YamlConfigurationFactory<>(HttpConnectorFactory.class, validator, objectMapper, "dw")
.build(new ResourceConfigurationSourceProvider(), "yaml/http-connector.yml");
assertThat(http.getPort()).isEqualTo(9090);
assertThat(http.getBindHost()).isEqualTo("127.0.0.1");
assertThat(http.isInheritChannel()).isTrue();
assertThat(http.getHeaderCacheSize()).isEqualTo(DataSize.bytes(256));
assertThat(http.getOutputBufferSize()).isEqualTo(DataSize.kibibytes(128));
assertThat(http.getMaxRequestHeaderSize()).isEqualTo(DataSize.kibibytes(4));
assertThat(http.getMaxResponseHeaderSize()).isEqualTo(DataSize.kibibytes(4));
assertThat(http.getInputBufferSize()).isEqualTo(DataSize.kibibytes(4));
assertThat(http.getIdleTimeout()).isEqualTo(Duration.seconds(10));
assertThat(http.getMinBufferPoolSize()).isEqualTo(DataSize.bytes(128));
assertThat(http.getBufferPoolIncrement()).isEqualTo(DataSize.bytes(500));
assertThat(http.getMaxBufferPoolSize()).isEqualTo(DataSize.kibibytes(32));
assertThat(http.getMinRequestDataPerSecond()).isEqualTo(DataSize.bytes(42));
assertThat(http.getMinResponseDataPerSecond()).isEqualTo(DataSize.bytes(200));
assertThat(http.getAcceptorThreads()).contains(1);
assertThat(http.getSelectorThreads()).contains(4);
assertThat(http.getAcceptQueueSize()).isEqualTo(1024);
assertThat(http.isReuseAddress()).isFalse();
assertThat(http.isUseServerHeader()).isTrue();
assertThat(http.isUseDateHeader()).isFalse();
assertThat(http.isUseForwardedHeaders()).isTrue();
HttpConfiguration httpConfiguration = http.buildHttpConfiguration();
assertThat(httpConfiguration.getCustomizers()).hasAtLeastOneElementOfType(ForwardedRequestCustomizer.class);
assertThat(http.getHttpCompliance()).isEqualTo(HttpCompliance.RFC2616);
assertThat(http.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC2965);
assertThat(http.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
}
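    // For reference: given the assertions above, the resource yaml/http-connector.yml
    // presumably looks roughly like the sketch below. Key names follow HttpConnectorFactory's
    // Jackson property names; the exact file contents are an assumption of this sketch, only
    // the asserted values are certain.
    //
    //   type: http
    //   port: 9090
    //   bindHost: 127.0.0.1
    //   inheritChannel: true
    //   headerCacheSize: 256 bytes
    //   outputBufferSize: 128 KiB
    //   maxRequestHeaderSize: 4 KiB
    //   maxResponseHeaderSize: 4 KiB
    //   inputBufferSize: 4 KiB
    //   idleTimeout: 10 seconds
    //   minBufferPoolSize: 128 bytes
    //   bufferPoolIncrement: 500 bytes
    //   maxBufferPoolSize: 32 KiB
    //   minRequestDataPerSecond: 42 bytes
    //   minResponseDataPerSecond: 200 bytes
    //   acceptorThreads: 1
    //   selectorThreads: 4
    //   acceptQueueSize: 1024
    //   reuseAddress: false
    //   useServerHeader: true
    //   useDateHeader: false
    //   useForwardedHeaders: true
    //   httpCompliance: RFC2616
    //   requestCookieCompliance: RFC2965
    //   responseCookieCompliance: RFC6265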
@Test
void testBuildConnector() throws Exception {
HttpConnectorFactory http = spy(new HttpConnectorFactory());
http.setBindHost("127.0.0.1");
http.setAcceptorThreads(Optional.of(1));
http.setSelectorThreads(Optional.of(2));
http.setAcceptQueueSize(1024);
http.setMinResponseDataPerSecond(DataSize.bytes(200));
http.setMinRequestDataPerSecond(DataSize.bytes(42));
http.setRequestCookieCompliance(CookieCompliance.RFC6265);
http.setResponseCookieCompliance(CookieCompliance.RFC6265);
MetricRegistry metrics = new MetricRegistry();
ThreadPool threadPool = new QueuedThreadPool();
Server server = null;
ServerConnector connector = null;
try {
server = new Server();
connector = (ServerConnector) http.build(server, metrics, "test-http-connector", threadPool);
assertThat(connector.getPort()).isEqualTo(8080);
assertThat(connector.getHost()).isEqualTo("127.0.0.1");
assertThat(connector.getAcceptQueueSize()).isEqualTo(1024);
assertThat(connector.getReuseAddress()).isTrue();
assertThat(connector.getIdleTimeout()).isEqualTo(30000);
assertThat(connector.getName()).isEqualTo("test-http-connector");
assertThat(connector.getServer()).isSameAs(server);
assertThat(connector.getScheduler()).isInstanceOf(ScheduledExecutorScheduler.class);
assertThat(connector.getExecutor()).isSameAs(threadPool);
verify(http).buildBufferPool(64, 1024, 64 * 1024);
assertThat(connector.getAcceptors()).isEqualTo(1);
assertThat(connector.getSelectorManager().getSelectorCount()).isEqualTo(2);
InstrumentedConnectionFactory connectionFactory =
(InstrumentedConnectionFactory) connector.getConnectionFactory("http/1.1");
assertThat(connectionFactory).isInstanceOf(InstrumentedConnectionFactory.class);
assertThat(connectionFactory)
.extracting("connectionFactory")
.asInstanceOf(InstanceOfAssertFactories.type(HttpConnectionFactory.class))
.satisfies(factory -> {
assertThat(factory.getInputBufferSize()).isEqualTo(8192);
assertThat(factory.getHttpCompliance()).isEqualByComparingTo(HttpCompliance.RFC7230);
})
.extracting(HttpConnectionFactory::getHttpConfiguration)
.satisfies(config -> {
assertThat(config.getHeaderCacheSize()).isEqualTo(512);
assertThat(config.getOutputBufferSize()).isEqualTo(32768);
assertThat(config.getRequestHeaderSize()).isEqualTo(8192);
assertThat(config.getResponseHeaderSize()).isEqualTo(8192);
assertThat(config.getSendDateHeader()).isTrue();
assertThat(config.getSendServerVersion()).isFalse();
assertThat(config.getCustomizers()).noneMatch(customizer -> customizer.getClass().equals(ForwardedRequestCustomizer.class));
assertThat(config.getMinRequestDataRate()).isEqualTo(42);
assertThat(config.getMinResponseDataRate()).isEqualTo(200);
assertThat(config.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
assertThat(config.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
});
} finally {
if (connector != null) {
connector.stop();
}
if (server != null) {
server.stop();
}
}
}
@Test
void testBuildConnectorWithProxyProtocol() throws Exception {
HttpConnectorFactory http = new HttpConnectorFactory();
http.setBindHost("127.0.0.1");
http.setUseProxyProtocol(true);
MetricRegistry metrics = new MetricRegistry();
ThreadPool threadPool = new QueuedThreadPool();
Server server = null;
ServerConnector connector = null;
try {
server = new Server();
connector = (ServerConnector) http.build(server, metrics, "test-http-connector-with-proxy-protocol", threadPool);
assertThat(connector.getConnectionFactories().toArray()[0]).isInstanceOf(ProxyConnectionFactory.class);
} finally {
if (connector != null) {
connector.stop();
}
if (server != null) {
server.stop();
}
}
}
@Test
void testDefaultAcceptQueueSize() throws Exception {
HttpConnectorFactory http = new HttpConnectorFactory();
http.setBindHost("127.0.0.1");
http.setAcceptorThreads(Optional.of(1));
http.setSelectorThreads(Optional.of(2));
MetricRegistry metrics = new MetricRegistry();
ThreadPool threadPool = new QueuedThreadPool();
Server server = null;
ServerConnector connector = null;
try {
server = new Server();
connector = (ServerConnector) http.build(server, metrics, "test-http-connector", threadPool);
assertThat(connector.getAcceptQueueSize()).isEqualTo(NetUtil.getTcpBacklog());
} finally {
if (connector != null) {
connector.stop();
}
if (server != null) {
server.stop();
}
}
}
}
| phambryan/dropwizard | dropwizard-jetty/src/test/java/io/dropwizard/jetty/HttpConnectorFactoryTest.java | Java | apache-2.0 | 12,103 |
package com.p.service;
import java.util.Collection;
import java.util.Optional;
import java.util.Random;
import java.util.UUID;
import javax.annotation.Resource;
import org.apache.log4j.Logger;
import org.hibernate.SessionFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Isolation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.Assert;
import com.p.model.Notificacion;
import com.p.model.Role;
import com.p.model.User;
import com.p.model.modelAux.RegisterUser;
import com.p.model.repositories.UserRepository;
@Service("usersService")
@Transactional(isolation = Isolation.READ_UNCOMMITTED)
public class UsersService {
protected static Logger logger = Logger.getLogger("service");
@Resource(name = "sessionFactory")
private SessionFactory sessionFactory;
@Autowired
private UserRepository repository;
@Autowired
private NotificacionService notificacionService;
@Autowired
private EmailManager emailManager;
@Autowired
private PasswordEncoder passwordEncoder;
@Transactional
	/**
	 * Deletes a user, distinguishing web users (ids starting with 1) from keyring users (ids starting with 0)
	 *
	 * @param id
	 *            the id of the existing user
	 */
public void delete(Integer id) {
Assert.notNull(id);
Assert.isTrue(id > 0);
repository.delete(id);
}
	/**
	 * Saves or edits, depending on whether the ID is already set
	 *
	 * @param us
	 */
@Transactional()
public User save(User us) {
gestionarAvatar(us);
gestionarAltaUsuario(us);
User usr = repository.save(us);
return usr;
}
protected void gestionarAltaUsuario(User us) {
if (us.getId() == null || us.getId().equals(0)) {
gestionarNotificacionAltaUsuario(us);
gestionarEmailAltaUsuario(us);
}
}
protected void gestionarEmailAltaUsuario(User us) {
emailManager.notify(us);
}
/**
* @param us
*/
protected void gestionarNotificacionAltaUsuario(User us) {
		// This is a new user:
		// send them an email and a notification
Notificacion notificacion = notificacionService.create();
Optional<User> admin = repository.findAdministradores().stream()
.findFirst();
Assert.isTrue(admin.isPresent());
User administrador = admin.get();
notificacion.setEmisor(administrador);
notificacion.setReceptor(us);
notificacion.setTitulo("Gracias por registrarte en Pachanga!");
notificacion
.setContenido("¿Porque no completas tu perfil? Quedará mucho más mono :)");
notificacionService.save(notificacion);
}
/**
* @param us
*/
protected void gestionarAvatar(User us) {
if (us.getAvatar() == null) {
Random rd = new Random();
us.setAvatar(User.avatarCss[rd.nextInt(User.avatarCss.length)]);
}
}
@Transactional
public User getByEmail(String login) {
Assert.notNull(login);
Assert.isTrue(login.length() > 0);
return repository.findByEmail(login);
}
@Transactional
public User findOne(Integer id) {
Assert.notNull(id);
Assert.isTrue(id > -1);
return repository.findOne(id);
}
@Transactional
public Collection<User> findAll() {
return repository.findAll();
}
@Transactional
public Collection<User> findAllDifferent(String email) {
return repository.findAllDifferent(email);
}
@Transactional(readOnly = true)
/**
*
* @author David Romero Alcaide
* @return
*/
public User getPrincipal() {
User result;
SecurityContext context;
Authentication authentication;
Object principal;
// If the asserts in this method fail, then you're
// likely to have your Tomcat's working directory
// corrupt. Please, clear your browser's cache, stop
// Tomcat, update your Maven's project configuration,
// clean your project, clean Tomcat's working directory,
// republish your project, and start it over.
context = SecurityContextHolder.getContext();
Assert.notNull(context);
authentication = context.getAuthentication();
Assert.notNull(authentication);
principal = authentication.getPrincipal();
Assert.isTrue(principal instanceof org.springframework.security.core.userdetails.User);
result = getByEmail(((org.springframework.security.core.userdetails.User) principal)
.getUsername());
Assert.notNull(result);
Assert.isTrue(result.getId() != 0);
return result;
}
public User map(RegisterUser user) {
User usr = create();
usr.setEmail(user.getEmail());
usr.setPassword(user.getPassword());
return usr;
}
public User create() {
User user = new User();
user.setFirstName(" ");
user.setLastName(" ");
user.setRole(Role.ROLE_USER);
return user;
}
@Transactional
public void regenerarPassword(User user) {
		String newPass = UUID.randomUUID().toString();
		// Persist only the encoded form; the e-mail must carry the plain generated password
		user.setPassword(passwordEncoder.encode(newPass));
		save(user);
		emailManager.notifyNewPassword(user, newPass);
}
@Transactional(isolation = Isolation.READ_UNCOMMITTED)
public byte[] findImage(Integer id) {
Assert.notNull(id);
Assert.isTrue(id > 0);
return repository.findImage(id);
}
@Transactional(readOnly = true)
public Collection<? extends User> find(String texto) {
return repository.findFullText(texto);
}
}
| david-romero/Pachanga | src/main/java/com/p/service/UsersService.java | Java | apache-2.0 | 5,489 |
package org.apache.solr.cloud;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.http.params.CoreConnectionPNames;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Hash;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.update.DirectUpdateHandler2;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class ShardSplitTest extends BasicDistributedZkTest {
public static final String SHARD1_0 = SHARD1 + "_0";
public static final String SHARD1_1 = SHARD1 + "_1";
@Override
@Before
public void setUp() throws Exception {
super.setUp();
System.setProperty("numShards", Integer.toString(sliceCount));
System.setProperty("solr.xml.persist", "true");
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
if (VERBOSE || printLayoutOnTearDown) {
super.printLayout();
}
if (controlClient != null) {
controlClient.shutdown();
}
if (cloudClient != null) {
cloudClient.shutdown();
}
if (controlClientCloud != null) {
controlClientCloud.shutdown();
}
super.tearDown();
System.clearProperty("zkHost");
System.clearProperty("numShards");
System.clearProperty("solr.xml.persist");
// insurance
DirectUpdateHandler2.commitOnClose = true;
}
@Override
public void doTest() throws Exception {
waitForThingsToLevelOut(15);
ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
final int[] docCounts = new int[ranges.size()];
int numReplicas = shard1.getReplicas().size();
del("*:*");
for (int id = 0; id < 100; id++) {
indexAndUpdateCount(ranges, docCounts, id);
}
commit();
Thread indexThread = new Thread() {
@Override
public void run() {
        final int maxId = atLeast(401);
        for (int id = 101; id < maxId; id++) {
try {
indexAndUpdateCount(ranges, docCounts, id);
Thread.sleep(atLeast(25));
} catch (Exception e) {
log.error("Exception while adding doc", e);
}
}
}
};
indexThread.start();
splitShard(SHARD1);
log.info("Layout after split: \n");
printLayout();
indexThread.join();
commit();
checkDocCountsAndShardStates(docCounts, numReplicas);
// todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
// and the new sub-shards don't have any.
waitForRecoveriesToFinish(true);
//waitForThingsToLevelOut(15);
}
protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws SolrServerException, KeeperException, InterruptedException {
SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
query.set("distrib", false);
ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
QueryResponse response = shard1_0Server.query(query);
long shard10Count = response.getResults().getNumFound();
ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
QueryResponse response2 = shard1_1Server.query(query);
long shard11Count = response2.getResults().getNumFound();
logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
assertEquals("Wrong doc count on shard1_0", docCounts[0], shard10Count);
assertEquals("Wrong doc count on shard1_1", docCounts[1], shard11Count);
ClusterState clusterState = null;
Slice slice1_0 = null, slice1_1 = null;
int i = 0;
for (i = 0; i < 10; i++) {
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
zkStateReader.updateClusterState(true);
clusterState = zkStateReader.getClusterState();
slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
if (Slice.ACTIVE.equals(slice1_0.getState()) && Slice.ACTIVE.equals(slice1_1.getState()))
break;
Thread.sleep(500);
}
log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);
assertNotNull("Cluster state does not contain shard1_0", slice1_0);
assertNotNull("Cluster state does not contain shard1_0", slice1_1);
assertEquals("shard1_0 is not active", Slice.ACTIVE, slice1_0.getState());
assertEquals("shard1_1 is not active", Slice.ACTIVE, slice1_1.getState());
assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
}
protected void splitShard(String shardId) throws SolrServerException, IOException {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
params.set("collection", "collection1");
params.set("shard", shardId);
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
baseServer.setConnectionTimeout(15000);
baseServer.setSoTimeout((int) (CollectionsHandler.DEFAULT_ZK_TIMEOUT * 5));
baseServer.request(request);
}
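  // The QueryRequest above is equivalent to issuing
  //   <baseUrl>/admin/collections?action=SPLITSHARD&collection=collection1&shard=shard1
  // against one of the nodes; the generous socket timeout accounts for the call blocking
  // while the split is carried out in this version of the collections API.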
protected void indexAndUpdateCount(List<DocRouter.Range> ranges, int[] docCounts, int id) throws Exception {
indexr("id", id);
// todo - hook in custom hashing
byte[] bytes = String.valueOf(id).getBytes("UTF-8");
int hash = Hash.murmurhash3_x86_32(bytes, 0, bytes.length, 0);
for (int i = 0; i < ranges.size(); i++) {
DocRouter.Range range = ranges.get(i);
if (range.includes(hash))
docCounts[i]++;
}
}
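  // How the expected counts are built: each id is hashed the same way the hash-based router
  // hashes a plain document id (murmur3_x86_32 over its UTF-8 bytes), and the docCounts slot
  // whose DocRouter.Range includes that hash is incremented. After the split completes, the
  // per-sub-shard counts reported by shard1_0 and shard1_1 must match these local totals.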
protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) {
for (int i = 0; i < docCounts.length; i++) {
int docCount = docCounts[i];
log.info("Expected docCount for shard1_{} = {}", i, docCount);
}
log.info("Actual docCount for shard1_0 = {}", shard10Count);
log.info("Actual docCount for shard1_1 = {}", shard11Count);
Map<String, String> idVsVersion = new HashMap<String, String>();
Map<String, SolrDocument> shard10Docs = new HashMap<String, SolrDocument>();
Map<String, SolrDocument> shard11Docs = new HashMap<String, SolrDocument>();
for (int i = 0; i < response.getResults().size(); i++) {
SolrDocument document = response.getResults().get(i);
idVsVersion.put(document.getFieldValue("id").toString(), document.getFieldValue("_version_").toString());
SolrDocument old = shard10Docs.put(document.getFieldValue("id").toString(), document);
if (old != null) {
log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_0. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
}
}
for (int i = 0; i < response2.getResults().size(); i++) {
SolrDocument document = response2.getResults().get(i);
String value = document.getFieldValue("id").toString();
String version = idVsVersion.get(value);
if (version != null) {
log.error("DUPLICATE: ID: " + value + " , shard1_0Version: " + version + " shard1_1Version:" + document.getFieldValue("_version_"));
}
SolrDocument old = shard11Docs.put(document.getFieldValue("id").toString(), document);
if (old != null) {
log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_1. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
}
}
}
@Override
protected SolrServer createNewSolrServer(String collection, String baseUrl) {
HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(collection, baseUrl);
server.setSoTimeout(5 * 60 * 1000);
return server;
}
@Override
protected SolrServer createNewSolrServer(int port) {
HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(port);
server.setSoTimeout(5 * 60 * 1000);
return server;
}
@Override
protected CloudSolrServer createCloudClient(String defaultCollection) throws MalformedURLException {
CloudSolrServer client = super.createCloudClient(defaultCollection);
client.getLbServer().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
return client;
}
}
| halentest/solr | solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java | Java | apache-2.0 | 11,050 |
package org.zstack.header.identity;
import org.zstack.header.message.APICreateMessage;
import org.zstack.header.message.APIMessage;
import org.zstack.header.message.APIParam;
@NeedRoles(roles = {IdentityRoles.CREATE_POLICY_ROLE})
public class APICreatePolicyMsg extends APICreateMessage implements AccountMessage {
@APIParam
private String name;
private String description;
@APIParam
private String policyData;
@Override
public String getAccountUuid() {
return this.getSession().getAccountUuid();
}
public String getPolicyData() {
return policyData;
}
public void setPolicyData(String policyData) {
this.policyData = policyData;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
}
| SoftwareKing/zstack | header/src/main/java/org/zstack/header/identity/APICreatePolicyMsg.java | Java | apache-2.0 | 1,059 |
// Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.profiler.memory;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.MapMaker;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ConditionallyThreadCompatible;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ThreadSafe;
import com.google.devtools.build.lib.packages.AspectClass;
import com.google.devtools.build.lib.packages.RuleClass;
import com.google.devtools.build.lib.packages.RuleFunction;
import com.google.devtools.build.lib.syntax.Debug;
import com.google.devtools.build.lib.syntax.Location;
import com.google.devtools.build.lib.syntax.StarlarkCallable;
import com.google.devtools.build.lib.syntax.StarlarkThread;
import com.google.monitoring.runtime.instrumentation.Sampler;
import com.google.perftools.profiles.ProfileProto.Function;
import com.google.perftools.profiles.ProfileProto.Line;
import com.google.perftools.profiles.ProfileProto.Profile;
import com.google.perftools.profiles.ProfileProto.Sample;
import com.google.perftools.profiles.ProfileProto.ValueType;
import java.io.FileOutputStream;
import java.io.IOException;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.zip.GZIPOutputStream;
import javax.annotation.Nullable;
/** Tracks allocations for memory reporting. */
@ConditionallyThreadCompatible
@SuppressWarnings("ThreadLocalUsage") // the AllocationTracker is effectively a global
public final class AllocationTracker implements Sampler, Debug.ThreadHook {
// A mapping from Java thread to StarlarkThread.
// Used to effect a hidden StarlarkThread parameter to sampleAllocation.
// TODO(adonovan): opt: merge the three different ThreadLocals in use here.
private final ThreadLocal<StarlarkThread> starlarkThread = new ThreadLocal<>();
@Override
public void onPushFirst(StarlarkThread thread) {
starlarkThread.set(thread);
}
@Override
public void onPopLast(StarlarkThread thread) {
starlarkThread.remove();
}
private static class AllocationSample {
@Nullable final RuleClass ruleClass; // Current rule being analysed, if any
@Nullable final AspectClass aspectClass; // Current aspect being analysed, if any
final ImmutableList<Frame> callstack; // Starlark callstack, if any
final long bytes;
AllocationSample(
@Nullable RuleClass ruleClass,
@Nullable AspectClass aspectClass,
ImmutableList<Frame> callstack,
long bytes) {
this.ruleClass = ruleClass;
this.aspectClass = aspectClass;
this.callstack = callstack;
this.bytes = bytes;
}
}
private static class Frame {
final String name;
final Location loc;
@Nullable final RuleFunction ruleFunction;
Frame(String name, Location loc, @Nullable RuleFunction ruleFunction) {
this.name = name;
this.loc = loc;
this.ruleFunction = ruleFunction;
}
}
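  // Samples are keyed weakly by the allocated object: once an object is garbage collected its sample
  // disappears, so this map approximates the currently live (retained) allocations.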
private final Map<Object, AllocationSample> allocations = new MapMaker().weakKeys().makeMap();
private final int samplePeriod;
private final int sampleVariance;
private boolean enabled = true;
/**
* Cheap wrapper class for a long. Avoids having to do two thread-local lookups per allocation.
*/
private static final class LongValue {
long value;
}
private final ThreadLocal<LongValue> currentSampleBytes = ThreadLocal.withInitial(LongValue::new);
private final ThreadLocal<Long> nextSampleBytes = ThreadLocal.withInitial(this::getNextSample);
private final Random random = new Random();
AllocationTracker(int samplePeriod, int variance) {
this.samplePeriod = samplePeriod;
this.sampleVariance = variance;
}
// Called by instrumentation.recordAllocation, which is in turn called
// by an instrumented version of the application assembled on the fly
// by instrumentation.AllocationInstrumenter.
// The instrumenter inserts a call to recordAllocation after every
// memory allocation instruction in the original class.
//
// This function runs within 'new', so is not supposed to allocate memory;
// see Sampler interface. In fact it allocates in nearly a dozen places.
// TODO(adonovan): suppress reentrant calls by setting a thread-local flag.
@Override
@ThreadSafe
public void sampleAllocation(int count, String desc, Object newObj, long size) {
if (!enabled) {
return;
}
@Nullable StarlarkThread thread = starlarkThread.get();
// Calling Debug.getCallStack is a dubious operation here.
// First it allocates memory, which breaks the Sampler contract.
// Second, the allocation could in principle occur while the thread's
// representation invariants are temporarily broken (that is, during
// the call to ArrayList.add when pushing a new stack frame).
// For now at least, the allocation done by ArrayList.add occurs before
// the representation of the ArrayList is changed, so it is safe,
// but this is a fragile assumption.
ImmutableList<Debug.Frame> callstack =
thread != null ? Debug.getCallStack(thread) : ImmutableList.of();
RuleClass ruleClass = CurrentRuleTracker.getRule();
AspectClass aspectClass = CurrentRuleTracker.getAspect();
// Should we bother sampling?
if (callstack.isEmpty() && ruleClass == null && aspectClass == null) {
return;
}
// Convert the thread's stack right away to our internal form.
// It is not safe to inspect Debug.Frame references once the thread resumes,
// and keeping StarlarkCallable values live defeats garbage collection.
ImmutableList.Builder<Frame> frames = ImmutableList.builderWithExpectedSize(callstack.size());
for (Debug.Frame fr : callstack) {
// The frame's PC location is currently not updated at every step,
// only at function calls, so the leaf frame's line number may be
// slightly off; see the tests.
// TODO(b/149023294): remove comment when we move to a compiled representation.
StarlarkCallable fn = fr.getFunction();
frames.add(
new Frame(
fn.getName(),
fr.getLocation(),
fn instanceof RuleFunction ? (RuleFunction) fn : null));
}
// If we start getting stack overflows here, it's because the memory sampling
// implementation has changed to call back into the sampling method immediately on
// every allocation. Since thread locals can allocate, this can in this case lead
// to infinite recursion. This method will then need to be rewritten to not
// allocate, or at least not allocate to obtain its sample counters.
LongValue bytesValue = currentSampleBytes.get();
long bytes = bytesValue.value + size;
if (bytes < nextSampleBytes.get()) {
bytesValue.value = bytes;
return;
}
bytesValue.value = 0;
nextSampleBytes.set(getNextSample());
allocations.put(newObj, new AllocationSample(ruleClass, aspectClass, frames.build(), bytes));
}
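  // Returns the number of bytes a thread must allocate before the next sample is taken. For example,
  // with samplePeriod = 256 KB and sampleVariance = 32 KB, thresholds are drawn uniformly from
  // [224 KB, 288 KB).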
private long getNextSample() {
return (long) samplePeriod
+ (sampleVariance > 0 ? (random.nextInt(sampleVariance * 2) - sampleVariance) : 0);
}
/** A pair of rule/aspect name and the bytes it consumes. */
public static final class RuleBytes {
private final String name;
private long bytes;
public RuleBytes(String name) {
this.name = name;
}
/** The number of bytes total occupied by this rule or aspect class. */
public long getBytes() {
return bytes;
}
public RuleBytes addBytes(long bytes) {
this.bytes += bytes;
return this;
}
@Override
public String toString() {
return String.format("RuleBytes(%s, %d)", name, bytes);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
RuleBytes ruleBytes = (RuleBytes) o;
return bytes == ruleBytes.bytes && Objects.equal(name, ruleBytes.name);
}
@Override
public int hashCode() {
return Objects.hashCode(name, bytes);
}
}
// If the topmost stack entry is a call to a rule function, returns it.
@Nullable
private static RuleFunction getRule(AllocationSample sample) {
Frame top = Iterables.getLast(sample.callstack, null);
return top != null ? top.ruleFunction : null;
}
/**
* Returns the total memory consumption for rules and aspects, keyed by {@link RuleClass#getKey}
* or {@link AspectClass#getKey}.
*/
public void getRuleMemoryConsumption(
Map<String, RuleBytes> rules, Map<String, RuleBytes> aspects) {
// Make sure we don't track our own allocations
enabled = false;
System.gc();
// Get loading phase memory for rules.
for (AllocationSample sample : allocations.values()) {
RuleFunction rule = getRule(sample);
if (rule != null) {
RuleClass ruleClass = rule.getRuleClass();
String key = ruleClass.getKey();
RuleBytes ruleBytes = rules.computeIfAbsent(key, k -> new RuleBytes(ruleClass.getName()));
rules.put(key, ruleBytes.addBytes(sample.bytes));
}
}
// Get analysis phase memory for rules and aspects
for (AllocationSample sample : allocations.values()) {
if (sample.ruleClass != null) {
String key = sample.ruleClass.getKey();
RuleBytes ruleBytes =
rules.computeIfAbsent(key, k -> new RuleBytes(sample.ruleClass.getName()));
rules.put(key, ruleBytes.addBytes(sample.bytes));
}
if (sample.aspectClass != null) {
String key = sample.aspectClass.getKey();
RuleBytes ruleBytes =
aspects.computeIfAbsent(key, k -> new RuleBytes(sample.aspectClass.getName()));
aspects.put(key, ruleBytes.addBytes(sample.bytes));
}
}
enabled = true;
}
/** Dumps all Starlark analysis time allocations to a pprof-compatible file. */
public void dumpSkylarkAllocations(String path) throws IOException {
// Make sure we don't track our own allocations
enabled = false;
System.gc();
Profile profile = buildMemoryProfile();
try (GZIPOutputStream outputStream = new GZIPOutputStream(new FileOutputStream(path))) {
profile.writeTo(outputStream);
outputStream.finish();
}
enabled = true;
}
Profile buildMemoryProfile() {
Profile.Builder profile = Profile.newBuilder();
StringTable stringTable = new StringTable(profile);
FunctionTable functionTable = new FunctionTable(profile, stringTable);
LocationTable locationTable = new LocationTable(profile, functionTable);
profile.addSampleType(
ValueType.newBuilder()
.setType(stringTable.get("memory"))
.setUnit(stringTable.get("bytes"))
.build());
for (AllocationSample sample : allocations.values()) {
// Skip empty callstacks
if (sample.callstack.isEmpty()) {
continue;
}
Sample.Builder b = Sample.newBuilder().addValue(sample.bytes);
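      // pprof expects locations ordered leaf-first, so reverse the root-first Starlark callstack.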
for (Frame fr : sample.callstack.reverse()) {
b.addLocationId(locationTable.get(fr.loc.file(), fr.name, fr.loc.line()));
}
profile.addSample(b.build());
}
profile.setTimeNanos(Instant.now().getEpochSecond() * 1000000000);
return profile.build();
}
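  // The helper tables below intern strings, functions and locations into the pprof proto,
  // handing out stable ids and creating each proto entry on first use.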
private static class StringTable {
final Profile.Builder profile;
final Map<String, Long> table = new HashMap<>();
long index = 0;
StringTable(Profile.Builder profile) {
this.profile = profile;
get(""); // 0 is reserved for the empty string
}
long get(String str) {
return table.computeIfAbsent(
str,
key -> {
profile.addStringTable(key);
return index++;
});
}
}
private static class FunctionTable {
final Profile.Builder profile;
final StringTable stringTable;
final Map<String, Long> table = new HashMap<>();
long index = 1; // 0 is reserved
FunctionTable(Profile.Builder profile, StringTable stringTable) {
this.profile = profile;
this.stringTable = stringTable;
}
long get(String file, String function) {
return table.computeIfAbsent(
file + "#" + function,
key -> {
Function fn =
Function.newBuilder()
.setId(index)
.setFilename(stringTable.get(file))
.setName(stringTable.get(function))
.build();
profile.addFunction(fn);
return index++;
});
}
}
private static class LocationTable {
final Profile.Builder profile;
final FunctionTable functionTable;
final Map<String, Long> table = new HashMap<>();
long index = 1; // 0 is reserved
LocationTable(Profile.Builder profile, FunctionTable functionTable) {
this.profile = profile;
this.functionTable = functionTable;
}
long get(String file, String function, long line) {
return table.computeIfAbsent(
file + "#" + function + "#" + line,
key -> {
com.google.perftools.profiles.ProfileProto.Location location =
com.google.perftools.profiles.ProfileProto.Location.newBuilder()
.setId(index)
.addLine(
Line.newBuilder()
.setFunctionId(functionTable.get(file, function))
.setLine(line)
.build())
.build();
profile.addLocation(location);
return index++;
});
}
}
}
| akira-baruah/bazel | src/main/java/com/google/devtools/build/lib/profiler/memory/AllocationTracker.java | Java | apache-2.0 | 14,364 |
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
package net.sf.mmm.service.base.client;
import net.sf.mmm.service.api.RemoteInvocationCall;
import net.sf.mmm.util.lang.api.function.Consumer;
/**
* This is a simple container for the data corresponding to a {@link RemoteInvocationCall}.
*
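 * A minimal construction sketch (hypothetical {@code onSuccess}/{@code onFailure} handlers; assumes
 * {@code Consumer} is a single-method interface, so method references are accepted):
 *
 * <pre>{@code
 * RemoteInvocationCallData<MyResult, RemoteInvocationCall> data =
 *     new RemoteInvocationCallData<>(this::onSuccess, this::onFailure);
 * data.setCall(call);
 * }</pre>
 *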
* @param <RESULT> is the generic type of the method return-type.
* @param <CALL> is the generic type of the {@link #getCall() call} data.
* @author Joerg Hohwiller (hohwille at users.sourceforge.net)
* @since 1.0.0
*/
public class RemoteInvocationCallData<RESULT, CALL extends RemoteInvocationCall> {
/** The callback to receive the service result on success. */
private final Consumer<? extends RESULT> successCallback;
/** The callback to receive a potential service failure. */
private final Consumer<Throwable> failureCallback;
/** @see #getCall() */
private CALL call;
/**
* The constructor.
*
* @param successCallback is the callback that {@link Consumer#accept(Object) receives} the result on
* success.
* @param failureCallback is the callback that {@link Consumer#accept(Object) receives} the failure on
* error.
*/
public RemoteInvocationCallData(Consumer<? extends RESULT> successCallback, Consumer<Throwable> failureCallback) {
super();
this.successCallback = successCallback;
this.failureCallback = failureCallback;
}
/**
* @return the successCallback.
*/
public Consumer<? extends RESULT> getSuccessCallback() {
return this.successCallback;
}
/**
* @return the failureCallback.
*/
public Consumer<Throwable> getFailureCallback() {
return this.failureCallback;
}
/**
* @return the actual call data (either {@link net.sf.mmm.service.api.command.RemoteInvocationCommand}
* itself or {@link net.sf.mmm.service.base.rpc.GenericRemoteInvocationRpcCall}).
*/
public CALL getCall() {
return this.call;
}
/**
* @param call is the new value of {@link #getCall()}.
*/
public void setCall(CALL call) {
assert (this.call == null);
assert (call != null);
this.call = call;
}
}
| m-m-m/service | base/src/main/java/net/sf/mmm/service/base/client/RemoteInvocationCallData.java | Java | apache-2.0 | 2,216 |
/**
*
*/
package jframe.core.plugin;
import java.util.EventListener;
/**
* @author dzh
* @date Sep 12, 2013 9:42:33 PM
* @since 1.0
*/
public interface PluginListener extends EventListener {
void pluginChanged(PluginEvent event);
}
| dzh/jframe | jframe/jframe-core/src/main/java/jframe/core/plugin/PluginListener.java | Java | apache-2.0 | 244 |
/*
* Created on May 17, 2004
*
* Paros and its related class files.
*
* Paros is an HTTP/HTTPS proxy for assessing web application security.
* Copyright (C) 2003-2004 Chinotec Technologies Company
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the Clarified Artistic License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Clarified Artistic License for more details.
*
* You should have received a copy of the Clarified Artistic License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
// ZAP: 2013/01/16 Minor fix to prevent NPE
// ZAP: 2014/10/17 Issue 1308: Updated for latest icons
// ZAP: 2015/02/10 Issue 1528: Support user defined font size
// ZAP: 2015/09/07 Move icon loading to a utility class
package org.parosproxy.paros.view;
import java.awt.Dimension;
import java.awt.Frame;
import java.awt.Image;
import java.awt.Point;
import java.awt.Toolkit;
import java.awt.event.ComponentAdapter;
import java.awt.event.ComponentEvent;
import java.awt.event.WindowEvent;
import java.awt.event.WindowStateListener;
import java.util.ArrayList;
import java.util.List;
import java.util.prefs.BackingStoreException;
import java.util.prefs.Preferences;
import javax.swing.JFrame;
import org.apache.log4j.Logger;
import org.parosproxy.paros.Constant;
import org.zaproxy.zap.utils.DisplayUtils;
/**
* Generic Frame, which handles some basic properties.
* <ul>
* <li>Sets the icon(s) for the frame, which are the ZAP icons</li>
* <li>Centers the frame on screen</li>
* <li>Sets the frame to _not_ visible</li>
* <li>Sets a common font for the frame</li>
* <li>Sets a default title (ZAP application name)</li>
* <li>Preserves window state, location and size correctly (will survive multiple session)</li>
* </ul>
* Hint for implementers: If you use this class,
* don't use {@link #setSize(Dimension)}, but {@link #setPreferredSize(Dimension)}
* instead. Also, don't use {@link #setLocation(Point)}. This abstract class
* will automatically take care of size and position.
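 * <p>
 * A minimal usage sketch (hypothetical subclass name):
 * <pre>{@code
 * public class AboutFrame extends AbstractFrame {
 *     public AboutFrame() {
 *         setPreferredSize(new Dimension(400, 300)); // preferred size, not setSize()/setLocation()
 *         pack();
 *     }
 * }
 * }</pre>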
*/
public abstract class AbstractFrame extends JFrame {
private static final long serialVersionUID = 6751593232255236597L;
private static final String PREF_WINDOW_STATE = "window.state";
private static final String PREF_WINDOW_SIZE = "window.size";
private static final String PREF_WINDOW_POSITION = "window.position";
private static final int WINDOW_DEFAULT_WIDTH = 800;
private static final int WINDOW_DEFAULT_HEIGHT = 600;
/**
	 * Hint: Preferences are stored per package, so we prefix each key with the
	 * concrete class name to keep different frames from overwriting each other.
*/
private final Preferences preferences;
private final String prefnzPrefix = this.getClass().getSimpleName()+".";
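	// e.g. the stored key becomes "MainFrame.window.size" for a (hypothetical) subclass named MainFrame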
private final Logger logger = Logger.getLogger(AbstractFrame.class);
/**
* This is the default constructor
*/
public AbstractFrame() {
super();
this.preferences = Preferences.userNodeForPackage(getClass());
initialize();
}
/**
* This method initializes this
*/
private void initialize() {
// ZAP: Rebrand
this.setIconImages(DisplayUtils.getZapIconImages());
this.setVisible(false);
this.setTitle(Constant.PROGRAM_NAME);
final Dimension dim = restoreWindowSize();
if (dim == null) {
this.setSize(WINDOW_DEFAULT_WIDTH, WINDOW_DEFAULT_HEIGHT);
}
final Point point = restoreWindowLocation();
if (point == null) {
centerFrame();
}
restoreWindowState();
this.addWindowStateListener(new FrameWindowStateListener());
this.addComponentListener(new FrameResizedListener());
}
/**
* Centre this frame.
*
*/
public void centerFrame() {
final Dimension screenSize = Toolkit.getDefaultToolkit().getScreenSize();
final Dimension frameSize = this.getSize();
if (frameSize.height > screenSize.height) {
frameSize.height = screenSize.height;
}
if (frameSize.width > screenSize.width) {
frameSize.width = screenSize.width;
}
this.setLocation((screenSize.width - frameSize.width) / 2, (screenSize.height - frameSize.height) / 2);
}
/**
* @param windowstate integer value, see {@link JFrame#getExtendedState()}
*/
private void saveWindowState(int windowstate) {
if ((windowstate & Frame.ICONIFIED) == Frame.ICONIFIED) {
preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.ICONFIED.toString());
if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.ICONFIED);
}
if ((windowstate & Frame.MAXIMIZED_BOTH) == Frame.MAXIMIZED_BOTH) {
preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.MAXIMIZED.toString());
if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.MAXIMIZED);
}
		if (windowstate == Frame.NORMAL) { // hint: Frame.NORMAL == 0, that's why no bit masking is needed
preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.NORMAL.toString());
if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.NORMAL);
}
}
/**
* Loads and sets the last window state of the frame.
* Additionally, the last state will be returned.
*
* @return last window state OR null
*/
private SimpleWindowState restoreWindowState() {
SimpleWindowState laststate = null;
final String statestr = preferences.get(prefnzPrefix+PREF_WINDOW_STATE, null);
if (logger.isDebugEnabled()) logger.debug("Restoring preference "+PREF_WINDOW_STATE+"=" + statestr);
if (statestr != null) {
SimpleWindowState state = null;
try {
state = SimpleWindowState.valueOf(statestr);
} catch (final IllegalArgumentException e) { state = null; }
if (state != null) {
switch (state) {
case ICONFIED: this.setExtendedState(Frame.ICONIFIED); break;
case NORMAL: this.setExtendedState(Frame.NORMAL); break;
case MAXIMIZED: this.setExtendedState(Frame.MAXIMIZED_BOTH); break;
default:
logger.error("Invalid window state (nothing will changed): " + statestr);
}
}
laststate = state;
}
return laststate;
}
/**
	 * Saves the size of this frame, but only if the window state is 'normal'.
	 * If the window state is iconified or maximized, the size is not saved.
*
* @param size
*/
private void saveWindowSize(Dimension size) {
if (size != null) {
if (getExtendedState() == Frame.NORMAL) {
if (logger.isDebugEnabled()) logger.debug("Saving preference " + PREF_WINDOW_SIZE + "=" + size.width + "," + size.height);
this.preferences.put(prefnzPrefix+PREF_WINDOW_SIZE, size.width + "," + size.height);
} else {
				if (logger.isDebugEnabled()) logger.debug("Preference " + PREF_WINDOW_SIZE + " not saved, because the window state is not 'normal'.");
}
}
}
/**
	 * Loads and sets the saved size preference for this frame.
	 *
	 * @return the size of the frame, or null if no preference was saved.
*/
private Dimension restoreWindowSize() {
Dimension result = null;
final String sizestr = preferences.get(prefnzPrefix+PREF_WINDOW_SIZE, null);
if (sizestr != null) {
int width = 0;
int height = 0;
final String[] sizes = sizestr.split("[,]");
try {
width = Integer.parseInt(sizes[0].trim());
height = Integer.parseInt(sizes[1].trim());
} catch (final Exception e) {
				// ignore; the zero defaults and the check below guard against malformed values
}
if (width > 0 && height > 0) {
result = new Dimension(width, height);
if (logger.isDebugEnabled()) logger.debug("Restoring preference " + PREF_WINDOW_SIZE + "=" + result.width + "," + result.height);
this.setSize(result);
}
}
return result;
}
/**
	 * Saves the location of this frame, but only if the window state is 'normal'.
	 * If the window state is iconified or maximized, the location is not saved.
*
* @param point
*/
private void saveWindowLocation(Point point) {
if (point != null) {
if (getExtendedState() == Frame.NORMAL) {
if (logger.isDebugEnabled()) logger.debug("Saving preference " + PREF_WINDOW_POSITION + "=" + point.x + "," + point.y);
this.preferences.put(prefnzPrefix+PREF_WINDOW_POSITION, point.x + "," + point.y);
} else {
				if (logger.isDebugEnabled()) logger.debug("Preference " + PREF_WINDOW_POSITION + " not saved, because the window state is not 'normal'.");
}
}
}
/**
	 * Loads and sets the saved location preference for this frame.
	 *
	 * @return the location of the frame, or null if no preference was saved.
*/
private Point restoreWindowLocation() {
Point result = null;
final String sizestr = preferences.get(prefnzPrefix+PREF_WINDOW_POSITION, null);
if (sizestr != null) {
int x = 0;
int y = 0;
final String[] sizes = sizestr.split("[,]");
try {
x = Integer.parseInt(sizes[0].trim());
y = Integer.parseInt(sizes[1].trim());
} catch (final Exception e) {
				// ignore; the zero defaults and the check below guard against malformed values
}
if (x > 0 && y > 0) {
result = new Point(x, y);
if (logger.isDebugEnabled()) logger.debug("Restoring preference " + PREF_WINDOW_POSITION + "=" + result.x + "," + result.y);
this.setLocation(result);
}
}
return result;
}
/**
* @deprecated (2.4.2) Use {@link DisplayUtils#getZapIconImages()} instead. It will be removed in a future release.
*/
@Deprecated
@SuppressWarnings("javadoc")
protected List<Image> loadIconImages() {
return new ArrayList<>(DisplayUtils.getZapIconImages());
}
@Override
public void dispose() {
super.dispose();
try {
this.preferences.flush();
} catch (final BackingStoreException e) {
logger.error("Error while saving the preferences", e);
}
}
/*
* ========================================================================
*/
private final class FrameWindowStateListener implements WindowStateListener {
@Override
public void windowStateChanged(WindowEvent e) {
saveWindowState(e.getNewState());
}
}
private final class FrameResizedListener extends ComponentAdapter {
@Override
public void componentResized(ComponentEvent e) {
if (e.getComponent() != null) {
saveWindowSize(e.getComponent().getSize());
}
}
@Override
public void componentMoved(ComponentEvent e) {
if (e.getComponent() != null) {
saveWindowLocation(e.getComponent().getLocation());
}
}
}
/**
* Simplified version for easier handling of the states ...
*/
private enum SimpleWindowState {
ICONFIED,
NORMAL,
MAXIMIZED;
}
} // @jve:visual-info decl-index=0 visual-constraint="31,17"
| GillesMoris/OSS | src/org/parosproxy/paros/view/AbstractFrame.java | Java | apache-2.0 | 10,722 |
/**
* Copyright 2020 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.accountstats;
import com.codahale.metrics.MetricRegistry;
import com.github.ambry.config.AccountStatsMySqlConfig;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.VerifiableProperties;
import com.github.ambry.server.HostAccountStorageStatsWrapper;
import com.github.ambry.server.HostPartitionClassStorageStatsWrapper;
import com.github.ambry.server.StatsHeader;
import com.github.ambry.server.StatsReportType;
import com.github.ambry.server.StatsSnapshot;
import com.github.ambry.server.StatsWrapper;
import com.github.ambry.server.StorageStatsUtil;
import com.github.ambry.server.StorageStatsUtilTest;
import com.github.ambry.server.storagestats.AggregatedAccountStorageStats;
import com.github.ambry.server.storagestats.AggregatedPartitionClassStorageStats;
import com.github.ambry.server.storagestats.ContainerStorageStats;
import com.github.ambry.server.storagestats.HostAccountStorageStats;
import com.github.ambry.server.storagestats.HostPartitionClassStorageStats;
import com.github.ambry.utils.Pair;
import com.github.ambry.utils.TestUtils;
import com.github.ambry.utils.Utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import static org.junit.Assert.*;
/**
* Integration tests for {@link AccountStatsMySqlStore}.
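 * <p>These tests assume a reachable MySQL instance; connection settings come from the
 * {@code accountstats_mysql.properties} resource loaded in {@code createAccountStatsMySqlStore}.</p>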
*/
@RunWith(Parameterized.class)
public class AccountStatsMySqlStoreIntegrationTest {
private static final String clusterName1 = "Ambry-test";
private static final String clusterName2 = "Ambry-random";
// hostname1 and hostname2 are the same, but with different port numbers
private static final String hostname1 = "ambry1.test.github.com";
private static final String hostname2 = "ambry1.test.github.com";
private static final String hostname3 = "ambry3.test.github.com";
private static final int port1 = 12345;
private static final int port2 = 12346;
private static final int port3 = 12347;
private final int batchSize;
private final AccountStatsMySqlStore mySqlStore;
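  // Run every test with two update batch sizes (0 and an arbitrary 17) so different write/batching
  // paths of the store are exercised.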
@Parameterized.Parameters
public static List<Object[]> data() {
return Arrays.asList(new Object[][]{{0}, {17}});
}
public AccountStatsMySqlStoreIntegrationTest(int batchSize) throws Exception {
this.batchSize = batchSize;
mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
}
@Before
public void before() throws Exception {
mySqlStore.cleanupTables();
}
@After
public void after() {
mySqlStore.shutdown();
}
/**
* Tests to store multiple stats for multiple hosts and recover stats from database.
* @throws Exception
*/
@Test
public void testMultiStoreStats() throws Exception {
AccountStatsMySqlStore mySqlStore1 = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
AccountStatsMySqlStore mySqlStore2 = createAccountStatsMySqlStore(clusterName1, hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Generating StatsWrappers, store StatsWrappers and retrieve StatsWrappers
StatsWrapper stats1 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
StatsWrapper stats2 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
StatsWrapper stats3 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore1.storeAccountStats(stats1);
mySqlStore2.storeAccountStats(stats2);
mySqlStore3.storeAccountStats(stats3);
assertTableSize(mySqlStore1, 3 * 10 * 10);
StatsWrapper obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
StatsWrapper obtainedStats2 = mySqlStore2.queryAccountStatsByHost(hostname2, port2);
StatsWrapper obtainedStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
assertTwoStatsSnapshots(obtainedStats1.getSnapshot(), stats1.getSnapshot());
assertTwoStatsSnapshots(obtainedStats2.getSnapshot(), stats2.getSnapshot());
assertTwoStatsSnapshots(obtainedStats3.getSnapshot(), stats3.getSnapshot());
// Generating HostAccountStorageStatsWrappers, store and retrieve them
HostAccountStorageStatsWrapper hostStats1 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
HostAccountStorageStatsWrapper hostStats2 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
HostAccountStorageStatsWrapper hostStats3 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore1.storeHostAccountStorageStats(hostStats1);
mySqlStore2.storeHostAccountStorageStats(hostStats2);
mySqlStore3.storeHostAccountStorageStats(hostStats3);
HostAccountStorageStatsWrapper obtainedHostStats1 =
mySqlStore1.queryHostAccountStorageStatsByHost(hostname1, port1);
HostAccountStorageStatsWrapper obtainedHostStats2 =
mySqlStore2.queryHostAccountStorageStatsByHost(hostname2, port2);
HostAccountStorageStatsWrapper obtainedHostStats3 =
mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
assertEquals(hostStats1.getStats().getStorageStats(), obtainedHostStats1.getStats().getStorageStats());
assertEquals(hostStats2.getStats().getStorageStats(), obtainedHostStats2.getStats().getStorageStats());
assertEquals(hostStats3.getStats().getStorageStats(), obtainedHostStats3.getStats().getStorageStats());
// Retrieve StatWrappers
obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
StatsSnapshot converted =
StorageStatsUtil.convertHostAccountStorageStatsToStatsSnapshot(hostStats1.getStats(), false);
assertTwoStatsSnapshots(converted, obtainedStats1.getSnapshot());
mySqlStore1.shutdown();
mySqlStore2.shutdown();
mySqlStore3.shutdown();
}
@Test
public void testEmptyStatsWhenReadingPreviousStatsFromMysqlDb() throws Exception {
    // Write new stats into the database.
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(1, 1, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
    // Reinitialize the mySqlStore and write new stats for the same partition.
mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
assertTrue(
mySqlStore.getPreviousHostAccountStorageStatsWrapper().getStats().getStorageStats().containsKey((long) 0));
HostAccountStorageStatsWrapper stats2 =
generateHostAccountStorageStatsWrapper(0, 0, 0, StatsReportType.ACCOUNT_REPORT);
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(stats2.getStats().getStorageStats());
newStorageStats.put((long) 0,
        new HashMap<>()); // Remove partition 0's storage stats data; this removes the entire partition from the database
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
// empty stats should remove all the data in the database
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
}
@Test
public void testEmptyStatsWhenReadingPreviousStatsFromLocalBackUpFile() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, new HashMap<>());
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 10));
// Write a new stats with partition 10 still empty
HostAccountStorageStatsWrapper stats2 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, new HashMap<>());
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats2.getStats().getStorageStats().containsKey((long) 10));
// Write a new stats with partition 10 not empty
HostAccountStorageStatsWrapper stats3 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, stats.getStats().getStorageStats().get((long) 1));
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats3.getStats().getStorageStats().containsKey((long) 10));
// Write an empty HostAccountStorageStats
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
// Empty storage stats should remove all the data in the database
HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats4.getStats().getStorageStats().isEmpty());
// Write an empty HostAccountStorageStats again
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
HostAccountStorageStatsWrapper obtainedStats5 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats5.getStats().getStorageStats().isEmpty());
HostAccountStorageStatsWrapper stats6 =
generateHostAccountStorageStatsWrapper(20, 20, 20, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats6);
HostAccountStorageStatsWrapper obtainedStats6 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats6.getStats().getStorageStats(), stats6.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Test to delete partition, account and container data from database
* @throws Exception
*/
@Test
public void testStatsDeletePartitionAccountContainer() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
// Now remove one partition from stats
HostAccountStorageStats storageStatsCopy = new HostAccountStorageStats(stats.getStats());
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
new HashMap<>(storageStatsCopy.getStorageStats());
newStorageStatsMap.remove((long) 1);
HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats2);
HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats2.getStats().getStorageStats(), stats2.getStats().getStorageStats());
// Now remove one account from stats
storageStatsCopy = new HostAccountStorageStats(stats2.getStats());
newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
newStorageStatsMap.get((long) 3).remove((short) 1);
HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats2.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats3);
HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats3.getStats().getStorageStats(), stats3.getStats().getStorageStats());
// Now remove some containers
storageStatsCopy = new HostAccountStorageStats(stats3.getStats());
newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
for (short containerId : new short[]{0, 1, 2}) {
newStorageStatsMap.get((long) 3).get((short) 3).remove(containerId);
}
HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats3.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats4);
HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats4.getStats().getStorageStats(), stats4.getStats().getStorageStats());
// Now write the stats back
stats = generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
   * Tests to store multiple stats for one host and recover them from the database.
* @throws Exception
*/
@Test
  public void testStoreMultipleWrites() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats1 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats1);
HostAccountStorageStats hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
ContainerStorageStats origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats2);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats2.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0,
new ContainerStorageStats.Builder(origin).physicalStorageUsage(origin.getPhysicalStorageUsage() + 1)
.build());
HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats3);
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats3.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0, new ContainerStorageStats.Builder(origin).numberOfBlobs(origin.getNumberOfBlobs() + 1).build());
HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats4);
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats4.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Test the methods for storing, deleting and fetch aggregated account stats.
* @throws Exception
*/
@Test
public void testAggregatedAccountStats() throws Exception {
Map<String, Map<String, Long>> containerStorageUsages = TestUtils.makeStorageMap(10, 10, 100000, 1000);
StatsSnapshot snapshot = TestUtils.makeAccountStatsSnapshotFromContainerStorageMap(containerStorageUsages);
mySqlStore.storeAggregatedAccountStats(snapshot);
Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(snapshot, obtainedSnapshot);
// Fetching aggregated account stats for clustername2 should result in empty stats
assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
// Change one value and store it to mysql database again
StatsSnapshot newSnapshot = new StatsSnapshot(snapshot);
newSnapshot.getSubMap()
.get(Utils.statsAccountKey((short) 1))
.getSubMap()
.get(Utils.statsContainerKey((short) 1))
.setValue(1);
newSnapshot.updateValue();
containerStorageUsages.get("1").put("1", 1L);
mySqlStore.storeAggregatedAccountStats(newSnapshot);
obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
// Delete account and container
newSnapshot = new StatsSnapshot(newSnapshot);
newSnapshot.getSubMap().remove(Utils.statsAccountKey((short) 1));
newSnapshot.getSubMap()
.get(Utils.statsAccountKey((short) 2))
.getSubMap()
.remove(Utils.statsContainerKey((short) 1));
newSnapshot.updateValue();
// Now remove all containers for account 1 and container 1 of account 2
for (String containerId : containerStorageUsages.get(String.valueOf(1)).keySet()) {
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, Short.valueOf(containerId));
}
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(newSnapshot, obtainedSnapshot);
mySqlStore.shutdown();
}
/**
* Test the methods for storing, deleting and fetch aggregated account storage stats.
* @throws Exception
*/
@Test
public void testAggregatedAccountStorageStats() throws Exception {
AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
// Compare container usage map
Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(aggregatedAccountStorageStats, false),
obtainedContainerStorageUsages);
// Compare StatsSnapshot
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(
StorageStatsUtil.convertAggregatedAccountStorageStatsToStatsSnapshot(aggregatedAccountStorageStats, false),
obtainedSnapshot);
// Compare AggregatedAccountStorageStats
AggregatedAccountStorageStats obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
    // Fetching aggregated account stats for clusterName2 should result in empty stats
assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
assertEquals(mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName2).getStorageStats().size(), 0);
// Change one value and store it to mysql database again
Map<Short, Map<Short, ContainerStorageStats>> newStorageStatsMap =
new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
ContainerStorageStats origin = newStorageStatsMap.get((short) 1).get((short) 1);
newStorageStatsMap.get((short) 1)
.put((short) 1,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
aggregatedAccountStorageStats = new AggregatedAccountStorageStats(newStorageStatsMap);
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
// Delete account and container
newStorageStatsMap = new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
newStorageStatsMap.remove((short) 1);
newStorageStatsMap.get((short) 2).remove((short) 1);
// Now remove all containers for account 1 and container 1 of account 2
for (short containerId : aggregatedAccountStorageStats.getStorageStats().get((short) 1).keySet()) {
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, containerId);
}
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
mySqlStore.shutdown();
}
/**
* Test methods to store, delete and fetch monthly aggregated stats
* @throws Exception
*/
@Test
public void testMonthlyAggregatedStats() throws Exception {
String monthValue = "2020-01";
AggregatedAccountStorageStats currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
if (currentAggregatedStats.getStorageStats().size() == 0) {
AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
}
    // Fetch the recorded month; it should return an empty string
Assert.assertEquals("", mySqlStore.queryRecordedMonth());
mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
Map<String, Map<String, Long>> monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
monthlyContainerStorageUsages);
String obtainedMonthValue = mySqlStore.queryRecordedMonth();
assertTrue(obtainedMonthValue.equals(monthValue));
// Change the value and store it back to mysql database
monthValue = "2020-02";
currentAggregatedStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(currentAggregatedStats);
mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
monthlyContainerStorageUsages);
obtainedMonthValue = mySqlStore.queryRecordedMonth();
assertTrue(obtainedMonthValue.equals(monthValue));
// Delete the snapshots
mySqlStore.deleteSnapshotOfAggregatedAccountStats();
assertTrue(mySqlStore.queryMonthlyAggregatedAccountStats(false).isEmpty());
}
/**
   * Test methods to store and fetch partition class names, partition name-to-id mappings and partition class stats.
* @throws Exception
*/
@Test
public void testHostPartitionClassStats() throws Exception {
// First write some stats to account reports
testMultiStoreStats();
StatsWrapper accountStats1 = mySqlStore.queryAccountStatsByHost(hostname1, port1);
StatsWrapper accountStats2 = mySqlStore.queryAccountStatsByHost(hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
StatsWrapper accountStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
    // From these account stats, create partition class stats.
Set<String> allPartitionKeys = new HashSet<String>() {
{
addAll(accountStats1.getSnapshot().getSubMap().keySet());
addAll(accountStats2.getSnapshot().getSubMap().keySet());
addAll(accountStats3.getSnapshot().getSubMap().keySet());
}
};
List<String> partitionClassNames = Arrays.asList("default", "new");
Map<String, String> partitionKeyToClassName = new HashMap<>();
int ind = 0;
for (String partitionKey : allPartitionKeys) {
partitionKeyToClassName.put(partitionKey, partitionClassNames.get(ind % partitionClassNames.size()));
ind++;
}
StatsWrapper partitionClassStats1 =
convertAccountStatsToPartitionClassStats(accountStats1, partitionKeyToClassName);
StatsWrapper partitionClassStats2 =
convertAccountStatsToPartitionClassStats(accountStats2, partitionKeyToClassName);
StatsWrapper partitionClassStats3 =
convertAccountStatsToPartitionClassStats(accountStats3, partitionKeyToClassName);
mySqlStore.storePartitionClassStats(partitionClassStats1);
mySqlStore.storePartitionClassStats(partitionClassStats2);
mySqlStore3.storePartitionClassStats(partitionClassStats3);
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
Map<String, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
.stream()
.flatMap(
ent -> ent.getValue().stream().map(pid -> new Pair<String, String>(ent.getKey(), "Partition[" + pid + "]")))
.collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
assertEquals(partitionKeyToClassName, dbPartitionKeyToClassName);
StatsWrapper obtainedStats1 = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(partitionClassStats1.getSnapshot(), obtainedStats1.getSnapshot());
StatsWrapper obtainedStats2 = mySqlStore.queryPartitionClassStatsByHost(hostname2, port2, partitionNameAndIds);
assertEquals(partitionClassStats2.getSnapshot(), obtainedStats2.getSnapshot());
StatsWrapper obtainedStats3 = mySqlStore3.queryPartitionClassStatsByHost(hostname3, port3, partitionNameAndIds);
assertEquals(partitionClassStats3.getSnapshot(), obtainedStats3.getSnapshot());
mySqlStore3.shutdown();
}
/**
   * Test methods to store and fetch partition class names, partition name-to-id mappings and partition class storage stats.
* @throws Exception
*/
@Test
public void testHostPartitionClassStorageStats() throws Exception {
// First write some stats to account reports
testMultiStoreStats();
HostAccountStorageStatsWrapper accountStats1 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
HostAccountStorageStatsWrapper accountStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
HostAccountStorageStatsWrapper accountStats3 = mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
    // From these account stats, create partition class stats.
Set<Long> allPartitionKeys = new HashSet<Long>() {
{
addAll(accountStats1.getStats().getStorageStats().keySet());
addAll(accountStats2.getStats().getStorageStats().keySet());
addAll(accountStats3.getStats().getStorageStats().keySet());
}
};
List<String> partitionClassNames = Arrays.asList("default", "new");
Map<Long, String> partitionIdToClassName = new HashMap<>();
int ind = 0;
for (long partitionId : allPartitionKeys) {
partitionIdToClassName.put(partitionId, partitionClassNames.get(ind % partitionClassNames.size()));
ind++;
}
HostPartitionClassStorageStatsWrapper partitionClassStats1 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats1, partitionIdToClassName);
HostPartitionClassStorageStatsWrapper partitionClassStats2 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats2, partitionIdToClassName);
HostPartitionClassStorageStatsWrapper partitionClassStats3 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats3, partitionIdToClassName);
mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats1);
mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats2);
mySqlStore3.storeHostPartitionClassStorageStats(partitionClassStats3);
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
Map<Long, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
.stream()
.flatMap(ent -> ent.getValue().stream().map(pid -> new Pair<>(ent.getKey(), (long) pid)))
.collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
assertEquals(partitionIdToClassName, dbPartitionKeyToClassName);
// Fetch HostPartitionClassStorageStats
HostPartitionClassStorageStatsWrapper obtainedStats1 =
mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(partitionClassStats1.getStats().getStorageStats(), obtainedStats1.getStats().getStorageStats());
HostPartitionClassStorageStatsWrapper obtainedStats2 =
mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname2, port2, partitionNameAndIds);
assertEquals(partitionClassStats2.getStats().getStorageStats(), obtainedStats2.getStats().getStorageStats());
HostPartitionClassStorageStatsWrapper obtainedStats3 =
mySqlStore3.queryHostPartitionClassStorageStatsByHost(hostname3, port3, partitionNameAndIds);
assertEquals(partitionClassStats3.getStats().getStorageStats(), obtainedStats3.getStats().getStorageStats());
// Fetch StatsSnapshot
StatsWrapper obtainedStats = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(
StorageStatsUtil.convertHostPartitionClassStorageStatsToStatsSnapshot(obtainedStats1.getStats(), false),
obtainedStats.getSnapshot());
mySqlStore3.shutdown();
}
/**
* Test methods to store, delete and fetch aggregated partition class stats.
* @throws Exception
*/
@Test
public void testAggregatedPartitionClassStats() throws Exception {
testHostPartitionClassStats();
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Now we should have partition class names and partition ids in database
// Construct an aggregated partition class report
StatsSnapshot aggregated =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore.storeAggregatedPartitionClassStats(aggregated);
partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
StatsSnapshot aggregated3 =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore3.storeAggregatedPartitionClassStats(aggregated3);
StatsSnapshot obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(aggregated, obtained);
assertEquals(mySqlStore.queryAggregatedPartitionClassStatsByClusterName("random-cluster").getSubMap().size(), 0);
StatsSnapshot obtained3 = mySqlStore3.queryAggregatedPartitionClassStats();
assertEquals(aggregated3, obtained3);
// Change one value and store it to mysql database again
StatsSnapshot newSnapshot = new StatsSnapshot(aggregated);
newSnapshot.getSubMap()
.get("default")
.getSubMap()
.get(Utils.partitionClassStatsAccountContainerKey((short) 1, (short) 1))
.setValue(1);
newSnapshot.updateValue();
mySqlStore.storeAggregatedPartitionClassStats(aggregated);
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(aggregated, obtained);
// Delete some account and container
newSnapshot = new StatsSnapshot(newSnapshot);
short accountId = (short) 1;
short containerId = (short) 1;
String accountContainerKey = Utils.partitionClassStatsAccountContainerKey(accountId, containerId);
for (String partitionClassName : partitionNameAndIds.keySet()) {
mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
newSnapshot.getSubMap().get(partitionClassName).getSubMap().remove(accountContainerKey);
}
newSnapshot.updateValue();
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(newSnapshot, obtained);
mySqlStore3.shutdown();
}
@Test
public void testAggregatedPartitionClassStorageStats() throws Exception {
testHostPartitionClassStorageStats();
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Now we should have partition class names and partition ids in database
// Construct an aggregated partition class report
AggregatedPartitionClassStorageStats aggregatedStats = new AggregatedPartitionClassStorageStats(
StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedPartitionClassStorageStats(aggregatedStats);
partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
AggregatedPartitionClassStorageStats aggregatedStats3 = new AggregatedPartitionClassStorageStats(
StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
mySqlStore3.storeAggregatedPartitionClassStorageStats(aggregatedStats3);
AggregatedPartitionClassStorageStats obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(aggregatedStats.getStorageStats(), obtained.getStorageStats());
    assertEquals(0,
        mySqlStore.queryAggregatedPartitionClassStorageStatsByClusterName("random-cluster").getStorageStats().size());
AggregatedPartitionClassStorageStats obtained3 = mySqlStore3.queryAggregatedPartitionClassStorageStats();
assertEquals(aggregatedStats3.getStorageStats(), obtained3.getStorageStats());
// Fetch StatsSnapshot
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(StorageStatsUtil.convertAggregatedPartitionClassStorageStatsToStatsSnapshot(obtained, false),
obtainedSnapshot);
// Change one value and store it to mysql database again
Map<String, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
new HashMap<>(aggregatedStats.getStorageStats());
ContainerStorageStats origin = newStorageStatsMap.get("default").get((short) 1).get((short) 1);
newStorageStatsMap.get("default")
.get((short) 1)
.put((short) 1,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
mySqlStore.storeAggregatedPartitionClassStorageStats(new AggregatedPartitionClassStorageStats(newStorageStatsMap));
obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(newStorageStatsMap, obtained.getStorageStats());
// Delete some account and container
short accountId = (short) 1;
short containerId = (short) 1;
for (String partitionClassName : partitionNameAndIds.keySet()) {
mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
newStorageStatsMap.get(partitionClassName).get(accountId).remove(containerId);
}
obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(newStorageStatsMap, obtained.getStorageStats());
mySqlStore3.shutdown();
}
private AccountStatsMySqlStore createAccountStatsMySqlStore(String clusterName, String hostname, int port)
throws Exception {
Path localBackupFilePath = createTemporaryFile();
Properties configProps = Utils.loadPropsFromResource("accountstats_mysql.properties");
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_CLUSTER_NAME, clusterName);
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_HOST_NAME, hostname);
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_DATACENTER_NAME, "dc1");
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_PORT, String.valueOf(port));
configProps.setProperty(AccountStatsMySqlConfig.DOMAIN_NAMES_TO_REMOVE, ".github.com");
configProps.setProperty(AccountStatsMySqlConfig.UPDATE_BATCH_SIZE, String.valueOf(batchSize));
configProps.setProperty(AccountStatsMySqlConfig.POOL_SIZE, String.valueOf(5));
configProps.setProperty(AccountStatsMySqlConfig.LOCAL_BACKUP_FILE_PATH, localBackupFilePath.toString());
VerifiableProperties verifiableProperties = new VerifiableProperties(configProps);
return (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(verifiableProperties,
new ClusterMapConfig(verifiableProperties), new MetricRegistry()).getAccountStatsStore();
}
private static Path createTemporaryFile() throws IOException {
Path tempDir = Files.createTempDirectory("AccountStatsMySqlStoreTest");
return tempDir.resolve("localbackup");
}
private static StatsWrapper generateStatsWrapper(int numPartitions, int numAccounts, int numContainers,
StatsReportType reportType) {
Random random = new Random();
List<StatsSnapshot> storeSnapshots = new ArrayList<>();
for (int i = 0; i < numPartitions; i++) {
storeSnapshots.add(TestUtils.generateStoreStats(numAccounts, numContainers, random, reportType));
}
return TestUtils.generateNodeStats(storeSnapshots, 1000, reportType);
}
private static HostAccountStorageStatsWrapper generateHostAccountStorageStatsWrapper(int numPartitions,
int numAccounts, int numContainersPerAccount, StatsReportType reportType) {
HostAccountStorageStats hostAccountStorageStats = new HostAccountStorageStats(
StorageStatsUtilTest.generateRandomHostAccountStorageStats(numPartitions, numAccounts, numContainersPerAccount,
100000L, 2, 10));
StatsHeader statsHeader =
new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 1000, numPartitions, numPartitions,
Collections.emptyList());
return new HostAccountStorageStatsWrapper(statsHeader, hostAccountStorageStats);
}
private void assertTableSize(AccountStatsMySqlStore mySqlStore, int expectedNumRows) throws SQLException {
int numRows = 0;
try (Connection connection = mySqlStore.getDataSource().getConnection()) {
try (Statement statement = connection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery("SELECT * FROM " + AccountReportsDao.ACCOUNT_REPORTS_TABLE)) {
while (resultSet.next()) {
numRows++;
}
}
}
}
assertEquals(expectedNumRows, numRows);
}
private void assertTwoStatsSnapshots(StatsSnapshot snapshot1, StatsSnapshot snapshot2) {
assertEquals("Snapshot values are not equal", snapshot1.getValue(), snapshot2.getValue());
if (snapshot1.getSubMap() == null) {
assertNull(snapshot2.getSubMap());
} else {
assertEquals("Snapshot submap size mismatch", snapshot1.getSubMap().size(), snapshot2.getSubMap().size());
for (String key : snapshot1.getSubMap().keySet()) {
assertTrue(snapshot2.getSubMap().containsKey(key));
assertTwoStatsSnapshots(snapshot1.getSubMap().get(key), snapshot2.getSubMap().get(key));
}
}
}
private StatsWrapper convertAccountStatsToPartitionClassStats(StatsWrapper accountStats,
Map<String, String> partitionKeyToClassName) {
Map<String, StatsSnapshot> partitionClassSubMap = new HashMap<>();
StatsSnapshot originHostStats = accountStats.getSnapshot();
for (String partitionKey : originHostStats.getSubMap().keySet()) {
StatsSnapshot originPartitionStats = originHostStats.getSubMap().get(partitionKey);
String currentClassName = partitionKeyToClassName.get(partitionKey);
StatsSnapshot partitionClassStats =
partitionClassSubMap.computeIfAbsent(currentClassName, k -> new StatsSnapshot(0L, new HashMap<>()));
Map<String, StatsSnapshot> accountContainerSubMap = new HashMap<>();
for (String accountKey : originPartitionStats.getSubMap().keySet()) {
for (Map.Entry<String, StatsSnapshot> containerEntry : originPartitionStats.getSubMap()
.get(accountKey)
.getSubMap()
.entrySet()) {
String containerKey = containerEntry.getKey();
StatsSnapshot containerStats = new StatsSnapshot(containerEntry.getValue());
String accountContainerKey =
Utils.partitionClassStatsAccountContainerKey(Utils.accountIdFromStatsAccountKey(accountKey),
Utils.containerIdFromStatsContainerKey(containerKey));
accountContainerSubMap.put(accountContainerKey, containerStats);
}
}
long accountContainerValue = accountContainerSubMap.values().stream().mapToLong(StatsSnapshot::getValue).sum();
StatsSnapshot partitionStats = new StatsSnapshot(accountContainerValue, accountContainerSubMap);
partitionClassStats.getSubMap().put(partitionKey, partitionStats);
partitionClassStats.setValue(partitionClassStats.getValue() + accountContainerValue);
}
return new StatsWrapper(new StatsHeader(accountStats.getHeader()),
new StatsSnapshot(originHostStats.getValue(), partitionClassSubMap));
}
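  // Illustrative sketch (not part of the original test, hypothetical keys and values) of the
  // reshaping done by convertAccountStatsToPartitionClassStats above. Host account stats are
  // keyed partition -> account -> container:
  //   { "Partition[1]": { "A[1]": { "C[2]": 100 } } }
  // The produced partition class stats are keyed class -> partition -> accountId_containerId,
  // where the combined key text comes from Utils.partitionClassStatsAccountContainerKey:
  //   { "default": { "Partition[1]": { "A[1]_C[2]": 100 } } }
  // and each parent value is the sum of its children.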
private HostPartitionClassStorageStatsWrapper convertHostAccountStorageStatsToHostPartitionClassStorageStats(
HostAccountStorageStatsWrapper accountStatsWrapper, Map<Long, String> partitionIdToClassName) {
HostPartitionClassStorageStats hostPartitionClassStorageStats = new HostPartitionClassStorageStats();
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStats =
accountStatsWrapper.getStats().getStorageStats();
for (long partitionId : storageStats.keySet()) {
Map<Short, Map<Short, ContainerStorageStats>> accountStorageStatsMap = storageStats.get(partitionId);
String partitionClassName = partitionIdToClassName.get(partitionId);
for (short accountId : accountStorageStatsMap.keySet()) {
accountStorageStatsMap.get(accountId)
.values()
.forEach(containerStats -> hostPartitionClassStorageStats.addContainerStorageStats(partitionClassName,
partitionId, accountId, containerStats));
}
}
return new HostPartitionClassStorageStatsWrapper(new StatsHeader(accountStatsWrapper.getHeader()),
hostPartitionClassStorageStats);
}
}
| cgtz/ambry | ambry-mysql/src/integration-test/java/com/github/ambry/accountstats/AccountStatsMySqlStoreIntegrationTest.java | Java | apache-2.0 | 46,523 |
package info.novatec.testit.webtester.support.assertj;
import static info.novatec.testit.webtester.support.assertj.WebTesterAssertions.assertThat;
import static org.mockito.Mockito.doReturn;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import info.novatec.testit.webtester.pageobjects.RadioButton;
@RunWith(MockitoJUnitRunner.class)
public class RadioButtonAssertTest {
@Mock
RadioButton selectedRadioButton;
@Mock
RadioButton radioButton;
@Before
public void setUp() {
doReturn(true).when(selectedRadioButton).isSelected();
}
/* selected */
@Test
public void selectedTrueTest() {
assertThat(selectedRadioButton).isSelected(true);
}
@Test(expected = AssertionError.class)
public void selectedFalseTest() {
assertThat(radioButton).isSelected(true);
}
@Test
public void notSelectedTrueTest() {
assertThat(radioButton).isNotSelected(true);
}
@Test(expected = AssertionError.class)
public void notSelectedFalseTest() {
assertThat(selectedRadioButton).isNotSelected(true);
}
}
| dbe-it/webtester-core | webtester-support-assertj/src/test/java/info/novatec/testit/webtester/support/assertj/RadioButtonAssertTest.java | Java | apache-2.0 | 1,213 |
package com.jt.test.sort;
import java.util.Arrays;
import java.util.Random;
/**
 * Selection sort on an array of {@link Comparable} elements.
 * @since 2016/10/19
 */
public class Select {
public static void sort(Comparable[] data) {
for (int i = 0; i < data.length; i++) {
int min = i;
for (int j = i+1; j < data.length; j++) {
if (less(data, min, j)) {
min = j;
}
}
exch(data, i, min);
}
}
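    // Explanatory note (not used by the code): after iteration i of sort(), data[0..i] holds the
    // i + 1 smallest elements in ascending order and is never touched again. Example on {5, 2, 4}:
    //   i = 0: smallest of {5, 2, 4} is 2 -> swap -> {2, 5, 4}
    //   i = 1: smallest of {5, 4} is 4 -> swap -> {2, 4, 5}
    // Selection sort uses roughly n^2 / 2 comparisons and n exchanges regardless of input order.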
    // Despite its name, this returns true when data[min] is GREATER than data[j],
    // i.e. when the element at index j is the smaller of the two.
    private static boolean less(Comparable[] data, int min, int j) {
        return data[min].compareTo(data[j]) > 0;
    }
private static void exch(Comparable[] data, int i, int min) {
Comparable tmp = data[i];
data[i] = data[min];
data[min] = tmp;
}
public static boolean isSort(Comparable[] data) {
for (int i = 0; i < data.length-1; i++) {
if (less(data, i, i + 1)) {
return false;
}
}
return true;
}
public static void main(String[] args) throws Exception {
Random random = new Random();
Integer[] datas = new Integer[10];
for (int i = 0; i < 10; i++) {
datas[i] = random.nextInt(100);
}
sort(datas);
if (!isSort(datas)) {
System.err.println("not sort");
}
System.out.println(Arrays.toString(datas));
}
}
| jt120/algorithm | new-man/src/test/java/com/jt/test/sort/Select.java | Java | apache-2.0 | 1,378 |
/*
* Copyright 2000-2008 JetBrains s.r.o.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.groovy.lang.actions.updown;
import com.intellij.ide.DataManager;
import com.intellij.openapi.application.Result;
import com.intellij.openapi.command.WriteCommandAction;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.actionSystem.EditorActionHandler;
import com.intellij.openapi.editor.actionSystem.EditorActionManager;
import com.intellij.openapi.editor.ex.DocumentEx;
import com.intellij.testFramework.fixtures.LightCodeInsightFixtureTestCase;
import org.jetbrains.plugins.groovy.GroovyFileType;
import org.jetbrains.plugins.groovy.util.TestUtils;
import org.jetbrains.plugins.groovy.lang.editor.actions.GroovyEditorActionsManager;
import java.util.List;
/**
* @author ilyas
*/
public class GroovyMoveStatementTest extends LightCodeInsightFixtureTestCase {
@Override
protected String getBasePath() {
return TestUtils.getTestDataPath() + "groovy/actions/moveStatement/";
}
public void testClazz1() throws Throwable { downTest(); }
public void testClazz2() throws Throwable { upTest(); }
public void testClos2() throws Throwable { upTest(); }
public void testMeth1() throws Throwable { downTest(); }
public void testMeth2() throws Throwable { downTest(); }
public void testMeth3() throws Throwable { upTest(); }
public void testMeth4() throws Throwable { upTest(); }
public void testIfst() throws Throwable { downTest(); }
public void testIfst2() throws Throwable { upTest(); }
public void testSimple1() throws Throwable { downTest(); }
public void testSimple2() throws Throwable { upTest(); }
public void testTryst1() throws Throwable { downTest(); }
public void testTryst2() throws Throwable { downTest(); }
public void testStatementOutsideClosure() throws Throwable { downTest(); }
public void testVariableOutsideClosure() throws Throwable { upTest(); }
public void testVariableOutsideClosureDown() throws Throwable { downTest(); }
public void testStatementInsideClosure() throws Throwable { upTest(); }
public void testMoveGroovydocWithMethod() throws Throwable { downTest(); }
public void testMoveMethodWithGroovydoc() throws Throwable { downTest(); }
public void testMoveSecondFieldUp() throws Throwable { upTest(); }
public void testMoveFirstFieldDown() throws Throwable { downTest(); }
public void testVariableOverMethodInScript() throws Throwable { downTest(); }
public void testVariableOverClassInScript() throws Throwable { downTest(); }
public void testUpFromLastOffset() throws Throwable { upTest(); }
public void testClosureWithPrequel() throws Throwable { upTest(); }
public void testMultiLineVariable() throws Throwable { downTest(); }
public void testClosureVariableByRBrace() throws Throwable { upTest(); }
private void downTest() throws Exception {
doTest(GroovyEditorActionsManager.MOVE_STATEMENT_DOWN_ACTION);
}
private void upTest() throws Exception {
doTest(GroovyEditorActionsManager.MOVE_STATEMENT_UP_ACTION);
}
public void doTest(final String actionId) throws Exception {
final List<String> data = TestUtils.readInput(getTestDataPath() + getTestName(true) + ".test");
myFixture.configureByText(GroovyFileType.GROOVY_FILE_TYPE, data.get(0));
final EditorActionHandler handler = EditorActionManager.getInstance().getActionHandler(actionId);
new WriteCommandAction(getProject()) {
protected void run(Result result) throws Throwable {
final Editor editor = myFixture.getEditor();
handler.execute(editor, DataManager.getInstance().getDataContext(editor.getContentComponent()));
((DocumentEx)editor.getDocument()).stripTrailingSpaces(false);
}
}.execute();
myFixture.checkResult(data.get(1));
}
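  // Assumption about the fixture data (not verified here): each *.test file read by
  // TestUtils.readInput is expected to hold two chunks, where data.get(0) is the editor text
  // before the action (typically containing a <caret> marker) and data.get(1) is the expected
  // text after the move-statement action has been applied.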
}
| jexp/idea2 | plugins/groovy/test/org/jetbrains/plugins/groovy/lang/actions/updown/GroovyMoveStatementTest.java | Java | apache-2.0 | 4,349 |
package org.apereo.cas.web.report;
import org.apereo.cas.web.report.util.ControllerUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.endpoint.mvc.AbstractNamedMvcEndpoint;
import org.springframework.cloud.bus.BusProperties;
import org.springframework.cloud.config.server.config.ConfigServerProperties;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.servlet.ModelAndView;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.HashMap;
import java.util.Map;
/**
* Controller that exposes the CAS internal state and beans
* as JSON. The report is available at {@code /status/config}.
*
* @author Misagh Moayyed
* @since 4.1
*/
public class ConfigurationStateController extends AbstractNamedMvcEndpoint {
private static final String VIEW_CONFIG = "monitoring/viewConfig";
@Autowired(required = false)
private BusProperties busProperties;
@Autowired
private ConfigServerProperties configServerProperties;
public ConfigurationStateController() {
super("configstate", "/config", true, true);
}
/**
* Handle request.
*
* @param request the request
* @param response the response
* @return the model and view
* @throws Exception the exception
*/
@GetMapping
protected ModelAndView handleRequestInternal(final HttpServletRequest request,
final HttpServletResponse response) throws Exception {
final Map<String, Object> model = new HashMap<>();
final String path = request.getContextPath();
ControllerUtils.configureModelMapForConfigServerCloudBusEndpoints(busProperties, configServerProperties, path, model);
return new ModelAndView(VIEW_CONFIG, model);
}
}
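// Usage sketch (hypothetical, not part of the CAS codebase): once registered, the report can be
// fetched with a plain HTTP GET against the deployed context, e.g.
// GET https://cas.example.org/cas/status/config
// which renders the monitoring/viewConfig view with the Spring Cloud Bus and Config Server
// details that ControllerUtils adds to the model.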
| gabedwrds/cas | support/cas-server-support-reports/src/main/java/org/apereo/cas/web/report/ConfigurationStateController.java | Java | apache-2.0 | 1,890 |
package com.canoo.ant.table;
import com.canoo.ant.filter.AllEqualsFilter;
import com.canoo.ant.filter.AllFilter;
import com.canoo.ant.filter.ITableFilter;
import org.apache.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.util.*;
public abstract class APropertyTable implements IPropertyTable {
private static final Logger LOG = Logger.getLogger(APropertyTable.class);
private static final int MAX_DEPTH = 10; // max recursion depth
private static final ThreadLocal DEPTH = new ThreadLocal();
private File fContainer;
private String fTable;
private String fPrefix;
private ITableFilter fFilter;
private List fRawTable;
private List fMetaTable;
protected static final String EMPTY = "";
protected static final String KEY_JOIN = "JOIN";
protected APropertyTable() {
fFilter = new AllFilter();
        if (DEPTH.get() == null) {
setDepth(0);
}
}
    private static void setDepth(int depth) {
        DEPTH.set(Integer.valueOf(depth));
    }
    private static int getDepth() {
        return ((Integer) DEPTH.get()).intValue();
    }
/**
* @return columnName -> expander (Type IPropertyTable)
*/
public Map getColumnInfo() {
List meta = getMetaTable();
Map result = new HashMap(meta.size()); // smaller is likely
// find all properties for this table
List tableSpecificColumnInfo = new AllEqualsFilter(TableFactory.KEY_TABLE).filter(meta, getTable());
for (Iterator eachColumnInfo = tableSpecificColumnInfo.iterator(); eachColumnInfo.hasNext();) {
Properties colInfo = (Properties) eachColumnInfo.next();
try {
// tableClass defaults to the current class
IPropertyTable table = TableFactory.createTable(colInfo, getClass().getName());
ITableFilter filter = TableFactory.createFilter(colInfo);
final File container;
if (colInfo.getProperty(TableFactory.KEY_CONTAINER, "").length() > 0) {
container = new File(getContainer().getParentFile(), colInfo.getProperty(TableFactory.KEY_CONTAINER));
colInfo.remove(TableFactory.KEY_CONTAINER); // to be sure that it doesn't get used with wrong path
}
else {
container = getContainer();
}
String key = colInfo.getProperty(TableFactory.KEY_NAME); // no default possible
TableFactory.initOrDefault(table, filter, colInfo, container, key);
result.put(key, table);
} catch (Exception e) {
LOG.error("cannot work with Property: " + colInfo.toString(), e);
throw new RuntimeException("Cannot work with Property: " + colInfo.toString(), e);
}
}
return result;
}
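    // Illustrative sketch (hypothetical property names and files): a JOIN/meta row such as
    // {TABLE=login, NAME=user, CONTAINER=users.xls} means that values of the "user" column in
    // the "login" table are expanded by looking them up in a property table built from
    // users.xls; getColumnInfo() then returns the map "user" -> that expander table.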
public List getPropertiesList(final String filterValue, final String prefix) {
// start with copy of initial table
// if current filter concerns extension keys, filter before extending
// filtering in advance also lowers memory consumption in the average
List result = getFilter().filter(getRawTable(), filterValue);
if (getDepth() > MAX_DEPTH){
LOG.error("processing grounded due to excessive recursion calls: "+getDepth());
return result;
}
setDepth(getDepth()+1);
final Map colInfo = getColumnInfo();
// only go over entries in the colInfo.
// (property names without colInfo info are not expanded)
for (Iterator eachExpandable = colInfo.keySet().iterator(); eachExpandable.hasNext();) {
String expansionName = (String) eachExpandable.next();
expandName(result, expansionName, colInfo);
}
setDepth(getDepth()-1);
// filter a second time to allow filters to work on expansions
result = getFilter().filter(result, filterValue);
// prefix is processed after filtering
if (prefix!=null && prefix.length()>0){
result = mapPrefix(result, prefix);
}
return result;
}
// like a ruby map!
private List mapPrefix(List result, final String prefix) {
List collect = new ArrayList(result.size());
for (Iterator eachProps = result.iterator(); eachProps.hasNext();) {
Properties props = (Properties) eachProps.next();
Properties mapped = new Properties();
for (Iterator eachKey = props.keySet().iterator(); eachKey.hasNext();) {
String key = (String) eachKey.next();
String value = props.getProperty(key);
mapped.setProperty(prefix+"."+key, value);
}
collect.add(mapped);
}
return collect;
}
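    // Example of the mapping above (illustrative values): with prefix "user", an entry
    // {name=joe, password=secret} becomes {user.name=joe, user.password=secret}.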
protected void expandName(List result, String expansionName, Map colInfo) {
List expansions = new LinkedList(); // cannot add while iterating. store and add later
for (Iterator eachProperties = result.iterator(); eachProperties.hasNext();) {
Properties props = (Properties) eachProperties.next();
List newExpansions = expandProps(props, expansionName, colInfo);
// default behaviour: like OUTER join, we do not shrink if nothing found
if (newExpansions.size() > 0) {
eachProperties.remove();
expansions.addAll(newExpansions);
}
}
result.addAll(expansions);
}
protected List expandProps(Properties props, String expansionName, Map colInfo) {
String value = props.getProperty(expansionName);
List propExpansions = new LinkedList();
IPropertyTable expansionTable = (IPropertyTable) colInfo.get(expansionName);
// recursive call
List expandWith = expansionTable.getPropertiesList(value, expansionTable.getPrefix());
for (Iterator eachExpansion = expandWith.iterator(); eachExpansion.hasNext();) {
Properties expandProps = (Properties) eachExpansion.next();
// merge expansion with current line
expandProps.putAll(props);
// store for later adding
propExpansions.add(expandProps);
}
return propExpansions;
}
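    // Expansion sketch (illustrative values): given a row {user=joe, host=test} and an expansion
    // column "user" whose referenced table yields {user=joe, password=secret}, the expanded row
    // is {user=joe, host=test, password=secret}. Because the original row is copied over the
    // expansion (expandProps.putAll(props)), the original values win on key clashes; if the
    // lookup yields nothing, expandName keeps the original row (OUTER-join behaviour).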
//-------------- field accessors ------------------
public File getContainer() {
return fContainer;
}
public void setContainer(File container) {
fContainer = container;
}
public String getTable() {
return fTable;
}
public void setTable(String table) {
fTable = table;
}
public ITableFilter getFilter() {
return fFilter;
}
public void setFilter(ITableFilter filter) {
fFilter = filter;
}
public String getPrefix() {
return fPrefix;
}
public void setPrefix(String prefix) {
fPrefix = prefix;
}
//-------------- how to read specifics ------------------
/** lazy getter, cached */
public List getRawTable() {
fRawTable = getCachedTable(getTable(), fRawTable);
return fRawTable;
}
/** lazy getter, cached */
public List getMetaTable() {
if (hasJoinTable()) {
fMetaTable = getCachedTable(KEY_JOIN, fMetaTable);
}
else {
fMetaTable = Collections.EMPTY_LIST;
}
return fMetaTable;
}
/**
* Indicates if the table container has a JOIN table.
* @return default is <code>true</code>
*/
protected boolean hasJoinTable() {
return true;
}
protected List getCachedTable(final String table, List tableCache) {
if (tableCache != null) {
return tableCache;
}
try {
tableCache = read(table);
}
catch (final IOException e) {
LOG.error("Cannot read " + getContainer() + " " + table, e);
String message = "Cannot read container >" + getContainer() + "<";
if (table != null)
message += " (table " + table + ")";
message += ": " + e.getMessage();
throw new RuntimeException(message, e);
}
if (tableCache.isEmpty()) {
LOG.debug("no entry in " + getContainer() + "/" + table);
}
LOG.debug(tableCache.size()+" entries in "+getContainer()+ " " + table);
return tableCache;
}
protected abstract List read(String table) throws IOException;
}
| lukecampbell/webtest | src/main/java/com/canoo/ant/table/APropertyTable.java | Java | apache-2.0 | 8,495 |
/*
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.spanner;
import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException;
import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerExceptionForCancellation;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import com.google.api.client.util.BackOff;
import com.google.api.client.util.ExponentialBackOff;
import com.google.api.gax.retrying.RetrySettings;
import com.google.cloud.ByteArray;
import com.google.cloud.Date;
import com.google.cloud.Timestamp;
import com.google.cloud.spanner.Type.StructField;
import com.google.cloud.spanner.spi.v1.SpannerRpc;
import com.google.cloud.spanner.v1.stub.SpannerStubSettings;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.protobuf.ByteString;
import com.google.protobuf.ListValue;
import com.google.protobuf.Value.KindCase;
import com.google.spanner.v1.PartialResultSet;
import com.google.spanner.v1.ResultSetMetadata;
import com.google.spanner.v1.ResultSetStats;
import com.google.spanner.v1.Transaction;
import com.google.spanner.v1.TypeCode;
import io.grpc.Context;
import io.opencensus.common.Scope;
import io.opencensus.trace.AttributeValue;
import io.opencensus.trace.Span;
import io.opencensus.trace.Tracer;
import io.opencensus.trace.Tracing;
import java.io.IOException;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
/** Implementation of {@link ResultSet}. */
abstract class AbstractResultSet<R> extends AbstractStructReader implements ResultSet {
private static final Tracer tracer = Tracing.getTracer();
interface Listener {
/**
* Called when transaction metadata is seen. This method may be invoked at most once. If the
* method is invoked, it will precede {@link #onError(SpannerException)} or {@link #onDone()}.
*/
void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId)
throws SpannerException;
/** Called when the read finishes with an error. Returns the error that should be thrown. */
SpannerException onError(SpannerException e, boolean withBeginTransaction);
/** Called when the read finishes normally. */
void onDone(boolean withBeginTransaction);
}
@VisibleForTesting
static class GrpcResultSet extends AbstractResultSet<List<Object>> {
private final GrpcValueIterator iterator;
private final Listener listener;
private GrpcStruct currRow;
private SpannerException error;
private ResultSetStats statistics;
private boolean closed;
GrpcResultSet(CloseableIterator<PartialResultSet> iterator, Listener listener) {
this.iterator = new GrpcValueIterator(iterator);
this.listener = listener;
}
@Override
protected GrpcStruct currRow() {
checkState(!closed, "ResultSet is closed");
checkState(currRow != null, "next() call required");
return currRow;
}
@Override
public boolean next() throws SpannerException {
if (error != null) {
throw newSpannerException(error);
}
try {
if (currRow == null) {
ResultSetMetadata metadata = iterator.getMetadata();
if (metadata.hasTransaction()) {
listener.onTransactionMetadata(
metadata.getTransaction(), iterator.isWithBeginTransaction());
} else if (iterator.isWithBeginTransaction()) {
// The query should have returned a transaction.
throw SpannerExceptionFactory.newSpannerException(
ErrorCode.FAILED_PRECONDITION, AbstractReadContext.NO_TRANSACTION_RETURNED_MSG);
}
currRow = new GrpcStruct(iterator.type(), new ArrayList<>());
}
boolean hasNext = currRow.consumeRow(iterator);
if (!hasNext) {
statistics = iterator.getStats();
}
return hasNext;
} catch (Throwable t) {
throw yieldError(
SpannerExceptionFactory.asSpannerException(t),
iterator.isWithBeginTransaction() && currRow == null);
}
}
@Override
@Nullable
public ResultSetStats getStats() {
return statistics;
}
@Override
public void close() {
listener.onDone(iterator.isWithBeginTransaction());
iterator.close("ResultSet closed");
closed = true;
}
@Override
public Type getType() {
checkState(currRow != null, "next() call required");
return currRow.getType();
}
private SpannerException yieldError(SpannerException e, boolean beginTransaction) {
SpannerException toThrow = listener.onError(e, beginTransaction);
close();
throw toThrow;
}
}
/**
* Adapts a stream of {@code PartialResultSet} messages into a stream of {@code Value} messages.
*/
private static class GrpcValueIterator extends AbstractIterator<com.google.protobuf.Value> {
private enum StreamValue {
METADATA,
RESULT,
}
private final CloseableIterator<PartialResultSet> stream;
private ResultSetMetadata metadata;
private Type type;
private PartialResultSet current;
private int pos;
private ResultSetStats statistics;
GrpcValueIterator(CloseableIterator<PartialResultSet> stream) {
this.stream = stream;
}
@SuppressWarnings("unchecked")
@Override
protected com.google.protobuf.Value computeNext() {
if (!ensureReady(StreamValue.RESULT)) {
endOfData();
return null;
}
com.google.protobuf.Value value = current.getValues(pos++);
KindCase kind = value.getKindCase();
if (!isMergeable(kind)) {
if (pos == current.getValuesCount() && current.getChunkedValue()) {
throw newSpannerException(ErrorCode.INTERNAL, "Unexpected chunked PartialResultSet.");
} else {
return value;
}
}
if (!current.getChunkedValue() || pos != current.getValuesCount()) {
return value;
}
Object merged =
kind == KindCase.STRING_VALUE
? value.getStringValue()
: new ArrayList<>(value.getListValue().getValuesList());
while (current.getChunkedValue() && pos == current.getValuesCount()) {
if (!ensureReady(StreamValue.RESULT)) {
throw newSpannerException(
ErrorCode.INTERNAL, "Stream closed in the middle of chunked value");
}
com.google.protobuf.Value newValue = current.getValues(pos++);
if (newValue.getKindCase() != kind) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Unexpected type in middle of chunked value. Expected: "
+ kind
+ " but got: "
+ newValue.getKindCase());
}
if (kind == KindCase.STRING_VALUE) {
merged = merged + newValue.getStringValue();
} else {
concatLists(
(List<com.google.protobuf.Value>) merged, newValue.getListValue().getValuesList());
}
}
if (kind == KindCase.STRING_VALUE) {
return com.google.protobuf.Value.newBuilder().setStringValue((String) merged).build();
} else {
return com.google.protobuf.Value.newBuilder()
.setListValue(
ListValue.newBuilder().addAllValues((List<com.google.protobuf.Value>) merged))
.build();
}
}
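    // Chunk reassembly sketch (illustrative values): if one PartialResultSet has chunkedValue=true
    // and its last value is the string "Hel", and the next response starts with "lo, World",
    // computeNext() yields a single merged value "Hello, World". Chunked LIST values are merged
    // the same way, with concatLists() recursively joining the last element of one chunk to the
    // first element of the next whenever both are of a mergeable kind.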
ResultSetMetadata getMetadata() throws SpannerException {
if (metadata == null) {
if (!ensureReady(StreamValue.METADATA)) {
throw newSpannerException(ErrorCode.INTERNAL, "Stream closed without sending metadata");
}
}
return metadata;
}
/**
* Get the query statistics. Query statistics are delivered with the last PartialResultSet in
* the stream. Any attempt to call this method before the caller has finished consuming the
* results will return null.
*/
@Nullable
ResultSetStats getStats() {
return statistics;
}
Type type() {
checkState(type != null, "metadata has not been received");
return type;
}
private boolean ensureReady(StreamValue requiredValue) throws SpannerException {
while (current == null || pos >= current.getValuesCount()) {
if (!stream.hasNext()) {
return false;
}
current = stream.next();
pos = 0;
if (type == null) {
// This is the first message on the stream.
if (!current.hasMetadata() || !current.getMetadata().hasRowType()) {
throw newSpannerException(ErrorCode.INTERNAL, "Missing type metadata in first message");
}
metadata = current.getMetadata();
com.google.spanner.v1.Type typeProto =
com.google.spanner.v1.Type.newBuilder()
.setCode(TypeCode.STRUCT)
.setStructType(metadata.getRowType())
.build();
try {
type = Type.fromProto(typeProto);
} catch (IllegalArgumentException e) {
throw newSpannerException(
ErrorCode.INTERNAL, "Invalid type metadata: " + e.getMessage(), e);
}
}
if (current.hasStats()) {
statistics = current.getStats();
}
if (requiredValue == StreamValue.METADATA) {
return true;
}
}
return true;
}
void close(@Nullable String message) {
stream.close(message);
}
boolean isWithBeginTransaction() {
return stream.isWithBeginTransaction();
}
/** @param a is a mutable list and b will be concatenated into a. */
private void concatLists(List<com.google.protobuf.Value> a, List<com.google.protobuf.Value> b) {
if (a.size() == 0 || b.size() == 0) {
a.addAll(b);
return;
} else {
com.google.protobuf.Value last = a.get(a.size() - 1);
com.google.protobuf.Value first = b.get(0);
KindCase lastKind = last.getKindCase();
KindCase firstKind = first.getKindCase();
if (isMergeable(lastKind) && lastKind == firstKind) {
com.google.protobuf.Value merged;
if (lastKind == KindCase.STRING_VALUE) {
String lastStr = last.getStringValue();
String firstStr = first.getStringValue();
merged =
com.google.protobuf.Value.newBuilder().setStringValue(lastStr + firstStr).build();
} else { // List
List<com.google.protobuf.Value> mergedList = new ArrayList<>();
mergedList.addAll(last.getListValue().getValuesList());
concatLists(mergedList, first.getListValue().getValuesList());
merged =
com.google.protobuf.Value.newBuilder()
.setListValue(ListValue.newBuilder().addAllValues(mergedList))
.build();
}
a.set(a.size() - 1, merged);
a.addAll(b.subList(1, b.size()));
} else {
a.addAll(b);
}
}
}
private boolean isMergeable(KindCase kind) {
return kind == KindCase.STRING_VALUE || kind == KindCase.LIST_VALUE;
}
}
static class GrpcStruct extends Struct implements Serializable {
private final Type type;
private final List<Object> rowData;
/**
* Builds an immutable version of this struct using {@link Struct#newBuilder()} which is used as
* a serialization proxy.
*/
private Object writeReplace() {
Builder builder = Struct.newBuilder();
List<Type.StructField> structFields = getType().getStructFields();
for (int i = 0; i < structFields.size(); i++) {
Type.StructField field = structFields.get(i);
String fieldName = field.getName();
Object value = rowData.get(i);
Type fieldType = field.getType();
switch (fieldType.getCode()) {
case BOOL:
builder.set(fieldName).to((Boolean) value);
break;
case INT64:
builder.set(fieldName).to((Long) value);
break;
case FLOAT64:
builder.set(fieldName).to((Double) value);
break;
case NUMERIC:
builder.set(fieldName).to((BigDecimal) value);
break;
case STRING:
builder.set(fieldName).to((String) value);
break;
case JSON:
builder.set(fieldName).to(Value.json((String) value));
break;
case BYTES:
builder.set(fieldName).to((ByteArray) value);
break;
case TIMESTAMP:
builder.set(fieldName).to((Timestamp) value);
break;
case DATE:
builder.set(fieldName).to((Date) value);
break;
case ARRAY:
switch (fieldType.getArrayElementType().getCode()) {
case BOOL:
builder.set(fieldName).toBoolArray((Iterable<Boolean>) value);
break;
case INT64:
builder.set(fieldName).toInt64Array((Iterable<Long>) value);
break;
case FLOAT64:
builder.set(fieldName).toFloat64Array((Iterable<Double>) value);
break;
case NUMERIC:
builder.set(fieldName).toNumericArray((Iterable<BigDecimal>) value);
break;
case STRING:
builder.set(fieldName).toStringArray((Iterable<String>) value);
break;
case JSON:
builder.set(fieldName).toJsonArray((Iterable<String>) value);
break;
case BYTES:
builder.set(fieldName).toBytesArray((Iterable<ByteArray>) value);
break;
case TIMESTAMP:
builder.set(fieldName).toTimestampArray((Iterable<Timestamp>) value);
break;
case DATE:
builder.set(fieldName).toDateArray((Iterable<Date>) value);
break;
case STRUCT:
builder
.set(fieldName)
.toStructArray(fieldType.getArrayElementType(), (Iterable<Struct>) value);
break;
default:
throw new AssertionError(
"Unhandled array type code: " + fieldType.getArrayElementType());
}
break;
case STRUCT:
if (value == null) {
builder.set(fieldName).to(fieldType, null);
} else {
builder.set(fieldName).to((Struct) value);
}
break;
default:
throw new AssertionError("Unhandled type code: " + fieldType.getCode());
}
}
return builder.build();
}
GrpcStruct(Type type, List<Object> rowData) {
this.type = type;
this.rowData = rowData;
}
@Override
public String toString() {
return this.rowData.toString();
}
boolean consumeRow(Iterator<com.google.protobuf.Value> iterator) {
rowData.clear();
if (!iterator.hasNext()) {
return false;
}
for (Type.StructField fieldType : getType().getStructFields()) {
if (!iterator.hasNext()) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Invalid value stream: end of stream reached before row is complete");
}
com.google.protobuf.Value value = iterator.next();
rowData.add(decodeValue(fieldType.getType(), value));
}
return true;
}
private static Object decodeValue(Type fieldType, com.google.protobuf.Value proto) {
if (proto.getKindCase() == KindCase.NULL_VALUE) {
return null;
}
switch (fieldType.getCode()) {
case BOOL:
checkType(fieldType, proto, KindCase.BOOL_VALUE);
return proto.getBoolValue();
case INT64:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return Long.parseLong(proto.getStringValue());
case FLOAT64:
return valueProtoToFloat64(proto);
case NUMERIC:
return new BigDecimal(proto.getStringValue());
case STRING:
case JSON:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return proto.getStringValue();
case BYTES:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return ByteArray.fromBase64(proto.getStringValue());
case TIMESTAMP:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return Timestamp.parseTimestamp(proto.getStringValue());
case DATE:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return Date.parseDate(proto.getStringValue());
case ARRAY:
checkType(fieldType, proto, KindCase.LIST_VALUE);
ListValue listValue = proto.getListValue();
return decodeArrayValue(fieldType.getArrayElementType(), listValue);
case STRUCT:
checkType(fieldType, proto, KindCase.LIST_VALUE);
ListValue structValue = proto.getListValue();
return decodeStructValue(fieldType, structValue);
default:
throw new AssertionError("Unhandled type code: " + fieldType.getCode());
}
}
private static Struct decodeStructValue(Type structType, ListValue structValue) {
List<Type.StructField> fieldTypes = structType.getStructFields();
checkArgument(
structValue.getValuesCount() == fieldTypes.size(),
"Size mismatch between type descriptor and actual values.");
List<Object> fields = new ArrayList<>(fieldTypes.size());
List<com.google.protobuf.Value> fieldValues = structValue.getValuesList();
for (int i = 0; i < fieldTypes.size(); ++i) {
fields.add(decodeValue(fieldTypes.get(i).getType(), fieldValues.get(i)));
}
return new GrpcStruct(structType, fields);
}
static Object decodeArrayValue(Type elementType, ListValue listValue) {
switch (elementType.getCode()) {
case BOOL:
// Use a view: element conversion is virtually free.
return Lists.transform(
listValue.getValuesList(),
input -> input.getKindCase() == KindCase.NULL_VALUE ? null : input.getBoolValue());
case INT64:
// For int64/float64 types, use custom containers. These avoid wrapper object
// creation for non-null arrays.
return new Int64Array(listValue);
case FLOAT64:
return new Float64Array(listValue);
case NUMERIC:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: new BigDecimal(value.getStringValue()));
}
return list;
}
case STRING:
case JSON:
return Lists.transform(
listValue.getValuesList(),
input -> input.getKindCase() == KindCase.NULL_VALUE ? null : input.getStringValue());
case BYTES:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: ByteArray.fromBase64(value.getStringValue()));
}
return list;
}
case TIMESTAMP:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: Timestamp.parseTimestamp(value.getStringValue()));
}
return list;
}
case DATE:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: Date.parseDate(value.getStringValue()));
}
return list;
}
case STRUCT:
{
ArrayList<Struct> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
if (value.getKindCase() == KindCase.NULL_VALUE) {
list.add(null);
} else {
ListValue structValue = value.getListValue();
list.add(decodeStructValue(elementType, structValue));
}
}
return list;
}
default:
throw new AssertionError("Unhandled type code: " + elementType.getCode());
}
}
private static void checkType(
Type fieldType, com.google.protobuf.Value proto, KindCase expected) {
if (proto.getKindCase() != expected) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Invalid value for column type "
+ fieldType
+ " expected "
+ expected
+ " but was "
+ proto.getKindCase());
}
}
Struct immutableCopy() {
return new GrpcStruct(type, new ArrayList<>(rowData));
}
@Override
public Type getType() {
return type;
}
@Override
public boolean isNull(int columnIndex) {
return rowData.get(columnIndex) == null;
}
@Override
protected boolean getBooleanInternal(int columnIndex) {
return (Boolean) rowData.get(columnIndex);
}
@Override
protected long getLongInternal(int columnIndex) {
return (Long) rowData.get(columnIndex);
}
@Override
protected double getDoubleInternal(int columnIndex) {
return (Double) rowData.get(columnIndex);
}
@Override
protected BigDecimal getBigDecimalInternal(int columnIndex) {
return (BigDecimal) rowData.get(columnIndex);
}
@Override
protected String getStringInternal(int columnIndex) {
return (String) rowData.get(columnIndex);
}
@Override
protected String getJsonInternal(int columnIndex) {
return (String) rowData.get(columnIndex);
}
@Override
protected ByteArray getBytesInternal(int columnIndex) {
return (ByteArray) rowData.get(columnIndex);
}
@Override
protected Timestamp getTimestampInternal(int columnIndex) {
return (Timestamp) rowData.get(columnIndex);
}
@Override
protected Date getDateInternal(int columnIndex) {
return (Date) rowData.get(columnIndex);
}
@Override
protected Value getValueInternal(int columnIndex) {
final List<Type.StructField> structFields = getType().getStructFields();
final StructField structField = structFields.get(columnIndex);
final Type columnType = structField.getType();
final boolean isNull = rowData.get(columnIndex) == null;
switch (columnType.getCode()) {
case BOOL:
return Value.bool(isNull ? null : getBooleanInternal(columnIndex));
case INT64:
return Value.int64(isNull ? null : getLongInternal(columnIndex));
case NUMERIC:
return Value.numeric(isNull ? null : getBigDecimalInternal(columnIndex));
case FLOAT64:
return Value.float64(isNull ? null : getDoubleInternal(columnIndex));
case STRING:
return Value.string(isNull ? null : getStringInternal(columnIndex));
case BYTES:
return Value.bytes(isNull ? null : getBytesInternal(columnIndex));
case TIMESTAMP:
return Value.timestamp(isNull ? null : getTimestampInternal(columnIndex));
case DATE:
return Value.date(isNull ? null : getDateInternal(columnIndex));
case STRUCT:
return Value.struct(isNull ? null : getStructInternal(columnIndex));
case ARRAY:
switch (columnType.getArrayElementType().getCode()) {
case BOOL:
return Value.boolArray(isNull ? null : getBooleanListInternal(columnIndex));
case INT64:
return Value.int64Array(isNull ? null : getLongListInternal(columnIndex));
case NUMERIC:
return Value.numericArray(isNull ? null : getBigDecimalListInternal(columnIndex));
case FLOAT64:
return Value.float64Array(isNull ? null : getDoubleListInternal(columnIndex));
case STRING:
return Value.stringArray(isNull ? null : getStringListInternal(columnIndex));
case BYTES:
return Value.bytesArray(isNull ? null : getBytesListInternal(columnIndex));
case TIMESTAMP:
return Value.timestampArray(isNull ? null : getTimestampListInternal(columnIndex));
case DATE:
return Value.dateArray(isNull ? null : getDateListInternal(columnIndex));
case STRUCT:
return Value.structArray(
columnType.getArrayElementType(),
isNull ? null : getStructListInternal(columnIndex));
default:
throw new IllegalArgumentException(
"Invalid array value type " + this.type.getArrayElementType());
}
default:
throw new IllegalArgumentException("Invalid value type " + this.type);
}
}
@Override
protected Struct getStructInternal(int columnIndex) {
return (Struct) rowData.get(columnIndex);
}
@Override
protected boolean[] getBooleanArrayInternal(int columnIndex) {
@SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
List<Boolean> values = (List<Boolean>) rowData.get(columnIndex);
boolean[] r = new boolean[values.size()];
for (int i = 0; i < values.size(); ++i) {
if (values.get(i) == null) {
throw throwNotNull(columnIndex);
}
r[i] = values.get(i);
}
return r;
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
protected List<Boolean> getBooleanListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Boolean>) rowData.get(columnIndex));
}
@Override
protected long[] getLongArrayInternal(int columnIndex) {
return getLongListInternal(columnIndex).toPrimitiveArray(columnIndex);
}
@Override
protected Int64Array getLongListInternal(int columnIndex) {
return (Int64Array) rowData.get(columnIndex);
}
@Override
protected double[] getDoubleArrayInternal(int columnIndex) {
return getDoubleListInternal(columnIndex).toPrimitiveArray(columnIndex);
}
@Override
protected Float64Array getDoubleListInternal(int columnIndex) {
return (Float64Array) rowData.get(columnIndex);
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<NUMERIC> produces a List<BigDecimal>.
protected List<BigDecimal> getBigDecimalListInternal(int columnIndex) {
return (List<BigDecimal>) rowData.get(columnIndex);
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<STRING> produces a List<String>.
protected List<String> getStringListInternal(int columnIndex) {
return Collections.unmodifiableList((List<String>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<String> produces a List<String>.
protected List<String> getJsonListInternal(int columnIndex) {
return Collections.unmodifiableList((List<String>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<BYTES> produces a List<ByteArray>.
protected List<ByteArray> getBytesListInternal(int columnIndex) {
return Collections.unmodifiableList((List<ByteArray>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<TIMESTAMP> produces a List<Timestamp>.
protected List<Timestamp> getTimestampListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Timestamp>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<DATE> produces a List<Date>.
protected List<Date> getDateListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Date>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<STRUCT<...>> produces a List<STRUCT>.
protected List<Struct> getStructListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Struct>) rowData.get(columnIndex));
}
}
@VisibleForTesting
interface CloseableIterator<T> extends Iterator<T> {
/**
* Closes the iterator, freeing any underlying resources.
*
* @param message a message to include in the final RPC status
*/
void close(@Nullable String message);
boolean isWithBeginTransaction();
}
/** Adapts a streaming read/query call into an iterator over partial result sets. */
@VisibleForTesting
static class GrpcStreamIterator extends AbstractIterator<PartialResultSet>
implements CloseableIterator<PartialResultSet> {
private static final Logger logger = Logger.getLogger(GrpcStreamIterator.class.getName());
private static final PartialResultSet END_OF_STREAM = PartialResultSet.newBuilder().build();
private final ConsumerImpl consumer = new ConsumerImpl();
private final BlockingQueue<PartialResultSet> stream;
private final Statement statement;
private SpannerRpc.StreamingCall call;
private volatile boolean withBeginTransaction;
private SpannerException error;
@VisibleForTesting
GrpcStreamIterator(int prefetchChunks) {
this(null, prefetchChunks);
}
@VisibleForTesting
GrpcStreamIterator(Statement statement, int prefetchChunks) {
this.statement = statement;
// One extra to allow for END_OF_STREAM message.
this.stream = new LinkedBlockingQueue<>(prefetchChunks + 1);
}
protected final SpannerRpc.ResultStreamConsumer consumer() {
return consumer;
}
public void setCall(SpannerRpc.StreamingCall call, boolean withBeginTransaction) {
this.call = call;
this.withBeginTransaction = withBeginTransaction;
}
@Override
public void close(@Nullable String message) {
if (call != null) {
call.cancel(message);
}
}
@Override
public boolean isWithBeginTransaction() {
return withBeginTransaction;
}
@Override
protected final PartialResultSet computeNext() {
PartialResultSet next;
try {
// TODO: Ideally honor io.grpc.Context while blocking here. In practice,
// cancellation/deadline results in an error being delivered to "stream", which
// should mean that we do not block significantly longer afterwards, but it would
// be more robust to use poll() with a timeout.
next = stream.take();
} catch (InterruptedException e) {
// Treat interrupt as a request to cancel the read.
throw SpannerExceptionFactory.propagateInterrupt(e);
}
if (next != END_OF_STREAM) {
call.request(1);
return next;
}
// All done - close() no longer needs to cancel the call.
call = null;
if (error != null) {
throw SpannerExceptionFactory.newSpannerException(error);
}
endOfData();
return null;
}
private void addToStream(PartialResultSet results) {
// We assume that nothing from the user will interrupt gRPC event threads.
Uninterruptibles.putUninterruptibly(stream, results);
}
private class ConsumerImpl implements SpannerRpc.ResultStreamConsumer {
@Override
public void onPartialResultSet(PartialResultSet results) {
addToStream(results);
}
@Override
public void onCompleted() {
addToStream(END_OF_STREAM);
}
@Override
public void onError(SpannerException e) {
if (statement != null) {
if (logger.isLoggable(Level.FINEST)) {
// Include parameter values if logging level is set to FINEST or higher.
e =
SpannerExceptionFactory.newSpannerExceptionPreformatted(
e.getErrorCode(),
String.format("%s - Statement: '%s'", e.getMessage(), statement.toString()),
e);
logger.log(Level.FINEST, "Error executing statement", e);
} else {
e =
SpannerExceptionFactory.newSpannerExceptionPreformatted(
e.getErrorCode(),
String.format("%s - Statement: '%s'", e.getMessage(), statement.getSql()),
e);
}
}
error = e;
addToStream(END_OF_STREAM);
}
}
}
/**
* Wraps an iterator over partial result sets, supporting resuming RPCs on error. This class keeps
* track of the most recent resume token seen, and will buffer partial result set chunks that do
* not have a resume token until one is seen or buffer space is exceeded, which reduces the chance
* of yielding data to the caller that cannot be resumed.
*/
@VisibleForTesting
abstract static class ResumableStreamIterator extends AbstractIterator<PartialResultSet>
implements CloseableIterator<PartialResultSet> {
private static final RetrySettings STREAMING_RETRY_SETTINGS =
SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings();
private static final Logger logger = Logger.getLogger(ResumableStreamIterator.class.getName());
private final BackOff backOff = newBackOff();
private final LinkedList<PartialResultSet> buffer = new LinkedList<>();
private final int maxBufferSize;
private final Span span;
private CloseableIterator<PartialResultSet> stream;
private ByteString resumeToken;
private boolean finished;
/**
* Indicates whether it is currently safe to retry RPCs. This will be {@code false} if we have
* reached the maximum buffer size without seeing a restart token; in this case, we will drain
* the buffer and remain in this state until we see a new restart token.
*/
private boolean safeToRetry = true;
protected ResumableStreamIterator(int maxBufferSize, String streamName, Span parent) {
checkArgument(maxBufferSize >= 0);
this.maxBufferSize = maxBufferSize;
this.span = tracer.spanBuilderWithExplicitParent(streamName, parent).startSpan();
}
private static ExponentialBackOff newBackOff() {
return new ExponentialBackOff.Builder()
.setMultiplier(STREAMING_RETRY_SETTINGS.getRetryDelayMultiplier())
.setInitialIntervalMillis(
Math.max(10, (int) STREAMING_RETRY_SETTINGS.getInitialRetryDelay().toMillis()))
.setMaxIntervalMillis(
Math.max(1000, (int) STREAMING_RETRY_SETTINGS.getMaxRetryDelay().toMillis()))
.setMaxElapsedTimeMillis(Integer.MAX_VALUE) // Prevent Backoff.STOP from getting returned.
.build();
}
private static void backoffSleep(Context context, BackOff backoff) throws SpannerException {
backoffSleep(context, nextBackOffMillis(backoff));
}
private static long nextBackOffMillis(BackOff backoff) throws SpannerException {
try {
return backoff.nextBackOffMillis();
} catch (IOException e) {
throw newSpannerException(ErrorCode.INTERNAL, e.getMessage(), e);
}
}
private static void backoffSleep(Context context, long backoffMillis) throws SpannerException {
tracer
.getCurrentSpan()
.addAnnotation(
"Backing off",
ImmutableMap.of("Delay", AttributeValue.longAttributeValue(backoffMillis)));
final CountDownLatch latch = new CountDownLatch(1);
final Context.CancellationListener listener =
ignored -> {
// Wakeup on cancellation / DEADLINE_EXCEEDED.
latch.countDown();
};
context.addListener(listener, DirectExecutor.INSTANCE);
try {
if (backoffMillis == BackOff.STOP) {
// Highly unlikely but we handle it just in case.
backoffMillis = STREAMING_RETRY_SETTINGS.getMaxRetryDelay().toMillis();
}
if (latch.await(backoffMillis, TimeUnit.MILLISECONDS)) {
// Woken by context cancellation.
throw newSpannerExceptionForCancellation(context, null);
}
} catch (InterruptedException interruptExcept) {
throw newSpannerExceptionForCancellation(context, interruptExcept);
} finally {
context.removeListener(listener);
}
}
private enum DirectExecutor implements Executor {
INSTANCE;
@Override
public void execute(Runnable command) {
command.run();
}
}
abstract CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken);
@Override
public void close(@Nullable String message) {
if (stream != null) {
stream.close(message);
span.end(TraceUtil.END_SPAN_OPTIONS);
stream = null;
}
}
@Override
public boolean isWithBeginTransaction() {
return stream != null && stream.isWithBeginTransaction();
}
@Override
protected PartialResultSet computeNext() {
Context context = Context.current();
while (true) {
// Eagerly start stream before consuming any buffered items.
if (stream == null) {
span.addAnnotation(
"Starting/Resuming stream",
ImmutableMap.of(
"ResumeToken",
AttributeValue.stringAttributeValue(
resumeToken == null ? "null" : resumeToken.toStringUtf8())));
try (Scope s = tracer.withSpan(span)) {
// When start a new stream set the Span as current to make the gRPC Span a child of
// this Span.
stream = checkNotNull(startStream(resumeToken));
}
}
// Buffer contains items up to a resume token or has reached capacity: flush.
if (!buffer.isEmpty()
&& (finished || !safeToRetry || !buffer.getLast().getResumeToken().isEmpty())) {
return buffer.pop();
}
try {
if (stream.hasNext()) {
PartialResultSet next = stream.next();
boolean hasResumeToken = !next.getResumeToken().isEmpty();
if (hasResumeToken) {
resumeToken = next.getResumeToken();
safeToRetry = true;
}
// If the buffer is empty and this chunk has a resume token or we cannot resume safely
// anyway, we can yield it immediately rather than placing it in the buffer to be
// returned on the next iteration.
if ((hasResumeToken || !safeToRetry) && buffer.isEmpty()) {
return next;
}
buffer.add(next);
if (buffer.size() > maxBufferSize && buffer.getLast().getResumeToken().isEmpty()) {
// We need to flush without a restart token. Errors encountered until we see
// such a token will fail the read.
safeToRetry = false;
}
} else {
finished = true;
if (buffer.isEmpty()) {
endOfData();
return null;
}
}
} catch (SpannerException e) {
if (safeToRetry && e.isRetryable()) {
span.addAnnotation(
"Stream broken. Safe to retry", TraceUtil.getExceptionAnnotations(e));
logger.log(Level.FINE, "Retryable exception, will sleep and retry", e);
// Truncate any items in the buffer before the last retry token.
while (!buffer.isEmpty() && buffer.getLast().getResumeToken().isEmpty()) {
buffer.removeLast();
}
assert buffer.isEmpty() || buffer.getLast().getResumeToken().equals(resumeToken);
stream = null;
try (Scope s = tracer.withSpan(span)) {
long delay = e.getRetryDelayInMillis();
if (delay != -1) {
backoffSleep(context, delay);
} else {
backoffSleep(context, backOff);
}
}
continue;
}
span.addAnnotation("Stream broken. Not safe to retry");
TraceUtil.setWithFailure(span, e);
throw e;
} catch (RuntimeException e) {
span.addAnnotation("Stream broken. Not safe to retry");
TraceUtil.setWithFailure(span, e);
throw e;
}
}
}
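    /*
     * Illustrative walk-through of the buffering above (added commentary, not part of the
     * original source): with maxBufferSize = 2 and chunks c1, c2, c3 where only c3 carries a
     * resume token, c1 and c2 wait in the buffer until c3 arrives, after which the buffer is
     * flushed and a later retry can resume from c3's token. If a third token-less chunk arrives
     * instead, the buffer exceeds maxBufferSize, safeToRetry flips to false, chunks are yielded
     * directly, and any stream error before the next token fails the read.
     */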
}
static double valueProtoToFloat64(com.google.protobuf.Value proto) {
if (proto.getKindCase() == KindCase.STRING_VALUE) {
switch (proto.getStringValue()) {
case "-Infinity":
return Double.NEGATIVE_INFINITY;
case "Infinity":
return Double.POSITIVE_INFINITY;
case "NaN":
return Double.NaN;
default:
// Fall-through to handling below to produce an error.
}
}
if (proto.getKindCase() != KindCase.NUMBER_VALUE) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Invalid value for column type "
+ Type.float64()
+ " expected NUMBER_VALUE or STRING_VALUE with value one of"
+ " \"Infinity\", \"-Infinity\", or \"NaN\" but was "
+ proto.getKindCase()
+ (proto.getKindCase() == KindCase.STRING_VALUE
? " with value \"" + proto.getStringValue() + "\""
: ""));
}
return proto.getNumberValue();
}
static NullPointerException throwNotNull(int columnIndex) {
throw new NullPointerException(
"Cannot call array getter for column " + columnIndex + " with null elements");
}
/**
* Memory-optimized base class for {@code ARRAY<INT64>} and {@code ARRAY<FLOAT64>} types. Both of
* these involve conversions from the type yielded by JSON parsing, which are {@code String} and
* {@code BigDecimal} respectively. Rather than construct new wrapper objects for each array
* element, we use primitive arrays and a {@code BitSet} to track nulls.
*/
abstract static class PrimitiveArray<T, A> extends AbstractList<T> {
private final A data;
private final BitSet nulls;
private final int size;
PrimitiveArray(ListValue protoList) {
this.size = protoList.getValuesCount();
A data = newArray(size);
BitSet nulls = new BitSet(size);
for (int i = 0; i < protoList.getValuesCount(); ++i) {
if (protoList.getValues(i).getKindCase() == KindCase.NULL_VALUE) {
nulls.set(i);
} else {
setProto(data, i, protoList.getValues(i));
}
}
this.data = data;
this.nulls = nulls;
}
PrimitiveArray(A data, BitSet nulls, int size) {
this.data = data;
this.nulls = nulls;
this.size = size;
}
abstract A newArray(int size);
abstract void setProto(A array, int i, com.google.protobuf.Value protoValue);
abstract T get(A array, int i);
@Override
public T get(int index) {
if (index < 0 || index >= size) {
throw new ArrayIndexOutOfBoundsException("index=" + index + " size=" + size);
}
return nulls.get(index) ? null : get(data, index);
}
@Override
public int size() {
return size;
}
A toPrimitiveArray(int columnIndex) {
if (nulls.length() > 0) {
throw throwNotNull(columnIndex);
}
A r = newArray(size);
System.arraycopy(data, 0, r, 0, size);
return r;
}
}
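  /*
   * Illustrative example (added commentary, not from the original source): for a proto list
   * [1, null, 3], the Int64Array below ends up with data = {1, 0, 3} and bit 1 set in 'nulls',
   * so get(1) returns null while get(0) and get(2) box the stored primitives. toPrimitiveArray()
   * throws a NullPointerException in that case because the null mask is non-empty.
   */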
static class Int64Array extends PrimitiveArray<Long, long[]> {
Int64Array(ListValue protoList) {
super(protoList);
}
Int64Array(long[] data, BitSet nulls) {
super(data, nulls, data.length);
}
@Override
long[] newArray(int size) {
return new long[size];
}
@Override
void setProto(long[] array, int i, com.google.protobuf.Value protoValue) {
array[i] = Long.parseLong(protoValue.getStringValue());
}
@Override
Long get(long[] array, int i) {
return array[i];
}
}
static class Float64Array extends PrimitiveArray<Double, double[]> {
Float64Array(ListValue protoList) {
super(protoList);
}
Float64Array(double[] data, BitSet nulls) {
super(data, nulls, data.length);
}
@Override
double[] newArray(int size) {
return new double[size];
}
@Override
void setProto(double[] array, int i, com.google.protobuf.Value protoValue) {
array[i] = valueProtoToFloat64(protoValue);
}
@Override
Double get(double[] array, int i) {
return array[i];
}
}
protected abstract GrpcStruct currRow();
@Override
public Struct getCurrentRowAsStruct() {
return currRow().immutableCopy();
}
@Override
protected boolean getBooleanInternal(int columnIndex) {
return currRow().getBooleanInternal(columnIndex);
}
@Override
protected long getLongInternal(int columnIndex) {
return currRow().getLongInternal(columnIndex);
}
@Override
protected double getDoubleInternal(int columnIndex) {
return currRow().getDoubleInternal(columnIndex);
}
@Override
protected BigDecimal getBigDecimalInternal(int columnIndex) {
return currRow().getBigDecimalInternal(columnIndex);
}
@Override
protected String getStringInternal(int columnIndex) {
return currRow().getStringInternal(columnIndex);
}
@Override
protected String getJsonInternal(int columnIndex) {
return currRow().getJsonInternal(columnIndex);
}
@Override
protected ByteArray getBytesInternal(int columnIndex) {
return currRow().getBytesInternal(columnIndex);
}
@Override
protected Timestamp getTimestampInternal(int columnIndex) {
return currRow().getTimestampInternal(columnIndex);
}
@Override
protected Date getDateInternal(int columnIndex) {
return currRow().getDateInternal(columnIndex);
}
@Override
protected Value getValueInternal(int columnIndex) {
return currRow().getValueInternal(columnIndex);
}
@Override
protected boolean[] getBooleanArrayInternal(int columnIndex) {
return currRow().getBooleanArrayInternal(columnIndex);
}
@Override
protected List<Boolean> getBooleanListInternal(int columnIndex) {
return currRow().getBooleanListInternal(columnIndex);
}
@Override
protected long[] getLongArrayInternal(int columnIndex) {
return currRow().getLongArrayInternal(columnIndex);
}
@Override
protected List<Long> getLongListInternal(int columnIndex) {
return currRow().getLongListInternal(columnIndex);
}
@Override
protected double[] getDoubleArrayInternal(int columnIndex) {
return currRow().getDoubleArrayInternal(columnIndex);
}
@Override
protected List<Double> getDoubleListInternal(int columnIndex) {
return currRow().getDoubleListInternal(columnIndex);
}
@Override
protected List<BigDecimal> getBigDecimalListInternal(int columnIndex) {
return currRow().getBigDecimalListInternal(columnIndex);
}
@Override
protected List<String> getStringListInternal(int columnIndex) {
return currRow().getStringListInternal(columnIndex);
}
@Override
protected List<String> getJsonListInternal(int columnIndex) {
return currRow().getJsonListInternal(columnIndex);
}
@Override
protected List<ByteArray> getBytesListInternal(int columnIndex) {
return currRow().getBytesListInternal(columnIndex);
}
@Override
protected List<Timestamp> getTimestampListInternal(int columnIndex) {
return currRow().getTimestampListInternal(columnIndex);
}
@Override
protected List<Date> getDateListInternal(int columnIndex) {
return currRow().getDateListInternal(columnIndex);
}
@Override
protected List<Struct> getStructListInternal(int columnIndex) {
return currRow().getStructListInternal(columnIndex);
}
@Override
public boolean isNull(int columnIndex) {
return currRow().isNull(columnIndex);
}
}
| looker-open-source/java-spanner | google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractResultSet.java | Java | apache-2.0 | 50,481 |
/*
* Copyright 2010 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.collect.ImmutableSet;
import com.google.javascript.jscomp.NodeTraversal.AbstractPostOrderCallback;
import com.google.javascript.jscomp.SpecializeModule.SpecializationState;
import com.google.javascript.rhino.Node;
/**
* Tests for {@link SpecializeModule}.
*
* @author dcc@google.com (Devin Coughlin)
*/
public class SpecializeModuleTest extends CompilerTestCase {
private static final String SHARED_EXTERNS = "var alert = function() {}";
public SpecializeModuleTest() {
super(SHARED_EXTERNS);
}
private PassFactory inlineFunctions =
new PassFactory("inlineFunctions", true) {
@Override
protected CompilerPass createInternal(AbstractCompiler compiler) {
return new InlineFunctions(compiler,
compiler.getUniqueNameIdSupplier(), true, false, true);
}
};
private PassFactory removeUnusedPrototypeProperties =
new PassFactory("removeUnusedPrototypeProperties", true) {
@Override
protected CompilerPass createInternal(AbstractCompiler compiler) {
return new RemoveUnusedPrototypeProperties(compiler, false, false);
}
};
private PassFactory devirtualizePrototypeMethods =
new PassFactory("devirtualizePrototypeMethods", true) {
@Override
protected CompilerPass createInternal(AbstractCompiler compiler) {
return new DevirtualizePrototypeMethods(compiler);
}
};
@Override
protected CompilerPass getProcessor(final Compiler compiler) {
final SpecializeModule specializeModule = new SpecializeModule(compiler,
devirtualizePrototypeMethods, inlineFunctions,
removeUnusedPrototypeProperties);
return new CompilerPass() {
public void process(Node externs, Node root) {
specializeModule.process(externs, root);
/* Make sure variables are declared before used */
new VarCheck(compiler).process(externs, root);
}
};
}
@Override
public void setUp() throws Exception {
super.setUp();
enableNormalize();
}
public void testSpecializeInline() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B());A()};" +
"var B = function() {return 6};" +
"A();",
// m2
"A();" +
"B();" +
"B = function() {return 7};" +
"A();" +
"B();"
);
test(modules, new String[] {
// m1
"var A = function() {alert(6);A()};" + /* Specialized A */
"A();" +
"var B;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"A();" +
"B();" +
"B = function() {return 7};" +
"A();" +
"B();"
});
}
public void testSpecializeCascadedInline() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B());A()};" +
"var B = function() {return C()};" +
"var C = function() {return 6};" +
"A();",
// m2
"B = function() {return 7};" +
"A();");
test(modules, new String[] {
// m1
"var A = function() {alert(6);A()};" + /* Specialized A */
"A();" +
"var B, C;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return C()};" + /* Removed from m1, so add to m2 */
"C = function() {return 6};" + /* Removed from m1, so add to m2 */
"B = function() {return 7};" +
"A();"
});
}
public void testSpecializeInlineWithMultipleDependents() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B());A()};" +
"var B = function() {return 6};" +
"A();",
// m2
"B = function() {return 7};" +
"A();",
// m3
"A();"
);
test(modules, new String[] {
// m1
"var A = function() {alert(6);A()};" + /* Specialized A */
"A();" +
"var B;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"B = function() {return 7};" +
"A();",
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"A();",
});
}
public void testSpecializeInlineWithNamespaces() {
JSModule[] modules = createModuleStar(
// m1
"var ns = {};" +
/* Recursion in A() prevents inline of A*/
"ns.A = function() {alert(B());ns.A()};" +
"var B = function() {return 6};" +
"ns.A();",
// m2
"B = function() {return 7};" +
"ns.A();");
test(modules, new String[] {
// m1
"var ns = {};" +
"ns.A = function() {alert(6);ns.A()};" + /* Specialized A */
"ns.A();" +
"var B;",
// m2
"ns.A = function() {alert(B());ns.A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"B = function() {return 7};" +
"ns.A();"
});
}
public void testSpecializeInlineWithRegularFunctions() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"function A() {alert(B());A()}" +
"function B() {return 6}" +
"A();",
// m2
"B = function() {return 7};" +
"A();");
test(modules, new String[] {
// m1
"function A() {alert(6);A()}" + /* Specialized A */
"A();" +
"var B;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
/* Start of original m2 */
"B = function() {return 7};" +
"A();"
});
}
public void testDontSpecializeLocalNonAnonymousFunctions() {
/* normalize result, but not expected */
enableNormalize(false);
JSModule[] modules = createModuleStar(
// m1
"(function(){var noSpecialize = " +
"function() {alert(6)};noSpecialize()})()",
// m2
"");
test(modules, new String[] {
// m1
"(function(){var noSpecialize = " +
"function() {alert(6)};noSpecialize()})()",
// m2
""
});
}
public void testAddDummyVarsForRemovedFunctions() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B() + C());A()};" +
"var B = function() {return 6};" +
"var C = function() {return 8};" +
"A();",
// m2
"" +
"A();");
test(modules, new String[] {
// m1
"var A = function() {alert(6 + 8);A()};" + /* Specialized A */
"A();" +
"var B, C;",
// m2
"A = function() {alert(B() + C());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"C = function() {return 8};" + /* Removed from m1, so add to m2 */
"A();"
});
}
public void testSpecializeRemoveUnusedProperties() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"Foo.prototype.b = function() {return 6};" +
"Foo.prototype.c = function() {return 7};" +
"var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
"var x = new Foo();" +
"x.a();",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"var aliasA = Foo.prototype.a;" +
"var x = new Foo();" +
"x.a();",
// m2
"Foo.prototype.b = function() {return 6};" +
"Foo.prototype.c = function() {return 7};"
});
}
public void testDontSpecializeAliasedFunctions_inline() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"function A() {alert(B());A()}" +
"function B() {return 6}" +
"var aliasA = A;" +
"A();",
// m2
"B = function() {return 7};" +
"B();");
test(modules, new String[] {
// m1
/* Recursion in A() prevents inline of A*/
"function A() {alert(B());A()}" +
"function B() {return 6}" +
"var aliasA = A;" +
"A();",
// m2
"B = function() {return 7};" +
"B();"
});
}
public void testDontSpecializeAliasedFunctions_remove_unused_properties() {
JSModule[] modules = createModuleStar(
// m1
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"Foo.prototype.b = function() {return 6};" +
"var aliasB = Foo.prototype.b;" +
"Foo.prototype.c = function() {return 7};" +
"Foo.prototype.d = function() {return 7};" +
"var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
"var x = new Foo();" +
"x.a();" +
"var aliasC = (new Foo).c",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"Foo.prototype.b = function() {return 6};" +
"var aliasB = Foo.prototype.b;" +
"Foo.prototype.c = function() {return 7};" +
"var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
"var x = new Foo();" +
"x.a();" +
"var aliasC = (new Foo).c",
// m2
"Foo.prototype.d = function() {return 7};"
});
}
public void testSpecializeDevirtualizePrototypeMethods() {
JSModule[] modules = createModuleStar(
// m1
"/** @constructor */" +
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a();return 7};" +
"Foo.prototype.b = function() {this.a()};" +
"var x = new Foo();" +
"x.a();",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"var JSCompiler_StaticMethods_a =" +
"function(JSCompiler_StaticMethods_a$self) {" +
"JSCompiler_StaticMethods_a(JSCompiler_StaticMethods_a$self);" +
"return 7" +
"};" +
"var x = new Foo();" +
"JSCompiler_StaticMethods_a(x);",
// m2
"Foo.prototype.a = function() {this.a();return 7};" +
"Foo.prototype.b = function() {this.a()};"
});
}
public void testSpecializeDevirtualizePrototypeMethodsWithInline() {
JSModule[] modules = createModuleStar(
// m1
"/** @constructor */" +
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {return 7};" +
"var x = new Foo();" +
"var z = x.a();",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"var x = new Foo();" +
"var z = 7;",
// m2
"Foo.prototype.a = function() {return 7};"
});
}
/**
* Tests for {@link SpecializeModule.SpecializationState}.
*/
public static class SpecializeModuleSpecializationStateTest
extends CompilerTestCase {
Compiler lastCompiler;
SpecializationState lastState;
@Override
public CompilerPass getProcessor(final Compiler compiler) {
lastCompiler = compiler;
return new CompilerPass() {
public void process(Node externs, Node root) {
SimpleDefinitionFinder defFinder =
new SimpleDefinitionFinder(compiler);
defFinder.process(externs, root);
SimpleFunctionAliasAnalysis functionAliasAnalysis =
new SimpleFunctionAliasAnalysis();
functionAliasAnalysis.analyze(defFinder);
lastState = new SpecializationState(functionAliasAnalysis);
}
};
}
public void testRemovedFunctions() {
testSame("function F(){}\nvar G = function(a){};");
assertEquals(ImmutableSet.of(), lastState.getRemovedFunctions());
Node functionF = findFunction("F");
lastState.reportRemovedFunction(functionF, functionF.getParent());
assertEquals(ImmutableSet.of(functionF), lastState.getRemovedFunctions());
Node functionG = findFunction("F");
lastState.reportRemovedFunction(functionG, functionF.getParent());
assertEquals(ImmutableSet.of(functionF, functionG),
lastState.getRemovedFunctions());
assertEquals(ImmutableSet.of(), lastState.getSpecializedFunctions());
}
public void testSpecializedFunctions() {
testSame("function F(){}\nvar G = function(a){};");
assertEquals(ImmutableSet.of(), lastState.getSpecializedFunctions());
Node functionF = findFunction("F");
lastState.reportSpecializedFunction(functionF);
assertEquals(ImmutableSet.of(functionF),
lastState.getSpecializedFunctions());
Node functionG = findFunction("F");
lastState.reportSpecializedFunction(functionG);
assertEquals(ImmutableSet.of(functionF, functionG),
lastState.getSpecializedFunctions());
assertEquals(ImmutableSet.of(), lastState.getRemovedFunctions());
}
public void testCanFixupFunction() {
testSame("function F(){}\n" +
"var G = function(a){};\n" +
"var ns = {};" +
"ns.H = function(){};" +
"var ns2 = {I : function anon1(){}};" +
"(function anon2(){})();");
assertTrue(lastState.canFixupFunction(findFunction("F")));
assertTrue(lastState.canFixupFunction(findFunction("G")));
assertTrue(lastState.canFixupFunction(findFunction("ns.H")));
assertFalse(lastState.canFixupFunction(findFunction("anon1")));
assertFalse(lastState.canFixupFunction(findFunction("anon2")));
// Can't guarantee safe fixup for aliased functions
testSame("function A(){}\n" +
"var aliasA = A;\n");
assertFalse(lastState.canFixupFunction(findFunction("A")));
}
private Node findFunction(String name) {
FunctionFinder f = new FunctionFinder(name);
new NodeTraversal(lastCompiler, f).traverse(lastCompiler.jsRoot);
assertNotNull("Couldn't find " + name, f.found);
return f.found;
}
/**
* Quick Traversal to find a given function in the AST.
*/
private class FunctionFinder extends AbstractPostOrderCallback {
Node found = null;
final String target;
FunctionFinder(String target) {
this.target = target;
}
public void visit(NodeTraversal t, Node n, Node parent) {
if (NodeUtil.isFunction(n)
&& target.equals(NodeUtil.getFunctionName(n))) {
found = n;
}
}
}
}
}
| JonathanWalsh/Granule-Closure-Compiler | test/com/google/javascript/jscomp/SpecializeModuleTest.java | Java | apache-2.0 | 16,009 |
package me.tatarka.timesync.lib;
import android.content.Context;
import java.util.Arrays;
/**
 * A class for interacting with a {@link TimeSync}. You can get and set its configuration, and
 * force it to sync immediately. To get an instance of the class for a given {@link TimeSync}, use
* {@link TimeSync#get(android.content.Context, Class)}.
*/
public final class TimeSyncProxy {
private Context context;
private String name;
private TimeSync listener;
TimeSyncProxy(Context context, String name) {
this.context = context;
this.name = name;
listener = TimeSyncParser.parseListeners(context).get(name);
}
/**
     * Syncs immediately. This is useful in response to a user action. Use this sparingly, as
* frequent syncs defeat the purpose of using this library.
*/
public void sync() {
TimeSyncService.sync(context, name);
}
/**
* Syncs sometime in the near future, randomizing per device. This is useful in response to a
* server message, using GCM for example, so that the server is not overwhelmed with all devices
* trying to sync at once.
*/
public void syncInexact() {
TimeSyncService.syncInexact(context, name);
}
/**
* Gets the current configuration for the {@link TimeSync}.
*
* @return the configuration
* @see TimeSync.Config
*/
public TimeSync.Config config() {
return listener.config();
}
/**
* Modifies the current configuration for the {@link TimeSync}.
*
* @param edits the edits
* @see TimeSync#edit(TimeSync.Edit...)
*/
public void edit(Iterable<TimeSync.Edit> edits) {
listener.edit(edits);
TimeSyncService.update(context, name);
}
/**
* Modifies the current configuration for the {@link TimeSync}.
*
* @param edits the edits
* @see TimeSync#edit(TimeSync.Edit...)
*/
public void edit(TimeSync.Edit... edits) {
edit(Arrays.asList(edits));
}
}
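/*
 * Usage sketch (illustrative only; MySync stands in for a hypothetical TimeSync subclass):
 *
 *   TimeSyncProxy proxy = TimeSync.get(context, MySync.class);
 *   proxy.sync();                 // sync right away, e.g. after a user action
 *   proxy.syncInexact();          // sync in the near future, randomized per device
 *   TimeSync.Config config = proxy.config();
 */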
| evant/timesync | lib/src/main/java/me/tatarka/timesync/lib/TimeSyncProxy.java | Java | apache-2.0 | 2,035 |
/**
* jetbrick-template
* http://subchen.github.io/jetbrick-template/
*
* Copyright 2010-2014 Guoqiang Chen. All rights reserved.
* Email: subchen@gmail.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jetbrick.template.resource;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicLong;
import jetbrick.template.utils.UnsafeByteArrayInputStream;
/**
 * A resource that exists in source-code form.
*
* @since 1.1.3
* @author Guoqiang Chen
*/
public class SourceCodeResource extends Resource {
private static final String ENCODING = "utf-8";
private static AtomicLong index = new AtomicLong();
private final String source;
public SourceCodeResource(String source) {
super("/unknown/file." + index.incrementAndGet(), ENCODING);
this.source = source;
}
@Override
public String getAbsolutePath() {
return "(unknown)";
}
@Override
public long lastModified() {
return 0;
}
@Override
public InputStream getInputStream() throws IOException {
return new UnsafeByteArrayInputStream(source.getBytes(ENCODING));
}
@Override
public char[] getSource() {
return source.toCharArray();
}
@Override
public char[] getSource(String encoding) {
return source.toCharArray();
}
}
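/*
 * Usage sketch (illustrative only): wrapping an in-memory template source as a Resource.
 *
 *   Resource resource = new SourceCodeResource("hello ${name}");
 *   char[] source = resource.getSource();        // the same characters that were passed in
 *   InputStream in = resource.getInputStream();  // the source encoded as UTF-8 bytes
 */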
| subchen/jetbrick-template-1x | src/main/java/jetbrick/template/resource/SourceCodeResource.java | Java | apache-2.0 | 1,866 |
package org.jboss.examples.ticketmonster.model;
import static javax.persistence.CascadeType.ALL;
import static javax.persistence.FetchType.EAGER;
import static javax.persistence.GenerationType.IDENTITY;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.ManyToOne;
import javax.persistence.OneToMany;
import org.hibernate.validator.constraints.NotEmpty;
/**
* <p>
* Represents a single venue
* </p>
*
* @author Shane Bryzak
* @author Pete Muir
*/
/*
* We suppress the warning about not specifying a serialVersionUID, as we are still developing this app, and want the JVM to
* generate the serialVersionUID for us. When we put this app into production, we'll generate and embed the serialVersionUID
*/
@SuppressWarnings("serial")
@Entity
public class Venue implements Serializable {
/* Declaration of fields */
/**
* The synthetic id of the object.
*/
@Id
@GeneratedValue(strategy = IDENTITY)
private Long id;
/**
* <p>
     * The name of the venue.
* </p>
*
* <p>
     * The name of the venue forms its natural identity and cannot be shared between venues.
* </p>
*
* <p>
     * The name must not be null and must be one or more characters; the Bean Validation constraint <code>@NotEmpty</code>
* enforces this.
* </p>
*/
@Column(unique = true)
@NotEmpty
private String name;
/**
* The address of the venue
*/
private Address address = new Address();
/**
* A description of the venue
*/
private String description;
/**
* <p>
* A set of sections in the venue
* </p>
*
* <p>
     * The <code>@OneToMany</code> JPA mapping establishes this relationship. TODO Explain EAGER fetch.
* This relationship is bi-directional (a section knows which venue it is part of), and the <code>mappedBy</code>
     * attribute establishes this. We cascade all persistence operations to the set of sections, so, for example if a venue
     * is removed, then all of its sections will also be removed.
* </p>
*/
@OneToMany(cascade = ALL, fetch = EAGER, mappedBy = "venue")
private Set<Section> sections = new HashSet<Section>();
/**
* The capacity of the venue
*/
private int capacity;
/**
* An optional media item to entice punters to the venue. The <code>@ManyToOne</code> establishes the relationship.
*/
@ManyToOne
private MediaItem mediaItem;
/* Boilerplate getters and setters */
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
public MediaItem getMediaItem() {
return mediaItem;
}
public void setMediaItem(MediaItem description) {
this.mediaItem = description;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Set<Section> getSections() {
return sections;
}
public void setSections(Set<Section> sections) {
this.sections = sections;
}
public int getCapacity() {
return capacity;
}
public void setCapacity(int capacity) {
this.capacity = capacity;
}
/* toString(), equals() and hashCode() for Venue, using the natural identity of the object */
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
Venue venue = (Venue) o;
if (address != null ? !address.equals(venue.address) : venue.address != null)
return false;
if (name != null ? !name.equals(venue.name) : venue.name != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = name != null ? name.hashCode() : 0;
result = 31 * result + (address != null ? address.hashCode() : 0);
return result;
}
@Override
public String toString() {
return name;
}
}
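/*
 * Illustrative sketch (not part of the original source): the sections association is
 * bi-directional and mapped by Section.venue, so client code is expected to keep both sides
 * consistent (assuming Section exposes a setVenue(...) setter):
 *
 *   Section section = new Section();
 *   section.setVenue(venue);
 *   venue.getSections().add(section);
 */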
| jim-minter/ose3-demos | git/monster/src/main/java/org/jboss/examples/ticketmonster/model/Venue.java | Java | apache-2.0 | 4,603 |
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.hash;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import junit.framework.TestCase;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
/**
* Tests for the MessageDigestHashFunction.
*
* @author Kurt Alfred Kluever
*/
public class MessageDigestHashFunctionTest extends TestCase {
private static final ImmutableSet<String> INPUTS = ImmutableSet.of("", "Z", "foobar");
// From "How Provider Implementations Are Requested and Supplied" from
// http://docs.oracle.com/javase/6/docs/technotes/guides/security/crypto/CryptoSpec.html
// - Some providers may choose to also include alias names.
// - For example, the "SHA-1" algorithm might be referred to as "SHA1".
// - The algorithm name is not case-sensitive.
private static final ImmutableMap<String, HashFunction> ALGORITHMS =
new ImmutableMap.Builder<String, HashFunction>()
.put("MD5", Hashing.md5())
.put("SHA", Hashing.sha1()) // Not the official name, but still works
.put("SHA1", Hashing.sha1()) // Not the official name, but still works
.put("sHa-1", Hashing.sha1()) // Not the official name, but still works
.put("SHA-1", Hashing.sha1())
.put("SHA-256", Hashing.sha256())
.put("SHA-384", Hashing.sha384())
.put("SHA-512", Hashing.sha512())
.build();
public void testHashing() {
for (String stringToTest : INPUTS) {
for (String algorithmToTest : ALGORITHMS.keySet()) {
assertMessageDigestHashing(HashTestUtils.ascii(stringToTest), algorithmToTest);
}
}
}
public void testPutAfterHash() {
Hasher sha1 = Hashing.sha1().newHasher();
assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
.hash()
.toString());
try {
sha1.putInt(42);
fail();
} catch (IllegalStateException expected) {
}
}
public void testHashTwice() {
Hasher sha1 = Hashing.sha1().newHasher();
assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
.hash()
.toString());
try {
HashCode unused = sha1.hash();
fail();
} catch (IllegalStateException expected) {
}
}
public void testToString() {
assertEquals("Hashing.md5()", Hashing.md5().toString());
assertEquals("Hashing.sha1()", Hashing.sha1().toString());
assertEquals("Hashing.sha256()", Hashing.sha256().toString());
assertEquals("Hashing.sha512()", Hashing.sha512().toString());
}
private static void assertMessageDigestHashing(byte[] input, String algorithmName) {
try {
MessageDigest digest = MessageDigest.getInstance(algorithmName);
assertEquals(
HashCode.fromBytes(digest.digest(input)),
ALGORITHMS.get(algorithmName).hashBytes(input));
for (int bytes = 4; bytes <= digest.getDigestLength(); bytes++) {
assertEquals(
HashCode.fromBytes(Arrays.copyOf(digest.digest(input), bytes)),
new MessageDigestHashFunction(algorithmName, bytes, algorithmName).hashBytes(input));
}
try {
int maxSize = digest.getDigestLength();
new MessageDigestHashFunction(algorithmName, maxSize + 1, algorithmName);
fail();
} catch (IllegalArgumentException expected) {
}
} catch (NoSuchAlgorithmException nsae) {
throw new AssertionError(nsae);
}
}
}
| aiyanbo/guava | guava-tests/test/com/google/common/hash/MessageDigestHashFunctionTest.java | Java | apache-2.0 | 4,293 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support.master;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.CapturingTransport;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportService;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
public class TransportMasterNodeActionTests extends ESTestCase {
private static ThreadPool threadPool;
private ClusterService clusterService;
private TransportService transportService;
private CapturingTransport transport;
private DiscoveryNode localNode;
private DiscoveryNode remoteNode;
private DiscoveryNode[] allNodes;
@BeforeClass
public static void beforeClass() {
threadPool = new TestThreadPool("TransportMasterNodeActionTests");
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
localNode = new DiscoveryNode("local_node", DummyTransportAddress.INSTANCE, Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
remoteNode = new DiscoveryNode("remote_node", DummyTransportAddress.INSTANCE, Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
allNodes = new DiscoveryNode[]{localNode, remoteNode};
}
@After
public void tearDown() throws Exception {
super.tearDown();
clusterService.close();
transportService.close();
}
@AfterClass
public static void afterClass() {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
threadPool = null;
}
void assertListenerThrows(String msg, ActionFuture<?> listener, Class<?> klass) throws InterruptedException {
try {
listener.get();
fail(msg);
} catch (ExecutionException ex) {
assertThat(ex.getCause(), instanceOf(klass));
}
}
public static class Request extends MasterNodeRequest<Request> {
@Override
public ActionRequestValidationException validate() {
return null;
}
}
class Response extends ActionResponse {}
class Action extends TransportMasterNodeAction<Request, Response> {
Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, actionName, transportService, clusterService, threadPool,
new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new);
}
@Override
protected void doExecute(Task task, final Request request, ActionListener<Response> listener) {
// remove unneeded threading by wrapping listener with SAME to prevent super.doExecute from wrapping it with LISTENER
super.doExecute(task, request, new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.SAME, listener));
}
@Override
protected String executor() {
// very lightweight operation in memory, no need to fork to a thread
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
listener.onResponse(new Response()); // default implementation, overridden in specific tests
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return null; // default implementation, overridden in specific tests
}
}
public void testLocalOperationWithoutBlocks() throws ExecutionException, InterruptedException {
final boolean masterOperationFailure = randomBoolean();
Request request = new Request();
PlainActionFuture<Response> listener = new PlainActionFuture<>();
final Throwable exception = new Throwable();
final Response response = new Response();
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
if (masterOperationFailure) {
listener.onFailure(exception);
} else {
listener.onResponse(response);
}
}
}.execute(request, listener);
assertTrue(listener.isDone());
if (masterOperationFailure) {
try {
listener.get();
fail("Expected exception but returned proper result");
} catch (ExecutionException ex) {
assertThat(ex.getCause(), equalTo(exception));
}
} else {
assertThat(listener.get(), equalTo(response));
}
}
public void testLocalOperationWithBlocks() throws ExecutionException, InterruptedException {
final boolean retryableBlock = randomBoolean();
final boolean unblockBeforeTimeout = randomBoolean();
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(unblockBeforeTimeout ? 60 : 0));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ClusterBlock block = new ClusterBlock(1, "", retryableBlock, true,
randomFrom(RestStatus.values()), ClusterBlockLevel.ALL);
ClusterState stateWithBlock = ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes))
.blocks(ClusterBlocks.builder().addGlobalBlock(block)).build();
setState(clusterService, stateWithBlock);
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
Set<ClusterBlock> blocks = state.blocks().global();
return blocks.isEmpty() ? null : new ClusterBlockException(blocks);
}
}.execute(request, listener);
if (retryableBlock && unblockBeforeTimeout) {
assertFalse(listener.isDone());
setState(clusterService, ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes))
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build());
assertTrue(listener.isDone());
listener.get();
return;
}
assertTrue(listener.isDone());
if (retryableBlock) {
try {
listener.get();
fail("Expected exception but returned proper result");
} catch (ExecutionException ex) {
assertThat(ex.getCause(), instanceOf(MasterNotDiscoveredException.class));
assertThat(ex.getCause().getCause(), instanceOf(ClusterBlockException.class));
}
} else {
assertListenerThrows("ClusterBlockException should be thrown", listener, ClusterBlockException.class);
}
}
public void testForceLocalOperation() throws ExecutionException, InterruptedException {
Request request = new Request();
PlainActionFuture<Response> listener = new PlainActionFuture<>();
setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(null, localNode, remoteNode), allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected boolean localExecute(Request request) {
return true;
}
}.execute(request, listener);
assertTrue(listener.isDone());
listener.get();
}
public void testMasterNotAvailable() throws ExecutionException, InterruptedException {
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0));
setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertTrue(listener.isDone());
assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class);
}
public void testMasterBecomesAvailable() throws ExecutionException, InterruptedException {
Request request = new Request();
setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertFalse(listener.isDone());
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
assertTrue(listener.isDone());
listener.get();
}
public void testDelegateToMaster() throws ExecutionException, InterruptedException {
Request request = new Request();
setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
Response response = new Response();
transport.handleResponse(capturedRequest.requestId, response);
assertTrue(listener.isDone());
assertThat(listener.get(), equalTo(response));
}
public void testDelegateToFailingMaster() throws ExecutionException, InterruptedException {
boolean failsWithConnectTransportException = randomBoolean();
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(failsWithConnectTransportException ? 60 : 0));
setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
if (failsWithConnectTransportException) {
transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(remoteNode, "Fake error"));
assertFalse(listener.isDone());
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
assertTrue(listener.isDone());
listener.get();
} else {
Throwable t = new Throwable();
transport.handleRemoteError(capturedRequest.requestId, t);
assertTrue(listener.isDone());
try {
listener.get();
fail("Expected exception but returned proper result");
} catch (ExecutionException ex) {
assertThat(ex.getCause().getCause(), equalTo(t));
}
}
}
public void testMasterFailoverAfterStepDown() throws ExecutionException, InterruptedException {
Request request = new Request().masterNodeTimeout(TimeValue.timeValueHours(1));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
final Response response = new Response();
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
                // The other node has become master; simulate failures of this node while publishing cluster state through ZenDiscovery
setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
Throwable failure = randomBoolean()
? new Discovery.FailedToCommitClusterStateException("Fake error")
: new NotMasterException("Fake error");
listener.onFailure(failure);
}
}.execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
transport.handleResponse(capturedRequest.requestId, response);
assertTrue(listener.isDone());
assertThat(listener.get(), equalTo(response));
}
}
| palecur/elasticsearch | core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java | Java | apache-2.0 | 16,875 |
/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ui.components;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.util.registry.Registry;
import com.intellij.openapi.wm.IdeGlassPane;
import com.intellij.ui.IdeBorderFactory;
import com.intellij.util.ArrayUtil;
import com.intellij.util.ReflectionUtil;
import com.intellij.util.ui.ButtonlessScrollBarUI;
import com.intellij.util.ui.JBInsets;
import com.intellij.util.ui.RegionPainter;
import com.intellij.util.ui.UIUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import javax.swing.border.Border;
import javax.swing.border.LineBorder;
import javax.swing.plaf.ScrollBarUI;
import javax.swing.plaf.ScrollPaneUI;
import javax.swing.plaf.UIResource;
import javax.swing.plaf.basic.BasicScrollBarUI;
import javax.swing.plaf.basic.BasicScrollPaneUI;
import java.awt.*;
import java.awt.event.InputEvent;
import java.awt.event.MouseEvent;
import java.awt.event.MouseWheelEvent;
import java.awt.event.MouseWheelListener;
import java.lang.reflect.Field;
public class JBScrollPane extends JScrollPane {
/**
* This key is used to specify which colors should use the scroll bars on the pane.
* If a client property is set to {@code true} the bar's brightness
* will be modified according to the view's background.
*
* @see UIUtil#putClientProperty
* @see UIUtil#isUnderDarcula
*/
public static final Key<Boolean> BRIGHTNESS_FROM_VIEW = Key.create("JB_SCROLL_PANE_BRIGHTNESS_FROM_VIEW");
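  // Usage sketch (illustrative; relies on the UIUtil.putClientProperty helper referenced in the
  // Javadoc above): UIUtil.putClientProperty(scrollPane, JBScrollPane.BRIGHTNESS_FROM_VIEW, true)
  // makes the scroll bars derive their brightness from the background of the viewport's view.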
@Deprecated
public static final RegionPainter<Float> THUMB_PAINTER = ScrollPainter.EditorThumb.DEFAULT;
@Deprecated
public static final RegionPainter<Float> THUMB_DARK_PAINTER = ScrollPainter.EditorThumb.DARCULA;
@Deprecated
public static final RegionPainter<Float> MAC_THUMB_PAINTER = ScrollPainter.EditorThumb.Mac.DEFAULT;
@Deprecated
public static final RegionPainter<Float> MAC_THUMB_DARK_PAINTER = ScrollPainter.EditorThumb.Mac.DARCULA;
private static final Logger LOG = Logger.getInstance(JBScrollPane.class);
private int myViewportBorderWidth = -1;
private boolean myHasOverlayScrollbars;
private volatile boolean myBackgroundRequested; // avoid cyclic references
public JBScrollPane(int viewportWidth) {
init(false);
myViewportBorderWidth = viewportWidth;
updateViewportBorder();
}
public JBScrollPane() {
init();
}
public JBScrollPane(Component view) {
super(view);
init();
}
public JBScrollPane(int vsbPolicy, int hsbPolicy) {
super(vsbPolicy, hsbPolicy);
init();
}
public JBScrollPane(Component view, int vsbPolicy, int hsbPolicy) {
super(view, vsbPolicy, hsbPolicy);
init();
}
@Override
public Color getBackground() {
Color color = super.getBackground();
if (!myBackgroundRequested && EventQueue.isDispatchThread() && Registry.is("ide.scroll.background.auto")) {
if (!isBackgroundSet() || color instanceof UIResource) {
Component child = getViewport();
if (child != null) {
try {
myBackgroundRequested = true;
return child.getBackground();
}
finally {
myBackgroundRequested = false;
}
}
}
}
return color;
}
static Color getViewBackground(JScrollPane pane) {
if (pane == null) return null;
JViewport viewport = pane.getViewport();
if (viewport == null) return null;
Component view = viewport.getView();
if (view == null) return null;
return view.getBackground();
}
public static JScrollPane findScrollPane(Component c) {
if (c == null) return null;
if (!(c instanceof JViewport)) {
Container vp = c.getParent();
if (vp instanceof JViewport) c = vp;
}
c = c.getParent();
if (!(c instanceof JScrollPane)) return null;
return (JScrollPane)c;
}
private void init() {
init(true);
}
private void init(boolean setupCorners) {
setLayout(Registry.is("ide.scroll.new.layout") ? new Layout() : new ScrollPaneLayout());
if (setupCorners) {
setupCorners();
}
}
protected void setupCorners() {
setBorder(IdeBorderFactory.createBorder());
setCorner(UPPER_RIGHT_CORNER, new Corner(UPPER_RIGHT_CORNER));
setCorner(UPPER_LEFT_CORNER, new Corner(UPPER_LEFT_CORNER));
setCorner(LOWER_RIGHT_CORNER, new Corner(LOWER_RIGHT_CORNER));
setCorner(LOWER_LEFT_CORNER, new Corner(LOWER_LEFT_CORNER));
}
@Override
public void setUI(ScrollPaneUI ui) {
super.setUI(ui);
updateViewportBorder();
if (ui instanceof BasicScrollPaneUI) {
try {
Field field = BasicScrollPaneUI.class.getDeclaredField("mouseScrollListener");
field.setAccessible(true);
Object value = field.get(ui);
if (value instanceof MouseWheelListener) {
MouseWheelListener oldListener = (MouseWheelListener)value;
MouseWheelListener newListener = event -> {
if (isScrollEvent(event)) {
Object source = event.getSource();
if (source instanceof JScrollPane) {
JScrollPane pane = (JScrollPane)source;
if (pane.isWheelScrollingEnabled()) {
JScrollBar bar = event.isShiftDown() ? pane.getHorizontalScrollBar() : pane.getVerticalScrollBar();
if (bar != null && bar.isVisible()) oldListener.mouseWheelMoved(event);
}
}
}
};
field.set(ui, newListener);
// replace listener if field updated successfully
removeMouseWheelListener(oldListener);
addMouseWheelListener(newListener);
}
}
catch (Exception exception) {
LOG.warn(exception);
}
}
}
@Override
public boolean isOptimizedDrawingEnabled() {
if (getLayout() instanceof Layout) {
return isOptimizedDrawingEnabledFor(getVerticalScrollBar()) &&
isOptimizedDrawingEnabledFor(getHorizontalScrollBar());
}
return !myHasOverlayScrollbars;
}
/**
* Returns {@code false} for visible translucent scroll bars, or {@code true} otherwise.
   * This is needed so that translucent scroll bars are repainted when the viewport is repainted.
*/
private static boolean isOptimizedDrawingEnabledFor(JScrollBar bar) {
return bar == null || bar.isOpaque() || !bar.isVisible();
}
private void updateViewportBorder() {
if (getViewportBorder() instanceof ViewportBorder) {
setViewportBorder(new ViewportBorder(myViewportBorderWidth >= 0 ? myViewportBorderWidth : 1));
}
}
public static ViewportBorder createIndentBorder() {
return new ViewportBorder(2);
}
@Override
public JScrollBar createVerticalScrollBar() {
return new MyScrollBar(Adjustable.VERTICAL);
}
@NotNull
@Override
public JScrollBar createHorizontalScrollBar() {
return new MyScrollBar(Adjustable.HORIZONTAL);
}
@Override
protected JViewport createViewport() {
return new JBViewport();
}
@SuppressWarnings("deprecation")
@Override
public void layout() {
LayoutManager layout = getLayout();
ScrollPaneLayout scrollLayout = layout instanceof ScrollPaneLayout ? (ScrollPaneLayout)layout : null;
    // Now we let JScrollPane lay out everything as necessary
super.layout();
if (layout instanceof Layout) return;
if (scrollLayout != null) {
// Now it's time to jump in and expand the viewport so it fits the whole area
// (taking into consideration corners, headers and other stuff).
myHasOverlayScrollbars = relayoutScrollbars(
this, scrollLayout,
myHasOverlayScrollbars // If last time we did relayouting, we should restore it back.
);
}
else {
myHasOverlayScrollbars = false;
}
}
private boolean relayoutScrollbars(@NotNull JComponent container, @NotNull ScrollPaneLayout layout, boolean forceRelayout) {
JViewport viewport = layout.getViewport();
if (viewport == null) return false;
JScrollBar vsb = layout.getVerticalScrollBar();
JScrollBar hsb = layout.getHorizontalScrollBar();
JViewport colHead = layout.getColumnHeader();
JViewport rowHead = layout.getRowHeader();
Rectangle viewportBounds = viewport.getBounds();
boolean extendViewportUnderVScrollbar = vsb != null && shouldExtendViewportUnderScrollbar(vsb);
boolean extendViewportUnderHScrollbar = hsb != null && shouldExtendViewportUnderScrollbar(hsb);
boolean hasOverlayScrollbars = extendViewportUnderVScrollbar || extendViewportUnderHScrollbar;
if (!hasOverlayScrollbars && !forceRelayout) return false;
container.setComponentZOrder(viewport, container.getComponentCount() - 1);
if (vsb != null) container.setComponentZOrder(vsb, 0);
if (hsb != null) container.setComponentZOrder(hsb, 0);
if (extendViewportUnderVScrollbar) {
int x2 = Math.max(vsb.getX() + vsb.getWidth(), viewportBounds.x + viewportBounds.width);
viewportBounds.x = Math.min(viewportBounds.x, vsb.getX());
viewportBounds.width = x2 - viewportBounds.x;
}
if (extendViewportUnderHScrollbar) {
int y2 = Math.max(hsb.getY() + hsb.getHeight(), viewportBounds.y + viewportBounds.height);
viewportBounds.y = Math.min(viewportBounds.y, hsb.getY());
viewportBounds.height = y2 - viewportBounds.y;
}
if (extendViewportUnderVScrollbar) {
if (hsb != null) {
Rectangle scrollbarBounds = hsb.getBounds();
scrollbarBounds.width = viewportBounds.x + viewportBounds.width - scrollbarBounds.x;
hsb.setBounds(scrollbarBounds);
}
if (colHead != null) {
Rectangle headerBounds = colHead.getBounds();
headerBounds.width = viewportBounds.width;
colHead.setBounds(headerBounds);
}
hideFromView(layout.getCorner(UPPER_RIGHT_CORNER));
hideFromView(layout.getCorner(LOWER_RIGHT_CORNER));
}
if (extendViewportUnderHScrollbar) {
if (vsb != null) {
Rectangle scrollbarBounds = vsb.getBounds();
scrollbarBounds.height = viewportBounds.y + viewportBounds.height - scrollbarBounds.y;
vsb.setBounds(scrollbarBounds);
}
if (rowHead != null) {
Rectangle headerBounds = rowHead.getBounds();
headerBounds.height = viewportBounds.height;
rowHead.setBounds(headerBounds);
}
hideFromView(layout.getCorner(LOWER_LEFT_CORNER));
hideFromView(layout.getCorner(LOWER_RIGHT_CORNER));
}
viewport.setBounds(viewportBounds);
return hasOverlayScrollbars;
}
private boolean shouldExtendViewportUnderScrollbar(@Nullable JScrollBar scrollbar) {
if (scrollbar == null || !scrollbar.isVisible()) return false;
return isOverlaidScrollbar(scrollbar);
}
protected boolean isOverlaidScrollbar(@Nullable JScrollBar scrollbar) {
if (!ButtonlessScrollBarUI.isMacOverlayScrollbarSupported()) return false;
ScrollBarUI vsbUI = scrollbar == null ? null : scrollbar.getUI();
return vsbUI instanceof ButtonlessScrollBarUI && !((ButtonlessScrollBarUI)vsbUI).alwaysShowTrack();
}
private static void hideFromView(Component component) {
if (component == null) return;
component.setBounds(-10, -10, 1, 1);
}
private class MyScrollBar extends ScrollBar implements IdeGlassPane.TopComponent {
public MyScrollBar(int orientation) {
super(orientation);
}
@Override
public void updateUI() {
ScrollBarUI ui = getUI();
if (ui instanceof DefaultScrollBarUI) return;
setUI(JBScrollBar.createUI(this));
}
@Override
public boolean canBePreprocessed(MouseEvent e) {
return JBScrollPane.canBePreprocessed(e, this);
}
}
public static boolean canBePreprocessed(MouseEvent e, JScrollBar bar) {
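    // Returns false only when a press/move lands on the scroll bar thumb, so thumb dragging is
    // never intercepted by the glass pane; if the thumb bounds cannot be determined, it errs
    // on the side of allowing preprocessing.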
if (e.getID() == MouseEvent.MOUSE_MOVED || e.getID() == MouseEvent.MOUSE_PRESSED) {
ScrollBarUI ui = bar.getUI();
if (ui instanceof BasicScrollBarUI) {
BasicScrollBarUI bui = (BasicScrollBarUI)ui;
try {
Rectangle rect = (Rectangle)ReflectionUtil.getDeclaredMethod(BasicScrollBarUI.class, "getThumbBounds", ArrayUtil.EMPTY_CLASS_ARRAY).invoke(bui);
Point point = SwingUtilities.convertPoint(e.getComponent(), e.getX(), e.getY(), bar);
return !rect.contains(point);
}
catch (Exception e1) {
return true;
}
}
else if (ui instanceof DefaultScrollBarUI) {
DefaultScrollBarUI dui = (DefaultScrollBarUI)ui;
Point point = e.getLocationOnScreen();
SwingUtilities.convertPointFromScreen(point, bar);
return !dui.isThumbContains(point.x, point.y);
}
}
return true;
}
private static class Corner extends JPanel {
private final String myPos;
public Corner(String pos) {
myPos = pos;
ScrollColorProducer.setBackground(this);
ScrollColorProducer.setForeground(this);
}
@Override
protected void paintComponent(Graphics g) {
g.setColor(getBackground());
g.fillRect(0, 0, getWidth(), getHeight());
if (SystemInfo.isMac || !Registry.is("ide.scroll.track.border.paint")) return;
g.setColor(getForeground());
int x2 = getWidth() - 1;
int y2 = getHeight() - 1;
if (myPos == UPPER_LEFT_CORNER || myPos == UPPER_RIGHT_CORNER) {
g.drawLine(0, y2, x2, y2);
}
if (myPos == LOWER_LEFT_CORNER || myPos == LOWER_RIGHT_CORNER) {
g.drawLine(0, 0, x2, 0);
}
if (myPos == UPPER_LEFT_CORNER || myPos == LOWER_LEFT_CORNER) {
g.drawLine(x2, 0, x2, y2);
}
if (myPos == UPPER_RIGHT_CORNER || myPos == LOWER_RIGHT_CORNER) {
g.drawLine(0, 0, 0, y2);
}
}
}
private static class ViewportBorder extends LineBorder {
public ViewportBorder(int thickness) {
super(null, thickness);
}
@Override
public void paintBorder(Component c, Graphics g, int x, int y, int width, int height) {
updateColor(c);
super.paintBorder(c, g, x, y, width, height);
}
private void updateColor(Component c) {
if (!(c instanceof JScrollPane)) return;
lineColor = getViewBackground((JScrollPane)c);
}
}
/**
* These client properties modify a scroll pane layout.
* Use the class object as a property key.
*
* @see #putClientProperty(Object, Object)
*/
public enum Flip {
NONE, VERTICAL, HORIZONTAL, BOTH
}
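  // Usage sketch (illustrative, not part of the original file): the Flip value is read by the
  // internal Layout below, so a caller can request mirrored scroll bar placement, e.g.
  //   scrollPane.putClientProperty(JBScrollPane.Flip.class, JBScrollPane.Flip.HORIZONTAL);
  // Any client property value other than a Flip constant is treated as Flip.NONE.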
  /**
   * These client properties show a component position on a scroll pane.
   * They are set by the internal layout manager of the scroll pane.
   */
public enum Alignment {
TOP, LEFT, RIGHT, BOTTOM;
public static Alignment get(JComponent component) {
if (component != null) {
Object property = component.getClientProperty(Alignment.class);
if (property instanceof Alignment) return (Alignment)property;
Container parent = component.getParent();
if (parent instanceof JScrollPane) {
JScrollPane pane = (JScrollPane)parent;
if (component == pane.getColumnHeader()) {
return TOP;
}
if (component == pane.getHorizontalScrollBar()) {
return BOTTOM;
}
boolean ltr = pane.getComponentOrientation().isLeftToRight();
if (component == pane.getVerticalScrollBar()) {
return ltr ? RIGHT : LEFT;
}
if (component == pane.getRowHeader()) {
return ltr ? LEFT : RIGHT;
}
}
// assume alignment for a scroll bar,
// which is not contained in a scroll pane
if (component instanceof JScrollBar) {
JScrollBar bar = (JScrollBar)component;
switch (bar.getOrientation()) {
case Adjustable.HORIZONTAL:
return BOTTOM;
case Adjustable.VERTICAL:
return bar.getComponentOrientation().isLeftToRight()
? RIGHT
: LEFT;
}
}
}
return null;
}
}
/**
* ScrollPaneLayout implementation that supports
* ScrollBar flipping and non-opaque ScrollBars.
*/
private static class Layout extends ScrollPaneLayout {
private static final Insets EMPTY_INSETS = new Insets(0, 0, 0, 0);
@Override
public void layoutContainer(Container parent) {
JScrollPane pane = (JScrollPane)parent;
// Calculate inner bounds of the scroll pane
Rectangle bounds = new Rectangle(pane.getWidth(), pane.getHeight());
JBInsets.removeFrom(bounds, pane.getInsets());
// Determine positions of scroll bars on the scroll pane
Object property = pane.getClientProperty(Flip.class);
Flip flip = property instanceof Flip ? (Flip)property : Flip.NONE;
boolean hsbOnTop = flip == Flip.BOTH || flip == Flip.VERTICAL;
boolean vsbOnLeft = pane.getComponentOrientation().isLeftToRight()
? flip == Flip.BOTH || flip == Flip.HORIZONTAL
: flip == Flip.NONE || flip == Flip.VERTICAL;
// If there's a visible row header remove the space it needs.
// The row header is treated as if it were fixed width, arbitrary height.
Rectangle rowHeadBounds = new Rectangle(bounds.x, 0, 0, 0);
if (rowHead != null && rowHead.isVisible()) {
rowHeadBounds.width = min(bounds.width, rowHead.getPreferredSize().width);
bounds.width -= rowHeadBounds.width;
if (vsbOnLeft) {
rowHeadBounds.x += bounds.width;
}
else {
bounds.x += rowHeadBounds.width;
}
}
// If there's a visible column header remove the space it needs.
// The column header is treated as if it were fixed height, arbitrary width.
Rectangle colHeadBounds = new Rectangle(0, bounds.y, 0, 0);
if (colHead != null && colHead.isVisible()) {
colHeadBounds.height = min(bounds.height, colHead.getPreferredSize().height);
bounds.height -= colHeadBounds.height;
if (hsbOnTop) {
colHeadBounds.y += bounds.height;
}
else {
bounds.y += colHeadBounds.height;
}
}
// If there's a JScrollPane.viewportBorder, remove the space it occupies
Border border = pane.getViewportBorder();
Insets insets = border == null ? null : border.getBorderInsets(parent);
JBInsets.removeFrom(bounds, insets);
if (insets == null) insets = EMPTY_INSETS;
// At this point:
// colHeadBounds is correct except for its width and x
// rowHeadBounds is correct except for its height and y
// bounds - the space available for the viewport and scroll bars
// Once we're through computing the dimensions of these three parts
// we can go back and set the bounds for the corners and the dimensions of
// colHeadBounds.x, colHeadBounds.width, rowHeadBounds.y, rowHeadBounds.height.
boolean isEmpty = bounds.width < 0 || bounds.height < 0;
Component view = viewport == null ? null : viewport.getView();
Dimension viewPreferredSize = view == null ? new Dimension() : view.getPreferredSize();
if (view instanceof JComponent) JBViewport.fixPreferredSize(viewPreferredSize, (JComponent)view, vsb, hsb);
Dimension viewportExtentSize = viewport == null ? new Dimension() : viewport.toViewCoordinates(bounds.getSize());
      // If the view is tracking the viewport's width, we don't bother with a horizontal scrollbar.
      // If the view is tracking the viewport's height, we don't bother with a vertical scrollbar.
Scrollable scrollable = null;
boolean viewTracksViewportWidth = false;
boolean viewTracksViewportHeight = false;
// Don't bother checking the Scrollable methods if there is no room for the viewport,
// we aren't going to show any scroll bars in this case anyway.
if (!isEmpty && view instanceof Scrollable) {
scrollable = (Scrollable)view;
viewTracksViewportWidth = scrollable.getScrollableTracksViewportWidth();
viewTracksViewportHeight = scrollable.getScrollableTracksViewportHeight();
}
// If there's a vertical scroll bar and we need one, allocate space for it.
// A vertical scroll bar is considered to be fixed width, arbitrary height.
boolean vsbOpaque = false;
boolean vsbNeeded = false;
int vsbPolicy = pane.getVerticalScrollBarPolicy();
if (!isEmpty && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
vsbNeeded = vsbPolicy == VERTICAL_SCROLLBAR_ALWAYS
|| !viewTracksViewportHeight && viewPreferredSize.height > viewportExtentSize.height;
}
Rectangle vsbBounds = new Rectangle(0, bounds.y - insets.top, 0, 0);
if (vsb != null) {
if (!SystemInfo.isMac && view instanceof JTable) vsb.setOpaque(true);
vsbOpaque = vsb.isOpaque();
if (vsbNeeded) {
adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
if (vsbOpaque && viewport != null) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
}
}
}
// If there's a horizontal scroll bar and we need one, allocate space for it.
// A horizontal scroll bar is considered to be fixed height, arbitrary width.
boolean hsbOpaque = false;
boolean hsbNeeded = false;
int hsbPolicy = pane.getHorizontalScrollBarPolicy();
if (!isEmpty && hsbPolicy != HORIZONTAL_SCROLLBAR_NEVER) {
hsbNeeded = hsbPolicy == HORIZONTAL_SCROLLBAR_ALWAYS
|| !viewTracksViewportWidth && viewPreferredSize.width > viewportExtentSize.width;
}
Rectangle hsbBounds = new Rectangle(bounds.x - insets.left, 0, 0, 0);
if (hsb != null) {
if (!SystemInfo.isMac && view instanceof JTable) hsb.setOpaque(true);
hsbOpaque = hsb.isOpaque();
if (hsbNeeded) {
adjustForHSB(bounds, insets, hsbBounds, hsbOpaque, hsbOnTop);
if (hsbOpaque && viewport != null) {
// If we added the horizontal scrollbar and reduced the vertical space
// we may have to add the vertical scrollbar, if that hasn't been done so already.
if (vsb != null && !vsbNeeded && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
vsbNeeded = viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded) adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
}
}
}
// Set the size of the viewport first, and then recheck the Scrollable methods.
// Some components base their return values for the Scrollable methods on the size of the viewport,
      // so if we don't ask again after resetting the bounds, we may be left with the wrong answer.
if (viewport != null) {
viewport.setBounds(bounds);
if (scrollable != null && hsbOpaque && vsbOpaque) {
viewTracksViewportWidth = scrollable.getScrollableTracksViewportWidth();
viewTracksViewportHeight = scrollable.getScrollableTracksViewportHeight();
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
boolean vsbNeededOld = vsbNeeded;
if (vsb != null && vsbPolicy == VERTICAL_SCROLLBAR_AS_NEEDED) {
boolean vsbNeededNew = !viewTracksViewportHeight && viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded != vsbNeededNew) {
vsbNeeded = vsbNeededNew;
if (vsbNeeded) {
adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
else if (vsbOpaque) {
bounds.width += vsbBounds.width;
}
if (vsbOpaque) viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
}
}
boolean hsbNeededOld = hsbNeeded;
if (hsb != null && hsbPolicy == HORIZONTAL_SCROLLBAR_AS_NEEDED) {
boolean hsbNeededNew = !viewTracksViewportWidth && viewPreferredSize.width > viewportExtentSize.width;
if (hsbNeeded != hsbNeededNew) {
hsbNeeded = hsbNeededNew;
if (hsbNeeded) {
adjustForHSB(bounds, insets, hsbBounds, hsbOpaque, hsbOnTop);
}
else if (hsbOpaque) {
bounds.height += hsbBounds.height;
}
if (hsbOpaque && vsb != null && !vsbNeeded && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
vsbNeeded = viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded) adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
}
}
if (hsbNeededOld != hsbNeeded || vsbNeededOld != vsbNeeded) {
viewport.setBounds(bounds);
// You could argue that we should recheck the Scrollable methods again until they stop changing,
// but they might never stop changing, so we stop here and don't do any additional checks.
}
}
}
// Set the bounds of the row header.
rowHeadBounds.y = bounds.y - insets.top;
rowHeadBounds.height = bounds.height + insets.top + insets.bottom;
if (rowHead != null) {
rowHead.setBounds(rowHeadBounds);
rowHead.putClientProperty(Alignment.class, vsbOnLeft ? Alignment.RIGHT : Alignment.LEFT);
}
// Set the bounds of the column header.
colHeadBounds.x = bounds.x - insets.left;
colHeadBounds.width = bounds.width + insets.left + insets.right;
if (colHead != null) {
colHead.setBounds(colHeadBounds);
colHead.putClientProperty(Alignment.class, hsbOnTop ? Alignment.BOTTOM : Alignment.TOP);
}
// Calculate overlaps for translucent scroll bars
int overlapWidth = 0;
int overlapHeight = 0;
if (vsbNeeded && !vsbOpaque && hsbNeeded && !hsbOpaque) {
overlapWidth = vsbBounds.width; // shrink horizontally
//overlapHeight = hsbBounds.height; // shrink vertically
}
// Set the bounds of the vertical scroll bar.
vsbBounds.y = bounds.y - insets.top;
vsbBounds.height = bounds.height + insets.top + insets.bottom;
if (vsb != null) {
vsb.setVisible(vsbNeeded);
if (vsbNeeded) {
if (vsbOpaque && colHead != null && UIManager.getBoolean("ScrollPane.fillUpperCorner")) {
if ((vsbOnLeft ? upperLeft : upperRight) == null) {
// This is used primarily for GTK L&F, which needs to extend
// the vertical scrollbar to fill the upper corner near the column header.
// Note that we skip this step (and use the default behavior)
// if the user has set a custom corner component.
if (!hsbOnTop) vsbBounds.y -= colHeadBounds.height;
vsbBounds.height += colHeadBounds.height;
}
}
int overlapY = !hsbOnTop ? 0 : overlapHeight;
vsb.setBounds(vsbBounds.x, vsbBounds.y + overlapY, vsbBounds.width, vsbBounds.height - overlapHeight);
vsb.putClientProperty(Alignment.class, vsbOnLeft ? Alignment.LEFT : Alignment.RIGHT);
}
// Modify the bounds of the translucent scroll bar.
if (!vsbOpaque) {
if (!vsbOnLeft) vsbBounds.x += vsbBounds.width;
vsbBounds.width = 0;
}
}
// Set the bounds of the horizontal scroll bar.
hsbBounds.x = bounds.x - insets.left;
hsbBounds.width = bounds.width + insets.left + insets.right;
if (hsb != null) {
hsb.setVisible(hsbNeeded);
if (hsbNeeded) {
if (hsbOpaque && rowHead != null && UIManager.getBoolean("ScrollPane.fillLowerCorner")) {
if ((vsbOnLeft ? lowerRight : lowerLeft) == null) {
// This is used primarily for GTK L&F, which needs to extend
// the horizontal scrollbar to fill the lower corner near the row header.
// Note that we skip this step (and use the default behavior)
// if the user has set a custom corner component.
if (!vsbOnLeft) hsbBounds.x -= rowHeadBounds.width;
hsbBounds.width += rowHeadBounds.width;
}
}
int overlapX = !vsbOnLeft ? 0 : overlapWidth;
hsb.setBounds(hsbBounds.x + overlapX, hsbBounds.y, hsbBounds.width - overlapWidth, hsbBounds.height);
hsb.putClientProperty(Alignment.class, hsbOnTop ? Alignment.TOP : Alignment.BOTTOM);
}
// Modify the bounds of the translucent scroll bar.
if (!hsbOpaque) {
if (!hsbOnTop) hsbBounds.y += hsbBounds.height;
hsbBounds.height = 0;
}
}
// Set the bounds of the corners.
if (lowerLeft != null) {
lowerLeft.setBounds(vsbOnLeft ? vsbBounds.x : rowHeadBounds.x,
hsbOnTop ? colHeadBounds.y : hsbBounds.y,
vsbOnLeft ? vsbBounds.width : rowHeadBounds.width,
hsbOnTop ? colHeadBounds.height : hsbBounds.height);
}
if (lowerRight != null) {
lowerRight.setBounds(vsbOnLeft ? rowHeadBounds.x : vsbBounds.x,
hsbOnTop ? colHeadBounds.y : hsbBounds.y,
vsbOnLeft ? rowHeadBounds.width : vsbBounds.width,
hsbOnTop ? colHeadBounds.height : hsbBounds.height);
}
if (upperLeft != null) {
upperLeft.setBounds(vsbOnLeft ? vsbBounds.x : rowHeadBounds.x,
hsbOnTop ? hsbBounds.y : colHeadBounds.y,
vsbOnLeft ? vsbBounds.width : rowHeadBounds.width,
hsbOnTop ? hsbBounds.height : colHeadBounds.height);
}
if (upperRight != null) {
upperRight.setBounds(vsbOnLeft ? rowHeadBounds.x : vsbBounds.x,
hsbOnTop ? hsbBounds.y : colHeadBounds.y,
vsbOnLeft ? rowHeadBounds.width : vsbBounds.width,
hsbOnTop ? hsbBounds.height : colHeadBounds.height);
}
if (!vsbOpaque && vsbNeeded || !hsbOpaque && hsbNeeded) {
fixComponentZOrder(vsb, 0);
fixComponentZOrder(viewport, -1);
}
}
private static void fixComponentZOrder(Component component, int index) {
if (component != null) {
Container parent = component.getParent();
synchronized (parent.getTreeLock()) {
if (index < 0) index += parent.getComponentCount();
parent.setComponentZOrder(component, index);
}
}
}
private void adjustForVSB(Rectangle bounds, Insets insets, Rectangle vsbBounds, boolean vsbOpaque, boolean vsbOnLeft) {
vsbBounds.width = !vsb.isEnabled() ? 0 : min(bounds.width, vsb.getPreferredSize().width);
if (vsbOnLeft) {
vsbBounds.x = bounds.x - insets.left/* + vsbBounds.width*/;
if (vsbOpaque) bounds.x += vsbBounds.width;
}
else {
vsbBounds.x = bounds.x + bounds.width + insets.right - vsbBounds.width;
}
if (vsbOpaque) bounds.width -= vsbBounds.width;
}
private void adjustForHSB(Rectangle bounds, Insets insets, Rectangle hsbBounds, boolean hsbOpaque, boolean hsbOnTop) {
hsbBounds.height = !hsb.isEnabled() ? 0 : min(bounds.height, hsb.getPreferredSize().height);
if (hsbOnTop) {
hsbBounds.y = bounds.y - insets.top/* + hsbBounds.height*/;
if (hsbOpaque) bounds.y += hsbBounds.height;
}
else {
hsbBounds.y = bounds.y + bounds.height + insets.bottom - hsbBounds.height;
}
if (hsbOpaque) bounds.height -= hsbBounds.height;
}
private static int min(int one, int two) {
return Math.max(0, Math.min(one, two));
}
}
/**
* Indicates whether the specified event is not consumed and does not have unexpected modifiers.
*
* @param event a mouse wheel event to check for validity
* @return {@code true} if the specified event is valid, {@code false} otherwise
*/
public static boolean isScrollEvent(@NotNull MouseWheelEvent event) {
if (event.isConsumed()) return false; // event should not be consumed already
if (event.getWheelRotation() == 0) return false; // any rotation expected (forward or backward)
return 0 == (SCROLL_MODIFIERS & event.getModifiers());
}
private static final int SCROLL_MODIFIERS = // event modifiers allowed during scrolling
~InputEvent.SHIFT_MASK & ~InputEvent.SHIFT_DOWN_MASK & // for horizontal scrolling
~InputEvent.BUTTON1_MASK & ~InputEvent.BUTTON1_DOWN_MASK; // for selection
}
| hurricup/intellij-community | platform/platform-api/src/com/intellij/ui/components/JBScrollPane.java | Java | apache-2.0 | 33,437 |
package cn.xishan.oftenporter.porter.core.init;
import cn.xishan.oftenporter.porter.core.advanced.IConfigData;
import com.alibaba.fastjson.JSON;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* @author Created by https://github.com/CLovinr on 2018-12-21.
*/
public class DealSharpProperties
{
private static final Logger LOGGER = LoggerFactory.getLogger(DealSharpProperties.class);
private static class PropOne
{
private String propKey,
originValue;
private int startIndex, endIndex;
public PropOne(String propKey, String originValue, int startIndex, int endIndex)
{
this.propKey = propKey;
this.originValue = originValue;
this.startIndex = startIndex;
this.endIndex = endIndex;
}
public String getPropKey()
{
return propKey;
}
public String replace(String propValue)
{
String str = originValue.substring(0, startIndex) + propValue + originValue.substring(endIndex);
return str;
}
}
    /**
     * Replaces every #{propertyName} placeholder in the given string.
     *
     * @param string     the string whose placeholders should be replaced
     * @param properties the map that provides the property values
     * @param forEmpty   if not null, used to replace every placeholder whose property does not exist
     * @return the string with all placeholders resolved
     */
public static String replaceSharpProperties(String string, Map<String, ?> properties, String forEmpty)
{
for (Map.Entry<String, ?> entry : properties.entrySet())
{
if (string.contains("#{" + entry.getKey() + "}"))
{
String rs;
// if (entry.getValue() instanceof Map || entry.getValue() instanceof Collection)
// {
// rs = JSON.toJSONString(entry.getValue());
// } else
// {
// rs = String.valueOf(entry.getValue());
// }
if (entry.getValue() instanceof CharSequence)
{
rs = String.valueOf(entry.getValue());
} else if (entry.getValue() == null)
{
rs = "";
} else
{
rs = JSON.toJSONString(entry.getValue());
}
string = string.replace("#{" + entry.getKey() + "}", rs);
}
}
if (forEmpty != null)
{
            string = string.replaceAll("#\\{[^{}]+\\}", forEmpty);// remove placeholders that were never set
}
return string;
}
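    // Example (hypothetical values, for illustration only):
    //   replaceSharpProperties("jdbc://#{db.host}/#{db.name}",
    //           Collections.singletonMap("db.host", "localhost"), "")
    //   returns "jdbc://localhost/" - db.name is undefined, so its placeholder is replaced by forEmpty ("").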
    /**
     * Replaces #{properName} variables.
     *
     * @param srcMap        the map whose values contain the placeholders to replace
     * @param propertiesMap the map that provides the property values
     */
public static void dealSharpProperties(Map srcMap, Map propertiesMap)
{
dealSharpProperties(srcMap, propertiesMap, false);
}
    /**
     * Replaces #{properName} variables.
     *
     * @param srcMap        the map whose values contain the placeholders to replace
     * @param propertiesMap the map that provides the property values
     * @param keepNotFound  whether to keep variables that cannot be resolved
     */
public static void dealSharpProperties(Map srcMap, Map propertiesMap, boolean keepNotFound)
{
Set<String> containsVar = null;
boolean isFirst = true;
boolean hasSet = true;
        // process the properties
while (hasSet)
{
hasSet = false;
Collection<String> nameCollection;
if (isFirst)
{
nameCollection = srcMap.keySet();
} else
{
nameCollection = containsVar;
}
containsVar = new HashSet<>();
for (String properName : nameCollection)
{
Object value = srcMap.get(properName);
if (!(value instanceof CharSequence))
{
continue;
}
String valueString = String.valueOf(value);
PropOne propOne = getPropertiesKey(String.valueOf(valueString));
if (propOne != null && propOne.getPropKey().equals(properName))
{
throw new RuntimeException(
"can not set property of \"" + properName + "\" with value \"" + valueString + "\",prop name eq value attr name");
} else if (propOne != null)
{
containsVar.add(properName);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},replace-attr={},origin-value={}", properName,
propOne.getPropKey(), valueString);
}
String replaceStr = null;
if (propertiesMap.containsKey(propOne.getPropKey()))
{
replaceStr = String.valueOf(propertiesMap.get(propOne.getPropKey()));
} else
{
if (keepNotFound)
{
containsVar.remove(properName);
} else
{
replaceStr = "";
LOGGER.warn("proper value with key '{}' is empty", propOne.getPropKey());
}
}
if (replaceStr != null)
{
String newValue = propOne.replace(replaceStr);
srcMap.put(properName, newValue);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},new-value={}", properName, newValue);
}
}
hasSet = true;
}
}
isFirst = false;
}
}
static void dealProperties(IConfigData configData)
{
Set<String> containsVar = null;
boolean isFirst = true;
boolean hasSet = true;
        // process the properties
while (hasSet)
{
hasSet = false;
Collection<String> nameCollection;
if (isFirst)
{
nameCollection = configData.propertyNames();
} else
{
nameCollection = containsVar;
}
containsVar = new HashSet<>();
for (String properName : nameCollection)
{
Object value = configData.get(properName);
if (!(value instanceof CharSequence))
{
continue;
}
String valueString = String.valueOf(value);
PropOne propOne = getPropertiesKey(String.valueOf(valueString));
if (propOne != null && propOne.getPropKey().equals(properName))
{
throw new RuntimeException(
"can not set property of " + properName + " with value \"" + valueString + "\"");
} else if (propOne != null)
{
containsVar.add(properName);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},replace-attr={},origin-value={}", properName,
propOne.getPropKey(), valueString);
}
String replaceStr;
if (configData.contains(propOne.getPropKey()))
{
replaceStr = configData.getString(propOne.getPropKey());
} else
{
replaceStr = "";
LOGGER.warn("proper value with key '{}' is empty", propOne.getPropKey());
}
String newValue = propOne.replace(replaceStr);
configData.set(properName, newValue);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},new-value={}", properName, newValue);
}
hasSet = true;
}
}
isFirst = false;
}
}
private static final Pattern PROPERTIES_PATTERN = Pattern.compile("#\\{([^{}]+)}");
private static PropOne getPropertiesKey(String value)
{
Matcher matcher = PROPERTIES_PATTERN.matcher(value);
if (matcher.find())
{
PropOne propOne = new PropOne(matcher.group(1).trim(), value, matcher.start(), matcher.end());
return propOne;
} else
{
return null;
}
}
}
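// Minimal runnable sketch (added for illustration; not part of the original library).
// It shows the map-based API resolving several #{...} placeholders over successive passes.
class DealSharpPropertiesUsageSketch
{
    public static void main(String[] args)
    {
        java.util.Map<String, Object> src = new java.util.HashMap<>();
        src.put("url", "http://#{host}:#{port}/api");
        java.util.Map<String, Object> props = new java.util.HashMap<>();
        props.put("host", "localhost");
        props.put("port", 8080);
        DealSharpProperties.dealSharpProperties(src, props);
        // src now maps "url" to "http://localhost:8080/api"
        System.out.println(src.get("url"));
    }
}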
| gzxishan/OftenPorter | Porter-Core/src/main/java/cn/xishan/oftenporter/porter/core/init/DealSharpProperties.java | Java | apache-2.0 | 8,870 |
package dk.dbc.kafka.dispatch.sources;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
/**
* Source for reading InputStreams line-by-line
* @author Adam Tulinius
*/
public class InputStreamSource extends Source<String> {
private BufferedReader reader;
public InputStreamSource(InputStream inputStream) {
this.reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
}
@Override
public Optional<String> next() throws IOException {
String line = reader.readLine();
if (line != null) {
return Optional.of(line);
} else {
return Optional.empty();
}
}
}
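// Minimal usage sketch (illustrative assumption, not part of the original project):
// the Optional-based contract returns each line until end-of-stream, then Optional.empty().
class InputStreamSourceUsageSketch {
    public static void main(String[] args) throws java.io.IOException {
        byte[] data = "first\nsecond".getBytes(java.nio.charset.StandardCharsets.UTF_8);
        InputStreamSource source = new InputStreamSource(new java.io.ByteArrayInputStream(data));
        java.util.Optional<String> line;
        while ((line = source.next()).isPresent()) {
            System.out.println(line.get()); // prints "first", then "second"
        }
    }
}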
| DBCDK/kafka-dispatch | src/main/java/dk/dbc/kafka/dispatch/sources/InputStreamSource.java | Java | apache-2.0 | 818 |
package com.planet_ink.coffee_mud.Commands;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2000-2010 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
@SuppressWarnings("unchecked")
public class Go extends StdCommand
{
public Go(){}
private String[] access={"GO","WALK"};
public String[] getAccessWords(){return access;}
public int energyExpenseFactor(){return 1;}
public void ridersBehind(Vector riders,
Room sourceRoom,
Room destRoom,
int directionCode,
boolean flee)
{
if(riders!=null)
for(int r=0;r<riders.size();r++)
{
Rider rider=(Rider)riders.elementAt(r);
if(rider instanceof MOB)
{
MOB rMOB=(MOB)rider;
if((rMOB.location()==sourceRoom)
||(rMOB.location()==destRoom))
{
boolean fallOff=false;
if(rMOB.location()==sourceRoom)
{
if(rMOB.riding()!=null)
rMOB.tell("You ride "+rMOB.riding().name()+" "+Directions.getDirectionName(directionCode)+".");
if(!move(rMOB,directionCode,flee,false,true,false))
fallOff=true;
}
if(fallOff)
{
if(rMOB.riding()!=null)
rMOB.tell("You fall off "+rMOB.riding().name()+"!");
rMOB.setRiding(null);
}
}
else
rMOB.setRiding(null);
}
else
if(rider instanceof Item)
{
Item rItem=(Item)rider;
if((rItem.owner()==sourceRoom)
||(rItem.owner()==destRoom))
destRoom.bringItemHere(rItem,-1,false);
else
rItem.setRiding(null);
}
}
}
public static Vector addRiders(Rider theRider,
Rideable riding,
Vector riders)
{
if((riding!=null)&&(riding.mobileRideBasis()))
for(int r=0;r<riding.numRiders();r++)
{
Rider rider=riding.fetchRider(r);
if((rider!=null)
&&(rider!=theRider)
&&(!riders.contains(rider)))
{
riders.addElement(rider);
if(rider instanceof Rideable)
addRiders(theRider,(Rideable)rider,riders);
}
}
return riders;
}
public Vector ridersAhead(Rider theRider,
Room sourceRoom,
Room destRoom,
int directionCode,
boolean flee)
{
Vector riders=new Vector();
Rideable riding=theRider.riding();
Vector rideables=new Vector();
while((riding!=null)&&(riding.mobileRideBasis()))
{
rideables.addElement(riding);
addRiders(theRider,riding,riders);
if((riding instanceof Rider)&&((Rider)riding).riding()!=theRider.riding())
riding=((Rider)riding).riding();
else
riding=null;
}
if(theRider instanceof Rideable)
addRiders(theRider,(Rideable)theRider,riders);
for(int r=riders.size()-1;r>=0;r--)
{
Rider R=(Rider)riders.elementAt(r);
if((R instanceof Rideable)&&(((Rideable)R).numRiders()>0))
{
if(!rideables.contains(R))
rideables.addElement(R);
riders.removeElement(R);
}
}
for(int r=0;r<rideables.size();r++)
{
riding=(Rideable)rideables.elementAt(r);
if((riding instanceof Item)
&&((sourceRoom).isContent((Item)riding)))
destRoom.bringItemHere((Item)riding,-1,false);
else
if((riding instanceof MOB)
&&((sourceRoom).isInhabitant((MOB)riding)))
{
((MOB)riding).tell("You are ridden "+Directions.getDirectionName(directionCode)+".");
if(!move(((MOB)riding),directionCode,false,false,true,false))
{
if(theRider instanceof MOB)
((MOB)theRider).tell(((MOB)riding).name()+" won't seem to let you go that way.");
r=r-1;
for(;r>=0;r--)
{
riding=(Rideable)rideables.elementAt(r);
if((riding instanceof Item)
&&((destRoom).isContent((Item)riding)))
sourceRoom.bringItemHere((Item)riding,-1,false);
else
if((riding instanceof MOB)
&&(((MOB)riding).isMonster())
&&((destRoom).isInhabitant((MOB)riding)))
sourceRoom.bringMobHere((MOB)riding,false);
}
return null;
}
}
}
return riders;
}
public boolean move(MOB mob,
int directionCode,
boolean flee,
boolean nolook,
boolean noriders)
{
return move(mob,directionCode,flee,nolook,noriders,false);
}
public boolean move(MOB mob,
int directionCode,
boolean flee,
boolean nolook,
boolean noriders,
boolean always)
{
if(directionCode<0) return false;
if(mob==null) return false;
Room thisRoom=mob.location();
if(thisRoom==null) return false;
Room destRoom=thisRoom.getRoomInDir(directionCode);
Exit exit=thisRoom.getExitInDir(directionCode);
if(destRoom==null)
{
mob.tell("You can't go that way.");
return false;
}
Exit opExit=thisRoom.getReverseExit(directionCode);
String directionName=(directionCode==Directions.GATE)&&(exit!=null)?"through "+exit.name():Directions.getDirectionName(directionCode);
String otherDirectionName=(Directions.getOpDirectionCode(directionCode)==Directions.GATE)&&(exit!=null)?exit.name():Directions.getFromDirectionName(Directions.getOpDirectionCode(directionCode));
int generalMask=always?CMMsg.MASK_ALWAYS:0;
int leaveCode=generalMask|CMMsg.MSG_LEAVE;
if(flee)
leaveCode=generalMask|CMMsg.MSG_FLEE;
CMMsg enterMsg=null;
CMMsg leaveMsg=null;
if((mob.riding()!=null)&&(mob.riding().mobileRideBasis()))
{
enterMsg=CMClass.getMsg(mob,destRoom,exit,generalMask|CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,"<S-NAME> ride(s) "+mob.riding().name()+" in from "+otherDirectionName+".");
leaveMsg=CMClass.getMsg(mob,thisRoom,opExit,leaveCode,((flee)?"You flee "+directionName+".":null),leaveCode,null,leaveCode,((flee)?"<S-NAME> flee(s) with "+mob.riding().name()+" "+directionName+".":"<S-NAME> ride(s) "+mob.riding().name()+" "+directionName+"."));
}
else
{
enterMsg=CMClass.getMsg(mob,destRoom,exit,generalMask|CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,"<S-NAME> "+CMLib.flags().dispositionString(mob,CMFlagLibrary.flag_arrives)+" from "+otherDirectionName+".");
leaveMsg=CMClass.getMsg(mob,thisRoom,opExit,leaveCode,((flee)?"You flee "+directionName+".":null),leaveCode,null,leaveCode,((flee)?"<S-NAME> flee(s) "+directionName+".":"<S-NAME> "+CMLib.flags().dispositionString(mob,CMFlagLibrary.flag_leaves)+" "+directionName+"."));
}
boolean gotoAllowed=CMSecurity.isAllowed(mob,destRoom,"GOTO");
if((exit==null)&&(!gotoAllowed))
{
mob.tell("You can't go that way.");
return false;
}
else
if(exit==null)
thisRoom.showHappens(CMMsg.MSG_OK_VISUAL,"The area to the "+directionName+" shimmers and becomes transparent.");
else
if((!exit.okMessage(mob,enterMsg))&&(!gotoAllowed))
return false;
else
if(!leaveMsg.target().okMessage(mob,leaveMsg)&&(!gotoAllowed))
return false;
else
if((opExit!=null)&&(!opExit.okMessage(mob,leaveMsg))&&(!gotoAllowed))
return false;
else
if(!enterMsg.target().okMessage(mob,enterMsg)&&(!gotoAllowed))
return false;
else
if(!mob.okMessage(mob,enterMsg)&&(!gotoAllowed))
return false;
if(mob.riding()!=null)
{
if((!mob.riding().okMessage(mob,enterMsg))&&(!gotoAllowed))
return false;
}
else
{
if(!mob.isMonster())
for(int i=0;i<energyExpenseFactor();i++)
mob.curState().expendEnergy(mob,mob.maxState(),true);
if((!flee)&&(!mob.curState().adjMovement(-1,mob.maxState()))&&(!gotoAllowed))
{
mob.tell("You are too tired.");
return false;
}
if((mob.soulMate()==null)&&(mob.playerStats()!=null)&&(mob.riding()==null)&&(mob.location()!=null))
mob.playerStats().adjHygiene(mob.location().pointsPerMove(mob));
}
Vector riders=null;
if(!noriders)
{
riders=ridersAhead(mob,(Room)leaveMsg.target(),(Room)enterMsg.target(),directionCode,flee);
if(riders==null) return false;
}
Vector enterTrailersSoFar=null;
Vector leaveTrailersSoFar=null;
if((leaveMsg.trailerMsgs()!=null)&&(leaveMsg.trailerMsgs().size()>0))
{
leaveTrailersSoFar=new Vector();
leaveTrailersSoFar.addAll(leaveMsg.trailerMsgs());
leaveMsg.trailerMsgs().clear();
}
if((enterMsg.trailerMsgs()!=null)&&(enterMsg.trailerMsgs().size()>0))
{
enterTrailersSoFar=new Vector();
enterTrailersSoFar.addAll(enterMsg.trailerMsgs());
enterMsg.trailerMsgs().clear();
}
if(exit!=null) exit.executeMsg(mob,enterMsg);
if(mob.location()!=null) mob.location().delInhabitant(mob);
((Room)leaveMsg.target()).send(mob,leaveMsg);
if(enterMsg.target()==null)
{
((Room)leaveMsg.target()).bringMobHere(mob,false);
mob.tell("You can't go that way.");
return false;
}
mob.setLocation((Room)enterMsg.target());
((Room)enterMsg.target()).addInhabitant(mob);
((Room)enterMsg.target()).send(mob,enterMsg);
if(opExit!=null) opExit.executeMsg(mob,leaveMsg);
if(!nolook)
{
CMLib.commands().postLook(mob,true);
if((!mob.isMonster())
&&(CMath.bset(mob.getBitmap(),MOB.ATT_AUTOWEATHER))
&&(((Room)enterMsg.target())!=null)
&&((thisRoom.domainType()&Room.INDOORS)>0)
&&((((Room)enterMsg.target()).domainType()&Room.INDOORS)==0)
&&(((Room)enterMsg.target()).getArea().getClimateObj().weatherType(((Room)enterMsg.target()))!=Climate.WEATHER_CLEAR)
&&(((Room)enterMsg.target()).isInhabitant(mob)))
mob.tell("\n\r"+((Room)enterMsg.target()).getArea().getClimateObj().weatherDescription(((Room)enterMsg.target())));
}
if(!noriders)
ridersBehind(riders,(Room)leaveMsg.target(),(Room)enterMsg.target(),directionCode,flee);
if(!flee)
for(int f=0;f<mob.numFollowers();f++)
{
MOB follower=mob.fetchFollower(f);
if(follower!=null)
{
if((follower.amFollowing()==mob)
&&((follower.location()==thisRoom)||(follower.location()==destRoom)))
{
if((follower.location()==thisRoom)&&(CMLib.flags().aliveAwakeMobile(follower,true)))
{
if(CMath.bset(follower.getBitmap(),MOB.ATT_AUTOGUARD))
thisRoom.show(follower,null,null,CMMsg.MSG_OK_ACTION,"<S-NAME> remain(s) on guard here.");
else
{
follower.tell("You follow "+mob.name()+" "+Directions.getDirectionName(directionCode)+".");
if(!move(follower,directionCode,false,false,false,false))
{
//follower.setFollowing(null);
}
}
}
}
//else
// follower.setFollowing(null);
}
}
if((leaveTrailersSoFar!=null)&&(leaveMsg.target() instanceof Room))
for(int t=0;t<leaveTrailersSoFar.size();t++)
((Room)leaveMsg.target()).send(mob,(CMMsg)leaveTrailersSoFar.elementAt(t));
if((enterTrailersSoFar!=null)&&(enterMsg.target() instanceof Room))
for(int t=0;t<enterTrailersSoFar.size();t++)
((Room)enterMsg.target()).send(mob,(CMMsg)enterTrailersSoFar.elementAt(t));
return true;
}
protected Command stander=null;
protected Vector ifneccvec=null;
public void standIfNecessary(MOB mob, int metaFlags)
throws java.io.IOException
{
if((ifneccvec==null)||(ifneccvec.size()!=2))
{
ifneccvec=new Vector();
ifneccvec.addElement("STAND");
ifneccvec.addElement("IFNECESSARY");
}
if(stander==null) stander=CMClass.getCommand("Stand");
if((stander!=null)&&(ifneccvec!=null))
stander.execute(mob,ifneccvec,metaFlags);
}
public boolean execute(MOB mob, Vector commands, int metaFlags)
throws java.io.IOException
{
standIfNecessary(mob,metaFlags);
if((commands.size()>3)
&&(commands.firstElement() instanceof Integer))
{
return move(mob,
((Integer)commands.elementAt(0)).intValue(),
((Boolean)commands.elementAt(1)).booleanValue(),
((Boolean)commands.elementAt(2)).booleanValue(),
((Boolean)commands.elementAt(3)).booleanValue(),false);
}
String whereStr=CMParms.combine(commands,1);
Room R=mob.location();
int direction=-1;
if(whereStr.equalsIgnoreCase("OUT"))
{
if(!CMath.bset(R.domainType(),Room.INDOORS))
{
mob.tell("You aren't indoors.");
return false;
}
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
if((R.getExitInDir(d)!=null)
&&(R.getRoomInDir(d)!=null)
&&(!CMath.bset(R.getRoomInDir(d).domainType(),Room.INDOORS)))
{
if(direction>=0)
{
mob.tell("Which way out? Try North, South, East, etc..");
return false;
}
direction=d;
}
if(direction<0)
{
mob.tell("There is no direct way out of this place. Try a direction.");
return false;
}
}
if(direction<0)
direction=Directions.getGoodDirectionCode(whereStr);
if(direction<0)
{
Environmental E=null;
if(R!=null)
E=R.fetchFromRoomFavorItems(null,whereStr,Item.WORNREQ_UNWORNONLY);
if(E instanceof Rideable)
{
Command C=CMClass.getCommand("Enter");
return C.execute(mob,commands,metaFlags);
}
if((E instanceof Exit)&&(R!=null))
{
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
if(R.getExitInDir(d)==E)
{ direction=d; break;}
}
}
String doing=(String)commands.elementAt(0);
if(direction>=0)
move(mob,direction,false,false,false,false);
else
{
boolean doneAnything=false;
if(commands.size()>2)
for(int v=1;v<commands.size();v++)
{
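					// Arguments may combine counts and directions, e.g. "go 3 north" or "go 4n e":
					// a leading number (or a digit prefix such as "4n") repeats the move that many times.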
int num=1;
String s=(String)commands.elementAt(v);
if(CMath.s_int(s)>0)
{
num=CMath.s_int(s);
v++;
if(v<commands.size())
s=(String)commands.elementAt(v);
}
else
if(("NSEWUDnsewud".indexOf(s.charAt(s.length()-1))>=0)
&&(CMath.s_int(s.substring(0,s.length()-1))>0))
{
num=CMath.s_int(s.substring(0,s.length()-1));
s=s.substring(s.length()-1);
}
direction=Directions.getGoodDirectionCode(s);
if(direction>=0)
{
doneAnything=true;
for(int i=0;i<num;i++)
{
if(mob.isMonster())
{
if(!move(mob,direction,false,false,false,false))
return false;
}
else
{
Vector V=new Vector();
V.addElement(doing);
V.addElement(Directions.getDirectionName(direction));
mob.enqueCommand(V,metaFlags,0);
}
}
}
else
break;
}
if(!doneAnything)
mob.tell(CMStrings.capitalizeAndLower(doing)+" which direction?\n\rTry north, south, east, west, up, or down.");
}
return false;
}
public double actionsCost(MOB mob, Vector cmds){
double cost=CMath.div(CMProps.getIntVar(CMProps.SYSTEMI_DEFCMDTIME),100.0);
if((mob!=null)&&(CMath.bset(mob.getBitmap(),MOB.ATT_AUTORUN)))
cost /= 4.0;
return cost;
}
public boolean canBeOrdered(){return true;}
}
| robjcaskey/Unofficial-Coffee-Mud-Upstream | com/planet_ink/coffee_mud/Commands/Go.java | Java | apache-2.0 | 16,256 |
/*
* Copyright 2013-2020 consulo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.plugins;
import com.intellij.icons.AllIcons;
import com.intellij.ide.DataManager;
import com.intellij.openapi.actionSystem.ActionGroup;
import com.intellij.openapi.ui.popup.JBPopupFactory;
import com.intellij.ui.ClickListener;
import com.intellij.util.ui.JBUI;
import com.intellij.util.ui.UIUtil;
import consulo.awt.TargetAWT;
import consulo.localize.LocalizeValue;
import javax.annotation.Nonnull;
import javax.swing.*;
import java.awt.event.MouseEvent;
import java.util.function.Function;
/**
* @author VISTALL
* @since 03/12/2020
*/
public class LabelPopup extends JLabel {
private final LocalizeValue myPrefix;
public LabelPopup(LocalizeValue prefix, Function<LabelPopup, ? extends ActionGroup> groupBuilder) {
myPrefix = prefix;
setForeground(UIUtil.getLabelDisabledForeground());
setBorder(JBUI.Borders.empty(1, 1, 1, 5));
setIcon(TargetAWT.to(AllIcons.General.ComboArrow));
setHorizontalTextPosition(SwingConstants.LEADING);
new ClickListener() {
@Override
public boolean onClick(@Nonnull MouseEvent event, int clickCount) {
LabelPopup component = LabelPopup.this;
JBPopupFactory.getInstance()
.createActionGroupPopup(myPrefix.get(), groupBuilder.apply(component), DataManager.getInstance().getDataContext(component), JBPopupFactory.ActionSelectionAid.SPEEDSEARCH, true)
.showUnderneathOf(component);
return true;
}
}.installOn(this);
}
public void setPrefixedText(LocalizeValue tagValue) {
setText(LocalizeValue.join(myPrefix, LocalizeValue.space(), tagValue).get());
}
}
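// Usage sketch (hypothetical; the group builder and LocalizeValue instances below are assumptions):
//
//   LabelPopup sortLabel = new LabelPopup(sortByPrefixValue, label -> buildSortActionGroup());
//   sortLabel.setPrefixedText(currentSortValue);
//
// Clicking the label opens the ActionGroup returned by the builder in a popup underneath it,
// and setPrefixedText(...) renders "<prefix> <value>" with the combo-arrow icon trailing the text.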
| consulo/consulo | modules/base/platform-impl/src/main/java/com/intellij/ide/plugins/LabelPopup.java | Java | apache-2.0 | 2,225 |
package org.support.project.knowledge.vo.notification.webhook;
public class WebhookLongIdJson {
public long id;
}
| support-project/knowledge | src/main/java/org/support/project/knowledge/vo/notification/webhook/WebhookLongIdJson.java | Java | apache-2.0 | 119 |
package etri.sdn.controller.module.vxlanflowmapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.codehaus.jackson.map.ObjectMapper;
public class Tester {
public static void main(String[] args) {
testV2PRequest();
testV2PResponse();
}
public static void testV2PResponse() {
HeaderInfoPair pair1 = new HeaderInfoPair(
new OuterPacketHeader.Builder()
.srcMac("00:00:00:00:00:11")
.dstMac("00:00:00:00:00:22").
srcIp("10.0.0.11").
dstIp("10.0.0.22").
udpPort("1001")
.build(),
new OrginalPacketHeader.Builder()
.srcMac("00:00:00:00:00:11")
.dstMac("00:00:00:00:00:22")
.srcIp("10.0.0.11")
.dstIp("10.0.0.22")
.vnid("1001")
.build() );
List<HeaderInfoPair> pairs = Arrays.asList(pair1);
V2PResponse response = new V2PResponse(pairs);
ObjectMapper mapper = new ObjectMapper();
String output = null;
try {
output = mapper.defaultPrettyPrintingWriter().writeValueAsString(response);
System.out.println(output);
} catch (IOException e) {
e.printStackTrace();
}
}
public static void testV2PRequest() {
OuterPacketHeader orgHeader = new OuterPacketHeader("00:00:00:00:00:01", "00:00:00:00:00:02", "10.0.0.1", "10.0.0.2", "1234");
List<OuterPacketHeader> headers= Arrays.asList(orgHeader);
P2VRequest request = new P2VRequest(headers);
// request.outerList = headers;
ObjectMapper mapper = new ObjectMapper();
List<OuterPacketHeader> switchs = new ArrayList<>();
String output = null;
try {
output = mapper.defaultPrettyPrintingWriter().writeValueAsString(request);
System.out.println(output);
} catch (IOException e) {
e.printStackTrace();
}
}
}
| uni2u/iNaaS | Torpedo/src/etri/sdn/controller/module/vxlanflowmapper/Tester.java | Java | apache-2.0 | 1,831 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package fuzzy.internal.functions;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import org.junit.Test;
/**
* Tests for Max function.
*
* @since 0.2
* @see Max
*/
public class TestMax {
@Test
public void testMax() {
Collection<Double> list = Arrays.asList(-1.0, 1.0, 2.0, 3.5);
Double r = Max.of(list, false);
assertEquals(Double.valueOf(3.5), r);
}
@Test
public void testMaxEmpty() {
Double r = Max.of(Collections.<Double>emptyList(), false);
assertEquals(Double.valueOf(0.0), r);
}
@Test
public void testMaxAbs() {
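        // With the second argument true, elements are compared by absolute value,
        // but the winning element keeps its original sign (|-10.0| > 3.5, so -10.0 is returned).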
Collection<Double> list = Arrays.asList(-10.0, -1.0, 1.0, 2.0, 3.5);
Double r = Max.of(list, true);
assertEquals(Double.valueOf(-10.0), r);
}
}
| tupilabs/nebular | src/test/java/fuzzy/internal/functions/TestMax.java | Java | apache-2.0 | 1,371 |
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html
*/
package org.hibernate.orm.type.descriptor.sql.internal;
import java.time.temporal.TemporalAccessor;
import javax.persistence.TemporalType;
import org.hibernate.dialect.Dialect;
import org.hibernate.orm.type.descriptor.internal.DateTimeUtils;
import org.hibernate.orm.type.descriptor.java.spi.TemporalJavaTypeDescriptor;
import org.hibernate.orm.type.descriptor.spi.WrapperOptions;
/**
* @author Steve Ebersole
*/
public class JdbcLiteralFormatterTemporal extends BasicJdbcLiteralFormatter {
private final TemporalType precision;
public JdbcLiteralFormatterTemporal(TemporalJavaTypeDescriptor javaTypeDescriptor, TemporalType precision) {
super( javaTypeDescriptor );
this.precision = precision;
		// todo : add some validation of combos between javaTypeDescriptor#getPrecision and precision - log warnings
}
@Override
protected TemporalJavaTypeDescriptor getJavaTypeDescriptor() {
return (TemporalJavaTypeDescriptor) super.getJavaTypeDescriptor();
}
@Override
public String toJdbcLiteral(Object value, Dialect dialect, WrapperOptions wrapperOptions) {
// for performance reasons, avoid conversions if we can
if ( value instanceof java.util.Date ) {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
(java.util.Date) value,
precision
);
}
else if ( value instanceof java.util.Calendar ) {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
(java.util.Calendar) value,
precision
);
}
else if ( value instanceof TemporalAccessor ) {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
(TemporalAccessor) value,
precision
);
}
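		// Otherwise fall back to unwrapping through the JavaTypeDescriptor, choosing the JDBC-side
		// type that matches the mapped precision before formatting the literal.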
switch ( getJavaTypeDescriptor().getPrecision() ) {
case DATE: {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
unwrap( value, java.sql.Date.class, wrapperOptions ),
precision
);
}
case TIME: {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
unwrap( value, java.sql.Time.class, wrapperOptions ),
precision
);
}
default: {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
unwrap( value, java.util.Date.class, wrapperOptions ),
precision
);
}
}
}
}
| hibernate/hibernate-semantic-query | src/test/java/org/hibernate/orm/type/descriptor/sql/internal/JdbcLiteralFormatterTemporal.java | Java | apache-2.0 | 2,399 |
package com.etiennelawlor.loop.network.models.response;
import android.os.Parcel;
import android.os.Parcelable;
import com.google.gson.annotations.SerializedName;
/**
* Created by etiennelawlor on 5/23/15.
*/
public class Tag implements Parcelable {
// region Fields
@SerializedName("uri")
private String uri;
@SerializedName("name")
private String name;
@SerializedName("tag")
private String tag;
@SerializedName("canonical")
private String canonical;
// endregion
// region Constructors
public Tag() {
}
protected Tag(Parcel in) {
this.uri = in.readString();
this.name = in.readString();
this.tag = in.readString();
this.canonical = in.readString();
}
// endregion
// region Getters
public String getUri() {
return uri;
}
public String getName() {
return name;
}
public String getTag() {
return tag;
}
public String getCanonical() {
return canonical;
}
// endregion
// region Setters
public void setUri(String uri) {
this.uri = uri;
}
public void setName(String name) {
this.name = name;
}
public void setTag(String tag) {
this.tag = tag;
}
public void setCanonical(String canonical) {
this.canonical = canonical;
}
// endregion
// region Parcelable Methods
@Override
public int describeContents() {
return 0;
}
@Override
public void writeToParcel(Parcel dest, int flags) {
dest.writeString(this.uri);
dest.writeString(this.name);
dest.writeString(this.tag);
dest.writeString(this.canonical);
}
// endregion
public static final Parcelable.Creator<Tag> CREATOR = new Parcelable.Creator<Tag>() {
@Override
public Tag createFromParcel(Parcel source) {
return new Tag(source);
}
@Override
public Tag[] newArray(int size) {
return new Tag[size];
}
};
}
| lawloretienne/Loop | app/src/main/java/com/etiennelawlor/loop/network/models/response/Tag.java | Java | apache-2.0 | 2,066 |
package rvc.ann;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
* @author nurmuhammad
*/
@Retention(RUNTIME)
@Target(METHOD)
public @interface OPTIONS {
String value() default Constants.NULL_VALUE;
boolean absolutePath() default false;
} | nurmuhammad/rvc | src/main/java/rvc/ann/OPTIONS.java | Java | apache-2.0 | 399 |
package com.xiaojinzi.component.bean;
import javax.lang.model.element.Element;
/**
* time : 2018/07/26
*
* @author : xiaojinzi
*/
public class RouterDegradeAnnoBean {
    /**
     * The priority.
     */
private int priority;
    /**
     * A class that implements the RouterDegrade interface.
     */
private Element rawType;
public int getPriority() {
return priority;
}
public void setPriority(int priority) {
this.priority = priority;
}
public Element getRawType() {
return rawType;
}
public void setRawType(Element rawType) {
this.rawType = rawType;
}
}
| xiaojinzi123/Component | ComponentCompiler/src/main/java/com/xiaojinzi/component/bean/RouterDegradeAnnoBean.java | Java | apache-2.0 | 632 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.executiongraph;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.Archiveable;
import org.apache.flink.api.common.InputDependencyConstraint;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.core.io.InputSplit;
import org.apache.flink.runtime.JobException;
import org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult;
import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.checkpoint.CheckpointType;
import org.apache.flink.runtime.checkpoint.JobManagerTaskRestore;
import org.apache.flink.runtime.clusterframework.types.AllocationID;
import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.clusterframework.types.SlotProfile;
import org.apache.flink.runtime.concurrent.ComponentMainThreadExecutor;
import org.apache.flink.runtime.concurrent.FutureUtils;
import org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory;
import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.instance.SlotSharingGroupId;
import org.apache.flink.runtime.io.network.partition.PartitionTracker;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.apache.flink.runtime.jobmanager.scheduler.CoLocationConstraint;
import org.apache.flink.runtime.jobmanager.scheduler.LocationPreferenceConstraint;
import org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException;
import org.apache.flink.runtime.jobmanager.scheduler.ScheduledUnit;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
import org.apache.flink.runtime.jobmanager.slots.TaskManagerGateway;
import org.apache.flink.runtime.jobmaster.LogicalSlot;
import org.apache.flink.runtime.jobmaster.SlotRequestId;
import org.apache.flink.runtime.jobmaster.slotpool.SlotProvider;
import org.apache.flink.runtime.messages.Acknowledge;
import org.apache.flink.runtime.messages.StackTraceSampleResponse;
import org.apache.flink.runtime.shuffle.PartitionDescriptor;
import org.apache.flink.runtime.shuffle.ProducerDescriptor;
import org.apache.flink.runtime.shuffle.ShuffleDescriptor;
import org.apache.flink.runtime.state.KeyGroupRangeAssignment;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;
import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkException;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.OptionalFailure;
import org.apache.flink.util.function.ThrowingRunnable;
import org.slf4j.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.Function;
import java.util.stream.Collectors;
import static org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory.getConsumedPartitionShuffleDescriptor;
import static org.apache.flink.runtime.execution.ExecutionState.CANCELED;
import static org.apache.flink.runtime.execution.ExecutionState.CANCELING;
import static org.apache.flink.runtime.execution.ExecutionState.CREATED;
import static org.apache.flink.runtime.execution.ExecutionState.DEPLOYING;
import static org.apache.flink.runtime.execution.ExecutionState.FAILED;
import static org.apache.flink.runtime.execution.ExecutionState.FINISHED;
import static org.apache.flink.runtime.execution.ExecutionState.RUNNING;
import static org.apache.flink.runtime.execution.ExecutionState.SCHEDULED;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* A single execution of a vertex. While an {@link ExecutionVertex} can be executed multiple times
* (for recovery, re-computation, re-configuration), this class tracks the state of a single execution
* of that vertex and the resources.
*
* <h2>Lock free state transitions</h2>
*
* <p>In several points of the code, we need to deal with possible concurrent state changes and actions.
* For example, while the call to deploy a task (send it to the TaskManager) happens, the task gets cancelled.
*
* <p>We could lock the entire portion of the code (decision to deploy, deploy, set state to running) such that
* it is guaranteed that any "cancel command" will only pick up after deployment is done and that the "cancel
* command" call will never overtake the deploying call.
*
 * <p>This blocks the threads big time, because the remote calls may take long. Depending on their locking behavior, it
* may even result in distributed deadlocks (unless carefully avoided). We therefore use atomic state updates and
* occasional double-checking to ensure that the state after a completed call is as expected, and trigger correcting
* actions if it is not. Many actions are also idempotent (like canceling).
*/
public class Execution implements AccessExecution, Archiveable<ArchivedExecution>, LogicalSlot.Payload {
private static final AtomicReferenceFieldUpdater<Execution, ExecutionState> STATE_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(Execution.class, ExecutionState.class, "state");
private static final AtomicReferenceFieldUpdater<Execution, LogicalSlot> ASSIGNED_SLOT_UPDATER = AtomicReferenceFieldUpdater.newUpdater(
Execution.class,
LogicalSlot.class,
"assignedResource");
private static final Logger LOG = ExecutionGraph.LOG;
private static final int NUM_CANCEL_CALL_TRIES = 3;
private static final int NUM_STOP_CALL_TRIES = 3;
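	// For illustration only: the lock-free approach described in the class comment boils down
	// to optimistic compare-and-swap transitions followed by double-checking, roughly:
	//
	//   if (transitionState(SCHEDULED, DEPLOYING)) {
	//       // we own the deployment; a concurrent cancel() observes DEPLOYING and sends a
	//       // cancel RPC instead of blocking on a lock
	//   } else {
	//       // another thread changed the state first (e.g. to CANCELING); re-check and react
	//   }
	//
	// transitionState(...) is defined near the bottom of this class and is backed by STATE_UPDATER.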
// --------------------------------------------------------------------------------------------
/** The executor which is used to execute futures. */
private final Executor executor;
/** The execution vertex whose task this execution executes. */
private final ExecutionVertex vertex;
/** The unique ID marking the specific execution instant of the task. */
private final ExecutionAttemptID attemptId;
/** Gets the global modification version of the execution graph when this execution was created.
* This version is bumped in the ExecutionGraph whenever a global failover happens. It is used
* to resolve conflicts between concurrent modification by global and local failover actions. */
private final long globalModVersion;
/** The timestamps when state transitions occurred, indexed by {@link ExecutionState#ordinal()}. */
private final long[] stateTimestamps;
private final int attemptNumber;
private final Time rpcTimeout;
private final Collection<PartitionInfo> partitionInfos;
/** A future that completes once the Execution reaches a terminal ExecutionState. */
private final CompletableFuture<ExecutionState> terminalStateFuture;
private final CompletableFuture<?> releaseFuture;
private final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture;
private volatile ExecutionState state = CREATED;
private volatile LogicalSlot assignedResource;
private volatile Throwable failureCause; // once assigned, never changes
/** Information to restore the task on recovery, such as checkpoint id and task state snapshot. */
@Nullable
private volatile JobManagerTaskRestore taskRestore;
/** This field holds the allocation id once it was assigned successfully. */
@Nullable
private volatile AllocationID assignedAllocationID;
// ------------------------ Accumulators & Metrics ------------------------
/** Lock for updating the accumulators atomically.
	 * Prevents final accumulators from being overwritten by partial accumulators on a late heartbeat. */
private final Object accumulatorLock = new Object();
/* Continuously updated map of user-defined accumulators */
private volatile Map<String, Accumulator<?, ?>> userAccumulators;
private volatile IOMetrics ioMetrics;
private Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions;
// --------------------------------------------------------------------------------------------
/**
* Creates a new Execution attempt.
*
* @param executor
* The executor used to dispatch callbacks from futures and asynchronous RPC calls.
* @param vertex
* The execution vertex to which this Execution belongs
* @param attemptNumber
* The execution attempt number.
* @param globalModVersion
* The global modification version of the execution graph when this execution was created
* @param startTimestamp
* The timestamp that marks the creation of this Execution
* @param rpcTimeout
* The rpcTimeout for RPC calls like deploy/cancel/stop.
*/
public Execution(
Executor executor,
ExecutionVertex vertex,
int attemptNumber,
long globalModVersion,
long startTimestamp,
Time rpcTimeout) {
this.executor = checkNotNull(executor);
this.vertex = checkNotNull(vertex);
this.attemptId = new ExecutionAttemptID();
this.rpcTimeout = checkNotNull(rpcTimeout);
this.globalModVersion = globalModVersion;
this.attemptNumber = attemptNumber;
this.stateTimestamps = new long[ExecutionState.values().length];
markTimestamp(CREATED, startTimestamp);
this.partitionInfos = new ArrayList<>(16);
this.producedPartitions = Collections.emptyMap();
this.terminalStateFuture = new CompletableFuture<>();
this.releaseFuture = new CompletableFuture<>();
this.taskManagerLocationFuture = new CompletableFuture<>();
this.assignedResource = null;
}
// --------------------------------------------------------------------------------------------
// Properties
// --------------------------------------------------------------------------------------------
public ExecutionVertex getVertex() {
return vertex;
}
@Override
public ExecutionAttemptID getAttemptId() {
return attemptId;
}
@Override
public int getAttemptNumber() {
return attemptNumber;
}
@Override
public ExecutionState getState() {
return state;
}
@Nullable
public AllocationID getAssignedAllocationID() {
return assignedAllocationID;
}
/**
* Gets the global modification version of the execution graph when this execution was created.
*
* <p>This version is bumped in the ExecutionGraph whenever a global failover happens. It is used
* to resolve conflicts between concurrent modification by global and local failover actions.
*/
public long getGlobalModVersion() {
return globalModVersion;
}
public CompletableFuture<TaskManagerLocation> getTaskManagerLocationFuture() {
return taskManagerLocationFuture;
}
public LogicalSlot getAssignedResource() {
return assignedResource;
}
public Optional<ResultPartitionDeploymentDescriptor> getResultPartitionDeploymentDescriptor(
IntermediateResultPartitionID id) {
return Optional.ofNullable(producedPartitions.get(id));
}
/**
* Tries to assign the given slot to the execution. The assignment works only if the
* Execution is in state SCHEDULED. Returns true, if the resource could be assigned.
*
* @param logicalSlot to assign to this execution
* @return true if the slot could be assigned to the execution, otherwise false
*/
@VisibleForTesting
boolean tryAssignResource(final LogicalSlot logicalSlot) {
assertRunningInJobMasterMainThread();
checkNotNull(logicalSlot);
// only allow to set the assigned resource in state SCHEDULED or CREATED
// note: we also accept resource assignment when being in state CREATED for testing purposes
if (state == SCHEDULED || state == CREATED) {
if (ASSIGNED_SLOT_UPDATER.compareAndSet(this, null, logicalSlot)) {
if (logicalSlot.tryAssignPayload(this)) {
// check for concurrent modification (e.g. cancelling call)
if ((state == SCHEDULED || state == CREATED) && !taskManagerLocationFuture.isDone()) {
taskManagerLocationFuture.complete(logicalSlot.getTaskManagerLocation());
assignedAllocationID = logicalSlot.getAllocationId();
return true;
} else {
// free assigned resource and return false
ASSIGNED_SLOT_UPDATER.set(this, null);
return false;
}
} else {
ASSIGNED_SLOT_UPDATER.set(this, null);
return false;
}
} else {
				// this execution already has a slot assigned
return false;
}
} else {
			// do not allow resource assignment if we are not in state SCHEDULED (or CREATED, for tests)
return false;
}
}
public InputSplit getNextInputSplit() {
final LogicalSlot slot = this.getAssignedResource();
final String host = slot != null ? slot.getTaskManagerLocation().getHostname() : null;
return this.vertex.getNextInputSplit(host);
}
@Override
public TaskManagerLocation getAssignedResourceLocation() {
// returns non-null only when a location is already assigned
final LogicalSlot currentAssignedResource = assignedResource;
return currentAssignedResource != null ? currentAssignedResource.getTaskManagerLocation() : null;
}
public Throwable getFailureCause() {
return failureCause;
}
@Override
public String getFailureCauseAsString() {
return ExceptionUtils.stringifyException(getFailureCause());
}
@Override
public long[] getStateTimestamps() {
return stateTimestamps;
}
@Override
public long getStateTimestamp(ExecutionState state) {
return this.stateTimestamps[state.ordinal()];
}
public boolean isFinished() {
return state.isTerminal();
}
@Nullable
public JobManagerTaskRestore getTaskRestore() {
return taskRestore;
}
/**
* Sets the initial state for the execution. The serialized state is then shipped via the
* {@link TaskDeploymentDescriptor} to the TaskManagers.
*
* @param taskRestore information to restore the state
*/
public void setInitialState(@Nullable JobManagerTaskRestore taskRestore) {
this.taskRestore = taskRestore;
}
/**
* Gets a future that completes once the task execution reaches a terminal state.
* The future will be completed with specific state that the execution reached.
* This future is always completed from the job master's main thread.
*
* @return A future which is completed once the execution reaches a terminal state
*/
@Override
public CompletableFuture<ExecutionState> getTerminalStateFuture() {
return terminalStateFuture;
}
/**
* Gets the release future which is completed once the execution reaches a terminal
* state and the assigned resource has been released.
* This future is always completed from the job master's main thread.
*
* @return A future which is completed once the assigned resource has been released
*/
public CompletableFuture<?> getReleaseFuture() {
return releaseFuture;
}
// --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
public CompletableFuture<Void> scheduleForExecution() {
final ExecutionGraph executionGraph = getVertex().getExecutionGraph();
final SlotProvider resourceProvider = executionGraph.getSlotProvider();
final boolean allowQueued = executionGraph.isQueuedSchedulingAllowed();
return scheduleForExecution(
resourceProvider,
allowQueued,
LocationPreferenceConstraint.ANY,
Collections.emptySet());
}
/**
	 * NOTE: This method only throws exceptions if it is in an illegal state to be scheduled, or if the task needs
	 * to be scheduled immediately and no resource is available. If the task is accepted by the scheduler, any
* error sets the vertex state to failed and triggers the recovery logic.
*
* @param slotProvider The slot provider to use to allocate slot for this execution attempt.
* @param queued Flag to indicate whether the scheduler may queue this task if it cannot
* immediately deploy it.
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @return Future which is completed once the Execution has been deployed
*/
public CompletableFuture<Void> scheduleForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds) {
assertRunningInJobMasterMainThread();
final ExecutionGraph executionGraph = vertex.getExecutionGraph();
final Time allocationTimeout = executionGraph.getAllocationTimeout();
try {
final CompletableFuture<Execution> allocationFuture = allocateResourcesForExecution(
slotProvider,
queued,
locationPreferenceConstraint,
allPreviousExecutionGraphAllocationIds,
allocationTimeout);
final CompletableFuture<Void> deploymentFuture;
if (allocationFuture.isDone() || queued) {
deploymentFuture = allocationFuture.thenRun(ThrowingRunnable.unchecked(this::deploy));
} else {
deploymentFuture = FutureUtils.completedExceptionally(
new IllegalArgumentException("The slot allocation future has not been completed yet."));
}
deploymentFuture.whenComplete(
(Void ignored, Throwable failure) -> {
if (failure != null) {
final Throwable stripCompletionException = ExceptionUtils.stripCompletionException(failure);
final Throwable schedulingFailureCause;
if (stripCompletionException instanceof TimeoutException) {
schedulingFailureCause = new NoResourceAvailableException(
"Could not allocate enough slots within timeout of " + allocationTimeout + " to run the job. " +
"Please make sure that the cluster has enough resources.");
} else {
schedulingFailureCause = stripCompletionException;
}
markFailed(schedulingFailureCause);
}
});
return deploymentFuture;
} catch (IllegalExecutionStateException e) {
return FutureUtils.completedExceptionally(e);
}
}
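	// For illustration: the no-argument scheduleForExecution() above is equivalent to calling
	// this method with the graph's SlotProvider, its queued-scheduling flag,
	// LocationPreferenceConstraint.ANY and an empty set of prior allocation ids. If the slot
	// allocation times out, the failure surfaces here as a NoResourceAvailableException passed
	// to markFailed(...), which triggers the regular failover handling.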
/**
* Allocates resources for the execution.
*
	 * <p>Allocates the following resources:
* <ol>
* <li>slot obtained from the slot provider</li>
* <li>registers produced partitions with the {@link org.apache.flink.runtime.shuffle.ShuffleMaster}</li>
* </ol>
*
* @param slotProvider to obtain a new slot from
* @param queued if the allocation can be queued
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @param allocationTimeout rpcTimeout for allocating a new slot
* @return Future which is completed with this execution once the slot has been assigned
* or with an exception if an error occurred.
*/
CompletableFuture<Execution> allocateResourcesForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds,
Time allocationTimeout) {
return allocateAndAssignSlotForExecution(
slotProvider,
queued,
locationPreferenceConstraint,
allPreviousExecutionGraphAllocationIds,
allocationTimeout)
.thenCompose(slot -> registerProducedPartitions(slot.getTaskManagerLocation()));
}
/**
* Allocates and assigns a slot obtained from the slot provider to the execution.
*
* @param slotProvider to obtain a new slot from
* @param queued if the allocation can be queued
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @param allocationTimeout rpcTimeout for allocating a new slot
* @return Future which is completed with the allocated slot once it has been assigned
* or with an exception if an error occurred.
*/
private CompletableFuture<LogicalSlot> allocateAndAssignSlotForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds,
Time allocationTimeout) {
checkNotNull(slotProvider);
assertRunningInJobMasterMainThread();
final SlotSharingGroup sharingGroup = vertex.getJobVertex().getSlotSharingGroup();
final CoLocationConstraint locationConstraint = vertex.getLocationConstraint();
// sanity check
if (locationConstraint != null && sharingGroup == null) {
throw new IllegalStateException(
"Trying to schedule with co-location constraint but without slot sharing allowed.");
}
// this method only works if the execution is in the state 'CREATED'
if (transitionState(CREATED, SCHEDULED)) {
final SlotSharingGroupId slotSharingGroupId = sharingGroup != null ? sharingGroup.getSlotSharingGroupId() : null;
ScheduledUnit toSchedule = locationConstraint == null ?
new ScheduledUnit(this, slotSharingGroupId) :
new ScheduledUnit(this, slotSharingGroupId, locationConstraint);
// try to extract previous allocation ids, if applicable, so that we can reschedule to the same slot
ExecutionVertex executionVertex = getVertex();
AllocationID lastAllocation = executionVertex.getLatestPriorAllocation();
Collection<AllocationID> previousAllocationIDs =
lastAllocation != null ? Collections.singletonList(lastAllocation) : Collections.emptyList();
// calculate the preferred locations
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture =
calculatePreferredLocations(locationPreferenceConstraint);
final SlotRequestId slotRequestId = new SlotRequestId();
final CompletableFuture<LogicalSlot> logicalSlotFuture =
preferredLocationsFuture.thenCompose(
(Collection<TaskManagerLocation> preferredLocations) ->
slotProvider.allocateSlot(
slotRequestId,
toSchedule,
new SlotProfile(
ResourceProfile.UNKNOWN,
preferredLocations,
previousAllocationIDs,
allPreviousExecutionGraphAllocationIds),
queued,
allocationTimeout));
			// register a callback to cancel the slot request in case the execution gets canceled
releaseFuture.whenComplete(
(Object ignored, Throwable throwable) -> {
if (logicalSlotFuture.cancel(false)) {
slotProvider.cancelSlotRequest(
slotRequestId,
slotSharingGroupId,
new FlinkException("Execution " + this + " was released."));
}
});
// This forces calls to the slot pool back into the main thread, for normal and exceptional completion
return logicalSlotFuture.handle(
(LogicalSlot logicalSlot, Throwable failure) -> {
if (failure != null) {
throw new CompletionException(failure);
}
if (tryAssignResource(logicalSlot)) {
return logicalSlot;
} else {
// release the slot
logicalSlot.releaseSlot(new FlinkException("Could not assign logical slot to execution " + this + '.'));
throw new CompletionException(
new FlinkException(
"Could not assign slot " + logicalSlot + " to execution " + this + " because it has already been assigned "));
}
});
} else {
// call race, already deployed, or already done
throw new IllegalExecutionStateException(this, CREATED, state);
}
}
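	// Ordering note (a recap of the method above): the CAS from CREATED to SCHEDULED ensures at
	// most one slot request per execution attempt, the releaseFuture callback cancels a
	// still-pending slot request if the execution is released early, and the final handle(...)
	// stage pulls the slot assignment back onto the job master main thread.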
@VisibleForTesting
CompletableFuture<Execution> registerProducedPartitions(TaskManagerLocation location) {
assertRunningInJobMasterMainThread();
return FutureUtils.thenApplyAsyncIfNotDone(
registerProducedPartitions(vertex, location, attemptId),
vertex.getExecutionGraph().getJobMasterMainThreadExecutor(),
producedPartitionsCache -> {
producedPartitions = producedPartitionsCache;
startTrackingPartitions(location.getResourceID(), producedPartitionsCache.values());
return this;
});
}
@VisibleForTesting
static CompletableFuture<Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor>> registerProducedPartitions(
ExecutionVertex vertex,
TaskManagerLocation location,
ExecutionAttemptID attemptId) {
ProducerDescriptor producerDescriptor = ProducerDescriptor.create(location, attemptId);
boolean lazyScheduling = vertex.getExecutionGraph().getScheduleMode().allowLazyDeployment();
Collection<IntermediateResultPartition> partitions = vertex.getProducedPartitions().values();
Collection<CompletableFuture<ResultPartitionDeploymentDescriptor>> partitionRegistrations =
new ArrayList<>(partitions.size());
for (IntermediateResultPartition partition : partitions) {
PartitionDescriptor partitionDescriptor = PartitionDescriptor.from(partition);
int maxParallelism = getPartitionMaxParallelism(partition);
CompletableFuture<? extends ShuffleDescriptor> shuffleDescriptorFuture = vertex
.getExecutionGraph()
.getShuffleMaster()
.registerPartitionWithProducer(partitionDescriptor, producerDescriptor);
final boolean releasePartitionOnConsumption =
vertex.getExecutionGraph().isForcePartitionReleaseOnConsumption()
|| !partitionDescriptor.getPartitionType().isBlocking();
CompletableFuture<ResultPartitionDeploymentDescriptor> partitionRegistration = shuffleDescriptorFuture
.thenApply(shuffleDescriptor -> new ResultPartitionDeploymentDescriptor(
partitionDescriptor,
shuffleDescriptor,
maxParallelism,
lazyScheduling,
releasePartitionOnConsumption
? ShuffleDescriptor.ReleaseType.AUTO
: ShuffleDescriptor.ReleaseType.MANUAL));
partitionRegistrations.add(partitionRegistration);
}
return FutureUtils.combineAll(partitionRegistrations).thenApply(rpdds -> {
Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions =
new LinkedHashMap<>(partitions.size());
rpdds.forEach(rpdd -> producedPartitions.put(rpdd.getPartitionId(), rpdd));
return producedPartitions;
});
}
private static int getPartitionMaxParallelism(IntermediateResultPartition partition) {
// TODO consumers.isEmpty() only exists for test, currently there has to be exactly one consumer in real jobs!
final List<List<ExecutionEdge>> consumers = partition.getConsumers();
int maxParallelism = KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM;
if (!consumers.isEmpty()) {
List<ExecutionEdge> consumer = consumers.get(0);
ExecutionJobVertex consumerVertex = consumer.get(0).getTarget().getJobVertex();
maxParallelism = consumerVertex.getMaxParallelism();
}
return maxParallelism;
}
/**
* Deploys the execution to the previously assigned resource.
*
* @throws JobException if the execution cannot be deployed to the assigned resource
*/
public void deploy() throws JobException {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
checkNotNull(slot, "In order to deploy the execution we first have to assign a resource via tryAssignResource.");
// Check if the TaskManager died in the meantime
		// This only speeds up the response to TaskManagers failing concurrently with deployments.
// The more general check is the rpcTimeout of the deployment call
if (!slot.isAlive()) {
throw new JobException("Target slot (TaskManager) for deployment is no longer alive.");
}
// make sure exactly one deployment call happens from the correct state
// note: the transition from CREATED to DEPLOYING is for testing purposes only
ExecutionState previous = this.state;
if (previous == SCHEDULED || previous == CREATED) {
if (!transitionState(previous, DEPLOYING)) {
// race condition, someone else beat us to the deploying call.
// this should actually not happen and indicates a race somewhere else
throw new IllegalStateException("Cannot deploy task: Concurrent deployment call race.");
}
}
else {
// vertex may have been cancelled, or it was already scheduled
throw new IllegalStateException("The vertex must be in CREATED or SCHEDULED state to be deployed. Found state " + previous);
}
if (this != slot.getPayload()) {
throw new IllegalStateException(
String.format("The execution %s has not been assigned to the assigned slot.", this));
}
try {
// race double check, did we fail/cancel and do we need to release the slot?
if (this.state != DEPLOYING) {
slot.releaseSlot(new FlinkException("Actual state of execution " + this + " (" + state + ") does not match expected state DEPLOYING."));
return;
}
if (LOG.isInfoEnabled()) {
LOG.info(String.format("Deploying %s (attempt #%d) to %s", vertex.getTaskNameWithSubtaskIndex(),
attemptNumber, getAssignedResourceLocation()));
}
final TaskDeploymentDescriptor deployment = TaskDeploymentDescriptorFactory
.fromExecutionVertex(vertex, attemptNumber)
.createDeploymentDescriptor(
slot.getAllocationId(),
slot.getPhysicalSlotNumber(),
taskRestore,
producedPartitions.values());
// null taskRestore to let it be GC'ed
taskRestore = null;
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
vertex.getExecutionGraph().getJobMasterMainThreadExecutor();
// We run the submission in the future executor so that the serialization of large TDDs does not block
// the main thread and sync back to the main thread once submission is completed.
CompletableFuture.supplyAsync(() -> taskManagerGateway.submitTask(deployment, rpcTimeout), executor)
.thenCompose(Function.identity())
.whenCompleteAsync(
(ack, failure) -> {
// only respond to the failure case
if (failure != null) {
if (failure instanceof TimeoutException) {
String taskname = vertex.getTaskNameWithSubtaskIndex() + " (" + attemptId + ')';
markFailed(new Exception(
"Cannot deploy task " + taskname + " - TaskManager (" + getAssignedResourceLocation()
+ ") not responding after a rpcTimeout of " + rpcTimeout, failure));
} else {
markFailed(failure);
}
}
},
jobMasterMainThreadExecutor);
}
catch (Throwable t) {
markFailed(t);
ExceptionUtils.rethrow(t);
}
}
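	// Deployment in short (a recap of the method above, not additional behaviour):
	//   1. double-check that the state is still DEPLOYING, otherwise release the slot and return
	//   2. build the TaskDeploymentDescriptor, including a possible taskRestore snapshot
	//   3. submit the task via the TaskManagerGateway from the future executor
	//   4. on failure, call markFailed(...) back on the job master main thread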
public void cancel() {
// depending on the previous state, we go directly to cancelled (no cancel call necessary)
// -- or to canceling (cancel call needs to be sent to the task manager)
// because of several possibly previous states, we need to again loop until we make a
// successful atomic state transition
assertRunningInJobMasterMainThread();
while (true) {
ExecutionState current = this.state;
if (current == CANCELING || current == CANCELED) {
// already taken care of, no need to cancel again
return;
}
// these two are the common cases where we need to send a cancel call
else if (current == RUNNING || current == DEPLOYING) {
// try to transition to canceling, if successful, send the cancel call
if (startCancelling(NUM_CANCEL_CALL_TRIES)) {
return;
}
// else: fall through the loop
}
else if (current == FINISHED || current == FAILED) {
// nothing to do any more. finished/failed before it could be cancelled.
// in any case, the task is removed from the TaskManager already
return;
}
else if (current == CREATED || current == SCHEDULED) {
// from here, we can directly switch to cancelled, because no task has been deployed
if (cancelAtomically()) {
return;
}
// else: fall through the loop
}
else {
throw new IllegalStateException(current.name());
}
}
}
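	// Cancellation outcome by current state (derived from the loop above):
	//   CREATED / SCHEDULED   -> go straight to CANCELED, nothing has been deployed yet
	//   DEPLOYING / RUNNING   -> switch to CANCELING and send a cancel RPC to the TaskManager
	//   CANCELING / CANCELED  -> already being taken care of, nothing to do
	//   FINISHED / FAILED     -> terminal already, the task has left the TaskManager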
public CompletableFuture<?> suspend() {
switch(state) {
case RUNNING:
case DEPLOYING:
case CREATED:
case SCHEDULED:
if (!cancelAtomically()) {
throw new IllegalStateException(
String.format("Could not directly go to %s from %s.", CANCELED.name(), state.name()));
}
break;
case CANCELING:
completeCancelling();
break;
case FINISHED:
case FAILED:
case CANCELED:
break;
default:
throw new IllegalStateException(state.name());
}
return releaseFuture;
}
private void scheduleConsumer(ExecutionVertex consumerVertex) {
try {
final ExecutionGraph executionGraph = consumerVertex.getExecutionGraph();
consumerVertex.scheduleForExecution(
executionGraph.getSlotProvider(),
executionGraph.isQueuedSchedulingAllowed(),
LocationPreferenceConstraint.ANY, // there must be at least one known location
Collections.emptySet());
} catch (Throwable t) {
consumerVertex.fail(new IllegalStateException("Could not schedule consumer " +
"vertex " + consumerVertex, t));
}
}
void scheduleOrUpdateConsumers(List<List<ExecutionEdge>> allConsumers) {
assertRunningInJobMasterMainThread();
final int numConsumers = allConsumers.size();
if (numConsumers > 1) {
fail(new IllegalStateException("Currently, only a single consumer group per partition is supported."));
} else if (numConsumers == 0) {
return;
}
for (ExecutionEdge edge : allConsumers.get(0)) {
final ExecutionVertex consumerVertex = edge.getTarget();
final Execution consumer = consumerVertex.getCurrentExecutionAttempt();
final ExecutionState consumerState = consumer.getState();
// ----------------------------------------------------------------
// Consumer is created => try to schedule it and the partition info
// is known during deployment
// ----------------------------------------------------------------
if (consumerState == CREATED) {
// Schedule the consumer vertex if its inputs constraint is satisfied, otherwise skip the scheduling.
// A shortcut of input constraint check is added for InputDependencyConstraint.ANY since
// at least one of the consumer vertex's inputs is consumable here. This is to avoid the
// O(N) complexity introduced by input constraint check for InputDependencyConstraint.ANY,
// as we do not want the default scheduling performance to be affected.
if (consumerVertex.getInputDependencyConstraint() == InputDependencyConstraint.ANY ||
consumerVertex.checkInputDependencyConstraints()) {
scheduleConsumer(consumerVertex);
}
}
// ----------------------------------------------------------------
// Consumer is running => send update message now
// Consumer is deploying => cache the partition info which would be
// sent after switching to running
// ----------------------------------------------------------------
else if (consumerState == DEPLOYING || consumerState == RUNNING) {
final PartitionInfo partitionInfo = createPartitionInfo(edge);
if (consumerState == DEPLOYING) {
consumerVertex.cachePartitionInfo(partitionInfo);
} else {
consumer.sendUpdatePartitionInfoRpcCall(Collections.singleton(partitionInfo));
}
}
}
}
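	// Consumer handling recap (mirrors the branches above): CREATED consumers are scheduled,
	// subject to their input dependency constraint; DEPLOYING consumers cache the partition
	// info until they switch to RUNNING; RUNNING consumers receive an immediate
	// updatePartitions RPC.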
private static PartitionInfo createPartitionInfo(ExecutionEdge executionEdge) {
IntermediateDataSetID intermediateDataSetID = executionEdge.getSource().getIntermediateResult().getId();
ShuffleDescriptor shuffleDescriptor = getConsumedPartitionShuffleDescriptor(executionEdge, false);
return new PartitionInfo(intermediateDataSetID, shuffleDescriptor);
}
/**
* This method fails the vertex due to an external condition. The task will move to state FAILED.
* If the task was in state RUNNING or DEPLOYING before, it will send a cancel call to the TaskManager.
*
* @param t The exception that caused the task to fail.
*/
@Override
public void fail(Throwable t) {
processFail(t, false);
}
/**
* Request a stack trace sample from the task of this execution.
*
* @param sampleId of the stack trace sample
* @param numSamples the sample should contain
* @param delayBetweenSamples to wait
* @param maxStackTraceDepth of the samples
* @param timeout until the request times out
* @return Future stack trace sample response
*/
public CompletableFuture<StackTraceSampleResponse> requestStackTraceSample(
int sampleId,
int numSamples,
Time delayBetweenSamples,
int maxStackTraceDepth,
Time timeout) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
return taskManagerGateway.requestStackTraceSample(
attemptId,
sampleId,
numSamples,
delayBetweenSamples,
maxStackTraceDepth,
timeout);
} else {
return FutureUtils.completedExceptionally(new Exception("The execution has no slot assigned."));
}
}
/**
* Notify the task of this execution about a completed checkpoint.
*
* @param checkpointId of the completed checkpoint
* @param timestamp of the completed checkpoint
*/
public void notifyCheckpointComplete(long checkpointId, long timestamp) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
taskManagerGateway.notifyCheckpointComplete(attemptId, getVertex().getJobId(), checkpointId, timestamp);
} else {
LOG.debug("The execution has no slot assigned. This indicates that the execution is " +
"no longer running.");
}
}
/**
* Trigger a new checkpoint on the task of this execution.
*
	 * @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
*/
public void triggerCheckpoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions) {
triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, false);
}
/**
* Trigger a new checkpoint on the task of this execution.
*
	 * @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
* @param advanceToEndOfEventTime Flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline
* to fire any registered event-time timers
*/
public void triggerSynchronousSavepoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) {
triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime);
}
private void triggerCheckpointHelper(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) {
final CheckpointType checkpointType = checkpointOptions.getCheckpointType();
if (advanceToEndOfEventTime && !(checkpointType.isSynchronous() && checkpointType.isSavepoint())) {
throw new IllegalArgumentException("Only synchronous savepoints are allowed to advance the watermark to MAX.");
}
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
taskManagerGateway.triggerCheckpoint(attemptId, getVertex().getJobId(), checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime);
} else {
LOG.debug("The execution has no slot assigned. This indicates that the execution is no longer running.");
}
}
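	// Note: triggerCheckpoint(...) and triggerSynchronousSavepoint(...) only differ in the
	// advanceToEndOfEventTime flag; the guard above rejects that flag for anything that is
	// not a synchronous savepoint.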
// --------------------------------------------------------------------------------------------
// Callbacks
// --------------------------------------------------------------------------------------------
/**
* This method marks the task as failed, but will make no attempt to remove task execution from the task manager.
	 * It is intended for cases where the task is known not to be running, or when the TaskManager reports failure
* (in which case it has already removed the task).
*
* @param t The exception that caused the task to fail.
*/
void markFailed(Throwable t) {
processFail(t, true);
}
void markFailed(Throwable t, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
processFail(t, true, userAccumulators, metrics);
}
@VisibleForTesting
void markFinished() {
markFinished(null, null);
}
void markFinished(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
assertRunningInJobMasterMainThread();
// this call usually comes during RUNNING, but may also come while still in deploying (very fast tasks!)
while (true) {
ExecutionState current = this.state;
if (current == RUNNING || current == DEPLOYING) {
if (transitionState(current, FINISHED)) {
try {
for (IntermediateResultPartition finishedPartition
: getVertex().finishAllBlockingPartitions()) {
IntermediateResultPartition[] allPartitions = finishedPartition
.getIntermediateResult().getPartitions();
for (IntermediateResultPartition partition : allPartitions) {
scheduleOrUpdateConsumers(partition.getConsumers());
}
}
updateAccumulatorsAndMetrics(userAccumulators, metrics);
releaseAssignedResource(null);
vertex.getExecutionGraph().deregisterExecution(this);
}
finally {
vertex.executionFinished(this);
}
return;
}
}
else if (current == CANCELING) {
// we sent a cancel call, and the task manager finished before it arrived. We
				// will never get a CANCELED call back from the task manager
completeCancelling(userAccumulators, metrics);
return;
}
else if (current == CANCELED || current == FAILED) {
if (LOG.isDebugEnabled()) {
LOG.debug("Task FINISHED, but concurrently went to state " + state);
}
return;
}
else {
// this should not happen, we need to fail this
markFailed(new Exception("Vertex received FINISHED message while being in state " + state));
return;
}
}
}
private boolean cancelAtomically() {
if (startCancelling(0)) {
completeCancelling();
return true;
} else {
return false;
}
}
private boolean startCancelling(int numberCancelRetries) {
if (transitionState(state, CANCELING)) {
taskManagerLocationFuture.cancel(false);
sendCancelRpcCall(numberCancelRetries);
return true;
} else {
return false;
}
}
void completeCancelling() {
completeCancelling(null, null);
}
void completeCancelling(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
// the taskmanagers can themselves cancel tasks without an external trigger, if they find that the
		// network stack is canceled (for example by a failing / canceling receiver or sender).
// this is an artifact of the old network runtime, but for now we need to support task transitions
// from running directly to canceled
while (true) {
ExecutionState current = this.state;
if (current == CANCELED) {
return;
}
else if (current == CANCELING || current == RUNNING || current == DEPLOYING) {
updateAccumulatorsAndMetrics(userAccumulators, metrics);
if (transitionState(current, CANCELED)) {
finishCancellation();
return;
}
// else fall through the loop
}
else {
// failing in the meantime may happen and is no problem.
// anything else is a serious problem !!!
if (current != FAILED) {
String message = String.format("Asynchronous race: Found %s in state %s after successful cancel call.", vertex.getTaskNameWithSubtaskIndex(), state);
LOG.error(message);
vertex.getExecutionGraph().failGlobal(new Exception(message));
}
return;
}
}
}
private void finishCancellation() {
releaseAssignedResource(new FlinkException("Execution " + this + " was cancelled."));
vertex.getExecutionGraph().deregisterExecution(this);
		// release partitions on TM in case the Task finished while we were already CANCELING
stopTrackingAndReleasePartitions();
}
void cachePartitionInfo(PartitionInfo partitionInfo) {
partitionInfos.add(partitionInfo);
}
private void sendPartitionInfos() {
if (!partitionInfos.isEmpty()) {
sendUpdatePartitionInfoRpcCall(new ArrayList<>(partitionInfos));
partitionInfos.clear();
}
}
// --------------------------------------------------------------------------------------------
// Internal Actions
// --------------------------------------------------------------------------------------------
private boolean processFail(Throwable t, boolean isCallback) {
return processFail(t, isCallback, null, null);
}
private boolean processFail(Throwable t, boolean isCallback, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
// damn, we failed. This means only that we keep our books and notify our parent JobExecutionVertex
// the actual computation on the task manager is cleaned up by the TaskManager that noticed the failure
// we may need to loop multiple times (in the presence of concurrent calls) in order to
// atomically switch to failed
assertRunningInJobMasterMainThread();
while (true) {
ExecutionState current = this.state;
if (current == FAILED) {
				// already failed. It is enough to remember once that we failed (it's sad enough)
return false;
}
if (current == CANCELED || current == FINISHED) {
// we are already aborting or are already aborted or we are already finished
if (LOG.isDebugEnabled()) {
LOG.debug("Ignoring transition of vertex {} to {} while being {}.", getVertexWithAttempt(), FAILED, current);
}
return false;
}
if (current == CANCELING) {
completeCancelling(userAccumulators, metrics);
return false;
}
if (transitionState(current, FAILED, t)) {
// success (in a manner of speaking)
this.failureCause = t;
updateAccumulatorsAndMetrics(userAccumulators, metrics);
releaseAssignedResource(t);
vertex.getExecutionGraph().deregisterExecution(this);
stopTrackingAndReleasePartitions();
if (!isCallback && (current == RUNNING || current == DEPLOYING)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Sending out cancel request, to remove task execution from TaskManager.");
}
try {
if (assignedResource != null) {
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
}
} catch (Throwable tt) {
// no reason this should ever happen, but log it to be safe
LOG.error("Error triggering cancel call while marking task {} as failed.", getVertex().getTaskNameWithSubtaskIndex(), tt);
}
}
// leave the loop
return true;
}
}
}
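	// Failure handling recap: FAILED is absorbing; CANCELED and FINISHED ignore late failures;
	// a failure while CANCELING just completes the cancellation; otherwise we CAS into FAILED,
	// release the slot, deregister from the ExecutionGraph, stop tracking partitions and, unless
	// this was a callback from the TaskManager, also send a cancel RPC to clean up the task.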
boolean switchToRunning() {
if (transitionState(DEPLOYING, RUNNING)) {
sendPartitionInfos();
return true;
}
else {
// something happened while the call was in progress.
// it can mean:
// - canceling, while deployment was in progress. state is now canceling, or canceled, if the response overtook
// - finishing (execution and finished call overtook the deployment answer, which is possible and happens for fast tasks)
// - failed (execution, failure, and failure message overtook the deployment answer)
ExecutionState currentState = this.state;
if (currentState == FINISHED || currentState == CANCELED) {
// do nothing, the task was really fast (nice)
// or it was canceled really fast
}
else if (currentState == CANCELING || currentState == FAILED) {
if (LOG.isDebugEnabled()) {
// this log statement is guarded because the 'getVertexWithAttempt()' method
// performs string concatenations
LOG.debug("Concurrent canceling/failing of {} while deployment was in progress.", getVertexWithAttempt());
}
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
}
else {
String message = String.format("Concurrent unexpected state transition of task %s to %s while deployment was in progress.",
getVertexWithAttempt(), currentState);
if (LOG.isDebugEnabled()) {
LOG.debug(message);
}
// undo the deployment
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
// record the failure
markFailed(new Exception(message));
}
return false;
}
}
/**
* This method sends a CancelTask message to the instance of the assigned slot.
*
* <p>The sending is tried up to NUM_CANCEL_CALL_TRIES times.
*/
private void sendCancelRpcCall(int numberRetries) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
getVertex().getExecutionGraph().getJobMasterMainThreadExecutor();
CompletableFuture<Acknowledge> cancelResultFuture = FutureUtils.retry(
() -> taskManagerGateway.cancelTask(attemptId, rpcTimeout),
numberRetries,
jobMasterMainThreadExecutor);
cancelResultFuture.whenComplete(
(ack, failure) -> {
if (failure != null) {
fail(new Exception("Task could not be canceled.", failure));
}
});
}
}
private void startTrackingPartitions(final ResourceID taskExecutorId, final Collection<ResultPartitionDeploymentDescriptor> partitions) {
PartitionTracker partitionTracker = vertex.getExecutionGraph().getPartitionTracker();
for (ResultPartitionDeploymentDescriptor partition : partitions) {
partitionTracker.startTrackingPartition(
taskExecutorId,
partition);
}
}
void stopTrackingAndReleasePartitions() {
LOG.info("Discarding the results produced by task execution {}.", attemptId);
if (producedPartitions != null && producedPartitions.size() > 0) {
final PartitionTracker partitionTracker = getVertex().getExecutionGraph().getPartitionTracker();
final List<ResultPartitionID> producedPartitionIds = producedPartitions.values().stream()
.map(ResultPartitionDeploymentDescriptor::getShuffleDescriptor)
.map(ShuffleDescriptor::getResultPartitionID)
.collect(Collectors.toList());
partitionTracker.stopTrackingAndReleasePartitions(producedPartitionIds);
}
}
/**
* Update the partition infos on the assigned resource.
*
* @param partitionInfos for the remote task
*/
private void sendUpdatePartitionInfoRpcCall(
final Iterable<PartitionInfo> partitionInfos) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final TaskManagerLocation taskManagerLocation = slot.getTaskManagerLocation();
CompletableFuture<Acknowledge> updatePartitionsResultFuture = taskManagerGateway.updatePartitions(attemptId, partitionInfos, rpcTimeout);
updatePartitionsResultFuture.whenCompleteAsync(
(ack, failure) -> {
// fail if there was a failure
if (failure != null) {
fail(new IllegalStateException("Update task on TaskManager " + taskManagerLocation +
" failed due to:", failure));
}
}, getVertex().getExecutionGraph().getJobMasterMainThreadExecutor());
}
}
/**
* Releases the assigned resource and completes the release future
* once the assigned resource has been successfully released.
*
* @param cause for the resource release, null if none
*/
private void releaseAssignedResource(@Nullable Throwable cause) {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
if (slot != null) {
ComponentMainThreadExecutor jobMasterMainThreadExecutor =
getVertex().getExecutionGraph().getJobMasterMainThreadExecutor();
slot.releaseSlot(cause)
.whenComplete((Object ignored, Throwable throwable) -> {
jobMasterMainThreadExecutor.assertRunningInMainThread();
if (throwable != null) {
releaseFuture.completeExceptionally(throwable);
} else {
releaseFuture.complete(null);
}
});
} else {
// no assigned resource --> we can directly complete the release future
releaseFuture.complete(null);
}
}
// --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
/**
* Calculates the preferred locations based on the location preference constraint.
*
* @param locationPreferenceConstraint constraint for the location preference
	 * @return Future containing the collection of preferred locations. This might not be completed if not all inputs
	 *         have been assigned a resource.
*/
@VisibleForTesting
public CompletableFuture<Collection<TaskManagerLocation>> calculatePreferredLocations(LocationPreferenceConstraint locationPreferenceConstraint) {
final Collection<CompletableFuture<TaskManagerLocation>> preferredLocationFutures = getVertex().getPreferredLocations();
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture;
switch(locationPreferenceConstraint) {
case ALL:
preferredLocationsFuture = FutureUtils.combineAll(preferredLocationFutures);
break;
case ANY:
final ArrayList<TaskManagerLocation> completedTaskManagerLocations = new ArrayList<>(preferredLocationFutures.size());
for (CompletableFuture<TaskManagerLocation> preferredLocationFuture : preferredLocationFutures) {
if (preferredLocationFuture.isDone() && !preferredLocationFuture.isCompletedExceptionally()) {
final TaskManagerLocation taskManagerLocation = preferredLocationFuture.getNow(null);
if (taskManagerLocation == null) {
throw new FlinkRuntimeException("TaskManagerLocationFuture was completed with null. This indicates a programming bug.");
}
completedTaskManagerLocations.add(taskManagerLocation);
}
}
preferredLocationsFuture = CompletableFuture.completedFuture(completedTaskManagerLocations);
break;
default:
throw new RuntimeException("Unknown LocationPreferenceConstraint " + locationPreferenceConstraint + '.');
}
return preferredLocationsFuture;
}
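	// For illustration, with two inputs of which only one location future is resolved:
	//   ALL -> the returned future completes only once the second location is known as well
	//   ANY -> the returned future completes immediately, containing just the resolved location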
private boolean transitionState(ExecutionState currentState, ExecutionState targetState) {
return transitionState(currentState, targetState, null);
}
private boolean transitionState(ExecutionState currentState, ExecutionState targetState, Throwable error) {
// sanity check
if (currentState.isTerminal()) {
throw new IllegalStateException("Cannot leave terminal state " + currentState + " to transition to " + targetState + '.');
}
if (STATE_UPDATER.compareAndSet(this, currentState, targetState)) {
markTimestamp(targetState);
if (error == null) {
LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState);
} else {
LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState, error);
}
if (targetState.isTerminal()) {
// complete the terminal state future
terminalStateFuture.complete(targetState);
}
// make sure that the state transition completes normally.
			// potential errors in listeners may not affect the main logic
try {
vertex.notifyStateTransition(this, targetState, error);
}
catch (Throwable t) {
LOG.error("Error while notifying execution graph of execution state transition.", t);
}
return true;
} else {
return false;
}
}
private void markTimestamp(ExecutionState state) {
markTimestamp(state, System.currentTimeMillis());
}
private void markTimestamp(ExecutionState state, long timestamp) {
this.stateTimestamps[state.ordinal()] = timestamp;
}
public String getVertexWithAttempt() {
return vertex.getTaskNameWithSubtaskIndex() + " - execution #" + attemptNumber;
}
// ------------------------------------------------------------------------
// Accumulators
// ------------------------------------------------------------------------
/**
* Update accumulators (discarded when the Execution has already been terminated).
* @param userAccumulators the user accumulators
*/
public void setAccumulators(Map<String, Accumulator<?, ?>> userAccumulators) {
synchronized (accumulatorLock) {
if (!state.isTerminal()) {
this.userAccumulators = userAccumulators;
}
}
}
public Map<String, Accumulator<?, ?>> getUserAccumulators() {
return userAccumulators;
}
@Override
public StringifiedAccumulatorResult[] getUserAccumulatorsStringified() {
Map<String, OptionalFailure<Accumulator<?, ?>>> accumulators =
userAccumulators == null ?
null :
userAccumulators.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey, entry -> OptionalFailure.of(entry.getValue())));
return StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulators);
}
@Override
public int getParallelSubtaskIndex() {
return getVertex().getParallelSubtaskIndex();
}
@Override
public IOMetrics getIOMetrics() {
return ioMetrics;
}
private void updateAccumulatorsAndMetrics(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
if (userAccumulators != null) {
synchronized (accumulatorLock) {
this.userAccumulators = userAccumulators;
}
}
if (metrics != null) {
this.ioMetrics = metrics;
}
}
// ------------------------------------------------------------------------
// Standard utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
final LogicalSlot slot = assignedResource;
return String.format("Attempt #%d (%s) @ %s - [%s]", attemptNumber, vertex.getTaskNameWithSubtaskIndex(),
(slot == null ? "(unassigned)" : slot), state);
}
@Override
public ArchivedExecution archive() {
return new ArchivedExecution(this);
}
private void assertRunningInJobMasterMainThread() {
vertex.getExecutionGraph().assertRunningInJobMasterMainThread();
}
}
| shaoxuan-wang/flink | flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java | Java | apache-2.0 | 59,232 |
package water;
import java.io.*;
import java.lang.reflect.Array;
import java.net.*;
import java.nio.*;
import java.nio.channels.*;
import java.util.ArrayList;
import java.util.Random;
import water.network.SocketChannelUtils;
import water.util.Log;
import water.util.StringUtils;
import water.util.TwoDimTable;
/** A ByteBuffer backed mixed Input/Output streaming class, using Iced serialization.
*
 * Reads/writes empty/fill the ByteBuffer as needed. When it is empty/full,
* we go to the ByteChannel for more/less. Because DirectByteBuffers are
* expensive to make, we keep a few pooled.
*
* When talking to a remote H2O node, switches between UDP and TCP transport
* protocols depending on the message size. The TypeMap is not included, and
* is assumed to exist on the remote H2O node.
*
* Supports direct NIO FileChannel read/write to disk, used during user-mode
* swapping. The TypeMap is not included on write, and is assumed to be the
* current map on read.
*
 * Supports read/write from byte[] - and this defeats the purpose of a
* Streaming protocol, but is frequently handy for small structures. The
* TypeMap is not included, and is assumed to be the current map on read.
*
* Supports read/write from a standard Stream, which by default assumes it is
* NOT going in and out of the same Cloud, so the TypeMap IS included. The
* serialized object can only be read back into the same minor version of H2O.
*
* @author <a href="mailto:cliffc@h2o.ai"></a>
*/
public final class AutoBuffer {
// Maximum size of an array we allow to allocate (the value is designed
// to mimic the behavior of OpenJDK libraries)
private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// The direct ByteBuffer for schlorping data about.
// Set to null to indicate the AutoBuffer is closed.
ByteBuffer _bb;
public String sourceName = "???";
public boolean isClosed() { return _bb == null ; }
// The ByteChannel for moving data in or out. Could be a SocketChannel (for
// a TCP connection) or a FileChannel (spill-to-disk) or a DatagramChannel
// (for a UDP connection). Null on closed AutoBuffers. Null on initial
// remote-writing AutoBuffers which are still deciding UDP vs TCP. Not-null
// for open AutoBuffers doing file i/o or reading any TCP/UDP or having
// written at least one buffer to TCP/UDP.
private Channel _chan;
// A Stream for moving data in. Null unless this AutoBuffer is
// stream-based, in which case _chan field is null. This path supports
  // persistence: reading and writing objects from different H2O cluster
// instances (but exactly the same H2O revision). The only required
// similarity is same-classes-same-fields; changes here will probably
// silently crash. If the fields are named the same but the semantics
// differ, then again the behavior is probably silent crash.
private InputStream _is;
private short[] _typeMap; // Mapping from input stream map to current map, or null
// If we need a SocketChannel, raise the priority so we get the I/O over
// with. Do not want to have some TCP socket open, blocking the TCP channel
// and then have the thread stalled out. If we raise the priority - be sure
// to lower it again. Note this is for TCP channels ONLY, and only because
// we are blocking another Node with I/O.
private int _oldPrior = -1;
// Where to send or receive data via TCP or UDP (choice made as we discover
// how big the message is); used to lazily create a Channel. If NULL, then
// _chan should be a pre-existing Channel, such as a FileChannel.
final H2ONode _h2o;
// TRUE for read-mode. FALSE for write-mode. Can be flipped for rapid turnaround.
private boolean _read;
// TRUE if this AutoBuffer has never advanced past the first "page" of data.
// The UDP-flavor, port# and task fields are only valid until we read over
// them when flipping the ByteBuffer to the next chunk of data. Used in
// asserts all over the place.
private boolean _firstPage;
// Total size written out from 'new' to 'close'. Only updated when actually
// reading or writing data, or after close(). For profiling only.
int _size;
//int _zeros, _arys;
// More profiling: start->close msec, plus nano's spent in blocking I/O
// calls. The difference between (close-start) and i/o msec is the time the
// i/o thread spends doing other stuff (e.g. allocating Java objects or
// (de)serializing).
long _time_start_ms, _time_close_ms, _time_io_ns;
// I/O persistence flavor: Value.ICE, NFS, HDFS, S3, TCP. Used to record I/O time.
final byte _persist;
// The assumed max UDP packetsize
static final int MTU = 1500-8/*UDP packet header size*/;
// Enable this to test random TCP fails on open or write
static final Random RANDOM_TCP_DROP = null; //new Random();
static final java.nio.charset.Charset UTF_8 = java.nio.charset.Charset.forName("UTF-8");
/** Incoming UDP request. Make a read-mode AutoBuffer from the open Channel,
* figure the originating H2ONode from the first few bytes read. */
AutoBuffer( DatagramChannel sock ) throws IOException {
_chan = null;
_bb = BBP_SML.make(); // Get a small / UDP-sized ByteBuffer
_read = true; // Reading by default
_firstPage = true;
// Read a packet; can get H2ONode from 'sad'?
Inet4Address addr = null;
SocketAddress sad = sock.receive(_bb);
if( sad instanceof InetSocketAddress ) {
InetAddress address = ((InetSocketAddress) sad).getAddress();
if( address instanceof Inet4Address ) {
addr = (Inet4Address) address;
}
}
_size = _bb.position();
_bb.flip(); // Set limit=amount read, and position==0
if( addr == null ) throw new RuntimeException("Unhandled socket type: " + sad);
// Read Inet from socket, port from the stream, figure out H2ONode
_h2o = H2ONode.intern(addr, getPort());
_firstPage = true;
assert _h2o != null;
    _persist = 0; // No persistence
}
/** Incoming TCP request. Make a read-mode AutoBuffer from the open Channel,
* figure the originating H2ONode from the first few bytes read.
*
   * remoteAddress set to null means that the communication is originating from a non-h2o node; a non-null value
   * represents the case where the communication is coming from an h2o node.
* */
AutoBuffer( ByteChannel sock, InetAddress remoteAddress ) throws IOException {
_chan = sock;
raisePriority(); // Make TCP priority high
    _bb = BBP_BIG.make(); // Get a big / TCP-sized ByteBuffer
_bb.flip();
_read = true; // Reading by default
_firstPage = true;
// Read Inet from socket, port from the stream, figure out H2ONode
if(remoteAddress!=null) {
_h2o = H2ONode.intern(remoteAddress, getPort());
}else{
      // In case the communication originates from a non-h2o node, we set the _h2o field to null.
      // It is done for 2 reasons:
      // - H2ONode.intern creates a new thread, and if there are a lot of connections
      //   from non-h2o environments, it could end up with a "too many open files" exception.
      // - H2ONode.intern also reads the port (getPort()) and additional information which we do not send
      //   in communication originating from non-h2o nodes
_h2o = null;
}
_firstPage = true; // Yes, must reset this.
_time_start_ms = System.currentTimeMillis();
_persist = Value.TCP;
}
  /** Make an AutoBuffer to write to an H2ONode. Requests for a full buffer will
* open a TCP socket and roll through writing to the target. Smaller
* requests will send via UDP. Small requests get ordered by priority, so
* that e.g. NACK and ACKACK messages have priority over most anything else.
* This helps in UDP floods to shut down flooding senders. */
private byte _msg_priority;
AutoBuffer( H2ONode h2o, byte priority ) {
    // If the small message actually goes out via UDP, we write into a DBB up
    // front - because we plan on sending it out via a Datagram socket send call.
    // If it instead goes via batched TCP, we write into an HBB up front, because
    // this will be copied again into a large outgoing buffer.
_bb = H2O.ARGS.useUDP // Actually use UDP?
? BBP_SML.make() // Make DirectByteBuffers to start with
: ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder());
_chan = null; // Channel made lazily only if we write alot
_h2o = h2o;
_read = false; // Writing by default
_firstPage = true; // Filling first page
assert _h2o != null;
_time_start_ms = System.currentTimeMillis();
_persist = Value.TCP;
_msg_priority = priority;
}
/** Spill-to/from-disk request. */
public AutoBuffer( FileChannel fc, boolean read, byte persist ) {
    _bb = BBP_BIG.make();       // Get a big / TCP-sized ByteBuffer
_chan = fc; // Write to read/write
_h2o = null; // File Channels never have an _h2o
_read = read; // Mostly assert reading vs writing
if( read ) _bb.flip();
_time_start_ms = System.currentTimeMillis();
_persist = persist; // One of Value.ICE, NFS, S3, HDFS
}
/** Read from UDP multicast. Same as the byte[]-read variant, except there is an H2O. */
AutoBuffer( DatagramPacket pack ) {
_size = pack.getLength();
_bb = ByteBuffer.wrap(pack.getData(), 0, pack.getLength()).order(ByteOrder.nativeOrder());
_bb.position(0);
_read = true;
_firstPage = true;
_chan = null;
_h2o = H2ONode.intern(pack.getAddress(), getPort());
    _persist = 0;               // No persistence
}
/** Read from a UDP_TCP buffer; could be in the middle of a large buffer */
AutoBuffer( H2ONode h2o, byte[] buf, int off, int len ) {
assert buf != null : "null fed to ByteBuffer.wrap";
_h2o = h2o;
_bb = ByteBuffer.wrap(buf,off,len).order(ByteOrder.nativeOrder());
_chan = null;
_read = true;
_firstPage = true;
    _persist = 0;               // No persistence
_size = len;
}
/** Read from a fixed byte[]; should not be closed. */
public AutoBuffer( byte[] buf ) { this(null,buf,0, buf.length); }
/** Write to an ever-expanding byte[]. Instead of calling {@link #close()},
* call {@link #buf()} to retrieve the final byte[]. */
public AutoBuffer( ) {
_bb = ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder());
_chan = null;
_h2o = null;
_read = false;
_firstPage = true;
    _persist = 0;               // No persistence
}
/** Write to a known sized byte[]. Instead of calling close(), call
* {@link #bufClose()} to retrieve the final byte[]. */
public AutoBuffer( int len ) {
_bb = ByteBuffer.wrap(MemoryManager.malloc1(len)).order(ByteOrder.nativeOrder());
_chan = null;
_h2o = null;
_read = false;
_firstPage = true;
    _persist = 0;               // No persistence
}
/** Write to a persistent Stream, including all TypeMap info to allow later
* reloading (by the same exact rev of H2O). */
public AutoBuffer( OutputStream os, boolean persist ) {
_bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder());
_read = false;
_chan = Channels.newChannel(os);
_h2o = null;
_firstPage = true;
_persist = 0;
if( persist ) put1(0x1C).put1(0xED).putStr(H2O.ABV.projectVersion()).putAStr(TypeMap.CLAZZES);
else put1(0);
}
  /** Read from a persistent Stream (including all TypeMap info) into the same
   *  exact rev of H2O. */
public AutoBuffer( InputStream is ) {
_chan = null;
_h2o = null;
_firstPage = true;
_persist = 0;
_read = true;
_bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder());
_bb.flip();
_is = is;
int b = get1U();
if( b==0 ) return; // No persistence info
int magic = get1U();
if( b!=0x1C || magic != 0xED ) throw new IllegalArgumentException("Missing magic number 0x1CED at stream start");
String version = getStr();
if( !version.equals(H2O.ABV.projectVersion()) )
throw new IllegalArgumentException("Found version "+version+", but running version "+H2O.ABV.projectVersion());
String[] typeMap = getAStr();
_typeMap = new short[typeMap.length];
for( int i=0; i<typeMap.length; i++ )
_typeMap[i] = (short)(typeMap[i]==null ? 0 : TypeMap.onIce(typeMap[i]));
}
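  // Stream header sketch (illustrative): the persistent header written by the
  // OutputStream constructor above and consumed here is
  //   put1(0x1C), put1(0xED)           - magic marker
  //   putStr(H2O.ABV.projectVersion()) - build version, checked on read
  //   putAStr(TypeMap.CLAZZES)         - class-name table, remapped into _typeMap
  // while a non-persistent stream starts with a single 0 byte instead.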
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("[AB ").append(_read ? "read " : "write ");
sb.append(_firstPage?"first ":"2nd ").append(_h2o);
sb.append(" ").append(Value.nameOfPersist(_persist));
if( _bb != null ) sb.append(" 0 <= ").append(_bb.position()).append(" <= ").append(_bb.limit());
if( _bb != null ) sb.append(" <= ").append(_bb.capacity());
return sb.append("]").toString();
}
// Fetch a DBB from an object pool... they are fairly expensive to make
// because a native call is required to get the backing memory. I've
// included BB count tracking code to help track leaks. As of 12/17/2012 the
// leaks are under control, but figure this may happen again so keeping these
// counters around.
//
// We use 2 pool sizes: lots of small UDP packet-sized buffers and fewer
// larger TCP-sized buffers.
private static final boolean DEBUG = Boolean.getBoolean("h2o.find-ByteBuffer-leaks");
private static long HWM=0;
static class BBPool {
long _made, _cached, _freed;
long _numer, _denom, _goal=4*H2O.NUMCPUS, _lastGoal;
final ArrayList<ByteBuffer> _bbs = new ArrayList<>();
final int _size; // Big or small size of ByteBuffers
BBPool( int sz) { _size=sz; }
private ByteBuffer stats( ByteBuffer bb ) {
if( !DEBUG ) return bb;
if( ((_made+_cached)&255)!=255 ) return bb; // Filter printing to 1 in 256
long now = System.currentTimeMillis();
if( now < HWM ) return bb;
HWM = now+1000;
water.util.SB sb = new water.util.SB();
sb.p("BB").p(this==BBP_BIG?1:0).p(" made=").p(_made).p(" -freed=").p(_freed).p(", cache hit=").p(_cached).p(" ratio=").p(_numer/_denom).p(", goal=").p(_goal).p(" cache size=").p(_bbs.size()).nl();
for( int i=0; i<H2O.MAX_PRIORITY; i++ ) {
int x = H2O.getWrkQueueSize(i);
if( x > 0 ) sb.p('Q').p(i).p('=').p(x).p(' ');
}
Log.warn(sb.nl().toString());
return bb;
}
ByteBuffer make() {
while( true ) { // Repeat loop for DBB OutOfMemory errors
ByteBuffer bb=null;
synchronized(_bbs) {
int sz = _bbs.size();
if( sz > 0 ) { bb = _bbs.remove(sz-1); _cached++; _numer++; }
}
if( bb != null ) return stats(bb);
// Cache empty; go get one from C/Native memory
try {
bb = ByteBuffer.allocateDirect(_size).order(ByteOrder.nativeOrder());
synchronized(this) { _made++; _denom++; _goal = Math.max(_goal,_made-_freed); _lastGoal=System.nanoTime(); } // Goal was too low, raise it
return stats(bb);
} catch( OutOfMemoryError oome ) {
// java.lang.OutOfMemoryError: Direct buffer memory
if( !"Direct buffer memory".equals(oome.getMessage()) ) throw oome;
System.out.println("OOM DBB - Sleeping & retrying");
try { Thread.sleep(100); } catch( InterruptedException ignore ) { }
}
}
}
void free(ByteBuffer bb) {
// Heuristic: keep the ratio of BB's made to cache-hits at a fixed level.
// Free to GC if ratio is high, free to internal cache if low.
long ratio = _numer/(_denom+1);
synchronized(_bbs) {
if( ratio < 100 || _bbs.size() < _goal ) { // low hit/miss ratio or below goal
bb.clear(); // Clear-before-add
_bbs.add(bb);
} else _freed++; // Toss the extras (above goal & ratio)
long now = System.nanoTime();
      if( now-_lastGoal > 1000000000L ) { // Once/sec, drop goal by 1%
_lastGoal = now;
if( ratio > 110 ) // If ratio is really high, lower goal
_goal=Math.max(4*H2O.NUMCPUS,(long)(_goal*0.99));
// Once/sec, lower numer/denom... means more recent activity outweighs really old stuff
long denom = (long) (0.99 * _denom); // Proposed reduction
if( denom > 10 ) { // Keep a little precision
_numer = (long) (0.99 * _numer); // Keep ratio between made & cached the same
          _denom = denom;                  // ... by lowering both by 1%
}
}
}
}
static int FREE( ByteBuffer bb ) {
if(bb.isDirect())
(bb.capacity()==BBP_BIG._size ? BBP_BIG : BBP_SML).free(bb);
return 0; // Flow coding
}
}
static BBPool BBP_SML = new BBPool( 2*1024); // Bytebuffer "common small size", for UDP
static BBPool BBP_BIG = new BBPool(64*1024); // Bytebuffer "common big size", for TCP
public static int TCP_BUF_SIZ = BBP_BIG._size;
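  // Pool usage sketch (illustrative): callers that need a direct buffer pair
  // make() with FREE(), e.g.
  //   ByteBuffer bb = BBP_BIG.make();   // borrow a 64K direct buffer
  //   try { /* fill and ship bb */ } finally { BBPool.FREE(bb); }
  // FREE() only recycles direct buffers; heap-backed buffers are left to the GC
  // (see bbFree() below).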
private int bbFree() {
if(_bb != null && _bb.isDirect())
BBPool.FREE(_bb);
_bb = null;
return 0; // Flow-coding
}
// You thought TCP was a reliable protocol, right? WRONG! Fails 100% of the
// time under heavy network load. Connection-reset-by-peer & connection
// timeouts abound, even after a socket open and after a 1st successful
// ByteBuffer write. It *appears* that the reader is unaware that a writer
// was told "go ahead and write" by the TCP stack, so all these fails are
// only on the writer-side.
public static class AutoBufferException extends RuntimeException {
public final IOException _ioe;
AutoBufferException( IOException ioe ) { _ioe = ioe; }
}
// For reads, just assert all was read and close and release resources.
// (release ByteBuffer back to the common pool). For writes, force any final
// bytes out. If the write is to an H2ONode and is short, send via UDP.
  // AutoBuffer close() calls are ordered; i.e. a reader close() will block until
  // the writer does a close().
public final int close() {
//if( _size > 2048 ) System.out.println("Z="+_zeros+" / "+_size+", A="+_arys);
if( isClosed() ) return 0; // Already closed
assert _h2o != null || _chan != null || _is != null; // Byte-array backed should not be closed
try {
if( _chan == null ) { // No channel?
if( _read ) {
if( _is != null ) _is.close();
return 0;
} else { // Write
// For small-packet write, send via UDP. Since nothing is sent until
// now, this close() call trivially orders - since the reader will not
// even start (much less close()) until this packet is sent.
if( _bb.position() < MTU) return udpSend();
// oops - Big Write, switch to TCP and finish out there
}
}
// Force AutoBuffer 'close' calls to order; i.e. block readers until
// writers do a 'close' - by writing 1 more byte in the close-call which
// the reader will have to wait for.
if( hasTCP()) { // TCP connection?
try {
if( _read ) { // Reader?
int x = get1U(); // Read 1 more byte
assert x == 0xab : "AB.close instead of 0xab sentinel got "+x+", "+this;
assert _chan != null; // chan set by incoming reader, since we KNOW it is a TCP
// Write the reader-handshake-byte.
SocketChannelUtils.underlyingSocketChannel(_chan).socket().getOutputStream().write(0xcd);
// do not close actually reader socket; recycle it in TCPReader thread
} else { // Writer?
put1(0xab); // Write one-more byte ; might set _chan from null to not-null
sendPartial(); // Finish partial writes; might set _chan from null to not-null
assert _chan != null; // _chan is set not-null now!
// Read the writer-handshake-byte.
int x = SocketChannelUtils.underlyingSocketChannel(_chan).socket().getInputStream().read();
// either TCP con was dropped or other side closed connection without reading/confirming (e.g. task was cancelled).
if( x == -1 ) throw new IOException("Other side closed connection before handshake byte read");
assert x == 0xcd : "Handshake; writer expected a 0xcd from reader but got "+x;
}
} catch( IOException ioe ) {
try { _chan.close(); } catch( IOException ignore ) {} // Silently close
_chan = null; // No channel now, since i/o error
throw ioe; // Rethrow after close
} finally {
if( !_read ) _h2o.freeTCPSocket((ByteChannel) _chan); // Recycle writable TCP channel
restorePriority(); // And if we raised priority, lower it back
}
} else { // FileChannel
if( !_read ) sendPartial(); // Finish partial file-system writes
_chan.close();
_chan = null; // Closed file channel
}
} catch( IOException e ) { // Dunno how to handle so crash-n-burn
throw new AutoBufferException(e);
} finally {
bbFree();
_time_close_ms = System.currentTimeMillis();
// TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections
assert isClosed();
}
return 0;
}
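  // Handshake sketch (illustrative): for a TCP-backed AutoBuffer, close() above
  // exchanges one sentinel byte in each direction -
  //   writer: put1(0xab), flush the remaining bytes, then block reading 0xcd back
  //   reader: read the trailing 0xab, then write 0xcd on the raw socket
  // so a writer-side close() cannot complete before the reader has drained the data.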
// Need a sock for a big read or write operation.
// See if we got one already, else open a new socket.
private void tcpOpen() throws IOException {
assert _firstPage && _bb.limit() >= 1+2+4; // At least something written
assert _chan == null;
// assert _bb.position()==0;
_chan = _h2o.getTCPSocket();
raisePriority();
}
  // Just close the channel here without reading anything. Without the task
  // object at hand we do not know what (how many bytes) we should read from
  // the channel. And since the other side will try to read a confirmation from
  // us before closing the channel, we cannot read till the end. So we just
  // close the channel and let the other side deal with it and figure out that
  // the task has been cancelled (it still sends ack-ack back).
void drainClose() {
if( isClosed() ) return; // Already closed
final Channel chan = _chan; // Read before closing
assert _h2o != null || chan != null; // Byte-array backed should not be closed
if( chan != null ) { // Channel assumed sick from prior IOException
try { chan.close(); } catch( IOException ignore ) {} // Silently close
_chan = null; // No channel now!
if( !_read && SocketChannelUtils.isSocketChannel(chan)) _h2o.freeTCPSocket((ByteChannel) chan); // Recycle writable TCP channel
}
restorePriority(); // And if we raised priority, lower it back
bbFree();
_time_close_ms = System.currentTimeMillis();
// TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections
assert isClosed();
}
// True if we opened a TCP channel, or will open one to close-and-send
boolean hasTCP() { assert !isClosed(); return SocketChannelUtils.isSocketChannel(_chan) || (_h2o!=null && _bb.position() >= MTU); }
// Size in bytes sent, after a close()
int size() { return _size; }
//int zeros() { return _zeros; }
public int position () { return _bb.position(); }
public AutoBuffer position(int p) {_bb.position(p); return this;}
/** Skip over some bytes in the byte buffer. Caller is responsible for not
* reading off end of the bytebuffer; generally this is easy for
* array-backed autobuffers and difficult for i/o-backed bytebuffers. */
public void skip(int skip) { _bb.position(_bb.position()+skip); }
// Return byte[] from a writable AutoBuffer
public final byte[] buf() {
assert _h2o==null && _chan==null && !_read && !_bb.isDirect();
return MemoryManager.arrayCopyOfRange(_bb.array(), _bb.arrayOffset(), _bb.position());
}
public final byte[] bufClose() {
byte[] res = _bb.array();
bbFree();
return res;
}
// For TCP sockets ONLY, raise the thread priority. We assume we are
// blocking other Nodes with our network I/O, so try to get the I/O
// over with.
private void raisePriority() {
if(_oldPrior == -1){
assert SocketChannelUtils.isSocketChannel(_chan);
_oldPrior = Thread.currentThread().getPriority();
Thread.currentThread().setPriority(Thread.MAX_PRIORITY-1);
}
}
private void restorePriority() {
if( _oldPrior == -1 ) return;
Thread.currentThread().setPriority(_oldPrior);
_oldPrior = -1;
}
// Send via UDP socket. Unlike eg TCP sockets, we only need one for sending
// so we keep a global one. Also, we do not close it when done, and we do
// not connect it up-front to a target - but send the entire packet right now.
private int udpSend() throws IOException {
assert _chan == null;
TimeLine.record_send(this,false);
_size = _bb.position();
assert _size < AutoBuffer.BBP_SML._size;
_bb.flip(); // Flip for sending
if( _h2o==H2O.SELF ) { // SELF-send is the multi-cast signal
water.init.NetworkInit.multicast(_bb, _msg_priority);
} else { // Else single-cast send
if(H2O.ARGS.useUDP) // Send via UDP directly
water.init.NetworkInit.CLOUD_DGRAM.send(_bb, _h2o._key);
else // Send via bulk TCP
_h2o.sendMessage(_bb, _msg_priority);
}
return 0; // Flow-coding
}
// Flip to write-mode
AutoBuffer clearForWriting(byte priority) {
assert _read;
_read = false;
_msg_priority = priority;
_bb.clear();
_firstPage = true;
return this;
}
// Flip to read-mode
public AutoBuffer flipForReading() {
assert !_read;
_read = true;
_bb.flip();
_firstPage = true;
return this;
}
/** Ensure the buffer has space for sz more bytes */
private ByteBuffer getSp( int sz ) { return sz > _bb.remaining() ? getImpl(sz) : _bb; }
/** Ensure buffer has at least sz bytes in it.
* - Also, set position just past this limit for future reading. */
private ByteBuffer getSz(int sz) {
assert _firstPage : "getSz() is only valid for early UDP bytes";
if( sz > _bb.limit() ) getImpl(sz);
_bb.position(sz);
return _bb;
}
private ByteBuffer getImpl( int sz ) {
assert _read : "Reading from a buffer in write mode";
_bb.compact(); // Move remaining unread bytes to start of buffer; prep for reading
    // It's got to fit or we asked for too much
assert _bb.position()+sz <= _bb.capacity() : "("+_bb.position()+"+"+sz+" <= "+_bb.capacity()+")";
long ns = System.nanoTime();
while( _bb.position() < sz ) { // Read until we got enuf
try {
int res = readAnInt(); // Read more
// Readers are supposed to be strongly typed and read the exact expected bytes.
// However, if a TCP connection fails mid-read we'll get a short-read.
// This is indistinguishable from a mis-alignment between the writer and reader!
if( res <= 0 )
throw new AutoBufferException(new EOFException("Reading "+sz+" bytes, AB="+this));
if( _is != null ) _bb.position(_bb.position()+res); // Advance BB for Streams manually
_size += res; // What we read
} catch( IOException e ) { // Dunno how to handle so crash-n-burn
// Linux/Ubuntu message for a reset-channel
if( e.getMessage().equals("An existing connection was forcibly closed by the remote host") )
throw new AutoBufferException(e);
// Windows message for a reset-channel
if( e.getMessage().equals("An established connection was aborted by the software in your host machine") )
throw new AutoBufferException(e);
throw Log.throwErr(e);
}
}
_time_io_ns += (System.nanoTime()-ns);
_bb.flip(); // Prep for handing out bytes
//for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++;
_firstPage = false; // First page of data is gone gone gone
return _bb;
}
private int readAnInt() throws IOException {
if (_is == null) return ((ReadableByteChannel) _chan).read(_bb);
final byte[] array = _bb.array();
final int position = _bb.position();
final int remaining = _bb.remaining();
try {
return _is.read(array, position, remaining);
} catch (IOException ioe) {
throw new IOException("Failed reading " + remaining + " bytes into buffer[" + array.length + "] at " + position + " from " + sourceName + " " + _is, ioe);
}
}
/** Put as needed to keep from overflowing the ByteBuffer. */
private ByteBuffer putSp( int sz ) {
assert !_read;
if (sz > _bb.remaining()) {
if ((_h2o == null && _chan == null) || (_bb.hasArray() && _bb.capacity() < BBP_BIG._size))
expandByteBuffer(sz);
else
sendPartial();
assert sz <= _bb.remaining();
}
return _bb;
}
// Do something with partial results, because the ByteBuffer is full.
// If we are doing I/O, ship the bytes we have now and flip the ByteBuffer.
private ByteBuffer sendPartial() {
// Doing I/O with the full ByteBuffer - ship partial results
_size += _bb.position();
if( _chan == null )
TimeLine.record_send(this, true);
_bb.flip(); // Prep for writing.
try {
if( _chan == null )
tcpOpen(); // This is a big operation. Open a TCP socket as-needed.
//for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++;
long ns = System.nanoTime();
while( _bb.hasRemaining() ) {
((WritableByteChannel) _chan).write(_bb);
if( RANDOM_TCP_DROP != null && SocketChannelUtils.isSocketChannel(_chan) && RANDOM_TCP_DROP.nextInt(100) == 0 )
throw new IOException("Random TCP Write Fail");
}
_time_io_ns += (System.nanoTime()-ns);
} catch( IOException e ) { // Some kind of TCP fail?
// Change to an unchecked exception (so we don't have to annotate every
// frick'n put1/put2/put4/read/write call). Retry & recovery happens at
// a higher level. AutoBuffers are used for many things including e.g.
// disk i/o & UDP writes; this exception only happens on a failed TCP
// write - and we don't want to make the other AutoBuffer users have to
// declare (and then ignore) this exception.
throw new AutoBufferException(e);
}
_firstPage = false;
_bb.clear();
return _bb;
}
// Called when the byte buffer doesn't have enough room
// If buffer is array backed, and the needed room is small,
// increase the size of the backing array,
// otherwise dump into a large direct buffer
private ByteBuffer expandByteBuffer(int sizeHint) {
final long needed = (long) sizeHint - _bb.remaining() + _bb.capacity(); // Max needed is 2G
if ((_h2o==null && _chan == null) || (_bb.hasArray() && needed < MTU)) {
if (needed > MAX_ARRAY_SIZE) {
throw new IllegalArgumentException("Cannot allocate more than 2GB array: sizeHint="+sizeHint+", "
+ "needed="+needed
+ ", bb.remaining()=" + _bb.remaining() + ", bb.capacity()="+_bb.capacity());
}
byte[] ary = _bb.array();
      // just get twice what is currently needed but not more than the max array size (2G)
// Be careful not to overflow because of integer math!
int newLen = (int) Math.min(1L << (water.util.MathUtils.log2(needed)+1), MAX_ARRAY_SIZE);
int oldpos = _bb.position();
_bb = ByteBuffer.wrap(MemoryManager.arrayCopyOfRange(ary,0,newLen),oldpos,newLen-oldpos)
.order(ByteOrder.nativeOrder());
} else if (_bb.capacity() != BBP_BIG._size) { //avoid expanding existing BBP items
int oldPos = _bb.position();
_bb.flip();
_bb = BBP_BIG.make().put(_bb);
_bb.position(oldPos);
}
return _bb;
}
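  // Sizing example (illustrative, assuming MathUtils.log2 is the floor log): a
  // 16-byte heap buffer at position 10 asked for 100 more bytes gives
  // needed = 100 - 6 + 16 = 110, so the backing array grows to the next power of
  // two above that (128), capped at MAX_ARRAY_SIZE, and the write position is
  // preserved across the copy.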
@SuppressWarnings("unused") public String getStr(int off, int len) {
return new String(_bb.array(), _bb.arrayOffset()+off, len, UTF_8);
}
// -----------------------------------------------
// Utility functions to get various Java primitives
@SuppressWarnings("unused") public boolean getZ() { return get1()!=0; }
@SuppressWarnings("unused") public byte get1 () { return getSp(1).get (); }
@SuppressWarnings("unused") public int get1U() { return get1() & 0xFF; }
@SuppressWarnings("unused") public char get2 () { return getSp(2).getChar (); }
@SuppressWarnings("unused") public short get2s () { return getSp(2).getShort (); }
@SuppressWarnings("unused") public int get3 () { getSp(3); return get1U() | get1U() << 8 | get1U() << 16; }
@SuppressWarnings("unused") public int get4 () { return getSp(4).getInt (); }
@SuppressWarnings("unused") public float get4f() { return getSp(4).getFloat (); }
@SuppressWarnings("unused") public long get8 () { return getSp(8).getLong (); }
@SuppressWarnings("unused") public double get8d() { return getSp(8).getDouble(); }
int get1U(int off) { return _bb.get (off)&0xFF; }
int get4 (int off) { return _bb.getInt (off); }
long get8 (int off) { return _bb.getLong(off); }
@SuppressWarnings("unused") public AutoBuffer putZ (boolean b){ return put1(b?1:0); }
@SuppressWarnings("unused") public AutoBuffer put1 ( int b) { assert b >= -128 && b <= 255 : ""+b+" is not a byte";
putSp(1).put((byte)b); return this; }
@SuppressWarnings("unused") public AutoBuffer put2 ( char c) { putSp(2).putChar (c); return this; }
@SuppressWarnings("unused") public AutoBuffer put2 ( short s) { putSp(2).putShort (s); return this; }
@SuppressWarnings("unused") public AutoBuffer put2s ( short s) { return put2(s); }
@SuppressWarnings("unused") public AutoBuffer put3( int x ) { assert (-1<<24) <= x && x < (1<<24);
return put1((x)&0xFF).put1((x >> 8)&0xFF).put1(x >> 16); }
@SuppressWarnings("unused") public AutoBuffer put4 ( int i) { putSp(4).putInt (i); return this; }
@SuppressWarnings("unused") public AutoBuffer put4f( float f) { putSp(4).putFloat (f); return this; }
@SuppressWarnings("unused") public AutoBuffer put8 ( long l) { putSp(8).putLong (l); return this; }
@SuppressWarnings("unused") public AutoBuffer put8d(double d) { putSp(8).putDouble(d); return this; }
public AutoBuffer put(Freezable f) {
if( f == null ) return putInt(TypeMap.NULL);
assert f.frozenType() > 0 : "No TypeMap for "+f.getClass().getName();
putInt(f.frozenType());
return f.write(this);
}
public <T extends Freezable> T get() {
int id = getInt();
if( id == TypeMap.NULL ) return null;
if( _is!=null ) id = _typeMap[id];
return (T)TypeMap.newFreezable(id).read(this);
}
public <T extends Freezable> T get(Class<T> tc) {
int id = getInt();
if( id == TypeMap.NULL ) return null;
if( _is!=null ) id = _typeMap[id];
assert tc.isInstance(TypeMap.theFreezable(id)):tc.getName() + " != " + TypeMap.theFreezable(id).getClass().getName() + ", id = " + id;
return (T)TypeMap.newFreezable(id).read(this);
}
// Write Key's target IFF the Key is not null; target can be null.
public AutoBuffer putKey(Key k) {
if( k==null ) return this; // Key is null ==> write nothing
Keyed kd = DKV.getGet(k);
put(kd);
return kd == null ? this : kd.writeAll_impl(this);
}
public Keyed getKey(Key k, Futures fs) {
return k==null ? null : getKey(fs); // Key is null ==> read nothing
}
public Keyed getKey(Futures fs) {
Keyed kd = get(Keyed.class);
if( kd == null ) return null;
DKV.put(kd,fs);
return kd.readAll_impl(this,fs);
}
  // Put a (compressed) integer.  Specifically, values in the range -1 to 252
  // take 1 byte, values that fit in a Short take 1+2 bytes, and all remaining
  // int values take 1+4 bytes.  This compression is optimized for small
  // integers (including -1, which is often used as an "array is null" flag
  // when passing an array length).
public AutoBuffer putInt(int x) {
if( 0 <= (x+1)&& (x+1) <= 253 ) return put1(x+1);
if( Short.MIN_VALUE <= x && x <= Short.MAX_VALUE ) return put1(255).put2((short)x);
return put1(254).put4(x);
}
// Get a (compressed) integer. See above for the compression strategy and reasoning.
int getInt( ) {
int x = get1U();
if( x <= 253 ) return x-1;
if( x==255 ) return (short)get2();
assert x==254;
return get4();
}
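  // Worked examples (illustrative) of the compressed-int encoding above:
  //   putInt(5)      -> single byte 0x06   (5+1 fits the 0..253 single-byte range)
  //   putInt(-1)     -> single byte 0x00   (the common "null array" flag)
  //   putInt(1000)   -> 0xFF then a 2-byte short
  //   putInt(100000) -> 0xFE then a 4-byte int
  // getInt() reverses the same three cases.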
// Put a zero-compressed array. Compression is:
// If null : putInt(-1)
// Else
// putInt(# of leading nulls)
// putInt(# of non-nulls)
// If # of non-nulls is > 0, putInt( # of trailing nulls)
long putZA( Object[] A ) {
if( A==null ) { putInt(-1); return 0; }
int x=0; for( ; x<A.length; x++ ) if( A[x ]!=null ) break;
int y=A.length; for( ; y>x; y-- ) if( A[y-1]!=null ) break;
putInt(x); // Leading zeros to skip
putInt(y-x); // Mixed non-zero guts in middle
    if( y > x )                 // Only if the middle section is non-empty
putInt(A.length-y); // Trailing zeros
return ((long)x<<32)|(y-x); // Return both leading zeros, and middle non-zeros
}
// Get the lengths of a zero-compressed array.
// Returns -1 if null.
// Returns a long of (leading zeros | middle non-zeros).
// If there are non-zeros, caller has to read the trailing zero-length.
long getZA( ) {
int x=getInt(); // Length of leading zeros
if( x == -1 ) return -1; // or a null
int nz=getInt(); // Non-zero in the middle
return ((long)x<<32)|(long)nz; // Return both ints
}
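  // Worked example (illustrative): for A = {null, null, "x", "y", null}
  //   putZA(A) writes putInt(2) leading nulls, putInt(2) non-null middle and,
  //   because the middle is non-empty, putInt(1) trailing nulls, returning (2L<<32)|2.
  //   Readers call getZA() for the first two counts and fetch the trailing count
  //   themselves (see getA()/getAStr() below).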
// TODO: untested. . .
@SuppressWarnings("unused")
public AutoBuffer putAEnum(Enum[] enums) {
//_arys++;
long xy = putZA(enums);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putEnum(enums[i]);
return this;
}
@SuppressWarnings("unused")
public <E extends Enum> E[] getAEnum(E[] values) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
E[] ts = (E[]) Array.newInstance(values.getClass().getComponentType(), x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getEnum(values);
return ts;
}
@SuppressWarnings("unused")
public AutoBuffer putA(Freezable[] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) put(fs[i]);
return this;
}
public AutoBuffer putAA(Freezable[][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA(fs[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAAA(Freezable[][][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA(fs[i]);
return this;
}
public <T extends Freezable> T[] getA(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[] ts = (T[]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = get(tc);
return ts;
}
public <T extends Freezable> T[][] getAA(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
Class<T[]> tcA = (Class<T[]>) Array.newInstance(tc, 0).getClass();
T[][] ts = (T[][]) Array.newInstance(tcA, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getA(tc);
return ts;
}
@SuppressWarnings("unused") public <T extends Freezable> T[][][] getAAA(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
Class<T[] > tcA = (Class<T[] >) Array.newInstance(tc , 0).getClass();
Class<T[][]> tcAA = (Class<T[][]>) Array.newInstance(tcA, 0).getClass();
T[][][] ts = (T[][][]) Array.newInstance(tcAA, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getAA(tc);
return ts;
}
public AutoBuffer putAStr(String[] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putStr(fs[i]);
return this;
}
public String[] getAStr() {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
String[] ts = new String[x+y+z];
for( int i = x; i < x+y; ++i ) ts[i] = getStr();
return ts;
}
@SuppressWarnings("unused") public AutoBuffer putAAStr(String[][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAStr(fs[i]);
return this;
}
@SuppressWarnings("unused") public String[][] getAAStr() {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
String[][] ts = new String[x+y+z][];
for( int i = x; i < x+y; ++i ) ts[i] = getAStr();
return ts;
}
// Read the smaller of _bb.remaining() and len into buf.
// Return bytes read, which could be zero.
int read( byte[] buf, int off, int len ) {
int sz = Math.min(_bb.remaining(),len);
_bb.get(buf,off,sz);
return sz;
}
// -----------------------------------------------
// Utility functions to handle common UDP packet tasks.
// Get the 1st control byte
int getCtrl( ) { return getSz(1).get(0)&0xFF; }
// Get the port in next 2 bytes
int getPort( ) { return getSz(1+2).getChar(1); }
// Get the task# in the next 4 bytes
int getTask( ) { return getSz(1+2+4).getInt(1+2); }
// Get the flag in the next 1 byte
int getFlag( ) { return getSz(1+2+4+1).get(1+2+4); }
// Set the ctrl, port, task. Ready to write more bytes afterwards
AutoBuffer putUdp (UDP.udp type) {
assert _bb.position() == 0;
putSp(_bb.position()+1+2);
_bb.put ((byte)type.ordinal());
_bb.putChar((char)H2O.H2O_PORT ); // Outgoing port is always the sender's (me) port
return this;
}
AutoBuffer putTask(UDP.udp type, int tasknum) {
return putUdp(type).put4(tasknum);
}
AutoBuffer putTask(int ctrl, int tasknum) {
assert _bb.position() == 0;
putSp(_bb.position()+1+2+4);
_bb.put((byte)ctrl).putChar((char)H2O.H2O_PORT).putInt(tasknum);
return this;
}
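  // Packet layout sketch (illustrative): the leading bytes written by putUdp()/
  // putTask() and read back by the getters above are
  //   offset 0    : 1 byte  control/type   (udp.ordinal(), see getCtrl())
  //   offset 1..2 : 2 bytes sender port    (see getPort())
  //   offset 3..6 : 4 bytes task number    (putTask() only, see getTask())
  //   offset 7    : 1 byte  flag           (written later by callers, see getFlag())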
// -----------------------------------------------
// Utility functions to read & write arrays
public boolean[] getAZ() {
int len = getInt();
if (len == -1) return null;
boolean[] r = new boolean[len];
for (int i=0;i<len;++i) r[i] = getZ();
return r;
}
public byte[] getA1( ) {
//_arys++;
int len = getInt();
return len == -1 ? null : getA1(len);
}
public byte[] getA1( int len ) {
byte[] buf = MemoryManager.malloc1(len);
int sofar = 0;
while( sofar < len ) {
int more = Math.min(_bb.remaining(), len - sofar);
_bb.get(buf, sofar, more);
sofar += more;
if( sofar < len ) getSp(Math.min(_bb.capacity(), len-sofar));
}
return buf;
}
public short[] getA2( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
short[] buf = MemoryManager.malloc2(len);
int sofar = 0;
while( sofar < buf.length ) {
ShortBuffer as = _bb.asShortBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*2);
if( sofar < len ) getSp(Math.min(_bb.capacity()-1, (len-sofar)*2));
}
return buf;
}
public int[] getA4( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
int[] buf = MemoryManager.malloc4(len);
int sofar = 0;
while( sofar < buf.length ) {
IntBuffer as = _bb.asIntBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*4);
if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4));
}
return buf;
}
public float[] getA4f( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
float[] buf = MemoryManager.malloc4f(len);
int sofar = 0;
while( sofar < buf.length ) {
FloatBuffer as = _bb.asFloatBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*4);
if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4));
}
return buf;
}
public long[] getA8( ) {
//_arys++;
// Get the lengths of lead & trailing zero sections, and the non-zero
// middle section.
int x = getInt(); if( x == -1 ) return null;
int y = getInt(); // Non-zero in the middle
int z = y==0 ? 0 : getInt();// Trailing zeros
long[] buf = MemoryManager.malloc8(x+y+z);
switch( get1U() ) { // 1,2,4 or 8 for how the middle section is passed
case 1: for( int i=x; i<x+y; i++ ) buf[i] = get1U(); return buf;
case 2: for( int i=x; i<x+y; i++ ) buf[i] = (short)get2(); return buf;
case 4: for( int i=x; i<x+y; i++ ) buf[i] = get4(); return buf;
case 8: break;
default: throw H2O.fail();
}
int sofar = x;
while( sofar < x+y ) {
LongBuffer as = _bb.asLongBuffer();
int more = Math.min(as.remaining(), x+y - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*8);
if( sofar < x+y ) getSp(Math.min(_bb.capacity()-7, (x+y-sofar)*8));
}
return buf;
}
public double[] getA8d( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
double[] buf = MemoryManager.malloc8d(len);
int sofar = 0;
while( sofar < len ) {
DoubleBuffer as = _bb.asDoubleBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*8);
if( sofar < len ) getSp(Math.min(_bb.capacity()-7, (len-sofar)*8));
}
return buf;
}
@SuppressWarnings("unused")
public byte[][] getAA1( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
byte[][] ary = new byte[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA1();
return ary;
}
@SuppressWarnings("unused")
public short[][] getAA2( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
short[][] ary = new short[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA2();
return ary;
}
public int[][] getAA4( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
int[][] ary = new int[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA4();
return ary;
}
@SuppressWarnings("unused") public float[][] getAA4f( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
float[][] ary = new float[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA4f();
return ary;
}
public long[][] getAA8( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
long[][] ary = new long[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA8();
return ary;
}
@SuppressWarnings("unused") public double[][] getAA8d( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
double[][] ary = new double[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA8d();
return ary;
}
@SuppressWarnings("unused") public int[][][] getAAA4( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
int[][][] ary = new int[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA4();
return ary;
}
@SuppressWarnings("unused") public long[][][] getAAA8( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
long[][][] ary = new long[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA8();
return ary;
}
public double[][][] getAAA8d( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
double[][][] ary = new double[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA8d();
return ary;
}
public String getStr( ) {
int len = getInt();
return len == -1 ? null : new String(getA1(len), UTF_8);
}
public <E extends Enum> E getEnum(E[] values ) {
int idx = get1();
return idx == -1 ? null : values[idx];
}
public AutoBuffer putAZ( boolean[] ary ) {
if( ary == null ) return putInt(-1);
putInt(ary.length);
for (boolean anAry : ary) putZ(anAry);
return this;
}
public AutoBuffer putA1( byte[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
return putA1(ary,ary.length);
}
public AutoBuffer putA1( byte[] ary, int length ) { return putA1(ary,0,length); }
public AutoBuffer putA1( byte[] ary, int sofar, int length ) {
if (length - sofar > _bb.remaining()) expandByteBuffer(length-sofar);
while( sofar < length ) {
int len = Math.min(length - sofar, _bb.remaining());
_bb.put(ary, sofar, len);
sofar += len;
if( sofar < length ) sendPartial();
}
return this;
}
AutoBuffer putA2( short[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
if (ary.length*2 > _bb.remaining()) expandByteBuffer(ary.length*2);
int sofar = 0;
while( sofar < ary.length ) {
ShortBuffer sb = _bb.asShortBuffer();
int len = Math.min(ary.length - sofar, sb.remaining());
sb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + sb.position()*2);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA4( int[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
// Note: based on Brandon commit this should improve performance during parse (7d950d622ee3037555ecbab0e39404f8f0917652)
if (ary.length*4 > _bb.remaining()) {
expandByteBuffer(ary.length*4); // Try to expand BB buffer to fit input array
}
int sofar = 0;
while( sofar < ary.length ) {
IntBuffer ib = _bb.asIntBuffer();
int len = Math.min(ary.length - sofar, ib.remaining());
ib.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + ib.position()*4);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA8( long[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
// Trim leading & trailing zeros. Pass along the length of leading &
// trailing zero sections, and the non-zero section in the middle.
int x=0; for( ; x<ary.length; x++ ) if( ary[x ]!=0 ) break;
int y=ary.length; for( ; y>x; y-- ) if( ary[y-1]!=0 ) break;
int nzlen = y-x;
putInt(x);
putInt(nzlen);
    if( nzlen > 0 )             // Only if the non-zero middle is non-empty
putInt(ary.length-y); // Trailing zeros
// Size trim the NZ section: pass as bytes or shorts if possible.
long min=Long.MAX_VALUE, max=Long.MIN_VALUE;
for( int i=x; i<y; i++ ) { if( ary[i]<min ) min=ary[i]; if( ary[i]>max ) max=ary[i]; }
if( 0 <= min && max < 256 ) { // Ship as unsigned bytes
put1(1); for( int i=x; i<y; i++ ) put1((int)ary[i]);
return this;
}
if( Short.MIN_VALUE <= min && max < Short.MAX_VALUE ) { // Ship as shorts
put1(2); for( int i=x; i<y; i++ ) put2((short)ary[i]);
return this;
}
if( Integer.MIN_VALUE <= min && max < Integer.MAX_VALUE ) { // Ship as ints
put1(4); for( int i=x; i<y; i++ ) put4((int)ary[i]);
return this;
}
put1(8); // Ship as full longs
int sofar = x;
if ((y-sofar)*8 > _bb.remaining()) expandByteBuffer(ary.length*8);
while( sofar < y ) {
LongBuffer lb = _bb.asLongBuffer();
int len = Math.min(y - sofar, lb.remaining());
lb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + lb.position() * 8);
if( sofar < y ) sendPartial();
}
return this;
}
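  // Worked example (illustrative): for ary = {0, 0, 7, 300, 0} the header is
  // putInt(2) leading zeros, putInt(2) non-zero middle, putInt(1) trailing zeros;
  // min=7/max=300 does not fit unsigned bytes but fits shorts, so the middle ships
  // as the width tag put1(2) followed by two 2-byte shorts, and getA8() widens
  // them back to longs from the same tag.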
public AutoBuffer putA4f( float[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
if (ary.length*4 > _bb.remaining()) expandByteBuffer(ary.length*4);
int sofar = 0;
while( sofar < ary.length ) {
FloatBuffer fb = _bb.asFloatBuffer();
int len = Math.min(ary.length - sofar, fb.remaining());
fb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + fb.position()*4);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA8d( double[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
if (ary.length*8 > _bb.remaining()) expandByteBuffer(ary.length*8);
int sofar = 0;
while( sofar < ary.length ) {
DoubleBuffer db = _bb.asDoubleBuffer();
int len = Math.min(ary.length - sofar, db.remaining());
db.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + db.position()*8);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putAA1( byte[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA1(ary[i]);
return this;
}
@SuppressWarnings("unused") AutoBuffer putAA2( short[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA2(ary[i]);
return this;
}
public AutoBuffer putAA4( int[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA4(ary[i]);
return this;
}
@SuppressWarnings("unused")
public AutoBuffer putAA4f( float[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA4f(ary[i]);
return this;
}
public AutoBuffer putAA8( long[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA8(ary[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAA8d( double[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA8d(ary[i]);
return this;
}
public AutoBuffer putAAA4( int[][][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA4(ary[i]);
return this;
}
public AutoBuffer putAAA8( long[][][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA8(ary[i]);
return this;
}
public AutoBuffer putAAA8d( double[][][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA8d(ary[i]);
return this;
}
// Put a String as bytes (not chars!)
public AutoBuffer putStr( String s ) {
if( s==null ) return putInt(-1);
return putA1(StringUtils.bytesOf(s));
}
@SuppressWarnings("unused") public AutoBuffer putEnum( Enum x ) {
return put1(x==null ? -1 : x.ordinal());
}
public static byte[] javaSerializeWritePojo(Object o) {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutputStream out = null;
try {
out = new ObjectOutputStream(bos);
out.writeObject(o);
out.close();
return bos.toByteArray();
} catch (IOException e) {
throw Log.throwErr(e);
}
}
public static Object javaSerializeReadPojo(byte [] bytes) {
try {
final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes));
Object o = ois.readObject();
return o;
} catch (IOException e) {
String className = nameOfClass(bytes);
throw Log.throwErr(new RuntimeException("Failed to deserialize " + className, e));
} catch (ClassNotFoundException e) {
throw Log.throwErr(e);
}
}
static String nameOfClass(byte[] bytes) {
if (bytes == null) return "(null)";
if (bytes.length < 11) return "(no name)";
int nameSize = Math.min(40, Math.max(3, bytes[7]));
return new String(bytes, 8, Math.min(nameSize, bytes.length - 8));
}
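  // Note (illustrative, assuming the standard java.io serialization layout):
  // bytes 0..3 are the stream magic/version, bytes 4..5 are TC_OBJECT/TC_CLASSDESC,
  // bytes 6..7 hold the class-name length and the name itself starts at byte 8,
  // which is why the snippet above takes a length hint from bytes[7] and reads
  // the name from offset 8.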
// ==========================================================================
// Java Serializable objects
// Note: These are heck-a-lot more expensive than their Freezable equivalents.
@SuppressWarnings("unused") public AutoBuffer putSer( Object obj ) {
if (obj == null) return putA1(null);
return putA1(javaSerializeWritePojo(obj));
}
@SuppressWarnings("unused") public AutoBuffer putASer(Object[] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putSer(fs[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAASer(Object[][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putASer(fs[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAAASer(Object[][][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAASer(fs[i]);
return this;
}
@SuppressWarnings("unused") public Object getSer() {
byte[] ba = getA1();
return ba == null ? null : javaSerializeReadPojo(ba);
}
@SuppressWarnings("unused") public <T> T getSer(Class<T> tc) {
return (T)getSer();
}
@SuppressWarnings("unused") public <T> T[] getASer(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[] ts = (T[]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getSer(tc);
return ts;
}
@SuppressWarnings("unused") public <T> T[][] getAASer(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[][] ts = (T[][]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getASer(tc);
return ts;
}
@SuppressWarnings("unused") public <T> T[][][] getAAASer(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[][][] ts = (T[][][]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getAASer(tc);
return ts;
}
// ==========================================================================
// JSON AutoBuffer printers
public AutoBuffer putJNULL( ) { return put1('n').put1('u').put1('l').put1('l'); }
// Escaped JSON string
private AutoBuffer putJStr( String s ) {
byte[] b = StringUtils.bytesOf(s);
int off=0;
for( int i=0; i<b.length; i++ ) {
if( b[i] == '\\' || b[i] == '"') { // Double up backslashes, escape quotes
putA1(b,off,i); // Everything so far (no backslashes)
put1('\\'); // The extra backslash
off=i; // Advance the "so far" variable
}
// Handle remaining special cases in JSON
// if( b[i] == '/' ) { putA1(b,off,i); put1('\\'); put1('/'); off=i+1; continue;}
if( b[i] == '\b' ) { putA1(b,off,i); put1('\\'); put1('b'); off=i+1; continue;}
if( b[i] == '\f' ) { putA1(b,off,i); put1('\\'); put1('f'); off=i+1; continue;}
if( b[i] == '\n' ) { putA1(b,off,i); put1('\\'); put1('n'); off=i+1; continue;}
if( b[i] == '\r' ) { putA1(b,off,i); put1('\\'); put1('r'); off=i+1; continue;}
if( b[i] == '\t' ) { putA1(b,off,i); put1('\\'); put1('t'); off=i+1; continue;}
// ASCII Control characters
if( b[i] == 127 ) { putA1(b,off,i); put1('\\'); put1('u'); put1('0'); put1('0'); put1('7'); put1('f'); off=i+1; continue;}
if( b[i] >= 0 && b[i] < 32 ) {
String hexStr = Integer.toHexString(b[i]);
putA1(b, off, i); put1('\\'); put1('u');
for (int j = 0; j < 4 - hexStr.length(); j++) put1('0');
        for (int j = 0; j < hexStr.length(); j++) put1(hexStr.charAt(j)); // emit hex digits in order, e.g. 0x1F -> \u001f
off=i+1;
}
}
return putA1(b,off,b.length);
}
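  // Escaping examples (illustrative): putJStr("a\"b") emits a\"b, putJStr("line\n")
  // emits line followed by the two-character escape \n, a raw 0x1F control byte
  // becomes the six characters \u001f, and DEL (127) is special-cased to \u007f.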
public AutoBuffer putJSONStrUnquoted ( String s ) { return s==null ? putJNULL() : putJStr(s); }
public AutoBuffer putJSONStrUnquoted ( String name, String s ) { return s==null ? putJSONStr(name).put1(':').putJNULL() : putJSONStr(name).put1(':').putJStr(s); }
public AutoBuffer putJSONName( String s ) { return put1('"').putJStr(s).put1('"'); }
public AutoBuffer putJSONStr ( String s ) { return s==null ? putJNULL() : putJSONName(s); }
public AutoBuffer putJSONAStr(String[] ss) {
if( ss == null ) return putJNULL();
put1('[');
for( int i=0; i<ss.length; i++ ) {
if( i>0 ) put1(',');
putJSONStr(ss[i]);
}
return put1(']');
}
private AutoBuffer putJSONAAStr( String[][] sss) {
if( sss == null ) return putJNULL();
put1('[');
for( int i=0; i<sss.length; i++ ) {
if( i>0 ) put1(',');
putJSONAStr(sss[i]);
}
return put1(']');
}
@SuppressWarnings("unused") public AutoBuffer putJSONStr (String name, String s ) { return putJSONStr(name).put1(':').putJSONStr(s); }
@SuppressWarnings("unused") public AutoBuffer putJSONAStr (String name, String[] ss ) { return putJSONStr(name).put1(':').putJSONAStr(ss); }
@SuppressWarnings("unused") public AutoBuffer putJSONAAStr(String name, String[][]sss) { return putJSONStr(name).put1(':').putJSONAAStr(sss); }
@SuppressWarnings("unused") public AutoBuffer putJSONSer (String name, Object o ) { return putJSONStr(name).put1(':').putJNULL(); }
@SuppressWarnings("unused") public AutoBuffer putJSONASer (String name, Object[] oo ) { return putJSONStr(name).put1(':').putJNULL(); }
@SuppressWarnings("unused") public AutoBuffer putJSONAASer (String name, Object[][] ooo ) { return putJSONStr(name).put1(':').putJNULL(); }
@SuppressWarnings("unused") public AutoBuffer putJSONAAASer(String name, Object[][][] oooo) { return putJSONStr(name).put1(':').putJNULL(); }
public AutoBuffer putJSONAZ( String name, boolean[] f) { return putJSONStr(name).put1(':').putJSONAZ(f); }
public AutoBuffer putJSON(Freezable ice) { return ice == null ? putJNULL() : ice.writeJSON(this); }
public AutoBuffer putJSONA( Freezable fs[] ) {
if( fs == null ) return putJNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSON(fs[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA( Freezable fs[][]) {
if( fs == null ) return putJNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSONA(fs[i]);
}
return put1(']');
}
public AutoBuffer putJSONAAA( Freezable fs[][][]) {
if( fs == null ) return putJNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA(fs[i]);
}
return put1(']');
}
@SuppressWarnings("unused") public AutoBuffer putJSON ( String name, Freezable f ) { return putJSONStr(name).put1(':').putJSON (f); }
public AutoBuffer putJSONA ( String name, Freezable f[] ) { return putJSONStr(name).put1(':').putJSONA (f); }
@SuppressWarnings("unused") public AutoBuffer putJSONAA( String name, Freezable f[][]){ return putJSONStr(name).put1(':').putJSONAA(f); }
@SuppressWarnings("unused") public AutoBuffer putJSONAAA( String name, Freezable f[][][]){ return putJSONStr(name).put1(':').putJSONAAA(f); }
@SuppressWarnings("unused") public AutoBuffer putJSONZ( String name, boolean value ) { return putJSONStr(name).put1(':').putJStr("" + value); }
private AutoBuffer putJSONAZ(boolean [] b) {
if (b == null) return putJNULL();
put1('[');
for( int i = 0; i < b.length; ++i) {
if (i > 0) put1(',');
putJStr(""+b[i]);
}
return put1(']');
}
// Most simple integers
private AutoBuffer putJInt( int i ) {
byte b[] = StringUtils.toBytes(i);
return putA1(b,b.length);
}
public AutoBuffer putJSON1( byte b ) { return putJInt(b); }
public AutoBuffer putJSONA1( byte ary[] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON1(ary[i]);
}
return put1(']');
}
private AutoBuffer putJSONAA1(byte ary[][]) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONA1(ary[i]);
}
return put1(']');
}
@SuppressWarnings("unused") public AutoBuffer putJSON1 (String name, byte b ) { return putJSONStr(name).put1(':').putJSON1(b); }
@SuppressWarnings("unused") public AutoBuffer putJSONA1 (String name, byte b[] ) { return putJSONStr(name).put1(':').putJSONA1(b); }
@SuppressWarnings("unused") public AutoBuffer putJSONAA1(String name, byte b[][]) { return putJSONStr(name).put1(':').putJSONAA1(b); }
public AutoBuffer putJSONAEnum(String name, Enum[] enums) {
return putJSONStr(name).put1(':').putJSONAEnum(enums);
}
public AutoBuffer putJSONAEnum( Enum[] enums ) {
if( enums == null ) return putJNULL();
put1('[');
for( int i=0; i<enums.length; i++ ) {
if( i>0 ) put1(',');
putJSONEnum(enums[i]);
}
return put1(']');
}
AutoBuffer putJSON2( char c ) { return putJSON4(c); }
AutoBuffer putJSON2( String name, char c ) { return putJSONStr(name).put1(':').putJSON2(c); }
AutoBuffer putJSON2( short c ) { return putJSON4(c); }
AutoBuffer putJSON2( String name, short c ) { return putJSONStr(name).put1(':').putJSON2(c); }
public AutoBuffer putJSONA2( String name, short ary[] ) { return putJSONStr(name).put1(':').putJSONA2(ary); }
AutoBuffer putJSONA2( short ary[] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON2(ary[i]);
}
return put1(']');
}
AutoBuffer putJSON8 ( long l ) { return putJStr(Long.toString(l)); }
AutoBuffer putJSONA8( long ary[] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON8(ary[i]);
}
return put1(']');
}
AutoBuffer putJSONAA8( long ary[][] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONA8(ary[i]);
}
return put1(']');
}
AutoBuffer putJSONAAA8( long ary[][][] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA8(ary[i]);
}
return put1(']');
}
AutoBuffer putJSONEnum( Enum e ) {
return e==null ? putJNULL() : put1('"').putJStr(e.toString()).put1('"');
}
public AutoBuffer putJSON8 ( String name, long l ) { return putJSONStr(name).put1(':').putJSON8(l); }
public AutoBuffer putJSONEnum( String name, Enum e ) { return putJSONStr(name).put1(':').putJSONEnum(e); }
public AutoBuffer putJSONA8( String name, long ary[] ) { return putJSONStr(name).put1(':').putJSONA8(ary); }
public AutoBuffer putJSONAA8( String name, long ary[][] ) { return putJSONStr(name).put1(':').putJSONAA8(ary); }
public AutoBuffer putJSONAAA8( String name, long ary[][][] ) { return putJSONStr(name).put1(':').putJSONAAA8(ary); }
public AutoBuffer putJSON4(int i) { return putJStr(Integer.toString(i)); }
AutoBuffer putJSONA4( int[] a) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON4(a[i]);
}
return put1(']');
}
AutoBuffer putJSONAA4( int[][] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA4(a[i]);
}
return put1(']');
}
AutoBuffer putJSONAAA4( int[][][] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA4(a[i]);
}
return put1(']');
}
public AutoBuffer putJSON4 ( String name, int i ) { return putJSONStr(name).put1(':').putJSON4(i); }
public AutoBuffer putJSONA4( String name, int[] a) { return putJSONStr(name).put1(':').putJSONA4(a); }
public AutoBuffer putJSONAA4( String name, int[][] a ) { return putJSONStr(name).put1(':').putJSONAA4(a); }
public AutoBuffer putJSONAAA4( String name, int[][][] a ) { return putJSONStr(name).put1(':').putJSONAAA4(a); }
AutoBuffer putJSON4f ( float f ) { return f==Float.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(f==Float.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Float.isNaN(f)?putJSONStr(JSON_NAN):putJStr(Float .toString(f)))); }
public AutoBuffer putJSON4f ( String name, float f ) { return putJSONStr(name).put1(':').putJSON4f(f); }
AutoBuffer putJSONA4f( float[] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON4f(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONA4f(String name, float[] a) {
putJSONStr(name).put1(':');
return putJSONA4f(a);
}
AutoBuffer putJSONAA4f(String name, float[][] a) {
putJSONStr(name).put1(':');
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA4f(a[i]);
}
return put1(']');
}
AutoBuffer putJSON8d( double d ) {
if (TwoDimTable.isEmpty(d)) return putJNULL();
return d==Double.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(d==Double.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Double.isNaN(d)?putJSONStr(JSON_NAN):putJStr(Double.toString(d))));
}
public AutoBuffer putJSON8d( String name, double d ) { return putJSONStr(name).put1(':').putJSON8d(d); }
public AutoBuffer putJSONA8d( String name, double[] a ) {
return putJSONStr(name).put1(':').putJSONA8d(a);
}
public AutoBuffer putJSONAA8d( String name, double[][] a) {
return putJSONStr(name).put1(':').putJSONAA8d(a);
}
public AutoBuffer putJSONAAA8d( String name, double[][][] a) { return putJSONStr(name).put1(':').putJSONAAA8d(a); }
public AutoBuffer putJSONA8d( double[] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON8d(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA8d( double[][] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA8d(a[i]);
}
return put1(']');
}
AutoBuffer putJSONAAA8d( double ary[][][] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA8d(ary[i]);
}
return put1(']');
}
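  // JSON has no literal representation for NaN or the infinities; putJSON4f and putJSON8d map
  // non-finite float/double values to the sentinel strings below instead of a numeric literal.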
static final String JSON_NAN = "NaN";
static final String JSON_POS_INF = "Infinity";
static final String JSON_NEG_INF = "-Infinity";
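  // A minimal usage sketch (assuming an AutoBuffer 'ab'; the caller supplies the surrounding braces
  // and commas, since the named putJSON* helpers only emit the "name":value pair):
  //   ab.put1('{').putJSON4("rows", 42).put1(',').putJSONA8d("means", new double[]{1.5, 2.5}).put1('}');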
}
| mathemage/h2o-3 | h2o-core/src/main/java/water/AutoBuffer.java | Java | apache-2.0 | 74,621 |
/*
* Copyright (c) 2010-2013 Evolveum
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.evolveum.midpoint.model.impl.lens;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import javax.xml.namespace.QName;
import com.evolveum.midpoint.prism.*;
import com.evolveum.midpoint.schema.DeltaConvertor;
import com.evolveum.midpoint.schema.result.OperationResult;
import com.evolveum.midpoint.util.exception.*;
import com.evolveum.midpoint.xml.ns._public.model.model_context_3.LensProjectionContextType;
import org.apache.commons.lang.StringUtils;
import org.jvnet.jaxb2_commons.lang.Validate;
import com.evolveum.midpoint.common.crypto.CryptoUtil;
import com.evolveum.midpoint.common.refinery.RefinedObjectClassDefinition;
import com.evolveum.midpoint.common.refinery.RefinedResourceSchema;
import com.evolveum.midpoint.common.refinery.ResourceShadowDiscriminator;
import com.evolveum.midpoint.model.api.context.ModelProjectionContext;
import com.evolveum.midpoint.model.api.context.SynchronizationPolicyDecision;
import com.evolveum.midpoint.prism.delta.ChangeType;
import com.evolveum.midpoint.prism.delta.DeltaSetTriple;
import com.evolveum.midpoint.prism.delta.ObjectDelta;
import com.evolveum.midpoint.prism.delta.PrismValueDeltaSetTriple;
import com.evolveum.midpoint.prism.delta.ReferenceDelta;
import com.evolveum.midpoint.prism.path.ItemPath;
import com.evolveum.midpoint.schema.processor.ResourceAttribute;
import com.evolveum.midpoint.schema.processor.ResourceSchema;
import com.evolveum.midpoint.schema.util.MiscSchemaUtil;
import com.evolveum.midpoint.schema.util.ShadowUtil;
import com.evolveum.midpoint.schema.util.ResourceTypeUtil;
import com.evolveum.midpoint.schema.util.SchemaDebugUtil;
import com.evolveum.midpoint.util.Cloner;
import com.evolveum.midpoint.util.DebugUtil;
import com.evolveum.midpoint.xml.ns._public.common.common_3.AssignmentPolicyEnforcementType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.FocusType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.LayerType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ObjectType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.OperationResultStatusType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.OperationResultType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ProjectionPolicyType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceObjectTypeDefinitionType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceObjectTypeDependencyType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowAssociationType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowDiscriminatorType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowKindType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.SynchronizationSituationType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ValuePolicyType;
/**
* @author semancik
*
*/
public class LensProjectionContext extends LensElementContext<ShadowType> implements ModelProjectionContext {
private ObjectDelta<ShadowType> syncDelta;
/**
     * If set to true, the absolute state of this projection was detected by the synchronization.
* This is mostly for debugging and visibility. It is not used by projection logic.
*/
private boolean syncAbsoluteTrigger = false;
/**
* The wave in which this resource should be processed. Initial value of -1 means "undetermined".
*/
private int wave = -1;
/**
* Indicates that the wave computation is still in progress.
*/
private transient boolean waveIncomplete = false;
/**
* Definition of account type.
*/
private ResourceShadowDiscriminator resourceShadowDiscriminator;
private boolean fullShadow = false;
/**
     * True if the account is "legal" (assigned to the user). It may be false for accounts that were
     * found to be illegal by live sync, were unassigned from the user, etc.
     * The value is initialized to false when the context is constructed.
*/
private boolean isAssigned;
/**
* True if the account should be part of the synchronization. E.g. outbound expression should be applied to it.
*/
private boolean isActive;
/**
     * True if there is a valid assignment for this projection and/or the policy allows such a projection to exist.
*/
private Boolean isLegal = null;
private Boolean isLegalOld = null;
private boolean isExists;
/**
     * Decision regarding the account. It indicates what the engine has DECIDED TO DO with the context.
     * If set to null, no decision was made yet. Null is also a typical value when the context is created.
*/
private SynchronizationPolicyDecision synchronizationPolicyDecision;
/**
* True if we want to reconcile account in this context.
*/
private boolean doReconciliation;
/**
* Synchronization situation as it was originally detected by the synchronization code (SynchronizationService).
* This is mostly for debug purposes. Projector and clockwork do not need to care about this.
* The synchronization intent is used instead.
*/
private SynchronizationSituationType synchronizationSituationDetected = null;
/**
* Synchronization situation which was the result of synchronization reaction (projector and clockwork run).
* This is mostly for debug purposes. Projector and clockwork do not care about this (except for setting it).
* The synchronization decision is used instead.
*/
private SynchronizationSituationType synchronizationSituationResolved = null;
/**
* Delta set triple for accounts. Specifies which accounts should be added, removed or stay as they are.
     * It tells almost nothing about attributes directly, although the information about attributes is inside
     * each account construction (in the form of a ValueConstruction that contains attribute delta triples).
*
* Intermediary computation result. It is stored to allow re-computing of account constructions during
* iterative computations.
*/
private transient PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> constructionDeltaSetTriple;
private transient Construction outboundConstruction;
private transient Collection<ResourceObjectTypeDependencyType> dependencies = null;
private transient Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> squeezedAttributes;
private transient Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> squeezedAssociations;
private ValuePolicyType accountPasswordPolicy;
/**
* Resource that hosts this projection.
*/
transient private ResourceType resource;
LensProjectionContext(LensContext<? extends ObjectType> lensContext, ResourceShadowDiscriminator resourceAccountType) {
super(ShadowType.class, lensContext);
this.resourceShadowDiscriminator = resourceAccountType;
this.isAssigned = false;
}
public ObjectDelta<ShadowType> getSyncDelta() {
return syncDelta;
}
public void setSyncDelta(ObjectDelta<ShadowType> syncDelta) {
this.syncDelta = syncDelta;
}
public boolean isSyncAbsoluteTrigger() {
return syncAbsoluteTrigger;
}
public void setSyncAbsoluteTrigger(boolean syncAbsoluteTrigger) {
this.syncAbsoluteTrigger = syncAbsoluteTrigger;
}
public int getWave() {
return wave;
}
public void setWave(int wave) {
this.wave = wave;
}
public boolean isWaveIncomplete() {
return waveIncomplete;
}
public void setWaveIncomplete(boolean waveIncomplete) {
this.waveIncomplete = waveIncomplete;
}
public boolean isDoReconciliation() {
return doReconciliation;
}
public void setDoReconciliation(boolean doReconciliation) {
this.doReconciliation = doReconciliation;
}
public ResourceShadowDiscriminator getResourceShadowDiscriminator() {
return resourceShadowDiscriminator;
}
public void setResourceShadowDiscriminator(ResourceShadowDiscriminator resourceShadowDiscriminator) {
this.resourceShadowDiscriminator = resourceShadowDiscriminator;
}
public boolean compareResourceShadowDiscriminator(ResourceShadowDiscriminator rsd, boolean compareOrder) {
Validate.notNull(rsd.getResourceOid());
if (resourceShadowDiscriminator == null) {
            // This may be a valid case, e.g. for broken contexts or if a context is still being loaded
return false;
}
if (!rsd.getResourceOid().equals(resourceShadowDiscriminator.getResourceOid())) {
return false;
}
if (!rsd.getKind().equals(resourceShadowDiscriminator.getKind())) {
return false;
}
if (rsd.isThombstone() != resourceShadowDiscriminator.isThombstone()) {
return false;
}
if (rsd.getIntent() == null) {
try {
if (!getRefinedAccountDefinition().isDefaultInAKind()) {
return false;
}
} catch (SchemaException e) {
throw new SystemException("Internal error: "+e.getMessage(), e);
}
} else if (!rsd.getIntent().equals(resourceShadowDiscriminator.getIntent())) {
return false;
}
if (compareOrder && rsd.getOrder() != resourceShadowDiscriminator.getOrder()) {
return false;
}
return true;
}
public boolean isThombstone() {
if (resourceShadowDiscriminator == null) {
return false;
}
return resourceShadowDiscriminator.isThombstone();
}
public void addAccountSyncDelta(ObjectDelta<ShadowType> delta) throws SchemaException {
if (syncDelta == null) {
syncDelta = delta;
} else {
syncDelta.merge(delta);
}
}
public boolean isAdd() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD) {
return true;
} else if (synchronizationPolicyDecision != null){
return false;
}
return super.isAdd();
}
public boolean isModify() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.KEEP) {
return true;
} else if (synchronizationPolicyDecision != null){
return false;
}
return super.isModify();
}
public boolean isDelete() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.DELETE) {
return true;
} else if (synchronizationPolicyDecision != null){
return false;
}
if (syncDelta != null && syncDelta.isDelete()) {
return true;
}
return super.isDelete();
}
public ResourceType getResource() {
return resource;
}
public void setResource(ResourceType resource) {
this.resource = resource;
}
public boolean isAssigned() {
return isAssigned;
}
public void setAssigned(boolean isAssigned) {
this.isAssigned = isAssigned;
}
public boolean isActive() {
return isActive;
}
public void setActive(boolean isActive) {
this.isActive = isActive;
}
public Boolean isLegal() {
return isLegal;
}
public void setLegal(Boolean isLegal) {
this.isLegal = isLegal;
}
public Boolean isLegalOld() {
return isLegalOld;
}
public void setLegalOld(Boolean isLegalOld) {
this.isLegalOld = isLegalOld;
}
public boolean isExists() {
return isExists;
}
public void setExists(boolean exists) {
this.isExists = exists;
}
public SynchronizationPolicyDecision getSynchronizationPolicyDecision() {
return synchronizationPolicyDecision;
}
public void setSynchronizationPolicyDecision(SynchronizationPolicyDecision policyDecision) {
this.synchronizationPolicyDecision = policyDecision;
}
public SynchronizationSituationType getSynchronizationSituationDetected() {
return synchronizationSituationDetected;
}
public void setSynchronizationSituationDetected(
SynchronizationSituationType synchronizationSituationDetected) {
this.synchronizationSituationDetected = synchronizationSituationDetected;
}
public SynchronizationSituationType getSynchronizationSituationResolved() {
return synchronizationSituationResolved;
}
public void setSynchronizationSituationResolved(
SynchronizationSituationType synchronizationSituationResolved) {
this.synchronizationSituationResolved = synchronizationSituationResolved;
}
public boolean isFullShadow() {
return fullShadow;
}
/**
* Returns true if full shadow is available, either loaded or in a create delta.
*/
public boolean hasFullShadow() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD) {
return true;
}
return isFullShadow();
}
public void setFullShadow(boolean fullShadow) {
this.fullShadow = fullShadow;
}
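    // Determines the shadow kind: prefer the discriminator, then fall back to the old, current and
    // new object states; defaults to ACCOUNT if none of them carries the information.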
public ShadowKindType getKind() {
ResourceShadowDiscriminator discr = getResourceShadowDiscriminator();
if (discr != null) {
return discr.getKind();
}
if (getObjectOld()!=null) {
return getObjectOld().asObjectable().getKind();
}
if (getObjectCurrent()!=null) {
return getObjectCurrent().asObjectable().getKind();
}
if (getObjectNew()!=null) {
return getObjectNew().asObjectable().getKind();
}
return ShadowKindType.ACCOUNT;
}
public PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> getConstructionDeltaSetTriple() {
return constructionDeltaSetTriple;
}
public void setConstructionDeltaSetTriple(
PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> constructionDeltaSetTriple) {
this.constructionDeltaSetTriple = constructionDeltaSetTriple;
}
public Construction getOutboundConstruction() {
return outboundConstruction;
}
public void setOutboundConstruction(Construction outboundConstruction) {
this.outboundConstruction = outboundConstruction;
}
public Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> getSqueezedAttributes() {
return squeezedAttributes;
}
public void setSqueezedAttributes(Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> squeezedAttributes) {
this.squeezedAttributes = squeezedAttributes;
}
public Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> getSqueezedAssociations() {
return squeezedAssociations;
}
public void setSqueezedAssociations(
Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> squeezedAssociations) {
this.squeezedAssociations = squeezedAssociations;
}
public ResourceObjectTypeDefinitionType getResourceObjectTypeDefinitionType() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN) {
return null;
}
ResourceObjectTypeDefinitionType def = ResourceTypeUtil.getResourceObjectTypeDefinitionType(
resource, getResourceShadowDiscriminator().getKind(), resourceShadowDiscriminator.getIntent());
return def;
}
private ResourceSchema getResourceSchema() throws SchemaException {
return RefinedResourceSchema.getResourceSchema(resource, getNotNullPrismContext());
}
public RefinedResourceSchema getRefinedResourceSchema() throws SchemaException {
if (resource == null) {
return null;
}
return RefinedResourceSchema.getRefinedSchema(resource, LayerType.MODEL, getNotNullPrismContext());
}
public RefinedObjectClassDefinition getRefinedAccountDefinition() throws SchemaException {
RefinedResourceSchema refinedSchema = getRefinedResourceSchema();
if (refinedSchema == null) {
return null;
}
return refinedSchema.getRefinedDefinition(getResourceShadowDiscriminator().getKind(), getResourceShadowDiscriminator().getIntent());
}
public Collection<ResourceObjectTypeDependencyType> getDependencies() {
if (dependencies == null) {
ResourceObjectTypeDefinitionType resourceAccountTypeDefinitionType = getResourceObjectTypeDefinitionType();
if (resourceAccountTypeDefinitionType == null) {
// No dependencies. But we cannot set null as that means "unknown". So let's set empty collection instead.
dependencies = new ArrayList<ResourceObjectTypeDependencyType>();
} else {
dependencies = resourceAccountTypeDefinitionType.getDependency();
}
}
return dependencies;
}
public ValuePolicyType getAccountPasswordPolicy() {
return accountPasswordPolicy;
}
public void setAccountPasswordPolicy(ValuePolicyType accountPasswordPolicy) {
this.accountPasswordPolicy = accountPasswordPolicy;
}
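    // Password policy precedence: the projection-specific policy first, then the focus org policy,
    // then the global policy from the lens context.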
public ValuePolicyType getEffectivePasswordPolicy() {
if (accountPasswordPolicy != null) {
return accountPasswordPolicy;
}
if (getLensContext().getFocusContext().getOrgPasswordPolicy() != null){
return getLensContext().getFocusContext().getOrgPasswordPolicy();
}
return getLensContext().getGlobalPasswordPolicy();
}
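    // Resource-level projection settings take precedence; if the resource defines none, the
    // context-wide account synchronization settings are used instead.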
public AssignmentPolicyEnforcementType getAssignmentPolicyEnforcementType() {
// TODO: per-resource assignment enforcement
ResourceType resource = getResource();
ProjectionPolicyType globalAccountSynchronizationSettings = null;
if (resource != null){
globalAccountSynchronizationSettings = resource.getProjection();
}
if (globalAccountSynchronizationSettings == null) {
globalAccountSynchronizationSettings = getLensContext().getAccountSynchronizationSettings();
}
AssignmentPolicyEnforcementType globalAssignmentPolicyEnforcement = MiscSchemaUtil.getAssignmentPolicyEnforcementType(globalAccountSynchronizationSettings);
return globalAssignmentPolicyEnforcement;
}
public boolean isLegalize(){
ResourceType resource = getResource();
ProjectionPolicyType globalAccountSynchronizationSettings = null;
if (resource != null){
globalAccountSynchronizationSettings = resource.getProjection();
}
if (globalAccountSynchronizationSettings == null) {
globalAccountSynchronizationSettings = getLensContext().getAccountSynchronizationSettings();
}
if (globalAccountSynchronizationSettings == null){
return false;
}
if (globalAccountSynchronizationSettings.isLegalize() == null){
return false;
}
return globalAccountSynchronizationSettings.isLegalize();
}
/**
     * Recomputes the new state of the account (objectNew). It is computed by applying the deltas to the current
     * state (falling back to the old state), assuming that the base object is already set (or is null if it does not exist).
*/
public void recompute() throws SchemaException {
ObjectDelta<ShadowType> accDelta = getDelta();
PrismObject<ShadowType> base = getObjectCurrent();
if (base == null) {
base = getObjectOld();
}
ObjectDelta<ShadowType> syncDelta = getSyncDelta();
if (base == null && syncDelta != null
&& ChangeType.ADD.equals(syncDelta.getChangeType())) {
PrismObject<ShadowType> objectToAdd = syncDelta.getObjectToAdd();
if (objectToAdd != null) {
PrismObjectDefinition<ShadowType> objectDefinition = objectToAdd.getDefinition();
// TODO: remove constructor, use some factory method instead
base = new PrismObject<ShadowType>(objectToAdd.getElementName(), objectDefinition, getNotNullPrismContext());
base = syncDelta.computeChangedObject(base);
}
}
if (accDelta == null) {
// No change
setObjectNew(base);
return;
}
if (base == null && accDelta.isModify()) {
RefinedObjectClassDefinition rAccountDef = getRefinedAccountDefinition();
if (rAccountDef != null) {
base = (PrismObject<ShadowType>) rAccountDef.createBlankShadow();
}
}
setObjectNew(accDelta.computeChangedObject(base));
}
public void clearIntermediateResults() {
constructionDeltaSetTriple = null;
outboundConstruction = null;
squeezedAttributes = null;
}
/**
* Distribute the resource that's in the context into all the prism objects (old, new) and deltas.
     * The resourceRef will then contain not just the OID but also the full resource object. This may optimize handling
* of the objects in upper layers (e.g. GUI).
*/
public void distributeResource() {
ResourceType resourceType = getResource();
if (resourceType == null) {
return;
}
PrismObject<ResourceType> resource = resourceType.asPrismObject();
distributeResourceObject(getObjectOld(), resource);
distributeResourceObject(getObjectCurrent(), resource);
distributeResourceObject(getObjectNew(), resource);
distributeResourceDelta(getPrimaryDelta(), resource);
distributeResourceDelta(getSecondaryDelta(), resource);
}
private void distributeResourceObject(PrismObject<ShadowType> object, PrismObject<ResourceType> resource) {
if (object == null) {
return;
}
PrismReference resourceRef = object.findReference(ShadowType.F_RESOURCE_REF);
if (resourceRef != null) {
distributeResourceValues(resourceRef.getValues(), resource);
}
}
private void distributeResourceValue(PrismReferenceValue resourceRefVal, PrismObject<ResourceType> resource) {
if (resourceRefVal != null) {
resourceRefVal.setObject(resource);
}
}
private void distributeResourceDelta(ObjectDelta<ShadowType> delta, PrismObject<ResourceType> resource) {
if (delta == null) {
return;
}
if (delta.isAdd()) {
distributeResourceObject(delta.getObjectToAdd(), resource);
} else if (delta.isModify()) {
ReferenceDelta referenceDelta = delta.findReferenceModification(ShadowType.F_RESOURCE_REF);
if (referenceDelta != null) {
distributeResourceValues(referenceDelta.getValuesToAdd(), resource);
distributeResourceValues(referenceDelta.getValuesToDelete(), resource);
distributeResourceValues(referenceDelta.getValuesToReplace(), resource);
}
} // Nothing to do for DELETE delta
}
private void distributeResourceValues(Collection<PrismReferenceValue> values, PrismObject<ResourceType> resource) {
if (values == null) {
return;
}
for(PrismReferenceValue pval: values) {
distributeResourceValue(pval, resource);
}
}
/**
* Returns delta suitable for execution. The primary and secondary deltas may not make complete sense all by themselves.
     * E.g. they may both be MODIFY deltas even when the account should be created. The deltas begin to make sense
* only if combined with sync decision. This method provides the deltas all combined and ready for execution.
*/
public ObjectDelta<ShadowType> getExecutableDelta() throws SchemaException {
SynchronizationPolicyDecision policyDecision = getSynchronizationPolicyDecision();
ObjectDelta<ShadowType> origDelta = getDelta();
if (policyDecision == SynchronizationPolicyDecision.ADD) {
if (origDelta == null || origDelta.isModify()) {
// We need to convert modify delta to ADD
ObjectDelta<ShadowType> addDelta = new ObjectDelta<ShadowType>(getObjectTypeClass(),
ChangeType.ADD, getPrismContext());
RefinedObjectClassDefinition rAccount = getRefinedAccountDefinition();
if (rAccount == null) {
throw new IllegalStateException("Definition for account type " + getResourceShadowDiscriminator()
+ " not found in the context, but it should be there");
}
PrismObject<ShadowType> newAccount = (PrismObject<ShadowType>) rAccount.createBlankShadow();
addDelta.setObjectToAdd(newAccount);
if (origDelta != null) {
addDelta.merge(origDelta);
}
return addDelta;
}
} else if (policyDecision == SynchronizationPolicyDecision.KEEP) {
// Any delta is OK
} else if (policyDecision == SynchronizationPolicyDecision.DELETE) {
ObjectDelta<ShadowType> deleteDelta = new ObjectDelta<ShadowType>(getObjectTypeClass(),
ChangeType.DELETE, getPrismContext());
String oid = getOid();
if (oid == null) {
throw new IllegalStateException(
"Internal error: account context OID is null during attempt to create delete secondary delta; context="
+this);
}
deleteDelta.setOid(oid);
return deleteDelta;
} else {
// This is either UNLINK or null, both are in fact the same as KEEP
// Any delta is OK
}
return origDelta;
}
public void checkConsistence() {
checkConsistence(null, true, false);
}
public void checkConsistence(String contextDesc, boolean fresh, boolean force) {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.IGNORE) {
            // Do not check these; they may be quite wild.
return;
}
super.checkConsistence(contextDesc);
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN) {
return;
}
if (fresh && !force) {
if (resource == null) {
throw new IllegalStateException("Null resource in "+this + (contextDesc == null ? "" : " in " +contextDesc));
}
if (resourceShadowDiscriminator == null) {
throw new IllegalStateException("Null resource account type in "+this + (contextDesc == null ? "" : " in " +contextDesc));
}
}
if (syncDelta != null) {
try {
syncDelta.checkConsistence(true, true, true);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(e.getMessage()+"; in "+getElementDesc()+" sync delta in "+this + (contextDesc == null ? "" : " in " +contextDesc), e);
} catch (IllegalStateException e) {
throw new IllegalStateException(e.getMessage()+"; in "+getElementDesc()+" sync delta in "+this + (contextDesc == null ? "" : " in " +contextDesc), e);
}
}
}
protected boolean isRequireSecondardyDeltaOid() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD ||
synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN ||
synchronizationPolicyDecision == SynchronizationPolicyDecision.IGNORE) {
return false;
}
if (getResourceShadowDiscriminator() != null && getResourceShadowDiscriminator().getOrder() > 0) {
// These may not have the OID yet
return false;
}
return super.isRequireSecondardyDeltaOid();
}
@Override
public void cleanup() {
super.cleanup();
synchronizationPolicyDecision = null;
// isLegal = null;
// isLegalOld = null;
isAssigned = false;
isActive = false;
}
@Override
public void normalize() {
super.normalize();
if (syncDelta != null) {
syncDelta.normalize();
}
}
@Override
public void reset() {
super.reset();
wave = -1;
fullShadow = false;
isAssigned = false;
isActive = false;
synchronizationPolicyDecision = null;
constructionDeltaSetTriple = null;
outboundConstruction = null;
dependencies = null;
squeezedAttributes = null;
accountPasswordPolicy = null;
}
@Override
public void adopt(PrismContext prismContext) throws SchemaException {
super.adopt(prismContext);
if (syncDelta != null) {
prismContext.adopt(syncDelta);
}
}
@Override
public LensProjectionContext clone(LensContext<? extends ObjectType> lensContext) {
LensProjectionContext clone = new LensProjectionContext(lensContext, resourceShadowDiscriminator);
copyValues(clone, lensContext);
return clone;
}
protected void copyValues(LensProjectionContext clone, LensContext<? extends ObjectType> lensContext) {
super.copyValues(clone, lensContext);
// do NOT clone transient values such as accountConstructionDeltaSetTriple
        // these are not meant to be cloned and they are also not directly cloneable
clone.dependencies = this.dependencies;
clone.doReconciliation = this.doReconciliation;
clone.fullShadow = this.fullShadow;
clone.isAssigned = this.isAssigned;
clone.outboundConstruction = this.outboundConstruction;
clone.synchronizationPolicyDecision = this.synchronizationPolicyDecision;
clone.resource = this.resource;
clone.resourceShadowDiscriminator = this.resourceShadowDiscriminator;
clone.squeezedAttributes = cloneSqueezedAttributes();
if (this.syncDelta != null) {
clone.syncDelta = this.syncDelta.clone();
}
clone.wave = this.wave;
}
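    // Deep-copies the squeezed attribute map; each delta set triple is cloned via a Cloner callback
    // so the clone does not share ItemValueWithOrigin instances with this context.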
private Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> cloneSqueezedAttributes() {
if (squeezedAttributes == null) {
return null;
}
Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> clonedMap
= new HashMap<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>>();
Cloner<ItemValueWithOrigin<PrismPropertyValue<?>>> cloner = new Cloner<ItemValueWithOrigin<PrismPropertyValue<?>>>() {
@Override
public ItemValueWithOrigin<PrismPropertyValue<?>> clone(ItemValueWithOrigin<PrismPropertyValue<?>> original) {
return original.clone();
}
};
for (Entry<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> entry: squeezedAttributes.entrySet()) {
clonedMap.put(entry.getKey(), entry.getValue().clone(cloner));
}
return clonedMap;
}
/**
     * Returns true if the projection has any value for the specified attribute.
*/
public boolean hasValueForAttribute(QName attributeName) throws SchemaException {
ItemPath attrPath = new ItemPath(ShadowType.F_ATTRIBUTES, attributeName);
if (getObjectNew() != null) {
PrismProperty<?> attrNew = getObjectNew().findProperty(attrPath);
if (attrNew != null && !attrNew.isEmpty()) {
return true;
}
}
return false;
}
private boolean hasValueForAttribute(QName attributeName, Collection<PrismPropertyValue<Construction>> acPpvSet) {
if (acPpvSet == null) {
return false;
}
for (PrismPropertyValue<Construction> acPpv: acPpvSet) {
Construction ac = acPpv.getValue();
if (ac.hasValueForAttribute(attributeName)) {
return true;
}
}
return false;
}
public AccountOperation getOperation() {
if (isAdd()) {
return AccountOperation.ADD;
}
if (isDelete()) {
return AccountOperation.DELETE;
}
return AccountOperation.MODIFY;
}
@Override
public void checkEncrypted() {
super.checkEncrypted();
if (syncDelta != null) {
CryptoUtil.checkEncrypted(syncDelta);
}
}
public String getHumanReadableName() {
StringBuilder sb = new StringBuilder();
sb.append("account(");
String humanReadableAccountIdentifier = getHumanReadableIdentifier();
if (StringUtils.isEmpty(humanReadableAccountIdentifier)) {
sb.append("no ID");
} else {
sb.append("ID ");
sb.append(humanReadableAccountIdentifier);
}
ResourceShadowDiscriminator discr = getResourceShadowDiscriminator();
if (discr != null) {
sb.append(", type '");
sb.append(discr.getIntent());
sb.append("', ");
if (discr.getOrder() != 0) {
sb.append("order ").append(discr.getOrder()).append(", ");
}
} else {
sb.append(" (no discriminator) ");
}
sb.append(getResource());
sb.append(")");
return sb.toString();
}
private String getHumanReadableIdentifier() {
PrismObject<ShadowType> object = getObjectNew();
if (object == null) {
object = getObjectOld();
}
if (object == null) {
object = getObjectCurrent();
}
if (object == null) {
return null;
}
if (object.canRepresent(ShadowType.class)) {
PrismObject<ShadowType> shadow = (PrismObject<ShadowType>)object;
Collection<ResourceAttribute<?>> identifiers = ShadowUtil.getIdentifiers(shadow);
if (identifiers == null) {
return null;
}
StringBuilder sb = new StringBuilder();
Iterator<ResourceAttribute<?>> iterator = identifiers.iterator();
while (iterator.hasNext()) {
ResourceAttribute<?> id = iterator.next();
sb.append(id.toHumanReadableString());
if (iterator.hasNext()) {
sb.append(",");
}
}
return sb.toString();
} else {
return object.toString();
}
}
@Override
public String debugDump() {
return debugDump(0);
}
@Override
public String debugDump(int indent) {
return debugDump(indent, true);
}
public String debugDump(int indent, boolean showTriples) {
StringBuilder sb = new StringBuilder();
SchemaDebugUtil.indentDebugDump(sb, indent);
sb.append("PROJECTION ");
sb.append(getObjectTypeClass() == null ? "null" : getObjectTypeClass().getSimpleName());
sb.append(" ");
sb.append(getResourceShadowDiscriminator());
if (resource != null) {
sb.append(" : ");
sb.append(resource.getName().getOrig());
}
sb.append("\n");
SchemaDebugUtil.indentDebugDump(sb, indent + 1);
sb.append("OID: ").append(getOid());
sb.append(", wave ").append(wave);
if (fullShadow) {
sb.append(", full");
} else {
sb.append(", shadow");
}
sb.append(", exists=").append(isExists);
sb.append(", assigned=").append(isAssigned);
sb.append(", active=").append(isActive);
sb.append(", legal=").append(isLegalOld).append("->").append(isLegal);
sb.append(", recon=").append(doReconciliation);
sb.append(", syncIntent=").append(getSynchronizationIntent());
sb.append(", decision=").append(synchronizationPolicyDecision);
if (!isFresh()) {
sb.append(", NOT FRESH");
}
if (resourceShadowDiscriminator != null && resourceShadowDiscriminator.isThombstone()) {
sb.append(", THOMBSTONE");
}
if (syncAbsoluteTrigger) {
sb.append(", SYNC TRIGGER");
}
if (getIteration() != 0) {
sb.append(", iteration=").append(getIteration()).append(" (").append(getIterationToken()).append(")");
}
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("old"), getObjectOld(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("current"), getObjectCurrent(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("new"), getObjectNew(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("primary delta"), getPrimaryDelta(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("secondary delta"), getSecondaryDelta(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("sync delta"), getSyncDelta(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("executed deltas"), getExecutedDeltas(), indent+1);
if (showTriples) {
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("constructionDeltaSetTriple"), constructionDeltaSetTriple, indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("outbound account construction"), outboundConstruction, indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("squeezed attributes"), squeezedAttributes, indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("squeezed associations"), squeezedAssociations, indent + 1);
// This is just a debug thing
// sb.append("\n");
// DebugUtil.indentDebugDump(sb, indent);
// sb.append("ACCOUNT dependencies\n");
// sb.append(DebugUtil.debugDump(dependencies, indent + 1));
}
return sb.toString();
}
@Override
protected String getElementDefaultDesc() {
return "projection";
}
@Override
public String toString() {
return "LensProjectionContext(" + (getObjectTypeClass() == null ? "null" : getObjectTypeClass().getSimpleName()) + ":" + getOid() +
( resource == null ? "" : " on " + resource ) + ")";
}
/**
* Return a human readable name of the projection object suitable for logs.
*/
public String toHumanReadableString() {
if (resourceShadowDiscriminator == null) {
return "(null" + resource + ")";
}
if (resource != null) {
return "("+getKindValue(resourceShadowDiscriminator.getKind()) + " ("+resourceShadowDiscriminator.getIntent()+") on " + resource + ")";
} else {
return "("+getKindValue(resourceShadowDiscriminator.getKind()) + " ("+resourceShadowDiscriminator.getIntent()+") on " + resourceShadowDiscriminator.getResourceOid() + ")";
}
}
public String getHumanReadableKind() {
if (resourceShadowDiscriminator == null) {
return "resource object";
}
return getKindValue(resourceShadowDiscriminator.getKind());
}
private String getKindValue(ShadowKindType kind) {
if (kind == null) {
return "null";
}
return kind.value();
}
@Override
protected String getElementDesc() {
if (resourceShadowDiscriminator == null) {
return "shadow";
}
return getKindValue(resourceShadowDiscriminator.getKind());
}
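    // Serializes this projection context into a new LensProjectionContextType value appended to the
    // given container.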
public void addToPrismContainer(PrismContainer<LensProjectionContextType> lensProjectionContextTypeContainer) throws SchemaException {
LensProjectionContextType lensProjectionContextType = lensProjectionContextTypeContainer.createNewValue().asContainerable();
super.storeIntoLensElementContextType(lensProjectionContextType);
lensProjectionContextType.setSyncDelta(syncDelta != null ? DeltaConvertor.toObjectDeltaType(syncDelta) : null);
lensProjectionContextType.setWave(wave);
lensProjectionContextType.setResourceShadowDiscriminator(resourceShadowDiscriminator != null ?
resourceShadowDiscriminator.toResourceShadowDiscriminatorType() : null);
lensProjectionContextType.setFullShadow(fullShadow);
lensProjectionContextType.setIsAssigned(isAssigned);
lensProjectionContextType.setIsActive(isActive);
lensProjectionContextType.setIsLegal(isLegal);
lensProjectionContextType.setIsLegalOld(isLegalOld);
lensProjectionContextType.setIsExists(isExists);
lensProjectionContextType.setSynchronizationPolicyDecision(synchronizationPolicyDecision != null ? synchronizationPolicyDecision.toSynchronizationPolicyDecisionType() : null);
lensProjectionContextType.setDoReconciliation(doReconciliation);
lensProjectionContextType.setSynchronizationSituationDetected(synchronizationSituationDetected);
lensProjectionContextType.setSynchronizationSituationResolved(synchronizationSituationResolved);
lensProjectionContextType.setAccountPasswordPolicy(accountPasswordPolicy);
lensProjectionContextType.setSyncAbsoluteTrigger(syncAbsoluteTrigger);
}
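    // Reconstructs a projection context from its serialized LensProjectionContextType form; missing
    // primitive flags default to false and a missing wave defaults to 0.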
public static LensProjectionContext fromLensProjectionContextType(LensProjectionContextType projectionContextType, LensContext lensContext, OperationResult result) throws SchemaException, ConfigurationException, ObjectNotFoundException, CommunicationException {
String objectTypeClassString = projectionContextType.getObjectTypeClass();
if (StringUtils.isEmpty(objectTypeClassString)) {
throw new SystemException("Object type class is undefined in LensProjectionContextType");
}
ResourceShadowDiscriminator resourceShadowDiscriminator = ResourceShadowDiscriminator.fromResourceShadowDiscriminatorType(projectionContextType.getResourceShadowDiscriminator());
LensProjectionContext projectionContext = new LensProjectionContext(lensContext, resourceShadowDiscriminator);
projectionContext.retrieveFromLensElementContextType(projectionContextType, result);
if (projectionContextType.getSyncDelta() != null) {
projectionContext.syncDelta = DeltaConvertor.createObjectDelta(projectionContextType.getSyncDelta(), lensContext.getPrismContext());
} else {
projectionContext.syncDelta = null;
}
projectionContext.wave = projectionContextType.getWave() != null ? projectionContextType.getWave() : 0;
projectionContext.fullShadow = projectionContextType.isFullShadow() != null ? projectionContextType.isFullShadow() : false;
projectionContext.isAssigned = projectionContextType.isIsAssigned() != null ? projectionContextType.isIsAssigned() : false;
projectionContext.isActive = projectionContextType.isIsActive() != null ? projectionContextType.isIsActive() : false;
projectionContext.isLegal = projectionContextType.isIsLegal();
projectionContext.isExists = projectionContextType.isIsExists() != null ? projectionContextType.isIsExists() : false;
projectionContext.synchronizationPolicyDecision = SynchronizationPolicyDecision.fromSynchronizationPolicyDecisionType(projectionContextType.getSynchronizationPolicyDecision());
projectionContext.doReconciliation = projectionContextType.isDoReconciliation() != null ? projectionContextType.isDoReconciliation() : false;
projectionContext.synchronizationSituationDetected = projectionContextType.getSynchronizationSituationDetected();
projectionContext.synchronizationSituationResolved = projectionContextType.getSynchronizationSituationResolved();
projectionContext.accountPasswordPolicy = projectionContextType.getAccountPasswordPolicy();
projectionContext.syncAbsoluteTrigger = projectionContextType.isSyncAbsoluteTrigger();
return projectionContext;
}
    // Determines whether the full shadow is present, based on the operation result obtained from provisioning.
public void determineFullShadowFlag(OperationResultType fetchResult) {
if (fetchResult != null
&& (fetchResult.getStatus() == OperationResultStatusType.PARTIAL_ERROR
|| fetchResult.getStatus() == OperationResultStatusType.FATAL_ERROR)) { // todo what about other kinds of status? [e.g. in-progress]
setFullShadow(false);
} else {
setFullShadow(true);
}
}
}
| sabriarabacioglu/engerek | model/model-impl/src/main/java/com/evolveum/midpoint/model/impl/lens/LensProjectionContext.java | Java | apache-2.0 | 42,728 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.io.kafka;
import static org.apache.beam.sdk.metrics.MetricResultsMatchers.attemptedMetricsResult;
import static org.apache.beam.sdk.transforms.display.DisplayDataMatchers.hasDisplayItem;
import static org.hamcrest.Matchers.hasItem;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.Nullable;
import org.apache.beam.sdk.Pipeline.PipelineExecutionException;
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.coders.BigEndianIntegerCoder;
import org.apache.beam.sdk.coders.BigEndianLongCoder;
import org.apache.beam.sdk.coders.CoderRegistry;
import org.apache.beam.sdk.coders.InstantCoder;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.coders.VarLongCoder;
import org.apache.beam.sdk.io.Read;
import org.apache.beam.sdk.io.UnboundedSource;
import org.apache.beam.sdk.io.UnboundedSource.UnboundedReader;
import org.apache.beam.sdk.io.kafka.serialization.InstantDeserializer;
import org.apache.beam.sdk.metrics.GaugeResult;
import org.apache.beam.sdk.metrics.MetricName;
import org.apache.beam.sdk.metrics.MetricNameFilter;
import org.apache.beam.sdk.metrics.MetricQueryResults;
import org.apache.beam.sdk.metrics.MetricResult;
import org.apache.beam.sdk.metrics.MetricsFilter;
import org.apache.beam.sdk.metrics.SinkMetrics;
import org.apache.beam.sdk.metrics.SourceMetrics;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.testing.PAssert;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.Distinct;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.Flatten;
import org.apache.beam.sdk.transforms.Max;
import org.apache.beam.sdk.transforms.Min;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.SerializableFunction;
import org.apache.beam.sdk.transforms.Values;
import org.apache.beam.sdk.transforms.display.DisplayData;
import org.apache.beam.sdk.util.CoderUtils;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionList;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.Utils;
import org.hamcrest.collection.IsIterableContainingInAnyOrder;
import org.hamcrest.collection.IsIterableWithSize;
import org.joda.time.Instant;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tests of {@link KafkaIO}.
* Run with 'mvn test -Dkafka.clients.version=0.10.1.1',
* or 'mvn test -Dkafka.clients.version=0.9.0.1' for either Kafka client version.
*/
@RunWith(JUnit4.class)
public class KafkaIOTest {
private static final Logger LOG = LoggerFactory.getLogger(KafkaIOTest.class);
/*
   * The tests below borrow code and structure from CountingSourceTest. In addition they verify
   * that the reader interleaves the records from multiple partitions.
*
* Other tests to consider :
* - test KafkaRecordCoder
*/
@Rule
public final transient TestPipeline p = TestPipeline.create();
@Rule
public ExpectedException thrown = ExpectedException.none();
  // Creates a mock consumer with records distributed among the given topics, each with the given number
// of partitions. Records are assigned in round-robin order among the partitions.
private static MockConsumer<byte[], byte[]> mkMockConsumer(
List<String> topics, int partitionsPerTopic, int numElements,
OffsetResetStrategy offsetResetStrategy) {
final List<TopicPartition> partitions = new ArrayList<>();
final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();
for (String topic : topics) {
List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
for (int i = 0; i < partitionsPerTopic; i++) {
TopicPartition tp = new TopicPartition(topic, i);
partitions.add(tp);
partIds.add(new PartitionInfo(topic, i, null, null, null));
records.put(tp, new ArrayList<ConsumerRecord<byte[], byte[]>>());
}
partitionMap.put(topic, partIds);
}
int numPartitions = partitions.size();
final long[] offsets = new long[numPartitions];
for (int i = 0; i < numElements; i++) {
int pIdx = i % numPartitions;
TopicPartition tp = partitions.get(pIdx);
records.get(tp).add(
new ConsumerRecord<>(
tp.topic(),
tp.partition(),
offsets[pIdx]++,
ByteBuffer.wrap(new byte[4]).putInt(i).array(), // key is 4 byte record id
ByteBuffer.wrap(new byte[8]).putLong(i).array())); // value is 8 byte record id
}
// This is updated when reader assigns partitions.
final AtomicReference<List<TopicPartition>> assignedPartitions =
new AtomicReference<>(Collections.<TopicPartition>emptyList());
final MockConsumer<byte[], byte[]> consumer =
new MockConsumer<byte[], byte[]>(offsetResetStrategy) {
// override assign() in order to set offset limits & to save assigned partitions.
          // The '@Override' keyword is omitted here so this works with both Kafka client 0.9 and 0.10:
          // 1. SpEL can find this function whether the input is a List or a Collection;
          // 2. List extends Collection, so super.assign() can resolve to either assign(List)
          //    or assign(Collection).
public void assign(final List<TopicPartition> assigned) {
super.assign(assigned);
assignedPartitions.set(ImmutableList.copyOf(assigned));
for (TopicPartition tp : assigned) {
updateBeginningOffsets(ImmutableMap.of(tp, 0L));
updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
}
}
// Override offsetsForTimes() in order to look up the offsets by timestamp.
          // The '@Override' keyword is omitted here because Kafka client versions before 0.10.1.0
          // do not have this method.
          // It should return Map<TopicPartition, OffsetAndTimestamp>, but versions before 0.10.1.0
          // do not have the OffsetAndTimestamp class. So return a raw type and use reflection
          // here.
@SuppressWarnings("unchecked")
public Map offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
HashMap<TopicPartition, Object> result = new HashMap<>();
try {
Class<?> cls = Class.forName("org.apache.kafka.clients.consumer.OffsetAndTimestamp");
// OffsetAndTimestamp(long offset, long timestamp)
Constructor constructor = cls.getDeclaredConstructor(long.class, long.class);
// In test scope, timestamp == offset.
for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
long maxOffset = offsets[partitions.indexOf(entry.getKey())];
Long offset = entry.getValue();
                  if (offset >= maxOffset) {
                    // No record exists at or after the requested timestamp; the real consumer maps
                    // such partitions to null, so do the same here instead of unboxing a null Long.
                    offset = null;
                  }
                  result.put(
                      entry.getKey(),
                      offset == null ? null : constructor.newInstance(offset, offset));
}
return result;
} catch (ClassNotFoundException | IllegalAccessException
| InstantiationException | NoSuchMethodException | InvocationTargetException e) {
throw new RuntimeException(e);
}
}
};
for (String topic : topics) {
consumer.updatePartitions(topic, partitionMap.get(topic));
}
// MockConsumer does not maintain any relationship between partition seek position and the
// records added. e.g. if we add 10 records to a partition and then seek to end of the
// partition, MockConsumer is still going to return the 10 records in next poll. It is
// our responsibility to make sure currently enqueued records sync with partition offsets.
// The following task will be called inside each invocation to MockConsumer.poll().
// We enqueue only the records with the offset >= partition's current position.
Runnable recordEnqueueTask = new Runnable() {
@Override
public void run() {
// add all the records with offset >= current partition position.
for (TopicPartition tp : assignedPartitions.get()) {
long curPos = consumer.position(tp);
for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
if (r.offset() >= curPos) {
consumer.addRecord(r);
}
}
}
consumer.schedulePollTask(this);
}
};
consumer.schedulePollTask(recordEnqueueTask);
return consumer;
}
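  // Serializable factory that KafkaIO invokes (possibly on workers) to obtain the mock consumer
  // defined above instead of creating a real KafkaConsumer.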
private static class ConsumerFactoryFn
implements SerializableFunction<Map<String, Object>, Consumer<byte[], byte[]>> {
private final List<String> topics;
private final int partitionsPerTopic;
private final int numElements;
private final OffsetResetStrategy offsetResetStrategy;
public ConsumerFactoryFn(List<String> topics,
int partitionsPerTopic,
int numElements,
OffsetResetStrategy offsetResetStrategy) {
this.topics = topics;
this.partitionsPerTopic = partitionsPerTopic;
this.numElements = numElements;
this.offsetResetStrategy = offsetResetStrategy;
}
@Override
public Consumer<byte[], byte[]> apply(Map<String, Object> config) {
return mkMockConsumer(topics, partitionsPerTopic, numElements, offsetResetStrategy);
}
}
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
int numElements,
@Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {
return mkKafkaReadTransform(numElements, numElements, timestampFn);
}
/**
* Creates a consumer with two topics, with 10 partitions each.
   * numElements records are assigned (round-robin) across all the 20 partitions.
*/
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
int numElements,
int maxNumRecords,
@Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {
List<String> topics = ImmutableList.of("topic_a", "topic_b");
KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read()
.withBootstrapServers("myServer1:9092,myServer2:9092")
.withTopics(topics)
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 20 partitions
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(maxNumRecords);
if (timestampFn != null) {
return reader.withTimestampFn(timestampFn);
} else {
return reader;
}
}
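  // PAssert predicate that checks every element in the PCollection is a multiple of 'num'.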
private static class AssertMultipleOf implements SerializableFunction<Iterable<Long>, Void> {
private final int num;
public AssertMultipleOf(int num) {
this.num = num;
}
@Override
public Void apply(Iterable<Long> values) {
for (Long v : values) {
assertEquals(0, v % num);
}
return null;
}
}
public static void addCountingAsserts(PCollection<Long> input, long numElements) {
// Count == numElements
// Unique count == numElements
// Min == 0
// Max == numElements-1
addCountingAsserts(input, numElements, numElements, 0L, numElements - 1);
}
public static void addCountingAsserts(
PCollection<Long> input, long count, long uniqueCount, long min, long max) {
PAssert
.thatSingleton(input.apply("Count", Count.<Long>globally()))
.isEqualTo(count);
PAssert
.thatSingleton(input.apply(Distinct.<Long>create())
.apply("UniqueCount", Count.<Long>globally()))
.isEqualTo(uniqueCount);
PAssert
.thatSingleton(input.apply("Min", Min.<Long>globally()))
.isEqualTo(min);
PAssert
.thatSingleton(input.apply("Max", Max.<Long>globally()))
.isEqualTo(max);
}
@Test
public void testUnboundedSource() {
int numElements = 1000;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnreachableKafkaBrokers() {
// Expect an exception when the Kafka brokers are not reachable on the workers.
// We specify partitions explicitly so that splitting does not involve server interaction.
// Set request timeout to 10ms so that test does not take long.
thrown.expect(Exception.class);
thrown.expectMessage("Reader-0: Timeout while initializing partition 'test-0'");
int numElements = 1000;
PCollection<Long> input = p
.apply(KafkaIO.<Integer, Long>read()
.withBootstrapServers("8.8.8.8:9092") // Google public DNS ip.
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 0)))
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.updateConsumerProperties(ImmutableMap.<String, Object>of(
ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 10,
ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 5,
ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 8,
ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 8))
.withMaxNumRecords(10)
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnboundedSourceWithSingleTopic() {
    // same as testUnboundedSource, but with a single topic
int numElements = 1000;
String topic = "my_topic";
KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopic("my_topic")
.withConsumerFactoryFn(new ConsumerFactoryFn(
ImmutableList.of(topic), 10, numElements, OffsetResetStrategy.EARLIEST))
.withMaxNumRecords(numElements)
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class);
PCollection<Long> input = p
.apply(reader.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnboundedSourceWithExplicitPartitions() {
int numElements = 1000;
List<String> topics = ImmutableList.of("test");
KafkaIO.Read<byte[], Long> reader = KafkaIO.<byte[], Long>read()
.withBootstrapServers("none")
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5)))
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 10 partitions
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements / 10);
PCollection<Long> input = p
.apply(reader.withoutMetadata())
.apply(Values.<Long>create());
// assert that every element is a multiple of 5.
PAssert
.that(input)
.satisfies(new AssertMultipleOf(5));
PAssert
.thatSingleton(input.apply(Count.<Long>globally()))
.isEqualTo(numElements / 10L);
p.run();
}
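  // Emits (element value - element timestamp); with ValueAsTimestampFn the difference is expected
  // to be 0 for every element.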
private static class ElementValueDiff extends DoFn<Long, Long> {
@ProcessElement
public void processElement(ProcessContext c) throws Exception {
c.output(c.element() - c.timestamp().getMillis());
}
}
@Test
public void testUnboundedSourceTimestamps() {
int numElements = 1000;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
PCollection<Long> diffs = input
.apply("TimestampDiff", ParDo.of(new ElementValueDiff()))
.apply("DistinctTimestamps", Distinct.<Long>create());
// This assert also confirms that diffs only has one unique value.
PAssert.thatSingleton(diffs).isEqualTo(0L);
p.run();
}
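  /**
   * A DoFn that strips the Kafka metadata from each KafkaRecord and outputs just the key-value
   * pair.
   */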
private static class RemoveKafkaMetadata<K, V> extends DoFn<KafkaRecord<K, V>, KV<K, V>> {
@ProcessElement
public void processElement(ProcessContext ctx) throws Exception {
ctx.output(ctx.element().getKV());
}
}
@Test
public void testUnboundedSourceSplits() throws Exception {
int numElements = 1000;
int numSplits = 10;
// Coders must be specified explicitly here due to the way the transform
// is used in the test.
UnboundedSource<KafkaRecord<Integer, Long>, ?> initial =
mkKafkaReadTransform(numElements, null)
.withKeyDeserializerAndCoder(IntegerDeserializer.class, BigEndianIntegerCoder.of())
.withValueDeserializerAndCoder(LongDeserializer.class, BigEndianLongCoder.of())
.makeSource();
List<? extends UnboundedSource<KafkaRecord<Integer, Long>, ?>> splits =
initial.split(numSplits, p.getOptions());
assertEquals("Expected exact splitting", numSplits, splits.size());
long elementsPerSplit = numElements / numSplits;
assertEquals("Expected even splits", numElements, elementsPerSplit * numSplits);
PCollectionList<Long> pcollections = PCollectionList.empty(p);
for (int i = 0; i < splits.size(); ++i) {
pcollections = pcollections.and(
p.apply("split" + i, Read.from(splits.get(i)).withMaxNumRecords(elementsPerSplit))
.apply("Remove Metadata " + i, ParDo.of(new RemoveKafkaMetadata<Integer, Long>()))
.apply("collection " + i, Values.<Long>create()));
}
PCollection<Long> input = pcollections.apply(Flatten.<Long>pCollections());
addCountingAsserts(input, numElements);
p.run();
}
/**
* A timestamp function that uses the given value as the timestamp.
*/
private static class ValueAsTimestampFn
implements SerializableFunction<KV<Integer, Long>, Instant> {
@Override
public Instant apply(KV<Integer, Long> input) {
return new Instant(input.getValue());
}
}
  // Kafka records are read in a separate thread inside the reader. As a result, advance() might
  // not return a record even from the mock consumer, especially on the first call.
  // This helper method loops until a record is read.
private static void advanceOnce(UnboundedReader<?> reader, boolean isStarted) throws IOException {
if (!isStarted && reader.start()) {
return;
}
while (!reader.advance()) {
      // Very rarely will there be more than one attempt.
      // In case of a bug we might end up looping forever; the test will then fail with a timeout.
      // Avoid hard CPU spinning in case of a test failure.
try {
Thread.sleep(1);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
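  // Reads part of the input, checkpoints, restores the reader from the checkpoint mark and
  // verifies that the remaining elements are read in sequence.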
@Test
public void testUnboundedSourceCheckpointMark() throws Exception {
    int numElements = 85; // 85 to make sure some partitions have more records than others.
// create a single split:
UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);
final int numToSkip = 20; // one from each partition.
// advance numToSkip elements
for (int i = 0; i < numToSkip; ++i) {
advanceOnce(reader, i > 0);
}
// Confirm that we get the expected element in sequence before checkpointing.
assertEquals(numToSkip - 1, (long) reader.getCurrent().getKV().getValue());
assertEquals(numToSkip - 1, reader.getCurrentTimestamp().getMillis());
// Checkpoint and restart, and confirm that the source continues correctly.
KafkaCheckpointMark mark = CoderUtils.clone(
source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());
reader = source.createReader(null, mark);
// Confirm that we get the next elements in sequence.
    // This also confirms that the reader interleaves records from the partitions.
for (int i = numToSkip; i < numElements; i++) {
advanceOnce(reader, i > numToSkip);
assertEquals(i, (long) reader.getCurrent().getKV().getValue());
assertEquals(i, reader.getCurrentTimestamp().getMillis());
}
}
@Test
public void testUnboundedSourceCheckpointMarkWithEmptyPartitions() throws Exception {
    // Similar to testUnboundedSourceCheckpointMark(), but verifies that the source resumes
    // properly from empty partitions, without missing messages added since the checkpoint.
// Initialize consumer with fewer elements than number of partitions so that some are empty.
int initialNumElements = 5;
UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
mkKafkaReadTransform(initialNumElements, new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);
for (int l = 0; l < initialNumElements; ++l) {
advanceOnce(reader, l > 0);
}
// Checkpoint and restart, and confirm that the source continues correctly.
KafkaCheckpointMark mark = CoderUtils.clone(
source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());
    // Create another source with a MockConsumer using OffsetResetStrategy.LATEST. This ensures
    // that the reader needs to explicitly seek to the first offset for partitions that were empty.
int numElements = 100; // all the 20 partitions will have elements
List<String> topics = ImmutableList.of("topic_a", "topic_b");
source = KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopics(topics)
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.LATEST))
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements)
.withTimestampFn(new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
reader = source.createReader(null, mark);
// Verify in any order. As the partitions are unevenly read, the returned records are not in a
    // simple order. Note that testUnboundedSourceCheckpointMark() verifies round-robin order.
List<Long> expected = new ArrayList<>();
List<Long> actual = new ArrayList<>();
for (long i = initialNumElements; i < numElements; i++) {
advanceOnce(reader, i > initialNumElements);
expected.add(i);
actual.add(reader.getCurrent().getKV().getValue());
}
assertThat(actual, IsIterableContainingInAnyOrder.containsInAnyOrder(expected.toArray()));
}
@Test
public void testUnboundedSourceMetrics() {
int numElements = 1000;
String readStep = "readFromKafka";
p.apply(readStep,
mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata());
PipelineResult result = p.run();
String splitId = "0";
MetricName elementsRead = SourceMetrics.elementsRead().getName();
MetricName elementsReadBySplit = SourceMetrics.elementsReadBySplit(splitId).getName();
MetricName bytesRead = SourceMetrics.bytesRead().getName();
MetricName bytesReadBySplit = SourceMetrics.bytesReadBySplit(splitId).getName();
MetricName backlogElementsOfSplit = SourceMetrics.backlogElementsOfSplit(splitId).getName();
MetricName backlogBytesOfSplit = SourceMetrics.backlogBytesOfSplit(splitId).getName();
MetricQueryResults metrics = result.metrics().queryMetrics(
MetricsFilter.builder().build());
Iterable<MetricResult<Long>> counters = metrics.counters();
assertThat(counters, hasItem(attemptedMetricsResult(
elementsRead.namespace(),
elementsRead.name(),
readStep,
1000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
elementsReadBySplit.namespace(),
elementsReadBySplit.name(),
readStep,
1000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
bytesRead.namespace(),
bytesRead.name(),
readStep,
12000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
bytesReadBySplit.namespace(),
bytesReadBySplit.name(),
readStep,
12000L)));
MetricQueryResults backlogElementsMetrics =
result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.named(
backlogElementsOfSplit.namespace(),
backlogElementsOfSplit.name()))
.build());
    // Since gauge values may be inconsistent in some environments, assert only on their existence.
assertThat(backlogElementsMetrics.gauges(),
IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1));
MetricQueryResults backlogBytesMetrics =
result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.named(
backlogBytesOfSplit.namespace(),
backlogBytesOfSplit.name()))
.build());
    // Since gauge values may be inconsistent in some environments, assert only on their existence.
assertThat(backlogBytesMetrics.gauges(),
IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1));
}
@Test
public void testSink() throws Exception {
    // Simply read from the Kafka source and write to the Kafka sink. Then verify the records
    // are correctly published to the mock Kafka producer.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false);
}
}
@Test
public void testValuesSink() throws Exception {
    // similar to testSink(), but uses the values() interface.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(Values.<Long>create()) // there are no keys
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))
.values());
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, true);
}
}
@Test
public void testEOSink() {
// testSink() with EOS enabled.
// This does not actually inject retries in a stage to test exactly-once-semantics.
// It mainly exercises the code in normal flow without retries.
    // Ideally we should test the EOS sink by triggering replays of messages between stages.
    // It is not feasible to test such retries with the direct runner. When DoFnTester supports
    // state, we can test the KafkaEOWriter DoFn directly to ensure it handles retries correctly.
if (!ProducerSpEL.supportsTransactions()) {
LOG.warn("testEOSink() is disabled as Kafka client version does not support transactions.");
return;
}
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withEOS(1, "test")
.withConsumerFactoryFn(new ConsumerFactoryFn(
Lists.newArrayList(topic), 10, 10, OffsetResetStrategy.EARLIEST))
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false);
}
}
@Test
public void testSinkWithSendErrors() throws Throwable {
    // Similar to testSink(), except that up to 10 of the send calls to the producer will fail
    // asynchronously.
    // TODO: Ideally we want the pipeline to run to completion by retrying bundles that fail.
    // We limit the number of errors injected to 10 below. This would reflect a real streaming
    // pipeline. But I am not sure how to achieve that. For now expect an exception:
thrown.expect(InjectedErrorException.class);
thrown.expectMessage("Injected Error #1");
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThreadWithErrors =
new ProducerSendCompletionThread(producerWrapper.mockProducer, 10, 100).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
try {
p.run();
} catch (PipelineExecutionException e) {
// throwing inner exception helps assert that first exception is thrown from the Sink
throw e.getCause().getCause();
} finally {
completionThreadWithErrors.shutdown();
}
}
}
@Test
public void testUnboundedSourceStartReadTime() {
assumeTrue(new ConsumerSpEL().hasOffsetsForTimes());
int numElements = 1000;
    // In this MockConsumer, element timestamps and offsets are equal and there are 20
    // partitions, so setting this startTime reads half of the elements.
int startTime = numElements / 20 / 2;
int maxNumRecords = numElements / 2;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, maxNumRecords, new ValueAsTimestampFn())
.withStartReadTime(new Instant(startTime))
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, maxNumRecords, maxNumRecords, maxNumRecords, numElements - 1);
p.run();
}
@Rule public ExpectedException noMessagesException = ExpectedException.none();
@Test
public void testUnboundedSourceStartReadTimeException() {
assumeTrue(new ConsumerSpEL().hasOffsetsForTimes());
noMessagesException.expect(RuntimeException.class);
int numElements = 1000;
    // In this MockConsumer, element timestamps and offsets are equal and there are 20
    // partitions, so setting this startTime reads no elements at all.
int startTime = numElements / 20;
p.apply(mkKafkaReadTransform(numElements, numElements, new ValueAsTimestampFn())
.withStartReadTime(new Instant(startTime))
.withoutMetadata())
.apply(Values.<Long>create());
p.run();
}
@Test
public void testSourceDisplayData() {
KafkaIO.Read<Integer, Long> read = mkKafkaReadTransform(10, null);
DisplayData displayData = DisplayData.from(read);
assertThat(displayData, hasDisplayItem("topics", "topic_a,topic_b"));
assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
@Test
public void testSourceWithExplicitPartitionsDisplayData() {
KafkaIO.Read<byte[], Long> read = KafkaIO.<byte[], Long>read()
.withBootstrapServers("myServer1:9092,myServer2:9092")
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5),
new TopicPartition("test", 6)))
.withConsumerFactoryFn(new ConsumerFactoryFn(
Lists.newArrayList("test"), 10, 10, OffsetResetStrategy.EARLIEST)) // 10 partitions
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class);
DisplayData displayData = DisplayData.from(read);
assertThat(displayData, hasDisplayItem("topicPartitions", "test-5,test-6"));
assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
@Test
public void testSinkDisplayData() {
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
KafkaIO.Write<Integer, Long> write = KafkaIO.<Integer, Long>write()
.withBootstrapServers("myServerA:9092,myServerB:9092")
.withTopic("myTopic")
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey));
DisplayData displayData = DisplayData.from(write);
assertThat(displayData, hasDisplayItem("topic", "myTopic"));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServerA:9092,myServerB:9092"));
assertThat(displayData, hasDisplayItem("retries", 3));
}
}
// interface for testing coder inference
private interface DummyInterface<T> {
}
// interface for testing coder inference
private interface DummyNonparametricInterface {
}
// class for testing coder inference
private static class DeserializerWithInterfaces
implements DummyInterface<String>, DummyNonparametricInterface,
Deserializer<Long> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public Long deserialize(String topic, byte[] bytes) {
return 0L;
}
@Override
public void close() {
}
}
  // class for which a coder cannot be inferred
private static class NonInferableObject {
}
// class for testing coder inference
private static class NonInferableObjectDeserializer
implements Deserializer<NonInferableObject> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public NonInferableObject deserialize(String topic, byte[] bytes) {
return new NonInferableObject();
}
@Override
public void close() {
}
}
@Test
public void testInferKeyCoder() {
CoderRegistry registry = CoderRegistry.createDefault();
assertTrue(KafkaIO.inferCoder(registry, LongDeserializer.class).getValueCoder()
instanceof VarLongCoder);
assertTrue(KafkaIO.inferCoder(registry, StringDeserializer.class).getValueCoder()
instanceof StringUtf8Coder);
assertTrue(KafkaIO.inferCoder(registry, InstantDeserializer.class).getValueCoder()
instanceof InstantCoder);
assertTrue(KafkaIO.inferCoder(registry, DeserializerWithInterfaces.class).getValueCoder()
instanceof VarLongCoder);
}
@Rule public ExpectedException cannotInferException = ExpectedException.none();
@Test
public void testInferKeyCoderFailure() throws Exception {
cannotInferException.expect(RuntimeException.class);
CoderRegistry registry = CoderRegistry.createDefault();
KafkaIO.inferCoder(registry, NonInferableObjectDeserializer.class);
}
@Test
public void testSinkMetrics() throws Exception {
    // Simply read from the Kafka source and write to the Kafka sink. Then verify the metrics are reported.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply("writeToKafka", KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
PipelineResult result = p.run();
MetricName elementsWritten = SinkMetrics.elementsWritten().getName();
MetricQueryResults metrics = result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(MetricNameFilter.inNamespace(elementsWritten.namespace()))
.build());
assertThat(metrics.counters(), hasItem(
attemptedMetricsResult(
elementsWritten.namespace(),
elementsWritten.name(),
"writeToKafka",
1000L)));
completionThread.shutdown();
}
}
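  /**
   * Verifies the records received by the mock producer for the given topic: records are sorted by
   * value and checked for topic, key (unless keyIsAbsent) and value.
   */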
private static void verifyProducerRecords(MockProducer<Integer, Long> mockProducer,
String topic, int numElements, boolean keyIsAbsent) {
// verify that appropriate messages are written to kafka
List<ProducerRecord<Integer, Long>> sent = mockProducer.history();
// sort by values
Collections.sort(sent, new Comparator<ProducerRecord<Integer, Long>>() {
@Override
public int compare(ProducerRecord<Integer, Long> o1, ProducerRecord<Integer, Long> o2) {
return Long.compare(o1.value(), o2.value());
}
});
for (int i = 0; i < numElements; i++) {
ProducerRecord<Integer, Long> record = sent.get(i);
assertEquals(topic, record.topic());
if (keyIsAbsent) {
assertNull(record.key());
} else {
assertEquals(i, record.key().intValue());
}
assertEquals(i, record.value().longValue());
}
}
/**
   * A wrapper over MockProducer that also places the mock producer in the global MOCK_PRODUCER_MAP.
   * The map is needed so that the producer returned by ProducerFactoryFn during the pipeline run
   * can be used for verification after the test. We also override the {@code flush()} method in
   * MockProducer so that the test can control the behavior of {@code send()} (e.g. to inject errors).
*/
private static class MockProducerWrapper implements AutoCloseable {
final String producerKey;
final MockProducer<Integer, Long> mockProducer;
    // MockProducer has a "closed" method starting with version 0.11.
private static Method closedMethod;
static {
try {
closedMethod = MockProducer.class.getMethod("closed");
} catch (NoSuchMethodException e) {
closedMethod = null;
}
}
MockProducerWrapper() {
producerKey = String.valueOf(ThreadLocalRandom.current().nextLong());
mockProducer = new MockProducer<Integer, Long>(
false, // disable synchronous completion of send. see ProducerSendCompletionThread below.
new IntegerSerializer(),
new LongSerializer()) {
        // Override flush() so that it does not complete all the waiting sends, giving
        // ProducerSendCompletionThread a chance to inject errors.
@Override
public void flush() {
while (completeNext()) {
// there are some uncompleted records. let the completion thread handle them.
try {
Thread.sleep(10);
} catch (InterruptedException e) {
// ok to retry.
}
}
}
};
// Add the producer to the global map so that producer factory function can access it.
assertNull(MOCK_PRODUCER_MAP.putIfAbsent(producerKey, mockProducer));
}
public void close() {
MOCK_PRODUCER_MAP.remove(producerKey);
try {
if (closedMethod == null || !((Boolean) closedMethod.invoke(mockProducer))) {
mockProducer.close();
}
} catch (Exception e) { // Not expected.
throw new RuntimeException(e);
}
}
}
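  // Global map from producerKey to MockProducer, so that the ProducerFactoryFn instances created
  // during the pipeline run return the same mock producer that the test verifies afterwards.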
private static final ConcurrentMap<String, MockProducer<Integer, Long>> MOCK_PRODUCER_MAP =
new ConcurrentHashMap<>();
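  /**
   * A producer factory that configures the key/value serializers from the supplied config and
   * returns the shared MockProducer registered under producerKey.
   */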
private static class ProducerFactoryFn
implements SerializableFunction<Map<String, Object>, Producer<Integer, Long>> {
final String producerKey;
ProducerFactoryFn(String producerKey) {
this.producerKey = producerKey;
}
@SuppressWarnings("unchecked")
@Override
public Producer<Integer, Long> apply(Map<String, Object> config) {
// Make sure the config is correctly set up for serializers.
// There may not be a key serializer if we're interested only in values.
if (config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) != null) {
Utils.newInstance(
((Class<?>) config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG))
.asSubclass(Serializer.class)
).configure(config, true);
}
Utils.newInstance(
((Class<?>) config.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG))
.asSubclass(Serializer.class)
).configure(config, false);
      // Returning the same producer for each instance in a pipeline seems to work fine currently.
      // If DirectRunner creates multiple DoFn instances for sinks, we might need to handle
      // it appropriately, i.e. allow multiple producers for each producerKey and concatenate
      // all the messages written to each producer for verification after the pipeline finishes.
return MOCK_PRODUCER_MAP.get(producerKey);
}
}
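  /** Exception thrown by ProducerSendCompletionThread when it injects a send failure. */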
private static class InjectedErrorException extends RuntimeException {
InjectedErrorException(String message) {
super(message);
}
}
/**
   * We start MockProducer with auto-completion disabled. That implies a record is not marked sent
   * until #completeNext() is called on it. This class starts a thread to asynchronously 'complete'
   * the sends. During completion, we can also make those requests fail. This error injection
   * is used in one of the tests.
*/
private static class ProducerSendCompletionThread {
private final MockProducer<Integer, Long> mockProducer;
private final int maxErrors;
private final int errorFrequency;
private final AtomicBoolean done = new AtomicBoolean(false);
private final ExecutorService injectorThread;
private int numCompletions = 0;
ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer) {
// complete everything successfully
this(mockProducer, 0, 0);
}
ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer,
int maxErrors,
int errorFrequency) {
this.mockProducer = mockProducer;
this.maxErrors = maxErrors;
this.errorFrequency = errorFrequency;
injectorThread = Executors.newSingleThreadExecutor();
}
ProducerSendCompletionThread start() {
injectorThread.submit(new Runnable() {
@Override
public void run() {
int errorsInjected = 0;
while (!done.get()) {
boolean successful;
if (errorsInjected < maxErrors && ((numCompletions + 1) % errorFrequency) == 0) {
successful = mockProducer.errorNext(
new InjectedErrorException("Injected Error #" + (errorsInjected + 1)));
if (successful) {
errorsInjected++;
}
} else {
successful = mockProducer.completeNext();
}
if (successful) {
numCompletions++;
} else {
// wait a bit since there are no unsent records
try {
Thread.sleep(1);
} catch (InterruptedException e) {
// ok to retry.
}
}
}
}
});
return this;
}
void shutdown() {
done.set(true);
injectorThread.shutdown();
try {
assertTrue(injectorThread.awaitTermination(10, TimeUnit.SECONDS));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
}
| wangyum/beam | sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOTest.java | Java | apache-2.0 | 49,154 |
/*
* Copyright 2017 Exorath
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.exorath.plugin.game.cakewars.rewards;
import com.exorath.plugin.game.cakewars.Main;
import com.exorath.service.currency.api.CurrencyServiceAPI;
import com.exorath.victoryHandler.rewards.CurrencyReward;
import net.md_5.bungee.api.ChatColor;
/**
* Created by toonsev on 5/31/2017.
*/
public class KillsReward extends CurrencyReward {
public static final int CRUMBS_PER_KILL = 2;
private int kills;
public KillsReward(CurrencyServiceAPI currencyServiceAPI) {
super(null, currencyServiceAPI, Main.CRUMBS_CURRENCY, 0);
setCurrencyColor(ChatColor.GOLD);
setCurrencyName("Crumbs");
}
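    /**
     * Registers one more kill and updates the reward amount (CRUMBS_PER_KILL per kill) and reason.
     */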
    public void addKill() {
kills++;
setAmount(kills*CRUMBS_PER_KILL);
setReason("Killing " + kills + " Players");
}
}
| Exorath/CakeWarsGamePlugin | src/main/java/com/exorath/plugin/game/cakewars/rewards/KillsReward.java | Java | apache-2.0 | 1,398 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.irc;
import java.util.ArrayList;
import java.util.Dictionary;
import java.util.Hashtable;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import org.schwering.irc.lib.IRCConnection;
import org.schwering.irc.lib.IRCEventAdapter;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class IrcEndpointTest {
private IrcComponent component;
private IrcConfiguration configuration;
private IRCConnection connection;
private IrcEndpoint endpoint;
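    // Sets up mocked component, configuration and connection with two channels: "chan1" without a
    // key and "chan2" with key "chan2key".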
@Before
public void doSetup() {
component = mock(IrcComponent.class);
configuration = mock(IrcConfiguration.class);
connection = mock(IRCConnection.class);
List<String> channels = new ArrayList<String>();
Dictionary<String, String> keys = new Hashtable<String, String>();
channels.add("chan1");
channels.add("chan2");
keys.put("chan1", "");
keys.put("chan2", "chan2key");
when(configuration.getChannels()).thenReturn(channels);
when(configuration.getKey("chan1")).thenReturn("");
when(configuration.getKey("chan2")).thenReturn("chan2key");
when(component.getIRCConnection(configuration)).thenReturn(connection);
endpoint = new IrcEndpoint("foo", component, configuration);
}
@Test
public void doJoinChannelTestNoKey() throws Exception {
endpoint.joinChannel("chan1");
verify(connection).doJoin("chan1");
}
@Test
public void doJoinChannelTestKey() throws Exception {
endpoint.joinChannel("chan2");
verify(connection).doJoin("chan2", "chan2key");
}
@Test
public void doJoinChannels() throws Exception {
endpoint.joinChannels();
verify(connection).doJoin("chan1");
verify(connection).doJoin("chan2", "chan2key");
}
@Test
public void doHandleIrcErrorNickInUse() throws Exception {
when(connection.getNick()).thenReturn("nick");
endpoint.handleIrcError(IRCEventAdapter.ERR_NICKNAMEINUSE, "foo");
verify(connection).doNick("nick-");
when(connection.getNick()).thenReturn("nick---");
        // confirm doNick was never called with "foo"
verify(connection, never()).doNick("foo");
}
}
| everttigchelaar/camel-svn | components/camel-irc/src/test/java/org/apache/camel/component/irc/IrcEndpointTest.java | Java | apache-2.0 | 3,292 |
package com.zxinsight.classifier.ruleengine.admin;
import java.rmi.RemoteException;
import java.util.Map;
import javax.rules.admin.LocalRuleExecutionSetProvider;
import javax.rules.admin.RuleAdministrator;
import javax.rules.admin.RuleExecutionSet;
import javax.rules.admin.RuleExecutionSetDeregistrationException;
import javax.rules.admin.RuleExecutionSetProvider;
import javax.rules.admin.RuleExecutionSetRegisterException;
@SuppressWarnings("rawtypes")
public class RuleAdministratorImpl implements RuleAdministrator {
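    /**
     * Unbinds the rule execution set registered under the given URI, throwing if none is bound.
     */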
@Override
public void deregisterRuleExecutionSet(String bindUri, Map properties)
throws RuleExecutionSetDeregistrationException, RemoteException {
RuleExecutionSetRepository repository = RuleExecutionSetRepository
.getInstance();
if (repository.getRuleExecutionSet(bindUri) == null) {
throw new RuleExecutionSetDeregistrationException(
"no execution set bound to: " + bindUri);
}
repository.unregisterRuleExecutionSet(bindUri);
}
@Override
public LocalRuleExecutionSetProvider getLocalRuleExecutionSetProvider(
Map properties) throws RemoteException {
return new LocalRuleExecutionSetProviderImple();
}
@Override
public RuleExecutionSetProvider getRuleExecutionSetProvider(Map properties)
throws RemoteException {
return new RuleExecutionSetProviderImpl();
}
@Override
public void registerRuleExecutionSet(String bindUri,
RuleExecutionSet ruleExecutionSet, Map properties)
throws RuleExecutionSetRegisterException, RemoteException {
RuleExecutionSetRepository repository = RuleExecutionSetRepository
.getInstance();
repository.registerRuleExecutionSet(bindUri, ruleExecutionSet);
}
}
| kevin-ww/commentClassifier | src/main/java/com/zxinsight/classifier/ruleengine/admin/RuleAdministratorImpl.java | Java | apache-2.0 | 1,738 |
package trendli.me.makhana.common.entities;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public enum ActionType
{
MOVE( "Moving", "newTile" ), FABRICATING( "Fabricating" );
private final String verb;
private final List< String > dataKeys;
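    /**
     * Creates an action type with a display verb and optional data keys (an empty list when none
     * are given).
     */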
private ActionType( String verb, String... dataKeys )
{
this.verb = verb;
if ( dataKeys != null )
{
this.dataKeys = Arrays.asList( dataKeys );
}
else
{
this.dataKeys = Collections.emptyList( );
}
}
/**
* @return the dataKeys
*/
public List< String > getDataKeys( )
{
return dataKeys;
}
/**
* @return the verb
*/
public String getVerb( )
{
return verb;
}
}
| elliottmb/makhana | common/src/main/java/trendli/me/makhana/common/entities/ActionType.java | Java | apache-2.0 | 806 |
/*
* Copyright 2014-2015 Nikos Grammatikos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://raw.githubusercontent.com/nikosgram13/OglofusProtection/master/LICENSE
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package me.nikosgram.oglofus.protection;
import com.google.common.base.Optional;
import com.sk89q.intake.argument.ArgumentException;
import com.sk89q.intake.argument.ArgumentParseException;
import com.sk89q.intake.argument.CommandArgs;
import com.sk89q.intake.parametric.ProvisionException;
import me.nikosgram.oglofus.protection.api.ActionResponse;
import me.nikosgram.oglofus.protection.api.CommandExecutor;
import me.nikosgram.oglofus.protection.api.entity.User;
import me.nikosgram.oglofus.protection.api.message.MessageType;
import me.nikosgram.oglofus.protection.api.region.ProtectionRank;
import me.nikosgram.oglofus.protection.api.region.ProtectionRegion;
import me.nikosgram.oglofus.protection.api.region.ProtectionStaff;
import org.apache.commons.lang3.ClassUtils;
import org.spongepowered.api.entity.player.Player;
import org.spongepowered.api.service.user.UserStorage;
import org.spongepowered.api.util.command.CommandSource;
import javax.annotation.Nullable;
import java.lang.annotation.Annotation;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;
public class OglofusProtectionStaff implements ProtectionStaff {
private final List<User> staff = new ArrayList<User>();
private final Map<UUID, ProtectionRank> ranks = new HashMap<UUID, ProtectionRank>();
private final User owner;
private final ProtectionRegion region;
private final OglofusSponge sponge;
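    /**
     * Loads the region owner and staff from the "oglofus_regions" storage, caching each staff
     * member's rank by UUID.
     */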
protected OglofusProtectionStaff(ProtectionRegion region, OglofusSponge sponge) {
this.region = region;
this.sponge = sponge;
owner = sponge.getUserManager().getUser(UUID.fromString(sponge.connector.getString(
"oglofus_regions", "uuid", region.getUuid().toString(), "owner"
).get())).get();
Map<String, String> staff = sponge.connector.getStringMap(
"oglofus_regions", "uuid", region.getUuid().toString(), new String[]{"player", "rank"}
);
for (String uid : staff.keySet()) {
UUID uuid = UUID.fromString(uid);
this.staff.add(sponge.getUserManager().getUser(uuid).get());
ranks.put(uuid, ProtectionRank.valueOf(staff.get(uid)));
}
}
@Override
public UUID getOwnerUuid() {
return owner.getUuid();
}
@Override
public User getOwner() {
return owner;
}
@Override
@SuppressWarnings("unchecked")
public <T> Optional<T> getOwnerAs(Class<T> tClass) {
if (ClassUtils.isAssignable(tClass, Player.class)) {
return (Optional<T>) sponge.server.getPlayer(owner.getUuid());
} else if (ClassUtils.isAssignable(tClass, User.class)) {
UserStorage storage;
if ((storage = sponge.game.getServiceManager().provide(UserStorage.class).orNull()) !=
null) {
return (Optional<T>) storage.get(owner.getUuid()).orNull();
}
}
return Optional.absent();
}
@Override
@SuppressWarnings("unchecked")
public <T> Collection<T> getOfficersAs(Class<T> tClass) {
List<T> returned = new ArrayList<T>();
if (ClassUtils.isAssignable(tClass, Player.class)) {
for (UUID uuid : getOfficersUuid()) {
Player player;
if ((player = sponge.server.getPlayer(uuid).orNull()) != null) {
returned.add((T) player);
}
}
}
return returned;
}
@Override
public Collection<UUID> getOfficersUuid() {
List<UUID> returned = new ArrayList<UUID>();
for (User user : getOfficers()) {
returned.add(user.getUuid());
}
return returned;
}
@Override
public Collection<User> getOfficers() {
List<User> returned = new ArrayList<User>();
for (User user : this) {
if (ranks.get(user.getUuid()).equals(ProtectionRank.Officer)) {
returned.add(user);
}
}
return returned;
}
@Override
@SuppressWarnings("unchecked")
public <T> Collection<T> getMembersAs(Class<T> tClass) {
List<T> returned = new ArrayList<T>();
if (ClassUtils.isAssignable(tClass, Player.class)) {
for (UUID uuid : getMembersUuid()) {
Player player;
if ((player = sponge.server.getPlayer(uuid).orNull()) != null) {
returned.add((T) player);
}
}
}
return returned;
}
@Override
public Collection<UUID> getMembersUuid() {
List<UUID> returned = new ArrayList<UUID>();
for (User user : getMembers()) {
returned.add(user.getUuid());
}
return returned;
}
@Override
public Collection<User> getMembers() {
List<User> returned = new ArrayList<User>();
for (User user : this) {
if (ranks.get(user.getUuid()).equals(ProtectionRank.Member)) {
returned.add(user);
}
}
return returned;
}
@Override
@SuppressWarnings("unchecked")
public <T> Collection<T> getStaffAs(Class<T> tClass) {
List<T> returned = new ArrayList<T>();
if (ClassUtils.isAssignable(tClass, Player.class)) {
for (User user : this) {
Player player;
if ((player = sponge.server.getPlayer(user.getUuid()).orNull()) != null) {
returned.add((T) player);
}
}
}
return returned;
}
@Override
public Collection<UUID> getStaffUuid() {
Collection<UUID> returned = new ArrayList<UUID>();
for (User user : this) {
returned.add(user.getUuid());
}
return returned;
}
@Override
public boolean isOwner(UUID target) {
return owner.getUuid().equals(target);
}
@Override
public boolean isOwner(User target) {
return owner.getUuid().equals(target.getUuid());
}
@Override
public boolean isOfficer(UUID target) {
return ranks.containsKey(target) && ranks.get(target).equals(ProtectionRank.Officer);
}
@Override
public boolean isOfficer(User target) {
return ranks.containsKey(target.getUuid()) && ranks.get(target.getUuid()).equals(ProtectionRank.Officer);
}
@Override
public boolean isMember(UUID target) {
return ranks.containsKey(target) && ranks.get(target).equals(ProtectionRank.Member);
}
@Override
public boolean isMember(User target) {
return ranks.containsKey(target.getUuid()) && ranks.get(target.getUuid()).equals(ProtectionRank.Member);
}
@Override
public boolean isStaff(UUID target) {
return ranks.containsKey(target);
}
@Override
public boolean isStaff(User target) {
return ranks.containsKey(target.getUuid());
}
@Override
public boolean hasOwnerAccess(UUID target) {
return isOwner(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.owner");
}
@Override
public boolean hasOwnerAccess(User target) {
return isOwner(target) || target.hasPermission("oglofus.protection.bypass.owner");
}
@Override
public boolean hasOfficerAccess(UUID target) {
return isOfficer(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.officer");
}
@Override
public boolean hasOfficerAccess(User target) {
return isOfficer(target) || target.hasPermission("oglofus.protection.bypass.officer");
}
@Override
public boolean hasMemberAccess(UUID target) {
        return isMember(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.member");
}
@Override
public boolean hasMemberAccess(User target) {
return isMember(target) || target.hasPermission("oglofus.protection.bypass.member");
}
@Override
public ProtectionRank getRank(UUID target) {
return ranks.containsKey(target) ? ranks.get(target) : ProtectionRank.None;
}
@Override
public ProtectionRank getRank(User target) {
return ranks.containsKey(target.getUuid()) ? ranks.get(target.getUuid()) : ProtectionRank.None;
}
@Override
public void broadcast(String message) {
broadcast(MessageType.CHAT, message);
}
@Override
public void broadcast(String message, ProtectionRank rank) {
broadcast(MessageType.CHAT, message, rank);
}
@Override
public void broadcast(MessageType type, String message) {
for (User user : this) {
user.sendMessage(type, message);
}
}
@Override
public void broadcast(MessageType type, String message, ProtectionRank rank) {
switch (rank) {
case Member:
for (User user : getMembers()) {
user.sendMessage(type, message);
}
break;
case Officer:
for (User user : getOfficers()) {
user.sendMessage(type, message);
}
break;
case Owner:
owner.sendMessage(type, message);
break;
}
}
@Override
public void broadcastRaw(Object message) {
for (User user : this) {
user.sendMessage(message);
}
}
@Override
public void broadcastRaw(Object message, ProtectionRank rank) {
switch (rank) {
case Member:
for (User user : getMembers()) {
user.sendMessage(message);
}
break;
case Officer:
for (User user : getOfficers()) {
user.sendMessage(message);
}
break;
case Owner:
owner.sendMessage(message);
break;
}
}
@Override
public void broadcastRaw(MessageType type, Object message) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void broadcastRaw(MessageType type, Object message, ProtectionRank rank) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public ActionResponse reFlag() {
//TODO: make it.
return null;
}
@Override
public ActionResponse invite(Object sender, UUID target) {
return sponge.getUserManager().invite(sender, target, region);
}
@Override
public ActionResponse invite(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse invite(Object sender, User target) {
return null;
}
@Override
public ActionResponse invite(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse invite(UUID target) {
return sponge.getUserManager().invite(target, region);
}
@Override
public ActionResponse invite(User target) {
return null;
}
@Override
public ActionResponse kick(Object sender, UUID target) {
if (sender instanceof CommandSource) {
if (sender instanceof Player) {
if (region.getProtectionStaff().hasOwnerAccess(((Player) sender).getUniqueId())) {
//TODO: call the handler PlayerKickHandler.
return kick(target);
}
return ActionResponse.Failure.setMessage("access");
}
if (((CommandSource) sender).hasPermission("oglofus.protection.bypass")) {
return kick(target);
}
return ActionResponse.Failure.setMessage("access");
}
return ActionResponse.Failure.setMessage("object");
}
@Override
public ActionResponse kick(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse kick(Object sender, User target) {
return null;
}
@Override
public ActionResponse kick(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse kick(UUID target) {
//TODO: call the handler PlayerKickHandler.
return null;
}
@Override
public ActionResponse kick(User target) {
return null;
}
@Override
public ActionResponse promote(Object sender, UUID target) {
return null;
}
@Override
public ActionResponse promote(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse promote(Object sender, User target) {
return null;
}
@Override
public ActionResponse promote(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse promote(UUID target) {
return null;
}
@Override
public ActionResponse promote(User target) {
return null;
}
@Override
public ActionResponse demote(Object sender, UUID target) {
return null;
}
@Override
public ActionResponse demote(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse demote(Object sender, User target) {
return null;
}
@Override
public ActionResponse demote(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse demote(UUID target) {
return null;
}
@Override
public ActionResponse demote(User target) {
return null;
}
@Override
public ActionResponse changeRank(Object sender, UUID target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(CommandExecutor sender, UUID target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(Object sender, User target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(CommandExecutor sender, User target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(UUID target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(User target, ProtectionRank rank) {
return null;
}
@Override
public Iterator<User> iterator() {
return staff.iterator();
}
@Override
public boolean isProvided() {
return false;
}
@Nullable
@Override
public User get(CommandArgs arguments, List<? extends Annotation> modifiers) throws ArgumentException, ProvisionException {
String name = arguments.next();
Optional<User> user = sponge.getUserManager().getUser(name);
if (user.isPresent() && isStaff(user.get())) {
return user.get();
} else {
throw new ArgumentParseException(String.format("I can't find the Staff with name '%s'.", name));
}
}
@Override
public List<String> getSuggestions(String prefix) {
List<String> returned = new ArrayList<String>();
for (User user : this) {
if (user.getName().startsWith(prefix)) {
returned.add(user.getName());
}
}
return returned;
}
}
| Oglofus/OglofusProtection | sponge/src/main/java/me/nikosgram/oglofus/protection/OglofusProtectionStaff.java | Java | apache-2.0 | 16,214 |
package commons;
import org.makagiga.commons.ConfigFile;
import org.makagiga.test.AbstractEnumTest;
import org.makagiga.test.Test;
import org.makagiga.test.TestMethod;
import org.makagiga.test.Tester;
@Test(className = ConfigFile.Format.class)
public final class TestConfigFile_Format extends AbstractEnumTest<ConfigFile.Format> {
// public
public TestConfigFile_Format() {
super(
ConfigFile.Format.values(),
ConfigFile.Format.DESKTOP, ConfigFile.Format.INI
);
}
@Test
public void test_commons() {
for (final ConfigFile.Format i : ConfigFile.Format.values()) {
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateGroup(null);
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateGroup("");
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateKey(null);
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateKey("");
}
} );
}
final String LONG_VALUE = "AZaz09-";
final String SHORT_VALUE = "X";
// DESKTOP
ConfigFile.Format f = ConfigFile.Format.DESKTOP;
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
ConfigFile.Format.DESKTOP.validateGroup("[");
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
ConfigFile.Format.DESKTOP.validateGroup("]");
}
} );
assert f.validateGroup(SHORT_VALUE) == SHORT_VALUE;
assert f.validateGroup(LONG_VALUE) == LONG_VALUE;
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
ConfigFile.Format.DESKTOP.validateKey("=");
}
} );
assert f.validateKey(SHORT_VALUE) == SHORT_VALUE;
assert f.validateKey(LONG_VALUE) == LONG_VALUE;
f.validateGroup(" ");
f.validateGroup("Foo Bar");
// INI
f = ConfigFile.Format.INI;
assert f.validateGroup(SHORT_VALUE) == SHORT_VALUE;
assert f.validateGroup(LONG_VALUE) == LONG_VALUE;
assert f.validateKey(SHORT_VALUE) == SHORT_VALUE;
assert f.validateKey(LONG_VALUE) == LONG_VALUE;
}
@Test(
methods = @TestMethod(name = "equals", parameters = "String, String")
)
public void test_equals() {
ConfigFile.Format f;
f = ConfigFile.Format.DESKTOP;
assert f.equals("foo", "foo");
assert !f.equals("foo", "FOO");
f = ConfigFile.Format.INI;
assert f.equals("foo", "foo");
assert f.equals("foo", "FOO");
}
@Test(
methods = @TestMethod(name = "escape", parameters = "String")
)
public void test_escape() {
assertNull(ConfigFile.Format.escape(null));
assertEmpty(ConfigFile.Format.escape(""));
assertEquals("\\tFoo\\sBar\\r\\nBaz\\\\", ConfigFile.Format.escape("\tFoo Bar\r\nBaz\\"));
}
@Test(
methods = @TestMethod(name = "unescape", parameters = "String")
)
public void test_unescape() {
assertNull(ConfigFile.Format.unescape(null));
assertEmpty(ConfigFile.Format.unescape(""));
assertEquals("Foo Bar", ConfigFile.Format.unescape("Foo Bar"));
assertEquals("\tFoo Bar\r\nBaz\\", ConfigFile.Format.unescape("\\tFoo\\sBar\\r\\nBaz\\\\"));
assertEquals("\n\n \\\\", ConfigFile.Format.unescape("\\n\\n\\s\\s\\\\\\\\"));
}
@Test(
methods = @TestMethod(name = "getComment")
)
public void test_getComment() {
assert ConfigFile.Format.DESKTOP.getComment().equals("#");
assert ConfigFile.Format.INI.getComment().equals(";");
}
@Test(
methods = @TestMethod(name = "getEOL")
)
public void test_getEOL() {
assert ConfigFile.Format.DESKTOP.getEOL().equals("\n");
assert ConfigFile.Format.INI.getEOL().equals("\r\n");
}
@Test(
methods = @TestMethod(name = "getSuffix")
)
public void test_getSuffix() {
assert ConfigFile.Format.DESKTOP.getSuffix().equals(".desktop");
assert ConfigFile.Format.INI.getSuffix().equals(".ini");
}
@Test(
methods = @TestMethod(name = "isCaseSensitive")
)
public void test_isCaseSensitive() {
assert ConfigFile.Format.DESKTOP.isCaseSensitive();
assert !ConfigFile.Format.INI.isCaseSensitive();
}
}
| stuffer2325/Makagiga | test/src/commons/TestConfigFile_Format.java | Java | apache-2.0 | 4,186 |
package org.apache.rave.portal.service.impl;
import org.apache.rave.model.ExcercicesHasTrainingPlan;
import org.apache.rave.model.Serie;
import org.apache.rave.model.TrainingPlan;
import org.apache.rave.portal.repository.ExcercicesHasTrainingPlanRepository;
import org.apache.rave.portal.repository.SerieRepository;
import org.apache.rave.portal.repository.TrainingPlanRepository;
import org.apache.rave.portal.service.TrainingPlanService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.ArrayList;
import java.util.Collection;
/**
* Created by fhernandez on 23/09/14.
*/
@Service
public class DefaultTrainingPlanService implements TrainingPlanService {
private final Logger logger = LoggerFactory.getLogger(DefaultTrainingPlanService.class);
private final TrainingPlanRepository trainingPlanRepository;
private final ExcercicesHasTrainingPlanRepository exercisesHasTrainingPlanRepository;
private final SerieRepository serieRepository;
@Autowired
public DefaultTrainingPlanService(TrainingPlanRepository trainingPlanRepository,ExcercicesHasTrainingPlanRepository exercisesHasTrainingPlanRepository,SerieRepository serieRepository) {
this.trainingPlanRepository = trainingPlanRepository;
this.exercisesHasTrainingPlanRepository = exercisesHasTrainingPlanRepository;
this.serieRepository = serieRepository;
}
@Override
@Transactional
public TrainingPlan getById(Long trainingPlanId) {
TrainingPlan trainingPlan =trainingPlanRepository.getById(trainingPlanId);
if(trainingPlan!=null) {
trainingPlan.getExercisesHasTrainingplans().size();
}
return trainingPlan;
}
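    /**
     * Persists the plan if it is new, then saves each exercise's serie and links the exercises
     * back to the plan before storing the whole list.
     */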
@Transactional
public TrainingPlan save(TrainingPlan newPlan) {
Collection<ExcercicesHasTrainingPlan> exerciseList=newPlan.getExercisesHasTrainingplans();
try {
if(newPlan.getEntityId()==null) {
newPlan = trainingPlanRepository.save(newPlan);
}
for (ExcercicesHasTrainingPlan exerciseHasTraining : exerciseList) {
Serie serie = serieRepository.save(exerciseHasTraining.getSerie());
exerciseHasTraining.setSerie(serie);
exerciseHasTraining.setSerieId(serie.getEntityId());
exerciseHasTraining.setTrainingplanId(newPlan.getEntityId());
exerciseHasTraining.setTrainingPlan(newPlan);
}
exercisesHasTrainingPlanRepository.saveList(exerciseList);
}catch(Exception e){
logger.error("Exception saving plan " + e);
}
return newPlan;
}
public Collection<TrainingPlan> getByTrainerID(Long trainerId){
return trainingPlanRepository.getByTrainerID(trainerId);
}
}
| lletsica/my_test_repo | rave-components/rave-core/src/main/java/org/apache/rave/portal/service/impl/DefaultTrainingPlanService.java | Java | apache-2.0 | 2,990 |
/*
===========================================================================
Copyright 2002-2010 Martin Dvorak
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package com.mindcognition.mindraider.ui.swing.trash;
import java.awt.BorderLayout;
import java.awt.GridLayout;
import java.awt.Toolkit;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.FocusEvent;
import java.util.HashMap;
import javax.swing.JButton;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JToolBar;
import javax.swing.JTree;
import javax.swing.event.TreeExpansionEvent;
import javax.swing.event.TreeExpansionListener;
import javax.swing.event.TreeModelEvent;
import javax.swing.event.TreeModelListener;
import javax.swing.event.TreeSelectionEvent;
import javax.swing.event.TreeSelectionListener;
import javax.swing.event.TreeWillExpandListener;
import javax.swing.tree.DefaultMutableTreeNode;
import javax.swing.tree.DefaultTreeModel;
import javax.swing.tree.ExpandVetoException;
import javax.swing.tree.MutableTreeNode;
import javax.swing.tree.TreePath;
import javax.swing.tree.TreeSelectionModel;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import com.emental.mindraider.core.MindRaider;
import com.emental.mindraider.core.rest.Metadata;
import com.emental.mindraider.core.rest.ResourceDescriptor;
import com.emental.mindraider.core.rest.resource.FolderResource;
import com.emental.mindraider.core.rest.resource.OutlineResource;
import com.emental.mindraider.ui.dialogs.ProgressDialogJFrame;
import com.emental.mindraider.ui.gfx.IconsRegistry;
import com.mindcognition.mindraider.application.model.label.LabelCustodianListener;
import com.mindcognition.mindraider.l10n.Messages;
import com.mindcognition.mindraider.ui.swing.dialogs.RestoreNotebookJDialog;
import com.mindcognition.mindraider.ui.swing.explorer.ExplorerJPanel;
import com.mindcognition.mindraider.utils.SwingWorker;
public class TrashJPanel extends JPanel implements
TreeWillExpandListener, TreeExpansionListener, LabelCustodianListener {
private static final Logger logger = Logger.getLogger(TrashJPanel.class);
public static final int LEVEL_ROOT = 0;
public static final int LEVEL_FOLDERS = 1;
public static final int LEVEL_NOTEBOOKS = 2;
/*
* UI components
*/
protected DefaultMutableTreeNode rootNode;
protected DefaultTreeModel treeModel;
protected final JTree tree;
protected JButton undoButton, emptyButton, deleteButton;
private Toolkit toolkit = Toolkit.getDefaultToolkit();
/*
* model
*/
private HashMap treeNodeToResourceUriMap;
/*
* singleton
*/
private static TrashJPanel singleton;
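    /**
     * Returns the singleton instance, creating it lazily on first use.
     */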
public static TrashJPanel getInstance() {
if (singleton == null) {
singleton = new TrashJPanel();
}
return singleton;
}
private ResourceDescriptor[] discardedNotebooksDescriptors;
/**
* Constructor.
*/
private TrashJPanel() {
treeNodeToResourceUriMap = new HashMap();
rootNode = new DefaultMutableTreeNode(Messages.getString("TrashJPanel.notebookArchive"));
treeModel = new DefaultTreeModel(rootNode);
treeModel.addTreeModelListener(new MyTreeModelListener());
tree = new JTree(treeModel);
tree.setEditable(false);
tree.getSelectionModel().setSelectionMode(
TreeSelectionModel.SINGLE_TREE_SELECTION);
tree.addTreeExpansionListener(this);
tree.addTreeWillExpandListener(this);
tree.setShowsRootHandles(true);
tree.putClientProperty("JTree.lineStyle", "Angled");
// tree renderer
// TODO implement own renderer in order to show tooltips
tree.setCellRenderer(new TrashTreeCellRenderer(IconsRegistry
.getImageIcon("trashFull.png"), IconsRegistry
.getImageIcon("explorerNotebookIcon.png")));
setLayout(new BorderLayout());
// control panel
JToolBar tp = new JToolBar();
tp.setLayout(new GridLayout(1, 6));
undoButton = new JButton("", IconsRegistry
.getImageIcon("trashUndo.png"));
undoButton.setEnabled(false);
undoButton.setToolTipText("Restore Outline");
undoButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree
.getLastSelectedPathComponent();
if (node == null) {
return;
}
new RestoreNotebookJDialog(
(String)treeNodeToResourceUriMap.get(node),
"Restore Outline",
"Restore",
true);
}
});
tp.add(undoButton);
deleteButton = new JButton("", IconsRegistry
.getImageIcon("explorerDeleteSmall.png"));
deleteButton.setToolTipText("Delete Outline");
deleteButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree
.getLastSelectedPathComponent();
if (node == null) {
return;
}
int result = JOptionPane.showConfirmDialog(
MindRaider.mainJFrame,
"Do you really want to DELETE this Outline?",
"Delete Outline", JOptionPane.YES_NO_OPTION);
if (result == JOptionPane.YES_OPTION) {
MindRaider.labelCustodian
.deleteOutline((String) treeNodeToResourceUriMap
.get(node));
refresh();
ExplorerJPanel.getInstance().refresh();
}
}
});
tp.add(deleteButton);
emptyButton = new JButton("", IconsRegistry
.getImageIcon("trashEmpty.png"));
emptyButton.setToolTipText(Messages.getString("TrashJPanel.emptyArchive"));
emptyButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
int result = JOptionPane
.showConfirmDialog(
MindRaider.mainJFrame,
"Do you really want to DELETE all discarded Outlines?",
"Empty Trash", JOptionPane.YES_NO_OPTION);
if (result == JOptionPane.YES_OPTION) {
final SwingWorker worker = new SwingWorker() {
public Object construct() {
ProgressDialogJFrame progressDialogJFrame = new ProgressDialogJFrame(
"Empty Trash",
"<html><br> <b>Deleting:</b> </html>");
try {
ResourceDescriptor[] resourceDescriptors = MindRaider.labelCustodian
.getDiscardedOutlineDescriptors();
if (resourceDescriptors != null) {
for (int i = 0; i < resourceDescriptors.length; i++) {
MindRaider.labelCustodian
.deleteOutline(resourceDescriptors[i]
.getUri());
}
refresh();
}
} finally {
if (progressDialogJFrame != null) {
progressDialogJFrame.dispose();
}
}
return null;
}
};
worker.start();
}
}
});
tp.add(emptyButton);
add(tp, BorderLayout.NORTH);
// add the tree
JScrollPane scrollPane = new JScrollPane(tree);
add(scrollPane);
// build the whole tree
buildTree();
// click handler
tree.addTreeSelectionListener(new TreeSelectionListener() {
public void valueChanged(TreeSelectionEvent e) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree
.getLastSelectedPathComponent();
if (node == null) {
return;
}
logger.debug("Tree selection path: "
+ node.getPath()[node.getLevel()]);
enableDisableToolbarButtons(node.getLevel());
}
});
}
/**
* Build tree. This method is called on startup and tree refresh in order to
* reload disc content. Adding/removing of particular nodes during the
* program run is performed on individual nodes.
*/
void buildTree() {
discardedNotebooksDescriptors = MindRaider.labelCustodian
.getDiscardedOutlineDescriptors();
if (!ArrayUtils.isEmpty(discardedNotebooksDescriptors)) {
for (int i = 0; i < discardedNotebooksDescriptors.length; i++) {
addDiscardedNotebookNode(discardedNotebooksDescriptors[i]
.getLabel(), discardedNotebooksDescriptors[i].getUri());
}
// now expand all rows
for (int i = 0; i < tree.getRowCount(); i++) {
tree.expandRow(i);
}
}
tree.setSelectionRow(0);
enableDisableToolbarButtons(0);
}
/**
 * Add discarded notebook node.
 *
 * @param label
 *            notebook label.
 * @param uri
 *            notebook URI.
 * @return the node.
 */
public DefaultMutableTreeNode addDiscardedNotebookNode(String label,
String uri) {
DefaultMutableTreeNode parent = null;
Object child = label;
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
// store node to map to be able to get URI from node object
treeNodeToResourceUriMap.put(childNode, uri);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
return childNode;
}
/**
* Call this method in order to update the tree.
*/
public void refresh() {
clear();
buildTree();
}
/**
 * Move notebook up in the folder.
 *
 * @param folderUri
 *            URI of the enclosing folder.
 * @param notebookUri
 *            URI of the notebook to be moved.
 * @return <code>true</code> if the notebook was moved, <code>false</code> otherwise.
 */
protected boolean moveNotebookUp(String folderUri, String notebookUri) {
logger.debug(" moveNotebookUp: " + folderUri + " " + notebookUri);
if (folderUri != null && notebookUri != null) {
try {
// add notebook to folder
boolean result = MindRaider.labelCustodian.moveNotebookUp(
folderUri, notebookUri);
// TODO PERFORMANCE move it just in the tree instead of refresh
refresh();
return result;
} catch (Exception e1) {
logger.error("moveNotebookUp(String, String)", e1);
JOptionPane.showMessageDialog(TrashJPanel.this,
        "Unable to move outline up: " + e1.getMessage(),
        "Outline Manipulation Error",
        JOptionPane.ERROR_MESSAGE);
return false;
}
}
logger.debug("Outline wont be added URIs are null!");
return false;
}
/**
 * Move notebook down in the folder.
 *
 * @param folderUri
 *            URI of the enclosing folder.
 * @param notebookUri
 *            URI of the notebook to be moved.
 * @return <code>true</code> if the notebook was moved, <code>false</code> otherwise.
 */
protected boolean moveNotebookDown(String folderUri, String notebookUri) {
logger.debug(" moveNotebookDown: " + folderUri + " " + notebookUri);
if (folderUri != null && notebookUri != null) {
try {
boolean result = MindRaider.labelCustodian.moveNotebookDown(
folderUri, notebookUri);
// TODO PERFORMANCE move it just in the tree instead of refresh
refresh();
return result;
} catch (Exception e1) {
logger.error("moveNotebookDown(String, String)", e1);
JOptionPane.showMessageDialog(TrashJPanel.this,
        "Unable to move outline down: " + e1.getMessage(),
        "Outline Manipulation Error",
        JOptionPane.ERROR_MESSAGE);
return false;
}
}
logger.debug("Outline wont be added URIs are null!");
return false;
}
/**
* Add notebook node to folder node (on new notebook creation).
*
* @param notebookUri
* newly created notebook URI.
*/
public void addNotebookToFolder(String notebookUri) {
logger.debug(" URI of created notebook is: " + notebookUri);
if (notebookUri != null) {
// add notebook to selected folder
TreePath treePath = tree.getSelectionPath();
String folderUri = (String) treeNodeToResourceUriMap.get(treePath
.getLastPathComponent());
logger.debug("Enclosing folder URI is: " + folderUri);
if (folderUri != null) {
try {
// add notebook to folder
MindRaider.labelCustodian.addOutline(folderUri,
notebookUri);
// now add it in the tree
OutlineResource notebookResource = MindRaider.outlineCustodian
.getActiveOutlineResource();
addNotebookNode((DefaultMutableTreeNode) treePath
.getLastPathComponent(), notebookResource.resource
.getMetadata().getUri().toASCIIString(),
notebookResource.getLabel());
} catch (Exception e1) {
logger.error("addNotebookToFolder(String)", e1);
JOptionPane.showMessageDialog(TrashJPanel.this,
        "Unable to add Outline to folder: " + e1.getMessage(),
        "Outline Creation Error",
        JOptionPane.ERROR_MESSAGE);
return;
}
}
} else {
logger.debug("Outline won't be added to folder - its URI is null!");
}
}
/**
* Remove all nodes except the root node.
*/
public void clear() {
rootNode.removeAllChildren();
treeModel.reload();
treeNodeToResourceUriMap.clear();
}
/**
* Remove the currently selected node.
*/
public void removeCurrentNode() {
TreePath currentSelection = tree.getSelectionPath();
if (currentSelection != null) {
DefaultMutableTreeNode currentNode = (DefaultMutableTreeNode) (currentSelection
.getLastPathComponent());
MutableTreeNode parent = (MutableTreeNode) (currentNode.getParent());
if (parent != null) {
treeModel.removeNodeFromParent(currentNode);
return;
}
}
// Either there was no selection, or the root was selected.
toolkit.beep();
}
/**
* Add child to the currently selected node.
*/
public DefaultMutableTreeNode addObject(Object child) {
DefaultMutableTreeNode parentNode = null;
TreePath parentPath = tree.getSelectionPath();
if (parentPath == null) {
parentNode = rootNode;
} else {
parentNode = (DefaultMutableTreeNode) (parentPath
.getLastPathComponent());
}
return addObject(parentNode, child, true);
}
public DefaultMutableTreeNode addObject(DefaultMutableTreeNode parent,
Object child) {
return addObject(parent, child, false);
}
/**
* Add folder node.
*
* @param uri
* folder URI.
* @return the node.
*/
public DefaultMutableTreeNode addFolderNode(String uri) {
DefaultMutableTreeNode parent = null;
// get label from URI
FolderResource resource = new FolderResource(MindRaider.labelCustodian
.get(uri));
Object child = resource.getLabel();
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
// store node to map to be able to get URI from node object
treeNodeToResourceUriMap.put(childNode, uri);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
return childNode;
}
/**
* Add notebook node.
*
* @param parent
* folder node.
* @param uri
* notebook URI.
* @param label
* notebook label.
* @return the node.
*/
public DefaultMutableTreeNode addNotebookNode(
DefaultMutableTreeNode parent, String uri, String label) {
Object child = label;
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
// store node to map to be able to get URI from node object
treeNodeToResourceUriMap.put(childNode, uri);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
return childNode;
}
/**
* Add a child object to a parent object.
*
* @param parent
* the parent object.
* @param child
* the child object.
* @param shouldBeVisible
* if <code>true</code> the object should be visible.
* @return Returns a <code>DefaultMutableTreeNode</code>
*/
public DefaultMutableTreeNode addObject(DefaultMutableTreeNode parent,
Object child, boolean shouldBeVisible) {
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
// Make sure the user can see the lovely new node.
if (shouldBeVisible) {
tree.scrollPathToVisible(new TreePath(childNode.getPath()));
}
return childNode;
}
/**
* Custom MyTreeModelListener class.
*/
class MyTreeModelListener implements TreeModelListener {
/**
* Logger for this class.
*/
private final Logger logger = Logger
.getLogger(MyTreeModelListener.class);
/**
* @see javax.swing.event.TreeModelListener#treeNodesChanged(javax.swing.event.TreeModelEvent)
*/
public void treeNodesChanged(TreeModelEvent e) {
DefaultMutableTreeNode node;
node = (DefaultMutableTreeNode) (e.getTreePath()
.getLastPathComponent());
/*
* If the event lists children, then the changed node is the child
* of the node we've already gotten. Otherwise, the changed node and
* the specified node are the same.
*/
// ToDo
try {
int index = e.getChildIndices()[0];
node = (DefaultMutableTreeNode) (node.getChildAt(index));
} catch (NullPointerException exc) {
//
}
logger.debug("The user has finished editing the node.");
logger.debug("New value: " + node.getUserObject());
}
public void treeNodesInserted(TreeModelEvent e) {
}
public void treeNodesRemoved(TreeModelEvent e) {
}
public void treeStructureChanged(TreeModelEvent e) {
}
}
public void treeCollapsed(TreeExpansionEvent e) {
logger.debug("Tree colapsed event..." + e.getPath());
}
/**
* @see javax.swing.event.TreeExpansionListener#treeExpanded(javax.swing.event.TreeExpansionEvent)
*/
public void treeExpanded(TreeExpansionEvent e) {
logger.debug("Tree expanded event..." + e.getPath());
}
/**
* @see javax.swing.event.TreeWillExpandListener#treeWillCollapse(javax.swing.event.TreeExpansionEvent)
*/
public void treeWillCollapse(TreeExpansionEvent e)
throws ExpandVetoException {
logger.debug("Tree will collapse " + e.getPath());
}
/**
* @see javax.swing.event.TreeWillExpandListener#treeWillExpand(javax.swing.event.TreeExpansionEvent)
*/
public void treeWillExpand(TreeExpansionEvent e) throws ExpandVetoException {
logger.debug("Tree will expand " + e.getPath());
/*
* DefaultMutableTreeNode node = (DefaultMutableTreeNode)
* tree.getLastSelectedPathComponent(); if (node == null) { return; }
* logger.debug(""+node.getPath()[node.getLevel()]); // buttons
* disabling switch(node.getLevel()) { case LEVEL_FOLDERS: // disconnect
* childrens from the node Enumeration enumeration=node.children(); //
* delete nodes itself while (enumeration.hasMoreElements()) { Object
* object=enumeration.nextElement();
* treeNodeToResourceUriMap.remove(object);
* treeModel.removeNodeFromParent((MutableTreeNode)object); } // get
* folder URI logger.debug("Expanding folder:
* "+treeNodeToResourceUriMap.get(node)); FolderResource folder =new
* FolderResource(MindRaider.folderCustodian.get((String)treeNodeToResourceUriMap.get(node)));
* String[] notebookUris=folder.getNotebookUris(); if (notebookUris !=
* null) { for (int i= 0; i < notebookUris.length; i++) {
* NotebookResource notebook=new
* NotebookResource(MindRider.notebookCustodian.get(notebookUris[i]));
* addNotebookNode(node,notebook.resource.metadata.uri.toASCIIString(),notebook.getLabel()); } } }
*/
}
/**
* @see com.mindcognition.mindraider.application.model.label.LabelCustodianListener#labelCreated(FolderResource)
*/
public void labelCreated(FolderResource folder) {
Metadata meta = folder.getResource().getMetadata();
logger.debug("Folder created: " + meta.getUri().toASCIIString());
// handle creation of the folder
addFolderNode(meta.getUri().toASCIIString());
}
/**
* @see java.awt.event.FocusListener#focusGained(java.awt.event.FocusEvent)
*/
public void focusGained(FocusEvent arg0) {
// TODO Auto-generated method stub
}
/**
* Change status in the toolbar buttons.
*
* @param level
* The level could be <code>LEVEL_ROOT</code> or
* <code>LEVEL_FOLDERS</code>
*/
protected void enableDisableToolbarButtons(int level) {
// buttons disabling
switch (level) {
case LEVEL_ROOT:
undoButton.setEnabled(false);
deleteButton.setEnabled(false);
emptyButton.setEnabled(true);
break;
case LEVEL_FOLDERS:
undoButton.setEnabled(true);
deleteButton.setEnabled(true);
emptyButton.setEnabled(true);
break;
}
}
private static final long serialVersionUID = 5028293540089775890L;
}
| dvorka/mindraider | mr7/src/main/java/com/mindcognition/mindraider/ui/swing/trash/TrashJPanel.java | Java | apache-2.0 | 24,481 |
package com.fpliu.newton.ui.list;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.GridView;
/**
* @author 792793182@qq.com 2017-06-30.
*/
public interface IGrid<T, V extends GridView> extends ICommon<T> {
/** @return the backing GridView widget. */
V getGridView();
/** Sets the adapter that supplies the item data and item views. */
void setItemAdapter(ItemAdapter<T> itemAdapter);
/** @return the currently installed item adapter. */
ItemAdapter<T> getItemAdapter();
/** Registers a click listener for grid items. */
void setOnItemClickListener(AdapterView.OnItemClickListener listener);
/** @return the number of distinct item view types (cf. android.widget.Adapter). */
int getItemViewTypeCount();
/** @return the view type of the item at the given position. */
int getItemViewType(int position);
/** Creates or recycles the view for the item at the given position. */
View getItemView(int position, View convertView, ViewGroup parent);
/** Notifies that the underlying data set has changed. */
void notifyDataSetChanged();
/** Sets the number of grid columns. */
void setNumColumns(int numColumns);
}
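// A minimal consumption sketch (illustrative only): "SomeGridImpl" is a hypothetical class
// implementing IGrid<String, GridView>; the calls below use only methods declared above.
//
// IGrid<String, GridView> grid = new SomeGridImpl();
// grid.setNumColumns(3);
// grid.setItemAdapter(stringItemAdapter);   // an ItemAdapter<String> created elsewhere
// grid.setOnItemClickListener((parent, view, position, id) -> {
//     // react to a tap on the item at 'position'
// });
// grid.notifyDataSetChanged();              // refresh once the backing data changes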
| leleliu008/Android-List | library/src/main/java/com/fpliu/newton/ui/list/IGrid.java | Java | apache-2.0 | 688 |