text
stringlengths 7
1.01M
|
|---|
package nl.quintor.studybits.indy.wrapper.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;

import java.io.Serializable;
import java.util.Optional;
@Data
@AllArgsConstructor
public class ProvingCredentialKey implements Serializable {

    /** Explicit version id so the serialized form stays stable across refactors. */
    private static final long serialVersionUID = 1L;

    /** Credential identifier; serialized to JSON as {@code cred_id}. */
    @JsonProperty("cred_id")
    private String credId;

    /**
     * Whether the attribute is revealed in the proof.
     * NOTE(review): Optional as a field is unidiomatic (it is meant as a return
     * type) and is not Serializable itself — kept because callers and the JSON
     * mapping depend on it; confirm the Jackson Jdk8Module is registered.
     */
    private Optional<Boolean> revealed;
}
|
/*
Copyright [2020] [https://www.stylefeng.cn]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Guns采用APACHE LICENSE 2.0开源协议,您在使用过程中,需要注意以下几点:
1.请不要删除和修改根目录下的LICENSE文件。
2.请不要删除和修改Guns源码头部的版权声明。
3.请保留源码和相关描述文件的项目出处,作者声明等。
4.分发源码时候,请注明软件出处 https://gitee.com/stylefeng/guns-separation
5.在修改包名,模块名称,项目代码等时,请注明软件出处 https://gitee.com/stylefeng/guns-separation
6.若您的项目无法满足以上几点,可申请商业授权,获取Guns商业授权许可,请在官网购买授权,地址为 https://www.stylefeng.cn
*/
package cn.stylefeng.guns.core.enums;
/**
 * Operation-type enum used by the logging annotation.
 *
 * NOTE(review): "Annotion" is a typo for "Annotation"; renaming the enum would
 * break external references, so the name is kept as-is.
 *
 * @author xuyuxiang
 * @date 2020/3/16 17:45
 */
public enum LogAnnotionOpTypeEnum {

    /** Other / uncategorized operation */
    OTHER,

    /** Create */
    ADD,

    /** Delete */
    DELETE,

    /** Edit */
    EDIT,

    /** Update */
    UPDATE,

    /** Query */
    QUERY,

    /** Detail view */
    DETAIL,

    /** Tree view */
    TREE,

    /** Import */
    IMPORT,

    /** Export */
    EXPORT,

    /** Grant permissions */
    GRANT,

    /** Force logout */
    FORCE,

    /** Clear / empty */
    CLEAN,

    /** Change status */
    CHANGE_STATUS
}
|
/**
* Copyright 2009-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.javacrumbs.jsonunit.spring;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.View;
import org.springframework.web.servlet.ViewResolver;
import org.springframework.web.servlet.view.ContentNegotiatingViewResolver;
import org.springframework.web.servlet.view.json.MappingJackson2JsonView;
import java.util.Collections;
@Configuration
@ComponentScan("net.javacrumbs.jsonunit.spring")
public class SpringConfig {

    /**
     * Registers a {@link ContentNegotiatingViewResolver} whose single default
     * view renders every response as JSON via Jackson.
     */
    @Bean
    public ViewResolver contentNegotiatingViewResolver() {
        final MappingJackson2JsonView jsonView = new MappingJackson2JsonView();
        final ContentNegotiatingViewResolver viewResolver = new ContentNegotiatingViewResolver();
        viewResolver.setDefaultViews(Collections.<View>singletonList(jsonView));
        return viewResolver;
    }
}
|
/*******************************************************************************
* Copyright (c) 2000, 2003 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* IBM Corporation - initial API and implementation
*******************************************************************************/
package org.eclipse.swt.internal;
import java.util.EventListener;
/**
* This interface is the cross-platform version of the
* java.util.EventListener interface.
* <p>
* It is part of our effort to provide support for both J2SE
* and J2ME platforms. Under this scheme, classes need to
* implement SWTEventListener instead of java.util.EventListener.
* </p>
* <p>
* Note: java.util.EventListener is not part of CDC and CLDC.
* </p>
*/
public interface SWTEventListener extends EventListener {
    // Marker interface: deliberately declares no methods of its own. SWT
    // listener types extend this instead of java.util.EventListener directly
    // so they can compile on J2ME profiles (CDC/CLDC) where EventListener is
    // absent — see the Javadoc above.
}
|
/**
 * Application entry point: performs start-up initialisation and then runs the
 * sorter.
 *
 * @author Ayush Tripathi.
 * @version 1.0.0
 */
public class MainBody {

    public static void main(String[] args) throws Exception {
        // Initialise the application, then kick off a single sorter run.
        StartUp.onStart();
        new Sorter().run();
    }
}
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package clases;
import javax.swing.table.DefaultTableModel;
/**
*
* @author juan_
*/
// Custom table model whose only purpose is to keep column 0 read-only.
public class ModeloTable extends DefaultTableModel {

    /** Explicit version id; DefaultTableModel is Serializable. */
    private static final long serialVersionUID = 1L;

    /**
     * Every cell is editable except those in column 0, which must stay
     * read-only.
     *
     * @param row    row index (unused — editability depends on the column only)
     * @param column column index
     * @return {@code true} for every column except 0
     */
    @Override
    public boolean isCellEditable(int row, int column) {
        return column != 0;
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Internal classes used to implement distributed result grouping */
package org.apache.solr.search.grouping.distributed;
|
package gr.auth.sam.tredingfeelings;
public class Params {
    // WOEID (Where-On-Earth ID) of the region whose trends are fetched.
    public int woeid = 23424977; // United States
    // How many top trending topics to process.
    public int topicsCount = 5; // the top 5 trends
    // How many tweets to fetch per topic.
    public int tweetsCount = 1500; // 1500 tweets for each topic
    // Run the pipeline with multiple threads when true.
    public boolean multithreaded = false;
    // Pipeline stage toggles: clear previous data, gather tweets, process
    // them, and render the graph.
    public boolean clear = true;
    public boolean gather = true;
    public boolean proc = true;
    public boolean graph = true;
    // Command-line arguments are currently ignored; all values above are the
    // hard-coded defaults.
    public Params(String[] args) {
        // TODO implement dynamic handling
    }
}
|
package creationalpatterns.abstractfactory;
import creationalpatterns.abstractfactory.factory.Data;
import creationalpatterns.abstractfactory.factory.Factory;
import creationalpatterns.abstractfactory.factory.Link;
import creationalpatterns.abstractfactory.factory.Page;
import creationalpatterns.abstractfactory.listfactory.ListFactory;
import creationalpatterns.abstractfactory.tablefactory.TableFactory;
import java.util.InputMismatchException;
import java.util.Scanner;
/**
 * Create a hierarchical link collection as an HTML file. It can be created in
 * either tabular or list format, chosen interactively by the user.
 */
public class Main {

    public static void main(String[] args) {
        System.out.println("Please enter a number (1 or 2):");
        System.out.println(" 1: Create objects by using ListFactory");
        System.out.println(" 2: Create objects by using TableFactory");

        int number = 0;
        // Fix: the Scanner was never closed; try-with-resources releases it
        // (and System.in) on every path, including the error path.
        try (Scanner scan = new Scanner(System.in)) {
            number = scan.nextInt();
        } catch (InputMismatchException e) {
            System.err.println("Unexpected value.");
            System.exit(-1);
        }

        // Choose the concrete factory; any other input aborts the program.
        Factory factory;
        if (number == 1) {
            factory = new ListFactory();
        } else if (number == 2) {
            factory = new TableFactory();
        } else {
            System.err.println("The value is not 1 or 2.");
            System.exit(-1);
            return; // unreachable; makes definite assignment of factory explicit
        }

        // Build the "Newspaper" group of links.
        Link washingtonPost = factory.createLink("The Washington Post", "https://www.washingtonpost.com/");
        Link newYorkTimes = factory.createLink("The NewYork Times", "https://www.nytimes.com/");
        Link financialTimes = factory.createLink("The Financial Times", "https://www.ft.com/");
        Data newspaper = factory.createData("Newspaper");
        newspaper.add(washingtonPost);
        newspaper.add(newYorkTimes);
        newspaper.add(financialTimes);

        // Build the "Search engine" group of links.
        Link yahoo = factory.createLink("Yahoo!", "https://www.yahoo.com/");
        Link google = factory.createLink("Google", "https://www.google.com/");
        Data searchEngine = factory.createData("Search engine");
        searchEngine.add(yahoo);
        searchEngine.add(google);

        // Assemble the page and write it out.
        Page linkPage = factory.createPage("LinkPage", "James Smith");
        linkPage.add(newspaper);
        linkPage.add(searchEngine);
        linkPage.output();
    }
}
|
package com.stylefeng.guns.common.constant.state;
/**
 * Expense approval states and their display messages.
 * (The original Javadoc wrongly described this as a menu enum.)
 *
 * @author fengshuonan
 * @date 2017年6月1日22:50:11
 */
public enum ExpenseState {

    SUBMITING(1, "待提交"),
    CHECKING(2, "待审核"),
    PASS(3, "审核通过"),
    UN_PASS(4, "未通过");

    int code;
    String message;

    ExpenseState(int code, String message) {
        this.code = code;
        this.message = message;
    }

    public int getCode() {
        return code;
    }

    public void setCode(int code) {
        this.code = code;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    /**
     * Looks up the display message for a numeric status code.
     *
     * @param status the code to resolve; may be null
     * @return the matching message, or the empty string when the code is null
     *         or unknown
     */
    public static String valueOf(Integer status) {
        if (status == null) {
            return "";
        }
        for (ExpenseState state : values()) {
            if (state.getCode() == status) {
                return state.getMessage();
            }
        }
        return "";
    }
}
|
/*
* This file is part of spiget-java-client, licensed under the MIT License (MIT).
*
* Copyright (c) 2022 Pasqual K. and contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package dev.derklaro.spiget.model;
import lombok.Data;
import lombok.experimental.Accessors;
@Data
@Accessors(fluent = true)
public final class Webhook {
    // Webhook identifier (presumably assigned by the spiget API — confirm).
    private final String id;
    // Secret used to authenticate deliveries for this webhook.
    private final String secret;
}
|
/*
* Copyright 2017 Ruslan_<<RUS_M>>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rus.cpuinfo.AndroidDepedentModel;
import android.content.Context;
import android.os.Build;
import android.os.SystemClock;
import android.support.annotation.NonNull;
import org.apache.commons.lang3.StringUtils;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import rus.cpuinfo.Util.RootUtil;
import static rus.cpuinfo.Model.BaseInfo.SYSTEM_ANDROID_VERSION;
import static rus.cpuinfo.Model.BaseInfo.SYSTEM_API_LEVEL;
import static rus.cpuinfo.Model.BaseInfo.SYSTEM_BOOTLOADER;
import static rus.cpuinfo.Model.BaseInfo.SYSTEM_BUILD_ID;
import static rus.cpuinfo.Model.BaseInfo.SYSTEM_JAVA_VM;
import static rus.cpuinfo.Model.BaseInfo.SYSTEM_KERNEL_ARCHITECTURE;
import static rus.cpuinfo.Model.BaseInfo.SYSTEM_KERNEL_VERSION;
import static rus.cpuinfo.Model.BaseInfo.SYSTEM_ROOT_ACCESS;
import static rus.cpuinfo.Model.BaseInfo.SYSTEM_UPTIME;
public class SysInfo extends BaseInfo {

    public SysInfo(Context context) {
        super(context);
    }

    /**
     * Resolves one of the {@code SYSTEM_*} query constants to its display
     * string.
     *
     * @param query one of the SYSTEM_* constants imported from BaseInfo
     * @return the requested value; the empty string when the underlying
     *         property is unavailable
     * @throws IllegalArgumentException when the query is not a SYSTEM_* constant
     */
    @Override
    @NonNull
    public String getInfo(int query) {
        switch (query) {
            case SYSTEM_UPTIME:
                return getUpTime();
            case SYSTEM_ANDROID_VERSION:
                return getAndroidVersion();
            case SYSTEM_API_LEVEL:
                return getApiLevel();
            case SYSTEM_BOOTLOADER:
                return getBootLoader();
            case SYSTEM_BUILD_ID:
                return getBuildId();
            case SYSTEM_KERNEL_ARCHITECTURE:
                return getKernelArchitecture();
            case SYSTEM_KERNEL_VERSION:
                return getKernelVersion();
            case SYSTEM_JAVA_VM:
                return getJavaVM();
            case SYSTEM_ROOT_ACCESS:
                return isDeviceRooted();
            default:
                throw new IllegalArgumentException("Query must be with \"SYSTEM.\" prefix");
        }
    }

    /** Returns {@code value}, or the empty string when it is null. */
    @NonNull
    private String orEmpty(String value) {
        return value != null ? value : StringUtils.EMPTY;
    }

    /** Time since boot, formatted as HH:MM:SS in the default locale. */
    @NonNull
    private String getUpTime() {
        final long elapsedMs = SystemClock.elapsedRealtime();
        final long hours = TimeUnit.MILLISECONDS.toHours(elapsedMs);
        long remainderMs = elapsedMs - TimeUnit.HOURS.toMillis(hours);
        final long minutes = TimeUnit.MILLISECONDS.toMinutes(remainderMs);
        remainderMs -= TimeUnit.MINUTES.toMillis(minutes);
        final long seconds = TimeUnit.MILLISECONDS.toSeconds(remainderMs);
        return String.format(Locale.getDefault(), "%02d:%02d:%02d", hours, minutes, seconds);
    }

    /** Android release string, e.g. "7.1". */
    @NonNull
    private String getAndroidVersion() {
        return orEmpty(Build.VERSION.RELEASE);
    }

    /** Android SDK level as a decimal string. */
    @NonNull
    private String getApiLevel() {
        return String.valueOf(Build.VERSION.SDK_INT);
    }

    /** Bootloader version reported by the device. */
    @NonNull
    private String getBootLoader() {
        return orEmpty(Build.BOOTLOADER);
    }

    /** User-visible build id (Build.DISPLAY). */
    @NonNull
    private String getBuildId() {
        return orEmpty(Build.DISPLAY);
    }

    /** Kernel/OS architecture from the "os.arch" system property. */
    @NonNull
    private String getKernelArchitecture() {
        return orEmpty(System.getProperty("os.arch"));
    }

    /** Kernel version from the "os.version" system property. */
    @NonNull
    private String getKernelVersion() {
        return orEmpty(System.getProperty("os.version"));
    }

    /** Java VM name and version, or the empty string if either is missing. */
    @NonNull
    private String getJavaVM() {
        final String vmName = System.getProperty("java.vm.name");
        final String vmVersion = System.getProperty("java.vm.version");
        if (vmName == null || vmVersion == null) {
            return StringUtils.EMPTY;
        }
        return vmName + StringUtils.SPACE + vmVersion;
    }

    /** "yes"/"no" depending on whether RootUtil detects root access. */
    @NonNull
    private String isDeviceRooted() {
        return RootUtil.isDeviceRooted() ? "yes" : "no";
    }
}
|
package cat.udl.eps.softarch.hello.service;
import cat.udl.eps.softarch.hello.model.Film;
import cat.udl.eps.softarch.hello.model.Userfilm;
/**
 * Service contract for managing the films associated with a user.
 *
 * Created by http://rhizomik.net/~roberto/
 */
public interface UserGreetingsService {
    // Fetches a user together with that user's films.
    Userfilm getUserAndFilms(Long userId);
    // Associates a new film with a user and returns the stored entity.
    // NOTE(review): the parameter is named "greeting" but carries a Film —
    // looks like a leftover from a copied Greeting service; consider renaming.
    Film addFilmToUser(Film greeting);
    // Updates an existing film (identified by greetingId) and returns it.
    Film updateFilmFromUser(Film updateGreeting, Long greetingId);
    // Removes the film with the given id from its user.
    void removeFilmFromUser(Long greetingId);
}
|
package Homework2;
public class Point {

    int x, y;

    /**
     * Creates a point at the given coordinates.
     *
     * Bug fix: the original declared its parameters with the CYRILLIC letters
     * 'х'/'у' (U+0445/U+0443), visually identical to Latin x/y. The code only
     * worked because `this.x` referred to the Latin-named field while the bare
     * identifier was the Cyrillic parameter — a homoglyph trap for any future
     * edit. All identifiers are now plain ASCII.
     */
    public Point(int x, int y) {
        this.x = x;
        this.y = y;
    }

    /**
     * Euclidean distance from this point to the point (px, py).
     *
     * @return sqrt(dx^2 + dy^2)
     */
    public double distance(int px, int py) {
        int dx = this.x - px;
        int dy = this.y - py;
        return Math.sqrt(dx * dx + dy * dy);
    }

    /** Euclidean distance from this point to another point. */
    public double distance(Point p) {
        return distance(p.x, p.y);
    }
}
|
package com.braintreegateway;
import com.braintreegateway.util.NodeWrapper;
import java.util.List;
import java.util.ArrayList;
/**
 * A stored US bank account payment method, populated from a Braintree
 * gateway XML response.
 */
public class UsBankAccount implements PaymentMethod {
    private String routingNumber;
    private String last4;
    private String accountType;
    private String accountHolderName;
    private String token;
    private String imageUrl;
    private String bankName;
    private List<Subscription> subscriptions;
    private String customerId;
    private Boolean isDefault;
    private AchMandate achMandate;
    private List<UsBankAccountVerification> verifications;
    private Boolean isVerified;

    /**
     * Builds the account from a gateway response node; each field is read
     * from the correspondingly named XML element.
     */
    public UsBankAccount(NodeWrapper node) {
        this.routingNumber= node.findString("routing-number");
        this.last4 = node.findString("last-4");
        this.accountType = node.findString("account-type");
        this.accountHolderName = node.findString("account-holder-name");
        this.token = node.findString("token");
        this.imageUrl = node.findString("image-url");
        this.bankName = node.findString("bank-name");
        // Collect every subscription listed under subscriptions/subscription.
        this.subscriptions = new ArrayList<Subscription>();
        for (NodeWrapper subscriptionResponse : node.findAll("subscriptions/subscription")) {
            this.subscriptions.add(new Subscription(subscriptionResponse));
        }
        this.customerId = node.findString("customer-id");
        this.isDefault = node.findBoolean("default");
        // The ACH mandate element is optional.
        NodeWrapper achMandateNode = node.findFirst("ach-mandate");
        if (achMandateNode != null) {
            this.achMandate = new AchMandate(achMandateNode);
        }
        this.verifications = new ArrayList<UsBankAccountVerification>();
        for (NodeWrapper verification : node.findAll("verifications/us-bank-account-verification")) {
            this.verifications.add(new UsBankAccountVerification(verification));
        }
        this.isVerified = node.findBoolean("verified");
    }

    public String getRoutingNumber() {
        return routingNumber;
    }

    public String getLast4() {
        return last4;
    }

    public String getAccountType() {
        return accountType;
    }

    public String getAccountHolderName() {
        return accountHolderName;
    }

    public String getToken() {
        return token;
    }

    public String getImageUrl() {
        return imageUrl;
    }

    public String getBankName() {
        return bankName;
    }

    // NOTE(review): returns primitive boolean but the field is a Boolean set
    // from node.findBoolean("default") — this unboxes and would NPE if that
    // lookup can return null; confirm findBoolean's null behavior.
    public boolean isDefault() {
        return isDefault;
    }

    public AchMandate getAchMandate() {
        return achMandate;
    }

    public String getCustomerId() {
        return customerId;
    }

    public List<Subscription> getSubscriptions() {
        return subscriptions;
    }

    public List<UsBankAccountVerification> getVerifications() {
        return verifications;
    }

    public Boolean isVerified() {
        return isVerified;
    }
}
|
package com.qa.ims.services;
import java.util.List;
/**
 * Generic CRUD contract for a service managing entities of type {@code T}.
 * Interface members are implicitly public, so the redundant modifiers have
 * been dropped.
 */
public interface CrudServices<T> {

    /** Reads every stored instance. */
    List<T> readAll();

    /** Persists a new instance and returns the stored copy. */
    T create(T t);

    /** Applies changes to an existing instance and returns the result. */
    T update(T t);

    /** Removes the instance identified by the given key. */
    void delete(Object t);
}
|
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.idea.devkit.util;
import com.intellij.openapi.actionSystem.ActionGroup;
import com.intellij.openapi.actionSystem.AnAction;
import com.intellij.openapi.keymap.KeymapManager;
import com.intellij.psi.PsiClass;
import com.intellij.psi.JavaPsiFacade;
import com.intellij.psi.xml.XmlFile;
import com.intellij.psi.xml.XmlTag;
import com.intellij.util.IncorrectOperationException;
import org.jetbrains.annotations.NonNls;
/**
 * The two kinds of entries an IntelliJ plugin registers under the
 * {@code <actions>} element of plugin.xml: plain actions and action groups.
 * Each constant knows the base class an implementation must extend and the
 * XML tag name it is registered with.
 *
 * @author swr
 */
public enum ActionType {
    ACTION(AnAction.class, "action"),
    GROUP(ActionGroup.class, "group");

    // Fully qualified name of the required base class (AnAction/ActionGroup).
    public final String myClassName;
    // XML tag name used for this entry in plugin.xml ("action"/"group").
    private final String myName;

    /** Callback for iterating matching tags; return false to stop iteration. */
    public interface Processor {
        boolean process(ActionType type, XmlTag action);
    }

    ActionType(Class<? extends AnAction> clazz, @NonNls String name) {
        myClassName = clazz.getName();
        myName = name;
    }

    /**
     * Whether the given class inherits (directly or transitively) from this
     * type's base class, resolved in the class's own resolve scope.
     */
    public boolean isOfType(PsiClass klass) {
        final PsiClass psiClass = JavaPsiFacade.getInstance(klass.getProject()).findClass(myClassName, klass.getResolveScope());
        return psiClass != null && klass.isInheritor(psiClass, true);
    }

    /**
     * Visits every tag of this type under each physical {@code <actions>}
     * element of the given root tag. For ACTION, actions nested one level
     * inside {@code <group>} tags are visited as well. Iteration stops as
     * soon as the processor returns false.
     */
    public void process(XmlTag rootTag, Processor processor) {
        final XmlTag[] actions = rootTag.findSubTags("actions");
        for (XmlTag tag : actions) {
            // Skip non-physical (e.g. included/generated) tags.
            if (!tag.isPhysical()) continue;
            final XmlTag[] components = tag.getSubTags();
            for (XmlTag actionOrGroup : components) {
                if (myName.equals(actionOrGroup.getName())) {
                    if (!processor.process(this, actionOrGroup)) {
                        return;
                    }
                } else if (this == ACTION && GROUP.myName.equals(actionOrGroup.getName())) {
                    // When looking for actions, also descend into groups.
                    final XmlTag[] groupActions = actionOrGroup.findSubTags(myName);
                    for (XmlTag a : groupActions) {
                        if (!processor.process(this, a)) {
                            return;
                        }
                    }
                }
            }
        }
    }

    /**
     * Registers the given class in plugin.xml: under {@code <idea-plugin>},
     * finds or creates the {@code <actions>} element, adds an action/group
     * tag with id/class/text (and optional description) from the dialog
     * data, then optionally an {@code <add-to-group>} child (with anchor and,
     * for before/after anchors, a relative action id) and a default-keymap
     * {@code <keyboard-shortcut>} child for the configured keystrokes.
     */
    public void patchPluginXml(XmlFile pluginXml, PsiClass klass, ActionData dialog) throws IncorrectOperationException {
        final XmlTag rootTag = pluginXml.getDocument().getRootTag();
        if (rootTag != null && "idea-plugin".equals(rootTag.getName())) {
            XmlTag actions = rootTag.findFirstSubTag("actions");
            // Reuse an existing physical <actions> element or create one.
            if (actions == null || !actions.isPhysical()) {
                actions = (XmlTag)rootTag.add(rootTag.createChildTag("actions", rootTag.getNamespace(), null, false));
            }
            XmlTag actionTag = (XmlTag)actions.add(actions.createChildTag(myName, actions.getNamespace(), null, false));
            actionTag.setAttribute("id", dialog.getActionId());
            actionTag.setAttribute("class", klass.getQualifiedName());
            actionTag.setAttribute("text", dialog.getActionText());
            String description = dialog.getActionDescription();
            if (description != null && description.length() > 0) {
                actionTag.setAttribute("description", description);
            }
            String groupId = dialog.getSelectedGroupId();
            if (groupId != null) {
                XmlTag groupTag = (XmlTag)actionTag.add(actionTag.createChildTag("add-to-group", actions.getNamespace(), null, false));
                groupTag.setAttribute("group-id", groupId);
                @NonNls final String anchor = dialog.getSelectedAnchor();
                groupTag.setAttribute("anchor", anchor);
                // "before"/"after" anchors are relative to another action.
                if (anchor.equals("before") || anchor.equals("after")) {
                    groupTag.setAttribute("relative-to-action", dialog.getSelectedActionId());
                }
            }
            String firstKeyStroke = dialog.getFirstKeyStroke();
            if (firstKeyStroke != null && firstKeyStroke.length() > 0) {
                XmlTag keyTag = (XmlTag)actionTag.add(actionTag.createChildTag("keyboard-shortcut", actions.getNamespace(), null, false));
                keyTag.setAttribute("keymap", KeymapManager.DEFAULT_IDEA_KEYMAP);
                keyTag.setAttribute("first-keystroke", firstKeyStroke);
                final String secondKeyStroke = dialog.getSecondKeyStroke();
                if (secondKeyStroke != null && secondKeyStroke.length() > 0) {
                    keyTag.setAttribute("second-keystroke", secondKeyStroke);
                }
            }
        }
    }
}
|
/*
* Copyright 2018-2021 adorsys GmbH & Co KG
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.adorsys.psd2.xs2a.service.mapper.spi_xs2a_mappers;
import de.adorsys.psd2.xs2a.core.psu.PsuIdData;
import de.adorsys.psd2.xs2a.spi.domain.psu.SpiPsuData;
import de.adorsys.xs2a.reader.JsonReader;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
/** Unit test for {@code SpiToXs2aPsuDataMapper}. */
class SpiToXs2aPsuDataMapperTest {

    private SpiToXs2aPsuDataMapper mapper;
    private final JsonReader jsonReader = new JsonReader();

    @BeforeEach
    void setUp() {
        mapper = new SpiToXs2aPsuDataMapper();
    }

    @Test
    void mapToPsuIdData() {
        // Given: the expected PSU data loaded from the reference JSON file.
        PsuIdData expected = jsonReader.getObjectFromFile(
            "json/service/mapper/spi_xs2a_mappers/psu-id-data.json", PsuIdData.class);

        // When: mapping the SPI-side fixture.
        PsuIdData actual = mapper.mapToPsuIdData(buildSpiPsuData());

        // Then: the result matches the reference data.
        assertThat(actual).isEqualTo(expected);
    }

    /** Fixture: SPI PSU data mirroring the values in the expected JSON. */
    private SpiPsuData buildSpiPsuData() {
        return SpiPsuData.builder()
            .psuId("psu Id")
            .psuIdType("psuId Type")
            .psuCorporateId("psu Corporate Id")
            .psuCorporateIdType("psuCorporate Id Type")
            .psuIpAddress("psu IP address")
            .build();
    }
}
|
package com.ke.css.cimp.fhl.fhl2;
/* -----------------------------------------------------------------------------
* Rule_MESSAGE.java
* -----------------------------------------------------------------------------
*
* Producer : com.parse2.aparse.Parser 2.5
* Produced : Thu Feb 22 17:14:24 KST 2018
*
* -----------------------------------------------------------------------------
*/
import java.util.ArrayList;
/**
 * Parser rule for the MESSAGE production: MESSAGE = HEADER FHL.
 *
 * NOTE: this class is machine-generated (com.parse2.aparse.Parser 2.5, see
 * the header above) — do not hand-edit the parsing logic; regenerate instead.
 */
final public class Rule_MESSAGE extends Rule
{
    public Rule_MESSAGE(String spelling, ArrayList<Rule> rules)
    {
        super(spelling, rules);
    }

    /** Visitor dispatch for this rule node. */
    public Object accept(Visitor visitor)
    {
        return visitor.visit(this);
    }

    /**
     * Attempts to parse a MESSAGE at the current context position.
     * Returns the parsed rule, or null (resetting the position) on failure.
     */
    public static Rule_MESSAGE parse(ParserContext context)
    {
        context.push("MESSAGE");

        boolean parsed = true;
        int s0 = context.index;
        ParserAlternative a0 = new ParserAlternative(s0);

        // Single alternative: HEADER followed by FHL, each exactly once.
        ArrayList<ParserAlternative> as1 = new ArrayList<ParserAlternative>();
        parsed = false;
        {
            int s1 = context.index;
            ParserAlternative a1 = new ParserAlternative(s1);
            parsed = true;
            if (parsed)
            {
                // Exactly one HEADER.
                boolean f1 = true;
                int c1 = 0;
                for (int i1 = 0; i1 < 1 && f1; i1++)
                {
                    Rule rule = Rule_HEADER.parse(context);
                    if ((f1 = rule != null))
                    {
                        a1.add(rule, context.index);
                        c1++;
                    }
                }
                parsed = c1 == 1;
            }
            if (parsed)
            {
                // Exactly one FHL.
                boolean f1 = true;
                int c1 = 0;
                for (int i1 = 0; i1 < 1 && f1; i1++)
                {
                    Rule rule = Rule_FHL.parse(context);
                    if ((f1 = rule != null))
                    {
                        a1.add(rule, context.index);
                        c1++;
                    }
                }
                parsed = c1 == 1;
            }
            if (parsed)
            {
                as1.add(a1);
            }
            // Reset so alternative selection below re-applies the best match.
            context.index = s1;
        }

        // Commit the best (here: only) alternative, advancing the index.
        ParserAlternative b = ParserAlternative.getBest(as1);
        parsed = b != null;
        if (parsed)
        {
            a0.add(b.rules, b.end);
            context.index = b.end;
        }

        // Build the rule node on success; rewind the context on failure.
        Rule rule = null;
        if (parsed)
        {
            rule = new Rule_MESSAGE(context.text.substring(a0.start, a0.end), a0.rules);
        }
        else
        {
            context.index = s0;
        }

        context.pop("MESSAGE", parsed);
        return (Rule_MESSAGE)rule;
    }
}
/* -----------------------------------------------------------------------------
* eof
* -----------------------------------------------------------------------------
*/
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2019 Code Technology Studio
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package io.jpom.system;
import cn.hutool.core.convert.Convert;
import cn.hutool.core.util.ObjectUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.script.ScriptUtil;
import cn.jiangzeyin.common.spring.SpringUtil;
import io.jpom.system.extconf.DbExtConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Configuration;
import javax.annotation.Resource;
import java.util.concurrent.TimeUnit;
/**
 * External configuration bean: server-side settings injected from the
 * external configuration file / Spring environment.
 *
 * @author jiangzeyin
 * @date 2019/3/04
 */
@Configuration
public class ServerExtConfigBean {

    /**
     * Maximum number of user accounts the system may create.
     */
    @Value("${user.maxCount:10}")
    public int userMaxCount;

    /**
     * Consecutive failed logins allowed per user; beyond this the user is
     * automatically barred from logging in. Zero means no limit.
     */
    @Value("${user.alwaysLoginError:5}")
    public int userAlwaysLoginError;

    /**
     * How long an IP stays locked after consecutive failed logins, in
     * milliseconds. Stored as a string because the configured value may be
     * an arithmetic expression (e.g. "60*60*5*1000").
     */
    @Value("${user.ipErrorLockTime:60*60*5*1000}")
    private String ipErrorLockTime;

    // Cached evaluation of ipErrorLockTime; -1 means "not evaluated yet".
    private long ipErrorLockTimeValue = -1;

    /**
     * Tip text shown for the demo account.
     */
    @Value("${user.demoTip:}")
    private String userDemoTip;

    /**
     * author Hotstrip
     * Whether web access to the embedded H2 database console is enabled.
     *
     * @see <a href=http://${ip}:${port}/h2-console>http://${ip}:${port}/h2-console</a>
     */
    @Value("${spring.h2.console.enabled:false}")
    private boolean h2ConsoleEnabled;

    /**
     * Server API token; should be at least 6 characters mixing letters,
     * digits, and symbols.
     */
    @Value("${jpom.authorize.token:}")
    private String authorizeToken;

    /**
     * Login token expiry time in hours; default 24.
     */
    @Value("${jpom.authorize.expired:24}")
    private int authorizeExpired;

    /**
     * Auto-renewal window after login token expiry, in minutes; default 60.
     */
    @Value("${jpom.authorize.renewal:60}")
    private int authorizeRenewal;

    /**
     * Encryption key for the login token; a 16-character key is recommended.
     */
    @Value("${jpom.authorize.key:}")
    private String authorizeKey;

    /**
     * Maximum number of build history records kept overall.
     */
    @Value("${build.maxHistoryCount:1000}")
    private int buildMaxHistoryCount;

    /**
     * Maximum number of history records kept per build item.
     */
    @Value("${build.itemMaxHistoryCount:50}")
    private int buildItemMaxHistoryCount;

    // Whether to check build commands for dangerous delete operations.
    @Value("${build.checkDeleteCommand:true}")
    private Boolean buildCheckDeleteCommand;

    /**
     * Environment-initialization command run before executing commands over
     * SSH.
     */
    @Value("${ssh.initEnv:}")
    private String sshInitEnv;

    /**
     * File-upload timeout in seconds; the effective minimum is 5 seconds.
     */
    @Value("${node.uploadFileTimeOut:300}")
    private int uploadFileTimeOut;

    /**
     * Front-end API timeout in seconds.
     */
    @Value("${jpom.webApiTimeout:20}")
    private int webApiTimeout;

    /**
     * System name.
     */
    @Value("${jpom.name:}")
    private String name;

    /**
     * System subtitle; at most ~4 CJK characters recommended.
     */
    @Value("${jpom.subTitle:}")
    private String subTitle;

    /**
     * Login-page title.
     */
    @Value("${jpom.loginTitle:}")
    private String loginTitle;

    /**
     * Logo file path.
     */
    @Value("${jpom.logoFile:}")
    private String logoFile;

    /**
     * Whether the in-page guide/tour is disabled.
     */
    @Value("${jpom.disabledGuide:false}")
    private Boolean disabledGuide;

    /**
     * Interval for node heartbeat checks, in seconds.
     */
    @Value("${system.nodeHeartSecond:30}")
    private Integer nodeHeartSecond;

    @Resource
    private DbExtConfig dbExtConfig;

    /**
     * Returns the file-upload timeout, clamped to at least 5 seconds.
     *
     * @return timeout in milliseconds
     */
    public int getUploadFileTimeOut() {
        return Math.max(this.uploadFileTimeOut, 5) * 1000;
    }

    public String getSshInitEnv() {
        return StrUtil.emptyToDefault(this.sshInitEnv, "source /etc/profile && source ~/.bash_profile && source ~/.bashrc");
    }

    public String getAuthorizeToken() {
        return authorizeToken;
    }

    public long getIpErrorLockTime() {
        // Lazily evaluate the configured expression once and cache it.
        // NOTE(review): ScriptUtil.eval runs the configured string as a
        // script; the value comes from local config here, but confirm it can
        // never be user-supplied.
        if (this.ipErrorLockTimeValue == -1) {
            String str = StrUtil.emptyToDefault(this.ipErrorLockTime, "60*60*5*1000");
            this.ipErrorLockTimeValue = Convert.toLong(ScriptUtil.eval(str), TimeUnit.HOURS.toMillis(5));
        }
        return this.ipErrorLockTimeValue;
    }

    public int getBuildMaxHistoryCount() {
        return buildMaxHistoryCount;
    }

    public int getBuildItemMaxHistoryCount() {
        return buildItemMaxHistoryCount;
    }

    public int getAuthorizeExpired() {
        return authorizeExpired;
    }

    public int getAuthorizeRenewal() {
        return authorizeRenewal;
    }

    public boolean isH2ConsoleEnabled() {
        return h2ConsoleEnabled;
    }

    // Falls back to a built-in default key when none is configured.
    public byte[] getAuthorizeKey() {
        return StrUtil.emptyToDefault(this.authorizeKey, "KZQfFBJTW2v6obS1").getBytes();
    }

    public boolean getBuildCheckDeleteCommand() {
        return buildCheckDeleteCommand != null && buildCheckDeleteCommand;
    }

    /**
     * Returns the front-end API timeout, clamped to at least 10 seconds.
     *
     * @return timeout in seconds
     */
    public int getWebApiTimeout() {
        return Math.max(this.webApiTimeout, 10);
    }

    public String getName() {
        return StrUtil.emptyToDefault(name, "Jpom项目管理系统");
    }

    public String getSubTitle() {
        return StrUtil.emptyToDefault(subTitle, "项目管理");
    }

    public String getLoginTitle() {
        return StrUtil.emptyToDefault(loginTitle, "登录JPOM");
    }

    public String getLogoFile() {
        return logoFile;
    }

    public String getUserDemoTip() {
        return userDemoTip;
    }

    public boolean getDisabledGuide() {
        return Convert.toBool(disabledGuide, false);
    }

    // Heartbeat interval, defaulting to 30 and clamped to at least 5 seconds.
    public int getNodeHeartSecond() {
        int integer = ObjectUtil.defaultIfNull(nodeHeartSecond, 30);
        return Math.max(integer, 5);
    }

    /**
     * Singleton accessor via the Spring context.
     *
     * @return this
     */
    public static ServerExtConfigBean getInstance() {
        return SpringUtil.getBean(ServerExtConfigBean.class);
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.coyote.http11;
import java.io.IOException;
import java.io.EOFException;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import org.apache.tomcat.jni.Socket;
import org.apache.tomcat.jni.Status;
import org.apache.tomcat.util.buf.ByteChunk;
import org.apache.tomcat.util.buf.MessageBytes;
import org.apache.tomcat.util.http.MimeHeaders;
import org.apache.tomcat.util.res.StringManager;
import org.apache.coyote.InputBuffer;
import org.apache.coyote.Request;
import org.apache.juli.logging.Log;
import org.apache.juli.logging.LogFactory;
/**
* Implementation of InputBuffer which provides HTTP request header parsing as
* well as transfer decoding.
*
* @author <a href="mailto:remm@apache.org">Remy Maucherat</a>
*/
public class InternalAprInputBuffer extends AbstractInputBuffer {
    private static final Log log = LogFactory.getLog(InternalAprInputBuffer.class);
    // ----------------------------------------------------------- Constructors
    /**
     * Alternate constructor.
     *
     * @param request the associated Coyote request to populate during parsing
     * @param headerBufferSize maximum size in bytes of the HTTP header block
     */
    public InternalAprInputBuffer(Request request, int headerBufferSize) {
        this.request = request;
        headers = request.getMimeHeaders();
        buf = new byte[headerBufferSize];
        // Direct buffer for the native read; sized in 1500-byte units
        // (presumably MTU-sized chunks — TODO confirm rationale upstream)
        if (headerBufferSize < (8 * 1024)) {
            bbuf = ByteBuffer.allocateDirect(6 * 1500);
        } else {
            bbuf = ByteBuffer.allocateDirect((headerBufferSize / 1500 + 1) * 1500);
        }
        inputStreamInputBuffer = new SocketInputBuffer();
        filterLibrary = new InputFilter[0];
        activeFilters = new InputFilter[0];
        lastActiveFilter = -1;
        parsingHeader = true;
        swallowInput = true;
    }
    // -------------------------------------------------------------- Variables
    /**
     * The string manager for this package.
     */
    protected static StringManager sm =
        StringManager.getManager(Constants.Package);
    // ----------------------------------------------------- Instance Variables
    /**
     * Associated Coyote request.
     */
    protected Request request;
    /**
     * Headers of the associated request.
     */
    protected MimeHeaders headers;
    /**
     * State: true while the request line / headers are being parsed.
     */
    protected boolean parsingHeader;
    /**
     * Swallow input ? (in the case of an expectation)
     */
    protected boolean swallowInput;
    /**
     * Pointer to the current read buffer.
     */
    protected byte[] buf;
    /**
     * Last valid byte (exclusive end of the data currently in {@link #buf}).
     */
    protected int lastValid;
    /**
     * Position in the buffer.
     */
    protected int pos;
    /**
     * Pos of the end of the header in the buffer, which is also the
     * start of the body.
     */
    protected int end;
    /**
     * Direct byte buffer used to perform actual reading.
     */
    protected ByteBuffer bbuf;
    /**
     * Underlying socket (APR socket handle).
     */
    protected long socket;
    /**
     * Underlying input buffer.
     */
    protected InputBuffer inputStreamInputBuffer;
    /**
     * Filter library.
     * Note: Filter[0] is always the "chunked" filter.
     */
    protected InputFilter[] filterLibrary;
    /**
     * Active filters (in order).
     */
    protected InputFilter[] activeFilters;
    /**
     * Index of the last active filter.
     */
    protected int lastActiveFilter;
    // ------------------------------------------------------------- Properties
    /**
     * Set the underlying socket, and register the direct buffer with it
     * so that native reads land in {@link #bbuf}.
     */
    public void setSocket(long socket) {
        this.socket = socket;
        Socket.setrbb(this.socket, bbuf);
    }
    /**
     * Get the underlying socket input stream.
     */
    public long getSocket() {
        return socket;
    }
    /**
     * Add an input filter to the filter library.
     */
    public void addFilter(InputFilter filter) {
        InputFilter[] newFilterLibrary =
            new InputFilter[filterLibrary.length + 1];
        for (int i = 0; i < filterLibrary.length; i++) {
            newFilterLibrary[i] = filterLibrary[i];
        }
        newFilterLibrary[filterLibrary.length] = filter;
        filterLibrary = newFilterLibrary;
        // active filter array is re-sized to match, discarding prior entries
        activeFilters = new InputFilter[filterLibrary.length];
    }
    /**
     * Get filters.
     */
    public InputFilter[] getFilters() {
        return filterLibrary;
    }
    /**
     * Clear filters.
     */
    public void clearFilters() {
        filterLibrary = new InputFilter[0];
        lastActiveFilter = -1;
    }
    /**
     * Add an input filter to the active filter chain; duplicates are ignored.
     * The first active filter reads from the socket buffer; subsequent ones
     * chain onto the previously added filter.
     */
    public void addActiveFilter(InputFilter filter) {
        if (lastActiveFilter == -1) {
            filter.setBuffer(inputStreamInputBuffer);
        } else {
            for (int i = 0; i <= lastActiveFilter; i++) {
                if (activeFilters[i] == filter)
                    return;
            }
            filter.setBuffer(activeFilters[lastActiveFilter]);
        }
        activeFilters[++lastActiveFilter] = filter;
        filter.setRequest(request);
    }
    /**
     * Set the swallow input flag.
     */
    public void setSwallowInput(boolean swallowInput) {
        this.swallowInput = swallowInput;
    }
    // --------------------------------------------------------- Public Methods
    /**
     * Recycle the input buffer. This should be called when closing the
     * connection.
     */
    public void recycle() {
        // Recycle Request object
        request.recycle();
        // Recycle filters
        for (int i = 0; i <= lastActiveFilter; i++) {
            activeFilters[i].recycle();
        }
        socket = 0;
        lastValid = 0;
        pos = 0;
        lastActiveFilter = -1;
        parsingHeader = true;
        swallowInput = true;
    }
    /**
     * End processing of current HTTP request.
     * Note: All bytes of the current request should have been already
     * consumed. This method only resets all the pointers so that we are ready
     * to parse the next HTTP request.
     */
    public void nextRequest() {
        // Recycle Request object
        request.recycle();
        // Copy leftover bytes (e.g. a pipelined request) to the beginning
        // of the buffer
        if (lastValid - pos > 0 && pos > 0) {
            System.arraycopy(buf, pos, buf, 0, lastValid - pos);
        }
        // Recycle filters
        for (int i = 0; i <= lastActiveFilter; i++) {
            activeFilters[i].recycle();
        }
        // Reset pointers
        lastValid = lastValid - pos;
        pos = 0;
        lastActiveFilter = -1;
        parsingHeader = true;
        swallowInput = true;
    }
    /**
     * End request (consumes leftover bytes).
     *
     * @throws IOException an underlying I/O error occurred
     */
    public void endRequest()
        throws IOException {
        if (swallowInput && (lastActiveFilter != -1)) {
            int extraBytes = (int) activeFilters[lastActiveFilter].end();
            // rewind pos so the extra bytes are re-read as the next request
            pos = pos - extraBytes;
        }
    }
    /**
     * Read the request line. This function is meant to be used during the
     * HTTP request header parsing. Do NOT attempt to read the request body
     * using it.
     *
     * @throws IOException If an exception occurs during the underlying socket
     * read operations, or if the given buffer is not big enough to accommodate
     * the whole line.
     * @return true if data is properly fed; false if no data is available
     * immediately and thread should be freed
     */
    public boolean parseRequestLine(boolean useAvailableData)
        throws IOException {
        int start = 0;
        //
        // Skipping blank lines
        //
        byte chr = 0;
        do {
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (useAvailableData) {
                    return false;
                }
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            chr = buf[pos++];
        } while ((chr == Constants.CR) || (chr == Constants.LF));
        pos--;
        // Mark the current buffer position
        start = pos;
        if (pos >= lastValid) {
            if (useAvailableData) {
                return false;
            }
            if (!fill())
                throw new EOFException(sm.getString("iib.eof.error"));
        }
        //
        // Reading the method name
        // Method name is always US-ASCII
        //
        boolean space = false;
        while (!space) {
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            // Spec says no CR or LF in method name
            if (buf[pos] == Constants.CR || buf[pos] == Constants.LF) {
                throw new IllegalArgumentException(
                        sm.getString("iib.invalidmethod"));
            }
            // Spec says single SP but it also says be tolerant of HT
            if (buf[pos] == Constants.SP || buf[pos] == Constants.HT) {
                space = true;
                request.method().setBytes(buf, start, pos - start);
            }
            pos++;
        }
        // Spec says single SP but also says be tolerant of multiple and/or HT
        while (space) {
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            if (buf[pos] == Constants.SP || buf[pos] == Constants.HT) {
                pos++;
            } else {
                space = false;
            }
        }
        // Mark the current buffer position
        start = pos;
        // NOTE(review): this local shadows the instance field 'end' (header
        // end marker); the field is only updated in parseHeaders()
        int end = 0;
        int questionPos = -1;
        //
        // Reading the URI
        //
        boolean eol = false;
        while (!space) {
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            // Spec says single SP but it also says be tolerant of HT
            if (buf[pos] == Constants.SP || buf[pos] == Constants.HT) {
                space = true;
                end = pos;
            } else if ((buf[pos] == Constants.CR)
                       || (buf[pos] == Constants.LF)) {
                // HTTP/0.9 style request
                eol = true;
                space = true;
                end = pos;
            } else if ((buf[pos] == Constants.QUESTION)
                       && (questionPos == -1)) {
                questionPos = pos;
            }
            pos++;
        }
        request.unparsedURI().setBytes(buf, start, end - start);
        if (questionPos >= 0) {
            request.queryString().setBytes(buf, questionPos + 1,
                                           end - questionPos - 1);
            request.requestURI().setBytes(buf, start, questionPos - start);
        } else {
            request.requestURI().setBytes(buf, start, end - start);
        }
        // Spec says single SP but also says be tolerant of multiple and/or HT
        while (space) {
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            if (buf[pos] == Constants.SP || buf[pos] == Constants.HT) {
                pos++;
            } else {
                space = false;
            }
        }
        // Mark the current buffer position
        start = pos;
        end = 0;
        //
        // Reading the protocol
        // Protocol is always US-ASCII
        //
        while (!eol) {
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            if (buf[pos] == Constants.CR) {
                end = pos;
            } else if (buf[pos] == Constants.LF) {
                if (end == 0)
                    end = pos;
                eol = true;
            }
            pos++;
        }
        if ((end - start) > 0) {
            request.protocol().setBytes(buf, start, end - start);
        } else {
            // HTTP/0.9: no protocol token present on the request line
            request.protocol().setString("");
        }
        return true;
    }
    /**
     * Parse the HTTP headers.
     */
    public void parseHeaders()
        throws IOException {
        while (parseHeader()) {
        }
        parsingHeader = false;
        // record where the header block ends / the body begins
        end = pos;
    }
    /**
     * Parse an HTTP header.
     *
     * @return false after reading a blank line (which indicates that the
     * HTTP header parsing is done)
     */
    @SuppressWarnings("null") // headerValue cannot be null
    public boolean parseHeader()
        throws IOException {
        //
        // Check for blank line
        //
        byte chr = 0;
        while (true) {
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            chr = buf[pos];
            if (chr == Constants.CR) {
                // Skip
            } else if (chr == Constants.LF) {
                pos++;
                return false;
            } else {
                break;
            }
            pos++;
        }
        // Mark the current buffer position
        int start = pos;
        //
        // Reading the header name
        // Header name is always US-ASCII
        //
        boolean colon = false;
        MessageBytes headerValue = null;
        while (!colon) {
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            if (buf[pos] == Constants.COLON) {
                colon = true;
                headerValue = headers.addValue(buf, start, pos - start);
            } else if (!HTTP_TOKEN_CHAR[buf[pos]]) {
                // NOTE(review): buf[pos] is a signed byte; a value >= 0x80
                // indexes negatively into HTTP_TOKEN_CHAR and would throw
                // ArrayIndexOutOfBoundsException — confirm how the caller
                // handles non-ASCII header bytes
                // If a non-token header is detected, skip the line and
                // ignore the header
                skipLine(start);
                return true;
            }
            chr = buf[pos];
            if ((chr >= Constants.A) && (chr <= Constants.Z)) {
                // lower-case the header name in place
                buf[pos] = (byte) (chr - Constants.LC_OFFSET);
            }
            pos++;
        }
        // Mark the current buffer position
        start = pos;
        int realPos = pos;
        //
        // Reading the header value (which can be spanned over multiple lines)
        //
        boolean eol = false;
        boolean validLine = true;
        while (validLine) {
            boolean space = true;
            // Skipping spaces
            while (space) {
                // Read new bytes if needed
                if (pos >= lastValid) {
                    if (!fill())
                        throw new EOFException(sm.getString("iib.eof.error"));
                }
                if ((buf[pos] == Constants.SP) || (buf[pos] == Constants.HT)) {
                    pos++;
                } else {
                    space = false;
                }
            }
            int lastSignificantChar = realPos;
            // Reading bytes until the end of the line; the value is compacted
            // in place (realPos trails pos) so CR/LF bytes are dropped
            while (!eol) {
                // Read new bytes if needed
                if (pos >= lastValid) {
                    if (!fill())
                        throw new EOFException(sm.getString("iib.eof.error"));
                }
                if (buf[pos] == Constants.CR) {
                    // Skip
                } else if (buf[pos] == Constants.LF) {
                    eol = true;
                } else if (buf[pos] == Constants.SP) {
                    buf[realPos] = buf[pos];
                    realPos++;
                } else {
                    buf[realPos] = buf[pos];
                    realPos++;
                    lastSignificantChar = realPos;
                }
                pos++;
            }
            // trim trailing whitespace
            realPos = lastSignificantChar;
            // Checking the first character of the new line. If the character
            // is a LWS, then it's a multiline header
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            chr = buf[pos];
            if ((chr != Constants.SP) && (chr != Constants.HT)) {
                validLine = false;
            } else {
                eol = false;
                // Copying one extra space in the buffer (since there must
                // be at least one space inserted between the lines)
                buf[realPos] = chr;
                realPos++;
            }
        }
        // Set the header value
        headerValue.setBytes(buf, start, realPos - start);
        return true;
    }
    /**
     * Consume the remainder of the current (invalid) header line, logging
     * the discarded content at debug level.
     *
     * @param start position of the first byte of the skipped header line
     */
    private void skipLine(int start) throws IOException {
        boolean eol = false;
        int lastRealByte = start;
        if (pos - 1 > start) {
            lastRealByte = pos - 1;
        }
        while (!eol) {
            // Read new bytes if needed
            if (pos >= lastValid) {
                if (!fill())
                    throw new EOFException(sm.getString("iib.eof.error"));
            }
            if (buf[pos] == Constants.CR) {
                // Skip
            } else if (buf[pos] == Constants.LF) {
                eol = true;
            } else {
                lastRealByte = pos;
            }
            pos++;
        }
        if (log.isDebugEnabled()) {
            log.debug(sm.getString("iib.invalidheader", new String(buf, start,
                    lastRealByte - start + 1, "ISO-8859-1")));
        }
    }
    /**
     * Available bytes in the buffer or the active filters (note that, due to
     * encoding, this number may not correspond to the number of
     * application-visible bytes).
     */
    public int available() {
        int result = (lastValid - pos);
        if ((result == 0) && (lastActiveFilter >= 0)) {
            for (int i = 0; (result == 0) && (i <= lastActiveFilter); i++) {
                result = activeFilters[i].available();
            }
        }
        return result;
    }
    // ---------------------------------------------------- InputBuffer Methods
    /**
     * Read some bytes, either directly from the socket buffer or through the
     * last active filter (which chains to the others).
     */
    public int doRead(ByteChunk chunk, Request req)
        throws IOException {
        if (lastActiveFilter == -1)
            return inputStreamInputBuffer.doRead(chunk, req);
        else
            return activeFilters[lastActiveFilter].doRead(chunk,req);
    }
    // ------------------------------------------------------ Protected Methods
    /**
     * Fill the internal buffer using data from the underlying input stream.
     *
     * @return false if at end of stream
     */
    protected boolean fill()
        throws IOException {
        int nRead = 0;
        if (parsingHeader) {
            // Header phase: the header must fit entirely in buf
            if (lastValid == buf.length) {
                throw new IllegalArgumentException
                    (sm.getString("iib.requestheadertoolarge.error"));
            }
            bbuf.clear();
            nRead = Socket.recvbb(socket, 0, buf.length - lastValid);
            if (nRead > 0) {
                bbuf.limit(nRead);
                bbuf.get(buf, pos, nRead);
                lastValid = pos + nRead;
            } else {
                if ((-nRead) == Status.EAGAIN) {
                    // non-blocking read: no data available yet
                    return false;
                } else {
                    throw new IOException(sm.getString("iib.failedread"));
                }
            }
        } else {
            // Body phase: body bytes are appended after the header (at 'end')
            if (buf.length - end < 4500) {
                // In this case, the request header was really large, so we allocate a
                // brand new one; the old one will get GCed when subsequent requests
                // clear all references
                buf = new byte[buf.length];
                end = 0;
            }
            pos = end;
            lastValid = pos;
            bbuf.clear();
            nRead = Socket.recvbb(socket, 0, buf.length - lastValid);
            if (nRead > 0) {
                bbuf.limit(nRead);
                bbuf.get(buf, pos, nRead);
                lastValid = pos + nRead;
            } else {
                if ((-nRead) == Status.ETIMEDOUT || (-nRead) == Status.TIMEUP) {
                    throw new SocketTimeoutException(sm.getString("iib.failedread"));
                } else if (nRead == 0) {
                    // APR_STATUS_IS_EOF, since native 1.1.22
                    return false;
                } else {
                    throw new IOException(sm.getString("iib.failedread"));
                }
            }
        }
        return (nRead > 0);
    }
    // ------------------------------------- InputStreamInputBuffer Inner Class
    /**
     * This class is an input buffer which will read its data from an input
     * stream.
     */
    protected class SocketInputBuffer
        implements InputBuffer {
        /**
         * Read bytes into the specified chunk. Hands the chunk a view of the
         * remaining buffered bytes (no copy) and marks them consumed.
         */
        public int doRead(ByteChunk chunk, Request req )
            throws IOException {
            if (pos >= lastValid) {
                if (!fill())
                    return -1;
            }
            int length = lastValid - pos;
            chunk.setBytes(buf, pos, length);
            pos = lastValid;
            return (length);
        }
    }
}
|
package com.ruoyi.system.service.impl;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import com.ruoyi.common.annotation.DataScope;
import com.ruoyi.common.constant.UserConstants;
import com.ruoyi.common.core.domain.entity.SysRole;
import com.ruoyi.common.core.domain.entity.SysUser;
import com.ruoyi.common.core.text.Convert;
import com.ruoyi.common.exception.BusinessException;
import com.ruoyi.common.utils.StringUtils;
import com.ruoyi.common.utils.security.Md5Utils;
import com.ruoyi.system.domain.SysPost;
import com.ruoyi.system.domain.SysUserPost;
import com.ruoyi.system.domain.SysUserRole;
import com.ruoyi.system.mapper.SysPostMapper;
import com.ruoyi.system.mapper.SysRoleMapper;
import com.ruoyi.system.mapper.SysUserMapper;
import com.ruoyi.system.mapper.SysUserPostMapper;
import com.ruoyi.system.mapper.SysUserRoleMapper;
import com.ruoyi.system.service.ISysConfigService;
import com.ruoyi.system.service.ISysUserService;
/**
 * User service implementation (business layer).
 *
 * @author ruoyi
 */
@Service
public class SysUserServiceImpl implements ISysUserService
{
    private static final Logger log = LoggerFactory.getLogger(SysUserServiceImpl.class);
    @Autowired
    private SysUserMapper userMapper;
    @Autowired
    private SysRoleMapper roleMapper;
    @Autowired
    private SysPostMapper postMapper;
    @Autowired
    private SysUserPostMapper userPostMapper;
    @Autowired
    private SysUserRoleMapper userRoleMapper;
    @Autowired
    private ISysConfigService configService;
    /**
     * Query the user list by the given criteria (paginated via the caller's
     * page context; data scope is applied by the {@code @DataScope} aspect).
     *
     * @param user query criteria
     * @return list of matching users
     */
    @Override
    @DataScope(deptAlias = "d", userAlias = "u")
    public List<SysUser> selectUserList(SysUser user)
    {
        return userMapper.selectUserList(user);
    }
    /**
     * Query users that have already been allocated the role given in the
     * criteria (paginated).
     *
     * @param user query criteria (including the role)
     * @return list of matching users
     */
    @Override
    @DataScope(deptAlias = "d", userAlias = "u")
    public List<SysUser> selectAllocatedList(SysUser user)
    {
        return userMapper.selectAllocatedList(user);
    }
    /**
     * Query users that have NOT been allocated the role given in the
     * criteria (paginated).
     *
     * @param user query criteria (including the role)
     * @return list of matching users
     */
    @Override
    @DataScope(deptAlias = "d", userAlias = "u")
    public List<SysUser> selectUnallocatedList(SysUser user)
    {
        return userMapper.selectUnallocatedList(user);
    }
    /**
     * Look up a user by login name.
     *
     * @param userName login name
     * @return the user, or null when not found
     */
    @Override
    public SysUser selectUserByLoginName(String userName)
    {
        return userMapper.selectUserByLoginName(userName);
    }
    /**
     * Look up a user by phone number.
     *
     * @param phoneNumber phone number
     * @return the user, or null when not found
     */
    @Override
    public SysUser selectUserByPhoneNumber(String phoneNumber)
    {
        return userMapper.selectUserByPhoneNumber(phoneNumber);
    }
    /**
     * Look up a user by e-mail address.
     *
     * @param email e-mail address
     * @return the user, or null when not found
     */
    @Override
    public SysUser selectUserByEmail(String email)
    {
        return userMapper.selectUserByEmail(email);
    }
    /**
     * Look up a user by ID.
     *
     * @param userId user ID
     * @return the user, or null when not found
     */
    @Override
    public SysUser selectUserById(Long userId)
    {
        return userMapper.selectUserById(userId);
    }
    /**
     * List the user-role associations of a user.
     *
     * @param userId user ID
     * @return list of user-role association rows
     */
    @Override
    public List<SysUserRole> selectUserRoleByUserId(Long userId)
    {
        return userRoleMapper.selectUserRoleByUserId(userId);
    }
    /**
     * Delete a user by ID, together with its role and post associations.
     *
     * @param userId user ID
     * @return number of user rows deleted
     */
    @Override
    @Transactional
    public int deleteUserById(Long userId)
    {
        // remove user-role associations
        userRoleMapper.deleteUserRoleByUserId(userId);
        // remove user-post associations
        userPostMapper.deleteUserPostByUserId(userId);
        return userMapper.deleteUserById(userId);
    }
    /**
     * Batch-delete users. Rejects the whole batch if any target is the
     * super administrator.
     *
     * @param ids comma-separated IDs of the users to delete
     * @return number of user rows deleted
     */
    @Override
    @Transactional
    public int deleteUserByIds(String ids) throws BusinessException
    {
        Long[] userIds = Convert.toLongArray(ids);
        for (Long userId : userIds)
        {
            checkUserAllowed(new SysUser(userId));
        }
        // remove user-role associations
        userRoleMapper.deleteUserRole(userIds);
        // remove user-post associations
        userPostMapper.deleteUserPost(userIds);
        return userMapper.deleteUserByIds(userIds);
    }
    /**
     * Insert a new user together with its post and role associations.
     *
     * @param user user to insert (roleIds/postIds carried on the object)
     * @return number of user rows inserted
     */
    @Override
    @Transactional
    public int insertUser(SysUser user)
    {
        // insert the user row
        int rows = userMapper.insertUser(user);
        // insert user-post associations
        insertUserPost(user);
        // insert user-role associations
        insertUserRole(user.getUserId(), user.getRoleIds());
        return rows;
    }
    /**
     * Register a self-signed-up user (marked with the register user type).
     *
     * @param user user to register
     * @return true if the insert succeeded
     */
    @Override
    public boolean registerUser(SysUser user)
    {
        user.setUserType(UserConstants.REGISTER_USER_TYPE);
        return userMapper.insertUser(user) > 0;
    }
    /**
     * Update a user and rebuild its role and post associations.
     *
     * @param user user to update (roleIds/postIds carried on the object)
     * @return number of user rows updated
     */
    @Override
    @Transactional
    public int updateUser(SysUser user)
    {
        Long userId = user.getUserId();
        // remove user-role associations
        userRoleMapper.deleteUserRoleByUserId(userId);
        // re-insert user-role associations
        insertUserRole(user.getUserId(), user.getRoleIds());
        // remove user-post associations
        userPostMapper.deleteUserPostByUserId(userId);
        // re-insert user-post associations
        insertUserPost(user);
        return userMapper.updateUser(user);
    }
    /**
     * Update a user's profile fields only (no association changes).
     *
     * @param user user to update
     * @return number of rows updated
     */
    @Override
    public int updateUserInfo(SysUser user)
    {
        return userMapper.updateUser(user);
    }
    /**
     * Grant roles to a user, replacing any existing grants.
     *
     * @param userId user ID
     * @param roleIds role IDs to grant
     */
    @Override
    public void insertUserAuth(Long userId, Long[] roleIds)
    {
        userRoleMapper.deleteUserRoleByUserId(userId);
        insertUserRole(userId, roleIds);
    }
    /**
     * Reset a user's password (delegates to a plain user update).
     *
     * @param user user carrying the new password
     * @return number of rows updated
     */
    @Override
    public int resetUserPwd(SysUser user)
    {
        return updateUserInfo(user);
    }
    /**
     * Batch-insert user-role associations. No-op when roleIds is null.
     *
     * @param userId user ID
     * @param roleIds role IDs to associate with the user
     */
    public void insertUserRole(Long userId, Long[] roleIds)
    {
        if (StringUtils.isNotNull(roleIds))
        {
            // build the user-role rows and insert them in one batch
            List<SysUserRole> list = new ArrayList<SysUserRole>();
            for (Long roleId : roleIds)
            {
                SysUserRole ur = new SysUserRole();
                ur.setUserId(userId);
                ur.setRoleId(roleId);
                list.add(ur);
            }
            if (list.size() > 0)
            {
                userRoleMapper.batchUserRole(list);
            }
        }
    }
    /**
     * Batch-insert user-post associations. No-op when the user carries no
     * post IDs.
     *
     * @param user user whose postIds are associated
     */
    public void insertUserPost(SysUser user)
    {
        Long[] posts = user.getPostIds();
        if (StringUtils.isNotNull(posts))
        {
            // build the user-post rows and insert them in one batch
            List<SysUserPost> list = new ArrayList<SysUserPost>();
            for (Long postId : posts)
            {
                SysUserPost up = new SysUserPost();
                up.setUserId(user.getUserId());
                up.setPostId(postId);
                list.add(up);
            }
            if (list.size() > 0)
            {
                userPostMapper.batchUserPost(list);
            }
        }
    }
    /**
     * Check whether a login name is unique.
     *
     * @param loginName login name
     * @return a UserConstants code: unique / not unique
     */
    @Override
    public String checkLoginNameUnique(String loginName)
    {
        int count = userMapper.checkLoginNameUnique(loginName);
        if (count > 0)
        {
            return UserConstants.USER_NAME_NOT_UNIQUE;
        }
        return UserConstants.USER_NAME_UNIQUE;
    }
    /**
     * Check whether a phone number is unique (ignoring the user itself on
     * update; a missing ID is treated as -1 so inserts compare against all).
     *
     * @param user user carrying the phone number and optionally an ID
     * @return a UserConstants code: unique / not unique
     */
    @Override
    public String checkPhoneUnique(SysUser user)
    {
        Long userId = StringUtils.isNull(user.getUserId()) ? -1L : user.getUserId();
        SysUser info = userMapper.checkPhoneUnique(user.getPhonenumber());
        if (StringUtils.isNotNull(info) && info.getUserId().longValue() != userId.longValue())
        {
            return UserConstants.USER_PHONE_NOT_UNIQUE;
        }
        return UserConstants.USER_PHONE_UNIQUE;
    }
    /**
     * Check whether an e-mail address is unique (ignoring the user itself on
     * update).
     *
     * @param user user carrying the e-mail and optionally an ID
     * @return a UserConstants code: unique / not unique
     */
    @Override
    public String checkEmailUnique(SysUser user)
    {
        Long userId = StringUtils.isNull(user.getUserId()) ? -1L : user.getUserId();
        SysUser info = userMapper.checkEmailUnique(user.getEmail());
        if (StringUtils.isNotNull(info) && info.getUserId().longValue() != userId.longValue())
        {
            return UserConstants.USER_EMAIL_NOT_UNIQUE;
        }
        return UserConstants.USER_EMAIL_UNIQUE;
    }
    /**
     * Verify the target user may be operated on; throws when the target is
     * the super administrator.
     *
     * @param user target user
     */
    @Override
    public void checkUserAllowed(SysUser user)
    {
        if (StringUtils.isNotNull(user.getUserId()) && user.isAdmin())
        {
            throw new BusinessException("不允许操作超级管理员用户");
        }
    }
    /**
     * Comma-joined names of the roles a user belongs to.
     *
     * @param userId user ID
     * @return role names joined by ",", or an empty string
     */
    @Override
    public String selectUserRoleGroup(Long userId)
    {
        List<SysRole> list = roleMapper.selectRolesByUserId(userId);
        // NOTE(review): StringBuffer could be StringBuilder (no shared state)
        StringBuffer idsStr = new StringBuffer();
        for (SysRole role : list)
        {
            idsStr.append(role.getRoleName()).append(",");
        }
        if (StringUtils.isNotEmpty(idsStr.toString()))
        {
            // drop the trailing comma
            return idsStr.substring(0, idsStr.length() - 1);
        }
        return idsStr.toString();
    }
    /**
     * Comma-joined names of the posts a user holds.
     *
     * @param userId user ID
     * @return post names joined by ",", or an empty string
     */
    @Override
    public String selectUserPostGroup(Long userId)
    {
        List<SysPost> list = postMapper.selectPostsByUserId(userId);
        // NOTE(review): StringBuffer could be StringBuilder (no shared state)
        StringBuffer idsStr = new StringBuffer();
        for (SysPost post : list)
        {
            idsStr.append(post.getPostName()).append(",");
        }
        if (StringUtils.isNotEmpty(idsStr.toString()))
        {
            // drop the trailing comma
            return idsStr.substring(0, idsStr.length() - 1);
        }
        return idsStr.toString();
    }
    /**
     * Import a batch of users. New users are inserted with the configured
     * initial password; existing users are updated only when
     * isUpdateSupport is true, otherwise counted as failures.
     *
     * @param userList users to import
     * @param isUpdateSupport whether an existing user should be updated
     * @param operName operator recorded as creator/updater
     * @return an HTML summary of the import result
     * @throws BusinessException when the list is empty or any row failed
     */
    @Override
    public String importUser(List<SysUser> userList, Boolean isUpdateSupport, String operName)
    {
        if (StringUtils.isNull(userList) || userList.size() == 0)
        {
            throw new BusinessException("导入用户数据不能为空!");
        }
        int successNum = 0;
        int failureNum = 0;
        StringBuilder successMsg = new StringBuilder();
        StringBuilder failureMsg = new StringBuilder();
        String password = configService.selectConfigByKey("sys.user.initPassword");
        for (SysUser user : userList)
        {
            try
            {
                // check whether this user already exists
                SysUser u = userMapper.selectUserByLoginName(user.getLoginName());
                if (StringUtils.isNull(u))
                {
                    user.setPassword(Md5Utils.hash(user.getLoginName() + password));
                    user.setCreateBy(operName);
                    this.insertUser(user);
                    successNum++;
                    successMsg.append("<br/>" + successNum + "、账号 " + user.getLoginName() + " 导入成功");
                }
                else if (isUpdateSupport)
                {
                    user.setUpdateBy(operName);
                    this.updateUser(user);
                    successNum++;
                    successMsg.append("<br/>" + successNum + "、账号 " + user.getLoginName() + " 更新成功");
                }
                else
                {
                    failureNum++;
                    failureMsg.append("<br/>" + failureNum + "、账号 " + user.getLoginName() + " 已存在");
                }
            }
            catch (Exception e)
            {
                failureNum++;
                String msg = "<br/>" + failureNum + "、账号 " + user.getLoginName() + " 导入失败:";
                failureMsg.append(msg + e.getMessage());
                log.error(msg, e);
            }
        }
        if (failureNum > 0)
        {
            failureMsg.insert(0, "很抱歉,导入失败!共 " + failureNum + " 条数据格式不正确,错误如下:");
            throw new BusinessException(failureMsg.toString());
        }
        else
        {
            successMsg.insert(0, "恭喜您,数据已全部导入成功!共 " + successNum + " 条,数据如下:");
        }
        return successMsg.toString();
    }
    /**
     * Change a user's status (enable/disable; delegates to a plain update).
     *
     * @param user user carrying the new status
     * @return number of rows updated
     */
    @Override
    public int changeStatus(SysUser user)
    {
        return userMapper.updateUser(user);
    }
}
|
package org.jgroups.protocols.pbcast;
import org.jgroups.*;
import org.jgroups.annotations.*;
import org.jgroups.conf.AttributeType;
import org.jgroups.protocols.TCP;
import org.jgroups.protocols.TP;
import org.jgroups.stack.DiagnosticsHandler;
import org.jgroups.stack.Protocol;
import org.jgroups.util.*;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;
import java.util.function.Predicate;
import java.util.function.Supplier;
/**
* Negative AcKnowledgement layer (NAKs). Messages are assigned a monotonically
* increasing sequence number (seqno). Receivers deliver messages ordered
* according to seqno and request retransmission of missing messages.<br/>
* Retransmit requests are usually sent to the original sender of a message, but
* this can be changed by xmit_from_random_member (send to random member) or
* use_mcast_xmit_req (send to everyone). Responses can also be sent to everyone
* instead of the requester by setting use_mcast_xmit to true.
*
* @author Bela Ban
*/
@MBean(description="Reliable transmission multipoint FIFO protocol")
public class NAKACK2 extends Protocol implements DiagnosticsHandler.ProbeHandler {
protected static final int NUM_REBROADCAST_MSGS=3;
/* ----------------------------------------------------- Properties --------------------- ------------------------------------ */
/**
* Retransmit messages using multicast rather than unicast. This has the advantage that, if many receivers
* lost a message, the sender only retransmits once
*/
@Property(description="Retransmit retransmit responses (messages) using multicast rather than unicast")
protected boolean use_mcast_xmit=true;
/**
* Use a multicast to request retransmission of missing messages. This may
* be costly as every member in the cluster will send a response
*/
@Property(description="Use a multicast to request retransmission of missing messages")
protected boolean use_mcast_xmit_req;
/**
* Ask a random member for retransmission of a missing message. If set to
* true, discard_delivered_msgs will be set to false
*/
@Property(description="Ask a random member for retransmission of a missing message. Default is false")
protected boolean xmit_from_random_member;
/**
* Messages that have been received in order are sent up the stack (= delivered to the application).
* Delivered messages are removed from the retransmission buffer, so they can get GC'ed by the JVM. When this
* property is true, everyone (except the sender of a message) removes the message from their retransission
* buffers as soon as it has been delivered to the application
*/
@Property(description="Should messages delivered to application be discarded")
protected boolean discard_delivered_msgs=true;
@Property(description="Timeout to rebroadcast messages. Default is 2000 msec",type=AttributeType.TIME)
protected long max_rebroadcast_timeout=2000;
/** If true, logs messages discarded because received from other members */
@Property(description="discards warnings about promiscuous traffic")
protected boolean log_discard_msgs=true;
@Property(description="If false, trashes warnings about retransmission messages not found in the xmit_table (used for testing)")
protected boolean log_not_found_msgs=true;
@Property(description="Interval (in milliseconds) at which missing messages (from all retransmit buffers) " +
"are retransmitted",type=AttributeType.TIME)
protected long xmit_interval=1000;
@Property(description="Number of rows of the matrix in the retransmission table (only for experts)",writable=false)
protected int xmit_table_num_rows=100;
@Property(description="Number of elements of a row of the matrix in the retransmission table; gets rounded to the " +
"next power of 2 (only for experts). The capacity of the matrix is xmit_table_num_rows * xmit_table_msgs_per_row",
writable=false)
protected int xmit_table_msgs_per_row=1024;
@Property(description="Resize factor of the matrix in the retransmission table (only for experts)",writable=false)
protected double xmit_table_resize_factor=1.2;
@Property(description="Number of milliseconds after which the matrix in the retransmission table " +
"is compacted (only for experts)",writable=false,type=AttributeType.TIME)
protected long xmit_table_max_compaction_time=10000;
@Property(description="Size of the queue to hold messages received after creating the channel, but before being " +
"connected (is_server=false). After becoming the server, the messages in the queue are fed into up() and the " +
"queue is cleared. The motivation is to avoid retransmissions (see https://issues.jboss.org/browse/JGRP-1509 " +
"for details). 0 disables the queue.")
protected int become_server_queue_size=50;
@Property(description="Time during which identical warnings about messages from a non member will be suppressed. " +
"0 disables this (every warning will be logged). Setting the log level to ERROR also disables this.",
type=AttributeType.TIME)
protected long suppress_time_non_member_warnings=60000;
@Property(description="Max number of messages to ask for in a retransmit request. 0 disables this and uses " +
"the max bundle size in the transport",type=AttributeType.SCALAR)
protected int max_xmit_req_size;
@Property(description="If enabled, multicasts the highest sent seqno every xmit_interval ms. This is skipped if " +
"a regular message has been multicast, and the task aquiesces if the highest sent seqno hasn't changed for " +
"resend_last_seqno_max_times times. Used to speed up retransmission of dropped last messages (JGRP-1904)")
protected boolean resend_last_seqno=true;
@Property(description="Max number of times the last seqno is resent before acquiescing if last seqno isn't incremented")
protected int resend_last_seqno_max_times=1;
@ManagedAttribute(description="True if sending a message can block at the transport level")
protected boolean sends_can_block=true;
/* -------------------------------------------------- JMX ---------------------------------------------------------- */
@ManagedAttribute(description="Number of messages sent",type=AttributeType.SCALAR)
protected int num_messages_sent;
@ManagedAttribute(description="Number of messages received",type=AttributeType.SCALAR)
protected int num_messages_received;
// Placeholder stored in the retransmit table for OOB messages that were already delivered on
// arrival; saves keeping a second copy of the real message
protected static final Message DUMMY_OOB_MSG=new EmptyMessage().setFlag(Message.Flag.OOB);
// Accepts messages which are (1) non-null, (2) no DUMMY_OOB_MSGs and (3) not OOB_DELIVERED.
// Also rejects loopback messages flagged DONT_LOOPBACK (they must not be delivered to self).
// Note: setFlagIfAbsent() both tests and marks OOB_DELIVERED atomically.
protected final Predicate<Message> no_dummy_and_no_oob_delivered_msgs_and_no_dont_loopback_msgs=msg ->
    msg != null && msg != DUMMY_OOB_MSG
    && (!msg.isFlagSet(Message.Flag.OOB) || msg.setFlagIfAbsent(Message.TransientFlag.OOB_DELIVERED))
    && !(msg.isFlagSet(Message.TransientFlag.DONT_LOOPBACK) && this.local_addr != null && this.local_addr.equals(msg.getSrc()));
// Used by Table.add() in send() to drop DONT_LOOPBACK messages on removal
protected static final Predicate<Message> dont_loopback_filter=msg -> msg != null && msg.isFlagSet(Message.TransientFlag.DONT_LOOPBACK);
// Accumulates removed messages into a MessageBatch (see removeAndDeliver())
protected static final BiConsumer<MessageBatch,Message> BATCH_ACCUMULATOR=MessageBatch::add;
// LongAdders (not AtomicLongs): these are write-mostly counters updated from many threads
@ManagedAttribute(description="Number of retransmit requests received",type=AttributeType.SCALAR)
protected final LongAdder xmit_reqs_received=new LongAdder();
@ManagedAttribute(description="Number of retransmit requests sent",type=AttributeType.SCALAR)
protected final LongAdder xmit_reqs_sent=new LongAdder();
@ManagedAttribute(description="Number of retransmit responses received",type=AttributeType.SCALAR)
protected final LongAdder xmit_rsps_received=new LongAdder();
@ManagedAttribute(description="Number of retransmit responses sent",type=AttributeType.SCALAR)
protected final LongAdder xmit_rsps_sent=new LongAdder();
/** Returns true if the periodic retransmission task has been scheduled and hasn't completed. */
@ManagedAttribute(description="Is the retransmit task running")
public boolean isXmitTaskRunning() {
    Future<?> task=xmit_task; // read the volatile-ish field once
    return task != null && !task.isDone();
}
/** Returns the number of distinct non-member senders currently tracked by the suppress log (0 if disabled). */
@ManagedAttribute(description="Number of messages from non-members",type=AttributeType.SCALAR)
public int getNonMemberMessages() {
    SuppressLog<Address> suppress_log=this.suppress_log_non_member;
    return suppress_log == null? 0 : suppress_log.getCache().size();
}
/** Empties the suppress-log cache of non-member senders; no-op when suppression is disabled. */
@ManagedOperation(description="Clears the cache for messages from non-members")
public void clearNonMemberCache() {
    SuppressLog<Address> suppress_log=this.suppress_log_non_member;
    if(suppress_log == null)
        return;
    suppress_log.getCache().clear();
}
/**
 * Enables or disables the periodic resending of the highest sent seqno. Enabling lazily creates
 * the {@link LastSeqnoResender}; disabling discards it.
 */
@ManagedAttribute
public void setResendLastSeqno(boolean flag) {
    resend_last_seqno=flag;
    if(!resend_last_seqno)
        last_seqno_resender=null;          // disabled: drop the resender
    else if(last_seqno_resender == null)
        last_seqno_resender=new LastSeqnoResender(); // enabled: create on demand
}
// Fluent variant of setResendLastSeqno()
public NAKACK2 resendLastSeqno(boolean flag) {setResendLastSeqno(flag); return this;}
@ManagedAttribute(description="Whether or not the task to resend the last seqno is running (depends on resend_last_seqno)")
public boolean resendTaskRunning() {return last_seqno_resender != null;}
// Cached copy of log.isTraceEnabled(); avoids calling it on the hot send/receive path.
// Refreshed by setLevel() and writable via JMX.
@ManagedAttribute(description="tracing is enabled or disabled for the given log",writable=true)
protected boolean is_trace=log.isTraceEnabled();
/* ------------------------------------------------- Fields ------------------------------------------------------------------------- */
// True once this node is a full group member (set on VIEW_CHANGE / BECOME_SERVER); until then,
// incoming messages are queued or dropped (see queueMessage())
protected volatile boolean is_server;
protected Address local_addr;
protected volatile List<Address> members=new ArrayList<>();
protected volatile View view;
private final AtomicLong seqno=new AtomicLong(0); // current message sequence number (starts with 1)
/** Map to store sent and received messages (keyed by sender) */
protected final ConcurrentMap<Address,Table<Message>> xmit_table=Util.createConcurrentMap();
/** RetransmitTask running every xmit_interval ms */
protected Future<?> xmit_task;
/** Used by the retransmit task to keep the last retransmitted seqno per sender (https://issues.jboss.org/browse/JGRP-1539) */
protected final Map<Address,Long> xmit_task_map=new ConcurrentHashMap<>();
protected volatile boolean leaving=false;  // set on DISCONNECT; suppresses non-member warnings
protected volatile boolean running=false;  // true between start() and stop(); send() drops messages when false
protected TimeScheduler timer=null;
protected LastSeqnoResender last_seqno_resender;  // non-null iff resend_last_seqno is enabled
protected final Lock rebroadcast_lock=new ReentrantLock();
protected final Condition rebroadcast_done=rebroadcast_lock.newCondition();
// set during processing of a rebroadcast event
protected volatile boolean rebroadcasting=false;
protected final Lock rebroadcast_digest_lock=new ReentrantLock();
@GuardedBy("rebroadcast_digest_lock")
protected Digest rebroadcast_digest=null;
/** Keeps the last N stability messages */
protected final BoundedList<String> stability_msgs=new BoundedList<>(10);
/** Keeps a bounded list of the last N digest sets */
protected final BoundedList<String> digest_history=new BoundedList<>(10);
// Messages received before this node became a server; flushed by flushBecomeServerQueue().
// Null when become_server_queue_size == 0 (queueing disabled)
protected BoundedList<Message> become_server_queue;
/** Log to suppress identical warnings for messages from non-members */
protected SuppressLog<Address> suppress_log_non_member;
/* ----------------------- Simple accessors; setters return this for fluent chaining ----------------------- */
public long getXmitRequestsReceived() {return xmit_reqs_received.sum();}
public long getXmitRequestsSent() {return xmit_reqs_sent.sum();}
public long getXmitResponsesReceived() {return xmit_rsps_received.sum();}
public long getXmitResponsesSent() {return xmit_rsps_sent.sum();}
public boolean useMcastXmit() {return use_mcast_xmit;}
public NAKACK2 useMcastXmit(boolean u) {this.use_mcast_xmit=u; return this;}
public boolean useMcastXmitReq() {return use_mcast_xmit_req;}
public NAKACK2 useMcastXmitReq(boolean flag) {this.use_mcast_xmit_req=flag; return this;}
public boolean xmitFromRandomMember() {return xmit_from_random_member;}
public NAKACK2 xmitFromRandomMember(boolean x) {this.xmit_from_random_member=x; return this;}
public boolean discardDeliveredMsgs() {return discard_delivered_msgs;}
public NAKACK2 discardDeliveredMsgs(boolean d) {this.discard_delivered_msgs=d; return this;}
public boolean logDiscardMessages() {return log_discard_msgs;}
public NAKACK2 logDiscardMessages(boolean l) {this.log_discard_msgs=l; return this;}
public boolean logNotFoundMessages() {return log_not_found_msgs;}
public NAKACK2 logNotFoundMessages(boolean flag) {log_not_found_msgs=flag; return this;}
public NAKACK2 setResendLastSeqnoMaxTimes(int n) {this.resend_last_seqno_max_times=n; return this;}
public int getResendLastSeqnoMaxTimes() {return resend_last_seqno_max_times;}
// JavaBean-style duplicates of the fluent setters above (kept for reflection-based configuration)
public NAKACK2 setXmitFromRandomMember(boolean xmit_from_random_member) {
    this.xmit_from_random_member=xmit_from_random_member; return this;
}
public NAKACK2 setDiscardDeliveredMsgs(boolean discard_delivered_msgs) {
    this.discard_delivered_msgs=discard_delivered_msgs;
    return this;
}
public long getMaxRebroadcastTimeout() {return max_rebroadcast_timeout;}
public NAKACK2 setMaxRebroadcastTimeout(long m) {this.max_rebroadcast_timeout=m; return this;}
public long getXmitInterval() {return xmit_interval;}
public NAKACK2 setXmitInterval(long x) {this.xmit_interval=x; return this;}
public int getXmitTableNumRows() {return xmit_table_num_rows;}
public NAKACK2 setXmitTableNumRows(int x) {this.xmit_table_num_rows=x; return this;}
public int getXmitTableMsgsPerRow() {return xmit_table_msgs_per_row;}
public NAKACK2 setXmitTableMsgsPerRow(int x) {this.xmit_table_msgs_per_row=x; return this;}
public double getXmitTableResizeFactor() {return xmit_table_resize_factor;}
public NAKACK2 setXmitTableResizeFactor(double x) {this.xmit_table_resize_factor=x; return this;}
public long getXmitTableMaxCompactionTime() {return xmit_table_max_compaction_time;}
public NAKACK2 setXmitTableMaxCompactionTime(long x) {this.xmit_table_max_compaction_time=x; return this;}
public int getBecomeServerQueueSize() {return become_server_queue_size;}
public NAKACK2 setBecomeServerQueueSize(int b) {this.become_server_queue_size=b; return this;}
public long getSuppressTimeNonMemberWarnings() {return suppress_time_non_member_warnings;}
public NAKACK2 setSuppressTimeNonMemberWarnings(long s) {this.suppress_time_non_member_warnings=s; return this;}
public int getMaxXmitReqSize() {return max_xmit_req_size;}
public NAKACK2 setMaxXmitReqSize(int m) {this.max_xmit_req_size=m; return this;}
public boolean sendsCanBlock() {return sends_can_block;}
public NAKACK2 sendsCanBlock(boolean s) {this.sends_can_block=s; return this;}
public int getNumMessagesSent() {return num_messages_sent;}
public NAKACK2 setNumMessagesSent(int n) {this.num_messages_sent=n; return this;}
public int getNumMessagesReceived() {return num_messages_received;}
public NAKACK2 setNumMessagesReceived(int n) {this.num_messages_received=n; return this;}
public boolean isTrace() {return is_trace;}
public NAKACK2 isTrace(boolean i) {this.is_trace=i; return this;}
/**
 * Sets the log level via the superclass and refreshes the cached {@code is_trace} flag
 * (cached so the hot send/receive paths don't call log.isTraceEnabled() per message).
 */
public <T extends Protocol> T setLevel(String level) {
    T retval=super.setLevel(level);
    is_trace=log.isTraceEnabled();
    return retval;
}
/** Returns the current number of queued pre-server messages, or -1 when queueing is disabled. */
@ManagedAttribute(description="Actual size of the become_server_queue",type=AttributeType.SCALAR)
public int getBecomeServerQueueSizeActual() {
    BoundedList<Message> queue=become_server_queue;
    return queue == null? -1 : queue.size();
}
/** Returns the receive window for sender; only used for testing. Do not use ! */
public Table<Message> getWindow(Address sender) {
    return xmit_table.get(sender);
}
/** Only used for unit tests, don't use ! */
public void setTimer(TimeScheduler timer) {this.timer=timer;}
/** Sums the sizes of all per-sender retransmit buffers (messages received but not yet delivered/purged). */
@ManagedAttribute(description="Total number of undelivered messages in all retransmit buffers",type=AttributeType.SCALAR)
public int getXmitTableUndeliveredMsgs() {
    return xmit_table.values().stream().mapToInt(Table::size).sum();
}
/** Sums the number of known-missing (requested but not yet received) seqnos across all buffers. */
@ManagedAttribute(description="Total number of missing (= not received) messages in all retransmit buffers"
    ,type=AttributeType.SCALAR)
public int getXmitTableMissingMessages() {
    return xmit_table.values().stream().mapToInt(Table::getNumMissing).sum();
}
/** Returns the capacity of the local send buffer, or 0 when no local address / buffer exists yet. */
@ManagedAttribute(description="Capacity of the retransmit buffer. Computed as xmit_table_num_rows * xmit_table_msgs_per_row")
public long getXmitTableCapacity() {
    Table<Message> table=local_addr == null? null : xmit_table.get(local_addr);
    if(table == null)
        return 0;
    return table.capacity();
}
/** Returns the number of rows currently allocated in the local send buffer's matrix (0 if absent). */
@ManagedAttribute(description="Prints the number of rows currently allocated in the matrix. This value will not " +
    "be lower than xmit_table_now_rows")
public int getXmitTableNumCurrentRows() {
    Table<Message> table=local_addr == null? null : xmit_table.get(local_addr);
    if(table == null)
        return 0;
    return table.getNumRows();
}
/** Total payload bytes of all buffered messages (Message.getLength(), excluding headers). */
@ManagedAttribute(description="Returns the number of bytes of all messages in all retransmit buffers. " +
    "To compute the size, Message.getLength() is used",type=AttributeType.BYTES)
public long getSizeOfAllMessages() {
    return xmit_table.values().stream()
      .mapToLong(buf -> sizeOfAllMessages(buf, false))
      .sum();
}
/** Total serialized bytes of all buffered messages (Message.size(), including headers). */
@ManagedAttribute(description="Returns the number of bytes of all messages in all retransmit buffers. " +
    "To compute the size, Message.size() is used",type=AttributeType.BYTES)
public long getSizeOfAllMessagesInclHeaders() {
    return xmit_table.values().stream()
      .mapToLong(buf -> sizeOfAllMessages(buf, true))
      .sum();
}
/** Returns the retransmit buffer for the local address, or null if no address / buffer yet.
 *  Extracted: the null-safe lookup was duplicated in each of the four stat getters below. */
private Table<Message> localXmitTable() {
    return local_addr != null? xmit_table.get(local_addr) : null;
}
@ManagedAttribute(description="Number of retransmit table compactions")
public int getXmitTableNumCompactions() {
    Table<Message> table=localXmitTable();
    return table != null? table.getNumCompactions() : 0;
}
@ManagedAttribute(description="Number of retransmit table moves")
public int getXmitTableNumMoves() {
    Table<Message> table=localXmitTable();
    return table != null? table.getNumMoves() : 0;
}
@ManagedAttribute(description="Number of retransmit table resizes")
public int getXmitTableNumResizes() {
    Table<Message> table=localXmitTable();
    return table != null? table.getNumResizes() : 0;
}
@ManagedAttribute(description="Number of retransmit table purges")
public int getXmitTableNumPurges() {
    Table<Message> table=localXmitTable();
    return table != null? table.getNumPurges() : 0;
}
/** Dumps each member's receive window as "addr: <table>" lines, prefixed by the local address. */
@ManagedOperation(description="Prints the contents of the receiver windows for all members")
public String printMessages() {
    StringBuilder sb=new StringBuilder();
    sb.append(local_addr).append(":\n");
    xmit_table.forEach((addr, buf) -> sb.append(addr).append(": ").append(buf).append('\n'));
    return sb.toString();
}
// Highest seqno assigned so far by this sender (0 before the first send)
@ManagedAttribute public long getCurrentSeqno() {return seqno.get();}
@ManagedOperation(description="Prints the stability messages received")
public String printStabilityMessages() {
    return Util.printListWithDelimiter(stability_msgs, "\n");
}
/** Lists the last N digest set/merge operations, one per line, prefixed by the local address. */
@ManagedOperation(description="Keeps information about the last N times a digest was set or merged")
public String printDigestHistory() {
    StringBuilder out=new StringBuilder();
    out.append(local_addr).append(":\n");
    for(String entry: digest_history)
        out.append(entry).append('\n');
    return out.toString();
}
/** Compacts the local send buffer's backing matrix, if one exists. */
@ManagedOperation(description="Compacts the retransmit buffer")
public void compact() {
    if(local_addr == null)
        return;
    Table<Message> table=xmit_table.get(local_addr);
    if(table != null)
        table.compact();
}
/** Dumps "member: numRows" for every member's retransmit buffer, one per line. */
@ManagedOperation(description="Prints the number of rows currently allocated in the matrix for all members. " +
    "This value will not be lower than xmit_table_now_rows")
public String dumpXmitTablesNumCurrentRows() {
    StringBuilder sb=new StringBuilder();
    xmit_table.forEach((addr, table) -> sb.append(String.format("%s: %d\n", addr, table.getNumRows())));
    return sb.toString();
}
/** Zeroes all counters, clears the bounded history lists, and resets the local buffer's stats. */
@ManagedOperation(description="Resets all statistics")
public void resetStats() {
    num_messages_sent=0;
    num_messages_received=0;
    xmit_reqs_received.reset();
    xmit_reqs_sent.reset();
    xmit_rsps_received.reset();
    xmit_rsps_sent.reset();
    stability_msgs.clear();
    digest_history.clear();
    if(local_addr != null) {
        Table<Message> table=xmit_table.get(local_addr);
        if(table != null)
            table.resetStats();
    }
}
/**
 * Validates and reconciles configuration: resolves incompatible property combinations, adapts
 * multicast-related settings to the transport's capabilities, allocates the optional
 * become-server queue and suppress log, and derives the effective max retransmit-request size.
 */
public void init() throws Exception {
    // xmit_from_random_member needs delivered messages to stay in the buffer, so it wins
    if(xmit_from_random_member && discard_delivered_msgs) {
        discard_delivered_msgs=false;
        log.debug("%s: xmit_from_random_member set to true: changed discard_delivered_msgs to false", local_addr);
    }
    TP transport=getTransport();
    sends_can_block=transport instanceof TCP; // UDP and TCP_NIO2 won't block
    transport.registerProbeHandler(this);
    // mcast-based retransmission requires a multicast-capable transport
    if(!transport.supportsMulticasting()) {
        if(use_mcast_xmit) {
            log.debug(Util.getMessage("NoMulticastTransport"), "use_mcast_xmit", transport.getName(), "use_mcast_xmit");
            use_mcast_xmit=false;
        }
        if(use_mcast_xmit_req) {
            log.debug(Util.getMessage("NoMulticastTransport"), "use_mcast_xmit_req", transport.getName(), "use_mcast_xmit_req");
            use_mcast_xmit_req=false;
        }
    }
    if(become_server_queue_size > 0)
        become_server_queue=new BoundedList<>(become_server_queue_size);
    if(suppress_time_non_member_warnings > 0)
        suppress_log_non_member=new SuppressLog<>(log, "MsgDroppedNak", "SuppressMsg");
    // max bundle size (minus overhead) divided by <long size> times bits per long
    int estimated_max_msgs_in_xmit_req=(transport.getMaxBundleSize() -50) * Global.LONG_SIZE;
    int old_max_xmit_size=max_xmit_req_size;
    // 0 means "no user limit": take the transport-derived estimate; otherwise cap the user value by it
    if(max_xmit_req_size <= 0)
        max_xmit_req_size=estimated_max_msgs_in_xmit_req;
    else
        max_xmit_req_size=Math.min(max_xmit_req_size, estimated_max_msgs_in_xmit_req);
    if(old_max_xmit_size != max_xmit_req_size)
        log.trace("%s: set max_xmit_req_size from %d to %d", local_addr, old_max_xmit_size, max_xmit_req_size);
    // materialize the LastSeqnoResender if the property was enabled via config
    if(resend_last_seqno)
        setResendLastSeqno(resend_last_seqno);
}
/** Advertises the digest-related events this protocol handles on behalf of layers above it. */
public List<Integer> providedUpServices() {
    return Arrays.asList(Event.GET_DIGEST,Event.SET_DIGEST,Event.OVERWRITE_DIGEST,Event.MERGE_DIGEST);
}
/**
 * Starts the protocol: grabs the transport's timer (required for retransmission scheduling),
 * marks the protocol running and kicks off the periodic retransmit task.
 * @throws Exception if the transport provides no timer
 */
public void start() throws Exception {
    timer=getTransport().getTimer();
    if(timer == null)
        throw new Exception("timer is null");
    running=true;
    leaving=false;
    startRetransmitTask();
}
/**
 * Stops the protocol: halts sending (running=false), reverts to non-server state, discards any
 * queued pre-server messages, cancels the retransmit task and clears all retransmission state.
 */
public void stop() {
    running=false;
    is_server=false;
    if(become_server_queue != null)
        become_server_queue.clear();
    stopRetransmitTask();
    xmit_task_map.clear();
    reset();
}
/**
 * <b>Callback</b>. Called by superclass when event may be handled.<p> <b>Do not use {@code down_prot.down()} in this
 * method as the event is passed down by default by the superclass after this method returns !</b>
 */
public Object down(Event evt) {
    switch(evt.getType()) {
        case Event.STABLE: // generated by STABLE layer. Delete stable messages passed in arg
            stable(evt.getArg());
            return null; // do not pass down further (Bela Aug 7 2001)
        case Event.GET_DIGEST:
            return getDigest(evt.getArg());
        case Event.SET_DIGEST:
            setDigest(evt.getArg());
            return null;
        case Event.OVERWRITE_DIGEST:
            overwriteDigest(evt.getArg());
            return null;
        case Event.MERGE_DIGEST:
            mergeDigest(evt.getArg());
            return null;
        case Event.TMP_VIEW:
            // note: tmp_view is declared here and reused by the VIEW_CHANGE case below
            // (legal in Java: case labels share the switch block's scope)
            View tmp_view=evt.getArg();
            members=tmp_view.getMembers();
            break;
        case Event.VIEW_CHANGE:
            tmp_view=evt.getArg();
            List<Address> mbrs=tmp_view.getMembers();
            members=mbrs;
            view=tmp_view;
            adjustReceivers(mbrs); // add/remove per-member retransmit buffers to match the view
            is_server=true; // check vids from now on
            if(suppress_log_non_member != null)
                suppress_log_non_member.removeExpired(suppress_time_non_member_warnings);
            xmit_task_map.keySet().retainAll(mbrs); // drop retransmit bookkeeping for departed members
            break;
        case Event.BECOME_SERVER:
            is_server=true;
            flushBecomeServerQueue(); // deliver messages queued while we were still joining
            break;
        case Event.SET_LOCAL_ADDRESS:
            local_addr=evt.getArg();
            break;
        case Event.DISCONNECT:
            leaving=true;
            reset();
            break;
        case Event.REBROADCAST:
            // synchronously rebroadcast until the digest is satisfied (or cancelled), then clear state
            rebroadcasting=true;
            rebroadcast_digest=evt.getArg();
            try {
                rebroadcastMessages();
            }
            finally {
                rebroadcasting=false;
                rebroadcast_digest_lock.lock();
                try {
                    rebroadcast_digest=null;
                }
                finally {
                    rebroadcast_digest_lock.unlock();
                }
            }
            return null;
    }
    return down_prot.down(evt);
}
/**
 * Intercepts multicast sends: wraps them in a seqno-carrying header via send(). Unicasts and
 * messages flagged NO_RELIABILITY bypass NAKACK2 entirely.
 */
public Object down(Message msg) {
    boolean bypass=msg.getDest() != null || msg.isFlagSet(Message.Flag.NO_RELIABILITY);
    if(bypass)
        return down_prot.down(msg); // unicast address: not null and not mcast, pass down unchanged
    send(msg);
    return null; // don't pass down the stack
}
/**
 * <b>Callback</b>. Called by superclass when event may be handled.<p> <b>Do not use {@code passUp} in this
 * method as the event is passed up by default by the superclass after this method returns !</b>
 */
public Object up(Event evt) {
    int type=evt.getType();
    if(type == Event.STABLE) { // generated by STABLE layer. Delete stable messages passed in arg
        stable(evt.getArg());
        return null; // do not pass up further (Bela Aug 7 2001)
    }
    // SUSPECT: release the promise if rebroadcasting is in progress... otherwise we wait forever.
    // There will be a new flush round anyway
    if(type == Event.SUSPECT && rebroadcasting)
        cancelRebroadcasting();
    return up_prot.up(evt);
}
/**
 * Handles a single incoming message: dispatches on the NAKACK2 header type (regular message,
 * retransmit request/response, highest-seqno announcement). Messages without a NAKACK2 header
 * or flagged NO_RELIABILITY are passed up unchanged; messages arriving before this node is a
 * server are queued (or dropped).
 */
public Object up(Message msg) {
    if(msg.isFlagSet(Message.Flag.NO_RELIABILITY))
        return up_prot.up(msg);
    NakAckHeader2 hdr=msg.getHeader(this.id);
    if(hdr == null)
        return up_prot.up(msg); // pass up (e.g. unicast msg)
    if(!is_server) { // discard messages while not yet server (i.e., until JOIN has returned)
        queueMessage(msg, hdr.seqno);
        return null;
    }
    switch(hdr.type) {
        case NakAckHeader2.MSG:
            handleMessage(msg, hdr);
            return null; // transmitter passes message up for us !
        case NakAckHeader2.XMIT_REQ:
            try {
                SeqnoList missing=msg.getObject();
                if(missing != null)
                    handleXmitReq(msg.getSrc(), missing, hdr.sender);
            }
            catch(Exception e) {
                log.error("failed deserializing retransmission list", e);
            }
            return null;
        case NakAckHeader2.XMIT_RSP:
            handleXmitRsp(msg, hdr);
            return null;
        case NakAckHeader2.HIGHEST_SEQNO:
            handleHighestSeqno(msg.getSrc(), hdr.seqno);
            return null;
        default:
            log.error(Util.getMessage("HeaderTypeNotKnown"), local_addr, hdr.type);
            return null;
    }
}
/**
 * Batch variant of {@link #up(Message)}: NAKACK2 messages are removed from the batch and
 * processed here (regular and retransmitted messages are collected and handed to
 * handleMessages()); anything left in the batch afterwards is passed up unchanged.
 */
public void up(MessageBatch batch) {
    int size=batch.size();
    boolean got_retransmitted_msg=false; // if at least 1 XMIT-RSP was received
    List<LongTuple<Message>> msgs=null; // regular or retransmitted messages
    for(Iterator<Message> it=batch.iterator(); it.hasNext();) {
        final Message msg=it.next();
        NakAckHeader2 hdr;
        if(msg == null || msg.isFlagSet(Message.Flag.NO_RELIABILITY) || (hdr=msg.getHeader(id)) == null)
            continue;
        it.remove(); // remove the message from the batch, so it won't be passed up the stack
        if(!is_server) { // discard messages while not yet server (i.e., until JOIN has returned)
            queueMessage(msg, hdr.seqno);
            continue;
        }
        switch(hdr.type) {
            case NakAckHeader2.MSG:
                if(msgs == null)
                    msgs=new ArrayList<>(size); // allocated lazily: many batches carry no NAKACK2 messages
                msgs.add(new LongTuple<>(hdr.seqno, msg));
                break;
            case NakAckHeader2.XMIT_REQ:
                try {
                    SeqnoList missing=msg.getObject();
                    if(missing != null)
                        handleXmitReq(msg.getSrc(), missing, hdr.sender);
                }
                catch(Exception e) {
                    log.error("failed deserializing retransmission list", e);
                }
                break;
            case NakAckHeader2.XMIT_RSP:
                // unwrap the retransmitted original and treat it like a regular message
                Message xmitted_msg=msgFromXmitRsp(msg, hdr);
                if(xmitted_msg != null) {
                    if(msgs == null)
                        msgs=new ArrayList<>(size);
                    msgs.add(new LongTuple<>(hdr.seqno, xmitted_msg));
                    got_retransmitted_msg=true;
                }
                break;
            case NakAckHeader2.HIGHEST_SEQNO:
                handleHighestSeqno(batch.sender(), hdr.seqno);
                break;
            default:
                log.error(Util.getMessage("HeaderTypeNotKnown"), local_addr, hdr.type);
        }
    }
    // Process (new and retransmitted) messages:
    if(msgs != null)
        handleMessages(batch.dest(), batch.sender(), msgs, batch.mode() == MessageBatch.Mode.OOB, batch.clusterName());
    // received XMIT-RSPs:
    if(got_retransmitted_msg && rebroadcasting)
        checkForRebroadcasts();
    if(!batch.isEmpty())
        up_prot.up(batch);
}
// ProbeHandler interface
/** Answers diagnostic probe requests for the keys advertised by supportedKeys(). */
public Map<String, String> handleProbe(String... keys) {
    Map<String,String> result=new HashMap<>();
    for(String key: keys) {
        if("digest-history".equals(key))
            result.put(key, printDigestHistory());
        else if("dump-digest".equals(key))
            result.put(key, "\n" + printMessages());
    }
    return result;
}
// ProbeHandler interface
/** Lists the probe keys answered by handleProbe(). */
public String[] supportedKeys() {
    String[] keys={"digest-history", "dump-digest"};
    return keys;
}
/* --------------------------------- Private Methods --------------------------------------- */
/**
 * Called for messages arriving before this node is a server: queues the message for later
 * flushing if the become-server queue is enabled, otherwise drops it.
 */
protected void queueMessage(Message msg, long seqno) {
    if(become_server_queue == null) {
        log.trace("%s: message %s#%d was discarded (not yet server)", local_addr, msg.getSrc(), seqno);
        return;
    }
    become_server_queue.add(msg);
    log.trace("%s: message %s#%d was added to queue (not yet server)", local_addr, msg.getSrc(), seqno);
}
/**
 * Logs a warning about a message from a sender that is not in the current view. Silent while
 * leaving, when discard-logging is off, or when WARN is disabled; uses the suppress log (if
 * configured) to rate-limit identical warnings per sender.
 */
protected void unknownMember(Address sender, Object message) {
    if(leaving || !log_discard_msgs || !log.isWarnEnabled())
        return;
    if(suppress_log_non_member != null)
        suppress_log_non_member.log(SuppressLog.Level.warn, sender, suppress_time_non_member_warnings,
                                    local_addr, message, sender, view);
    else
        log.warn(Util.getMessage("MsgDroppedNak"), local_addr, message, sender, view);
}
/**
 * Adds the message to the sent_msgs table and then passes it down the stack. Change Bela Ban May 26 2002: we don't
 * store a copy of the message, but a reference ! This saves us a lot of memory. However, this also means that a
 * message should not be changed after storing it in the sent-table ! See protocols/DESIGN for details.
 * Made seqno increment and adding to sent_msgs atomic, e.g. seqno won't get incremented if adding to
 * sent_msgs fails e.g. due to an OOM (see http://jira.jboss.com/jira/browse/JGRP-179). bela Jan 13 2006
 */
protected void send(Message msg) {
    if(!running) {
        log.trace("%s: discarded message as we're not in the 'running' state, message: %s", local_addr, msg);
        return;
    }
    long msg_id;
    Table<Message> buf=xmit_table.get(local_addr);
    if(buf == null) // discard message if there is no entry for local_addr
        return;
    if(msg.getSrc() == null)
        msg.setSrc(local_addr); // this needs to be done so we can check whether the message sender is the local_addr
    boolean dont_loopback_set=msg.isFlagSet(Message.TransientFlag.DONT_LOOPBACK);
    msg_id=seqno.incrementAndGet();
    // Retry adding to the table with exponential backoff (10ms .. 5s): a transient failure
    // (e.g. OOM) must not lose the seqno already claimed above
    long sleep=10;
    do {
        try {
            msg.putHeader(this.id, NakAckHeader2.createMessageHeader(msg_id));
            buf.add(msg_id, msg, dont_loopback_set? dont_loopback_filter : null);
            break;
        }
        catch(Throwable t) {
            if(running) {
                Util.sleep(sleep);
                sleep=Math.min(5000, sleep*2);
            }
        }
    }
    while(running);
    // moved down_prot.down() out of synchronized clause (bela Sept 7 2006) http://jira.jboss.com/jira/browse/JGRP-300
    if(is_trace)
        log.trace("%s --> [all]: #%d", local_addr, msg_id);
    down_prot.down(msg); // if this fails, since msg is in sent_msgs, it can be retransmitted
    num_messages_sent++;
    // a regular multicast makes the highest-seqno announcement for this interval unnecessary
    if(resend_last_seqno && last_seqno_resender != null)
        last_seqno_resender.skipNext();
}
/**
 * Finds the corresponding retransmit buffer and adds the message to it (according to seqno). Then removes as many
 * messages as possible and passes them up the stack. Discards messages from non-members.
 */
protected void handleMessage(Message msg, NakAckHeader2 hdr) {
    Address sender=msg.getSrc();
    Table<Message> buf=xmit_table.get(sender);
    if(buf == null) { // discard message if there is no entry for sender
        unknownMember(sender, hdr.seqno);
        return;
    }
    num_messages_received++;
    boolean loopback=local_addr.equals(sender);
    // If the message was sent by myself, then it is already in the table and we don't need to add it. If not,
    // and the message is OOB, insert a dummy message (same msg, saving space), deliver it and drop it later on
    // removal. Else insert the real message
    boolean added=loopback || buf.add(hdr.seqno, msg.isFlagSet(Message.Flag.OOB)? DUMMY_OOB_MSG : msg);
    //if(added && is_trace)
    //  log.trace("%s <-- %s: #%d", local_addr, sender, hdr.seqno);
    // OOB msg is passed up. When removed, we discard it. Affects ordering: http://jira.jboss.com/jira/browse/JGRP-379
    if(added && msg.isFlagSet(Message.Flag.OOB)) {
        if(loopback) { // sent by self
            msg=buf.get(hdr.seqno); // we *have* to get a message, because loopback means we didn't add it to win !
            // setFlagIfAbsent() guards against delivering the same OOB message twice
            if(msg != null && msg.isFlagSet(Message.Flag.OOB) && msg.setFlagIfAbsent(Message.TransientFlag.OOB_DELIVERED))
                deliver(msg, sender, hdr.seqno, "OOB message");
        }
        else // sent by someone else
            deliver(msg, sender, hdr.seqno, "OOB message");
    }
    removeAndDeliver(buf, sender, loopback, null); // at most 1 thread will execute this at any given time
}
/**
 * Batch counterpart of handleMessage(): adds all (seqno, message) tuples from one sender to its
 * retransmit buffer, delivers OOB messages immediately as a batch, then removes and delivers
 * everything that is now in sequence. Discards the batch if the sender is not a member.
 */
protected void handleMessages(Address dest, Address sender, List<LongTuple<Message>> msgs, boolean oob, AsciiString cluster_name) {
    Table<Message> buf=xmit_table.get(sender);
    if(buf == null) { // discard message if there is no entry for sender
        unknownMember(sender, "batch");
        return;
    }
    num_messages_received+= msgs.size();
    boolean loopback=local_addr.equals(sender);
    // loopback messages are already in the table; OOB messages are stored as DUMMY_OOB_MSG placeholders
    boolean added=loopback || buf.add(msgs, oob, oob? DUMMY_OOB_MSG : null);
    //if(added && is_trace)
    //  log.trace("%s <-- %s: #%d-%d (%d messages)",
    //           local_addr, sender, msgs.get(0).getVal1(), msgs.get(msgs.size() -1).getVal1(), msgs.size());
    // OOB msg is passed up. When removed, we discard it. Affects ordering: http://jira.jboss.com/jira/browse/JGRP-379
    if(added && oob) {
        MessageBatch oob_batch=new MessageBatch(dest, sender, null, dest == null, MessageBatch.Mode.OOB, msgs.size());
        if(loopback) {
            for(LongTuple<Message> tuple: msgs) {
                long seq=tuple.getVal1();
                Message msg=buf.get(seq); // we *have* to get the message, because loopback means we didn't add it to win !
                // setFlagIfAbsent() guards against delivering the same OOB message twice
                if(msg != null && msg.isFlagSet(Message.Flag.OOB) && msg.setFlagIfAbsent(Message.TransientFlag.OOB_DELIVERED))
                    oob_batch.add(msg);
            }
        }
        else {
            for(LongTuple<Message> tuple: msgs)
                oob_batch.add(tuple.getVal2());
        }
        deliverBatch(oob_batch);
    }
    removeAndDeliver(buf, sender, loopback, cluster_name); // at most 1 thread will execute this at any given time
}
/** Efficient way of checking whether another thread is already processing messages from sender. If that's the case,
 *  we return immediately and let the existing thread process our message (https://jira.jboss.org/jira/browse/JGRP-829).
 *  Benefit: fewer threads blocked on the same lock, these threads can be returned to the thread pool
 */
protected void removeAndDeliver(Table<Message> buf, Address sender, boolean loopback, AsciiString cluster_name) {
    // The adders counter doubles as a "work pending" flag: only the thread that raised it
    // from 0 drains the table; concurrent arrivals just bump it and leave
    AtomicInteger adders=buf.getAdders();
    if(adders.getAndIncrement() != 0)
        return;
    // loopback messages must stay in the sender's own table for retransmission
    boolean remove_msgs=discard_delivered_msgs && !loopback;
    MessageBatch batch=new MessageBatch(buf.size()).dest(null).sender(sender).clusterName(cluster_name).multicast(true);
    Supplier<MessageBatch> batch_creator=() -> batch;
    do {
        try {
            batch.reset();
            // Don't include DUMMY and OOB_DELIVERED messages in the removed set
            buf.removeMany(remove_msgs, 0, no_dummy_and_no_oob_delivered_msgs_and_no_dont_loopback_msgs,
                           batch_creator, BATCH_ACCUMULATOR);
        }
        catch(Throwable t) {
            log.error("failed removing messages from table for " + sender, t);
        }
        if(!batch.isEmpty())
            deliverBatch(batch);
    }
    // loop until no new work arrived while we were delivering (decrement reaches 0)
    while(adders.decrementAndGet() != 0);
    if(rebroadcasting)
        checkForRebroadcasts();
}
/**
 * Retransmits messages first_seqno to last_seqno from original_sender from xmit_table to xmit_requester,
 * called when XMIT_REQ is received.
 * @param xmit_requester The sender of the XMIT_REQ, we have to send the requested copy of the message to this address
 * @param missing_msgs A list of seqnos that have to be retransmitted
 * @param original_sender The member who originally sent the message. Guaranteed to be non-null
 */
protected void handleXmitReq(Address xmit_requester, SeqnoList missing_msgs, Address original_sender) {
    log.trace("%s <-- %s: XMIT(%s%s)", local_addr, xmit_requester, original_sender, missing_msgs);
    if(stats)
        xmit_reqs_received.add(missing_msgs.size());
    Table<Message> buf=xmit_table.get(original_sender);
    if(buf == null) {
        log.error(Util.getMessage("SenderNotFound"), local_addr, original_sender);
        return;
    }
    for(long i: missing_msgs) {
        Message msg=buf.get(i);
        if(msg == null) {
            // seqnos below buf.getLow() were already purged as stable, so missing them is expected
            if(log.isWarnEnabled() && log_not_found_msgs && !local_addr.equals(xmit_requester) && i > buf.getLow())
                log.warn(Util.getMessage("MessageNotFound"), local_addr, original_sender, i);
            continue;
        }
        if(is_trace)
            log.trace("%s --> [all]: resending %s#%d", local_addr, original_sender, i);
        sendXmitRsp(xmit_requester, msg);
    }
}
/**
 * Passes a single message up the stack, logging (but not propagating) any failure.
 * @param error_msg short description used in the error log if delivery throws
 */
protected void deliver(Message msg, Address sender, long seqno, String error_msg) {
    if(is_trace)
        log.trace("%s <-- %s: #%d", local_addr, sender, seqno);
    try {
        up_prot.up(msg);
    }
    catch(Throwable t) {
        log.error(Util.getMessage("FailedToDeliverMsg"), local_addr, error_msg, msg, t);
    }
}
/**
 * Passes a message batch up the stack; empty/null batches are skipped. Delivery failures are
 * logged but not propagated. At trace level, logs the seqno range and size of the batch.
 */
protected void deliverBatch(MessageBatch batch) {
    try {
        if(batch == null || batch.isEmpty())
            return;
        if(is_trace) {
            StringBuilder sb=new StringBuilder();
            sb.append(local_addr).append(" <-- ").append(batch.sender()).append(": ");
            Message first=batch.first(), last=batch.last();
            if(first != null && last != null) {
                NakAckHeader2 first_hdr=first.getHeader(id), last_hdr=last.getHeader(id);
                sb.append('#').append(first_hdr.seqno).append('-').append(last_hdr.seqno);
            }
            sb.append(" (").append(batch.size()).append(" messages)");
            log.trace(sb);
        }
        up_prot.up(batch);
    }
    catch(Throwable t) {
        log.error(Util.getMessage("FailedToDeliverMsg"), local_addr, "batch", batch, t);
    }
}
/**
 * Flushes the queue. Done in a separate thread as we don't want to block the
 * {@link GMS#installView(org.jgroups.View,org.jgroups.util.Digest)} method (called when a view is installed).
 */
protected void flushBecomeServerQueue() {
    if(become_server_queue != null && !become_server_queue.isEmpty()) {
        log.trace("%s: flushing become_server_queue (%d elements)", local_addr, become_server_queue.size());
        TP transport=getTransport();
        // each queued message is replayed through up() on the transport's thread pool;
        // the message is removed from the queue even if up() throws
        for(final Message msg: become_server_queue) {
            transport.submitToThreadPool(() -> {
                try {
                    up(msg);
                }
                finally {
                    become_server_queue.remove(msg);
                }
            }, true);
        }
    }
}
/**
 * Aborts an in-progress rebroadcast: clears the flag and wakes up any thread waiting on
 * rebroadcast_done (both under rebroadcast_lock, so waiters re-check a consistent flag).
 */
protected void cancelRebroadcasting() {
    rebroadcast_lock.lock();
    try {
        rebroadcasting=false;
        rebroadcast_done.signalAll();
    }
    finally {
        rebroadcast_lock.unlock();
    }
}
/**
 * Sends a message msg to the requester. We have to wrap the original message into a retransmit message, as we need
 * to preserve the original message's properties, such as src, headers etc.
 * @param dest the requester of the retransmission
 * @param msg the original message to retransmit (may be null, in which case nothing is sent)
 */
protected void sendXmitRsp(Address dest, Message msg) {
    if(msg == null)
        return;
    if(stats)
        xmit_rsps_sent.increment();
    if(msg.getSrc() == null)
        msg.setSrc(local_addr);
    if(use_mcast_xmit) { // we simply send the original multicast message
        down_prot.down(msg);
        return;
    }
    // unicast case: send a copy to the requester with the header type changed to XMIT_RSP,
    // so the receiver knows to unwrap it (the original message must not be modified)
    Message xmit_msg=msg.copy(true, true).setDest(dest); // copy payload and headers
    NakAckHeader2 hdr=xmit_msg.getHeader(id);
    NakAckHeader2 newhdr=hdr.copy();
    newhdr.type=NakAckHeader2.XMIT_RSP; // change the type in the copy from MSG --> XMIT_RSP
    xmit_msg.putHeader(id, newhdr);
    down_prot.down(xmit_msg);
}
/**
 * Handles a received retransmit response: converts it back into a regular message (via
 * {@link #msgFromXmitRsp(Message,NakAckHeader2)}, which also updates the stats counter and
 * rewrites the header type to MSG), processes it like a normal message, and re-checks any
 * pending rebroadcast. Previously duplicated msgFromXmitRsp()'s conversion logic inline.
 */
protected void handleXmitRsp(Message msg, NakAckHeader2 hdr) {
    if(msg == null)
        return;
    try {
        Message converted=msgFromXmitRsp(msg, hdr); // non-null here since msg != null
        handleMessage(converted, converted.getHeader(id));
        if(rebroadcasting)
            checkForRebroadcasts();
    }
    catch(Exception ex) {
        log.error(Util.getMessage("FailedToDeliverMsg"), local_addr, "retransmitted message", msg, ex);
    }
}
/**
 * Compares the sender's highest seqno with my highest seqno: if the sender's is higher, ask sender for retransmission
 * @param sender The sender
 * @param seqno The highest seqno sent by sender
 */
protected void handleHighestSeqno(Address sender, long seqno) {
    // check whether the highest seqno received from sender is > highest seqno received for sender in my digest.
    // If yes, request retransmission (see "Last Message Dropped" topic in DESIGN)
    Table<Message> buf=xmit_table.get(sender);
    if(buf == null)
        return;
    long my_highest_received=buf.getHighestReceived();
    if(my_highest_received >= 0 && seqno > my_highest_received) {
        log.trace("%s: my_highest_rcvd (%s#%d) < highest received (%s#%d): requesting retransmission",
                  local_addr, sender, my_highest_received, sender, seqno);
        // only the announced highest seqno itself is requested; gaps below it are handled
        // by the regular retransmit task
        retransmit(seqno,seqno,sender);
    }
}
/**
 * Converts a retransmission response back into the original message: clears the unicast
 * destination and flips the header type XMIT_RSP back to MSG. Also counts the response in stats.
 * @return the restored message, or null if {@code msg} was null
 */
protected Message msgFromXmitRsp(Message msg, NakAckHeader2 hdr) {
    if(msg == null)
        return null;
    if(stats)
        xmit_rsps_received.increment();
    msg.setDest(null);
    NakAckHeader2 restored_hdr=hdr.copy();
    restored_hdr.type=NakAckHeader2.MSG;
    msg.putHeader(id, restored_hdr);
    return msg;
}
/**
 * Takes the argument highest_seqnos and compares it to the current digest. If the current digest has fewer messages,
 * then send retransmit messages for the missing messages. Return when all missing messages have been received. If
 * we're waiting for a missing message from P, and P crashes while waiting, we need to exclude P from the wait set.
 */
protected void rebroadcastMessages() {
    Digest their_digest;
    long sleep=max_rebroadcast_timeout / NUM_REBROADCAST_MSGS;
    long wait_time=max_rebroadcast_timeout, start=System.currentTimeMillis();
    while(wait_time > 0) {
        // snapshot the target digest under its own lock; null means rebroadcasting was cancelled
        rebroadcast_digest_lock.lock();
        try {
            if(rebroadcast_digest == null)
                break;
            their_digest=rebroadcast_digest.copy();
        }
        finally {
            rebroadcast_digest_lock.unlock();
        }
        Digest my_digest=getDigest();
        boolean xmitted=false;
        // for every member whose digest entry is ahead of ours, request the missing range
        for(Digest.Entry entry: their_digest) {
            Address member=entry.getMember();
            long[] my_entry=my_digest.get(member);
            if(my_entry == null)
                continue;
            long their_high=entry.getHighest();
            // Cannot ask for 0 to be retransmitted because the first seqno in NAKACK2 and UNICAST(2) is always 1 !
            // Also, we need to ask for retransmission of my_high+1, because we already *have* my_high, and don't
            // need it, so the retransmission range is [my_high+1 .. their_high]: *exclude* my_high, but *include*
            // their_high
            long my_high=Math.max(my_entry[0], my_entry[1]);
            if(their_high > my_high) {
                log.trace("%s: fetching %d-%d from %s", local_addr, my_high, their_high, member);
                retransmit(my_high+1, their_high, member, true); // use multicast to send retransmit request
                xmitted=true;
            }
        }
        if(!xmitted)
            return; // we're done; no retransmissions are needed anymore. our digest is >= rebroadcast_digest
        // wait (bounded) for the requested retransmissions to arrive, then re-evaluate
        rebroadcast_lock.lock();
        try {
            try {
                my_digest=getDigest();
                rebroadcast_digest_lock.lock();
                try {
                    // exit early if rebroadcasting was cancelled or our digest caught up meanwhile
                    if(!rebroadcasting || isGreaterThanOrEqual(my_digest, rebroadcast_digest))
                        return;
                }
                finally {
                    rebroadcast_digest_lock.unlock();
                }
                rebroadcast_done.await(sleep, TimeUnit.MILLISECONDS);
                // NOTE(review): this subtracts the *total* elapsed time since 'start' on every
                // iteration, so wait_time shrinks faster than real time after the first pass and
                // the loop can time out early — confirm whether this is intentional
                wait_time-=(System.currentTimeMillis() - start);
            }
            catch(InterruptedException ignored) {
            }
        }
        finally {
            rebroadcast_lock.unlock();
        }
    }
}
/**
 * Checks whether our current digest covers the rebroadcast digest; if so, the rebroadcast
 * phase is complete and is cancelled.
 */
protected void checkForRebroadcasts() {
    Digest current=getDigest();
    boolean done=false;
    rebroadcast_digest_lock.lock();
    try {
        done=isGreaterThanOrEqual(current, rebroadcast_digest);
    }
    catch(Throwable ignored) {
        // best-effort check: on any error we simply don't cancel yet
    }
    finally {
        rebroadcast_digest_lock.unlock();
    }
    if(done)
        cancelRebroadcasting();
}
/**
 * Returns true if all senders of the current digest have their seqnos >= the ones from other
 */
protected static boolean isGreaterThanOrEqual(Digest first, Digest other) {
    if(other == null)
        return true;
    for(Digest.Entry e: first) {
        long[] other_seqnos=other.get(e.getMember());
        if(other_seqnos == null)
            continue; // sender unknown to 'other': nothing to compare against
        long other_highest=Math.max(other_seqnos[0], other_seqnos[1]);
        if(e.getHighest() < other_highest)
            return false; // we're behind 'other' for this sender
    }
    return true;
}
/**
 * Removes old members from xmit-table and adds new members to xmit-table (at seqnos hd=0, hr=0).
 * This method is not called concurrently
 */
protected void adjustReceivers(List<Address> members) {
    Set<Address> current_senders=xmit_table.keySet(); // live view of xmit_table
    // drop buffers of senders which are no longer members — but never our own
    for(Address sender: current_senders) {
        if(members.contains(sender) || Objects.equals(local_addr, sender))
            continue;
        if(xmit_table.remove(sender) != null)
            log.debug("%s: removed %s from xmit_table (not member anymore)", local_addr, sender);
    }
    // create empty buffers (hd=0, hr=0) for members we don't have a table for yet
    for(Address mbr: members) {
        if(!current_senders.contains(mbr))
            xmit_table.putIfAbsent(mbr, createTable(0));
    }
}
/**
 * Returns a message digest: for each member P the highest delivered and received seqno is added
 */
public Digest getDigest() {
    final Map<Address,long[]> seqnos_per_sender=new HashMap<>();
    // keys and values of xmit_table are guaranteed non-null (CCHM)
    xmit_table.forEach((sender, buf) -> seqnos_per_sender.put(sender, buf.getDigest()));
    return new Digest(seqnos_per_sender);
}
/**
 * Returns the digest for a single member, the full digest when {@code mbr} is null, or null
 * when we have no retransmit table for {@code mbr}.
 */
public Digest getDigest(Address mbr) {
    if(mbr == null)
        return getDigest();
    Table<Message> buf=xmit_table.get(mbr);
    if(buf == null)
        return null;
    long[] hd_hr=buf.getDigest(); // [highest-delivered, highest-received]
    return new Digest(mbr, hd_hr[0], hd_hr[1]);
}
/**
 * Creates a retransmit buffer for each sender in the digest according to the sender's seqno.
 * If a buffer already exists, it resets it.
 * Delegates to {@link #setDigest(Digest, boolean)} with {@code merge=false}.
 */
protected void setDigest(Digest digest) {
    setDigest(digest,false);
}
/**
 * For all members of the digest, adjust the retransmit buffers in xmit_table. If no entry
 * exists, create one with the initial seqno set to the seqno of the member in the digest. If the member already
 * exists, and is not the local address, replace it with the new entry (http://jira.jboss.com/jira/browse/JGRP-699)
 * if the digest's seqno is greater than the seqno in the window.
 * Delegates to {@link #setDigest(Digest, boolean)} with {@code merge=true}.
 */
protected void mergeDigest(Digest digest) {
    setDigest(digest,true);
}
/**
 * Overwrites existing entries, but does NOT remove entries not found in the digest
 * @param digest the digest whose entries replace our retransmit tables
 */
protected void overwriteDigest(Digest digest) {
    if(digest == null)
        return;
    StringBuilder history=new StringBuilder("\n[overwriteDigest()]\n");
    history.append("existing digest: " + getDigest()).append("\nnew digest: " + digest);
    for(Digest.Entry entry: digest) {
        Address member=entry.getMember();
        if(member == null)
            continue;
        long hd=entry.getHighestDeliveredSeqno();
        Table<Message> existing=xmit_table.get(member);
        if(existing != null) {
            if(local_addr.equals(member)) {
                // adjust only the highest_delivered seqno (to send msgs again): https://jira.jboss.org/browse/JGRP-1251
                // — never destroy our own window
                existing.setHighestDelivered(hd);
                continue;
            }
            xmit_table.remove(member);
        }
        xmit_table.put(member, createTable(hd));
    }
    history.append("\n").append("resulting digest: " + getDigest().toString(digest));
    digest_history.add(history.toString());
    log.debug(history.toString());
}
/**
 * Sets or merges the digest. If there is no entry for a given member in xmit_table, create a new buffer.
 * Else skip the existing entry, unless it is a merge. In this case, skip the existing entry if its seqno is
 * greater than or equal to the one in the digest, or reset the window and create a new one if not.
 * @param digest The digest
 * @param merge Whether to merge the new digest with our own, or not
 */
protected void setDigest(Digest digest, boolean merge) {
    if(digest == null)
        return;
    // the history/log string is only built when debug logging is on (sb stays null otherwise)
    StringBuilder sb=log.isDebugEnabled()?
            new StringBuilder("\n[" + local_addr + (merge? " mergeDigest()]\n" : " setDigest()]\n"))
                    .append("existing digest: " + getDigest()).append("\nnew digest: " + digest) : null;
    boolean set_own_seqno=false;
    for(Digest.Entry entry: digest) {
        Address member=entry.getMember();
        if(member == null)
            continue;
        long highest_delivered_seqno=entry.getHighestDeliveredSeqno();
        Table<Message> buf=xmit_table.get(member);
        if(buf != null) {
            // We only reset the window if its seqno is lower than the seqno shipped with the digest. Also, we
            // don't reset our own window (https://jira.jboss.org/jira/browse/JGRP-948, comment 20/Apr/09 03:39 AM)
            if(!merge
                    || (Objects.equals(local_addr, member)) // never overwrite our own entry
                    || buf.getHighestDelivered() >= highest_delivered_seqno) // my seqno is >= digest's seqno for sender
                continue;
            xmit_table.remove(member);
            // to get here, merge must be false !
            if(member.equals(local_addr)) { // Adjust the seqno: https://jira.jboss.org/browse/JGRP-1251
                seqno.set(highest_delivered_seqno);
                set_own_seqno=true;
            }
        }
        // (re)create the window starting at the digest's highest-delivered seqno
        buf=createTable(highest_delivered_seqno);
        xmit_table.put(member, buf);
    }
    if(sb != null) {
        sb.append("\n").append("resulting digest: " + getDigest().toString(digest));
        if(set_own_seqno)
            sb.append("\nnew seqno for " + local_addr + ": " + seqno);
        digest_history.add(sb.toString());
        log.debug(sb.toString());
    }
}
/**
 * Creates a new retransmit table starting at {@code initial_seqno}, sized and tuned by the
 * configured xmit_table_* properties.
 */
protected Table<Message> createTable(long initial_seqno) {
    return new Table<>(xmit_table_num_rows, xmit_table_msgs_per_row,
                       initial_seqno, xmit_table_resize_factor, xmit_table_max_compaction_time);
}
/**
 * Garbage collect messages that have been seen by all members. Update sent_msgs: for the sender P in the digest
 * which is equal to the local address, garbage collect all messages <= seqno at digest[P]. Update xmit_table:
 * for each sender P in the digest and its highest seqno seen SEQ, garbage collect all delivered_msgs in the
 * retransmit buffer corresponding to P which are <= seqno at digest[P].
 */
protected void stable(Digest digest) {
    if(members == null || local_addr == null || digest == null)
        return;
    log.trace("%s: received stable digest %s", local_addr, digest);
    stability_msgs.add(digest.toString());
    for(Digest.Entry entry: digest) {
        Address member=entry.getMember();
        if(member == null)
            continue;
        long hd=entry.getHighestDeliveredSeqno();
        long hr=entry.getHighestReceivedSeqno();
        // check whether the last seqno received for a sender P in the stability digest is > last seqno
        // received for P in my digest. if yes, request retransmission (see "Last Message Dropped" topic in DESIGN)
        Table<Message> buf=xmit_table.get(member);
        if(buf != null) {
            long my_hr=buf.getHighestReceived();
            if(hr >= 0 && hr > my_hr) {
                log.trace("%s: my_highest_rcvd (%d) < stability_highest_rcvd (%d): requesting retransmission of %s",
                          local_addr, my_hr, hr, member + "#" + hr);
                retransmit(hr, hr, member);
            }
        }
        // delete *delivered* msgs that are stable (all messages with seqnos <= seqno)
        if(hd >= 0 && buf != null) {
            log.trace("%s: deleting msgs <= %s from %s", local_addr, hd, member);
            buf.purge(hd);
        }
    }
}
/**
 * Requests retransmission of the seqno range [first_seqno .. last_seqno] from {@code sender}
 * (unicast request); a no-op when the range is empty.
 */
protected void retransmit(long first_seqno, long last_seqno, Address sender) {
    if(first_seqno <= last_seqno)
        retransmit(first_seqno,last_seqno,sender,false);
}
/**
 * Requests retransmission of the seqno range [first_seqno .. last_seqno] from {@code sender}.
 * @param multicast_xmit_request if true, the XMIT request is multicast instead of unicast to the sender
 */
protected void retransmit(long first_seqno, long last_seqno, final Address sender, boolean multicast_xmit_request) {
    SeqnoList list=new SeqnoList((int)(last_seqno - first_seqno +1), first_seqno).add(first_seqno, last_seqno);
    retransmit(list,sender,multicast_xmit_request);
}
/**
 * Sends an XMIT request for the given seqnos of {@code sender}. The request is multicast when
 * {@code multicast_xmit_request} or {@code use_mcast_xmit_req} is set, otherwise unicast — either
 * to the sender, or (with xmit_from_random_member) to a randomly picked other member.
 */
protected void retransmit(SeqnoList missing_msgs, final Address sender, boolean multicast_xmit_request) {
    Address dest=null;
    if(!multicast_xmit_request && !this.use_mcast_xmit_req)
        dest=sender;
    if(xmit_from_random_member && !local_addr.equals(sender)) {
        Address candidate=Util.pickRandomElement(members);
        if(candidate != null && !local_addr.equals(candidate))
            dest=candidate;
    }
    Message xmit_req=new ObjectMessage(dest, missing_msgs).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL)
      .putHeader(this.id, NakAckHeader2.createXmitRequestHeader(sender));
    log.trace("%s --> %s: XMIT_REQ(%s)", local_addr, dest, missing_msgs);
    down_prot.down(xmit_req);
    if(stats)
        xmit_reqs_sent.add(missing_msgs.size());
}
/** Resets the protocol state: own seqno back to 0 and all retransmit tables dropped. */
protected void reset() {
    seqno.set(0);
    xmit_table.clear();
}
/**
 * Sums the sizes of all messages in the given table, skipping null slots.
 * @param include_headers if true, counts the full serialized size ({@code size()}); otherwise
 *                        only the payload length ({@code getLength()})
 */
protected static long sizeOfAllMessages(Table<Message> buf, boolean include_headers) {
    return buf.stream()
      .filter(Objects::nonNull)
      .mapToLong(msg -> include_headers? msg.size() : msg.getLength())
      .sum();
}
/** Schedules the periodic retransmission task (every xmit_interval ms) unless it is already running. */
protected void startRetransmitTask() {
    if(xmit_task == null || xmit_task.isDone())
        xmit_task=timer.scheduleWithFixedDelay(new RetransmitTask(), 0, xmit_interval, TimeUnit.MILLISECONDS, sends_can_block);
}
/** Cancels the periodic retransmission task (interrupting it if running) and clears the reference. */
protected void stopRetransmitTask() {
    if(xmit_task != null) {
        xmit_task.cancel(true);
        xmit_task=null;
    }
}
/**
 * Retransmitter task which periodically (every xmit_interval ms) looks at all the retransmit tables and
 * sends retransmit request to all members from which we have missing messages
 */
protected class RetransmitTask implements Runnable {
    // delegates to triggerXmit(), which scans all retransmit tables for gaps
    public void run() {
        triggerXmit();
    }
    public String toString() {
        return NAKACK2.class.getSimpleName() + ": RetransmitTask (interval=" + xmit_interval + " ms)";
    }
}
@ManagedOperation(description="Triggers the retransmission task, asking all senders for missing messages")
public void triggerXmit() {
    SeqnoList missing;
    for(Map.Entry<Address,Table<Message>> entry: xmit_table.entrySet()) {
        Address target=entry.getKey(); // target to send retransmit requests to
        Table<Message> buf=entry.getValue();
        if(buf != null && buf.getNumMissing() > 0 && (missing=buf.getMissing(max_xmit_req_size)) != null) { // getNumMissing() is fast
            long highest=missing.getLast();
            // xmit_task_map implements a one-interval grace period: a gap seen for the first time
            // is only recorded here and requested on the *next* run, if it is still missing
            Long prev_seqno=xmit_task_map.get(target);
            if(prev_seqno == null) {
                xmit_task_map.put(target, highest); // no retransmission
            }
            else {
                missing.removeHigherThan(prev_seqno); // we only retransmit the 'previous batch'
                if(highest > prev_seqno)
                    xmit_task_map.put(target, highest);
                if(!missing.isEmpty())
                    retransmit(missing, target, false);
            }
        }
        else if(!xmit_task_map.isEmpty())
            xmit_task_map.remove(target); // no current gaps for target
    }
    // optionally re-announce our last sent seqno so receivers can detect a dropped last message
    if(resend_last_seqno && last_seqno_resender != null)
        last_seqno_resender.execute(seqno.get());
}
/** Class which is called by RetransmitTask to resend the last seqno sent (if resend_last_seqno is enabled) */
protected class LastSeqnoResender {
    // Number of times the same seqno has been sent (acquiesces after resend_last_seqno_max_times)
    protected int num_resends;
    protected long last_seqno_resent; // the last seqno that was resent by this task
    // set to true when a regular msg is sent to prevent the task from running
    protected final AtomicBoolean skip_next_resend=new AtomicBoolean(false);
    /** Suppresses the next resend: a regular message was just sent, so no re-announcement is needed. */
    protected void skipNext() {
        skip_next_resend.compareAndSet(false,true);
    }
    /**
     * Broadcasts a HIGHEST_SEQNO announcement for {@code seqno}, unless nothing was sent yet
     * (seqno == 0), a regular message just went out (skip flag consumed atomically), or this
     * seqno has already been resent resend_last_seqno_max_times times.
     */
    protected void execute(long seqno) {
        if(seqno == 0 || skip_next_resend.compareAndSet(true,false))
            return;
        if(seqno == last_seqno_resent && num_resends >= resend_last_seqno_max_times)
            return;
        if(seqno > last_seqno_resent) {
            last_seqno_resent=seqno;
            num_resends=1;
        }
        else
            num_resends++;
        Message msg=new EmptyMessage(null).putHeader(id, NakAckHeader2.createHighestSeqnoHeader(seqno))
                .setFlag(Message.Flag.OOB, Message.Flag.INTERNAL)
                .setFlag(Message.TransientFlag.DONT_LOOPBACK); // we don't need to receive our own broadcast
        down_prot.down(msg);
    }
}
}
|
/**
Code modified from:
http://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html#Source_Code
**/
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class TestCaseV2 {
public static class TokenizerMapper
extends Mapper<Object, Text, Text, IntWritable>{
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(Object key, Text value, Context context
) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
}
public static class IntSumReducer
extends Reducer<Text,IntWritable,Text,IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values,
Context context
) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
if (sum >= 2) { //filter
result.set(sum);
context.write(key, result);
}
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "word count");
job.setJarByClass(TestCaseV2.class);
job.setMapperClass(TokenizerMapper.class);
job.setCombinerClass(IntSumReducer.class);
job.setReducerClass(IntSumReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
|
/**
* Copyright 2005-2019 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.rice.krad.data.metadata;
import java.util.List;
/**
* Non-top-level metadata object
*
* <p>
* Interface shared by all non-top-level metadata objects which link to other persistable objects. This is used as the
* base interface for 1:1/M:1 Relationships and 1:M/N:M Collections.
* </p>
*
* @author Kuali Rice Team (rice.collab@kuali.org)
*/
public interface MetadataChild extends MetadataCommon {
    /**
     * Gets the type of related object
     *
     * <p>
     * This is the type of the object referenced by this relationship or contained in this collection.
     * </p>
     *
     * @return type of related object
     */
    Class<?> getRelatedType();
    /**
     * Gets the parent-child related fields
     *
     * <p>
     * Returns the related fields between the parent and child objects.
     * </p>
     *
     * @return related fields. List must not be empty. There always must be at least one related field.
     */
    List<DataObjectAttributeRelationship> getAttributeRelationships();
    /**
     * Gets bi-directional relationship
     *
     * <p>
     * If this metadata element is part of a bi-directional relationship, this method returns the other side of the
     * bi-directional relationship.
     * </p>
     *
     * @return the inverse of this relationship if it is bi-directional, null otherwise
     */
    MetadataChild getInverseRelationship();
    /**
     * Determines whether object automatically saved
     *
     * <p>
     * For related objects, whether this object will be automatically saved when the containing object is persisted.
     * </p>
     *
     * @return whether object is automatically saved
     */
    boolean isSavedWithParent();
    /**
     * Determines whether this object will be automatically deleted when the containing object is deleted.
     *
     * <p>
     * This is a special case of the {@link #isSavedWithParent()} method. It probably would never be true if the
     * {@link #isSavedWithParent()} returns false.
     * </p>
     *
     * @return whether automatically deleted
     */
    boolean isDeletedWithParent();
    /**
     * Determines whether object will be loaded with parent
     *
     * <p>
     * For related objects, whether this related object will be loaded from the persistence layer at the same time as
     * the parent object.
     * </p>
     * <p>
     * If false, the object will be loaded upon demand, either via automatic lazy-loading provided by the infrastructure
     * or by explicit request.
     * </p>
     *
     * @return whether the object is loaded at the same time as its parent object
     */
    boolean isLoadedAtParentLoadTime();
    /**
     * Determines whether the object is reloaded automatically with parent
     *
     * <p>
     * For related objects, whether this related object will be loaded from the persistence layer automatically when it
     * is accessed by client code.
     * </p>
     * <p>
     * If false, then the object must be refreshed manually by client code. (Though such a refresh may be possible by
     * requesting the refresh from the persistence provider.)
     * </p>
     *
     * @return whether the object is loaded lazily upon first use
     */
    boolean isLoadedDynamicallyUponUse();
    /**
     * Gets foreign key attribute from parent.
     *
     * <p>
     * For a given child key attribute, return the matching foreign key attribute on the parent object.
     * </p>
     *
     * @param childAttribute the name of the child key attribute to look up
     * @return the matching parent attribute name, or null if the attribute name given is not part of the key relationship.
     */
    String getParentAttributeNameRelatedToChildAttributeName(String childAttribute);
}
|
/*
* Copyright 2017 Apereo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.core.item.edit.attachment;
/** Editor for the zip-specific attributes of a zip attachment. */
public interface ZipAttachmentEditor extends AttachmentEditor {
    /** Sets the folder path inside the zip that this attachment refers to. */
    void editFolder(String folderPath);
    // NOTE(review): presumably controls whether the zip's entries are mapped/expanded into
    // individual resources — confirm against the implementing editor
    void editMapped(boolean mapped);
}
|
package com.revature.test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.junit.jupiter.api.Test;
import com.revature.beans.Accounts;
import com.revature.services.AccTrans;
/**
 * Unit tests for {@link AccTrans} withdraw/deposit/transfer operations.
 * JUnit 5 creates a fresh test instance per test method, so each test starts from the
 * initial fixture balances (ac1: 500.0, ac2: 300.0).
 */
public class AccTransTest {
    // Fixture account 1: CHECKING, balance 500.0
    private long aID = 1;
    private long[] cID = {1,0,0,0,0,0,0,0,0,0};
    private String aType = "CHECKING";
    private long acctNumber = 1;
    private double balance = 500.0;
    private String[] email = {"abcd.def@gmail.com",null,null,null,null,null,null,null,null,null};
    private String prevTrans = " ";
    private Accounts ac1 = new Accounts(aID, cID, aType, acctNumber, balance, email, prevTrans);
    // Fixture account 2: SAVING, balance 300.0
    private long aID2 = 2;
    private long[] cID2 = {2,0,0,0,0,0,0,0,0,0};
    private String aType2 = "SAVING";
    private long acctNumber2 = 2;
    private double balance2 = 300.0;
    private String[] email2 = {"xyz.adef@gmail.com",null,null,null,null,null,null,null,null,null};
    private String prevTrans2 = " ";
    private Accounts ac2 = new Accounts(aID2, cID2, aType2, acctNumber2, balance2, email2, prevTrans2);
    // System under test
    private AccTrans at = new AccTrans();
    /*
     * Test for Accounts.java
     */
    // Withdraw: 500.0 - 200.0 = 300.0
    @Test
    void withdrawTest() {
        double amount = 200;
        at.withdraw(ac1, amount);
        //expected, actual
        assertEquals(300.0, ac1.getBalance());
    }
    // Deposit: 500.0 + 400.0 = 900.0
    @Test
    void depositTest() {
        double amount = 400;
        at.deposit(ac1, amount);
        //expected, actual
        assertEquals(900.0, ac1.getBalance());
    }
    // Transfer 100.0 from ac1 (500.0 -> 400.0) to ac2 (300.0 -> 400.0)
    @Test
    void transferTest() {
        double amount = 100;
        at.transfer(ac1, ac2, amount);
        //expected, actual
        assertEquals(400.0, ac1.getBalance());
        assertEquals(400.0, ac2.getBalance());
    }
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.datafactory.models;
import com.azure.core.annotation.Fluent;
import com.azure.core.util.logging.ClientLogger;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeName;
import java.util.List;
/** A copy activity Teradata source. */
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type")
@JsonTypeName("TeradataSource")
@Fluent
/** A copy activity Teradata source.
 *  NOTE: this class is generated by the AutoRest code generator (see file header); manual edits
 *  may be overwritten when the SDK is regenerated. */
public final class TeradataSource extends TabularSource {
    // Not serialized; appears unused within this class (kept for generator consistency)
    @JsonIgnore private final ClientLogger logger = new ClientLogger(TeradataSource.class);
    /*
     * Teradata query. Type: string (or Expression with resultType string).
     */
    @JsonProperty(value = "query")
    private Object query;
    /*
     * The partition mechanism that will be used for teradata read in parallel.
     * Possible values include: "None", "Hash", "DynamicRange".
     */
    @JsonProperty(value = "partitionOption")
    private Object partitionOption;
    /*
     * The settings that will be leveraged for teradata source partitioning.
     */
    @JsonProperty(value = "partitionSettings")
    private TeradataPartitionSettings partitionSettings;
    /**
     * Get the query property: Teradata query. Type: string (or Expression with resultType string).
     *
     * @return the query value.
     */
    public Object query() {
        return this.query;
    }
    /**
     * Set the query property: Teradata query. Type: string (or Expression with resultType string).
     *
     * @param query the query value to set.
     * @return the TeradataSource object itself.
     */
    public TeradataSource withQuery(Object query) {
        this.query = query;
        return this;
    }
    /**
     * Get the partitionOption property: The partition mechanism that will be used for teradata read in parallel.
     * Possible values include: "None", "Hash", "DynamicRange".
     *
     * @return the partitionOption value.
     */
    public Object partitionOption() {
        return this.partitionOption;
    }
    /**
     * Set the partitionOption property: The partition mechanism that will be used for teradata read in parallel.
     * Possible values include: "None", "Hash", "DynamicRange".
     *
     * @param partitionOption the partitionOption value to set.
     * @return the TeradataSource object itself.
     */
    public TeradataSource withPartitionOption(Object partitionOption) {
        this.partitionOption = partitionOption;
        return this;
    }
    /**
     * Get the partitionSettings property: The settings that will be leveraged for teradata source partitioning.
     *
     * @return the partitionSettings value.
     */
    public TeradataPartitionSettings partitionSettings() {
        return this.partitionSettings;
    }
    /**
     * Set the partitionSettings property: The settings that will be leveraged for teradata source partitioning.
     *
     * @param partitionSettings the partitionSettings value to set.
     * @return the TeradataSource object itself.
     */
    public TeradataSource withPartitionSettings(TeradataPartitionSettings partitionSettings) {
        this.partitionSettings = partitionSettings;
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public TeradataSource withQueryTimeout(Object queryTimeout) {
        super.withQueryTimeout(queryTimeout);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public TeradataSource withAdditionalColumns(List<AdditionalColumns> additionalColumns) {
        super.withAdditionalColumns(additionalColumns);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public TeradataSource withSourceRetryCount(Object sourceRetryCount) {
        super.withSourceRetryCount(sourceRetryCount);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public TeradataSource withSourceRetryWait(Object sourceRetryWait) {
        super.withSourceRetryWait(sourceRetryWait);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public TeradataSource withMaxConcurrentConnections(Object maxConcurrentConnections) {
        super.withMaxConcurrentConnections(maxConcurrentConnections);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public TeradataSource withDisableMetricsCollection(Object disableMetricsCollection) {
        super.withDisableMetricsCollection(disableMetricsCollection);
        return this;
    }
    /**
     * Validates the instance.
     *
     * @throws IllegalArgumentException thrown if the instance is not valid.
     */
    @Override
    public void validate() {
        super.validate();
        // only the nested settings object carries validation constraints here
        if (partitionSettings() != null) {
            partitionSettings().validate();
        }
    }
}
|
package it.unipd.dei.nanocitation.metadata;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.io.UnsupportedEncodingException;
import java.net.HttpURLConnection;
import java.net.ProtocolException;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Map;
import java.util.Set;
import java.util.stream.Stream;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import org.apache.commons.io.IOUtils;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.jsoup.Jsoup;
import org.openrdf.model.Statement;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import it.unipd.dei.nanocitation.util.AssertionInfo;
import it.unipd.dei.nanocitation.util.Sio;
import it.unipd.dei.nanocitation.util.TsvParser;
import it.unipd.dei.nanocitation.metadata.types.Assertion;
import it.unipd.dei.nanocitation.metadata.types.DescriptionTopic;
import it.unipd.dei.nanocitation.metadata.types.MetadataContainer;;
public class AssertionManager
{
private Sio sio;
private static final Logger LOGGER = LoggerFactory.getLogger(AssertionInfo.class);
/**
 * Creates an AssertionManager.
 * @param sio helper used to translate SIO resource URIs into readable labels (see getValue())
 */
public AssertionManager(Sio sio)
{
    this.sio = sio;
}
/**
 * Converts each RDF statement in the given set into an {@link Assertion}.
 * @param ass the statements to convert
 * @return a list with one Assertion per statement, in the set's iteration order
 */
public ArrayList<Assertion> getAssertions(Set<Statement> ass)
{
    ArrayList<Assertion> result = new ArrayList<>();
    for (Statement stmt : ass)
        result.add(getOneAssertion(stmt));
    return result;
}
/**
 * Builds an Assertion from one RDF statement by filling in its three components:
 * subject (1), predicate (2) and object (3).
 */
private Assertion getOneAssertion(Statement u)
{
    Assertion assertion = new Assertion();
    for (int component = 1; component <= 3; component++)
        setValueAndURI(assertion, u, component);
    return assertion;
}
/**
 * Fills one component of the assertion from the statement.
 * The readable value is always set; the URI is only set when the raw term parses as a valid URI
 * (matching the previous behavior, where an invalid URI was silently skipped).
 * @param ret  assertion to populate
 * @param u    source RDF statement
 * @param type which component to set: 1 = subject, 2 = predicate, 3 = object
 */
private void setValueAndURI(Assertion ret, Statement u, int type)
{
    switch (type)
    {
        case 1: // subject
        {
            String raw = u.getSubject().toString();
            ret.setSubject(getValue(raw));
            URI uri = toUriOrNull(raw);
            if (uri != null)
                ret.setSubjectURI(uri);
            break;
        }
        case 2: // predicate
        {
            String raw = u.getPredicate().toString();
            ret.setPredicate(getValue(raw));
            URI uri = toUriOrNull(raw);
            if (uri != null)
                ret.setPredicateURI(uri);
            break;
        }
        case 3: // object
        {
            String raw = u.getObject().toString();
            ret.setObject(getValue(raw));
            URI uri = toUriOrNull(raw);
            if (uri != null)
                ret.setObjectURI(uri);
            break;
        }
    }
}

/**
 * Parses the given string as a URI.
 * @return the parsed URI, or null if the string is not a valid URI
 */
private static URI toUriOrNull(String s)
{
    try
    {
        return URI.create(s);
    }
    catch (IllegalArgumentException ignored)
    {
        // invalid URI (e.g. a literal, not a resource): leave the URI field unset
        return null;
    }
}
/**
 * Maps a raw RDF term (resource URI or typed literal) to a human-readable string by dispatching
 * on the vocabulary/namespace it belongs to. Unrecognized terms are returned unchanged.
 * Order of the branches matters: the first matching namespace wins.
 */
private String getValue(String u)
{
    String ret = "";
    if (u.contains("/resource/SIO_"))
        ret = sio.sioToReadable(u);
    else if (u.contains("linkedlifedata.com"))
        ret = linkedlifedataToReadable(u);
    else if (u.contains("ncicb.nci"))
        ret = ncitIdToReadable(u.substring(u.lastIndexOf('#') + 1));
    else if (u.contains("proteinatlas.org"))
        ret = proteinatlasToReadable(u);
    else if (u.contains("^^<http://www.w3.org/2001/XMLSchema#string>"))
        // typed string literal: strip the quotes and the ^^xsd:string suffix
        ret = u.substring(1, u.indexOf("^^") - 1);
    else if (u.contains("www.w3.org") && u.contains("/rdf-schema#"))
        ret = rdfSyntaxToReadable(u.substring(u.lastIndexOf('#') + 1));
    else if (u.contains("www.w3.org") && u.contains("rdf-syntax-ns#"))
        ret = rdfSyntaxToReadable(u.substring(u.lastIndexOf('#') + 1));
    else if (u.contains("rdf.disgenet.org/resource/gda/"))
        ret = "GDA id " + u.substring(u.lastIndexOf('/') + 1);
    else if (u.contains("rdf.disgenet.org/gene-disease-association"))
        ret = "GDA id " + u.substring(u.lastIndexOf('#') + 1);
    else if (u.contains("identifiers.org/ncbigene"))
        ret = identifiersGeneToReadable(u);
    else if (u.contains("/purl.obolibrary.org/obo/caloha.obo#"))
        ret = obocalohaToReadable(rdfSyntaxToReadable(u));
    else if (u.contains("purl.obolibrary.org/obo/"))
        ret = oboToReadable(u);
    else if (u.contains("http://ontology.neuinfo.org/") && u.contains("#"))
        // no dedicated resolver: use the URI fragment as the label
        ret = u.substring(u.lastIndexOf('#') + 1);
    else
        ret = u;
    return ret;
}
/**
 * Resolves an OBO relation URI to its human-readable label by querying
 * Ontobee and extracting the {@code label} text of the matching
 * {@code ObjectProperty} element.
 *
 * @param u the OBO URI (e.g. http://purl.obolibrary.org/obo/RO_...)
 * @return the label, or the empty string when the lookup or parse fails
 */
private String oboToReadable(String u)
{
    String url = "http://www.ontobee.org/ontology/RO?iri=" + u;
    File obofile = getFileFromUrl(url, "");
    if (obofile == null)
        return "";
    try (BufferedReader br = new BufferedReader(new FileReader(obofile)))
    {
        // StringBuilder instead of String += in the read loop (was O(n^2)).
        StringBuilder sb = new StringBuilder();
        String line;
        while ((line = br.readLine()) != null)
        {
            sb.append(line);
        }
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        // NOTE(review): the parser accepts DTDs/external entities from a remote
        // document; consider hardening against XXE if the source is untrusted.
        DocumentBuilder builder = factory.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(sb.toString()));
        Document doc = builder.parse(is);
        XPathFactory xpathfactory = XPathFactory.newInstance();
        XPath xpath = xpathfactory.newXPath();
        XPathExpression expr = xpath.compile("//ObjectProperty[@about='" + u + "']/label/text()");
        NodeList nodes = (NodeList) expr.evaluate(doc, XPathConstants.NODESET);
        // BUGFIX: item(0) is null when no label matches; previously this NPE'd.
        return nodes.getLength() > 0 ? nodes.item(0).getNodeValue() : "";
    } catch (IOException | ParserConfigurationException | SAXException | XPathExpressionException e)
    {
        // Multi-catch replaces five identical catch blocks; lookup is best-effort.
        return "";
    }
}
/**
 * Resolves a Caloha/neXtProt term accession to its display name via the
 * neXtProt REST API ({@code /term/<accession>}).
 *
 * @param term the term accession
 * @return the cvTerm name, or the empty string when unavailable
 */
private String obocalohaToReadable(String term)
{
    String ret = "";
    try
    {
        HttpURLConnection conn = (HttpURLConnection) (new URL("https://api.nextprot.org/term/" + term))
                .openConnection();
        conn.setRequestProperty("Accept", "application/json");
        // setDoOutput/setDoInput removed: this is a plain GET with no body,
        // and doInput is already true by default.
        conn.setRequestMethod("GET");
        StringBuilder out = new StringBuilder();
        // try-with-resources closes the stream even when reading fails
        // (the original leaked the reader on any exception).
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8)))
        {
            for (String line; (line = reader.readLine()) != null;)
            {
                out.append(line);
            }
        }
        JSONObject data = new JSONObject(out.toString());
        if (!data.has("cvTerm"))
            return "";
        JSONObject cvTerm = data.getJSONObject("cvTerm");
        if (cvTerm.has("name"))
            ret = cvTerm.getString("name");
    } catch (IOException e)
    {
        // ProtocolException is an IOException, so one handler suffices.
        e.printStackTrace();
    }
    return ret;
}
/**
 * Maps an NCI Thesaurus concept code to its readable name.
 *
 * @param ncitId the NCIt concept code (e.g. "C16612")
 * @return the concept name, or the empty string for unknown codes
 */
private String ncitIdToReadable(String ncitId)
{
    switch (ncitId)
    {
        case "C16612":
            return "Gene";
        case "C7057":
            return "Disease";
        case "C18279":
            return "SNP";
        case "C47902":
            return "PubmedArticle";
        case "C25338":
            return "Score";
        case "C43568":
            return "HGNC Gene Symbol";
        case "C17021":
            return "Protein";
        case "C20633":
            return "Pathway";
        default:
            return "";
    }
}
/**
 * Strips an RDF syntax/schema term down to its fragment — the text after the
 * final {@code '#'}. Returns the input unchanged when no '#' is present.
 *
 * @param synt the URI or term string
 * @return the readable fragment
 */
private String rdfSyntaxToReadable(String synt)
{
    int hash = synt.lastIndexOf('#');
    return synt.substring(hash + 1);
}
/** Returns the final path segment of {@code url} (text after the last '/'). */
private String getLastPartOfUrl(String url)
{
    int slash = url.lastIndexOf('/');
    return url.substring(slash + 1);
}
/**
 * Downloads the JSON representation of a linkedlifedata resource
 * ({@code <url>.json}) and returns it as a UTF-8 string.
 *
 * @param url the resource URL (without the {@code .json} suffix)
 * @return the JSON payload, or {@code null} when the download fails
 */
private String llIdToReadable(String url)
{
    // Dead commented-out file-copy code removed; the stream is read directly.
    String ret = null;
    String urlJsonGet = url + ".json";
    try (InputStream in = new URL(urlJsonGet).openStream())
    {
        ret = IOUtils.toString(in, StandardCharsets.UTF_8);
    } catch (IOException e)
    {
        LOGGER.error("Error in llIdToReadable\n" + e.getClass().getName() + ": " + e.getMessage());
    }
    return ret;
}
/**
 * Produces a readable label for a linkedlifedata URL by fetching its JSON form
 * and extracting the matching value.
 *
 * @param url the linkedlifedata resource URL
 * @return the label, or the empty string when the JSON cannot be fetched
 */
private String linkedlifedataToReadable(String url)
{
    String json = llIdToReadable(url);
    if (json == null)
        return "";
    return readJsonLinkedLifeDataValue(json, url);
}
/**
 * Resolves a Protein Atlas URL to the "Gene description" field of its TSV
 * export; falls back to the last URL segment when no description is found.
 *
 * @param u the proteinatlas.org resource URL
 * @return a readable label, never {@code null}
 */
private String proteinatlasToReadable(String u)
{
    String ret = "";
    TsvParser tsvpars = new TsvParser();
    File proteinInfoTsv = getFileFromUrl(u, ".tsv");
    if (proteinInfoTsv != null)
    {
        Map<String, String> pro = tsvpars.parseTsvFile(proteinInfoTsv);
        String gen = pro.get("Gene description");
        ret = gen != null ? gen : "";
    }
    // BUGFIX: compare string content, not identity. `ret == ""` only matched
    // interned literals and missed non-interned empty values from the map.
    if (ret.isEmpty())
        ret = getLastPartOfUrl(u);
    return ret;
}
/**
 * Resolves an identifiers.org NCBI gene URL to a readable gene title: follows
 * the identifiers.org redirect and scrapes the target page title, dropping
 * the trailing "[...]" suffix when present.
 *
 * @param url the identifiers.org gene URL
 * @return the readable gene title, or {@code null} when resolution fails
 */
private String identifiersGeneToReadable(String url)
{
    String ret = null;
    try
    {
        URL obj = new URL(url);
        HttpURLConnection conn = (HttpURLConnection) obj.openConnection();
        conn.setReadTimeout(5000);
        conn.addRequestProperty("Accept-Language", "en-US,en;q=0.8");
        conn.addRequestProperty("User-Agent", "Mozilla");
        conn.addRequestProperty("Referer", "google.com");
        int status = conn.getResponseCode();
        boolean redirect = status == HttpURLConnection.HTTP_MOVED_TEMP
                || status == HttpURLConnection.HTTP_MOVED_PERM
                || status == HttpURLConnection.HTTP_SEE_OTHER;
        // identifiers.org answers with a redirect to the concrete database page
        if (redirect)
        {
            url = conn.getHeaderField("Location");
            if (url.contains("gene"))
            {
                org.jsoup.nodes.Document doc = Jsoup.connect(url).get();
                String title = doc.title();
                // BUGFIX: guard against titles without a '[' suffix; the
                // original substring(0, lastIndexOf('[') - 1) threw
                // StringIndexOutOfBoundsException in that case.
                int bracket = title.lastIndexOf('[');
                return bracket > 0 ? title.substring(0, bracket - 1) : title;
            }
        }
    } catch (IOException e)
    {
        LOGGER.error("Error in identifiersGeneToReadable\n" + e.getClass().getName() + ": " + e.getMessage());
    }
    return ret;
}
/**
 * Extracts a readable label from a linkedlifedata JSON payload: takes the
 * first {@code skos:exactMatch} value, strips everything up to "mesh/", and
 * URL-decodes it. When the JSON does not have that shape (JSONException),
 * falls back to scraping the resource page title via Jsoup.
 *
 * @param json the JSON payload for {@code url}
 * @param url  the resource URL (also the root key of the JSON object)
 * @return the decoded label, or {@code null} when both strategies fail
 */
private String readJsonLinkedLifeDataValue(String json, String url)
{
    String ret = null;
    JSONObject rootObject;
    try
    {
        rootObject = new JSONObject(json);
        JSONArray rows = rootObject.getJSONObject(url)
                .getJSONArray("http://www.w3.org/2004/02/skos/core#exactMatch");
        JSONObject element = rows.getJSONObject(0);
        String name = element.getString("value");
        // NOTE(review): when "mesh/" is absent, lastIndexOf yields -1 and this
        // drops the first four characters of the value — confirm intent.
        name = name.substring(name.lastIndexOf("mesh/") + 5);
        name = URLDecoder.decode(name, "UTF-8");
        return name;
    } catch (JSONException e)
    {
        // Fallback path: use the page title, minus any "CONCEPT " prefix.
        org.jsoup.nodes.Document doc;
        try
        {
            doc = Jsoup.connect(url).get();
            // NOTE(review): when "CONCEPT " is absent, indexOf yields -1, so
            // Integer.max(-1 + 8, 0) = 7 drops the first seven characters of
            // the title — confirm this is the intended behavior.
            ret = doc.title().substring(Integer.max(doc.title().indexOf("CONCEPT ") + 8, 0));
            ret = URLDecoder.decode(ret, "UTF-8");
            return ret;
        } catch (IOException ex)
        {
            LOGGER.error(
                    "Error in readJsonLinkedLifeDataValue\n" + ex.getClass().getName() + ": " + ex.getMessage());
        }
    } catch (UnsupportedEncodingException e)
    {
        LOGGER.error("Error in readJsonLinkedLifeDataValue\n" + e.getClass().getName() + ": " + e.getMessage());
    }
    return ret;
}
/**
 * Downloads {@code u + extension} into the system temp directory, naming the
 * local file after the last path segment of {@code u}.
 *
 * @param u         the base URL
 * @param extension suffix appended to the URL (not to the local file name)
 * @return the downloaded file, or {@code null} on any I/O failure
 */
private File getFileFromUrl(String u, String extension)
{
    String tempDir = System.getProperty("java.io.tmpdir");
    String outputPath = tempDir + "/" + u.substring(u.lastIndexOf('/') + 1);
    // try-with-resources closes both streams on every path; the original
    // leaked them (hidden behind @SuppressWarnings("resource")).
    try (InputStream is = new URL(u + extension).openConnection().getInputStream();
            FileOutputStream fos = new FileOutputStream(outputPath))
    {
        byte[] buffer = new byte[4096];
        int length;
        while ((length = is.read(buffer)) > 0)
        {
            fos.write(buffer, 0, length);
        }
        return new File(outputPath);
    } catch (IOException e)
    {
        // Best-effort download: callers treat null as "not available".
    }
    return null;
}
/**
 * Formats the assertions of a nanopublication into readable description
 * topics, dispatching on the publication source (disgenet / proteinatlas)
 * and the assertion-group shape.
 *
 * @param meta  metadata of the nanopublication (provides the assertion type)
 * @param npId  nanopublication identifier, used to detect the source
 * @param asser the parsed assertions
 * @return formatted topics; empty when the shape is not recognized
 */
protected ArrayList<DescriptionTopic> getAssertionContentFormatted(MetadataContainer meta, String npId,
        ArrayList<Assertion> asser)
{
    ArrayList<DescriptionTopic> dt = new ArrayList<>();
    DescriptionTopic tmp;
    if (npId.contains("disgenet"))
    {
        // Five assertions: typed gene/disease triples
        if (asser.size() == 5)
        {
            tmp = stringFrom5Assertions(asser);
            dt.add(tmp);
        }
        // GDA associations
        else if (asser.size() == 3)
        {
            tmp = stringFrom3Assertions(asser);
            dt.add(tmp);
        }
    }
    else if (npId.contains("proteinatlas"))
    {
        if (meta.getAssertionType().equals("IHCEvidence"))
        {
            tmp = stringFromIHCAssert(asser);
            dt.add(tmp);
        }
    }
    return dt;
}
/**
 * Builds a human-readable description for an IHC (immunohistochemistry)
 * evidence assertion group, of the form
 * "&lt;protein&gt; with &lt;expression&gt; expression in &lt;tissue name&gt; (&lt;tissue&gt;)".
 *
 * @param asser the assertions belonging to one IHCEvidence nanopublication
 * @return a DescriptionTopic with subject "IHCEvidence" and one summary line
 */
private DescriptionTopic stringFromIHCAssert(ArrayList<Assertion> asser)
{
    DescriptionTopic dt = new DescriptionTopic();
    dt.setSubject("IHCEvidence");
    ArrayList<String> assHumRe = new ArrayList<String>();
    String expression = "";
    String protein = "";
    String tissue = "";
    String tissueName = "";
    for (Assertion a : asser)
    {
        // The "nlx" predicate carries the expression level and its protein
        if (a.getPredicate() != null && a.getPredicate().contains("nlx"))
        {
            expression = a.getObject();
            protein = a.getSubject();
            for (Assertion as : asser)
            {
                // BUGFIX: the original only null-checked the predicate in the
                // first branch; the else-if dereferenced it unconditionally
                // and could throw a NullPointerException.
                if (as.getPredicate() == null)
                    continue;
                if (as.getPredicate().contains("occurs in"))
                {
                    tissue = as.getObject();
                }
                else if (!tissue.equals("") && as.getPredicate().contains("type") && as.getObject() != null)
                {
                    tissueName = as.getObject();
                    break;
                }
            }
        }
    }
    assHumRe.add(protein + " with " + expression + " expression in " + tissueName + " (" + tissue + ")");
    dt.setAssertion(assHumRe);
    return dt;
}
/**
 * Builds a "gene - disease" description from a five-assertion disgenet group:
 * the topic subject is the object of the GDA-typed triple, and gene/disease
 * are the subjects of the triples typed "Gene" and "Disease".
 *
 * @param asser the five assertions of one nanopublication
 * @return the description, or an empty topic when no GDA triple is found
 */
private DescriptionTopic stringFrom5Assertions(ArrayList<Assertion> asser)
{
    DescriptionTopic dt = new DescriptionTopic();
    for (Assertion a : asser)
        // Locate the typing triple of the gene-disease association itself
        if (a.getSubject().contains("GDA id") && a.getPredicate().contains("type"))
        {
            dt.setSubject(a.getObject());
            String gene = "";
            String disease = "";
            for (Assertion as : asser)
            {
                if (as.getObject().equals("Gene") && as.getPredicate().contains("type"))
                    gene = as.getSubject();
                else if (as.getObject().equals("Disease") && as.getPredicate().contains("type"))
                    disease = as.getSubject();
            }
            ArrayList<String> assHumRe = new ArrayList<String>();
            assHumRe.add(gene + " - " + disease);
            dt.setAssertion(assHumRe);
            return dt;
        }
    return dt;
}
/**
 * Builds a "gene - disease" description from a three-assertion disgenet GDA
 * group. The gene is taken from the first "refers to" triple of the GDA and
 * the disease from the second.
 * <p>
 * NOTE(review): assumes the gene "refers to" triple precedes the disease one
 * in {@code asser} — confirm that ordering guarantee holds upstream.
 *
 * @param asser the three assertions of one nanopublication
 * @return the description, or an empty topic when no GDA triple is found
 */
private DescriptionTopic stringFrom3Assertions(ArrayList<Assertion> asser)
{
    DescriptionTopic dt = new DescriptionTopic();
    for (Assertion a : asser)
    {
        // Locate the typing triple of the gene-disease association itself
        if (a.getSubject().contains("GDA id") && a.getPredicate().contains("type"))
        {
            dt.setSubject(a.getObject());
            String gene = "";
            String disease = "";
            String gdaId = a.getSubject();
            for (Assertion as : asser)
            {
                if (as.getSubject().equals(gdaId) && as.getPredicate().equals("refers to") && gene.equals(""))
                    gene = as.getObject();
                else if (as.getSubject().equals(gdaId) && as.getPredicate().equals("refers to")
                        && disease.equals(""))
                    disease = as.getObject();
            }
            ArrayList<String> assHumRe = new ArrayList<String>();
            assHumRe.add(gene + " - " + disease);
            dt.setAssertion(assHumRe);
            return dt;
        }
    }
    return dt;
}
}
|
// Copyright 2021 Goldman Sachs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.finos.legend.engine.language.pure.grammar.test;
import org.junit.Test;
/**
 * Grammar round-trip tests for the ExternalFormat section: each test feeds a
 * Pure snippet through the parser/composer pipeline (via the inherited
 * {@code test(...)} helper) and asserts the text survives unchanged.
 */
public class TestExternalFormatGrammarRoundtrip extends TestGrammarRoundtrip.TestGrammarRoundtripTestSuite
{
    // SchemaSets only: one anonymous schema and one set with two id'd,
    // located schemas.
    @Test
    public void testSchemas()
    {
        test("###ExternalFormat\n" +
                "SchemaSet test::Example\n" +
                "{\n" +
                "  format: Example;\n" +
                "  schemas: [\n" +
                "    {\n" +
                "      content: 'Sample Schema Description\\nsome data\\nis described here\\n';\n" +
                "    }\n" +
                "  ];\n" +
                "}\n" +
                "\n" +
                "SchemaSet test::Example2\n" +
                "{\n" +
                "  format: Example;\n" +
                "  schemas: [\n" +
                "    {\n" +
                "      id: ex2_1;\n" +
                "      location: 'ex2_1.schema';\n" +
                "      content: 'Second Sample Schema Description\\nSchema 1 of Example 2';\n" +
                "    },\n" +
                "    {\n" +
                "      id: ex2_2;\n" +
                "      location: 'ex2_2.schema';\n" +
                "      content: 'Third Sample Schema Description\\nSchema 2 of Example 2';\n" +
                "    }\n" +
                "  ];\n" +
                "}\n"
        );
    }

    // A SchemaSet plus a Binding referencing it (no schemaId).
    @Test
    public void testValidSchemaAndBinding()
    {
        test("###ExternalFormat\n" +
                "SchemaSet test::Example\n" +
                "{\n" +
                "  format: Example;\n" +
                "  schemas: [\n" +
                "    {\n" +
                "      content: 'Schema Description';\n" +
                "    }\n" +
                "  ];\n" +
                "}\n" +
                "\n" +
                "Binding test::ExampleBinding\n" +
                "{\n" +
                "  schemaSet: test::Example;\n" +
                "  contentType: 'text/example';\n" +
                "  modelIncludes: [\n" +
                "    my::ClassA,\n" +
                "    my::ClassB\n" +
                "  ];\n" +
                "  modelExcludes: [\n" +
                "    my::ClassC\n" +
                "  ];\n" +
                "}\n"
        );
    }

    // As above, but the Binding also pins a specific schema via schemaId.
    @Test
    public void testValidSchemaAndBindingWithId()
    {
        test("###ExternalFormat\n" +
                "SchemaSet test::Example\n" +
                "{\n" +
                "  format: Example;\n" +
                "  schemas: [\n" +
                "    {\n" +
                "      id: s1;\n" +
                "      content: 'Schema Description';\n" +
                "    }\n" +
                "  ];\n" +
                "}\n" +
                "\n" +
                "Binding test::ExampleBinding\n" +
                "{\n" +
                "  schemaSet: test::Example;\n" +
                "  schemaId: s1;\n" +
                "  contentType: 'text/example';\n" +
                "  modelIncludes: [\n" +
                "    my::ClassA,\n" +
                "    my::ClassB\n" +
                "  ];\n" +
                "  modelExcludes: [\n" +
                "    my::ClassC\n" +
                "  ];\n" +
                "}\n"
        );
    }

    // A Binding with no SchemaSet at all (schemaless).
    @Test
    public void testValidSchemalessBinding()
    {
        test("###ExternalFormat\n" +
                "Binding test::ExampleBinding\n" +
                "{\n" +
                "  contentType: 'text/example';\n" +
                "  modelIncludes: [\n" +
                "    my::ClassA,\n" +
                "    my::ClassB\n" +
                "  ];\n" +
                "  modelExcludes: [\n" +
                "    my::ClassC\n" +
                "  ];\n" +
                "}\n"
        );
    }
}
|
/*
* Copyright (c) 1996, 2003, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package sun.io;
import sun.nio.cs.ext.MS1255;
/**
 * Byte-to-char converter for Cp1255 (Windows Hebrew), backed by the NIO
 * {@link MS1255} charset's single-byte decoder mapping.
 *
 * @author ConverterGenerator tool
 */
public class ByteToCharCp1255 extends ByteToCharSingleByte {

    /** NIO charset implementation that supplies the mapping table. */
    private static final MS1255 CODER = new MS1255();

    public ByteToCharCp1255() {
        super.byteToCharTable = CODER.getDecoderSingleByteMappings();
    }

    public String getCharacterEncoding() {
        return "Cp1255";
    }
}
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.appmesh.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.appmesh.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * MeshStatus JSON Unmarshaller
 * <p>
 * Generated streaming unmarshaller: walks JSON tokens and populates a
 * {@link MeshStatus} until the enclosing object/array is closed.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class MeshStatusJsonUnmarshaller implements Unmarshaller<MeshStatus, JsonUnmarshallerContext> {

    public MeshStatus unmarshall(JsonUnmarshallerContext context) throws Exception {
        MeshStatus meshStatus = new MeshStatus();
        // Depth bookkeeping: fields of this object live at originalDepth + 1.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;
        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        // A JSON null means "no MeshStatus present".
        if (token == VALUE_NULL) {
            return null;
        }
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("status", targetDepth)) {
                    context.nextToken();
                    meshStatus.setStatus(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop once we have climbed back out of this object's scope.
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return meshStatus;
    }

    private static MeshStatusJsonUnmarshaller instance;

    // NOTE(review): lazy init is not thread-safe; at worst two instances are
    // created, which is benign for this stateless class (standard in AWS
    // generated code).
    public static MeshStatusJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new MeshStatusJsonUnmarshaller();
        return instance;
    }
}
|
/*
*
* Copyright 2017-2018 549477611@qq.com(xiaoyu)
*
* This copyrighted material is made available to anyone wishing to use, modify,
* copy, or redistribute it subject to the terms and conditions of the GNU
* Lesser General Public License, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this distribution; if not, see <http://www.gnu.org/licenses/>.
*
*/
package com.github.myth.common.exception;
/**
 * Unchecked exception used throughout Myth to signal framework-level errors.
 * All constructors simply delegate to {@link RuntimeException}.
 *
 * @author xiaoyu
 */
public class MythRuntimeException extends RuntimeException {

    private static final long serialVersionUID = -1949770547060521702L;

    /** Creates an exception without a detail message or cause. */
    public MythRuntimeException() {
    }

    /**
     * Creates an exception with a detail message.
     *
     * @param message the detail message
     */
    public MythRuntimeException(String message) {
        super(message);
    }

    /**
     * Creates an exception with a detail message and a cause.
     *
     * @param message the detail message
     * @param cause   the underlying cause
     */
    public MythRuntimeException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Creates an exception wrapping a cause.
     *
     * @param cause the underlying cause
     */
    public MythRuntimeException(Throwable cause) {
        super(cause);
    }
}
|
/*
* @author Andrei Constantin Tanasache, act@omtia.com
*/
package com.omtia.un;
/**
* Given an array of integers nums containing n + 1 integers where each integer is in the range [1, n] inclusive.
*
* There is only one repeated number in nums, return this repeated number.
*
* You must solve the problem without modifying the array nums and uses only constant extra space.
*
*
*
* Example 1:
*
* Input: nums = [1,3,4,2,2]
* Output: 2
* Example 2:
*
* Input: nums = [3,1,3,4,2]
* Output: 3
*
*
* Constraints:
*
* 1 <= n <= 105
* nums.length == n + 1
* 1 <= nums[i] <= n
* All the integers in nums appear only once except for precisely one integer which appears two or more times.
*
*
* Follow up:
*
* How can we prove that at least one duplicate number must exist in nums?
* Can you solve the problem in linear runtime complexity?
*/
/**
 * LeetCode 287 — Find the Duplicate Number.
 * Floyd's tortoise-and-hare cycle detection over the functional graph
 * {@code i -> nums[i]}: O(n) time, O(1) extra space, the array is untouched.
 */
public class _287_find_the_duplicate_number {
    public int findDuplicate(int[] nums) {
        // Phase 1: advance pointers at 1x and 2x speed until they meet
        // inside the cycle created by the duplicated value.
        int tortoise = nums[0];
        int hare = nums[0];
        do {
            tortoise = nums[tortoise];
            hare = nums[nums[hare]];
        } while (tortoise != hare);
        // Phase 2: restart one pointer from the entry node; the point where
        // they meet again is the cycle entrance, i.e. the duplicate.
        tortoise = nums[0];
        while (tortoise != hare) {
            tortoise = nums[tortoise];
            hare = nums[hare];
        }
        return tortoise;
    }
}
|
/*
* Copyright © 2018-2021 Apple Inc. and the ServiceTalk project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicetalk.concurrent.api;
import io.servicetalk.concurrent.Cancellable;
import io.servicetalk.concurrent.SingleSource;
import io.servicetalk.concurrent.SingleSource.Subscriber;
import io.servicetalk.concurrent.api.SourceToFuture.SingleToFuture;
import io.servicetalk.context.api.ContextMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BiFunction;
import java.util.function.BooleanSupplier;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.function.IntPredicate;
import java.util.function.Predicate;
import java.util.function.Supplier;
import javax.annotation.Nullable;
import static io.servicetalk.concurrent.api.Executors.global;
import static io.servicetalk.concurrent.api.NeverSingle.neverSingle;
import static io.servicetalk.concurrent.api.Publisher.from;
import static io.servicetalk.concurrent.api.Publisher.fromIterable;
import static io.servicetalk.concurrent.api.RepeatWhenSingle.END_REPEAT_COMPLETABLE;
import static io.servicetalk.concurrent.api.SingleDoOnUtils.doOnErrorSupplier;
import static io.servicetalk.concurrent.api.SingleDoOnUtils.doOnSubscribeSupplier;
import static io.servicetalk.concurrent.api.SingleDoOnUtils.doOnSuccessSupplier;
import static io.servicetalk.concurrent.internal.SubscriberUtils.deliverErrorFromSource;
import static java.util.Objects.requireNonNull;
import static java.util.function.Function.identity;
/**
* An asynchronous computation that either completes with success giving the result or completes with an error.
*
* <h2>How to subscribe?</h2>
*
* This class does not provide a way to subscribe using a {@link Subscriber} as such calls are
* ambiguous about the intent whether the subscribe is part of the same source (a.k.a an operator) or it is a terminal
* subscribe. If it is required to subscribe to a source, then a {@link SourceAdapters source adapter} can be used to
* convert to a {@link SingleSource}.
*
* @param <T> Type of the result of the single.
*/
public abstract class Single<T> {
private static final Logger LOGGER = LoggerFactory.getLogger(Single.class);

static {
    // Runs once on class load: lets AsyncContext auto-enable itself before
    // any Single is subscribed.
    AsyncContext.autoEnable();
}

/**
 * New instance.
 *
 */
protected Single() {
}
//
// Operators Begin
//
/**
 * Transforms the successful result of this {@link Single} into a value of a
 * different type; any error is forwarded untouched to the returned
 * {@link Single}.
 * <p>
 * Sequential-programming equivalent:
 * <pre>{@code
 * T tResult = resultOfThisSingle();
 * R rResult = mapper.apply(tResult);
 * }</pre>
 * @param mapper To convert this result to other.
 * @param <R> Type of the returned {@code Single}.
 * @return A new {@link Single} that will now have the result of type {@link R}.
 */
public final <R> Single<R> map(Function<? super T, ? extends R> mapper) {
    final Single<R> mapped = new MapSingle<>(this, mapper);
    return mapped;
}
/**
 * Narrows (or widens) this {@link Single} from type {@link T} to type
 * {@link R} by applying {@link Class#cast(Object)} to the result.
 * <p>
 * Sequential-programming equivalent:
 * <pre>{@code
 * T tResult = resultOfThisSingle();
 * R rResult = clazz.cast(tResult);
 * }</pre>
 * @param clazz The type to cast to.
 * @param <R> The resulting type of the cast operation.
 * @return The cast of this {@link Single} to type {@link R}. Terminates with a {@link ClassCastException} if
 * signals cannot be cast to type {@link R}.
 * @see <a href="https://reactivex.io/documentation/operators/map.html">ReactiveX cast operator.</a>
 */
public final <R> Single<R> cast(Class<R> clazz) {
    final Function<T, R> caster = clazz::cast;
    return map(caster);
}
/**
 * Swallows every error emitted by this {@link Single} by converting it into a
 * {@link Subscriber#onSuccess(Object)} signal produced by {@code itemSupplier}.
 * <p>
 * Sequential-programming equivalent:
 * <pre>{@code
 * T result = resultOfThisSingle();
 * try {
 *   terminalOfThisSingle();
 * } catch (Throwable cause) {
 *   return itemSupplier.apply(cause);
 * }
 * return result;
 * }</pre>
 * @param itemSupplier returns the element to emit to {@link Subscriber#onSuccess(Object)}.
 * @return A {@link Single} which transform errors emitted on this {@link Single} into
 * {@link Subscriber#onSuccess(Object)} signal (e.g. swallows the error).
 * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX catch operator.</a>
 */
public final Single<T> onErrorReturn(Function<? super Throwable, ? extends T> itemSupplier) {
    return onErrorReturn(ignored -> true, itemSupplier);
}
/**
 * Swallows errors of type {@code type} emitted by this {@link Single} by
 * converting them into a {@link Subscriber#onSuccess(Object)} signal produced
 * by {@code itemSupplier}; other errors are propagated unchanged.
 * <p>
 * Sequential-programming equivalent:
 * <pre>{@code
 * T result = resultOfThisSingle();
 * try {
 *   terminalOfThisSingle();
 * } catch (Throwable cause) {
 *   if (!type.isInstance(cause)) {
 *     throw cause;
 *   }
 *   return itemSupplier.apply(cause);
 * }
 * return result;
 * }</pre>
 * @param type The {@link Throwable} type to filter, operator will not apply for errors which don't match this type.
 * @param itemSupplier returns the element to emit to {@link Subscriber#onSuccess(Object)}.
 * @param <E> The type of {@link Throwable} to transform.
 * @return A {@link Single} which transform errors emitted on this {@link Single} into
 * {@link Subscriber#onSuccess(Object)} signal (e.g. swallows the error).
 * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX catch operator.</a>
 */
public final <E extends Throwable> Single<T> onErrorReturn(
        Class<E> type, Function<? super E, ? extends T> itemSupplier) {
    // Safe: the predicate below guarantees only E instances reach the supplier.
    @SuppressWarnings("unchecked")
    final Function<Throwable, ? extends T> untypedSupplier = (Function<Throwable, ? extends T>) itemSupplier;
    return onErrorReturn(type::isInstance, untypedSupplier);
}
/**
 * Swallows errors matching {@code predicate} emitted by this {@link Single}
 * by converting them into a {@link Subscriber#onSuccess(Object)} signal
 * produced by {@code itemSupplier}; other errors are propagated unchanged.
 * <p>
 * Sequential-programming equivalent:
 * <pre>{@code
 * T result = resultOfThisSingle();
 * try {
 *   terminalOfThisSingle();
 * } catch (Throwable cause) {
 *   if (!predicate.test(cause)) {
 *     throw cause;
 *   }
 *   return itemSupplier.apply(cause);
 * }
 * return result;
 * }</pre>
 * @param predicate returns {@code true} if the {@link Throwable} should be transformed to
 * {@link Subscriber#onSuccess(Object)} signal. Returns {@code false} to propagate the error.
 * @param itemSupplier returns the element to emit to {@link Subscriber#onSuccess(Object)}.
 * @return A {@link Single} which transform errors emitted on this {@link Single} into
 * {@link Subscriber#onSuccess(Object)} signal (e.g. swallows the error).
 * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX catch operator.</a>
 */
public final Single<T> onErrorReturn(Predicate<? super Throwable> predicate,
                                     Function<? super Throwable, ? extends T> itemSupplier) {
    requireNonNull(itemSupplier);
    return onErrorResume(predicate, cause -> succeeded(itemSupplier.apply(cause)));
}
/**
 * Replaces every error emitted by this {@link Single} with the error produced
 * by {@code mapper}.
 * <p>
 * Sequential-programming equivalent:
 * <pre>{@code
 * T result = resultOfThisSingle();
 * try {
 *   terminalOfThisSingle();
 * } catch (Throwable cause) {
 *   throw mapper.apply(cause);
 * }
 * return result;
 * }</pre>
 * @param mapper returns the error used to terminate the returned {@link Single}.
 * @return A {@link Single} which transform errors emitted on this {@link Single} into a different error.
 * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX catch operator.</a>
 */
public final Single<T> onErrorMap(Function<? super Throwable, ? extends Throwable> mapper) {
    return onErrorMap(ignored -> true, mapper);
}
/**
 * Replaces errors of type {@code type} emitted by this {@link Single} with
 * the error produced by {@code mapper}; other errors are propagated unchanged.
 * <p>
 * Sequential-programming equivalent:
 * <pre>{@code
 * T result = resultOfThisSingle();
 * try {
 *   terminalOfThisSingle();
 * } catch (Throwable cause) {
 *   if (type.isInstance(cause)) {
 *     throw mapper.apply(cause);
 *   } else {
 *     throw cause;
 *   }
 * }
 * return result;
 * }</pre>
 * @param type The {@link Throwable} type to filter, operator will not apply for errors which don't match this type.
 * @param mapper returns the error used to terminate the returned {@link Single}.
 * @param <E> The type of {@link Throwable} to transform.
 * @return A {@link Single} which transform errors emitted on this {@link Single} into a different error.
 * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX catch operator.</a>
 */
public final <E extends Throwable> Single<T> onErrorMap(
        Class<E> type, Function<? super E, ? extends Throwable> mapper) {
    // Safe: the predicate below guarantees only E instances reach the mapper.
    @SuppressWarnings("unchecked")
    final Function<Throwable, Throwable> untypedMapper = (Function<Throwable, Throwable>) mapper;
    return onErrorMap(type::isInstance, untypedMapper);
}
/**
 * Transform errors emitted on this {@link Single} which match {@code predicate} into a different error.
 * <p>
 * This method provides a data transformation in sequential programming similar to:
 * <pre>{@code
 * T result = resultOfThisSingle();
 * try {
 *   terminalOfThisSingle();
 * } catch (Throwable cause) {
 *   if (predicate.test(cause)) {
 *     throw mapper.apply(cause);
 *   } else {
 *     throw cause;
 *   }
 * }
 * return result;
 * }</pre>
 * @param predicate returns {@code true} if the {@link Throwable} should be transformed via {@code mapper}. Returns
 * {@code false} to propagate the original error.
 * @param mapper returns the error used to terminate the returned {@link Single}.
 * @return A {@link Single} which transform errors emitted on this {@link Single} into a different error.
 * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX catch operator.</a>
 */
public final Single<T> onErrorMap(Predicate<? super Throwable> predicate,
                                  Function<? super Throwable, ? extends Throwable> mapper) {
    return new OnErrorMapSingle<>(this, predicate, mapper);
}
/**
 * Recovers from any error emitted by this {@link Single} by continuing with
 * the {@link Single} returned by {@code nextFactory}.
 * <p>
 * Similar to a try/catch block in sequential programming:
 * <pre>{@code
 * T result;
 * try {
 *   result = resultOfThisSingle();
 * } catch (Throwable cause) {
 *   // Note that nextFactory returning a error Single is like re-throwing (nextFactory shouldn't throw).
 *   result = nextFactory.apply(cause);
 * }
 * return result;
 * }</pre>
 * @param nextFactory Returns the next {@link Single}, when this {@link Single} emits an error.
 * @return A {@link Single} that recovers from an error from this {@link Single} by using another
 * {@link Single} provided by the passed {@code nextFactory}.
 */
public final Single<T> onErrorResume(Function<? super Throwable, ? extends Single<? extends T>> nextFactory) {
    return onErrorResume(ignored -> true, nextFactory);
}
/**
 * Recovers from errors of type {@code type} emitted by this {@link Single} by
 * continuing with the {@link Single} returned by {@code nextFactory}; other
 * errors are propagated unchanged.
 * <p>
 * Similar to a try/catch block in sequential programming:
 * <pre>{@code
 * T result;
 * try {
 *   result = resultOfThisSingle();
 * } catch (Throwable cause) {
 *   if (type.isInstance(cause)) {
 *     // Note that nextFactory returning a error Single is like re-throwing (nextFactory shouldn't throw).
 *     result = nextFactory.apply(cause);
 *   } else {
 *     throw cause;
 *   }
 * }
 * return result;
 * }</pre>
 *
 * @param type The {@link Throwable} type to filter, operator will not apply for errors which don't match this type.
 * @param nextFactory Returns the next {@link Single}, when this {@link Single} emits an error.
 * @param <E> The type of {@link Throwable} to transform.
 * @return A {@link Single} that recovers from an error from this {@link Single} by using another
 * {@link Single} provided by the passed {@code nextFactory}.
 * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX catch operator.</a>
 */
public final <E extends Throwable> Single<T> onErrorResume(
        Class<E> type, Function<? super E, ? extends Single<? extends T>> nextFactory) {
    // Safe: the predicate below guarantees only E instances reach the factory.
    @SuppressWarnings("unchecked")
    final Function<Throwable, ? extends Single<? extends T>> untypedFactory =
            (Function<Throwable, ? extends Single<? extends T>>) nextFactory;
    return onErrorResume(type::isInstance, untypedFactory);
}
/**
 * Recover from errors emitted by this {@link Single} which match {@code predicate} by using another
 * {@link Single} provided by the passed {@code nextFactory}.
 * <p>
 * This method provides similar capabilities to a try/catch block in sequential programming:
 * <pre>{@code
 * T result;
 * try {
 *     result = resultOfThisSingle();
 * } catch (Throwable cause) {
 *     if (predicate.test(cause)) {
 *         // Note that nextFactory returning an error Single is like re-throwing (nextFactory shouldn't throw).
 *         result = nextFactory.apply(cause);
 *     } else {
 *         throw cause;
 *     }
 * }
 * return result;
 * }</pre>
 *
 * @param predicate returns {@code true} if the {@link Throwable} should be transformed via {@code nextFactory}.
 * Returns {@code false} to propagate the original error.
 * @param nextFactory Returns the next {@link Single}, when this {@link Single} emits an error.
 * @return A {@link Single} that recovers from an error from this {@link Single} by using another
 * {@link Single} provided by the passed {@code nextFactory}.
 * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX catch operator.</a>
 */
public final Single<T> onErrorResume(Predicate<? super Throwable> predicate,
                                     Function<? super Throwable, ? extends Single<? extends T>> nextFactory) {
    return new OnErrorResumeSingle<>(this, predicate, nextFactory);
}
/**
 * Returns a {@link Single} that mirrors emissions from the {@link Single} returned by {@code next}.
 * Any error emitted by this {@link Single} is forwarded to the returned {@link Single}.
 * <p>
 * This method is similar to {@link #map(Function)} but the result is asynchronous, and provides a data
 * transformation in sequential programming similar to:
 * <pre>{@code
 * T tResult = resultOfThisSingle();
 * R rResult = mapper.apply(tResult); // Asynchronous result is flattened into a value by this operator.
 * }</pre>
 * @param next Function to give the next {@link Single}.
 * @param <R> Type of the result of the resulting {@link Single}.
 * @return New {@link Single} that switches to the {@link Single} returned by {@code next} after this {@link Single}
 * completes successfully.
 */
public final <R> Single<R> flatMap(Function<? super T, ? extends Single<? extends R>> next) {
    return new SingleFlatMapSingle<>(this, next);
}
/**
 * Returns a {@link Completable} that mirrors emissions from the {@link Completable} returned by {@code next}.
 * Any error emitted by this {@link Single} is forwarded to the returned {@link Completable}.
 * <p>
 * This method is similar to {@link #map(Function)} but the result is asynchronous with either complete/error status
 * in sequential programming similar to:
 * <pre>{@code
 * T tResult = resultOfThisSingle();
 * mapper.apply(tResult); // Asynchronous result is flattened into an error or completion by this operator.
 * }</pre>
 * @param next Function to give the next {@link Completable}.
 * @return New {@link Completable} that switches to the {@link Completable} returned by {@code next} after this
 * {@link Single} completes successfully.
 */
public final Completable flatMapCompletable(Function<? super T, ? extends Completable> next) {
    return new SingleFlatMapCompletable<>(this, next);
}
/**
 * Returns a {@link Publisher} that mirrors emissions from the {@link Publisher} returned by {@code next}.
 * Any error emitted by this {@link Single} is forwarded to the returned {@link Publisher}.
 * <p>
 * This method is similar to {@link #map(Function)} but the result is asynchronous, and provides a data
 * transformation in sequential programming similar to:
 * <pre>{@code
 * T tResult = resultOfThisSingle();
 * // Asynchronous result from mapper is flattened into a collection of values.
 * for (R rResult : mapper.apply(tResult)) {
 *     // process rResult
 * }
 * }</pre>
 * @param next Function to give the next {@link Publisher}.
 * @param <R> Type of objects emitted by the returned {@link Publisher}.
 * @return New {@link Publisher} that switches to the {@link Publisher} returned by {@code next} after this
 * {@link Single} completes successfully.
 */
public final <R> Publisher<R> flatMapPublisher(Function<? super T, ? extends Publisher<? extends R>> next) {
    return new SingleFlatMapPublisher<>(this, next);
}
/**
 * Invokes the {@code onSuccess} {@link Consumer} argument when {@link Subscriber#onSuccess(Object)} is called for
 * {@link Subscriber}s of the returned {@link Single}.
 * <p>
 * The order in which {@code onSuccess} will be invoked relative to {@link Subscriber#onSuccess(Object)} is
 * undefined. If you need strict ordering see {@link #beforeOnSuccess(Consumer)} and
 * {@link #afterOnSuccess(Consumer)}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * T result = resultOfThisSingle();
 * // NOTE: The order of operations here is not guaranteed by this method!
 * nextOperation(result);
 * onSuccess.accept(result);
 * }</pre>
 * @param onSuccess Invoked when {@link Subscriber#onSuccess(Object)} is called for
 * {@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
 * @return The new {@link Single}.
 * @see #beforeOnSuccess(Consumer)
 * @see #afterOnSuccess(Consumer)
 */
public final Single<T> whenOnSuccess(Consumer<? super T> onSuccess) {
    // when* makes no ordering guarantee, so the before* variant is a valid default implementation.
    return beforeOnSuccess(onSuccess);
}
/**
 * Invokes the {@code onError} {@link Consumer} argument when {@link Subscriber#onError(Throwable)} is called for
 * {@link Subscriber}s of the returned {@link Single}.
 * <p>
 * The order in which {@code onError} will be invoked relative to {@link Subscriber#onError(Throwable)} is
 * undefined. If you need strict ordering see {@link #beforeOnError(Consumer)} and
 * {@link #afterOnError(Consumer)}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * try {
 *     T result = resultOfThisSingle();
 * } catch (Throwable cause) {
 *     // NOTE: The order of operations here is not guaranteed by this method!
 *     nextOperation(cause);
 *     onError.accept(cause);
 * }
 * }</pre>
 * @param onError Invoked when {@link Subscriber#onError(Throwable)} is called for {@link Subscriber}s of the
 * returned {@link Single}. <strong>MUST NOT</strong> throw.
 * @return The new {@link Single}.
 * @see #beforeOnError(Consumer)
 * @see #afterOnError(Consumer)
 */
public final Single<T> whenOnError(Consumer<Throwable> onError) {
    // when* makes no ordering guarantee, so the before* variant is a valid default implementation.
    return beforeOnError(onError);
}
/**
 * Invokes the {@code whenFinally} {@link Runnable} argument exactly once, when any of the following terminal
 * methods are called:
 * <ul>
 *     <li>{@link Subscriber#onSuccess(Object)}</li>
 *     <li>{@link Subscriber#onError(Throwable)}</li>
 *     <li>{@link Cancellable#cancel()}</li>
 * </ul>
 * for Subscriptions/{@link Subscriber}s of the returned {@link Single}.
 * <p>
 * The order in which {@code whenFinally} will be invoked relative to the above methods is undefined. If you need
 * strict ordering see {@link #beforeFinally(Runnable)} and {@link #afterFinally(Runnable)}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * try {
 *     T result = resultOfThisSingle();
 * } finally {
 *     // NOTE: The order of operations here is not guaranteed by this method!
 *     nextOperation(); // Maybe notifying of cancellation, or termination
 *     doFinally.run();
 * }
 * }</pre>
 *
 * @param doFinally Invoked exactly once, when any of the following terminal methods are called:
 * <ul>
 *     <li>{@link Subscriber#onSuccess(Object)}</li>
 *     <li>{@link Subscriber#onError(Throwable)}</li>
 *     <li>{@link Cancellable#cancel()}</li>
 * </ul>
 * for Subscriptions/{@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
 * @return The new {@link Single}.
 * @see #beforeFinally(Runnable)
 * @see #afterFinally(Runnable)
 */
public final Single<T> whenFinally(Runnable doFinally) {
    // when* makes no ordering guarantee, so the before* variant is a valid default implementation.
    return beforeFinally(doFinally);
}
/**
 * Invokes the corresponding method on {@code whenFinally} {@link TerminalSignalConsumer} argument when any
 * of the following terminal methods are called:
 * <ul>
 *     <li>{@link Subscriber#onSuccess(Object)} - invokes
 *     {@link TerminalSignalConsumer#onComplete()}</li>
 *     <li>{@link Subscriber#onError(Throwable)} - invokes
 *     {@link TerminalSignalConsumer#onError(Throwable)}</li>
 *     <li>{@link Cancellable#cancel()} - invokes {@link TerminalSignalConsumer#cancel()}</li>
 * </ul>
 * for Subscriptions/{@link Subscriber}s of the returned {@link Single}.
 * <p>
 * The order in which {@code whenFinally} will be invoked relative to the above methods is undefined. If you need
 * strict ordering see {@link #beforeFinally(TerminalSignalConsumer)} and
 * {@link #afterFinally(TerminalSignalConsumer)}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * T result;
 * try {
 *     result = resultOfThisSingle();
 * } catch(Throwable t) {
 *     // NOTE: The order of operations here is not guaranteed by this method!
 *     nextOperation(); // Maybe notifying of cancellation, or termination
 *     doFinally.onError(t);
 *     return;
 * }
 * // NOTE: The order of operations here is not guaranteed by this method!
 * nextOperation(); // Maybe notifying of cancellation, or termination
 * doFinally.onComplete();
 * }</pre>
 *
 * @param doFinally For each subscribe of the returned {@link Single}, at most one method of this
 * {@link TerminalSignalConsumer} will be invoked.
 * @return The new {@link Single}.
 * @see #beforeFinally(TerminalSignalConsumer)
 * @see #afterFinally(TerminalSignalConsumer)
 */
public final Single<T> whenFinally(TerminalSignalConsumer doFinally) {
    // when* makes no ordering guarantee, so the before* variant is a valid default implementation.
    return beforeFinally(doFinally);
}
/**
 * Invokes the corresponding method on {@code whenFinally} {@link SingleTerminalSignalConsumer} argument when any
 * of the following terminal methods are called:
 * <ul>
 *     <li>{@link Subscriber#onSuccess(Object)} - invokes
 *     {@link SingleTerminalSignalConsumer#onSuccess(Object)}</li>
 *     <li>{@link Subscriber#onError(Throwable)} - invokes
 *     {@link SingleTerminalSignalConsumer#onError(Throwable)}</li>
 *     <li>{@link Cancellable#cancel()} - invokes {@link SingleTerminalSignalConsumer#cancel()}</li>
 * </ul>
 * for Subscriptions/{@link Subscriber}s of the returned {@link Single}.
 * <p>
 * The order in which {@code whenFinally} will be invoked relative to the above methods is undefined. If you need
 * strict ordering see {@link #beforeFinally(SingleTerminalSignalConsumer)} and
 * {@link #afterFinally(SingleTerminalSignalConsumer)}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * T result;
 * try {
 *     result = resultOfThisSingle();
 * } catch(Throwable t) {
 *     // NOTE: The order of operations here is not guaranteed by this method!
 *     nextOperation(); // Maybe notifying of cancellation, or termination
 *     doFinally.onError(t);
 *     return;
 * }
 * // NOTE: The order of operations here is not guaranteed by this method!
 * nextOperation(); // Maybe notifying of cancellation, or termination
 * doFinally.onSuccess(result);
 * }</pre>
 *
 * @param doFinally For each subscribe of the returned {@link Single}, at most one method of this
 * {@link SingleTerminalSignalConsumer} will be invoked.
 * @return The new {@link Single}.
 * @see #beforeFinally(SingleTerminalSignalConsumer)
 * @see #afterFinally(SingleTerminalSignalConsumer)
 */
public final Single<T> whenFinally(SingleTerminalSignalConsumer<? super T> doFinally) {
    // when* makes no ordering guarantee, so the before* variant is a valid default implementation.
    return beforeFinally(doFinally);
}
/**
 * Invokes the {@code onCancel} {@link Runnable} argument when {@link Cancellable#cancel()} is called for
 * Subscriptions of the returned {@link Single}.
 * <p>
 * The order in which {@code onCancel} will be invoked relative to {@link Cancellable#cancel()} is undefined. If
 * you need strict ordering see {@link #beforeCancel(Runnable)} and {@link #afterCancel(Runnable)}.
 * @param onCancel Invoked when {@link Cancellable#cancel()} is called for Subscriptions of the
 * returned {@link Single}. <strong>MUST NOT</strong> throw.
 * @return The new {@link Single}.
 * @see #beforeCancel(Runnable)
 * @see #afterCancel(Runnable)
 */
public final Single<T> whenCancel(Runnable onCancel) {
    // when* makes no ordering guarantee, so the before* variant is a valid default implementation.
    return beforeCancel(onCancel);
}
/**
 * Creates a new {@link Single} that will mimic the signals of this {@link Single} but will terminate
 * with a {@link TimeoutException} if time {@code duration} elapses between subscribe and
 * termination. The timer starts when the returned {@link Single} is subscribed.
 * <p>
 * In the event of timeout any {@link Cancellable} from {@link Subscriber#onSubscribe(Cancellable)} will be
 * {@link Cancellable#cancel() cancelled} and the associated {@link Subscriber} will be
 * {@link Subscriber#onError(Throwable) terminated}.
 * @param duration The time duration which is allowed to elapse before {@link Subscriber#onSuccess(Object)}.
 * @param unit The units for {@code duration}.
 * @return a new {@link Single} that will mimic the signals of this {@link Single} but will terminate with a
 * {@link TimeoutException} if time {@code duration} elapses before {@link Subscriber#onSuccess(Object)}.
 * @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX timeout operator.</a>
 */
public final Single<T> timeout(long duration, TimeUnit unit) {
    // Timer notifications are scheduled on the global executor.
    return timeout(duration, unit, global());
}
/**
 * Creates a new {@link Single} that will mimic the signals of this {@link Single} but will terminate
 * with a {@link TimeoutException} if time {@code duration} elapses between subscribe and
 * termination. The timer starts when the returned {@link Single} is subscribed.
 * <p>
 * In the event of timeout any {@link Cancellable} from {@link Subscriber#onSubscribe(Cancellable)} will be
 * {@link Cancellable#cancel() cancelled} and the associated {@link Subscriber} will be
 * {@link Subscriber#onError(Throwable) terminated}.
 * @param duration The time duration which is allowed to elapse before {@link Subscriber#onSuccess(Object)}.
 * @param unit The units for {@code duration}.
 * @param timeoutExecutor The {@link io.servicetalk.concurrent.Executor} to use for managing the timer
 * notifications.
 * @return a new {@link Single} that will mimic the signals of this {@link Single} but will terminate with a
 * {@link TimeoutException} if time {@code duration} elapses before {@link Subscriber#onSuccess(Object)}.
 * @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX timeout operator.</a>
 */
public final Single<T> timeout(long duration, TimeUnit unit,
                               io.servicetalk.concurrent.Executor timeoutExecutor) {
    // TimeoutSingle starts its timer when subscribed, using timeoutExecutor for timer notifications.
    return new TimeoutSingle<>(this, duration, unit, timeoutExecutor);
}
/**
 * Creates a new {@link Single} that will mimic the signals of this {@link Single} but will terminate
 * with a {@link TimeoutException} if time {@code duration} elapses between subscribe and
 * termination. The timer starts when the returned {@link Single} is subscribed.
 * <p>
 * In the event of timeout any {@link Cancellable} from {@link Subscriber#onSubscribe(Cancellable)} will be
 * {@link Cancellable#cancel() cancelled} and the associated {@link Subscriber} will be
 * {@link Subscriber#onError(Throwable) terminated}.
 * @param duration The time duration which is allowed to elapse before {@link Subscriber#onSuccess(Object)}.
 * @return a new {@link Single} that will mimic the signals of this {@link Single} but will terminate with a
 * {@link TimeoutException} if time {@code duration} elapses before {@link Subscriber#onSuccess(Object)}.
 * @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX timeout operator.</a>
 */
public final Single<T> timeout(Duration duration) {
    // Timer notifications are scheduled on the global executor.
    return timeout(duration, global());
}
/**
 * Creates a new {@link Single} that will mimic the signals of this {@link Single} but will terminate
 * with a {@link TimeoutException} if time {@code duration} elapses between subscribe and termination.
 * The timer starts when the returned {@link Single} is subscribed.
 * <p>
 * In the event of timeout any {@link Cancellable} from {@link Subscriber#onSubscribe(Cancellable)} will be
 * {@link Cancellable#cancel() cancelled} and the associated {@link Subscriber} will be
 * {@link Subscriber#onError(Throwable) terminated}.
 * @param duration The time duration which is allowed to elapse before {@link Subscriber#onSuccess(Object)}.
 * @param timeoutExecutor The {@link io.servicetalk.concurrent.Executor} to use for managing the timer
 * notifications.
 * @return a new {@link Single} that will mimic the signals of this {@link Single} but will terminate with a
 * {@link TimeoutException} if time {@code duration} elapses before {@link Subscriber#onSuccess(Object)}.
 * @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX timeout operator.</a>
 */
public final Single<T> timeout(Duration duration, io.servicetalk.concurrent.Executor timeoutExecutor) {
    return new TimeoutSingle<>(this, duration, timeoutExecutor);
}
/**
 * Returns a {@link Publisher} that first emits the result of this {@link Single} and then subscribes and emits
 * result of {@code next} {@link Single}. Any error emitted by this {@link Single} or {@code next} {@link Single} is
 * forwarded to the returned {@link Publisher}.
 * <p>
 * This method provides a means to sequence the execution of two asynchronous sources and in sequential programming
 * is similar to:
 * <pre>{@code
 * Pair<T, T> p = new Pair<>();
 * p.first = resultOfThisSingle();
 * p.second = nextSingle();
 * return p;
 * }</pre>
 * @param next {@link Single} to concat.
 * @return New {@link Publisher} that first emits the result of this {@link Single} and then subscribes and emits
 * result of {@code next} {@link Single}.
 */
public final Publisher<T> concat(Single<? extends T> next) {
    // Implemented by converting this Single to a Publisher and concatenating next onto it.
    return toPublisher().concat(next);
}
/**
 * Returns a {@link Single} that emits the result of this {@link Single} after {@code next} {@link Completable}
 * terminates successfully.
 * {@code next} {@link Completable} will only be subscribed to after this {@link Single} terminates successfully.
 * Any error emitted by this {@link Single} or {@code next} {@link Completable} is forwarded to the returned
 * {@link Single}.
 * <p>
 * This method provides a means to sequence the execution of two asynchronous sources and in sequential programming
 * is similar to:
 * <pre>{@code
 * T result = resultOfThisSingle();
 * nextCompletable(); // Note this either completes successfully, or throws an error.
 * return result;
 * }</pre>
 * @param next {@link Completable} to concat.
 * @return New {@link Single} that emits the result of this {@link Single} after {@code next} {@link Completable}
 * terminates successfully.
 */
public final Single<T> concat(Completable next) {
    // The result of this Single is held until next completes successfully, then delivered downstream.
    return new SingleConcatWithCompletable<>(this, next);
}
/**
 * Returns a {@link Publisher} that first emits the result of this {@link Single} and then subscribes and emits all
 * elements from {@code next} {@link Publisher}. Any error emitted by this {@link Single} or {@code next}
 * {@link Publisher} is forwarded to the returned {@link Publisher}.
 * <p>
 * Note: this method is an overload for {@link #concat(Publisher, boolean)} with {@code deferSubscribe} equal to
 * {@code false}, which triggers subscribe to the {@code next} {@link Publisher} as soon as {@code this}
 * {@link Single} completes successfully.
 * <p>
 * This method provides a means to sequence the execution of two asynchronous sources and in sequential programming
 * is similar to:
 * <pre>{@code
 * List<T> results = new ...;
 * results.add(resultOfThisSingle());
 * results.addAll(nextStream());
 * return results;
 * }</pre>
 * @param next {@link Publisher} to concat.
 * @return New {@link Publisher} that first emits the result of this {@link Single} and then subscribes and emits
 * all elements from {@code next} {@link Publisher}.
 * @see #concat(Publisher, boolean)
 */
public final Publisher<T> concat(Publisher<? extends T> next) {
    // deferSubscribe == false: next is subscribed as soon as this Single completes successfully.
    return new SingleConcatWithPublisher<>(this, next, false);
}
/**
 * Returns a {@link Publisher} that first emits the result of this {@link Single} and then subscribes and emits all
 * elements from {@code next} {@link Publisher}. Any error emitted by this {@link Single} or {@code next}
 * {@link Publisher} is forwarded to the returned {@link Publisher}.
 * <p>
 * This method provides a means to sequence the execution of two asynchronous sources and in sequential programming
 * is similar to:
 * <pre>{@code
 * List<T> results = new ...;
 * results.add(resultOfThisSingle());
 * results.addAll(nextStream());
 * return results;
 * }</pre>
 * @param next {@link Publisher} to concat.
 * @param deferSubscribe if {@code true} subscribe to the {@code next} {@link Publisher} will be deferred until
 * demand is received. Otherwise, it subscribes to the {@code next} {@link Publisher} as soon as {@code this}
 * {@link Single} completes successfully. Choosing the deferred ({@code true}) behavior is important if the
 * {@code next} {@link Publisher} does not or might not support multiple subscribers (non-replayable). Choosing the
 * immediate subscribe ({@code false}) behavior may have better performance and may be a preferable choice for
 * replayable {@link Publisher}(s) or when eager subscribe is beneficial.
 * @return New {@link Publisher} that first emits the result of this {@link Single} and then subscribes and emits
 * all elements from {@code next} {@link Publisher}.
 */
public final Publisher<T> concat(Publisher<? extends T> next, boolean deferSubscribe) {
    // deferSubscribe controls whether next is subscribed eagerly on success or lazily when demand arrives.
    return new SingleConcatWithPublisher<>(this, next, deferSubscribe);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link BiFunction} to items emitted by
 * {@code this} and {@code other}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * CompletableFuture<T> f1 = ...; // this
 * CompletableFuture<T2> other = ...;
 * CompletableFuture.allOf(f1, other).get(); // wait for all futures to complete
 * return zipper.apply(f1.get(), other.get());
 * }</pre>
 * @param other The other {@link Single} to zip with.
 * @param zipper Used to combine the completed results from {@code this} and {@code other}.
 * @param <T2> The type of {@code other}.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link BiFunction} to items emitted by
 * {@code this} and {@code other}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 */
public final <T2, R> Single<R> zipWith(Single<? extends T2> other,
                                       BiFunction<? super T, ? super T2, ? extends R> zipper) {
    return zip(this, other, zipper);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link BiFunction} to items emitted by
 * {@code this} and {@code other}. If any of the {@link Single}s terminate with an error, the returned
 * {@link Single} will wait for termination till all the other {@link Single}s have been subscribed and terminated,
 * and then terminate with the first error.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * CompletableFuture<T> f1 = ...; // this
 * CompletableFuture<T2> other = ...;
 * CompletableFuture.allOf(f1, other).get(); // wait for all futures to complete
 * return zipper.apply(f1.get(), other.get());
 * }</pre>
 * @param other The other {@link Single} to zip with.
 * @param zipper Used to combine the completed results from {@code this} and {@code other}.
 * @param <T2> The type of {@code other}.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link BiFunction} to items emitted by
 * {@code this} and {@code other}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 */
public final <T2, R> Single<R> zipWithDelayError(Single<? extends T2> other,
                                                 BiFunction<? super T, ? super T2, ? extends R> zipper) {
    return zipDelayError(this, other, zipper);
}
/**
 * Re-subscribes to this {@link Single} if an error is emitted and the passed {@link BiIntPredicate} returns
 * {@code true}.
 * <pre>
 * This method may result in a {@link StackOverflowError} if too many consecutive calls are made. This can be
 * avoided by trampolining the call stack onto an {@link Executor}. For example:
 * {@code retryWhen((i, cause) -> i % 10 == 0 ? executor.submit(() -> { }) : Completable.completed())}
 * </pre>
 * This method provides a means to retry an operation under certain failure conditions and in sequential programming
 * is similar to:
 * <pre>{@code
 * public T execute() {
 *     return execute(0);
 * }
 *
 * private T execute(int attempts) {
 *     try {
 *         return resultOfThisSingle();
 *     } catch (Throwable cause) {
 *         if (shouldRetry.apply(attempts + 1, cause)) {
 *             return execute(attempts + 1);
 *         } else {
 *             throw cause;
 *         }
 *     }
 * }
 * }</pre>
 * @param shouldRetry {@link BiIntPredicate} that given the retry count and the most recent {@link Throwable}
 * emitted from this {@link Single} determines if the operation should be retried.
 * @return A {@link Single} that emits the result from this {@link Single} or re-subscribes if an error is emitted
 * and if the passed {@link BiIntPredicate} returned {@code true}.
 *
 * @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX retry operator.</a>
 */
public final Single<T> retry(BiIntPredicate<Throwable> shouldRetry) {
    return new RetrySingle<>(this, shouldRetry);
}
/**
 * Re-subscribes to this {@link Single} if an error is emitted and the {@link Completable} returned by the supplied
 * {@link BiIntFunction} completes successfully. If the returned {@link Completable} emits an error, the returned
 * {@link Single} terminates with that error.
 * <pre>
 * This method may result in a {@link StackOverflowError} if too many consecutive calls are made. This can be
 * avoided by trampolining the call stack onto an {@link Executor}. For example:
 * {@code retryWhen((i, cause) -> i % 10 == 0 ? executor.submit(() -> { }) : Completable.completed())}
 * </pre>
 * This method provides a means to retry an operation under certain failure conditions in an asynchronous fashion
 * and in sequential programming is similar to:
 * <pre>{@code
 * public T execute() {
 *     return execute(0);
 * }
 *
 * private T execute(int attempts) {
 *     try {
 *         return resultOfThisSingle();
 *     } catch (Throwable cause) {
 *         try {
 *             retryWhen.apply(attempts + 1, cause); // Either throws or completes normally
 *             execute(attempts + 1);
 *         } catch (Throwable ignored) {
 *             throw cause;
 *         }
 *     }
 * }
 * }</pre>
 * @param retryWhen {@link BiIntFunction} that given the retry count and the most recent {@link Throwable} emitted
 * from this {@link Single} returns a {@link Completable}. If this {@link Completable} emits an error, that error is
 * emitted from the returned {@link Single}, otherwise, original {@link Single} is re-subscribed when this
 * {@link Completable} completes.
 *
 * @return A {@link Single} that emits the result from this {@link Single} or re-subscribes if an error is emitted
 * and {@link Completable} returned by {@link BiIntFunction} completes successfully.
 *
 * @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX retry operator.</a>
 */
public final Single<T> retryWhen(BiIntFunction<Throwable, ? extends Completable> retryWhen) {
    // RetryWhenSingle re-subscribes upstream each time the Completable produced by retryWhen completes.
    return new RetryWhenSingle<>(this, retryWhen);
}
/**
 * Re-subscribes to this {@link Single} when it completes and the passed {@link IntPredicate} returns {@code true}.
 * <pre>
 * This method may result in a {@link StackOverflowError} if too many consecutive calls are made. This can be
 * avoided by trampolining the call stack onto an {@link Executor}. For example:
 * {@code repeatWhen(i -> i % 10 == 0 ? executor.submit(() -> { }) : Completable.completed())}
 * </pre>
 * In sequential programming repeating an operation like this is roughly equivalent to:
 * <pre>{@code
 * List<T> results = new ...;
 * int i = 0;
 * do {
 *     results.add(resultOfThisSingle());
 * } while (shouldRepeat.test(++i));
 * return results;
 * }</pre>
 * @param shouldRepeat {@link IntPredicate} that given the repetition count returns {@code true} if the operation
 * should be repeated.
 * @return A {@link Publisher} that emits all items from this {@link Single} and from all re-subscriptions whenever
 * the operation is repeated.
 *
 * @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX repeat operator.</a>
 */
public final Publisher<T> repeat(IntPredicate shouldRepeat) {
    return repeatWhen((repeatCount, ignored) -> {
        // A completed Completable signals "repeat again"; END_REPEAT_COMPLETABLE terminates the repetition.
        if (shouldRepeat.test(repeatCount)) {
            return Completable.completed();
        }
        return END_REPEAT_COMPLETABLE;
    });
}
/**
 * Re-subscribes to this {@link Single} when it completes and the passed {@link BiIntPredicate} returns
 * {@code true}.
 * <pre>
 * This method may result in a {@link StackOverflowError} if too many consecutive calls are made. This can be
 * avoided by trampolining the call stack onto an {@link Executor}. For example:
 * {@code repeatWhen(i -> i % 10 == 0 ? executor.submit(() -> { }) : Completable.completed())}
 * </pre>
 * This method provides a means to repeat an operation multiple times and in sequential programming is similar to:
 * <pre>{@code
 * List<T> results = new ...;
 * int i = 0;
 * T result;
 * do {
 *     result = resultOfThisSingle();
 *     results.add(result);
 * } while (shouldRepeat.test(++i, result));
 * return results;
 * }</pre>
 * @param shouldRepeat {@link BiIntPredicate} that given the repetition count and value from the current iteration
 * returns {@code true} if the operation should be repeated.
 * @return A {@link Publisher} that emits all items from this {@link Single} and from all re-subscriptions whenever
 * the operation is repeated.
 *
 * @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX repeat operator.</a>
 */
public final Publisher<T> repeat(BiIntPredicate<? super T> shouldRepeat) {
    // Completing the Completable triggers a re-subscribe; END_REPEAT_COMPLETABLE ends the repetition.
    return repeatWhen((i, t) -> shouldRepeat.test(i, t) ? Completable.completed() : END_REPEAT_COMPLETABLE);
}
/**
* Re-subscribes to this {@link Single} when it completes and the {@link Completable} returned by the supplied
* {@link IntFunction} completes successfully. If the returned {@link Completable} emits an error, the returned
* {@link Single} emits an error.
* <pre>
* This method may result in a {@link StackOverflowError} if too many consecutive calls are made. This can be
* avoided by trampolining the call stack onto an {@link Executor}. For example:
* {@code repeatWhen(i -> i % 10 == 0 ? executor.submit(() -> { }) : Completable.completed())}
* </pre>
* This method provides a means to repeat an operation multiple times when in an asynchronous fashion and in
* sequential programming is similar to:
* <pre>{@code
* List<T> results = new ...;
* int i = 0;
* while (true) {
* results.add(resultOfThisSingle());
* try {
* repeatWhen.apply(++i); // Either throws or completes normally
* } catch (Throwable cause) {
* break;
* }
* }
* return results;
* }</pre>
* @param repeatWhen {@link IntFunction} that given the repetition count returns a {@link Completable}.
* If this {@link Completable} emits an error repeat is terminated, otherwise, original {@link Single} is
* re-subscribed when this {@link Completable} completes.
*
* @return A {@link Publisher} that emits all items from this {@link Single} and from all re-subscriptions whenever
* the operation is repeated.
*
* @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX retry operator.</a>
*/
    public final Publisher<T> repeatWhen(IntFunction<? extends Completable> repeatWhen) {
        // Adapt to the BiIntFunction overload by discarding the emitted value ("__").
        return repeatWhen((i, __) -> repeatWhen.apply(i));
    }
/**
* Re-subscribes to this {@link Single} when it completes and the {@link Completable} returned by the supplied
* {@link BiIntFunction} completes successfully.
* <pre>
* This method may result in a {@link StackOverflowError} if too many consecutive calls are made. This can be
* avoided by trampolining the call stack onto an {@link Executor}. For example:
     * {@code repeatWhen((i, result) -> i % 10 == 0 ? executor.submit(() -> { }) : Completable.completed())}
* </pre>
* This method provides a means to repeat an operation multiple times when in an asynchronous fashion and in
* sequential programming is similar to:
* <pre>{@code
* List<T> results = new ...;
* int i = 0;
* while (true) {
* T result = resultOfThisSingle();
* try {
* repeatWhen.apply(++i, result); // Either throws or completes normally
* } catch (Throwable cause) {
* break;
* }
* }
* return results;
* }</pre>
* @param repeatWhen {@link BiIntFunction} that given the repetition count and value from the current iteration
* returns a {@link Completable}. If this {@link Completable} emits an error repeat is terminated, otherwise,
* original {@link Single} is re-subscribed when this {@link Completable} completes.
*
* @return A {@link Publisher} that emits all items from this {@link Single} and from all re-subscriptions whenever
* the operation is repeated.
*
     * @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX repeat operator.</a>
*/
    public final Publisher<T> repeatWhen(BiIntFunction<? super T, ? extends Completable> repeatWhen) {
        // Core repeat implementation; the other repeat/repeatWhen overloads funnel into this operator.
        return new RepeatWhenSingle<>(this, repeatWhen);
    }
/**
* Invokes the {@code onSubscribe} {@link Consumer} argument <strong>before</strong>
* {@link Subscriber#onSubscribe(Cancellable)} is called for {@link Subscriber}s of the returned {@link Single}.
*
* @param onSubscribe Invoked <strong>before</strong> {@link Subscriber#onSubscribe(Cancellable)} is called for
* {@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> beforeOnSubscribe(Consumer<Cancellable> onSubscribe) {
        // Wrap the Consumer in a per-subscribe Subscriber that runs before the downstream Subscriber.
        return beforeSubscriber(doOnSubscribeSupplier(onSubscribe));
    }
/**
* Invokes the {@code onSuccess} {@link Consumer} argument <strong>before</strong>
* {@link Subscriber#onSuccess(Object)} is called for {@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* T result = resultOfThisSingle();
* onSuccess.accept(result);
* nextOperation(result);
* }</pre>
* @param onSuccess Invoked <strong>before</strong> {@link Subscriber#onSuccess(Object)} is called for
* {@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> beforeOnSuccess(Consumer<? super T> onSuccess) {
        // Wrap the Consumer in a per-subscribe Subscriber that runs before the downstream Subscriber.
        return beforeSubscriber(doOnSuccessSupplier(onSuccess));
    }
/**
* Invokes the {@code onError} {@link Consumer} argument <strong>before</strong>
* {@link Subscriber#onError(Throwable)} is called for {@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* try {
* T result = resultOfThisSingle();
* } catch (Throwable cause) {
* onError.accept(cause);
* nextOperation(cause);
* }
* }</pre>
* @param onError Invoked <strong>before</strong> {@link Subscriber#onError(Throwable)} is called for
* {@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> beforeOnError(Consumer<Throwable> onError) {
        // Wrap the Consumer in a per-subscribe Subscriber that runs before the downstream Subscriber.
        return beforeSubscriber(doOnErrorSupplier(onError));
    }
/**
* Invokes the {@code onCancel} {@link Runnable} argument <strong>before</strong> {@link Cancellable#cancel()} is
* called for Subscriptions of the returned {@link Single}.
*
* @param onCancel Invoked <strong>before</strong> {@link Cancellable#cancel()} is called for Subscriptions of the
* returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> beforeCancel(Runnable onCancel) {
        // The trailing boolean distinguishes before/after semantics: afterCancel passes false.
        return new WhenCancellableSingle<>(this, onCancel::run, true);
    }
/**
* Invokes the {@code whenFinally} {@link Runnable} argument <strong>before</strong> any of the following terminal
* methods are called:
* <ul>
* <li>{@link Subscriber#onSuccess(Object)}</li>
* <li>{@link Subscriber#onError(Throwable)}</li>
* <li>{@link Cancellable#cancel()}</li>
* </ul>
* for Subscriptions/{@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* try {
* T result = resultOfThisSingle();
* } finally {
* doFinally.run();
* nextOperation(); // Maybe notifying of cancellation, or termination
* }
* }</pre>
*
* @param doFinally Invoked <strong>before</strong> any of the following terminal methods are called:
* <ul>
* <li>{@link Subscriber#onSuccess(Object)}</li>
* <li>{@link Subscriber#onError(Throwable)}</li>
* <li>{@link Cancellable#cancel()}</li>
* </ul>
* for Subscriptions/{@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
* @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX do operator.</a>
*/
    public final Single<T> beforeFinally(Runnable doFinally) {
        // Adapt the Runnable to the SingleTerminalSignalConsumer overload so it runs on any terminal signal.
        return beforeFinally(new RunnableSingleTerminalSignalConsumer<>(doFinally));
    }
/**
* Invokes the corresponding method on {@code beforeFinally} {@link TerminalSignalConsumer} argument
* <strong>before</strong> any of the following terminal methods are called:
* <ul>
* <li>{@link Subscriber#onSuccess(Object)} - invokes
* {@link TerminalSignalConsumer#onComplete()}</li>
* <li>{@link Subscriber#onError(Throwable)} - invokes
* {@link TerminalSignalConsumer#onError(Throwable)}</li>
* <li>{@link Cancellable#cancel()} - invokes {@link TerminalSignalConsumer#cancel()}</li>
* </ul>
* for Subscriptions/{@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* T result;
* try {
* result = resultOfThisSingle();
* } catch(Throwable t) {
* doFinally.onError(t);
* nextOperation(); // Maybe notifying of cancellation, or termination
* return;
* }
* doFinally.onComplete();
* nextOperation(); // Maybe notifying of cancellation, or termination
* }</pre>
*
* @param doFinally For each subscribe of the returned {@link Single}, at most one method of this
* {@link TerminalSignalConsumer} will be invoked.
* @return The new {@link Single}.
* @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX do operator.</a>
*/
    public final Single<T> beforeFinally(TerminalSignalConsumer doFinally) {
        // Adapt TerminalSignalConsumer (onComplete-based) to the Single-specific (onSuccess-based) consumer.
        return new BeforeFinallySingle<>(this, new TerminalSingleTerminalSignalConsumer<>(doFinally));
    }
/**
* Invokes the corresponding method on {@code beforeFinally} {@link SingleTerminalSignalConsumer} argument
* <strong>before</strong> any of the following terminal methods are called:
* <ul>
* <li>{@link Subscriber#onSuccess(Object)} - invokes
* {@link SingleTerminalSignalConsumer#onSuccess(Object)}</li>
* <li>{@link Subscriber#onError(Throwable)} - invokes
* {@link SingleTerminalSignalConsumer#onError(Throwable)}</li>
* <li>{@link Cancellable#cancel()} - invokes {@link SingleTerminalSignalConsumer#cancel()}</li>
* </ul>
* for Subscriptions/{@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* T result;
* try {
* result = resultOfThisSingle();
* } catch(Throwable t) {
* doFinally.onError(t);
* nextOperation(); // Maybe notifying of cancellation, or termination
* return;
* }
* doFinally.onSuccess(result);
* nextOperation(); // Maybe notifying of cancellation, or termination
* }</pre>
*
* @param doFinally For each subscribe of the returned {@link Single}, at most one method of this
* {@link SingleTerminalSignalConsumer} will be invoked.
* @return The new {@link Single}.
* @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX do operator.</a>
*/
    public final Single<T> beforeFinally(SingleTerminalSignalConsumer<? super T> doFinally) {
        // Core "before finally" operator; the other beforeFinally overloads adapt into this one.
        return new BeforeFinallySingle<>(this, doFinally);
    }
/**
* Creates a new {@link Subscriber} (via the {@code subscriberSupplier} argument) on each call to subscribe and
* invokes all the {@link Subscriber} methods <strong>before</strong> the {@link Subscriber}s of the returned
* {@link Single}.
*
* @param subscriberSupplier Creates a new {@link Subscriber} on each call to subscribe and invokes all the
* {@link Subscriber} methods <strong>before</strong> the {@link Subscriber}s of the returned {@link Single}.
* {@link Subscriber} methods <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> beforeSubscriber(Supplier<? extends Subscriber<? super T>> subscriberSupplier) {
        // One Subscriber is created per subscribe; its callbacks run before the downstream Subscriber's.
        return new BeforeSubscriberSingle<>(this, subscriberSupplier);
    }
/**
* Invokes the {@code onSubscribe} {@link Consumer} argument <strong>after</strong>
* {@link Subscriber#onSubscribe(Cancellable)} is called for {@link Subscriber}s of the returned {@link Single}.
*
* @param onSubscribe Invoked <strong>after</strong> {@link Subscriber#onSubscribe(Cancellable)} is called for
* {@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> afterOnSubscribe(Consumer<Cancellable> onSubscribe) {
        // Wrap the Consumer in a per-subscribe Subscriber that runs after the downstream Subscriber.
        return afterSubscriber(doOnSubscribeSupplier(onSubscribe));
    }
/**
* Invokes the {@code onSubscribe} {@link Consumer} argument when
* {@link Subscriber#onSubscribe(Cancellable)} is called for {@link Subscriber}s of the returned {@link Single}.
*
* <p>
* The order in which {@code onSubscribe} will be invoked relative to
* {@link Subscriber#onSubscribe(Cancellable)} is undefined. If you need strict ordering see
* {@link #beforeOnSubscribe(Consumer)} and {@link #afterOnSubscribe(Consumer)}.
*
* @param onSubscribe Invoked when {@link Subscriber#onSubscribe(Cancellable)} is called for
* {@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*
* @see #beforeOnSubscribe(Consumer)
* @see #afterOnSubscribe(Consumer)
*/
    public final Single<T> whenOnSubscribe(Consumer<Cancellable> onSubscribe) {
        // Ordering relative to the downstream Subscriber is unspecified by contract; the current
        // implementation happens to use the "before" variant — callers must not rely on that.
        return beforeOnSubscribe(onSubscribe);
    }
/**
* Invokes the {@code onSuccess} {@link Consumer} argument <strong>after</strong>
* {@link Subscriber#onSuccess(Object)} is called for {@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* T result = resultOfThisSingle();
* nextOperation(result);
* onSuccess.accept(result);
* }</pre>
* @param onSuccess Invoked <strong>after</strong> {@link Subscriber#onSuccess(Object)} is called for
* {@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> afterOnSuccess(Consumer<? super T> onSuccess) {
        // Wrap the Consumer in a per-subscribe Subscriber that runs after the downstream Subscriber.
        return afterSubscriber(doOnSuccessSupplier(onSuccess));
    }
/**
* Invokes the {@code onError} {@link Consumer} argument <strong>after</strong>
* {@link Subscriber#onError(Throwable)} is called for {@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* try {
* T result = resultOfThisSingle();
* } catch (Throwable cause) {
* nextOperation(cause);
* onError.accept(cause);
* }
* }</pre>
* @param onError Invoked <strong>after</strong> {@link Subscriber#onError(Throwable)} is called for
* {@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> afterOnError(Consumer<Throwable> onError) {
        // Wrap the Consumer in a per-subscribe Subscriber that runs after the downstream Subscriber.
        return afterSubscriber(doOnErrorSupplier(onError));
    }
/**
* Invokes the {@code onCancel} {@link Runnable} argument <strong>after</strong> {@link Cancellable#cancel()} is
* called for Subscriptions of the returned {@link Single}.
*
* @param onCancel Invoked <strong>after</strong> {@link Cancellable#cancel()} is called for Subscriptions of the
* returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> afterCancel(Runnable onCancel) {
        // The trailing boolean distinguishes before/after semantics: beforeCancel passes true.
        return new WhenCancellableSingle<>(this, onCancel::run, false);
    }
/**
* Invokes the {@code whenFinally} {@link Runnable} argument <strong>after</strong> any of the following terminal
* methods are called:
* <ul>
* <li>{@link Subscriber#onSuccess(Object)}</li>
* <li>{@link Subscriber#onError(Throwable)}</li>
* <li>{@link Cancellable#cancel()}</li>
* </ul>
* for Subscriptions/{@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* try {
* T result = resultOfThisSingle();
* } finally {
* nextOperation(); // Maybe notifying of cancellation, or termination
* doFinally.run();
* }
* }</pre>
*
* @param doFinally Invoked <strong>after</strong> any of the following terminal methods are called:
* <ul>
* <li>{@link Subscriber#onSuccess(Object)}</li>
* <li>{@link Subscriber#onError(Throwable)}</li>
* <li>{@link Cancellable#cancel()}</li>
* </ul>
* for Subscriptions/{@link Subscriber}s of the returned {@link Single}. <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
* @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX do operator.</a>
*/
    public final Single<T> afterFinally(Runnable doFinally) {
        // Adapt the Runnable to the SingleTerminalSignalConsumer overload so it runs on any terminal signal.
        return afterFinally(new RunnableSingleTerminalSignalConsumer<>(doFinally));
    }
/**
* Invokes the corresponding method on {@code afterFinally} {@link TerminalSignalConsumer} argument
* <strong>after</strong> any of the following terminal methods are called:
* <ul>
* <li>{@link Subscriber#onSuccess(Object)} - invokes
* {@link TerminalSignalConsumer#onComplete()}</li>
* <li>{@link Subscriber#onError(Throwable)} - invokes
* {@link TerminalSignalConsumer#onError(Throwable)}</li>
* <li>{@link Cancellable#cancel()} - invokes {@link TerminalSignalConsumer#cancel()}</li>
* </ul>
* for Subscriptions/{@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* T result;
* try {
* result = resultOfThisSingle();
* } catch(Throwable t) {
* nextOperation(); // Maybe notifying of cancellation, or termination
* doFinally.onError(t);
* return;
* }
* nextOperation(); // Maybe notifying of cancellation, or termination
* doFinally.onComplete();
* }</pre>
*
* @param doFinally For each subscribe of the returned {@link Single}, at most one method of this
* {@link TerminalSignalConsumer} will be invoked.
* @return The new {@link Single}.
* @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX do operator.</a>
*/
    public final Single<T> afterFinally(TerminalSignalConsumer doFinally) {
        // Adapt TerminalSignalConsumer (onComplete-based) to the Single-specific (onSuccess-based) consumer.
        return new AfterFinallySingle<>(this, new TerminalSingleTerminalSignalConsumer<>(doFinally));
    }
/**
* Invokes the corresponding method on {@code afterFinally} {@link SingleTerminalSignalConsumer} argument
* <strong>after</strong> any of the following terminal methods are called:
* <ul>
* <li>{@link Subscriber#onSuccess(Object)} - invokes
* {@link SingleTerminalSignalConsumer#onSuccess(Object)}</li>
* <li>{@link Subscriber#onError(Throwable)} - invokes
* {@link SingleTerminalSignalConsumer#onError(Throwable)}</li>
* <li>{@link Cancellable#cancel()} - invokes {@link SingleTerminalSignalConsumer#cancel()}</li>
* </ul>
* for Subscriptions/{@link Subscriber}s of the returned {@link Single}.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* T result;
* try {
* result = resultOfThisSingle();
* } catch(Throwable t) {
* nextOperation(); // Maybe notifying of cancellation, or termination
* doFinally.onError(t);
* return;
* }
* nextOperation(); // Maybe notifying of cancellation, or termination
* doFinally.onSuccess(result);
* }</pre>
*
* @param doFinally For each subscribe of the returned {@link Single}, at most one method of this
* {@link SingleTerminalSignalConsumer} will be invoked.
* @return The new {@link Single}.
* @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX do operator.</a>
*/
    public final Single<T> afterFinally(SingleTerminalSignalConsumer<? super T> doFinally) {
        // Core "after finally" operator; the other afterFinally overloads adapt into this one.
        return new AfterFinallySingle<>(this, doFinally);
    }
/**
* Creates a new {@link Subscriber} (via the {@code subscriberSupplier} argument) on each call to subscribe and
* invokes all the {@link Subscriber} methods <strong>after</strong> the {@link Subscriber}s of the returned
* {@link Single}.
*
* @param subscriberSupplier Creates a new {@link Subscriber} on each call to subscribe and invokes all the
* {@link Subscriber} methods <strong>after</strong> the {@link Subscriber}s of the returned {@link Single}.
* {@link Subscriber} methods <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> afterSubscriber(Supplier<? extends Subscriber<? super T>> subscriberSupplier) {
        // One Subscriber is created per subscribe; its callbacks run after the downstream Subscriber's.
        return new AfterSubscriberSingle<>(this, subscriberSupplier);
    }
/**
* Creates a new {@link Subscriber} (via the {@code subscriberSupplier} argument) for each new subscribe and
* invokes methods on that {@link Subscriber} when the corresponding methods are called for {@link Subscriber}s of
* the returned {@link Single}.
*
* @param subscriberSupplier Creates a new {@link Subscriber} for each new subscribe and invokes methods on that
* {@link Subscriber} when the corresponding methods are called for {@link Subscriber}s of the returned
* {@link Single}. {@link Subscriber} methods <strong>MUST NOT</strong> throw.
* @return The new {@link Single}.
*/
    public final Single<T> whenSubscriber(Supplier<? extends Subscriber<? super T>> subscriberSupplier) {
        // Ordering relative to the downstream Subscriber is unspecified by contract; the current
        // implementation happens to use the "before" variant — callers must not rely on that.
        return beforeSubscriber(subscriberSupplier);
    }
/**
* Creates a new {@link Single} that will use the passed {@link io.servicetalk.concurrent.Executor} to invoke all
* {@link Subscriber} methods. This method does <strong>not</strong> override preceding {@link Executor}s, if any,
* specified for {@code this} {@link Single}. Only subsequent operations, if any, added in this execution chain will
* use this {@link io.servicetalk.concurrent.Executor}.
* <p>
* Note: unlike {@link #publishOn(io.servicetalk.concurrent.Executor, BooleanSupplier)}, current operator always
* enforces offloading to the passed {@link io.servicetalk.concurrent.Executor}.
*
* @param executor {@link io.servicetalk.concurrent.Executor} to use.
* @return A new {@link Single} that will use the passed {@link io.servicetalk.concurrent.Executor} to invoke all
* {@link Subscriber} methods.
* @see #publishOn(io.servicetalk.concurrent.Executor, BooleanSupplier)
*/
    public final Single<T> publishOn(io.servicetalk.concurrent.Executor executor) {
        // Always-true BooleanSupplier forces offloading unconditionally (no skip hint).
        return PublishAndSubscribeOnSingles.publishOn(this, Boolean.TRUE::booleanValue, executor);
    }
/**
* Creates a new {@link Single} that may use the passed {@link io.servicetalk.concurrent.Executor} to invoke all
* {@link Subscriber} methods.
* This method does <strong>not</strong> override preceding {@link io.servicetalk.concurrent.Executor}s, if any,
* specified for {@code this} {@link Single}. Only subsequent operations, if any, added in this execution chain will
* use this {@link io.servicetalk.concurrent.Executor}.
* <p>
* Note: unlike {@link #publishOn(io.servicetalk.concurrent.Executor)}, current operator may skip offloading to the
* passed {@link io.servicetalk.concurrent.Executor},
* depending on the result of the {@link BooleanSupplier} hint.
*
* @param executor {@link io.servicetalk.concurrent.Executor} to use.
* @param shouldOffload Provides a hint whether offloading to the executor can be omitted or not. Offloading may
* still occur even if {@code false} is returned in order to preserve signal ordering.
* @return A new {@link Single} that may use the passed {@link io.servicetalk.concurrent.Executor} to invoke all
* {@link Subscriber} methods.
* @see #publishOn(io.servicetalk.concurrent.Executor)
*/
    public final Single<T> publishOn(io.servicetalk.concurrent.Executor executor, BooleanSupplier shouldOffload) {
        // shouldOffload is only a hint: offloading may still happen to preserve signal ordering.
        return PublishAndSubscribeOnSingles.publishOn(this, shouldOffload, executor);
    }
/**
* Creates a new {@link Single} that will use the passed {@link io.servicetalk.concurrent.Executor} to invoke the
* following methods:
* <ul>
* <li>All {@link Cancellable} methods.</li>
* <li>The {@link #handleSubscribe(SingleSource.Subscriber)} method.</li>
* </ul>
* This method does <strong>not</strong> override preceding {@link io.servicetalk.concurrent.Executor}s, if any,
* specified for {@code this} {@link Single}. Only subsequent operations, if any, added in this execution chain will
* use this {@link io.servicetalk.concurrent.Executor}.
* <p>
* Note: unlike {@link #subscribeOn(io.servicetalk.concurrent.Executor, BooleanSupplier)}, current operator always
* enforces offloading to the passed {@link io.servicetalk.concurrent.Executor}.
*
* @param executor {@link io.servicetalk.concurrent.Executor} to use.
* @return A new {@link Single} that will use the passed {@link io.servicetalk.concurrent.Executor} to invoke all
* methods of {@link Cancellable} and {@link #handleSubscribe(SingleSource.Subscriber)}.
* @see #subscribeOn(io.servicetalk.concurrent.Executor, BooleanSupplier)
*/
    public final Single<T> subscribeOn(io.servicetalk.concurrent.Executor executor) {
        // Always-true BooleanSupplier forces offloading unconditionally (no skip hint).
        return PublishAndSubscribeOnSingles.subscribeOn(this, Boolean.TRUE::booleanValue, executor);
    }
/**
* Creates a new {@link Single} that may use the passed {@link io.servicetalk.concurrent.Executor} to invoke the
* following methods:
* <ul>
* <li>All {@link Cancellable} methods.</li>
* <li>The {@link #handleSubscribe(SingleSource.Subscriber)} method.</li>
* </ul>
* This method does <strong>not</strong> override preceding {@link io.servicetalk.concurrent.Executor}s, if any,
* specified for {@code this} {@link Single}. Only subsequent operations, if any, added in this execution chain will
* use this {@link io.servicetalk.concurrent.Executor}.
* <p>
* Note: unlike {@link #subscribeOn(io.servicetalk.concurrent.Executor)}, current operator may skip offloading to
* the passed {@link io.servicetalk.concurrent.Executor}, depending on the result of the {@link BooleanSupplier}
* hint.
*
* @param executor {@link io.servicetalk.concurrent.Executor} to use.
* @param shouldOffload Provides a hint whether offloading to the executor can be omitted or not. Offloading may
* still occur even if {@code false} is returned in order to preserve signal ordering.
* @return A new {@link Single} that may use the passed {@link io.servicetalk.concurrent.Executor} to invoke all
* methods of {@link Cancellable} and {@link #handleSubscribe(SingleSource.Subscriber)}.
* @see #subscribeOn(io.servicetalk.concurrent.Executor)
*/
    public final Single<T> subscribeOn(io.servicetalk.concurrent.Executor executor, BooleanSupplier shouldOffload) {
        // shouldOffload is only a hint: offloading may still happen to preserve signal ordering.
        return PublishAndSubscribeOnSingles.subscribeOn(this, shouldOffload, executor);
    }
/**
* Signifies that when the returned {@link Single} is subscribed to, the {@link AsyncContext} will be shared
* instead of making a {@link ContextMap#copy() copy}.
* <p>
* This operator only impacts behavior if the returned {@link Single} is subscribed directly after this operator,
* that means this must be the "last operator" in the chain for this to have an impact.
*
* @return A {@link Single} that will share the {@link AsyncContext} instead of making a
* {@link ContextMap#copy() copy} when subscribed to.
*/
    public final Single<T> shareContextOnSubscribe() {
        // Only effective when this is the last operator before subscribe (see Javadoc above).
        return new SingleShareContextOnSubscribe<>(this);
    }
/**
* <strong>This method requires advanced knowledge of building operators. Before using this method please attempt
* to compose existing operator(s) to satisfy your use case.</strong>
* <p>
* Returns a {@link Single} which will wrap the {@link SingleSource.Subscriber} using the provided {@code operator}
* argument before subscribing to this {@link Single}.
* <pre>{@code
* Single<X> pub = ...;
* pub.map(..) // A
* .liftSync(original -> modified)
* .afterFinally(..) // B
* }</pre>
* The {@code original -> modified} "operator" <strong>MUST</strong> be "synchronous" in that it does not interact
* with the original {@link Subscriber} from outside the modified {@link Subscriber} or {@link Cancellable}
* threads. That is to say this operator will not impact the {@link Executor} constraints already in place between
* <i>A</i> and <i>B</i> above. If you need asynchronous behavior, or are unsure, see
* {@link #liftAsync(SingleOperator)}.
* @param operator The custom operator logic. The input is the "original" {@link Subscriber} to this
* {@link Single} and the return is the "modified" {@link Subscriber} that provides custom operator business
* logic.
* @param <R> Type of the items emitted by the returned {@link Single}.
* @return a {@link Single} which when subscribed, the {@code operator} argument will be used to wrap the
* {@link Subscriber} before subscribing to this {@link Single}.
* @see #liftAsync(SingleOperator)
*/
    public final <R> Single<R> liftSync(SingleOperator<? super T, ? extends R> operator) {
        // "Synchronous" lift: the operator must not interact with the Subscriber from other threads.
        return new LiftSynchronousSingleOperator<>(this, operator);
    }
/**
* <strong>This method requires advanced knowledge of building operators. Before using this method please attempt
* to compose existing operator(s) to satisfy your use case.</strong>
* <p>
* Returns a {@link Single} which will wrap the {@link SingleSource.Subscriber} using the provided {@code operator}
* argument before subscribing to this {@link Single}.
* <pre>{@code
     *     Single&lt;X&gt; pub = ...;
     *     pub.map(..) // A
* .liftAsync(original -> modified)
* .afterFinally(..) // B
* }</pre>
* The {@code original -> modified} "operator" MAY be "asynchronous" in that it may interact with the original
* {@link Subscriber} from outside the modified {@link Subscriber} or {@link Cancellable} threads. More
* specifically:
* <ul>
* <li>all of the {@link Subscriber} invocations going "downstream" (i.e. from <i>A</i> to <i>B</i> above) MAY be
* offloaded via an {@link Executor}</li>
* <li>all of the {@link Cancellable} invocations going "upstream" (i.e. from <i>B</i> to <i>A</i> above) MAY be
* offloaded via an {@link Executor}</li>
* </ul>
* This behavior exists to prevent blocking code negatively impacting the thread that powers the upstream source of
* data (e.g. an EventLoop).
* @param operator The custom operator logic. The input is the "original" {@link Subscriber} to this
* {@link Single} and the return is the "modified" {@link Subscriber} that provides custom operator business
* logic.
* @param <R> Type of the items emitted by the returned {@link Single}.
* @return a {@link Single} which when subscribed, the {@code operator} argument will be used to wrap the
* {@link Subscriber} before subscribing to this {@link Single}.
* @see #liftSync(SingleOperator)
*/
    public final <R> Single<R> liftAsync(SingleOperator<? super T, ? extends R> operator) {
        // "Asynchronous" lift: Subscriber/Cancellable signals may be offloaded via an Executor.
        return new LiftAsynchronousSingleOperator<>(this, operator);
    }
/**
* Creates a new {@link Single} that terminates with the result (either success or error) of either this
* {@link Single} or the passed {@code other} {@link Single}, whichever terminates first. Therefore the result is
* said to be <strong>ambiguous</strong> relative to which source it originated from. After the first source
* terminates the non-terminated source will be cancelled.
* <p>
* From a sequential programming point of view this method is roughly equivalent to the following:
* <pre>{@code
* for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
* // This is an approximation, this operator will pick the first result from either of the futures.
* return ft.get();
* }
* }</pre>
*
* @param other {@link Single} to subscribe to and race with this {@link Single} to propagate to the return value.
* @return A new {@link Single} that terminates with the result (either success or error) of either this
* {@link Single} or the passed {@code other} {@link Single}, whichever terminates first. Therefore the result is
* said to be <strong>ambiguous</strong> relative to which source it originated from.
* @see <a href="http://reactivex.io/documentation/operators/amb.html">ReactiveX amb operator.</a>
*/
    public final Single<T> ambWith(final Single<T> other) {
        // First source to terminate wins; the other is cancelled (see Javadoc above).
        return new SingleAmbWith<>(this, other);
    }
//
// Operators End
//
//
// Conversion Operators Begin
//
/**
* Converts this {@code Single} to a {@link Publisher}.
*
* @return A {@link Publisher} that emits at most a single item which is emitted by this {@code Single}.
*/
    public final Publisher<T> toPublisher() {
        // Emits at most one item, then terminates.
        return new SingleToPublisher<>(this);
    }
/**
* Ignores the result of this {@link Single} and forwards the termination signal to the returned
* {@link Completable}.
*
* @return A {@link Completable} that mirrors the terminal signal from this {@code Single}.
*/
    public final Completable toCompletable() {
        // Drops the result value; only the terminal signal is forwarded.
        return new SingleToCompletable<>(this);
    }
/**
* Ignores the result of this {@link Single} and forwards the termination signal to the returned
* {@link Completable}.
*
* @return A {@link Completable} that mirrors the terminal signal from this {@code Single}.
*/
    public final Completable ignoreElement() {
        // Alias for toCompletable(), matching ReactiveX naming.
        return toCompletable();
    }
/**
* Convert this {@link Single} to a {@link CompletionStage}.
*
* @return A {@link CompletionStage} that mirrors the terminal signal from this {@link Single}.
*/
    public final CompletionStage<T> toCompletionStage() {
        // Subscribes eagerly: the stage is created and this Single is subscribed in one step.
        return SingleToCompletableFuture.createAndSubscribe(this);
    }
/**
* Convert this {@link Single} to a {@link Future}.
*
* @return A {@link Future} that mirrors the terminal signal from this {@link Single}.
*/
    public final Future<T> toFuture() {
        // Subscribes eagerly: the Future is created and this Single is subscribed in one step.
        return SingleToFuture.createAndSubscribe(this);
    }
//
// Conversion Operators End
//
/**
     * An internal subscribe method similar to {@link SingleSource#subscribe(Subscriber)} which can be used by
* different implementations to subscribe.
*
* @param subscriber {@link Subscriber} to subscribe for the result.
*/
    protected final void subscribeInternal(Subscriber<? super T> subscriber) {
        // Subscribes with the current AsyncContext provider so context is propagated to the Subscriber.
        subscribeAndReturnContext(subscriber, AsyncContext.provider());
    }
/**
* Subscribe to this {@link Single}, emits the result to the passed {@link Consumer} and log any
* {@link Subscriber#onError(Throwable)}.
*
* @param resultConsumer {@link Consumer} to accept the result of this {@link Single}.
*
* @return {@link Cancellable} used to invoke {@link Cancellable#cancel()} on the parameter of
* {@link Subscriber#onSubscribe(Cancellable)} for this {@link Single}.
*/
    public final Cancellable subscribe(Consumer<? super T> resultConsumer) {
        // SimpleSingleSubscriber forwards the result to the Consumer and doubles as the Cancellable
        // handed back to the caller.
        SimpleSingleSubscriber<T> subscriber = new SimpleSingleSubscriber<>(resultConsumer);
        subscribeInternal(subscriber);
        return subscriber;
    }
/**
* Handles a subscriber to this {@link Single}.
*
* @param subscriber the subscriber.
*/
protected abstract void handleSubscribe(Subscriber<? super T> subscriber);
// <editor-fold desc="Static Utility Methods">
/**
 * Creates a realized {@link Single} which always completes successfully with the provided {@code value}.
 *
 * @param value result of the {@link Single}; may be {@code null}.
 * @param <T> Type of the {@link Single}.
 *
 * @return A new {@link Single}.
 */
public static <T> Single<T> succeeded(@Nullable T value) {
    return new SucceededSingle<>(value);
}
/**
 * Creates a {@link Single} which when subscribed will invoke {@link Callable#call()} on the passed
 * {@link Callable} and emit the value returned by that invocation from the returned {@link Single}. Any error
 * emitted by the {@link Callable} will terminate the returned {@link Single} with the same error.
 * <p>
 * Blocking inside {@link Callable#call()} will in turn block the subscribe call to the returned {@link Single}. If
 * this behavior is undesirable then the returned {@link Single} should be offloaded using
 * {@link #subscribeOn(io.servicetalk.concurrent.Executor)} which offloads the subscribe call.
 *
 * @param callable {@link Callable} which supplies the result of the {@link Single}.
 * @param <T> Type of the {@link Single}.
 *
 * @return A new {@link Single}.
 */
public static <T> Single<T> fromCallable(final Callable<T> callable) {
    // Evaluation is deferred: the Callable is invoked per subscribe (see the Javadoc above).
    return new CallableSingle<>(callable);
}
/**
 * Creates a {@link Single} which when subscribed will invoke {@link Supplier#get()} on the passed
 * {@link Supplier} and emit the value returned by that invocation from the returned {@link Single}. Any error
 * emitted by the {@link Supplier} will terminate the returned {@link Single} with the same error.
 * <p>
 * Blocking inside {@link Supplier#get()} will in turn block the subscribe call to the returned {@link Single}. If
 * this behavior is undesirable then the returned {@link Single} should be offloaded using
 * {@link #subscribeOn(io.servicetalk.concurrent.Executor)} which offloads the subscribe call.
 *
 * @param supplier {@link Supplier} which supplies the result of the {@link Single}.
 * @param <T> Type of the {@link Single}.
 *
 * @return A new {@link Single}.
 */
public static <T> Single<T> fromSupplier(final Supplier<T> supplier) {
    // Adapt the Supplier to a Callable and reuse fromCallable's deferred-evaluation semantics.
    return fromCallable(supplier::get);
}
/**
 * Creates a realized {@link Single} which always terminates with the provided error {@code cause}.
 *
 * @param cause the error with which the returned {@link Single} terminates.
 * @param <T> Type of the {@link Single}.
 *
 * @return A new {@link Single}.
 */
public static <T> Single<T> failed(Throwable cause) {
    return new FailedSingle<>(cause);
}
/**
 * Creates a {@link Single} that never terminates.
 *
 * @param <T> Type of the {@link Single}.
 * @return A new {@link Single}.
 */
public static <T> Single<T> never() {
    // neverSingle() provides a Single that never delivers a terminal signal.
    return neverSingle();
}
/**
 * Defer creation of a {@link Single} until it is subscribed to.
 *
 * @param singleSupplier {@link Supplier} to create a new {@link Single} every time the returned {@link Single} is
 * subscribed.
 * @param <T> Type of the {@link Single}.
 * @return A new {@link Single} that creates a new {@link Single} using {@code singleSupplier} every time
 * it is subscribed and forwards all items and terminal events from the newly created {@link Single} to its
 * {@link Subscriber}.
 */
public static <T> Single<T> defer(Supplier<? extends Single<? extends T>> singleSupplier) {
    return new SingleDefer<>(singleSupplier);
}
/**
 * Convert from a {@link Future} to a {@link Single} via {@link Future#get()}.
 * <p>
 * Note that because {@link Future} only presents blocking APIs to extract the result, the process of getting the
 * result will block. The caller of subscribe is responsible for offloading if necessary, and also offloading if
 * {@link Cancellable#cancel()} will be called and this operation may block.
 * <p>
 * To apply a timeout see {@link #timeout(long, TimeUnit)} and related methods.
 * @param future The {@link Future} to convert.
 * @param <T> The data type the {@link Future} provides when complete.
 * @return A {@link Single} that derives results from {@link Future}.
 * @see #timeout(long, TimeUnit)
 */
public static <T> Single<T> fromFuture(Future<? extends T> future) {
    return new FutureToSingle<>(future);
}
/**
 * Asynchronously collects results of individual {@link Single}s returned by the passed {@link Iterable} into a
 * single {@link Collection}. <p>
 * This will actively subscribe to a limited number of {@link Single}s concurrently, in order to alter the defaults,
 * {@link #collectUnordered(Iterable, int)} should be used. <p>
 * If any of the {@link Single}s terminate with an error, returned {@link Single} will immediately terminate with
 * that error. In such a case, any in progress {@link Single}s will be cancelled. In order to delay error
 * termination use {@link #collectUnorderedDelayError(Iterable)}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * List<T> result = ...;// assume this is thread safe
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator does not provide any ordering guarantees for the results.
 *     result.add(ft.get());
 * }
 * return result;
 * }</pre>
 *
 * @param singles {@link Iterable} of {@link Single}s, results of which are to be collected.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A {@link Single} producing a {@link Collection} of all values produced by the individual {@link Single}s.
 * There is no guarantee of the order of the values in the produced {@link Collection} as compared to the order of
 * {@link Single}s passed to this method.
 */
public static <T> Single<Collection<T>> collectUnordered(Iterable<? extends Single<? extends T>> singles) {
    // Merge all sources concurrently (default concurrency) and accumulate each result as it arrives.
    return fromIterable(singles)
            .flatMapMergeSingle(identity())
            .collect(ArrayList::new, (collection, result) -> {
                collection.add(result);
                return collection;
            });
}
/**
 * Asynchronously collects results of the passed {@link Single}s into a single {@link Collection}. <p>
 * This will actively subscribe to a limited number of {@link Single}s concurrently, in order to alter the defaults,
 * {@link #collectUnordered(int, Single[])} should be used. <p>
 * If any of the {@link Single}s terminate with an error, returned {@link Single} will immediately terminate with
 * that error. In such a case, any in progress {@link Single}s will be cancelled. In order to delay error
 * termination use {@link #collectUnorderedDelayError(Single[])}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * List<T> result = ...;// assume this is thread safe
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator does not provide any ordering guarantees for the results.
 *     result.add(ft.get());
 * }
 * return result;
 * }</pre>
 *
 * @param singles {@link Single}s, results of which are to be collected.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A {@link Single} producing a {@link Collection} of all values produced by the individual {@link Single}s.
 * There is no guarantee of the order of the values in the produced {@link Collection} as compared to the order of
 * {@link Single}s passed to this method.
 */
@SafeVarargs
public static <T> Single<Collection<T>> collectUnordered(Single<? extends T>... singles) {
    // Pre-size the accumulator: the result count is exactly the number of sources.
    return from(singles)
            .<T>flatMapMergeSingle(identity())
            .collect(() -> new ArrayList<>(singles.length), (collection, result) -> {
                collection.add(result);
                return collection;
            });
}
/**
 * Asynchronously collects results of individual {@link Single}s returned by the passed {@link Iterable} into a
 * single {@link Collection}. <p>
 * If any of the {@link Single}s terminate with an error, returned {@link Single} will immediately terminate with
 * that error. In such a case, any in progress {@link Single}s will be cancelled. In order to delay error
 * termination use {@link #collectUnorderedDelayError(Iterable, int)}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * List<T> result = ...;// assume this is thread safe
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator does not provide any ordering guarantees for the results.
 *     result.add(ft.get());
 * }
 * return result;
 * }</pre>
 *
 * @param singles {@link Iterable} of {@link Single}s, results of which are to be collected.
 * @param maxConcurrency Maximum number of {@link Single}s that will be active at any point in time.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A {@link Single} producing a {@link Collection} of all values produced by the individual {@link Single}s.
 * There is no guarantee of the order of the values in the produced {@link Collection} as compared to the order of
 * {@link Single}s passed to this method.
 */
public static <T> Single<Collection<T>> collectUnordered(Iterable<? extends Single<? extends T>> singles,
                                                         int maxConcurrency) {
    // Same as the default-concurrency overload, but the merge width is caller-controlled.
    return fromIterable(singles)
            .flatMapMergeSingle(identity(), maxConcurrency)
            .collect(ArrayList::new, (collection, result) -> {
                collection.add(result);
                return collection;
            });
}
/**
 * Asynchronously collects results of the passed {@link Single}s into a single {@link Collection}. <p>
 * If any of the {@link Single}s terminate with an error, returned {@link Single} will immediately terminate with
 * that error. In such a case, any in progress {@link Single}s will be cancelled. In order to delay error
 * termination use {@link #collectUnorderedDelayError(int, Single[])}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * List<T> result = ...;// assume this is thread safe
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator does not provide any ordering guarantees for the results.
 *     result.add(ft.get());
 * }
 * return result;
 * }</pre>
 *
 * @param maxConcurrency Maximum number of {@link Single}s that will be active at any point in time.
 * @param singles {@link Single}s, results of which are to be collected.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A {@link Single} producing a {@link Collection} of all values produced by the individual {@link Single}s.
 * There is no guarantee of the order of the values in the produced {@link Collection} as compared to the order of
 * {@link Single}s passed to this method.
 */
@SafeVarargs
public static <T> Single<Collection<T>> collectUnordered(int maxConcurrency, Single<? extends T>... singles) {
    // Pre-size the accumulator to the source count; merge width is caller-controlled.
    return from(singles)
            .<T>flatMapMergeSingle(identity(), maxConcurrency)
            .collect(() -> new ArrayList<>(singles.length), (collection, result) -> {
                collection.add(result);
                return collection;
            });
}
/**
 * Asynchronously collects results of individual {@link Single}s returned by the passed {@link Iterable} into a
 * single {@link Collection}. <p>
 * This will actively subscribe to a limited number of {@link Single}s concurrently, in order to alter the defaults,
 * {@link #collectUnorderedDelayError(Iterable, int)} should be used. <p>
 * If any of the {@link Single}s terminate with an error, returned {@link Single} will wait for termination till all
 * the other {@link Single}s have been subscribed and terminated. If it is expected for the returned {@link Single}
 * to terminate on the first failing {@link Single}, {@link #collectUnordered(Iterable)} should be used.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * List<T> result = ...;// assume this is thread safe
 * List<Throwable> errors = ...; // assume this is thread safe
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator does not provide any ordering guarantees for the results.
 *     try {
 *         result.add(ft.get());
 *     } catch(Throwable t) {
 *         errors.add(t);
 *     }
 * }
 * if (errors.isEmpty()) {
 *     return result;
 * }
 * createAndThrowACompositeException(errors);
 * }</pre>
 *
 * @param singles {@link Iterable} of {@link Single}s, results of which are to be collected.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A {@link Single} producing a {@link Collection} of all values produced by the individual {@link Single}s.
 * There is no guarantee of the order of the values in the produced {@link Collection} as compared to the order of
 * {@link Single}s passed to this method.
 */
public static <T> Single<Collection<T>> collectUnorderedDelayError(
        Iterable<? extends Single<? extends T>> singles) {
    return fromIterable(singles).flatMapMergeSingleDelayError(identity()).collect(ArrayList::new, (ts, t) -> {
        ts.add(t);
        return ts;
    });
}
/**
 * Asynchronously collects results of the passed {@link Single}s into a single {@link Collection}. <p>
 * This will actively subscribe to a limited number of {@link Single}s concurrently, in order to alter the defaults,
 * {@link #collectUnorderedDelayError(int, Single[])} should be used. <p>
 * If any of the {@link Single}s terminate with an error, returned {@link Single} will wait for termination till all
 * the other {@link Single}s have been subscribed and terminated. If it is expected for the returned {@link Single}
 * to terminate on the first failing {@link Single}, {@link #collectUnordered(Single[])} should be used.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * List<T> result = ...;// assume this is thread safe
 * List<Throwable> errors = ...; // assume this is thread safe
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator does not provide any ordering guarantees for the results.
 *     try {
 *         result.add(ft.get());
 *     } catch(Throwable t) {
 *         errors.add(t);
 *     }
 * }
 * if (errors.isEmpty()) {
 *     return result;
 * }
 * createAndThrowACompositeException(errors);
 * }</pre>
 *
 * @param singles {@link Single}s, results of which are to be collected.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A {@link Single} producing a {@link Collection} of all values produced by the individual {@link Single}s.
 * There is no guarantee of the order of the values in the produced {@link Collection} as compared to the order of
 * {@link Single}s passed to this method.
 */
@SafeVarargs
public static <T> Single<Collection<T>> collectUnorderedDelayError(Single<? extends T>... singles) {
    return from(singles).<T>flatMapMergeSingleDelayError(identity())
            .collect(() -> new ArrayList<>(singles.length), (ts, t) -> {
                ts.add(t);
                return ts;
            });
}
/**
 * Asynchronously collects results of individual {@link Single}s returned by the passed {@link Iterable} into a
 * single {@link Collection}. <p>
 * If any of the {@link Single}s terminate with an error, returned {@link Single} will wait for termination till all
 * the other {@link Single}s have been subscribed and terminated. If it is expected for the returned {@link Single}
 * to terminate on the first failing {@link Single}, {@link #collectUnordered(Iterable, int)} should be used.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * List<T> result = ...;// assume this is thread safe
 * List<Throwable> errors = ...; // assume this is thread safe
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator does not provide any ordering guarantees for the results.
 *     try {
 *         result.add(ft.get());
 *     } catch(Throwable t) {
 *         errors.add(t);
 *     }
 * }
 * if (errors.isEmpty()) {
 *     return result;
 * }
 * createAndThrowACompositeException(errors);
 * }</pre>
 *
 * @param singles {@link Iterable} of {@link Single}s, results of which are to be collected.
 * @param maxConcurrency Maximum number of {@link Single}s that will be active at any point in time.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A {@link Single} producing a {@link Collection} of all values produced by the individual {@link Single}s.
 * There is no guarantee of the order of the values in the produced {@link Collection} as compared to the order of
 * {@link Single}s passed to this method.
 */
public static <T> Single<Collection<T>> collectUnorderedDelayError(Iterable<? extends Single<? extends T>> singles,
                                                                   int maxConcurrency) {
    return fromIterable(singles).flatMapMergeSingleDelayError(identity(), maxConcurrency)
            .collect(ArrayList::new, (ts, t) -> {
                ts.add(t);
                return ts;
            });
}
/**
 * Asynchronously collects results of the passed {@link Single}s into a single {@link Collection}. <p>
 * If any of the {@link Single}s terminate with an error, returned {@link Single} will wait for termination till all
 * the other {@link Single}s have been subscribed and terminated. If it is expected for the returned {@link Single}
 * to terminate on the first failing {@link Single}, {@link #collectUnordered(int, Single[])} should be used.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * List<T> result = ...;// assume this is thread safe
 * List<Throwable> errors = ...; // assume this is thread safe
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator does not provide any ordering guarantees for the results.
 *     try {
 *         result.add(ft.get());
 *     } catch(Throwable t) {
 *         errors.add(t);
 *     }
 * }
 * if (errors.isEmpty()) {
 *     return result;
 * }
 * createAndThrowACompositeException(errors);
 * }</pre>
 *
 * @param maxConcurrency Maximum number of {@link Single}s that will be active at any point in time.
 * @param singles {@link Single}s, results of which are to be collected.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A {@link Single} producing a {@link Collection} of all values produced by the individual {@link Single}s.
 * There is no guarantee of the order of the values in the produced {@link Collection} as compared to the order of
 * {@link Single}s passed to this method.
 */
@SafeVarargs
public static <T> Single<Collection<T>> collectUnorderedDelayError(int maxConcurrency,
                                                                   Single<? extends T>... singles) {
    return from(singles).<T>flatMapMergeSingleDelayError(identity(), maxConcurrency)
            .collect(() -> new ArrayList<>(singles.length), (ts, t) -> {
                ts.add(t);
                return ts;
            });
}
/**
 * Convert from a {@link CompletionStage} to a {@link Single}.
 * <p>
 * A best effort is made to propagate {@link Cancellable#cancel()} to the {@link CompletionStage}. Cancellation for
 * {@link CompletionStage} implementations will result in exceptional completion and invoke user
 * callbacks. If there is any blocking code involved in the cancellation process (including invoking user callbacks)
 * you should investigate if using an {@link Executor} is appropriate.
 * @param stage The {@link CompletionStage} to convert.
 * @param <T> The data type the {@link CompletionStage} provides when complete.
 * @return A {@link Single} that derives results from {@link CompletionStage}.
 */
public static <T> Single<T> fromStage(CompletionStage<? extends T> stage) {
    // CompletionStageToSingle implements the best-effort cancellation propagation described above.
    return new CompletionStageToSingle<>(stage);
}
/**
 * Creates a new {@link Single} that terminates with the result (either success or error) of whichever amongst the
 * passed {@code singles} that terminates first. Therefore the result is said to be <strong>ambiguous</strong>
 * relative to which source it originated from. After the first source terminates the non-terminated sources will be
 * cancelled.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator will pick the first result from any of the futures.
 *     return ft.get();
 * }
 * }</pre>
 *
 * @param singles {@link Single}s to subscribe to and race to propagate to the return value.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A new {@link Single} that terminates with the result (either success or error) of whichever amongst the
 * passed {@code singles} that terminates first. Therefore the result is said to be <strong>ambiguous</strong>
 * relative to which source it originated from.
 * @see <a href="http://reactivex.io/documentation/operators/amb.html">ReactiveX amb operator.</a>
 */
@SafeVarargs
public static <T> Single<T> amb(final Single<? extends T>... singles) {
    // AmbSingles races all sources: the first terminal signal wins, the rest are cancelled (see Javadoc above).
    return new AmbSingles<>(singles);
}
/**
 * Creates a new {@link Single} that terminates with the result (either success or error) of whichever amongst the
 * passed {@code singles} that terminates first. Therefore the result is said to be <strong>ambiguous</strong>
 * relative to which source it originated from. After the first source terminates the non-terminated sources will be
 * cancelled.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator will pick the first result from any of the futures.
 *     return ft.get();
 * }
 * }</pre>
 *
 * @param singles {@link Single}s to subscribe to and race to propagate to the return value.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A new {@link Single} that terminates with the result (either success or error) of whichever amongst the
 * passed {@code singles} that terminates first. Therefore the result is said to be <strong>ambiguous</strong>
 * relative to which source it originated from.
 * @see <a href="http://reactivex.io/documentation/operators/amb.html">ReactiveX amb operator.</a>
 */
public static <T> Single<T> amb(final Iterable<Single<? extends T>> singles) {
    // AmbSingles races all sources: the first terminal signal wins, the rest are cancelled (see Javadoc above).
    return new AmbSingles<>(singles);
}
/**
 * Creates a new {@link Single} that terminates with the result (either success or error) of whichever amongst the
 * passed {@code singles} that terminates first. After the first source terminates the non-terminated sources will
 * be cancelled, as documented by {@link #amb(Single[])} to which this method delegates.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator will pick the first result from any of the futures.
 *     return ft.get();
 * }
 * }</pre>
 *
 * @param singles {@link Single}s to subscribe to and race to propagate to the return value.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A new {@link Single} that terminates with the result (either success or error) of whichever amongst the
 * passed {@code singles} that terminates first.
 * @see <a href="http://reactivex.io/documentation/operators/amb.html">ReactiveX amb operator.</a>
 */
@SafeVarargs
public static <T> Single<T> anyOf(final Single<? extends T>... singles) {
    return amb(singles);
}
/**
 * Creates a new {@link Single} that terminates with the result (either success or error) of whichever amongst the
 * passed {@code singles} that terminates first. After the first source terminates the non-terminated sources will
 * be cancelled, as documented by {@link #amb(Iterable)} to which this method delegates.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * for (Future<T> ft: futures) { // Provided Futures (analogous to the Singles here)
 *     // This is an approximation, this operator will pick the first result from any of the futures.
 *     return ft.get();
 * }
 * }</pre>
 *
 * @param singles {@link Single}s to subscribe to and race to propagate to the return value.
 * @param <T> Type of the result of the individual {@link Single}s
 * @return A new {@link Single} that terminates with the result (either success or error) of whichever amongst the
 * passed {@code singles} that terminates first.
 * @see <a href="http://reactivex.io/documentation/operators/amb.html">ReactiveX amb operator.</a>
 */
public static <T> Single<T> anyOf(final Iterable<Single<? extends T>> singles) {
    return amb(singles);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link BiFunction} to items emitted
 * by {@code s1} and {@code s2}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * CompletableFuture<T1> f1 = ...; // s1
 * CompletableFuture<T2> f2 = ...; // s2
 * CompletableFuture.allOf(f1, f2).get(); // wait for all futures to complete
 * return zipper.apply(f1.get(), f2.get());
 * }</pre>
 * @param s1 The first {@link Single} to zip.
 * @param s2 The second {@link Single} to zip.
 * @param zipper Used to combine the completed results for each item from {@code singles}.
 * @param <T1> The type for the first {@link Single}.
 * @param <T2> The type for the second {@link Single}.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link BiFunction} to items emitted by
 * {@code s1} and {@code s2}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 * @see #zipDelayError(Single, Single, BiFunction)
 */
public static <T1, T2, R> Single<R> zip(Single<? extends T1> s1, Single<? extends T2> s2,
                                        BiFunction<? super T1, ? super T2, ? extends R> zipper) {
    return SingleZipper.zip(s1, s2, zipper);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link BiFunction} to items emitted
 * by {@code s1} and {@code s2}. If any of the {@link Single}s terminate with an error, the returned {@link Single}
 * will wait for termination till all the other {@link Single}s have been subscribed and terminated, and then
 * terminate with the first error.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * CompletableFuture<T1> f1 = ...; // s1
 * CompletableFuture<T2> f2 = ...; // s2
 * CompletableFuture.allOf(f1, f2).get(); // wait for all futures to complete
 * return zipper.apply(f1.get(), f2.get());
 * }</pre>
 * @param s1 The first {@link Single} to zip.
 * @param s2 The second {@link Single} to zip.
 * @param zipper Used to combine the completed results for each item from {@code singles}.
 * @param <T1> The type for the first {@link Single}.
 * @param <T2> The type for the second {@link Single}.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link BiFunction} to items emitted by
 * {@code s1} and {@code s2}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 * @see #zip(Single, Single, BiFunction)
 */
public static <T1, T2, R> Single<R> zipDelayError(Single<? extends T1> s1, Single<? extends T2> s2,
                                                  BiFunction<? super T1, ? super T2, ? extends R> zipper) {
    return SingleZipper.zipDelayError(s1, s2, zipper);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link Function3} to items emitted by
 * {@code s1}, {@code s2}, and {@code s3}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * CompletableFuture<T1> f1 = ...; // s1
 * CompletableFuture<T2> f2 = ...; // s2
 * CompletableFuture<T3> f3 = ...; // s3
 * CompletableFuture.allOf(f1, f2, f3).get(); // wait for all futures to complete
 * return zipper.apply(f1.get(), f2.get(), f3.get());
 * }</pre>
 * @param s1 The first {@link Single} to zip.
 * @param s2 The second {@link Single} to zip.
 * @param s3 The third {@link Single} to zip.
 * @param zipper Used to combine the completed results for each item from {@code singles}.
 * @param <T1> The type for the first {@link Single}.
 * @param <T2> The type for the second {@link Single}.
 * @param <T3> The type for the third {@link Single}.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link Function3} to items emitted by
 * {@code s1}, {@code s2}, and {@code s3}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 * @see #zipDelayError(Single, Single, Single, Function3)
 */
public static <T1, T2, T3, R> Single<R> zip(
        Single<? extends T1> s1, Single<? extends T2> s2, Single<? extends T3> s3,
        Function3<? super T1, ? super T2, ? super T3, ? extends R> zipper) {
    return SingleZipper.zip(s1, s2, s3, zipper);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link Function3} to items emitted by
 * {@code s1}, {@code s2}, and {@code s3}. If any of the {@link Single}s terminate with an error, the returned
 * {@link Single} will wait for termination till all the other {@link Single}s have been subscribed and terminated,
 * and then terminate with the first error.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * CompletableFuture<T1> f1 = ...; // s1
 * CompletableFuture<T2> f2 = ...; // s2
 * CompletableFuture<T3> f3 = ...; // s3
 * CompletableFuture.allOf(f1, f2, f3).get(); // wait for all futures to complete
 * return zipper.apply(f1.get(), f2.get(), f3.get());
 * }</pre>
 * @param s1 The first {@link Single} to zip.
 * @param s2 The second {@link Single} to zip.
 * @param s3 The third {@link Single} to zip.
 * @param zipper Used to combine the completed results for each item from {@code singles}.
 * @param <T1> The type for the first {@link Single}.
 * @param <T2> The type for the second {@link Single}.
 * @param <T3> The type for the third {@link Single}.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link Function3} to items emitted by
 * {@code s1}, {@code s2}, and {@code s3}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 * @see #zip(Single, Single, Single, Function3)
 */
public static <T1, T2, T3, R> Single<R> zipDelayError(
        Single<? extends T1> s1, Single<? extends T2> s2, Single<? extends T3> s3,
        Function3<? super T1, ? super T2, ? super T3, ? extends R> zipper) {
    return SingleZipper.zipDelayError(s1, s2, s3, zipper);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link Function4} to items emitted by
 * {@code s1}, {@code s2}, {@code s3}, and {@code s4}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * CompletableFuture<T1> f1 = ...; // s1
 * CompletableFuture<T2> f2 = ...; // s2
 * CompletableFuture<T3> f3 = ...; // s3
 * CompletableFuture<T4> f4 = ...; // s4
 * CompletableFuture.allOf(f1, f2, f3, f4).get(); // wait for all futures to complete
 * return zipper.apply(f1.get(), f2.get(), f3.get(), f4.get());
 * }</pre>
 * @param s1 The first {@link Single} to zip.
 * @param s2 The second {@link Single} to zip.
 * @param s3 The third {@link Single} to zip.
 * @param s4 The fourth {@link Single} to zip.
 * @param zipper Used to combine the completed results for each item from {@code singles}.
 * @param <T1> The type for the first {@link Single}.
 * @param <T2> The type for the second {@link Single}.
 * @param <T3> The type for the third {@link Single}.
 * @param <T4> The type for the fourth {@link Single}.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link Function4} to items emitted by
 * {@code s1}, {@code s2}, {@code s3}, and {@code s4}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 * @see #zipDelayError(Single, Single, Single, Single, Function4)
 */
public static <T1, T2, T3, T4, R> Single<R> zip(
        Single<? extends T1> s1, Single<? extends T2> s2, Single<? extends T3> s3, Single<? extends T4> s4,
        Function4<? super T1, ? super T2, ? super T3, ? super T4, ? extends R> zipper) {
    return SingleZipper.zip(s1, s2, s3, s4, zipper);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link Function4} to items emitted by
 * {@code s1}, {@code s2}, {@code s3}, and {@code s4}. If any of the {@link Single}s terminate with an error, the
 * returned {@link Single} will wait for termination till all the other {@link Single}s have been subscribed and
 * terminated, and then terminate with the first error.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 * CompletableFuture<T1> f1 = ...; // s1
 * CompletableFuture<T2> f2 = ...; // s2
 * CompletableFuture<T3> f3 = ...; // s3
 * CompletableFuture<T4> f4 = ...; // s4
 * CompletableFuture.allOf(f1, f2, f3, f4).get(); // wait for all futures to complete
 * return zipper.apply(f1.get(), f2.get(), f3.get(), f4.get());
 * }</pre>
 * @param s1 The first {@link Single} to zip.
 * @param s2 The second {@link Single} to zip.
 * @param s3 The third {@link Single} to zip.
 * @param s4 The fourth {@link Single} to zip.
 * @param zipper Used to combine the completed results for each item from {@code singles}.
 * @param <T1> The type for the first {@link Single}.
 * @param <T2> The type for the second {@link Single}.
 * @param <T3> The type for the third {@link Single}.
 * @param <T4> The type for the fourth {@link Single}.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link Function4} to items emitted by
 * {@code s1}, {@code s2}, {@code s3}, and {@code s4}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 * @see #zip(Single, Single, Single, Single, Function4)
 */
public static <T1, T2, T3, T4, R> Single<R> zipDelayError(
        Single<? extends T1> s1, Single<? extends T2> s2, Single<? extends T3> s3, Single<? extends T4> s4,
        Function4<? super T1, ? super T2, ? super T3, ? super T4, ? extends R> zipper) {
    return SingleZipper.zipDelayError(s1, s2, s3, s4, zipper);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link Function} to items emitted by
 * {@code singles}.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 *     Function<? super CompletableFuture<?>[], ? extends R> zipper = ...;
 *     CompletableFuture<?>[] futures = ...; // Provided Futures (analogous to the Singles here)
 *     CompletableFuture.allOf(futures).get(); // wait for all futures to complete
 *     return zipper.apply(futures);
 * }</pre>
 * @param zipper Used to combine the completed results for each item from {@code singles}.
 * @param singles The collection of {@link Single}s that when complete provides the results to "zip" (aka combine)
 * together.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link Function} to items emitted by
 * {@code singles}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 */
public static <R> Single<R> zip(Function<? super Object[], ? extends R> zipper, Single<?>... singles) {
    // Variadic variant of zip; SingleZipper applies the zipper to the results array.
    return SingleZipper.zip(zipper, singles);
}
/**
 * Create a new {@link Single} that emits the results of a specified zipper {@link Function} to items emitted by
 * {@code singles}. If any of the {@link Single}s terminate with an error, the returned {@link Single} will wait for
 * termination till all the other {@link Single}s have been subscribed and terminated, and then terminate with the
 * first error.
 * <p>
 * From a sequential programming point of view this method is roughly equivalent to the following:
 * <pre>{@code
 *     Function<? super CompletableFuture<?>[], ? extends R> zipper = ...;
 *     CompletableFuture<?>[] futures = ...; // Provided Futures (analogous to the Singles here)
 *     CompletableFuture.allOf(futures).get(); // wait for all futures to complete
 *     return zipper.apply(futures);
 * }</pre>
 * @param zipper Used to combine the completed results for each item from {@code singles}.
 * @param singles The collection of {@link Single}s that when complete provides the results to "zip" (aka combine)
 * together.
 * @param <R> The result type of the zipper.
 * @return a new {@link Single} that emits the results of a specified zipper {@link Function} to items emitted by
 * {@code singles}.
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX zip operator.</a>
 */
public static <R> Single<R> zipDelayError(Function<? super Object[], ? extends R> zipper, Single<?>... singles) {
    // Variadic variant of zipDelayError; errors are delivered only after all sources terminate.
    return SingleZipper.zipDelayError(zipper, singles);
}
// </editor-fold>
// <editor-fold desc="Internal Methods">
/**
 * Returns the {@link ContextMap} to be used for a subscribe.
 *
 * @param provider The {@link AsyncContextProvider} which is the source of the map
 * @return {@link ContextMap} for this subscribe operation.
 */
ContextMap contextForSubscribe(AsyncContextProvider provider) {
    // Default policy: give each subscribe an isolated copy of the current context.
    // Operators that want to share the map across boundaries override this method.
    final ContextMap current = provider.context();
    return current.copy();
}
/**
 * Subscribes to this {@link Single} and hands back the {@link ContextMap} that was captured
 * for this particular subscribe operation.
 *
 * @param subscriber the subscriber.
 * @param provider {@link AsyncContextProvider} to use.
 * @return {@link ContextMap} for this subscribe operation.
 */
final ContextMap subscribeAndReturnContext(Subscriber<? super T> subscriber, AsyncContextProvider provider) {
    final ContextMap map = contextForSubscribe(provider);
    subscribeWithContext(subscriber, provider, map);
    return map;
}
/**
 * Delegate subscribe calls in an operator chain. This method is used by operators to subscribe to the upstream
 * source.
 * @param subscriber the subscriber.
 * @param contextMap the {@link ContextMap} to use for this {@link Subscriber}.
 * @param contextProvider the {@link AsyncContextProvider} used to wrap any objects to preserve {@link ContextMap}.
 */
final void delegateSubscribe(Subscriber<? super T> subscriber,
                             ContextMap contextMap, AsyncContextProvider contextProvider) {
    // Forwards directly: context capture already happened at the original subscribe call.
    handleSubscribe(subscriber, contextMap, contextProvider);
}
private void subscribeWithContext(Subscriber<? super T> subscriber,
                                  AsyncContextProvider contextProvider, ContextMap contextMap) {
    requireNonNull(subscriber);
    final Subscriber<? super T> wrapped = contextProvider.wrapCancellable(subscriber, contextMap);
    if (contextProvider.context() != contextMap) {
        // Make sure handleSubscribe() runs with contextMap as the active AsyncContext.
        contextProvider.wrapRunnable(() -> handleSubscribe(wrapped, contextMap, contextProvider), contextMap).run();
    } else {
        // The AsyncContext is already the shared map: no extra wrapping required.
        handleSubscribe(wrapped, contextMap, contextProvider);
    }
}
/**
 * Override for {@link #handleSubscribe(SingleSource.Subscriber)} to perform context wrapping.
 * <p>
 * This method wraps the passed {@link Subscriber}. Operators that do not wish to wrap the passed {@link Subscriber}
 * can override this method and omit the wrapping.
 * @param subscriber the subscriber.
 * @param contextMap the {@link ContextMap} to use for this {@link Subscriber}.
 * @param contextProvider the {@link AsyncContextProvider} used to wrap any objects to preserve {@link ContextMap}.
 */
void handleSubscribe(Subscriber<? super T> subscriber,
                     ContextMap contextMap, AsyncContextProvider contextProvider) {
    try {
        // Wrap the Subscriber so its signals are delivered with the captured ContextMap.
        Subscriber<? super T> wrapped = contextProvider.wrapSingleSubscriber(subscriber, contextMap);
        handleSubscribe(wrapped);
    } catch (Throwable t) {
        LOGGER.warn("Unexpected exception from subscribe(), assuming no interaction with the Subscriber.", t);
        // At this point we are unsure if any signal was sent to the Subscriber and if it is safe to invoke the
        // Subscriber without violating specifications. However, not propagating the error to the Subscriber will
        // result in hard to debug scenarios where no further signals may be sent to the Subscriber and hence it
        // will be hard to distinguish between a "hung" source and a wrongly implemented source that violates the
        // specifications and throw from subscribe() (Rule 1.9).
        //
        // By doing the following we may violate the rules:
        // 1) Rule 2.12: onSubscribe() MUST be called at most once.
        // 2) Rule 1.7: Once a terminal state has been signaled (onError, onComplete) it is REQUIRED that no
        // further signals occur.
        deliverErrorFromSource(subscriber, t);
    }
}
// </editor-fold>
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.client.gateway.local;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.JobManagerOptions;
import org.apache.flink.runtime.net.ConnectionUtils;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.client.SqlClientException;
import org.apache.flink.table.client.config.Deployment;
import org.apache.flink.table.client.config.Environment;
import org.apache.flink.table.client.gateway.SqlExecutionException;
import org.apache.flink.types.Row;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Maintains dynamic results.
*/
public class ResultStore {

    private final Configuration flinkConfig;

    /** Active results, keyed by result id. */
    private final Map<String, DynamicResult<?>> results;

    public ResultStore(Configuration flinkConfig) {
        this.flinkConfig = flinkConfig;
        results = new HashMap<>();
    }

    /**
     * Creates a result. Might start threads or opens sockets so every created result must be closed.
     *
     * @param env environment describing execution mode (streaming/batch) and result serving mode
     * @param schema schema of the result rows
     * @param config execution configuration used for result (de)serialization
     * @throws SqlExecutionException if a batch query is requested in a non-table result mode
     */
    public <T> DynamicResult<T> createResult(Environment env, TableSchema schema, ExecutionConfig config) {
        final TypeInformation<Row> outputType = Types.ROW_NAMED(schema.getColumnNames(), schema.getTypes());
        if (env.getExecution().isStreamingExecution()) {
            // determine gateway address (and port if possible)
            final InetAddress gatewayAddress = getGatewayAddress(env.getDeployment());
            final int gatewayPort = getGatewayPort(env.getDeployment());
            if (env.getExecution().isChangelogMode()) {
                return new ChangelogCollectStreamResult<>(outputType, config, gatewayAddress, gatewayPort);
            } else {
                return new MaterializedCollectStreamResult<>(outputType, config, gatewayAddress, gatewayPort);
            }
        } else {
            // Batch Execution
            if (!env.getExecution().isTableMode()) {
                throw new SqlExecutionException("Results of batch queries can only be served in table mode.");
            }
            return new MaterializedCollectBatchResult<>(outputType, config);
        }
    }

    /** Registers a result under the given id. Overwrites a previous result with the same id. */
    public void storeResult(String resultId, DynamicResult<?> result) {
        results.put(resultId, result);
    }

    @SuppressWarnings("unchecked")
    public <T> DynamicResult<T> getResult(String resultId) {
        return (DynamicResult<T>) results.get(resultId);
    }

    /** Removes the result from the store; the caller is responsible for closing it. */
    public void removeResult(String resultId) {
        results.remove(resultId);
    }

    /** @return ids of all currently stored results */
    public List<String> getResults() {
        return new ArrayList<>(results.keySet());
    }

    // --------------------------------------------------------------------------------------------

    private int getGatewayPort(Deployment deploy) {
        // try to get address from deployment configuration
        return deploy.getGatewayPort();
    }

    /**
     * Determines the address that the cluster can use to reach this client for result retrieval:
     * either the manually configured gateway address, the local interface that can connect to the
     * JobManager, or as a last resort the local host address.
     */
    private InetAddress getGatewayAddress(Deployment deploy) {
        // try to get address from deployment configuration
        final String address = deploy.getGatewayAddress();

        // use manually defined address
        if (!address.isEmpty()) {
            try {
                return InetAddress.getByName(address);
            } catch (UnknownHostException e) {
                throw new SqlClientException("Invalid gateway address '" + address + "' for result retrieval.", e);
            }
        } else {
            // TODO cache this
            // try to get the address by communicating to JobManager
            final String jobManagerAddress = flinkConfig.getString(JobManagerOptions.ADDRESS);
            final int jobManagerPort = flinkConfig.getInteger(JobManagerOptions.PORT);

            if (jobManagerAddress != null && !jobManagerAddress.isEmpty()) {
                try {
                    return ConnectionUtils.findConnectingAddress(
                            new InetSocketAddress(jobManagerAddress, jobManagerPort),
                            deploy.getResponseTimeout(),
                            400);
                } catch (Exception e) {
                    throw new SqlClientException("Could not determine address of the gateway for result retrieval " +
                            "by connecting to the job manager. Please specify the gateway address manually.", e);
                }
            } else {
                try {
                    return InetAddress.getLocalHost();
                } catch (UnknownHostException e) {
                    throw new SqlClientException("Could not determine address of the gateway for result retrieval. " +
                            "Please specify the gateway address manually.", e);
                }
            }
        }
    }
}
|
package cellsociety.View.Buttons;
import static org.junit.jupiter.api.Assertions.*;
import cellsociety.Controller.Controller;
import cellsociety.Main;
import cellsociety.Model.BoardStructure;
import cellsociety.View.GameView;
import java.awt.Dimension;
import java.util.concurrent.TimeUnit;
import javafx.scene.control.Button;
import javafx.stage.Stage;
import org.junit.jupiter.api.Test;
import util.DukeApplicationTest;
class PlayButtonTest extends DukeApplicationTest {

    // Controlled test fixture: the cell at [0][0] is known to change once the game runs.
    // NOTE(review): the CSV path ends with a trailing '/', kept as-is — confirm it is intentional.
    private static final String TEST_CSV_PATH = "data/TestingCSV/spreadingfirespreadtest.csv/";
    private static final String TEST_SIM_PATH = "src/resources/SpreadingFire6.sim";

    private Controller controller;
    private Main main;
    private Button playButton;

    @Override
    public void start(Stage stage) {
        main = new Main();
        main.start(stage);
        controller = main.getGameController();
    }

    /** Loads the Spreading Fire test board and caches the play button from the scene graph. */
    private void loadTestBoard() {
        javafxRun(() -> controller.createAndReadInNewBoardForTesting(TEST_CSV_PATH, TEST_SIM_PATH));
        playButton = lookup("#PlayButton").query();
    }

    /** The play button must start out disabled before any simulation is running. */
    @Test
    void checkInitiallyDisabled() {
        loadTestBoard();
        assertTrue(playButton.isDisabled());
    }

    /** Clicking play must advance the simulation (the tracked cell's state changes). */
    @Test
    void checkPlaysGame() {
        loadTestBoard();
        playButton.setDisable(false);
        int prevCell = controller.getModelBoard().getCurrBoard().getCurrCell(0, 0).getState();
        clickOn(playButton);
        javafxRun(() -> main.stepIgnoringIfPaused());
        assertFalse(checkFirePaused(prevCell));
    }

    /*
     * Know that the cell at [0][0] changes because controlled testing board
     */
    private boolean checkFirePaused(int prevCell) {
        BoardStructure currentBoard = controller.getModelBoard().getCurrBoard();
        return (currentBoard.getCurrCell(0, 0).getState() == prevCell);
    }
}
|
package com.vaadin.spring.ims.it;
import com.vaadin.testbench.IPAddress;
import com.vaadin.testbench.ScreenshotOnFailureRule;
import com.vaadin.testbench.parallel.ParallelTest;
import io.github.bonigarcia.wdm.WebDriverManager;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.slf4j.LoggerFactory;
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
public abstract class AbstractTest extends ParallelTest {

    static {
        // Prevent debug logging from Apache HTTP client.
        Logger root = (Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
        root.setLevel(Level.INFO);
    }

    // NOTE: kept below the static block so the log level is configured before this lookup runs.
    private static final String SERVER_HOST = IPAddress.findSiteLocalAddress();
    private static final int SERVER_PORT = 8080;

    /** Captures a screenshot whenever a test fails. */
    @Rule
    public ScreenshotOnFailureRule rule = new ScreenshotOnFailureRule(this, true);

    /** Route (URL path) of the view under test, supplied by the concrete subclass. */
    private final String route;

    protected AbstractTest(String route) {
        this.route = route;
    }

    @BeforeClass
    public static void setupClass() {
        WebDriverManager.chromedriver().setup();
    }

    @Before
    public void setup() throws Exception {
        super.setup();
        getDriver().get(getURL(route)); // Opens the given URL in the browser
    }

    private static String getURL(String route) {
        return String.format("http://%s:%d/%s", SERVER_HOST, SERVER_PORT, route);
    }
}
|
package edu.illinois.jflow.jflow.wala.dataflowanalysis;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.ibm.wala.ipa.callgraph.propagation.PointerKey;
import com.ibm.wala.ssa.IR;
import com.ibm.wala.ssa.SSAInstruction;
import com.ibm.wala.util.collections.Pair;
/**
* Represents a statement in the source code.
*
* A statement contains a collection of WALA IR, all of which maps to that particular line number in
* the source code.
*
* @author nchen
*
*/
public class Statement implements PDGNode {

    /** Sentinel index for Phi instructions and other "non-real" instructions without a bytecode index. */
    public static final Integer UNKNOWN_INSTRUCTION_INDEX = -1;

    private final int lineNumber;

    private String sourceCode = "";

    private IR ir;

    /** Pairs of (instruction, instruction index) that map to this source line. */
    private List<Pair<? extends SSAInstruction, Integer>> instructions;

    /** Pointer keys modified by this statement. */
    private Set<PointerKey> mods = new HashSet<>();

    /** Pointer keys read by this statement. */
    private Set<PointerKey> refs = new HashSet<>();

    public Statement(int lineNumber, IR ir) {
        instructions = new ArrayList<>();
        this.lineNumber = lineNumber;
        this.ir = ir;
    }

    /** Adds an (instruction, index) pair belonging to this source line. */
    public void add(Pair<? extends SSAInstruction, Integer> ssaInstruction) {
        instructions.add(ssaInstruction);
    }

    public int getLineNumber() {
        return lineNumber;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(String.format("LINE: %d%n", lineNumber));
        for (Pair<? extends SSAInstruction, Integer> instr : instructions) {
            sb.append(instr.fst);
            sb.append("\n");
        }
        return sb.toString();
    }

    public String getSourceCode() {
        return sourceCode;
    }

    public void setSourceCode(String sourceCode) {
        this.sourceCode = sourceCode;
    }

    @Override
    public String getSimplifiedRepresentation() {
        return sourceCode;
    }

    @Override
    public boolean isOnLine(int lineNumber) {
        return this.lineNumber == lineNumber;
    }

    /**
     * Collects the local variable names for every value defined by the given instruction.
     * NOTE(review): the stored index may be UNKNOWN_INSTRUCTION_INDEX (-1) for Phi
     * instructions — confirm IR.getLocalNames tolerates that.
     */
    private List<String> getVariableNamesForDefs(Pair<? extends SSAInstruction, Integer> pair) {
        List<String> names = new ArrayList<>();
        SSAInstruction instr = pair.fst;
        Integer index = pair.snd;
        int numDefs = instr.getNumberOfDefs();
        for (int i = 0; i < numDefs; i++) {
            int def = instr.getDef(i);
            String[] localNames = ir.getLocalNames(index, def);
            names.addAll(Arrays.asList(localNames));
        }
        return names;
    }

    @Override
    public List<String> defs() {
        List<String> defs = new ArrayList<>();
        for (Pair<? extends SSAInstruction, Integer> pair : instructions) {
            defs.addAll(getVariableNamesForDefs(pair));
        }
        return defs;
    }

    /** @return the raw SSA instructions for this line, without their indices */
    public List<SSAInstruction> retrieveAllSSAInstructions() {
        List<SSAInstruction> ssaInstructions = new ArrayList<>();
        for (Pair<? extends SSAInstruction, Integer> pair : instructions) {
            ssaInstructions.add(pair.fst);
        }
        return ssaInstructions;
    }

    @Override
    public Set<PointerKey> getRefs() {
        return refs;
    }

    @Override
    public Set<PointerKey> getMods() {
        return mods;
    }

    @Override
    public void addRef(PointerKey key) {
        refs.add(key);
    }

    @Override
    public void addMod(PointerKey key) {
        mods.add(key);
    }
}
|
package hex.genmodel.algos.tree;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.drf.DrfMojoModel;
import hex.genmodel.algos.gbm.GbmMojoModel;
import hex.genmodel.utils.ByteBufferWrapper;
import hex.genmodel.utils.GenmodelBitSet;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
* Common ancestor for {@link DrfMojoModel} and {@link GbmMojoModel}.
* See also: `hex.tree.SharedTreeModel` and `hex.tree.TreeVisitor` classes.
*/
public abstract class SharedTreeMojoModel extends MojoModel {
// NA-handling direction codes cached from the NaSplitDir enum for fast comparisons on the
// scoring hot path (see scoreTree()).
private static final int NsdNaVsRest = NaSplitDir.NAvsREST.value();
private static final int NsdNaLeft = NaSplitDir.NALeft.value();
private static final int NsdLeft = NaSplitDir.Left.value();

// Serialized MOJO format version; selects the ScoreTree implementation in postInit().
protected double _mojo_version;

// Version-specific tree-walk strategy, instantiated in postInit().
private ScoreTree _scoreTree;

/**
 * {@code _ntree_groups} is the number of trees requested by the user. For
 * binomial case or regression this is also the total number of trees
 * trained; however in multinomial case each requested "tree" is actually
 * represented as a group of trees, with {@code _ntrees_per_group} trees
 * in each group. Each of these individual trees assesses the likelihood
 * that a given observation belongs to class A, B, C, etc. of a
 * multiclass response.
 */
protected int _ntree_groups;
protected int _ntrees_per_group;

/**
 * Array of binary tree data, each tree being a {@code byte[]} array. The
 * trees are logically grouped into a rectangular grid of dimensions
 * {@link #_ntree_groups} x {@link #_ntrees_per_group}, however physically
 * they are stored as 1-dimensional list, and an {@code [i, j]} logical
 * tree is mapped to the index {@link #treeIndex(int, int)}.
 */
protected byte[][] _compressed_trees;

/**
 * Array of auxiliary binary tree data, each being a {@code byte[]} array.
 */
protected byte[][] _compressed_trees_aux;

/**
 * GLM's beta used for calibrating output probabilities using Platt Scaling.
 */
protected double[] _calib_glm_beta;
protected void postInit() {
    // Pick the tree-walk implementation that matches the serialized MOJO format version.
    if (_mojo_version == 1.0) {
        _scoreTree = new ScoreTree0(); // First version
    } else if (_mojo_version == 1.1) {
        _scoreTree = new ScoreTree1(); // Second version
    } else {
        _scoreTree = new ScoreTree2(); // Current version
    }
}
/** @return number of tree groups (one per requested "tree"; see {@link #_ntree_groups}) */
public final int getNTreeGroups() {
    return _ntree_groups;
}

/** @return number of trees within each group (one per class in the multinomial case) */
public final int getNTreesPerGroup() {
    return _ntrees_per_group;
}
/**
 * Highly efficient (critical path) tree scoring
 *
 * Given a tree (in the form of a byte array) and the row of input data, compute either this tree's
 * predicted value when `computeLeafAssignment` is false, or the decision path within the tree (but no more
 * than 64 levels) when `computeLeafAssignment` is true.
 *
 * Note: this function is also used from the `hex.tree.CompressedTree` class in `h2o-algos` project.
 *
 * @param tree serialized tree bytes
 * @param row input row (model-ordered feature values)
 * @param nclasses number of response classes (affects small-leaf encoding width)
 * @param computeLeafAssignment when true, return the decision path bit-packed into a double's raw bits
 * @param domains per-column categorical domains (may be null)
 * @return the leaf prediction, or the packed decision path
 */
@SuppressWarnings("ConstantConditions")  // Complains that the code is too complex. Well duh!
public static double scoreTree(byte[] tree, double[] row, int nclasses, boolean computeLeafAssignment, String[][] domains) {
    ByteBufferWrapper ab = new ByteBufferWrapper(tree);
    GenmodelBitSet bs = null;
    long bitsRight = 0;
    int level = 0;
    while (true) {
        int nodeType = ab.get1U();
        int colId = ab.get2();
        if (colId == 65535) {
            if (computeLeafAssignment) {
                // BUGFIX: use a long shift. `1 << level` is int arithmetic, which reduces the
                // shift distance mod 32 and sign-extends on widening, corrupting decision paths
                // deeper than 31 levels (the decoder reads bits up to index 63 with 0x1L masks).
                bitsRight |= 1L << level;  // mark the end of the tree
                return Double.longBitsToDouble(bitsRight);
            } else {
                return ab.get4f();
            }
        }
        int naSplitDir = ab.get1U();
        boolean naVsRest = naSplitDir == NsdNaVsRest;
        boolean leftward = naSplitDir == NsdNaLeft || naSplitDir == NsdLeft;
        int lmask = (nodeType & 51);
        int equal = (nodeType & 12);  // Can be one of 0, 8, 12
        assert equal != 4;  // no longer supported

        float splitVal = -1;
        if (!naVsRest) {
            // Extract value or group to split on
            if (equal == 0) {
                // Standard float-compare test (either < or ==)
                splitVal = ab.get4f();  // Get the float to compare
            } else {
                // Bitset test
                if (bs == null) bs = new GenmodelBitSet(0);
                if (equal == 8)
                    bs.fill2(tree, ab);
                else
                    bs.fill3(tree, ab);
            }
        }

        // This logic:
        //
        //        double d = row[colId];
        //        if (Double.isNaN(d) || ( equal != 0 && bs != null && !bs.isInRange((int)d) ) || (domains != null && domains[colId] != null && domains[colId].length <= (int)d)
        //              ? !leftward : !naVsRest && (equal == 0? d >= splitVal : bs.contains((int)d))) {
        //
        // Really does this:
        //
        //        if (value is NaN or value is not in the range of the bitset or is outside the domain map length (but an integer) ) {
        //          if (leftward) {
        //            go left
        //          }
        //          else {
        //            go right
        //          }
        //        }
        //        else {
        //          if (naVsRest) {
        //            go left
        //          }
        //          else {
        //            if (numeric) {
        //              if (value < split value) {
        //                go left
        //              }
        //              else {
        //                go right
        //              }
        //            }
        //            else {
        //              if (value not in bitset) {
        //                go left
        //              }
        //              else {
        //                go right
        //              }
        //            }
        //          }
        //        }
        double d = row[colId];
        if (Double.isNaN(d) || ( equal != 0 && bs != null && !bs.isInRange((int)d) ) || (domains != null && domains[colId] != null && domains[colId].length <= (int)d)
                ? !leftward : !naVsRest && (equal == 0? d >= splitVal : bs.contains((int)d))) {
            // go RIGHT
            switch (lmask) {
                case 0:  ab.skip(ab.get1U());  break;
                case 1:  ab.skip(ab.get2());  break;
                case 2:  ab.skip(ab.get3());  break;
                case 3:  ab.skip(ab.get4());  break;
                case 16: ab.skip(nclasses < 256? 1 : 2);  break;  // Small leaf
                case 48: ab.skip(4);  break;  // skip the prediction
                default:
                    assert false : "illegal lmask value " + lmask + " in tree " + Arrays.toString(tree);
            }
            if (computeLeafAssignment && level < 64) bitsRight |= 1L << level;  // BUGFIX: long shift
            lmask = (nodeType & 0xC0) >> 2;  // Replace leftmask with the rightmask
        } else {
            // go LEFT
            if (lmask <= 3)
                ab.skip(lmask + 1);
        }
        level++;
        if ((lmask & 16) != 0) {
            if (computeLeafAssignment) {
                bitsRight |= 1L << level;  // mark the end of the tree (BUGFIX: long shift)
                return Double.longBitsToDouble(bitsRight);
            } else {
                return ab.get4f();
            }
        }
    }
}
/**
 * Visitor used by {@link #getDecisionPath(double, DecisionPathTracker)} to decode a
 * bit-packed decision path one level at a time.
 *
 * @param <T> type of the result produced when the walk terminates
 */
public interface DecisionPathTracker<T> {
    /**
     * Consumes one step of the path.
     * @param depth 0-based tree level
     * @param right true if the step at this level went right
     * @return true to continue the walk, false to stop early
     */
    boolean go(int depth, boolean right);

    /** @return the final result of the traversal */
    T terminate();
}
/** Decodes a packed decision path into a human-readable string of 'L'/'R' characters. */
public static class StringDecisionPathTracker implements DecisionPathTracker<String> {
    private final char[] _sb = new char[64]; // one slot per tree level (max depth 64)
    private int _pos = 0; // depth of the most recent right-turn seen

    @Override
    public boolean go(int depth, boolean right) {
        _sb[depth] = right ? 'R' : 'L';
        // NOTE(review): only right-turns advance _pos, so terminate() truncates the string at
        // the last 'R' — presumably this drops the trailing end-of-tree marker bit; confirm
        // against the encoding in scoreTree().
        if (right) _pos = depth;
        return true;
    }

    @Override
    public String terminate() {
        String path = new String(_sb, 0, _pos);
        _pos = 0; // reset so this tracker instance can be reused
        return path;
    }
}
/**
 * Follows a packed decision path through the auxiliary tree-info stream to resolve the id of
 * the leaf node the path ends in (see {@link #getLeafNodeId(double, byte[])}).
 */
public static class LeafDecisionPathTracker implements DecisionPathTracker<LeafDecisionPathTracker> {
    private final AuxInfoLightReader _auxInfo;
    private boolean _wentRight = false; // Was the last step _right_?

    // OUT
    private int _nodeId = 0; // Returned when the tree is empty (consistent with SharedTreeNode of an empty tree)

    private LeafDecisionPathTracker(byte[] auxTree) {
        _auxInfo = new AuxInfoLightReader(new ByteBufferWrapper(auxTree));
    }

    @Override
    public boolean go(int depth, boolean right) {
        if (!_auxInfo.hasNext()) {
            assert _wentRight || depth == 0; // this can only happen if previous step was _right_ or the tree has no nodes
            return false;
        }
        _auxInfo.readNext();
        if (right) {
            // NOTE(review): a second consecutive right-step whose record id no longer matches
            // the node we navigated to means the previous step already reached a leaf — stop.
            if (_wentRight && _nodeId != _auxInfo._nid)
                return false;
            _nodeId = _auxInfo.getRightNodeIdAndSkipNode();
            // Skip over all records belonging to the left subtree we are not descending into.
            _auxInfo.skipNodes(_auxInfo._numLeftChildren);
            _wentRight = true;
        } else { // left
            _wentRight = false;
            if (_auxInfo._numLeftChildren == 0) {
                _nodeId = _auxInfo.getLeftNodeIdAndSkipNode(); // left child is a leaf
                return false;
            } else {
                _auxInfo.skipNode(); // proceed to next _left_ node
            }
        }
        return true;
    }

    @Override
    public LeafDecisionPathTracker terminate() {
        return this;
    }

    final int getLeafNodeId() {
        return _nodeId;
    }
}
/**
 * Replays a bit-packed decision path (LSB = root decision) into the given tracker.
 *
 * @param leafAssignment path packed into the raw long bits of a double (see scoreTree)
 * @param tr visitor receiving one step per level, may stop the walk early
 * @return whatever the tracker produces on termination
 */
public static <T> T getDecisionPath(double leafAssignment, DecisionPathTracker<T> tr) {
    final long bits = Double.doubleToRawLongBits(leafAssignment);
    for (int depth = 0; depth < 64; depth++) {
        final boolean wentRight = (bits & (1L << depth)) != 0;
        if (!tr.go(depth, wentRight)) {
            break;
        }
    }
    return tr.terminate();
}
/** Renders the packed decision path as a string of 'L'/'R' steps. */
public static String getDecisionPath(double leafAssignment) {
    return getDecisionPath(leafAssignment, new StringDecisionPathTracker());
}

/** Resolves the packed decision path to the ending leaf's node id using the auxiliary tree info. */
public static int getLeafNodeId(double leafAssignment, byte[] auxTree) {
    LeafDecisionPathTracker tr = new LeafDecisionPathTracker(auxTree);
    return getDecisionPath(leafAssignment, tr).getLeafNodeId();
}
//------------------------------------------------------------------------------------------------------------------
// Computing a Tree Graph
//------------------------------------------------------------------------------------------------------------------
/**
 * Recursively decodes the serialized tree starting at the reader's current position into
 * {@code node}, attaching child nodes to {@code sg} and annotating them with the statistics
 * from {@code auxMap}. Mirrors the byte-level walk performed by {@link #scoreTree}.
 */
private static void computeTreeGraph(SharedTreeSubgraph sg, SharedTreeNode node, byte[] tree, ByteBufferWrapper ab, HashMap<Integer, AuxInfo> auxMap,
                                     int nclasses, String names[], String[][] domains) {
    int nodeType = ab.get1U();
    int colId = ab.get2();
    // colId == 65535 marks a leaf: the only payload is the prediction.
    if (colId == 65535) {
        float leafValue = ab.get4f();
        node.setPredValue(leafValue);
        return;
    }
    String colName = names[colId];
    node.setCol(colId, colName);

    int naSplitDir = ab.get1U();
    boolean naVsRest = naSplitDir == NsdNaVsRest;
    boolean leftward = naSplitDir == NsdNaLeft || naSplitDir == NsdLeft;
    node.setLeftward(leftward);
    node.setNaVsRest(naVsRest);

    int lmask = (nodeType & 51);
    int equal = (nodeType & 12);  // Can be one of 0, 8, 12
    assert equal != 4;  // no longer supported

    if (!naVsRest) {
        // Extract value or group to split on
        if (equal == 0) {
            // Standard float-compare test (either < or ==)
            float splitVal = ab.get4f();  // Get the float to compare
            node.setSplitValue(splitVal);
        } else {
            // Bitset test
            GenmodelBitSet bs = new GenmodelBitSet(0);
            if (equal == 8)
                bs.fill2(tree, ab);
            else
                bs.fill3(tree, ab);
            node.setBitset(domains[colId], bs);
        }
    }

    AuxInfo auxInfo = auxMap.get(node.getNodeNumber());

    // go RIGHT
    {
        // A fresh reader positioned past the left subtree (skip width encoded by lmask).
        ByteBufferWrapper ab2 = new ByteBufferWrapper(tree);
        ab2.skip(ab.position());

        switch (lmask) {
            case 0:
                ab2.skip(ab2.get1U());
                break;
            case 1:
                ab2.skip(ab2.get2());
                break;
            case 2:
                ab2.skip(ab2.get3());
                break;
            case 3:
                ab2.skip(ab2.get4());
                break;
            case 16:
                ab2.skip(nclasses < 256 ? 1 : 2);
                break;  // Small leaf
            case 48:
                ab2.skip(4);
                break;  // skip the prediction
            default:
                assert false : "illegal lmask value " + lmask + " in tree " + Arrays.toString(tree);
        }

        int lmask2 = (nodeType & 0xC0) >> 2;  // Replace leftmask with the rightmask

        SharedTreeNode newNode = sg.makeRightChildNode(node);
        newNode.setWeight(auxInfo.weightR);
        newNode.setNodeNumber(auxInfo.nidR);
        newNode.setPredValue(auxInfo.predR);
        newNode.setSquaredError(auxInfo.sqErrR);
        if ((lmask2 & 16) != 0) {
            float leafValue = ab2.get4f();
            newNode.setPredValue(leafValue);
            auxInfo.predR = leafValue;
        }
        else {
            computeTreeGraph(sg, newNode, tree, ab2, auxMap, nclasses, names, domains);
        }
    }

    // go LEFT
    {
        ByteBufferWrapper ab2 = new ByteBufferWrapper(tree);
        ab2.skip(ab.position());

        if (lmask <= 3)
            ab2.skip(lmask + 1);

        SharedTreeNode newNode = sg.makeLeftChildNode(node);
        newNode.setWeight(auxInfo.weightL);
        newNode.setNodeNumber(auxInfo.nidL);
        newNode.setPredValue(auxInfo.predL);
        newNode.setSquaredError(auxInfo.sqErrL);
        if ((lmask & 16) != 0) {
            float leafValue = ab2.get4f();
            newNode.setPredValue(leafValue);
            auxInfo.predL = leafValue;
        }
        else {
            computeTreeGraph(sg, newNode, tree, ab2, auxMap, nclasses, names, domains);
        }
    }

    // Root node: derive its prediction/error/weight from the children's aux statistics.
    if (node.getNodeNumber() == 0) {
        float p = (float)(((double)auxInfo.predL*(double)auxInfo.weightL + (double)auxInfo.predR*(double)auxInfo.weightR)/((double)auxInfo.weightL + (double)auxInfo.weightR));
        if (Math.abs(p) < 1e-7) p = 0;
        node.setPredValue(p);
        node.setSquaredError(auxInfo.sqErrR + auxInfo.sqErrL);
        node.setWeight(auxInfo.weightL + auxInfo.weightR);
    }
    checkConsistency(auxInfo, node);
}
/**
 * Compute a graph of the forest.
 *
 * @param treeToPrint index of a single tree group to render, or a negative value to render them all
 * @return A graph of the forest.
 */
public SharedTreeGraph _computeGraph(int treeToPrint) {
    SharedTreeGraph graph = new SharedTreeGraph();

    if (treeToPrint >= _ntree_groups) {
        throw new IllegalArgumentException("Tree " + treeToPrint + " does not exist (max " + _ntree_groups + ")");
    }

    // A negative index means "all groups"; otherwise start at (and render only) the requested group.
    for (int group = Math.max(treeToPrint, 0); group < _ntree_groups; group++) {
        for (int cls = 0; cls < _ntrees_per_group; cls++) {
            int itree = treeIndex(group, cls);
            String name = treeName(group, cls, getDomainValues(getResponseIdx()));
            SharedTreeSubgraph sg = graph.makeSubgraph(name);
            computeTreeGraph(sg, _compressed_trees[itree], _compressed_trees_aux[itree],
                    _nclasses, getNames(), getDomainValues());
        }
        if (treeToPrint >= 0) {
            break;
        }
    }

    return graph;
}
/** Decodes one serialized tree (plus auxiliary info) into a standalone subgraph. */
public static SharedTreeSubgraph computeTreeGraph(int treeNum, String treeName, byte[] tree, byte[] auxTreeInfo,
                                                  int nclasses, String[] names, String[][] domains) {
    final SharedTreeSubgraph subgraph = new SharedTreeSubgraph(treeNum, treeName);
    computeTreeGraph(subgraph, tree, auxTreeInfo, nclasses, names, domains);
    return subgraph;
}
/**
 * Seeds the subgraph with a root node (prediction/error initialized to NaN, filled in during
 * the recursive walk), decodes the auxiliary info, and kicks off the recursive decoder.
 */
private static void computeTreeGraph(SharedTreeSubgraph sg, byte[] tree, byte[] auxTreeInfo,
                                     int nclasses, String names[], String[][] domains) {
    SharedTreeNode node = sg.makeRootNode();
    node.setSquaredError(Float.NaN);
    node.setPredValue(Float.NaN);
    ByteBufferWrapper ab = new ByteBufferWrapper(tree);
    ByteBufferWrapper abAux = new ByteBufferWrapper(auxTreeInfo);
    HashMap<Integer, AuxInfo> auxMap = readAuxInfos(abAux);
    computeTreeGraph(sg, node, tree, ab, auxMap, nclasses, names, domains);
}
/**
 * Decodes the serialized auxiliary tree info into a map of node id -> {@link AuxInfo},
 * resolving each record's parent along the way.
 *
 * @param abAux reader positioned at the start of the serialized aux records
 * @return map from node id to its decoded {@link AuxInfo}
 * @throws IllegalStateException if a record references a node whose parent was never seen
 */
private static HashMap<Integer, AuxInfo> readAuxInfos(ByteBufferWrapper abAux) {
    HashMap<Integer, AuxInfo> auxMap = new HashMap<>();
    Map<Integer, AuxInfo> nodeIdToParent = new HashMap<>();
    nodeIdToParent.put(0, new AuxInfo()); // synthetic "no parent" record for the root
    boolean reservedFieldIsParentId = false; // In older H2O versions `reserved` field was used for parent id
    while (abAux.hasRemaining()) {
        AuxInfo auxInfo = new AuxInfo(abAux);
        if (auxMap.isEmpty()) {
            // The first record determines the format:
            // `-1` indicates No Parent, reserved >= 0 indicates reserved is not used for parent ids!
            reservedFieldIsParentId = auxInfo.reserved < 0;
        }
        AuxInfo parent = nodeIdToParent.get(auxInfo.nid);
        if (parent == null)
            throw new IllegalStateException("Parent for nodeId=" + auxInfo.nid + " not found.");
        assert !reservedFieldIsParentId || parent.nid == auxInfo.reserved : "Corrupted Tree Info: parent nodes do not correspond (pid: " +
                parent.nid + ", reserved: " + auxInfo.reserved + ")";
        auxInfo.setPid(parent.nid);
        nodeIdToParent.put(auxInfo.nidL, auxInfo);
        nodeIdToParent.put(auxInfo.nidR, auxInfo);
        auxMap.put(auxInfo.nid, auxInfo);
    }
    return auxMap;
}
/**
 * Builds a display name for a tree.
 *
 * @param groupIndex index of the tree group
 * @param classIndex index into {@code domainValues} (ignored when domains are absent)
 * @param domainValues response domain labels, or null for regression/binomial models
 * @return e.g. {@code "Tree 3"} or {@code "Tree 3, Class YES"}
 */
public static String treeName(int groupIndex, int classIndex, String[] domainValues) {
    // Append the class label only when a categorical response domain is present.
    final String suffix = (domainValues == null) ? "" : ", Class " + domainValues[classIndex];
    return "Tree " + groupIndex + suffix;
}
// Please see AuxInfo for details of the serialized format
/**
 * Streaming reader over serialized {@link AuxInfo} records that only materializes the two
 * leading fields needed for path tracking; everything else is skipped byte-wise.
 */
private static class AuxInfoLightReader {
    private final ByteBufferWrapper _abAux;
    int _nid; // node id of the record consumed by readNext()
    int _numLeftChildren; // second 4-byte field of the record (cf. AuxInfo.reserved)

    private AuxInfoLightReader(ByteBufferWrapper abAux) {
        _abAux = abAux;
    }

    /** Reads the first two 4-byte fields of the next record (node id + left-children count). */
    private void readNext() {
        _nid = _abAux.get4();
        _numLeftChildren = _abAux.get4();
    }

    private boolean hasNext() {
        return _abAux.hasRemaining();
    }

    /** Skips the 6 float fields (weights/preds/errors), reads nidL, then skips nidR. */
    private int getLeftNodeIdAndSkipNode() {
        _abAux.skip(4 * 6);
        int n = _abAux.get4();
        _abAux.skip(4);
        return n;
    }

    /** Skips the 6 float fields plus nidL (7 x 4 bytes), then reads nidR. */
    private int getRightNodeIdAndSkipNode() {
        _abAux.skip(4 * 7);
        return _abAux.get4();
    }

    /** Skips the rest of the current record (readNext() already consumed its first 8 bytes). */
    private void skipNode() {
        _abAux.skip(AuxInfo.SIZE - 8);
    }

    /** Skips {@code num} whole, unread records. */
    private void skipNodes(int num) {
        _abAux.skip(AuxInfo.SIZE * num);
    }
}
/**
 * In-memory image of one serialized auxiliary tree-info record.
 * A record is 10 consecutive 4-byte values; the read order in the
 * deserializing constructor below defines the on-disk layout.
 */
static class AuxInfo {

    // serialized record size in bytes: 10 fields x 4 bytes each
    private static int SIZE = 10 * 4;

    // Sentinel constructor: used as the artificial "parent" of the root node
    // (nid = -1, reserved = -1 meaning "no parent").
    private AuxInfo() {
        nid = -1;
        reserved = -1;
    }

    // Warning: any changes in this structure need to be reflected also in AuxInfoLightReader!!!
    AuxInfo(ByteBufferWrapper abAux) {
        // node ID
        nid = abAux.get4();
        // ignored - can contain either parent id or number of children (depending on a MOJO version)
        reserved = abAux.get4();
        //sum of observation weights (typically, that's just the count of observations)
        weightL = abAux.get4f();
        weightR = abAux.get4f();
        //predicted values
        predL = abAux.get4f();
        predR = abAux.get4f();
        //squared error
        sqErrL = abAux.get4f();
        sqErrR = abAux.get4f();
        //node IDs (consistent with tree construction)
        nidL = abAux.get4();
        nidR = abAux.get4();
    }

    // Records the parent node id; not serialized - derived in readAuxInfos().
    final void setPid(int parentId) {
        pid = parentId;
    }

    @Override public String toString() {
        return "nid: " + nid + "\n" +
                "pid: " + pid + "\n" +
                "nidL: " + nidL + "\n" +
                "nidR: " + nidR + "\n" +
                "weightL: " + weightL + "\n" +
                "weightR: " + weightR + "\n" +
                "predL: " + predL + "\n" +
                "predR: " + predR + "\n" +
                "sqErrL: " + sqErrL + "\n" +
                "sqErrR: " + sqErrR + "\n" +
                "reserved: " + reserved + "\n";
    }

    // node id, parent id (derived), left/right child node ids
    public int nid, pid, nidL, nidR;
    private final int reserved;
    // per-child weights, predictions and squared errors
    public float weightL, weightR, predL, predR, sqErrL, sqErrR;
}
/**
 * Cross-checks a reconstructed tree node against the auxiliary record serialized
 * for it (node ids, child ids, weights, predictions, squared errors).
 * Prints diagnostics to stdout when a mismatch is found; never throws.
 *
 * @param auxInfo auxiliary record read from the MOJO side-channel
 * @param node    node reconstructed from the compressed tree
 */
static void checkConsistency(AuxInfo auxInfo, SharedTreeNode node) {
    boolean ok = true;
    ok &= (auxInfo.nid == node.getNodeNumber());
    double sum = 0;
    if (node.leftChild != null) {
        ok &= (auxInfo.nidL == node.leftChild.getNodeNumber());
        ok &= (auxInfo.weightL == node.leftChild.getWeight());
        ok &= (auxInfo.predL == node.leftChild.predValue);
        ok &= (auxInfo.sqErrL == node.leftChild.squaredError);
        sum += node.leftChild.getWeight();
    }
    if (node.rightChild != null) {
        ok &= (auxInfo.nidR == node.rightChild.getNodeNumber());
        ok &= (auxInfo.weightR == node.rightChild.getWeight());
        ok &= (auxInfo.predR == node.rightChild.predValue);
        ok &= (auxInfo.sqErrR == node.rightChild.squaredError);
        sum += node.rightChild.getWeight();
    }
    if (node.parent != null) {
        ok &= (auxInfo.pid == node.parent.getNodeNumber());
        // child weights should add up to the node's weight (relative tolerance)
        ok &= (Math.abs(node.getWeight() - sum) < 1e-5 * (node.getWeight() + sum));
    }
    if (!ok) {
        System.out.println("\nTree inconsistency found:");
        node.print();
        // BUGFIX: children were previously printed unconditionally, which threw
        // an NPE whenever the inconsistent node was missing one (or both) of them.
        if (node.leftChild != null) node.leftChild.print();
        if (node.rightChild != null) node.rightChild.print();
        System.out.println(auxInfo.toString());
    }
}
//------------------------------------------------------------------------------------------------------------------
// Private
//------------------------------------------------------------------------------------------------------------------
/**
 * Creates the model shell by delegating to the base-class constructor;
 * the tree arrays and counters are populated by the MOJO reader.
 */
protected SharedTreeMojoModel(String[] columns, String[][] domains, String responseColumn) {
    super(columns, domains, responseColumn);
}
/**
 * Scores every tree group, accumulating per-class contributions into {@code preds}.
 * The output array is zeroed first, so prior contents are discarded.
 */
protected void scoreAllTrees(double[] row, double[] preds) {
    // Reset accumulators, then delegate to the range scorer over all groups.
    java.util.Arrays.fill(preds, 0.0);
    scoreTreeRange(row, 0, _ntree_groups, preds);
}
/**
 * Transforms raw tree predictions into the final model predictions.
 * For classification: converts tree preds into a probability distribution and
 * picks the predicted class. For regression: projects the tree prediction from
 * link-space into the original space.
 *
 * @param row input row.
 * @param offset offset added to the prediction (algorithm-specific).
 * @param preds final output, same structure as of {@link SharedTreeMojoModel#score0}.
 * @return preds array.
 */
public abstract double[] unifyPreds(double[] row, double offset, double[] preds);
/**
 * Generates a (per-class) prediction using only a single tree group.
 *
 * @param row   input row
 * @param index index of the tree (0..N-1)
 * @param preds array of partial predictions, accumulated into
 */
public final void scoreSingleTree(double[] row, int index, double preds[]) {
    // Delegate to the range scorer with a one-group window [index, index + 1).
    scoreTreeRange(row, index, index + 1, preds);
}
/**
 * Generates (partial, per-class) predictions using only trees from a given range
 * of tree groups.
 *
 * @param row       input row
 * @param fromIndex low endpoint (inclusive) of the tree-group range
 * @param toIndex   high endpoint (exclusive) of the tree-group range
 * @param preds     array of partial predictions, accumulated into.
 *                  To get final predictions pass the result to
 *                  {@link SharedTreeMojoModel#unifyPreds}.
 */
public final void scoreTreeRange(double[] row, int fromIndex, int toIndex, double[] preds) {
    // Single-class (regression) models accumulate into preds[0]; classifiers
    // reserve preds[0] for the label and start per-class scores at preds[1].
    final int outputOffset = (_nclasses == 1) ? 0 : 1;
    for (int cls = 0; cls < _ntrees_per_group; cls++) {
        final int outputIndex = outputOffset + cls;
        // Trees are laid out class-major, so consecutive groups of the same
        // class are adjacent - walk the flat index instead of recomputing it.
        int treePos = treeIndex(fromIndex, cls);
        for (int group = fromIndex; group < toIndex; group++, treePos++) {
            if (_compressed_trees[treePos] != null) { // empty trees contribute nothing
                preds[outputIndex] += _scoreTree.scoreTree(_compressed_trees[treePos], row, _nclasses, false, _domains);
            }
        }
    }
}
// note that _ntree_group = _treekeys.length
// ntrees_per_group = _treeKeys[0].length
/**
 * Builds display names ("T&lt;group&gt;.C&lt;class&gt;", 1-based) for the decision-path
 * output columns, positioned by the same flat tree index used by _compressed_trees.
 */
public String[] getDecisionPathNames() {
    int classTrees = 0;
    // Count classes that have a non-null tree in the first tree group.
    for (int i = 0; i < _ntrees_per_group; ++i) {
        int itree = treeIndex(0, i);
        if (_compressed_trees[itree] != null) classTrees++;
    }
    final int outputcols = _ntree_groups * classTrees;
    final String[] names = new String[outputcols];
    // NOTE(review): names is indexed by the flat tree index but sized by the
    // number of classes with a tree in group 0; if null trees are not the
    // trailing classes this could leave gaps or overrun - confirm with callers.
    for (int c = 0; c < _ntrees_per_group; c++) {
        for (int tidx = 0; tidx < _ntree_groups; tidx++) {
            int itree = treeIndex(tidx, c);
            if (_compressed_trees[itree] != null) {
                names[itree] = "T" + (tidx + 1) + ".C" + (c + 1);
            }
        }
    }
    return names;
}
/** Result holder for {@link #getLeafNodeAssignments(double[])}. */
public static class LeafNodeAssignments {
    // decision path per tree, as produced by getDecisionPath(double)
    public String[] _paths;
    // leaf node id per tree; only populated for MOJO version >= 1.3
    public int[] _nodeIds;
}
/**
 * Traces the given row through every tree and reports, per tree, the decision
 * path taken and (when the MOJO is new enough) the id of the leaf reached.
 */
public LeafNodeAssignments getLeafNodeAssignments(final double[] row) {
    final LeafNodeAssignments result = new LeafNodeAssignments();
    result._paths = new String[_compressed_trees.length];
    // Leaf ids require the auxiliary tree info, available only in MOJO >= 1.3.
    final boolean nodeIdsSupported = _mojo_version >= 1.3 && _compressed_trees_aux != null;
    if (nodeIdsSupported) {
        result._nodeIds = new int[_compressed_trees_aux.length];
    }
    traceDecisions(row, result._paths, result._nodeIds);
    return result;
}
/** Traces the row through every tree and returns only the decision paths. */
public String[] getDecisionPath(final double[] row) {
    final String[] decisionPaths = new String[_compressed_trees.length];
    traceDecisions(row, decisionPaths, null);
    return decisionPaths;
}
/**
 * Shared tracing routine: scores every tree with leaf-assignment mode on and
 * decodes the result into a path string and/or a leaf node id. Either output
 * array may be null to skip that decoding step.
 */
private void traceDecisions(final double[] row, String[] paths, int[] nodeIds) {
    if (_mojo_version < 1.2) {
        throw new IllegalArgumentException("You can only obtain decision tree path with mojo versions 1.2 or higher");
    }
    for (int group = 0; group < _ntree_groups; group++) {
        for (int cls = 0; cls < _ntrees_per_group; cls++) {
            final int itree = treeIndex(group, cls);
            // computeLeafAssignment=true: d encodes the traversal, not a prediction
            final double d = scoreTree(_compressed_trees[itree], row, _nclasses, true, _domains);
            if (paths != null) {
                paths[itree] = SharedTreeMojoModel.getDecisionPath(d);
            }
            if (nodeIds != null) {
                assert _mojo_version >= 1.3;
                nodeIds[itree] = SharedTreeMojoModel.getLeafNodeId(d, _compressed_trees_aux[itree]);
            }
        }
    }
}
/**
 * Locates a tree in the flat array of compressed trees. Trees are laid out
 * class-major: all groups of class 0 first, then all groups of class 1, etc.
 *
 * @param groupIndex index of the tree in a class-group of trees
 * @param classIndex index of the class
 * @return index of the tree in _compressed_trees.
 */
final int treeIndex(int groupIndex, int classIndex) {
    return groupIndex + classIndex * _ntree_groups;
}
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
/////////////////////////////////////////////////////
/**
 * Scores a single compressed tree against one input row.
 * SET IN STONE FOR MOJO VERSION "1.00" - DO NOT CHANGE.
 *
 * @param tree serialized tree; node layout is decoded inline below
 * @param row input row, indexed directly by the column id stored in each node
 * @param nclasses number of classes (decides the width of "small leaf" skips)
 * @param computeLeafAssignment when true, returns the left/right traversal bits
 *        (decode via Double.doubleToRawLongBits) instead of the leaf value
 * @return leaf prediction, or the bit-encoded decision path
 */
@SuppressWarnings("ConstantConditions") // Complains that the code is too complex. Well duh!
public static double scoreTree0(byte[] tree, double[] row, int nclasses, boolean computeLeafAssignment) {
    ByteBufferWrapper ab = new ByteBufferWrapper(tree);
    GenmodelBitSet bs = null; // Lazily set on hitting first group test
    long bitsRight = 0;
    int level = 0;
    while (true) {
        int nodeType = ab.get1U();
        int colId = ab.get2();
        // colId 0xFFFF marks a leaf record: the float payload is the prediction
        if (colId == 65535) return ab.get4f();
        int naSplitDir = ab.get1U();
        boolean naVsRest = naSplitDir == NsdNaVsRest;
        boolean leftward = naSplitDir == NsdNaLeft || naSplitDir == NsdLeft;
        // low bits of nodeType (mask 0b110011) encode how the left subtree is stored
        int lmask = (nodeType & 51);
        int equal = (nodeType & 12); // Can be one of 0, 8, 12
        assert equal != 4; // no longer supported
        float splitVal = -1;
        if (!naVsRest) {
            // Extract value or group to split on
            if (equal == 0) {
                // Standard float-compare test (either < or ==)
                splitVal = ab.get4f(); // Get the float to compare
            } else {
                // Bitset test
                if (bs == null) bs = new GenmodelBitSet(0);
                if (equal == 8)
                    bs.fill2(tree, ab);
                else
                    bs.fill3_1(tree, ab);
            }
        }
        double d = row[colId];
        // NA routes by `leftward`; otherwise numeric compare or bitset membership
        if (Double.isNaN(d)? !leftward : !naVsRest && (equal == 0? d >= splitVal : bs.contains0((int)d))) {
            // go RIGHT: skip over the left subtree (size encoding chosen by lmask)
            switch (lmask) {
                case 0: ab.skip(ab.get1U()); break;
                case 1: ab.skip(ab.get2()); break;
                case 2: ab.skip(ab.get3()); break;
                case 3: ab.skip(ab.get4()); break;
                case 16: ab.skip(nclasses < 256? 1 : 2); break; // Small leaf
                case 48: ab.skip(4); break; // skip the prediction
                default:
                    assert false : "illegal lmask value " + lmask + " in tree " + Arrays.toString(tree);
            }
            // NOTE(review): `1 << level` is an int shift, so for level in [32, 64)
            // the shift distance wraps mod 32 (JLS 15.19); `1L << level` was likely
            // intended. Behavior is frozen by the MOJO 1.00 contract - do not change.
            if (computeLeafAssignment && level < 64) bitsRight |= 1 << level;
            lmask = (nodeType & 0xC0) >> 2; // Replace leftmask with the rightmask
        } else {
            // go LEFT
            if (lmask <= 3)
                ab.skip(lmask + 1);
        }
        level++;
        if ((lmask & 16) != 0) {
            if (computeLeafAssignment) {
                bitsRight |= 1 << level; // mark the end of the tree
                return Double.longBitsToDouble(bitsRight);
            } else {
                return ab.get4f();
            }
        }
    }
}
/**
 * Scores a single compressed tree against one input row.
 * SET IN STONE FOR MOJO VERSION "1.10" - DO NOT CHANGE.
 * Differs from scoreTree0 only in NA routing: categorical values outside the
 * bitset's range are routed like NAs, and membership uses contains() instead
 * of contains0().
 *
 * @param tree serialized tree; node layout is decoded inline below
 * @param row input row, indexed directly by the column id stored in each node
 * @param nclasses number of classes (decides the width of "small leaf" skips)
 * @param computeLeafAssignment when true, returns the left/right traversal bits
 *        (decode via Double.doubleToRawLongBits) instead of the leaf value
 * @return leaf prediction, or the bit-encoded decision path
 */
@SuppressWarnings("ConstantConditions") // Complains that the code is too complex. Well duh!
public static double scoreTree1(byte[] tree, double[] row, int nclasses, boolean computeLeafAssignment) {
    ByteBufferWrapper ab = new ByteBufferWrapper(tree);
    GenmodelBitSet bs = null;
    long bitsRight = 0;
    int level = 0;
    while (true) {
        int nodeType = ab.get1U();
        int colId = ab.get2();
        // colId 0xFFFF marks a leaf record: the float payload is the prediction
        if (colId == 65535) return ab.get4f();
        int naSplitDir = ab.get1U();
        boolean naVsRest = naSplitDir == NsdNaVsRest;
        boolean leftward = naSplitDir == NsdNaLeft || naSplitDir == NsdLeft;
        // low bits of nodeType (mask 0b110011) encode how the left subtree is stored
        int lmask = (nodeType & 51);
        int equal = (nodeType & 12); // Can be one of 0, 8, 12
        assert equal != 4; // no longer supported
        float splitVal = -1;
        if (!naVsRest) {
            // Extract value or group to split on
            if (equal == 0) {
                // Standard float-compare test (either < or ==)
                splitVal = ab.get4f(); // Get the float to compare
            } else {
                // Bitset test
                if (bs == null) bs = new GenmodelBitSet(0);
                if (equal == 8)
                    bs.fill2(tree, ab);
                else
                    bs.fill3_1(tree, ab);
            }
        }
        double d = row[colId];
        // NA - and out-of-range categorical levels - route by `leftward`;
        // otherwise numeric compare or bitset membership decides the direction
        if (Double.isNaN(d) || ( equal != 0 && bs != null && !bs.isInRange((int)d) )
                ? !leftward : !naVsRest && (equal == 0? d >= splitVal : bs.contains((int)d))) {
            // go RIGHT: skip over the left subtree (size encoding chosen by lmask)
            switch (lmask) {
                case 0: ab.skip(ab.get1U()); break;
                case 1: ab.skip(ab.get2()); break;
                case 2: ab.skip(ab.get3()); break;
                case 3: ab.skip(ab.get4()); break;
                case 16: ab.skip(nclasses < 256? 1 : 2); break; // Small leaf
                case 48: ab.skip(4); break; // skip the prediction
                default:
                    assert false : "illegal lmask value " + lmask + " in tree " + Arrays.toString(tree);
            }
            // NOTE(review): `1 << level` is an int shift (wraps mod 32 for level >= 32,
            // JLS 15.19); `1L << level` was likely intended. Frozen by the MOJO 1.10
            // compatibility contract - do not change.
            if (computeLeafAssignment && level < 64) bitsRight |= 1 << level;
            lmask = (nodeType & 0xC0) >> 2; // Replace leftmask with the rightmask
        } else {
            // go LEFT
            if (lmask <= 3)
                ab.skip(lmask + 1);
        }
        level++;
        if ((lmask & 16) != 0) {
            if (computeLeafAssignment) {
                bitsRight |= 1 << level; // mark the end of the tree
                return Double.longBitsToDouble(bitsRight);
            } else {
                return ab.get4f();
            }
        }
    }
}
/**
 * Rescales binomial class probabilities using the fitted calibration GLM:
 * preds[1] is pushed through a logistic transform of an affine function of
 * itself, and both class slots are rewritten to the calibrated distribution.
 *
 * @return true if calibration was applied, false when no coefficients exist
 */
@Override
public boolean calibrateClassProbabilities(double[] preds) {
    if (_calib_glm_beta == null) {
        return false; // model carries no calibration coefficients
    }
    assert _nclasses == 2; // only supported for binomial classification
    assert preds.length == _nclasses + 1;
    final double linear = preds[1] * _calib_glm_beta[0] + _calib_glm_beta[1];
    final double calibrated = GLM_logitInv(linear);
    preds[1] = 1 - calibrated;
    preds[2] = calibrated;
    return true;
}
}
|
package edu.upc.eetac.dsa.beeter.dao;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Enumeration;
import java.util.Properties;
import java.util.PropertyResourceBundle;
import java.util.ResourceBundle;
/**
* Created by Guillermo on 09/03/2016.
*/
/**
 * Lazily-initialized singleton wrapper around a HikariCP connection pool.
 * Pool settings are read from the "hikari" resource bundle on first use.
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Database {

    private static Database instance = null;

    private DataSource ds;

    /**
     * Copies every key of the "hikari" resource bundle into a {@link Properties}
     * object and builds the pooled {@link DataSource} from it.
     */
    private Database() {
        PropertyResourceBundle prb = (PropertyResourceBundle) ResourceBundle.getBundle("hikari");
        Enumeration<String> keys = prb.getKeys();
        Properties properties = new Properties();
        while (keys.hasMoreElements()) {
            String key = keys.nextElement();
            properties.setProperty(key, prb.getString(key));
        }
        HikariConfig config = new HikariConfig(properties);
        ds = new HikariDataSource(config);
    }

    /**
     * Returns the singleton, creating it on first use.
     * BUGFIX: synchronized - without it, two threads racing through the null
     * check could each construct a HikariDataSource, leaking one whole pool.
     */
    private static synchronized Database getInstance() {
        if (instance == null)
            instance = new Database();
        return instance;
    }

    /**
     * Borrows a connection from the pool; callers are responsible for closing it.
     *
     * @throws SQLException if a connection cannot be obtained
     */
    public static Connection getConnection() throws SQLException {
        return getInstance().ds.getConnection();
    }
}
|
/**
* Copyright 2016 Nikita Koksharov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.redisson.spring.cache;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentHashMap;
import org.redisson.api.RMap;
import org.redisson.api.RMapCache;
import org.redisson.api.RedissonClient;
import org.redisson.client.codec.Codec;
import org.springframework.beans.factory.BeanDefinitionStoreException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.context.ResourceLoaderAware;
import org.springframework.core.io.Resource;
import org.springframework.core.io.ResourceLoader;
/**
* A {@link org.springframework.cache.CacheManager} implementation
* backed by Redisson instance.
*
* @author Nikita Koksharov
*
*/
@SuppressWarnings("unchecked")
public class RedissonSpringCacheManager implements CacheManager, ResourceLoaderAware, InitializingBean {
ResourceLoader resourceLoader;
private boolean dynamic = true;
private boolean allowNullValues = true;
Codec codec;
RedissonClient redisson;
Map<String, CacheConfig> configMap = new ConcurrentHashMap<String, CacheConfig>();
ConcurrentMap<String, Cache> instanceMap = new ConcurrentHashMap<String, Cache>();
String configLocation;
/**
* Creates CacheManager supplied by Redisson instance
*
* @param redisson object
*/
public RedissonSpringCacheManager(RedissonClient redisson) {
this(redisson, (String)null, null);
}
/**
* Creates CacheManager supplied by Redisson instance and
* Cache config mapped by Cache name
*
* @param redisson object
* @param config object
*/
public RedissonSpringCacheManager(RedissonClient redisson, Map<String, ? extends CacheConfig> config) {
this(redisson, config, null);
}
/**
* Creates CacheManager supplied by Redisson instance, Codec instance
* and Cache config mapped by Cache name.
* <p>
* Each Cache instance share one Codec instance.
*
* @param redisson object
* @param config object
* @param codec object
*/
public RedissonSpringCacheManager(RedissonClient redisson, Map<String, ? extends CacheConfig> config, Codec codec) {
this.redisson = redisson;
this.configMap = (Map<String, CacheConfig>) config;
this.codec = codec;
}
/**
* Creates CacheManager supplied by Redisson instance
* and Cache config mapped by Cache name.
* <p>
* Loads the config file from the class path, interpreting plain paths as class path resource names
* that include the package path (e.g. "mypackage/myresource.txt").
*
* @param redisson object
* @param configLocation path
*/
public RedissonSpringCacheManager(RedissonClient redisson, String configLocation) {
this(redisson, configLocation, null);
}
/**
* Creates CacheManager supplied by Redisson instance, Codec instance
* and Config location path.
* <p>
* Each Cache instance share one Codec instance.
* <p>
* Loads the config file from the class path, interpreting plain paths as class path resource names
* that include the package path (e.g. "mypackage/myresource.txt").
*
* @param redisson object
* @param configLocation path
* @param codec object
*/
public RedissonSpringCacheManager(RedissonClient redisson, String configLocation, Codec codec) {
this.redisson = redisson;
this.configLocation = configLocation;
this.codec = codec;
}
/**
* Defines possibility of storing {@code null} values.
* <p>
* Default is <code>true</code>
*
* @param allowNullValues - stores if <code>true</code>
*/
public void setAllowNullValues(boolean allowNullValues) {
this.allowNullValues = allowNullValues;
}
/**
* Defines 'fixed' cache names.
* A new cache instance will not be created in dynamic for non-defined names.
* <p>
* `null` parameter setups dynamic mode
*
* @param names of caches
*/
public void setCacheNames(Collection<String> names) {
if (names != null) {
for (String name : names) {
getCache(name);
}
dynamic = false;
} else {
dynamic = true;
}
}
/**
* Set cache config location
*
* @param configLocation object
*/
public void setConfigLocation(String configLocation) {
this.configLocation = configLocation;
}
/**
* Set cache config mapped by cache name
*
* @param config object
*/
public void setConfig(Map<String, ? extends CacheConfig> config) {
this.configMap = (Map<String, CacheConfig>) config;
}
/**
* Set Redisson instance
*
* @param redisson instance
*/
public void setRedisson(RedissonClient redisson) {
this.redisson = redisson;
}
/**
* Set Codec instance shared between all Cache instances
*
* @param codec object
*/
public void setCodec(Codec codec) {
this.codec = codec;
}
protected CacheConfig createDefaultConfig() {
return new CacheConfig();
}
@Override
public Cache getCache(String name) {
Cache cache = instanceMap.get(name);
if (cache != null) {
return cache;
}
if (!dynamic) {
return cache;
}
CacheConfig config = configMap.get(name);
if (config == null) {
config = createDefaultConfig();
configMap.put(name, config);
}
if (config.getMaxIdleTime() == 0 && config.getTTL() == 0 && config.getMaxSize() == 0) {
return createMap(name, config);
}
return createMapCache(name, config);
}
private Cache createMap(String name, CacheConfig config) {
RMap<Object, Object> map = getMap(name, config);
Cache cache = new RedissonCache(map, allowNullValues);
Cache oldCache = instanceMap.putIfAbsent(name, cache);
if (oldCache != null) {
cache = oldCache;
}
return cache;
}
protected RMap<Object, Object> getMap(String name, CacheConfig config) {
if (codec != null) {
return redisson.getMap(name, codec);
}
return redisson.getMap(name);
}
private Cache createMapCache(String name, CacheConfig config) {
RMapCache<Object, Object> map = getMapCache(name, config);
Cache cache = new RedissonCache(map, config, allowNullValues);
Cache oldCache = instanceMap.putIfAbsent(name, cache);
if (oldCache != null) {
cache = oldCache;
} else {
map.setMaxSize(config.getMaxSize());
}
return cache;
}
protected RMapCache<Object, Object> getMapCache(String name, CacheConfig config) {
if (codec != null) {
return redisson.getMapCache(name, codec);
}
return redisson.getMapCache(name);
}
@Override
public Collection<String> getCacheNames() {
return Collections.unmodifiableSet(configMap.keySet());
}
@Override
public void setResourceLoader(ResourceLoader resourceLoader) {
this.resourceLoader = resourceLoader;
}
@Override
public void afterPropertiesSet() throws Exception {
if (configLocation == null) {
return;
}
Resource resource = resourceLoader.getResource(configLocation);
try {
this.configMap = (Map<String, CacheConfig>) CacheConfig.fromJSON(resource.getInputStream());
} catch (IOException e) {
// try to read yaml
try {
this.configMap = (Map<String, CacheConfig>) CacheConfig.fromYAML(resource.getInputStream());
} catch (IOException e1) {
throw new BeanDefinitionStoreException(
"Could not parse cache configuration at [" + configLocation + "]", e1);
}
}
}
}
|
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jaxp.library;
import java.net.SocketPermission;
import org.testng.ITestContext;
/**
 * Test policy that grants network access: registers socket permissions for the
 * two hosts (openjdk.java.net and www.w3.org) before the test context starts.
 */
public class NetAccessPolicy extends BasePolicy {

    @Override
    public void onStart(ITestContext arg0) {
        // suppose to only run othervm mode
        if (isRunWithSecurityManager()) {
            // Register connect/resolve permissions with the shared policy manager
            // (created on demand by getJAXPPolicyManager(true)).
            JAXPPolicyManager policyManager = JAXPPolicyManager.getJAXPPolicyManager(true);
            policyManager.addPermission(new SocketPermission("openjdk.java.net:80", "connect,resolve"));
            policyManager.addPermission(new SocketPermission("www.w3.org:80", "connect,resolve"));
        }
    }

}
|
package io.choerodon.file.api.controller.vo;
import java.util.Date;
import io.swagger.annotations.ApiModelProperty;
/**
 * Data transfer object describing one execution record of a CD pipeline job:
 * identity, type/status, trigger, timing, and deployment/callback metadata.
 * <p>
 * NOTE(review): setters are inconsistent - some return {@code this} (fluent),
 * others {@code void}; kept as-is because callers may rely on either form.
 */
public class DevopsCdJobRecordDTO {
    private Long id;
    private String name;
    private Long jobId;
    private Long stageRecordId;
    private String type;
    private String status;
    private String triggerType;
    private String triggerValue;
    private Long projectId;
    private String metadata;
    // whether counter-signing (multi-approver sign-off) is required
    @ApiModelProperty("是否会签")
    private Integer countersigned;
    // execution order of the job within its stage
    @ApiModelProperty("任务顺序")
    private Long sequence;
    private Date startedDate;
    private Date finishedDate;
    private Long durationSeconds;
    // host-deploy: artifact repository details
    @ApiModelProperty("主机部署 制品库详情")
    private String deployMetadata;
    private Long deployInfoId;
    private Long commandId;
    private Long apiTestTaskRecordId;
    // auth token for the external gate task's callback
    @ApiModelProperty("外部卡点任务回调认证token")
    private String callbackToken;
    // id of the pipeline record this job record belongs to
    @ApiModelProperty("流水线记录id")
    private Long pipelineRecordId;
    // stage name (denormalized for display)
    @ApiModelProperty("阶段名称")
    private String stageName;
    // pipeline name (denormalized for display)
    @ApiModelProperty("流水线名称")
    private String pipelineName;
    private Long pipelineId;
    // id of the CI/CD record relation
    @ApiModelProperty("ci 和 cd 关联关系id")
    private Long devopsPipelineRecordRelId;
    // job log content
    @ApiModelProperty("日志信息")
    private String log;

    public String getLog() {
        return log;
    }

    public void setLog(String log) {
        this.log = log;
    }

    public Long getPipelineRecordId() {
        return pipelineRecordId;
    }

    public DevopsCdJobRecordDTO setPipelineRecordId(Long pipelineRecordId) {
        this.pipelineRecordId = pipelineRecordId;
        return this;
    }

    public String getStageName() {
        return stageName;
    }

    public DevopsCdJobRecordDTO setStageName(String stageName) {
        this.stageName = stageName;
        return this;
    }

    public String getPipelineName() {
        return pipelineName;
    }

    public void setPipelineName(String pipelineName) {
        this.pipelineName = pipelineName;
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Long getStageRecordId() {
        return stageRecordId;
    }

    public void setStageRecordId(Long stageRecordId) {
        this.stageRecordId = stageRecordId;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getStatus() {
        return status;
    }

    public void setStatus(String status) {
        this.status = status;
    }

    public String getTriggerType() {
        return triggerType;
    }

    public void setTriggerType(String triggerType) {
        this.triggerType = triggerType;
    }

    public String getTriggerValue() {
        return triggerValue;
    }

    public void setTriggerValue(String triggerValue) {
        this.triggerValue = triggerValue;
    }

    public Long getProjectId() {
        return projectId;
    }

    public void setProjectId(Long projectId) {
        this.projectId = projectId;
    }

    public String getMetadata() {
        return metadata;
    }

    public void setMetadata(String metadata) {
        this.metadata = metadata;
    }

    public Integer getCountersigned() {
        return countersigned;
    }

    public void setCountersigned(Integer countersigned) {
        this.countersigned = countersigned;
    }

    public Long getSequence() {
        return sequence;
    }

    public void setSequence(Long sequence) {
        this.sequence = sequence;
    }

    public Date getStartedDate() {
        return startedDate;
    }

    public void setStartedDate(Date startedDate) {
        this.startedDate = startedDate;
    }

    public Date getFinishedDate() {
        return finishedDate;
    }

    public void setFinishedDate(Date finishedDate) {
        this.finishedDate = finishedDate;
    }

    public String getDeployMetadata() {
        return deployMetadata;
    }

    public void setDeployMetadata(String deployMetadata) {
        this.deployMetadata = deployMetadata;
    }

    public Long getDurationSeconds() {
        return durationSeconds;
    }

    public void setDurationSeconds(Long durationSeconds) {
        this.durationSeconds = durationSeconds;
    }

    public Long getJobId() {
        return jobId;
    }

    public void setJobId(Long jobId) {
        this.jobId = jobId;
    }

    public Long getDeployInfoId() {
        return deployInfoId;
    }

    public void setDeployInfoId(Long deployInfoId) {
        this.deployInfoId = deployInfoId;
    }

    public Long getCommandId() {
        return commandId;
    }

    public void setCommandId(Long commandId) {
        this.commandId = commandId;
    }

    public Long getPipelineId() {
        return pipelineId;
    }

    public DevopsCdJobRecordDTO setPipelineId(Long pipelineId) {
        this.pipelineId = pipelineId;
        return this;
    }

    public Long getDevopsPipelineRecordRelId() {
        return devopsPipelineRecordRelId;
    }

    public DevopsCdJobRecordDTO setDevopsPipelineRecordRelId(Long devopsPipelineRecordRelId) {
        this.devopsPipelineRecordRelId = devopsPipelineRecordRelId;
        return this;
    }

    public Long getApiTestTaskRecordId() {
        return apiTestTaskRecordId;
    }

    public void setApiTestTaskRecordId(Long apiTestTaskRecordId) {
        this.apiTestTaskRecordId = apiTestTaskRecordId;
    }

    public String getCallbackToken() {
        return callbackToken;
    }

    public void setCallbackToken(String callbackToken) {
        this.callbackToken = callbackToken;
    }

    @Override
    public String toString() {
        return "DevopsCdJobRecordDTO{" +
                "id=" + id +
                ", name='" + name + '\'' +
                ", jobId=" + jobId +
                ", stageRecordId=" + stageRecordId +
                ", type='" + type + '\'' +
                ", status='" + status + '\'' +
                ", triggerType='" + triggerType + '\'' +
                ", triggerValue='" + triggerValue + '\'' +
                ", projectId=" + projectId +
                ", metadata='" + metadata + '\'' +
                ", countersigned=" + countersigned +
                ", sequence=" + sequence +
                ", startedDate=" + startedDate +
                ", finishedDate=" + finishedDate +
                ", durationSeconds=" + durationSeconds +
                ", deployMetadata='" + deployMetadata + '\'' +
                ", deployInfoId=" + deployInfoId +
                ", commandId=" + commandId +
                ", apiTestTaskRecordId=" + apiTestTaskRecordId +
                ", callbackToken='" + callbackToken + '\'' +
                ", pipelineRecordId=" + pipelineRecordId +
                ", stageName='" + stageName + '\'' +
                ", pipelineName='" + pipelineName + '\'' +
                ", pipelineId=" + pipelineId +
                ", devopsPipelineRecordRelId=" + devopsPipelineRecordRelId +
                ", log='" + log + '\'' +
                '}';
    }
}
|
package com.jivesoftware.os.filer.io.map;
import java.util.Arrays;
/**
 * Mutable bookkeeping for a skip-list-backed map: wraps the underlying
 * {@link MapContext} together with the skip-list parameters (maximum tower
 * height, key comparator, and the head node's index and key bytes).
 *
 * @author jonathan
 */
public class SkipListMapContext {

    public final MapContext mapContext;
    byte maxHeight;
    SkipListComparator keyComparator;
    int headIndex;
    byte[] headKey;

    public SkipListMapContext(MapContext mapContext,
        byte maxHeight,
        int headIndex,
        byte[] headKey,
        SkipListComparator keyComparator) {
        this.mapContext = mapContext;
        this.maxHeight = maxHeight;
        this.keyComparator = keyComparator;
        this.headIndex = headIndex;
        this.headKey = headKey;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("SkipListMapContext{");
        sb.append("mapContext=").append(mapContext);
        sb.append(", maxHeight=").append(maxHeight);
        sb.append(", keyComparator=").append(keyComparator);
        sb.append(", headIndex=").append(headIndex);
        sb.append(", headKey=").append(Arrays.toString(headKey));
        sb.append('}');
        return sb.toString();
    }
}
|
// Copyright 2009 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.visualization.datasource.query;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import java.util.List;
/**
* Grouping definition for a query. Grouping is defined as a list of column IDs
* to group by.
*
* @author Yoav G.
* @author Yonatan B.Y.
* @author Liron L.
*/
public class QueryGroup {
/**
* The list of group-by columns.
*/
private List<AbstractColumn> columns;
/**
* Constructs a query group with empty lists.
*/
public QueryGroup() {
columns = Lists.newArrayList();
}
/**
* Add a column to group by.
*
* @param column
* The column to add.
*/
public void addColumn(AbstractColumn column) {
columns.add(column);
}
/**
* Returns the list of group-by IDs. This list is immutable.
*
* @return The list of group-by IDs. This list is immutable.
*/
public List<String> getColumnIds() {
List<String> columnIds = Lists.newArrayList();
for (AbstractColumn col : columns) {
columnIds.add(col.getId());
}
return ImmutableList.copyOf(columnIds);
}
/**
* Returns a list of all simple columns' IDs in this group.
*
* @return A list of all simple columns' IDs in this group.
*/
public List<String> getSimpleColumnIds() {
List<String> columnIds = Lists.newArrayList();
for (AbstractColumn col : columns) {
columnIds.addAll(col.getAllSimpleColumnIds());
}
return columnIds;
}
/**
* Returns the list of group-by columns. This list is immutable.
*
* @return The list of group-by columns. This list is immutable.
*/
public List<AbstractColumn> getColumns() {
return ImmutableList.copyOf(columns);
}
/**
* Returns the list of simple columns included in the group-by section.
*
* @return The list of simple columns included in the group-by section.
*/
public List<SimpleColumn> getSimpleColumns() {
List<SimpleColumn> simpleColumns = Lists.newArrayList();
for (AbstractColumn col : columns) {
simpleColumns.addAll(col.getAllSimpleColumns());
}
return simpleColumns;
}
/**
* Returns the list of scalar function columns included in the group-by
* section.
*
* @return The list of scalar function columns included in the group-by
* section
*/
public List<ScalarFunctionColumn> getScalarFunctionColumns() {
List<ScalarFunctionColumn> scalarFunctionColumns = Lists.newArrayList();
for (AbstractColumn col : columns) {
scalarFunctionColumns.addAll(col.getAllScalarFunctionColumns());
}
return scalarFunctionColumns;
}
@Override
public int hashCode() {
  // Same value as the classic "prime * 1 + fieldHash" pattern.
  final int prime = 31;
  int hash = prime;
  hash += (columns == null) ? 0 : columns.hashCode();
  return hash;
}
@Override
public boolean equals(Object obj) {
  if (this == obj) {
    return true;
  }
  if (obj == null || getClass() != obj.getClass()) {
    return false;
  }
  QueryGroup other = (QueryGroup) obj;
  // Equal iff both column lists are null, or they compare equal.
  return (columns == null) ? (other.columns == null) : columns.equals(other.columns);
}
/**
 * Returns a string that, when fed to the query parser, would produce an
 * equal QueryGroup. The GROUP BY keywords themselves are not included.
 *
 * @return The query string.
 */
public String toQueryString() {
  // Delegate to the shared column-list serializer.
  return Query.columnListToQueryString(this.columns);
}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.interpreter.launcher;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.zeppelin.conf.ZeppelinConfiguration;
import org.apache.zeppelin.interpreter.remote.RemoteInterpreterProcess;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
/**
* Start interpreter in yarn container.
*/
public class YarnRemoteInterpreterProcess extends RemoteInterpreterProcess {
  private static Logger LOGGER = LoggerFactory.getLogger(YarnRemoteInterpreterProcess.class);

  // Host/port of the launched interpreter process; set via processStarted()
  // callback once the remote process reports in.
  private String host;
  private int port = -1;
  private ZeppelinConfiguration zConf;
  private final InterpreterLaunchContext launchContext;
  private final Properties properties;
  // Environment variables forwarded to the YARN container.
  private final Map<String, String> envs;
  // Flipped to true once the YARN app reaches RUNNING state; drives isRunning().
  private AtomicBoolean isYarnAppRunning = new AtomicBoolean(false);
  // YARN diagnostics captured when the app fails to reach RUNNING.
  private String errorMessage;

  /************** Hadoop related **************************/
  private Configuration hadoopConf;
  private FileSystem fs;
  private FileSystem localFs;
  private YarnClient yarnClient;
  private ApplicationId appId;
  // Per-application HDFS staging dir holding localized archives;
  // deleted in start()'s finally block.
  private Path stagingDir;

  // App files are world-wide readable and owner writable -> rw-r--r--
  private static final FsPermission APP_FILE_PERMISSION =
      FsPermission.createImmutable(Short.parseShort("644", 8));

  /**
   * Builds the launcher: creates and starts a YarnClient against the given
   * (or discovered) Hadoop configuration, and opens both the cluster and
   * local file systems used for staging.
   *
   * @throws RuntimeException if HADOOP_CONF_DIR is set but invalid, or if a
   *         FileSystem cannot be created.
   */
  public YarnRemoteInterpreterProcess(
          InterpreterLaunchContext launchContext,
          Properties properties,
          Map<String, String> envs,
          int connectTimeout,
          int connectionPoolSize) {
    super(connectTimeout, connectionPoolSize, launchContext.getIntpEventServerHost(), launchContext.getIntpEventServerPort());
    this.zConf = ZeppelinConfiguration.create();
    this.launchContext = launchContext;
    this.properties = properties;
    this.envs = envs;

    yarnClient = YarnClient.createYarnClient();
    this.hadoopConf = new YarnConfiguration();

    // Add core-site.xml and yarn-site.xml. This is for integration test where using MiniHadoopCluster.
    if (properties.containsKey("HADOOP_CONF_DIR") &&
            !org.apache.commons.lang3.StringUtils.isBlank(properties.getProperty("HADOOP_CONF_DIR"))) {
      File hadoopConfDir = new File(properties.getProperty("HADOOP_CONF_DIR"));
      if (hadoopConfDir.exists() && hadoopConfDir.isDirectory()) {
        File coreSite = new File(hadoopConfDir, "core-site.xml");
        try {
          this.hadoopConf.addResource(coreSite.toURI().toURL());
        } catch (MalformedURLException e) {
          LOGGER.warn("Fail to add core-site.xml: " + coreSite.getAbsolutePath(), e);
        }
        File yarnSite = new File(hadoopConfDir, "yarn-site.xml");
        try {
          this.hadoopConf.addResource(yarnSite.toURI().toURL());
        } catch (MalformedURLException e) {
          LOGGER.warn("Fail to add yarn-site.xml: " + yarnSite.getAbsolutePath(), e);
        }
      } else {
        throw new RuntimeException("HADOOP_CONF_DIR: " + hadoopConfDir.getAbsolutePath() +
                " doesn't exist or is not a directory");
      }
    }

    yarnClient.init(this.hadoopConf);
    yarnClient.start();
    try {
      this.fs = FileSystem.get(hadoopConf);
      this.localFs = FileSystem.getLocal(hadoopConf);
    } catch (IOException e) {
      throw new RuntimeException("Fail to create FileSystem", e);
    }
  }

  /** Callback from the remote interpreter: records where it is listening. */
  @Override
  public void processStarted(int port, String host) {
    this.port = port;
    this.host = host;
  }

  @Override
  public String getErrorMessage() {
    return this.errorMessage;
  }

  @Override
  public String getInterpreterGroupId() {
    return launchContext.getInterpreterGroupId();
  }

  @Override
  public String getInterpreterSettingName() {
    return launchContext.getInterpreterSettingName();
  }

  /**
   * Submits the interpreter as a YARN application and polls until it reaches
   * RUNNING, or kills it after the connect timeout. The HDFS staging dir is
   * always cleaned up, whether submission succeeds or fails.
   *
   * @param userName submitting user (currently unused in this method body —
   *        TODO confirm whether impersonation is handled elsewhere)
   * @throws IOException on submission failure or timeout
   */
  @Override
  public void start(String userName) throws IOException {
    try {
      LOGGER.info("Submitting zeppelin-interpreter app to yarn");
      final YarnClientApplication yarnApplication = yarnClient.createApplication();
      final GetNewApplicationResponse appResponse = yarnApplication.getNewApplicationResponse();
      this.appId = appResponse.getApplicationId();
      ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();
      appContext = createApplicationSubmissionContext(appContext);
      yarnClient.submitApplication(appContext);

      long start = System.currentTimeMillis();
      ApplicationReport appReport = getApplicationReport(appId);
      // Poll every 2s until the app leaves the transitional states or the
      // connect timeout elapses.
      while (appReport.getYarnApplicationState() != YarnApplicationState.FAILED &&
              appReport.getYarnApplicationState() != YarnApplicationState.FINISHED &&
              appReport.getYarnApplicationState() != YarnApplicationState.KILLED &&
              appReport.getYarnApplicationState() != YarnApplicationState.RUNNING) {
        LOGGER.info("Wait for zeppelin interpreter yarn app to be started");
        Thread.sleep(2000);
        if ((System.currentTimeMillis() - start) > getConnectTimeout()) {
          yarnClient.killApplication(this.appId);
          throw new IOException("Launching zeppelin interpreter in yarn is time out, kill it now");
        }
        appReport = getApplicationReport(appId);
      }

      if (appReport.getYarnApplicationState() != YarnApplicationState.RUNNING) {
        this.errorMessage = appReport.getDiagnostics();
        throw new Exception("Failed to submit application to YARN"
                + ", applicationId=" + appId
                + ", diagnostics=" + appReport.getDiagnostics());
      }
      isYarnAppRunning.set(true);
    } catch (Exception e) {
      LOGGER.error("Fail to launch yarn interpreter process", e);
      throw new IOException(e);
    } finally {
      // Localized resources have already been distributed by YARN; the
      // staging copies are no longer needed.
      if (stagingDir != null) {
        this.fs.delete(stagingDir, true);
      }
    }
  }

  /**
   * Fetches the application report, treating a null state as "not found".
   */
  private ApplicationReport getApplicationReport(ApplicationId appId) throws YarnException, IOException {
    ApplicationReport report = yarnClient.getApplicationReport(appId);
    if (report.getYarnApplicationState() == null) {
      // The state can be null when the ResourceManager does not know about the app but the YARN
      // application history server has an incomplete entry for it. Treat this scenario as if the
      // application does not exist, since the final app status cannot be determined. This also
      // matches the behavior for this scenario if the history server was not configured.
      throw new ApplicationNotFoundException("YARN reports no state for application "
              + appId);
    }
    return report;
  }

  /**
   * Fills in resources, priority, queue, name, and the AM container spec.
   * Single attempt only (setMaxAppAttempts(1)) — the interpreter is not
   * restarted by YARN on failure.
   */
  private ApplicationSubmissionContext createApplicationSubmissionContext(
          ApplicationSubmissionContext appContext) throws Exception {
    setResources(appContext);
    setPriority(appContext);
    setQueue(appContext);
    appContext.setApplicationId(appId);
    setApplicationName(appContext);
    appContext.setApplicationType("ZEPPELIN INTERPRETER");
    appContext.setMaxAppAttempts(1);

    ContainerLaunchContext amContainer = setUpAMLaunchContext();
    appContext.setAMContainerSpec(amContainer);
    appContext.setCancelTokensWhenComplete(true);
    return appContext;
  }

  /**
   * Builds the AM container: zips and stages the Zeppelin runtime (plus
   * Flink/Hive conf archives for the flink interpreter group), assembles the
   * interpreter.sh command line, and populates the container environment.
   */
  private ContainerLaunchContext setUpAMLaunchContext() throws IOException {
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Set the resources to localize
    this.stagingDir = new Path(fs.getHomeDirectory() + "/.zeppelinStaging", appId.toString());
    Map<String, LocalResource> localResources = new HashMap<>();

    File interpreterZip = createInterpreterZip();
    Path srcPath = localFs.makeQualified(new Path(interpreterZip.toURI()));
    Path destPath = copyFileToRemote(stagingDir, srcPath, (short) 1);
    addResource(fs, destPath, localResources, LocalResourceType.ARCHIVE, "zeppelin");
    FileUtils.forceDelete(interpreterZip);

    // TODO(zjffdu) Should not add interpreter specific logic here.
    if (launchContext.getInterpreterSettingGroup().equals("flink")) {
      File flinkZip = createFlinkZip();
      srcPath = localFs.makeQualified(new Path(flinkZip.toURI()));
      destPath = copyFileToRemote(stagingDir, srcPath, (short) 1);
      addResource(fs, destPath, localResources, LocalResourceType.ARCHIVE, "flink");
      FileUtils.forceDelete(flinkZip);

      String hiveConfDir = launchContext.getProperties().getProperty("HIVE_CONF_DIR");
      if (!org.apache.commons.lang3.StringUtils.isBlank(hiveConfDir)) {
        File hiveConfZipFile = createHiveConfZip(new File(hiveConfDir));
        srcPath = localFs.makeQualified(new Path(hiveConfZipFile.toURI()));
        destPath = copyFileToRemote(stagingDir, srcPath, (short) 1);
        addResource(fs, destPath, localResources, LocalResourceType.ARCHIVE, "hive_conf");
        // NOTE(review): hiveConfZipFile is not deleted here, unlike the other
        // temp zips — confirm whether that is intentional.
      }
    }
    amContainer.setLocalResources(localResources);

    // Setup the command to run the AM
    List<String> vargs = new ArrayList<>();
    vargs.add(ApplicationConstants.Environment.PWD.$() + "/zeppelin/bin/interpreter.sh");
    vargs.add("-d");
    vargs.add(ApplicationConstants.Environment.PWD.$() + "/zeppelin/interpreter/"
            + launchContext.getInterpreterSettingGroup());
    vargs.add("-c");
    vargs.add(launchContext.getIntpEventServerHost());
    vargs.add("-p");
    vargs.add(launchContext.getIntpEventServerPort() + "");
    vargs.add("-r");
    vargs.add(zConf.getInterpreterPortRange() + "");
    vargs.add("-i");
    vargs.add(launchContext.getInterpreterGroupId());
    vargs.add("-l");
    vargs.add(ApplicationConstants.Environment.PWD.$() + "/zeppelin/" +
            ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_LOCALREPO.getStringValue()
            + "/" + launchContext.getInterpreterSettingName());
    vargs.add("-g");
    vargs.add(launchContext.getInterpreterSettingName());
    // Redirect stdout/stderr into the YARN container log dir.
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
            File.separator + ApplicationConstants.STDOUT);
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
            File.separator + ApplicationConstants.STDERR);

    // Setup ContainerLaunchContext for AM container
    amContainer.setCommands(vargs);

    // pass the interpreter ENV to yarn container and also add hadoop jars to CLASSPATH
    populateHadoopClasspath(this.envs);
    if (this.launchContext.getInterpreterSettingGroup().equals("flink")) {
      // Update the flink related env because the all these are different in yarn container
      this.envs.put("FLINK_HOME", ApplicationConstants.Environment.PWD.$() + "/flink");
      this.envs.put("FLINK_CONF_DIR", ApplicationConstants.Environment.PWD.$() + "/flink/conf");
      this.envs.put("FLINK_LIB_DIR", ApplicationConstants.Environment.PWD.$() + "/flink/lib");
      this.envs.put("FLINK_PLUGINS_DIR", ApplicationConstants.Environment.PWD.$() + "/flink/plugins");
      this.envs.put("HIVE_CONF_DIR", ApplicationConstants.Environment.PWD.$() + "/hive_conf");
    }

    // set -Xmx
    int memory = Integer.parseInt(
            properties.getProperty("zeppelin.interpreter.yarn.resource.memory", "1024"));
    this.envs.put("ZEPPELIN_INTP_MEM", "-Xmx" + memory + "m");
    amContainer.setEnvironment(this.envs);

    return amContainer;
  }

  /**
   * Populate the classpath entry in the given environment map with any application
   * classpath specified through the Hadoop and Yarn configurations.
   */
  private void populateHadoopClasspath(Map<String, String> envs) {
    List<String> yarnClassPath = Lists.newArrayList(getYarnAppClasspath());
    List<String> mrClassPath = Lists.newArrayList(getMRAppClasspath());
    yarnClassPath.addAll(mrClassPath);
    LOGGER.info("Adding hadoop classpath: " + org.apache.commons.lang3.StringUtils.join(yarnClassPath, ":"));
    for (String path : yarnClassPath) {
      String newValue = path;
      // Append to any CLASSPATH already present in the env map.
      if (envs.containsKey(ApplicationConstants.Environment.CLASSPATH.name())) {
        newValue = envs.get(ApplicationConstants.Environment.CLASSPATH.name()) +
                ApplicationConstants.CLASS_PATH_SEPARATOR + newValue;
      }
      envs.put(ApplicationConstants.Environment.CLASSPATH.name(), newValue);
    }
    // set HADOOP_MAPRED_HOME explicitly, otherwise it won't work for hadoop3
    // see https://stackoverflow.com/questions/50719585/unable-to-run-mapreduce-wordcount
    this.envs.put("HADOOP_MAPRED_HOME", "${HADOOP_HOME}");
  }

  /** YARN application classpath from config, or the YARN defaults. */
  private String[] getYarnAppClasspath() {
    String[] classpaths = hadoopConf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
    if (classpaths == null || classpaths.length == 0) {
      return getDefaultYarnApplicationClasspath();
    } else {
      return classpaths;
    }
  }

  /** MapReduce application classpath from config, or the MR defaults. */
  private String[] getMRAppClasspath() {
    String[] classpaths = hadoopConf.getStrings("mapreduce.application.classpath");
    if (classpaths == null || classpaths.length == 0) {
      return getDefaultMRApplicationClasspath();
    } else {
      return classpaths;
    }
  }

  private String[] getDefaultYarnApplicationClasspath() {
    return YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH;
  }

  private String[] getDefaultMRApplicationClasspath() {
    return StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH);
  }

  /**
   * Requests container memory (heap + overhead, in MB) and vcores from the
   * zeppelin.interpreter.yarn.resource.* properties.
   */
  private void setResources(ApplicationSubmissionContext appContext) {
    int memory = Integer.parseInt(
            properties.getProperty("zeppelin.interpreter.yarn.resource.memory", "1024"));
    int memoryOverHead = Integer.parseInt(
            properties.getProperty("zeppelin.interpreter.yarn.resource.memoryOverhead", "384"));
    // NOTE(review): when the configured overhead is below 10% of memory it is
    // reset to the fixed 384MB default, not to memory * 0.1 — for large heaps
    // this can yield an overhead smaller than 10%. Confirm this is intended.
    if (memoryOverHead < memory * 0.1) {
      memoryOverHead = 384;
    }
    int cores = Integer.parseInt(
            properties.getProperty("zeppelin.interpreter.yarn.resource.cores", "1"));
    final Resource resource = Resource.newInstance(memory + memoryOverHead, cores);
    appContext.setResource(resource);
  }

  private void setPriority(ApplicationSubmissionContext appContext) {
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(1);
    appContext.setPriority(pri);
  }

  private void setQueue(ApplicationSubmissionContext appContext) {
    String queue = properties.getProperty("zeppelin.interpreter.yarn.queue", "default");
    appContext.setQueue(queue);
  }

  private void setApplicationName(ApplicationSubmissionContext appContext) {
    appContext.setApplicationName("Zeppelin Interpreter " + launchContext.getInterpreterGroupId());
  }

  /**
   * Recursively adds a file or directory to the zip stream under
   * parentDirectoryName. Silently skips null or missing files.
   *
   * @param zos the open zip stream to write into
   * @param srcFile file or directory to add
   * @param parentDirectoryName zip-internal parent path, or null/empty for root
   * @throws IOException on write failure
   */
  private void addFileToZipStream(ZipOutputStream zos,
                                  File srcFile,
                                  String parentDirectoryName) throws IOException {
    if (srcFile == null || !srcFile.exists()) {
      return;
    }

    String zipEntryName = srcFile.getName();
    if (parentDirectoryName != null && !parentDirectoryName.isEmpty()) {
      zipEntryName = parentDirectoryName + "/" + srcFile.getName();
    }

    if (srcFile.isDirectory()) {
      for (File file : srcFile.listFiles()) {
        addFileToZipStream(zos, file, zipEntryName);
      }
    } else {
      zos.putNextEntry(new ZipEntry(zipEntryName));
      Files.copy(srcFile, zos);
      zos.closeEntry();
    }
  }

  /**
   *
   * Create zip file to interpreter.
   * The contents are all the stuff under ZEPPELIN_HOME/interpreter/{interpreter_name}
   * plus bin/, conf/, the local repo, and the zeppelin-interpreter-shaded jar.
   * @return the temp zip file (caller deletes it after staging)
   * @throws IOException if ZEPPELIN_HOME is unset or the shaded jar is missing/ambiguous
   */
  private File createInterpreterZip() throws IOException {
    File interpreterArchive = File.createTempFile("zeppelin_interpreter_", ".zip", Files.createTempDir());
    ZipOutputStream interpreterZipStream = new ZipOutputStream(new FileOutputStream(interpreterArchive));
    // Level 0 = store without compression; favors speed over archive size.
    interpreterZipStream.setLevel(0);

    String zeppelinHomeEnv = System.getenv("ZEPPELIN_HOME");
    if (org.apache.commons.lang3.StringUtils.isBlank(zeppelinHomeEnv)) {
      throw new IOException("ZEPPELIN_HOME is not specified");
    }
    File zeppelinHome = new File(zeppelinHomeEnv);
    File binDir = new File(zeppelinHome, "bin");
    addFileToZipStream(interpreterZipStream, binDir, null);

    File confDir = new File(zeppelinHome, "conf");
    addFileToZipStream(interpreterZipStream, confDir, null);

    File interpreterDir = new File(zeppelinHome, "interpreter/" + launchContext.getInterpreterSettingGroup());
    addFileToZipStream(interpreterZipStream, interpreterDir, "interpreter");

    File localRepoDir = new File(zConf.getInterpreterLocalRepoPath() + "/"
            + launchContext.getInterpreterSettingName());
    if (localRepoDir.exists() && localRepoDir.isDirectory()) {
      LOGGER.debug("Adding localRepoDir {} to interpreter zip: ", localRepoDir.getAbsolutePath());
      addFileToZipStream(interpreterZipStream, localRepoDir, "local-repo");
    }

    // add zeppelin-interpreter-shaded jar
    // NOTE(review): listFiles() returns null when the directory is missing or
    // unreadable, which would NPE on the length check below — confirm.
    File[] interpreterShadedFiles = new File(zeppelinHome, "interpreter").listFiles(
            file -> file.getName().startsWith("zeppelin-interpreter-shaded")
                    && file.getName().endsWith(".jar"));
    if (interpreterShadedFiles.length == 0) {
      throw new IOException("No zeppelin-interpreter-shaded jar found under " +
              zeppelinHome.getAbsolutePath() + "/interpreter");
    }
    if (interpreterShadedFiles.length > 1) {
      throw new IOException("More than 1 zeppelin-interpreter-shaded jars found under "
              + zeppelinHome.getAbsolutePath() + "/interpreter");
    }
    addFileToZipStream(interpreterZipStream, interpreterShadedFiles[0], "interpreter");

    interpreterZipStream.flush();
    interpreterZipStream.close();
    return interpreterArchive;
  }

  /**
   * Zips the whole FLINK_HOME directory (uncompressed) for localization.
   */
  private File createFlinkZip() throws IOException {
    File flinkArchive = File.createTempFile("flink_", ".zip", Files.createTempDir());
    ZipOutputStream flinkZipStream = new ZipOutputStream(new FileOutputStream(flinkArchive));
    flinkZipStream.setLevel(0);

    String flinkHomeEnv = envs.get("FLINK_HOME");
    File flinkHome = new File(flinkHomeEnv);
    if (!flinkHome.exists() || !flinkHome.isDirectory()) {
      throw new IOException("FLINK_HOME " + flinkHome.getAbsolutePath() +
              " doesn't exist or is not a directory.");
    }
    for (File file : flinkHome.listFiles()) {
      addFileToZipStream(flinkZipStream, file, null);
    }

    flinkZipStream.flush();
    flinkZipStream.close();
    return flinkArchive;
  }

  /**
   * Zips the Hive conf directory (uncompressed) for localization.
   */
  private File createHiveConfZip(File hiveConfDir) throws IOException {
    File hiveConfArchive = File.createTempFile("hive_conf", ".zip", Files.createTempDir());
    ZipOutputStream hiveConfZipStream = new ZipOutputStream(new FileOutputStream(hiveConfArchive));
    hiveConfZipStream.setLevel(0);

    if (!hiveConfDir.exists()) {
      throw new IOException("HIVE_CONF_DIR " + hiveConfDir.getAbsolutePath() + " doesn't exist");
    }
    for (File file : hiveConfDir.listFiles()) {
      addFileToZipStream(hiveConfZipStream, file, null);
    }

    hiveConfZipStream.flush();
    hiveConfZipStream.close();
    return hiveConfArchive;
  }

  /**
   * Copies srcPath into destDir on the remote FS, then applies the requested
   * replication factor and the rw-r--r-- app-file permission.
   *
   * @return the destination path of the uploaded file
   */
  private Path copyFileToRemote(
          Path destDir,
          Path srcPath,
          Short replication) throws IOException {
    FileSystem destFs = destDir.getFileSystem(hadoopConf);
    FileSystem srcFs = srcPath.getFileSystem(hadoopConf);

    Path destPath = new Path(destDir, srcPath.getName());
    LOGGER.info("Uploading resource " + srcPath + " to " + destPath);
    FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf);
    destFs.setReplication(destPath, replication);
    destFs.setPermission(destPath, APP_FILE_PERMISSION);
    return destPath;
  }

  /**
   * Registers destPath as a PUBLIC local resource under the given link name,
   * stamping it with the file's size and modification time so NodeManagers
   * can validate the download.
   */
  private void addResource(
          FileSystem fs,
          Path destPath,
          Map<String, LocalResource> localResources,
          LocalResourceType resourceType,
          String link) throws IOException {
    FileStatus destStatus = fs.getFileStatus(destPath);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
    amJarRsrc.setType(resourceType);
    amJarRsrc.setVisibility(LocalResourceVisibility.PUBLIC);
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(destPath));
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put(link, amJarRsrc);
  }

  /**
   * Asks the remote interpreter to shut itself down (best effort), closes the
   * connection pool, and stops the YarnClient.
   */
  @Override
  public void stop() {
    if (isRunning()) {
      LOGGER.info("Kill interpreter process");
      try {
        callRemoteFunction(client -> {
          client.shutdown();
          return null;
        });
      } catch (Exception e) {
        LOGGER.warn("ignore the exception when shutting down", e);
      }

      // Shutdown connection
      shutdown();
    }
    yarnClient.stop();
    LOGGER.info("Remote process terminated");
  }

  @Override
  public String getHost() {
    return this.host;
  }

  @Override
  public int getPort() {
    return this.port;
  }

  @Override
  public boolean isRunning() {
    return isYarnAppRunning.get();
  }
}
|
package org.springframework.beans.factory.config;
import java.lang.reflect.InvocationTargetException;
import org.springframework.beans.TypeConverter;
import org.springframework.beans.factory.BeanClassLoaderAware;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.support.ArgumentConvertingMethodInvoker;
import org.springframework.lang.Nullable;
import org.springframework.util.ClassUtils;
/**
* Simple method invoker bean: just invoking a target method, not expecting a result
* to expose to the container (in contrast to {@link MethodInvokingFactoryBean}).
*
* This invoker supports any kind of target method. A static method may be specified
* by setting the {@link #setTargetMethod targetMethod} property to a String representing
* the static method name, with {@link #setTargetClass targetClass} specifying the Class
* that the static method is defined on. Alternatively, a target instance method may be
* specified, by setting the {@link #setTargetObject targetObject} property as the target
* object, and the {@link #setTargetMethod targetMethod} property as the name of the
* method to call on that target object. Arguments for the method invocation may be
* specified by setting the {@link #setArguments arguments} property.
*
* This class depends on {@link #afterPropertiesSet()} being called once
* all properties have been set, as per the InitializingBean contract.
*
* An example (in an XML based bean factory definition) of a bean definition
* which uses this class to call a static initialization method:
*
* <pre class="code">
* <bean id="myObject" class="org.springframework.beans.factory.config.MethodInvokingBean">
* <property name="staticMethod" value="com.whatever.MyClass.init"/>
* </bean></pre>
*
* An example of calling an instance method to start some server bean:
*
* <pre class="code">
* <bean id="myStarter" class="org.springframework.beans.factory.config.MethodInvokingBean">
* <property name="targetObject" ref="myServer"/>
* <property name="targetMethod" value="start"/>
* </bean></pre>
* @since 4.0.3
* @see MethodInvokingFactoryBean
* @see org.springframework.util.MethodInvoker
*/
public class MethodInvokingBean extends ArgumentConvertingMethodInvoker
        implements BeanClassLoaderAware, BeanFactoryAware, InitializingBean {

    @Nullable
    private ClassLoader beanClassLoader = ClassUtils.getDefaultClassLoader();

    @Nullable
    private ConfigurableBeanFactory beanFactory;

    @Override
    public void setBeanClassLoader(ClassLoader classLoader) {
        this.beanClassLoader = classLoader;
    }

    @Override
    protected Class<?> resolveClassName(String className) throws ClassNotFoundException {
        return ClassUtils.forName(className, this.beanClassLoader);
    }

    @Override
    public void setBeanFactory(BeanFactory beanFactory) {
        // Only a ConfigurableBeanFactory exposes a TypeConverter; anything
        // else is ignored and the default converter is used instead.
        if (beanFactory instanceof ConfigurableBeanFactory) {
            this.beanFactory = (ConfigurableBeanFactory) beanFactory;
        }
    }

    /**
     * Obtain the TypeConverter from the BeanFactory that this bean runs in,
     * if possible.
     * @see ConfigurableBeanFactory#getTypeConverter()
     */
    @Override
    protected TypeConverter getDefaultTypeConverter() {
        ConfigurableBeanFactory factory = this.beanFactory;
        return (factory != null ? factory.getTypeConverter() : super.getDefaultTypeConverter());
    }

    @Override
    public void afterPropertiesSet() throws Exception {
        prepare();
        invokeWithTargetException();
    }

    /**
     * Perform the invocation and convert InvocationTargetException
     * into the underlying target exception.
     */
    @Nullable
    protected Object invokeWithTargetException() throws Exception {
        try {
            return invoke();
        }
        catch (InvocationTargetException ex) {
            Throwable target = ex.getTargetException();
            if (target instanceof Exception) {
                throw (Exception) target;
            }
            if (target instanceof Error) {
                throw (Error) target;
            }
            throw ex;
        }
    }
}
|
package bitwise.appservice.requests;
import bitwise.apps.BaseAppFactory;
import bitwise.apps.BaseAppHandle;
import bitwise.appservice.AppService;
import bitwise.appservice.AppServiceRequest;
import bitwise.engine.service.RequestContext;
/**
 * Request that asks the AppService to start an app built by the given
 * factory; the resulting handle becomes available once the request is served.
 */
public final class StartApp<H extends BaseAppHandle<?, ?>> extends AppServiceRequest<StartAppRequester> {
	// Factory used to instantiate the app when the request is served.
	private final BaseAppFactory<H> factory;
	// Handle of the started app; null until onServeRequest completes.
	private H handle = null;

	public StartApp(AppService in_service, StartAppRequester in_requester, BaseAppFactory<H> in_factory) {
		super(in_service, in_requester);
		this.factory = in_factory;
	}

	public BaseAppFactory<H> getAppFactory() {
		return this.factory;
	}

	public H getHandle() {
		return this.handle;
	}

	@Override
	protected void onServeRequest(RequestContext ctx) throws InterruptedException {
		this.handle = getService().startApp(getRequester(), this.factory);
	}

	@Override
	protected void onEpilogueRequest(RequestContext ctx) throws InterruptedException {
		getRequester().notifyRequestComplete(this);
	}
}
|
package ro.ase.acs.classes;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import ro.ase.acs.interfaces.Taxable;
//Car is Serializable because it extends Vehicle
//which is Serializable
public final class Car extends Vehicle implements Taxable, Comparable<Car> {
	private static final long serialVersionUID = 1L;

	// File used by serialize()/deserialize().
	private static final String SERIALIZATION_FILE = "object.bin";

	private String color;
	private int capacity;

	/** Creates a black car with a default capacity of 50. */
	public Car() {
		super();
		color = "black";
		capacity = 50;
	}

	public Car(String name, int speed, String color, int capacity) {
		super(name, speed);
		this.color = color;
		this.capacity = capacity;
	}

	public String getColor() {
		return color;
	}

	public void setColor(String color) {
		this.color = color;
	}

	public int getCapacity() {
		return capacity;
	}

	public void setCapacity(int capacity) {
		this.capacity = capacity;
	}

	@Override
	public Object clone() throws CloneNotSupportedException {
		// super.clone() already copies the fields; the explicit assignments
		// are kept to make the shallow-copy intent obvious.
		Car copy = (Car)super.clone();
		copy.color = color;
		copy.capacity = capacity;
		return copy;
	}

	//The toString method is called in order to
	//convert the object into a String
	//for example when we want to display it to the console
	//
	//String concatenation is not the best approach
	//so we used StringBuilder instead
	@Override
	public String toString() {
		StringBuilder builder = new StringBuilder();
		builder.append("Car [color=");
		builder.append(color);
		builder.append(", capacity=");
		builder.append(capacity);
		builder.append(", name=");
		builder.append(getName());
		builder.append(", speed=");
		builder.append(getSpeed());
		builder.append("]");
		return builder.toString();
	}

	/**
	 * Tax is proportional to engine capacity; engines of 2000 and above pay
	 * a doubled rate. The result is never below MIN_TAX.
	 */
	@Override
	public float computeTax() {
		float tax;
		if(capacity < 2000) {
			tax = (float)capacity / 1000 * 50;
		}
		else {
			tax = (float)capacity / 1000 * 100;
		}
		return Math.max(tax, MIN_TAX);
	}

	@Override
	public final void move() {
		System.out.println("The car is moving with " + getSpeed() +
				" km/h");
	}

	//compareTo method from the Comparable interface
	//is used by TreeSet or TreeMap collections
	//in order to insert the elements/keys
	@Override
	public int compareTo(Car o) {
		// Integer.compare avoids the manual -1/0/1 branching (and the
		// subtraction-overflow trap).
		return Integer.compare(capacity, o.capacity);
	}

	//every time we override hashCode we should override equals too
	//and vice-versa
	//for two objects for which equals return true, the hashCode
	//should be the same (the opposite is not true)
	@Override
	public boolean equals(Object obj) {
		if(obj instanceof Car) {
			Car o = (Car)obj;
			return getName().equals(o.getName()) && getSpeed() == o.getSpeed() &&
					color.equals(o.color) && capacity == o.capacity;
		}
		return false;
	}

	//usually we use prime numbers in order to generate unique integer values
	//the probability to have collisions is lower in this manner
	@Override
	public int hashCode() {
		return (31 * getName().hashCode() + getSpeed()) * 31 * color.hashCode() + capacity;
	}

	/**
	 * Binary serialization to {@value #SERIALIZATION_FILE}.
	 * try-with-resources closes both streams even when writeObject or the
	 * ObjectOutputStream constructor throws (the original code leaked the
	 * FileOutputStream in that case); IO errors are still only logged.
	 */
	public void serialize() {
		try (FileOutputStream fileOutputStream = new FileOutputStream(SERIALIZATION_FILE);
				ObjectOutputStream outputStream = new ObjectOutputStream(fileOutputStream)) {
			outputStream.writeObject(this);
		} catch (IOException e) {
			// FileNotFoundException is an IOException; one handler covers both.
			e.printStackTrace();
		}
	}

	/**
	 * Binary deserialization from {@value #SERIALIZATION_FILE}.
	 * try-with-resources guarantees the streams are closed even when
	 * readObject throws (the original leaked them on that path).
	 *
	 * @return the deserialized Car
	 * @throws IOException if the file cannot be read
	 * @throws ClassNotFoundException if the serialized class is unavailable
	 */
	public static Car deserialize() throws IOException, ClassNotFoundException {
		try (FileInputStream fileInputStream = new FileInputStream(SERIALIZATION_FILE);
				ObjectInputStream objectInputStream = new ObjectInputStream(fileInputStream)) {
			return (Car)objectInputStream.readObject();
		}
	}
}
|
Copyright IBM Corporation. All rights reserved. This program and the accompanying materials are made available under the terms of the Eclipse Public License which accompanies this distribution: http://www.eclipse.org/legal/epl.html. Contributors: IBM Corporation - initial API and implementation. Keywords: annotations, enum, toEnum.
|
/*
* Copyright 2015-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.rest.resources;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;
import org.onlab.util.ItemNotFoundException;
import org.onosproject.app.ApplicationService;
import org.onosproject.core.ApplicationId;
import org.onosproject.net.Device;
import org.onosproject.net.DeviceId;
import org.onosproject.net.device.DeviceService;
import org.onosproject.net.flow.FlowEntry;
import org.onosproject.net.flow.FlowRule;
import org.onosproject.net.flow.FlowRuleService;
import org.onosproject.rest.AbstractWebResource;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.StreamSupport;
import static org.onlab.util.Tools.nullIsIllegal;
import static org.onlab.util.Tools.nullIsNotFound;
import static org.onlab.util.Tools.readTreeFromStream;
/**
 * Query and program flow rules.
 */
@Path("flows")
public class FlowsWebResource extends AbstractWebResource {
    @Context
    private UriInfo uriInfo;

    private static final String DEVICE_NOT_FOUND = "Device is not found";
    private static final String FLOW_NOT_FOUND = "Flow is not found";
    private static final String APP_ID_NOT_FOUND = "Application Id is not found";
    private static final String FLOW_ARRAY_REQUIRED = "Flows array was not specified";
    private static final String FLOWS = "flows";
    private static final String DEVICE_ID = "deviceId";
    private static final String FLOW_ID = "flowId";

    private final FlowRuleService service = get(FlowRuleService.class);
    // Response skeleton shared by the handlers of this (per-request) resource;
    // each handler appends the encoded flows to the "flows" array.
    private final ObjectNode root = mapper().createObjectNode();
    private final ArrayNode flowsNode = root.putArray(FLOWS);

    /**
     * Gets all flow entries. Returns array of all flow rules in the system.
     *
     * @return 200 OK with a collection of flows
     * @onos.rsModel FlowEntries
     */
    @GET
    @Produces(MediaType.APPLICATION_JSON)
    public Response getFlows() {
        final Iterable<Device> devices = get(DeviceService.class).getDevices();
        for (final Device device : devices) {
            final Iterable<FlowEntry> flowEntries = service.getFlowEntries(device.id());
            if (flowEntries != null) {
                for (final FlowEntry entry : flowEntries) {
                    flowsNode.add(codec(FlowEntry.class).encode(entry, this));
                }
            }
        }
        return ok(root).build();
    }

    /**
     * Creates new flow rules. Creates and installs the posted flow rules.<br>
     * Flow rule criteria and instruction description:
     * https://wiki.onosproject.org/display/ONOS/Flow+Rules
     *
     * @param appId application id
     * @param stream flow rules JSON
     * @return 200 OK with the device/flow ids of the applied rules if the JSON
     *         is correct, BAD_REQUEST if the JSON is invalid
     * @onos.rsModel FlowsBatchPost
     */
    @POST
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response createFlows(@QueryParam("appId") String appId, InputStream stream) {
        try {
            ObjectNode jsonTree = readTreeFromStream(mapper(), stream);
            ArrayNode flowsArray = nullIsIllegal((ArrayNode) jsonTree.get(FLOWS),
                                                 FLOW_ARRAY_REQUIRED);
            if (appId != null) {
                // Stamp the query-param appId onto every posted flow.
                flowsArray.forEach(flowJson -> ((ObjectNode) flowJson).put("appId", appId));
            }
            List<FlowRule> rules = codec(FlowRule.class).decode(flowsArray, this);
            service.applyFlowRules(rules.toArray(new FlowRule[0]));
            rules.forEach(flowRule -> {
                ObjectNode flowNode = mapper().createObjectNode();
                flowNode.put(DEVICE_ID, flowRule.deviceId().toString())
                        .put(FLOW_ID, Long.toString(flowRule.id().value()));
                flowsNode.add(flowNode);
            });
        } catch (IOException ex) {
            throw new IllegalArgumentException(ex);
        }
        // NOTE(review): the batch endpoint returns 200 OK (unlike the single-flow
        // POST below which returns 201 CREATED); kept for client compatibility.
        return Response.ok(root).build();
    }

    /**
     * Gets flow entries of a device. Returns array of all flow rules for the
     * specified device.
     *
     * @param deviceId device identifier
     * @return 200 OK with a collection of flows of given device
     * @onos.rsModel FlowEntries
     */
    @GET
    @Produces(MediaType.APPLICATION_JSON)
    // TODO: we need to add "/device" suffix to the path to differentiate with appId
    @Path("{deviceId}")
    public Response getFlowByDeviceId(@PathParam("deviceId") String deviceId) {
        final Iterable<FlowEntry> flowEntries =
                service.getFlowEntries(DeviceId.deviceId(deviceId));
        if (flowEntries == null || !flowEntries.iterator().hasNext()) {
            throw new ItemNotFoundException(DEVICE_NOT_FOUND);
        }
        for (final FlowEntry entry : flowEntries) {
            flowsNode.add(codec(FlowEntry.class).encode(entry, this));
        }
        return ok(root).build();
    }

    /**
     * Gets flow rules. Returns the flow entry specified by the device id and
     * flow rule id.
     *
     * @param deviceId device identifier
     * @param flowId flow rule identifier
     * @return 200 OK with a collection of flows of given device and flow
     *         (empty array when no flow with that id exists on the device)
     * @onos.rsModel FlowEntries
     */
    @GET
    @Produces(MediaType.APPLICATION_JSON)
    @Path("{deviceId}/{flowId}")
    public Response getFlowByDeviceIdAndFlowId(@PathParam("deviceId") String deviceId,
                                               @PathParam("flowId") long flowId) {
        final Iterable<FlowEntry> flowEntries =
                service.getFlowEntries(DeviceId.deviceId(deviceId));
        if (flowEntries == null || !flowEntries.iterator().hasNext()) {
            throw new ItemNotFoundException(DEVICE_NOT_FOUND);
        }
        for (final FlowEntry entry : flowEntries) {
            if (entry.id().value() == flowId) {
                flowsNode.add(codec(FlowEntry.class).encode(entry, this));
            }
        }
        return ok(root).build();
    }

    /**
     * Gets flow rules generated by an application.
     * Returns the flow rule specified by the application id.
     *
     * @param appId application identifier
     * @return 200 OK with a collection of flows of given application id
     * @onos.rsModel FlowRules
     */
    @GET
    @Produces(MediaType.APPLICATION_JSON)
    @Path("application/{appId}")
    public Response getFlowByAppId(@PathParam("appId") String appId) {
        final ApplicationService appService = get(ApplicationService.class);
        final ApplicationId idInstant = nullIsNotFound(appService.getId(appId), APP_ID_NOT_FOUND);
        final Iterable<FlowEntry> flowEntries = service.getFlowEntriesById(idInstant);
        flowEntries.forEach(flow -> flowsNode.add(codec(FlowEntry.class).encode(flow, this)));
        return ok(root).build();
    }

    /**
     * Removes flow rules by application ID.
     * Removes a collection of flow rules generated by the given application.
     *
     * @param appId application identifier
     * @return 204 NO CONTENT
     */
    @DELETE
    @Produces(MediaType.APPLICATION_JSON)
    @Path("application/{appId}")
    public Response removeFlowByAppId(@PathParam("appId") String appId) {
        final ApplicationService appService = get(ApplicationService.class);
        final ApplicationId idInstant = nullIsNotFound(appService.getId(appId), APP_ID_NOT_FOUND);
        service.removeFlowRulesById(idInstant);
        return Response.noContent().build();
    }

    /**
     * Creates new flow rule. Creates and installs a new flow rule for the
     * specified device. <br>
     * Flow rule criteria and instruction description:
     * https://wiki.onosproject.org/display/ONOS/Flow+Rules
     *
     * @param deviceId device identifier
     * @param appId application identifier
     * @param stream flow rule JSON
     * @return status of the request - CREATED if the JSON is correct,
     * BAD_REQUEST if the JSON is invalid
     * @onos.rsModel FlowsPost
     */
    @POST
    @Path("{deviceId}")
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response createFlow(@PathParam("deviceId") String deviceId,
                               @QueryParam("appId") String appId,
                               InputStream stream) {
        try {
            ObjectNode jsonTree = readTreeFromStream(mapper(), stream);
            // A deviceId in the body, if present, must match the path parameter.
            JsonNode specifiedDeviceId = jsonTree.get(DEVICE_ID);
            if (specifiedDeviceId != null &&
                    !specifiedDeviceId.asText().equals(deviceId)) {
                throw new IllegalArgumentException(
                        "Invalid deviceId in flow creation request");
            }
            jsonTree.put(DEVICE_ID, deviceId);
            if (appId != null) {
                jsonTree.put("appId", appId);
            }
            FlowRule rule = codec(FlowRule.class).decode(jsonTree, this);
            service.applyFlowRules(rule);
            // Location header points at the GET endpoint for the new flow.
            UriBuilder locationBuilder = uriInfo.getBaseUriBuilder()
                    .path("flows")
                    .path(deviceId)
                    .path(Long.toString(rule.id().value()));
            return Response
                    .created(locationBuilder.build())
                    .build();
        } catch (IOException ex) {
            throw new IllegalArgumentException(ex);
        }
    }

    /**
     * Removes flow rule. Removes the specified flow rule.
     *
     * @param deviceId device identifier
     * @param flowId flow rule identifier
     * @return 204 NO CONTENT
     */
    @DELETE
    @Path("{deviceId}/{flowId}")
    public Response deleteFlowByDeviceIdAndFlowId(@PathParam("deviceId") String deviceId,
                                                  @PathParam("flowId") long flowId) {
        final Iterable<FlowEntry> flowEntries =
                service.getFlowEntries(DeviceId.deviceId(deviceId));
        // Null-check added for consistency with the GET handlers above.
        if (flowEntries == null || !flowEntries.iterator().hasNext()) {
            throw new ItemNotFoundException(DEVICE_NOT_FOUND);
        }
        StreamSupport.stream(flowEntries.spliterator(), false)
                .filter(entry -> entry.id().value() == flowId)
                .forEach(service::removeFlowRules);
        return Response.noContent().build();
    }

    /**
     * Removes a batch of flow rules.
     *
     * @param stream stream for posted JSON
     * @return 204 NO CONTENT
     */
    @DELETE
    public Response deleteFlows(InputStream stream) {
        ListMultimap<DeviceId, Long> deviceMap = ArrayListMultimap.create();
        List<FlowEntry> rulesToRemove = new ArrayList<>();
        try {
            ObjectNode jsonTree = readTreeFromStream(mapper(), stream);
            // Reject a body without a "flows" array with 400 instead of NPE/500.
            JsonNode jsonFlows = nullIsIllegal(jsonTree.get(FLOWS), FLOW_ARRAY_REQUIRED);
            jsonFlows.forEach(node -> {
                DeviceId deviceId =
                        DeviceId.deviceId(
                                nullIsNotFound(node.get(DEVICE_ID),
                                               DEVICE_NOT_FOUND).asText());
                long flowId = nullIsNotFound(node.get(FLOW_ID),
                                             FLOW_NOT_FOUND).asLong();
                deviceMap.put(deviceId, flowId);
            });
        } catch (IOException ex) {
            throw new IllegalArgumentException(ex);
        }
        // Resolve the requested (device, flowId) pairs against live entries.
        deviceMap.keySet().forEach(deviceId -> {
            List<Long> flowIds = deviceMap.get(deviceId);
            Iterable<FlowEntry> entries = service.getFlowEntries(deviceId);
            flowIds.forEach(flowId -> {
                StreamSupport.stream(entries.spliterator(), false)
                        .filter(entry -> flowId == entry.id().value())
                        .forEach(rulesToRemove::add);
            });
        });
        service.removeFlowRules(rulesToRemove.toArray(new FlowEntry[0]));
        return Response.noContent().build();
    }
}
|
package com.kinetica.kafka;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.gpudb.GPUdb;
import com.gpudb.GPUdbException;
import com.gpudb.protocol.ShowTableResponse;
import com.kinetica.kafka.data.utils.SchemaRegistryUtils;
import com.kinetica.kafka.utils.ConnectorConfigHelper;
import org.apache.kafka.common.KafkaException;
public class SchemaTestSerializerDataPumpSinkTest {
    private final static String TOPIC = "SchemaVersion";
    private final static String COLLECTION = "TEST";
    private final static int batch_size = 10;
    private final static int TASK_NUM = 1;
    private HashMap<String, String> tableSizeProps;
    private GPUdb gpudb;

    @Before
    public void setup() throws Exception {
        this.gpudb = ConnectorConfigHelper.getGPUdb();
        this.tableSizeProps = new HashMap<String, String>();
        this.tableSizeProps.put("get_sizes", "true");
        ConnectorConfigHelper.tableCleanUp(this.gpudb, ConnectorConfigHelper.addCollection(topicTables(), COLLECTION));
    }

    @After
    public void cleanup() throws GPUdbException {
        ConnectorConfigHelper.tableCleanUp(this.gpudb, ConnectorConfigHelper.addCollection(topicTables(), COLLECTION));
        this.gpudb = null;
    }

    /** Builds the table names test1SchemaVersion .. test6SchemaVersion used by these tests. */
    private String[] topicTables() {
        String[] tables = new String[6];
        for (int i = 0; i < 6; i++) {
            tables[i] = "test" + (i + 1) + TOPIC;
        }
        return tables;
    }

    // Kafka connector supports schema evolution,
    // does not allow to add columns to Kinetica table or convert columns for missing data to nullable
    // Table prefix used
    @Test
    public void testAvroSchemaEvolutionSchema1to2() throws Exception {
        runSchemaEvolutionTest("test1", new int[] {1, 2}, 1, batch_size * 2);
    }

    @Test
    public void testAvroSchemaEvolutionSchema1to3() throws Exception {
        runSchemaEvolutionTest("test2", new int[] {1, 2}, 1, batch_size * 2);
    }

    @Test
    public void testAvroSchemaEvolutionSchema3to1() throws Exception {
        runSchemaEvolutionTest("test3", new int[] {3, 1}, 3, batch_size * 2);
    }

    @Test
    public void testAvroSchemaEvolutionSchema1to4() throws Exception {
        runSchemaEvolutionTest("test4", new int[] {1, 4}, 1, batch_size * 2);
    }

    @Test
    public void testAvroSchemaEvolutionSchema4to3() throws Exception {
        runSchemaEvolutionTest("test5", new int[] {4, 3}, 4, batch_size * 2);
    }

    // Kafka connector supports schema evolution, but due to wrong schema version error, no evolution would happen
    @Test (expected = KafkaException.class)
    public void testAvroSchemaEvolutionSchema4to0() throws Exception {
        runSchemaEvolutionTest("test6", new int[] {4, 0}, 4, batch_size);
    }

    /**
     * Shared body of the schema-evolution tests. Starts a connector configured
     * with the given table prefix, pumps one batch of messages per schema
     * version, then verifies the destination table exists, holds the expected
     * number of rows, and keeps the column count of the control schema (the
     * connector does not add columns to an existing Kinetica table).
     *
     * @param prefix               table-name prefix for this test case
     * @param versions             Avro schema versions to generate, in order
     * @param controlSchemaVersion schema version whose field count the table must match
     * @param expectedMessages     expected row count after ingestion
     */
    private void runSchemaEvolutionTest(String prefix, int[] versions,
            int controlSchemaVersion, int expectedMessages) throws Exception {
        Map<String, String> config = ConnectorConfigHelper.getParameterizedConfig(
                TOPIC, COLLECTION, prefix, "", true, true, true, false, false, true);
        // Configure and start connector and sinktask
        KineticaSinkConnector connector = ConnectorConfigHelper.startConnector(config);
        KineticaSinkTask task = startSinkTask(connector);
        // run data generation
        runSinkTask(task, TOPIC, versions);
        Thread.sleep(1000);
        String tableName = ConnectorConfigHelper.addCollection(prefix + TOPIC, COLLECTION);
        assertTrue(gpudb.hasTable(tableName, null).getTableExists());
        // expect table size to match number of Kafka messages generated/ingested
        ShowTableResponse response = gpudb.showTable(tableName, tableSizeProps);
        // JUnit convention: expected value first, actual second.
        assertEquals(expectedMessages, response.getFullSizes().get(0).intValue());
        Map<String, List<String>> props = response.getProperties().get(0);
        int controlCount = SchemaRegistryUtils.getKafkaSchemaByVersion(controlSchemaVersion).fields().size();
        assertEquals(controlCount, props.size());
    }

    /**
     * Helper function - starts SinkTask
     * @param connector KineticaSinkConnector
     * @return KineticaSinkTask
     */
    private KineticaSinkTask startSinkTask(KineticaSinkConnector connector) {
        // retrieve taskConfigs from connector
        List<Map<String, String>> taskConfigs = connector.taskConfigs(TASK_NUM);
        Map<String, String> taskConfig = taskConfigs.get(0);
        taskConfig.put(SinkTask.TOPICS_CONFIG, TOPIC);
        // create a new task off taskConfig
        KineticaSinkTask task = new KineticaSinkTask();
        task.start(taskConfig);
        return task;
    }

    /**
     * Helper function using AvroSerializerDataPump test utility
     * @param task KineticaSinkTask to run
     * @param topic Kafka topic name
     * @param versions Avro schema versions to generate, one batch each
     * @throws Exception
     */
    private void runSinkTask(KineticaSinkTask task, String topic, int[] versions) throws Exception {
        List<SinkRecord> sinkRecords;
        for (int version : versions) {
            sinkRecords = SchemaTestSerializerDataPump.mockAvroSerialized(topic, version, batch_size);
            task.put(sinkRecords);
        }
        Thread.sleep(1000);
        task.flush(null);
        task.stop();
    }
}
|
package com.example.missj.goaduch.com.example.missj.control;
import android.app.Activity;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ListView;
import android.widget.RelativeLayout;
import com.example.missj.goaduch.com.example.missj.fragment.FrameBase;
import com.example.missj.goaduch.R;
import com.example.missj.goaduch.com.example.missj.Adapter.AdapterSlideMenu;
import java.util.ArrayList;
import java.util.List;
/**
* Created by miss.j on 2017/10/28.
*/
public class SliderMenuView {
    private Activity mActivity;
    private List mMenuList;
    // whether the bottom panel is currently collapsed; primitive boolean avoids
    // the boxing/NPE risk of the original Boolean field
    private boolean mIsClosed;
    private RelativeLayout layBottomBox;
    private OnSlideMenuListenner mOnSlideMenuListenner;

    /** Callback invoked when an entry of the slide menu list is tapped. */
    public interface OnSlideMenuListenner
    {
        public abstract void onSlidMenuItemClick(View pView, SliderMenuItem pSlideMenuItemClick);
    }

    /** Expands the bottom panel to fill the area below the title bar. */
    private void open()
    {
        RelativeLayout.LayoutParams _LayoutParams = new RelativeLayout.LayoutParams(RelativeLayout.LayoutParams.MATCH_PARENT,
                RelativeLayout.LayoutParams.MATCH_PARENT);
        _LayoutParams.addRule(RelativeLayout.BELOW, R.id.IncludeTitle);
        layBottomBox.setLayoutParams(_LayoutParams);
        mIsClosed = false;
    }

    /** Collapses the bottom panel to a fixed strip (height 200) at the bottom. */
    private void close()
    {
        RelativeLayout.LayoutParams _LayoutParams = new RelativeLayout.LayoutParams(RelativeLayout.LayoutParams.MATCH_PARENT,
                200);
        _LayoutParams.addRule(RelativeLayout.ALIGN_PARENT_BOTTOM);
        layBottomBox.setLayoutParams(_LayoutParams);
        mIsClosed = true;
    }

    /** Toggles the panel between its expanded and collapsed states. */
    public void Toggle()
    {
        if(mIsClosed)
            open();
        else
            close();
    }

    /** Appends a menu item; call {@link #BindList()} afterwards to refresh the list. */
    public void Add(SliderMenuItem pSliderMenuItem )
    {
        mMenuList.add(pSliderMenuItem );
    }

    /** Binds the current menu items to the slide ListView and wires item clicks. */
    public void BindList()
    {
        AdapterSlideMenu _AdapterSlideMenu = new AdapterSlideMenu(mActivity, mMenuList);
        ListView _ListView = mActivity.findViewById(R.id.lvSlideList);
        _ListView.setAdapter(_AdapterSlideMenu);
        _ListView.setOnItemClickListener(new onSlideMenuItemClickListener());
    }

    // NOTE(review): unused empty method; kept to avoid breaking any external
    // reference — confirm before removing.
    private void OnSlideMenuView()
    {
    }

    /** Initializes internal state; the panel starts collapsed. */
    public void InitVarible()
    {
        mMenuList = new ArrayList();
        mIsClosed = true;
    }

    /** Looks up the bottom panel view from the hosting activity's layout. */
    public void InitView()
    {
        layBottomBox = (RelativeLayout)mActivity.findViewById(R.id.IncludeBottom);
    }

    /** Registers the click listener that toggles the panel. */
    public void InitListeners()
    {
        layBottomBox.setOnClickListener(new onSlideMenuClick());
    }

    private class onSlideMenuClick implements View.OnClickListener
    {
        @Override
        public void onClick(View view) {
            Toggle();}
    }

    private class onSlideMenuItemClickListener implements AdapterView.OnItemClickListener
    {
        @Override
        public void onItemClick(AdapterView<?> adapterView, View view, int position, long l) {
            SliderMenuItem _SliderMenuItem = (SliderMenuItem)adapterView.getItemAtPosition(position);
            mOnSlideMenuListenner.onSlidMenuItemClick(view, _SliderMenuItem);
        }
    }

    /**
     * @param pActivity  hosting activity providing the layout views
     * @param mFrameBase must implement {@link OnSlideMenuListenner};
     *                   otherwise the cast below throws ClassCastException
     */
    public SliderMenuView(Activity pActivity, FrameBase mFrameBase) {
        mActivity = pActivity;
        mOnSlideMenuListenner = (OnSlideMenuListenner)mFrameBase;
        InitVarible();
        InitView();
        InitListeners();
    }
}
|
package com.stackroute.domain;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
@Document(collection = "User")
//@Data
//@NoArgsConstructor
//@AllArgsConstructor
public class User {

    /** MongoDB document identifier. */
    @Id
    private String id;

    private String username;
    private String password;
    private int age;
    private String gender;
    private String company;
    private String course;
    private String firstName;
    private String lastName;
    private String interest;
    private String discipline;
    private String emailId;
    private String college;
    // private String token;

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    public int getAge() {
        return age;
    }

    public void setAge(int age) {
        this.age = age;
    }

    public String getGender() {
        return gender;
    }

    public void setGender(String gender) {
        this.gender = gender;
    }

    public String getCompany() {
        return company;
    }

    public void setCompany(String company) {
        this.company = company;
    }

    public String getCourse() {
        return course;
    }

    public void setCourse(String course) {
        this.course = course;
    }

    public String getFirstName() {
        return firstName;
    }

    public void setFirstName(String firstName) {
        this.firstName = firstName;
    }

    public String getLastName() {
        return lastName;
    }

    public void setLastName(String lastName) {
        this.lastName = lastName;
    }

    public String getInterest() {
        return interest;
    }

    public void setInterest(String interest) {
        this.interest = interest;
    }

    public String getDiscipline() {
        return discipline;
    }

    public void setDiscipline(String discipline) {
        this.discipline = discipline;
    }

    public String getEmailId() {
        return emailId;
    }

    public void setEmailId(String emailId) {
        this.emailId = emailId;
    }

    public String getCollege() {
        return college;
    }

    public void setCollege(String college) {
        this.college = college;
    }
}
|
/*
* #%L
* ImageJ software for multidimensional image processing and analysis.
* %%
* Copyright (C) 2009 - 2016 Board of Regents of the University of
* Wisconsin-Madison, Broad Institute of MIT and Harvard, and Max Planck
* Institute of Molecular Cell Biology and Genetics.
* %%
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* #L%
*/
package net.imagej.ui.swing.updater;
import java.awt.Color;
import java.awt.Cursor;
import java.awt.Point;
import java.awt.Rectangle;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
import java.awt.event.MouseMotionAdapter;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.SortedMap;
import java.util.TreeMap;
import javax.swing.JTextPane;
import javax.swing.event.UndoableEditEvent;
import javax.swing.event.UndoableEditListener;
import javax.swing.text.AttributeSet;
import javax.swing.text.BadLocationException;
import javax.swing.text.Document;
import javax.swing.text.Element;
import javax.swing.text.Position;
import javax.swing.text.SimpleAttributeSet;
import javax.swing.text.StyleConstants;
import javax.swing.text.StyledDocument;
import net.imagej.updater.FileObject;
import net.imagej.updater.FilesCollection;
import net.imagej.updater.util.UpdaterUserInterface;
/**
* TODO
*
* @author Johannes Schindelin
*/
@SuppressWarnings("serial")
public class FileDetails extends JTextPane implements UndoableEditListener {
	/** Shared text styles used when rendering file details. */
	private final static AttributeSet bold, italic, normal, title;
	/** Cursors swapped in {@link #setCursor(Point)} when hovering a link. */
	private final static Cursor hand, defaultCursor;
	/** Attribute key under which a link's target URL is stored in the document. */
	private final static String LINK_ATTRIBUTE = "URL";
	// Editable regions of the document, keyed and ordered by start position.
	SortedMap<Position, EditableRegion> editables;
	Position dummySpace;
	UpdaterFrame updaterFrame;
	static {
		// NOTE(review): "Verdana"/"Impact" are not guaranteed on every platform;
		// Swing silently falls back to a default family when missing.
		italic = getStyle(Color.black, true, false, "Verdana", 12);
		bold = getStyle(Color.black, false, true, "Verdana", 12);
		normal = getStyle(Color.black, false, false, "Verdana", 12);
		title = getStyle(Color.black, false, false, "Impact", 18);
		hand = new Cursor(Cursor.HAND_CURSOR);
		defaultCursor = new Cursor(Cursor.DEFAULT_CURSOR);
	}
	/**
	 * Creates the details pane and wires up mouse handling so embedded links
	 * open via the updater UI and the cursor becomes a hand over them.
	 *
	 * @param updaterFrame the owning frame, used for logging and error dialogs
	 */
	public FileDetails(final UpdaterFrame updaterFrame) {
		this.updaterFrame = updaterFrame;
		addMouseListener(new MouseAdapter() {
			@Override
			public void mouseClicked(final MouseEvent e) {
				// Open the link under the mouse, if any.
				final String url = getLinkAt(e.getPoint());
				try {
					if (url != null) UpdaterUserInterface.get().openURL(url);
				}
				catch (final Exception exception) {
					updaterFrame.log.error(exception);
					UpdaterUserInterface.get().error(
						"Could not open " + url + ": " + exception.getMessage());
				}
			}
		});
		addMouseMotionListener(new MouseMotionAdapter() {
			@Override
			public void mouseMoved(final MouseEvent e) {
				// Update the cursor shape depending on whether a link is hovered.
				setCursor(e.getPoint());
			}
		});
		reset();
		getDocument().addUndoableEditListener(this);
	}
public void reset() {
setEditable(false);
setText("");
final Comparator<Position> comparator = new Comparator<Position>() {
@Override
public int compare(final Position p1, final Position p2) {
return p1.getOffset() - p2.getOffset();
}
};
editables = new TreeMap<>(comparator);
dummySpace = null;
}
	/**
	 * Makes the pane editable (for users with upload rights), removing the
	 * dummy placeholder space first.
	 */
	public void setEditableForDevelopers() {
		removeDummySpace();
		setEditable(true);
	}
	/**
	 * Returns the URL of the link at the given view coordinate, or
	 * {@code null} when the character there carries no link attribute.
	 */
	private String getLinkAt(final Point p) {
		final StyledDocument document = getStyledDocument();
		final Element e = document.getCharacterElement(viewToModel(p));
		return (String) e.getAttributes().getAttribute(LINK_ATTRIBUTE);
	}
protected void setCursor(final Point p) {
setCursor(getLinkAt(p) == null ? defaultCursor : hand);
}
	/**
	 * Builds the attribute set for a hyperlink: blue text carrying the target
	 * URL under {@link #LINK_ATTRIBUTE}.
	 */
	private AttributeSet getLinkAttribute(final String url) {
		// TODO: Verdana? Java is platform-independent, if this introduces a
		// platform dependency, it needs to be thrown out, quickly!
		final SimpleAttributeSet style =
			getStyle(Color.blue, false, false, "Verdana", 12);
		style.addAttribute(LINK_ATTRIBUTE, url);
		return style;
	}
public static SimpleAttributeSet getStyle(final Color color,
final boolean italic, final boolean bold, final String fontName,
final int fontSize)
{
final SimpleAttributeSet style = new SimpleAttributeSet();
StyleConstants.setForeground(style, color);
StyleConstants.setItalic(style, italic);
StyleConstants.setBold(style, bold);
StyleConstants.setFontFamily(style, fontName);
StyleConstants.setFontSize(style, fontSize);
return style;
}
	/**
	 * Appends {@code text} at the end of the document using the given style.
	 *
	 * @throws RuntimeException wrapping the (unexpected) BadLocationException
	 */
	public void styled(final String text, final AttributeSet set) {
		final Document document = getDocument();
		try {
			document.insertString(document.getLength(), text, set);
		}
		catch (final BadLocationException e) {
			updaterFrame.log.error(e);
			throw new RuntimeException(e);
		}
	}
	/** Appends the URL as a clickable link. */
	public void link(final String url) {
		styled(url, getLinkAttribute(url));
	}
	/** Appends italic text. */
	public void italic(final String text) {
		styled(text, italic);
	}
	/** Appends bold text. */
	public void bold(final String text) {
		styled(text, bold);
	}
	/** Appends plain text. */
	public void normal(final String text) {
		styled(text, normal);
	}
	/** Appends title-styled text. */
	public void title(final String text) {
		styled(text, title);
	}
	/**
	 * Appends the file's description section. A description taken from the
	 * pom.xml is shown read-only; otherwise the text becomes an editable region.
	 */
	public void description(final String description, final FileObject file) {
		// Skip the section entirely for read-only setups with no description.
		if (!updaterFrame.files.hasUploadableSites() &&
			(description == null || description.trim().equals(""))) return;
		blankLine();
		bold("Description " + (file.descriptionFromPOM ? " (from pom.xml) " : "") + ":\n");
		final int offset = getCaretPosition();
		normal(description);
		if (!file.descriptionFromPOM)
			addEditableRegion(offset, "Description", file);
	}
public void executable(final FileObject file) {
if (!updaterFrame.files.hasUploadableSites() && !file.executable) return;
blankLine();
bold("Executable:\n");
final int offset = getCaretPosition();
normal(file.executable ? "true" : "false");
addEditableRegion(offset, "Executable", file);
}
public void list(String label, final boolean showLinks,
final Iterable<?> items, final String delim, final FileObject file)
{
final List<Object> list = new ArrayList<>();
for (final Object object : items)
list.add(object);
if (!updaterFrame.files.hasUploadableSites() && list.size() == 0) return;
blankLine();
final String tag = label;
if (list.size() > 1 && label.endsWith("y")) label =
label.substring(0, label.length() - 1) + "ie";
bold(label + (list.size() > 1 ? "s" : "") + ":\n");
final int offset = getCaretPosition();
String delimiter = "";
for (final Object object : list) {
normal(delimiter);
delimiter = delim;
if (showLinks) link(object.toString());
else normal(object.toString());
}
addEditableRegion(offset, tag, file);
}
	/**
	 * Ensures a blank line precedes the next output. If the document already
	 * ends with a newline followed by a space (presumably the guard space
	 * appended by {@link #addEditableRegion} — TODO confirm), only a single
	 * newline is appended.
	 */
	public void blankLine() {
		final int offset = getCaretPosition();
		try {
			if (offset > 1 && getText(offset - 2, 2).equals("\n ")) {
				normal("\n");
				return;
			}
		}
		catch (final BadLocationException e) {
			updaterFrame.log.error(e);
		}
		normal("\n\n");
	}
	// Month names indexed 1-12; index 0 holds a "Zero" placeholder so a
	// parsed "00" month maps to an obviously-wrong value instead of January.
	final String[] months = { "Zero", "Jan", "Feb", "Mar", "Apr", "May", "Jun",
		"Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };
	/**
	 * Formats a numeric timestamp as "DD Mon YYYY". Assumes the timestamp's
	 * decimal digits start with YYYYMMDD (e.g. YYYYMMDDhhmmss) — TODO confirm
	 * the exact precision. Right-padding with zeros tolerates values shorter
	 * than eight digits.
	 */
	String prettyPrintTimestamp(final long timestamp) {
		final String t = "" + timestamp + "00000000";
		return t.substring(6, 8) + " " +
			months[Integer.parseInt(t.substring(4, 6))] + " " + t.substring(0, 4);
	}
	/**
	 * Appends the full details for {@code file}: title, status flags, release
	 * date, description, authors, platforms, categories, links, dependencies,
	 * executable flag and update site — then scrolls back to the top.
	 */
	public void showFileDetails(final FileObject file) {
		setCaretPosition(getDocument().getLength());
		if (!getText().equals("")) blankLine();
		title(file.getLocalFilename(true));
		if (file.isUpdateable()) italic("\n(Update available)");
		else if (file.isLocalOnly()) italic("(Local-only)");
		if (file.isLocallyModified()) {
			blankLine();
			bold("Warning: ");
			italic("This file was locally modified.");
		}
		blankLine();
		if (file.current == null) bold("This file is no longer needed");
		else {
			bold("Release date:\n");
			normal(prettyPrintTimestamp(file.current.timestamp));
		}
		description(file.getDescription(), file);
		list("Author", false, file.getAuthors(), ", ", file);
		// The platform list is only shown when an uploadable site exists.
		if (updaterFrame.files.hasUploadableSites()) list("Platform", false, file
			.getPlatforms(), ", ", file);
		list("Category", false, file.getCategories(), ", ", file);
		list("Link", true, file.getLinks(), "\n", file);
		list("Dependency", false, file.getDependencies(), ",\n", file);
		if (file.executable) executable(file);
		if (file.updateSite != null &&
			!file.updateSite.equals(FilesCollection.DEFAULT_UPDATE_SITE))
		{
			blankLine();
			bold("Update site:\n");
			normal(file.updateSite);
		}
		// scroll to top
		scrollRectToVisible(new Rectangle(0, 0, 1, 1));
		setCaretPosition(0);
	}
class EditableRegion implements Comparable<EditableRegion> {
FileObject file;
String tag;
Position start, end;
public EditableRegion(final FileObject file, final String tag,
final Position start, final Position end)
{
this.file = file;
this.tag = tag;
this.start = start;
this.end = end;
}
@Override
public int compareTo(final EditableRegion other) {
return start.getOffset() - other.start.getOffset();
}
@Override
public String toString() {
return "EditableRegion(" + tag + ":" + start.getOffset() + "-" +
(end == null ? "null" : end.getOffset()) + ")";
}
}
	/**
	 * Registers the text inserted between {@code startOffset} and the current
	 * caret position as an editable region tagged {@code tag} for
	 * {@code file}. A single guard space is appended so the region's end
	 * {@link Position} does not drift when the user types at the region's
	 * end; the previous guard space, if any, is removed.
	 */
	void addEditableRegion(final int startOffset, final String tag,
		final FileObject file)
	{
		final int endOffset = getCaretPosition();
		try {
			// make sure end position does not move further
			normal(" ");
			Position start, end;
			// start is anchored one character before the region's first char.
			start = getDocument().createPosition(startOffset - 1);
			end = getDocument().createPosition(endOffset);
			editables.put(start, new EditableRegion(file, tag, start, end));
			removeDummySpace();
			dummySpace = end;
		}
		catch (final BadLocationException e) {
			updaterFrame.log.error(e);
		}
	}
void removeDummySpace() {
if (dummySpace != null) try {
getDocument().remove(dummySpace.getOffset(), 1);
dummySpace = null;
}
catch (final BadLocationException e) {
updaterFrame.log.error(e);
}
}
	/**
	 * Applies the user's most recent edit to the metadata of the file owning
	 * the editable region under the caret.
	 *
	 * @return true if the edit landed inside an editable region of an
	 *         uploadable file and was applied; false if the edit must be
	 *         undone by the caller
	 */
	boolean handleEdit() {
		EditableRegion editable;
		try {
			final int offset = getCaretPosition();
			final Position current = getDocument().createPosition(offset);
			// Locate the region starting closest before the caret.
			final Position last = editables.headMap(current).lastKey();
			editable = editables.get(last);
			// Caret past the region's end: not an edit of this region.
			if (offset > editable.start.getOffset() &&
				offset > editable.end.getOffset()) return false;
		}
		catch (final NoSuchElementException e) {
			// No region starts before the caret.
			return false;
		}
		catch (final BadLocationException e) {
			return false;
		}
		if (!editable.file.isUploadable(updaterFrame.files, true))
			return false;
		// The region's text sits between the anchor (start + 1) and end.
		final int start = editable.start.getOffset() + 1;
		final int end = editable.end.getOffset();
		String text;
		try {
			text = getDocument().getText(start, end + 1 - start);
		}
		catch (final BadLocationException e) {
			return false;
		}
		editable.file.metadataChanged = true;
		if (editable.tag.equals("Description")) {
			editable.file.description = text.trim();
			return true;
		}
		else if (editable.tag.equals("Executable")) {
			editable.file.executable = "true".equalsIgnoreCase(text.trim());
			return true;
		}
		// All other tags hold lists: links are newline-separated, the rest
		// comma-separated (mirroring the delimiters used by list()).
		final String[] list = text.split(editable.tag.equals("Link") ? "\n" : ",");
		editable.file.replaceList(editable.tag, list);
		return true;
	}
// Do not process key events when on bold parts of the text
// or when not developer
@Override
public void undoableEditHappened(final UndoableEditEvent e) {
if (isEditable()) {
if (!handleEdit()) e.getEdit().undo();
else updaterFrame.markUploadable();
}
}
}
|
package m5tt.com.smsimagetransfer.SMS;
/**
 * Callback notified when an SMS sending task has finished.
 *
 * Created by Mark on 13-Nov-2017.
 */
@FunctionalInterface
public interface OnSMSSendCompleteListener
{
    /**
     * Invoked once sending completes.
     *
     * @param smsSendingResult the outcome of the sending task
     */
    void SMSSendComplete(SMSSendingTask.SMSSendingResult smsSendingResult);
}
|
package com.harmazing.openbridge.portal;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import com.harmazing.framework.authorization.IUser;
import com.harmazing.framework.common.controller.AbstractController;
import com.harmazing.framework.util.StringUtil;
import com.harmazing.framework.util.WebUtil;
import com.harmazing.openbridge.paasos.kubectl.K8RestApiUtil;
import com.harmazing.openbridge.paasos.project.dao.PaasProjectDeployMapper;
import com.harmazing.openbridge.paasos.project.dao.PaasProjectMapper;
@Controller()
@RequestMapping("/portal")
public class PortalController extends AbstractController {
@Autowired
private PaasProjectMapper paasProjectMapper;
@Autowired
private PaasProjectDeployMapper PaasProjectDeployMapper;
@RequestMapping("/*")
public String welcome(HttpServletRequest request,
HttpServletResponse response) {
String path = request.getServletPath();
String prefix = getUrlPrefix();
if (path.startsWith(prefix)) {
path = path.substring(prefix.length());
}
int end = path.indexOf(".do");
path = path.substring(0, end);
return prefix + path;
}
@RequestMapping("/overview")
public String overview(HttpServletRequest request, HttpServletResponse response) {
try {
IUser user = WebUtil.getUserByRequest(request);
if(user.isAnonymous()){
return getUrlPrefix() + "/overview";
}
Map<String, Object> params = new HashMap<String, Object>();
params.put("userId", user.getUserId());
params.put("tenantId", user.getTenantId());
int countstore= paasProjectMapper.getCountstore(params);
int count=paasProjectMapper.getCount(params);
int openbridge=count-countstore;
int countrun= PaasProjectDeployMapper.getCountrun(params);
int countstop=PaasProjectDeployMapper.getCountstop(params);
Map<String,Object> host=new HashMap<String, Object>();
Map<String,Object> Image=new HashMap<String, Object>();
if(StringUtil.isNotNull(user.getTenantId())){
try{
// int m = 1/0;
host=K8RestApiUtil.getTenantNodeStatistics(user.getTenantId());
Image=K8RestApiUtil.getTenantImageStatistics(user.getTenantId());
int hosttotal= (int) host.get("total");
int hostonline= (int) host.get("online");
int hostoutline=hosttotal-hostonline;
request.setAttribute("hostoutline", hostoutline);
}
catch(Exception e){
logger.error("k8s失败",e);
request.setAttribute("hostoutline", "-");
}
}else{
host.put("total", 0);
host.put("online", 0);
host.put("offline", 0);
host.put("unknown", 0);
}
request.setAttribute("host", host);
request.setAttribute("Image", Image);
request.setAttribute("openbridge", openbridge);
request.setAttribute("store", countstore);
request.setAttribute("Tenantcount", count);
request.setAttribute("countrun", countrun);
request.setAttribute("countstop", countstop);
request.setAttribute("TenantName",user.getTenantName());
return getUrlPrefix() + "/overview";
} catch (Exception e) {
logger.error("服务列表页面出错", e);
request.setAttribute("exception", e);
return forward(ERROR);
}
}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.ra;
import javax.jms.JMSException;
import javax.jms.MapMessage;
import java.util.Arrays;
import java.util.Enumeration;
/**
 * A wrapper for a {@link MapMessage} that traces every call before
 * delegating to the wrapped message.
 */
public class ActiveMQRAMapMessage extends ActiveMQRAMessage implements MapMessage {

   /**
    * Create a new wrapper.
    *
    * @param message the message to wrap
    * @param session the session the message belongs to
    */
   public ActiveMQRAMapMessage(final MapMessage message, final ActiveMQRASession session) {
      super(message, session);
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("constructor(" + message + ", " + session + ")");
      }
   }

   /** The wrapped message, viewed through its {@link MapMessage} interface. */
   private MapMessage map() {
      return (MapMessage) message;
   }

   /** Reads the named boolean entry. */
   @Override
   public boolean getBoolean(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getBoolean(" + name + ")");
      }
      return map().getBoolean(name);
   }

   /** Reads the named byte entry. */
   @Override
   public byte getByte(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getByte(" + name + ")");
      }
      return map().getByte(name);
   }

   /** Reads the named byte-array entry. */
   @Override
   public byte[] getBytes(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getBytes(" + name + ")");
      }
      return map().getBytes(name);
   }

   /** Reads the named char entry. */
   @Override
   public char getChar(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getChar(" + name + ")");
      }
      return map().getChar(name);
   }

   /** Reads the named double entry. */
   @Override
   public double getDouble(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getDouble(" + name + ")");
      }
      return map().getDouble(name);
   }

   /** Reads the named float entry. */
   @Override
   public float getFloat(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getFloat(" + name + ")");
      }
      return map().getFloat(name);
   }

   /** Reads the named int entry. */
   @Override
   public int getInt(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getInt(" + name + ")");
      }
      return map().getInt(name);
   }

   /** Reads the named long entry. */
   @Override
   public long getLong(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getLong(" + name + ")");
      }
      return map().getLong(name);
   }

   /** Returns an enumeration of all map entry names. */
   @Override
   public Enumeration getMapNames() throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getMapNames()");
      }
      return map().getMapNames();
   }

   /** Reads the named entry as an Object. */
   @Override
   public Object getObject(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getObject(" + name + ")");
      }
      return map().getObject(name);
   }

   /** Reads the named short entry. */
   @Override
   public short getShort(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getShort(" + name + ")");
      }
      return map().getShort(name);
   }

   /** Reads the named String entry. */
   @Override
   public String getString(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("getString(" + name + ")");
      }
      return map().getString(name);
   }

   /** Returns whether the named entry exists. */
   @Override
   public boolean itemExists(final String name) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("itemExists(" + name + ")");
      }
      return map().itemExists(name);
   }

   /** Stores a boolean entry under the given name. */
   @Override
   public void setBoolean(final String name, final boolean value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setBoolean(" + name + ", " + value + ")");
      }
      map().setBoolean(name, value);
   }

   /** Stores a byte entry under the given name. */
   @Override
   public void setByte(final String name, final byte value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setByte(" + name + ", " + value + ")");
      }
      map().setByte(name, value);
   }

   /** Stores a slice of a byte array under the given name. */
   @Override
   public void setBytes(final String name, final byte[] value, final int offset, final int length) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setBytes(" + name + ", " + Arrays.toString(value) + ", " + offset + ", " +
                                          length + ")");
      }
      map().setBytes(name, value, offset, length);
   }

   /** Stores a byte array under the given name. */
   @Override
   public void setBytes(final String name, final byte[] value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setBytes(" + name + ", " + Arrays.toString(value) + ")");
      }
      map().setBytes(name, value);
   }

   /** Stores a char entry under the given name. */
   @Override
   public void setChar(final String name, final char value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setChar(" + name + ", " + value + ")");
      }
      map().setChar(name, value);
   }

   /** Stores a double entry under the given name. */
   @Override
   public void setDouble(final String name, final double value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setDouble(" + name + ", " + value + ")");
      }
      map().setDouble(name, value);
   }

   /** Stores a float entry under the given name. */
   @Override
   public void setFloat(final String name, final float value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setFloat(" + name + ", " + value + ")");
      }
      map().setFloat(name, value);
   }

   /** Stores an int entry under the given name. */
   @Override
   public void setInt(final String name, final int value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setInt(" + name + ", " + value + ")");
      }
      map().setInt(name, value);
   }

   /** Stores a long entry under the given name. */
   @Override
   public void setLong(final String name, final long value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setLong(" + name + ", " + value + ")");
      }
      map().setLong(name, value);
   }

   /** Stores an Object entry under the given name. */
   @Override
   public void setObject(final String name, final Object value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setObject(" + name + ", " + value + ")");
      }
      map().setObject(name, value);
   }

   /** Stores a short entry under the given name. */
   @Override
   public void setShort(final String name, final short value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setShort(" + name + ", " + value + ")");
      }
      map().setShort(name, value);
   }

   /** Stores a String entry under the given name. */
   @Override
   public void setString(final String name, final String value) throws JMSException {
      if (ActiveMQRALogger.LOGGER.isTraceEnabled()) {
         ActiveMQRALogger.LOGGER.trace("setString(" + name + ", " + value + ")");
      }
      map().setString(name, value);
   }
}
|
package com.eagleeye.event;
/**
 * An event emitted by a trigger.
 */
public interface TriggeredEvent {
    /**
     * @return the event type identifier
     */
    String getType();
}
|
package com.sequenceiq.cloudbreak.service.stack.flow;
import com.sequenceiq.cloudbreak.domain.Stack;
import com.sequenceiq.cloudbreak.service.StackContext;
import net.schmizz.sshj.transport.verification.HostKeyVerifier;
/**
 * Immutable context for checking SSH reachability of a stack: bundles the
 * connection parameters (host, port, user, private key location) together
 * with the host key verifier.
 */
public class SshCheckerTaskContext extends StackContext {

    // All fields are assigned once in the constructor and never mutated,
    // so they are declared final to make the context immutable.
    private final HostKeyVerifier hostKeyVerifier;

    private final String publicIp;

    private final int sshPort;

    private final String user;

    private final String sshPrivateFileLocation;

    public SshCheckerTaskContext(Stack stack, HostKeyVerifier hostKeyVerifier, String publicIp, int sshPort, String user, String sshPrivateFileLocation) {
        super(stack);
        this.hostKeyVerifier = hostKeyVerifier;
        this.publicIp = publicIp;
        this.sshPort = sshPort;
        this.user = user;
        this.sshPrivateFileLocation = sshPrivateFileLocation;
    }

    public HostKeyVerifier getHostKeyVerifier() {
        return hostKeyVerifier;
    }

    public String getPublicIp() {
        return publicIp;
    }

    public int getSshPort() {
        return sshPort;
    }

    public String getSshPrivateFileLocation() {
        return sshPrivateFileLocation;
    }

    public String getUser() {
        return user;
    }
}
|
package leetcode500.p22_dp.plan2;
import java.util.Arrays;
public class P646_MaximumLengthOfPairChain {

    /**
     * Returns the length of the longest chain that can be formed from the
     * given pairs, where pair (c, d) can follow (a, b) iff b &lt; c
     * (LeetCode 646).
     *
     * Sorts by the first element, then runs an LIS-style DP where dp[i] is
     * the longest chain achievable among pairs[0..i] (either extending a
     * chain that ends before pairs[i] starts, or carrying the best so far).
     *
     * @param pairs the candidate pairs; may be null or empty
     * @return the maximum chain length, or 0 for null/empty input
     */
    public int findLongestChain(int[][] pairs) {
        if (pairs == null || pairs.length == 0) return 0;
        // Integer.compare instead of (a[0] - b[0]): the subtraction idiom
        // overflows for values of large magnitude and is a known anti-pattern.
        Arrays.sort(pairs, (a, b) -> Integer.compare(a[0], b[0]));
        int[] dp = new int[pairs.length];
        Arrays.fill(dp, 1);
        for (int i = 0; i < dp.length; i++) {
            for (int j = 0; j < i; j++) {
                dp[i] = Math.max(dp[i], pairs[i][0] > pairs[j][1] ? dp[j] + 1 : dp[j]);
            }
        }
        // dp carries the running maximum forward, so the last entry is the answer.
        return dp[pairs.length - 1];
    }
}
|
package com.qingchi.base.constant;
/**
 * Ban/violation reason types: a flag may originate from a user report, a
 * manual (human) review, or an automated review.
 *
 * @author qinkaiyuan
 * @date 2018-09-16 10:58
 */
public class ViolationReasonType {
    /** Flagged via a user report. */
    public static final String report = "report";
    /** Flagged by a manual (human) review. */
    public static final String manual = "manual";
    /** Flagged automatically by the system. */
    public static final String auto = "auto";

    /** Constants-only holder; not instantiable. */
    private ViolationReasonType() {
    }
}
|
package org.povworld.collection;
/**
 * An unordered collection of elements which explicitly allows duplicates
 * (distinct elements that compare equal to each other).
 *
 * This is a pure marker interface: it adds no members beyond
 * {@code UnOrderedCollection}.
 *
 * @param <E> the element type
 *
 * @see Sequence
 * @see Set
 * @see OrderedSet
 */
public interface Bag<E> extends UnOrderedCollection<E> {
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.streampipes.storage.couchdb;
import org.apache.streampipes.storage.api.*;
import org.apache.streampipes.storage.couchdb.impl.*;
/**
 * CouchDB-backed implementation of {@code INoSqlStorage}. A singleton enum
 * whose accessors each hand out a fresh storage implementation instance.
 */
public enum CouchDbStorageManager implements INoSqlStorage {

  INSTANCE;

  @Override
  public ICategoryStorage getCategoryStorageAPI() { return new CategoryStorageImpl(); }

  @Override
  public ILabelStorage getLabelStorageAPI() { return new LabelStorageImpl(); }

  @Override
  public IPipelineStorage getPipelineStorageAPI() {
    return new PipelineStorageImpl();
  }

  @Override
  public IPipelineElementConnectionStorage getConnectionStorageApi() {
    return new ConnectionStorageImpl();
  }

  @Override
  public IUserStorage getUserStorageAPI() {
    return new UserStorage();
  }

  @Override
  public IPipelineMonitoringDataStorage getMonitoringDataStorageApi() {
    return new MonitoringDataStorageImpl();
  }

  @Override
  public INotificationStorage getNotificationStorageApi() {
    return new NotificationStorageImpl();
  }

  @Override
  public IPipelineCategoryStorage getPipelineCategoryStorageApi() {
    return new PipelineCategoryStorageImpl();
  }

  @Override
  public IVisualizationStorage getVisualizationStorageApi() {
    return new VisualizationStorageImpl();
  }

  @Override
  public IRdfEndpointStorage getRdfEndpointStorage() {
    return new RdfEndpointStorageImpl();
  }

  @Override
  public IAssetDashboardStorage getAssetDashboardStorage() {
    return new AssetDashboardStorageImpl();
  }

  @Override
  public IDataLakeStorage getDataLakeStorage() {
    return new DataLakeStorageImpl();
  }

  @Override
  public IFileMetadataStorage getFileMetadataStorage() {
    return new FileMetadataStorageImpl();
  }

  @Override
  public IDashboardStorage getDashboardStorage() {
    return new DashboardStorageImpl();
  }

  @Override
  public IDashboardStorage getDataExplorerDashboardStorage() {
    return new DataExplorerDashboardStorageImpl();
  }

  @Override
  public IDashboardWidgetStorage getDashboardWidgetStorage() {
    return new DashboardWidgetStorageImpl();
  }

  @Override
  public IDataExplorerWidgetStorage getDataExplorerWidgetStorage() {
    return new DataExplorerWidgetStorageImpl();
  }

  @Override
  public IVisualizablePipelineStorage getVisualizablePipelineStorage() {
    return new VisualizablePipelineStorageImpl();
  }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.lealone.hansql.exec.physical.base;
/**
 * Writer physical operator. A marker interface: it adds no members beyond
 * {@code PhysicalOperator}.
 */
public interface Writer extends PhysicalOperator{
}
|
/*
* Copyright (C) 2011 Thomas Akehurst
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.tomakehurst.wiremock.common;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.Map;
import static com.github.tomakehurst.wiremock.common.Exceptions.throwUnchecked;
public final class Json {

    // Each thread gets its own ObjectMapper: mutating a shared mapper's
    // configuration concurrently would not be safe.
    private static final ThreadLocal<ObjectMapper> objectMapperHolder = new ThreadLocal<ObjectMapper>() {
        @Override
        protected ObjectMapper initialValue() {
            final ObjectMapper mapper = new ObjectMapper();
            mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
            mapper.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
            mapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
            return mapper;
        }
    };

    private Json() {}

    /** Deserializes {@code json} into an instance of {@code clazz}. */
    public static <T> T read(String json, Class<T> clazz) {
        try {
            return getObjectMapper().readValue(json, clazz);
        } catch (IOException ioe) {
            return throwUnchecked(ioe, clazz);
        }
    }

    /** Serializes {@code object} to pretty-printed JSON. */
    public static <T> String write(T object) {
        try {
            return getObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(object);
        } catch (IOException ioe) {
            return throwUnchecked(ioe, String.class);
        }
    }

    /** Returns the calling thread's pre-configured ObjectMapper. */
    public static ObjectMapper getObjectMapper() {
        return objectMapperHolder.get();
    }

    /** Serializes {@code object} to its JSON byte representation. */
    public static byte[] toByteArray(Object object) {
        try {
            return getObjectMapper().writeValueAsBytes(object);
        } catch (IOException ioe) {
            return throwUnchecked(ioe, byte[].class);
        }
    }

    /** Parses {@code json} into a Jackson tree. */
    public static JsonNode node(String json) {
        return read(json, JsonNode.class);
    }

    /** Returns the larger of the two nodes' deep sizes. */
    public static int maxDeepSize(JsonNode one, JsonNode two) {
        final int first = deepSize(one);
        final int second = deepSize(two);
        return first > second ? first : second;
    }

    /**
     * Counts the nodes reachable from {@code node}: a scalar counts as 1; a
     * container contributes one per (recursively counted) descendant, but the
     * top-level container itself is not counted. Null yields 0.
     */
    public static int deepSize(JsonNode node) {
        if (node == null) {
            return 0;
        }
        if (!node.isContainerNode()) {
            return 1;
        }
        int count = 0;
        for (JsonNode child : node) {
            count += child.isContainerNode() ? 1 + deepSize(child) : 1;
        }
        return count;
    }

    /** Re-renders {@code json} using the default pretty printer. */
    public static String prettyPrint(String json) {
        final ObjectMapper mapper = getObjectMapper();
        try {
            final JsonNode tree = mapper.readValue(json, JsonNode.class);
            return mapper.writerWithDefaultPrettyPrinter().writeValueAsString(tree);
        } catch (IOException e) {
            return throwUnchecked(e, String.class);
        }
    }

    /** Converts a String-keyed map into an instance of {@code targetClass}. */
    public static <T> T mapToObject(Map<String, Object> map, Class<T> targetClass) {
        return getObjectMapper().convertValue(map, targetClass);
    }

    /** Converts {@code theObject} into a String-keyed map. */
    public static <T> Map<String, Object> objectToMap(T theObject) {
        return getObjectMapper().convertValue(theObject, new TypeReference<Map<String, Object>>() {});
    }
}
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.quicksight.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.quicksight.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * DeleteTemplateAliasResult JSON Unmarshaller
 *
 * NOTE: code-generator output ({@code @Generated}); regenerate rather than
 * hand-edit.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DeleteTemplateAliasResultJsonUnmarshaller implements Unmarshaller<DeleteTemplateAliasResult, JsonUnmarshallerContext> {

    /**
     * Unmarshalls the JSON response into a DeleteTemplateAliasResult, reading
     * the known fields that appear one level below the current parse depth.
     */
    public DeleteTemplateAliasResult unmarshall(JsonUnmarshallerContext context) throws Exception {
        DeleteTemplateAliasResult deleteTemplateAliasResult = new DeleteTemplateAliasResult();
        // The HTTP status code is part of this operation's result object.
        deleteTemplateAliasResult.setStatus(context.getHttpResponse().getStatusCode());
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;
        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            // Empty/null body: return the result with only the status set.
            return deleteTemplateAliasResult;
        }
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("TemplateId", targetDepth)) {
                    context.nextToken();
                    deleteTemplateAliasResult.setTemplateId(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("AliasName", targetDepth)) {
                    context.nextToken();
                    deleteTemplateAliasResult.setAliasName(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("Arn", targetDepth)) {
                    context.nextToken();
                    deleteTemplateAliasResult.setArn(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("RequestId", targetDepth)) {
                    context.nextToken();
                    deleteTemplateAliasResult.setRequestId(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop once parsing has closed back out of the object it started in.
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return deleteTemplateAliasResult;
    }

    private static DeleteTemplateAliasResultJsonUnmarshaller instance;

    /**
     * Returns the shared instance. Lazy init without synchronization: racy,
     * but benign since the unmarshaller holds no instance state.
     */
    public static DeleteTemplateAliasResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new DeleteTemplateAliasResultJsonUnmarshaller();
        return instance;
    }
}
|
/*
* Copyright 2002-2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.maxkey.authz.oauth2.provider.approval;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.maxkey.authz.oauth2.common.OAuth2AccessToken;
import org.maxkey.authz.oauth2.common.util.OAuth2Utils;
import org.maxkey.authz.oauth2.provider.AuthorizationRequest;
import org.maxkey.authz.oauth2.provider.ClientDetailsService;
import org.maxkey.authz.oauth2.provider.ClientRegistrationException;
import org.maxkey.authz.oauth2.provider.OAuth2Authentication;
import org.maxkey.authz.oauth2.provider.OAuth2Request;
import org.maxkey.authz.oauth2.provider.OAuth2RequestFactory;
import org.maxkey.authz.oauth2.provider.token.TokenStore;
import org.maxkey.domain.apps.oauth2.provider.ClientDetails;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.security.core.Authentication;
import org.springframework.util.Assert;
/**
 * A user approval handler that remembers approval decisions by consulting existing tokens:
 * a request is considered pre-approved when every requested scope is auto-approved for the
 * client, or when a still-valid access token already exists for the same authentication.
 *
 * @author Dave Syer
 */
public class TokenStoreUserApprovalHandler implements UserApprovalHandler, InitializingBean {

    private static Log logger = LogFactory.getLog(TokenStoreUserApprovalHandler.class);

    /** Name of the request parameter carrying the user's explicit approval decision. */
    private String approvalParameter = OAuth2Utils.USER_OAUTH_APPROVAL;

    private TokenStore tokenStore;

    private ClientDetailsService clientDetailsService;

    /**
     * Service to load client details (optional) for auto approval checks.
     *
     * @param clientDetailsService a client details service
     */
    public void setClientDetailsService(ClientDetailsService clientDetailsService) {
        this.clientDetailsService = clientDetailsService;
    }

    /**
     * @param approvalParameter the approvalParameter to set
     */
    public void setApprovalParameter(String approvalParameter) {
        this.approvalParameter = approvalParameter;
    }

    /**
     * @param tokenStore the token store to set
     */
    public void setTokenStore(TokenStore tokenStore) {
        this.tokenStore = tokenStore;
    }

    private OAuth2RequestFactory requestFactory;

    public void setRequestFactory(OAuth2RequestFactory requestFactory) {
        this.requestFactory = requestFactory;
    }

    @Override
    public void afterPropertiesSet() {
        Assert.state(tokenStore != null, "TokenStore must be provided");
        Assert.state(requestFactory != null, "OAuth2RequestFactory must be provided");
    }

    /**
     * Basic implementation just requires the authorization request to be explicitly approved and the user to be
     * authenticated.
     *
     * @param authorizationRequest The authorization request.
     * @param userAuthentication the current user authentication
     *
     * @return Whether the specified request has been approved by the current user.
     */
    @Override
    public boolean isApproved(AuthorizationRequest authorizationRequest, Authentication userAuthentication) {
        return authorizationRequest.isApproved();
    }

    /**
     * Checks whether the request can be approved without asking the user: first by the
     * client's auto-approve scope configuration, then by looking up an existing,
     * unexpired access token for the equivalent authentication.
     *
     * @param authorizationRequest the incoming authorization request (mutated: approved flag may be set)
     * @param userAuthentication the current user authentication
     * @return the (possibly approved) authorization request
     */
    @Override
    public AuthorizationRequest checkForPreApproval(AuthorizationRequest authorizationRequest, Authentication userAuthentication) {

        boolean approved = false;

        String clientId = authorizationRequest.getClientId();
        Set<String> scopes = authorizationRequest.getScope();

        if (clientDetailsService != null) {
            try {
                ClientDetails client = clientDetailsService.loadClientByClientId(clientId);
                approved = true;
                for (String scope : scopes) {
                    if (!client.isAutoApprove(scope)) {
                        // one non-auto-approved scope is enough to reject; no need to keep scanning
                        approved = false;
                        break;
                    }
                }
                if (approved) {
                    authorizationRequest.setApproved(true);
                    return authorizationRequest;
                }
            }
            catch (ClientRegistrationException e) {
                // Auto-approval is a best-effort shortcut; fall through to the token lookup.
                logger.warn("Client registration problem prevented auto-approval check for client=" + clientId);
            }
        }

        OAuth2Request storedOAuth2Request = requestFactory.createOAuth2Request(authorizationRequest);
        OAuth2Authentication authentication = new OAuth2Authentication(storedOAuth2Request, userAuthentication);

        if (logger.isDebugEnabled()) {
            StringBuilder builder = new StringBuilder("Looking up existing token for ");
            builder.append("client_id=" + clientId);
            builder.append(", scope=" + scopes);
            builder.append(" and username=" + userAuthentication.getName());
            logger.debug(builder.toString());
        }

        OAuth2AccessToken accessToken = tokenStore.getAccessToken(authentication);
        logger.debug("Existing access token=" + accessToken);
        if (accessToken != null && !accessToken.isExpired()) {
            logger.debug("User already approved with token=" + accessToken);
            // A token was already granted and is still valid, so this is already approved
            approved = true;
        }
        else {
            logger.debug("Checking explicit approval");
            // 'approved' is false on every path that reaches here, so this requires an
            // authenticated user AND a prior approval decision (preserved original semantics).
            approved = userAuthentication.isAuthenticated() && approved;
        }

        authorizationRequest.setApproved(approved);

        return authorizationRequest;
    }

    /**
     * Reads the user's explicit decision from the approval parameters and records it on
     * the request. Any value other than a case-insensitive "true" (including absence)
     * counts as a rejection.
     */
    @Override
    public AuthorizationRequest updateAfterApproval(AuthorizationRequest authorizationRequest, Authentication userAuthentication) {
        Map<String, String> approvalParameters = authorizationRequest.getApprovalParameters();
        String flag = approvalParameters.get(approvalParameter);
        // equalsIgnoreCase is null-safe and locale-independent, unlike toLowerCase().equals(...)
        boolean approved = "true".equalsIgnoreCase(flag);
        authorizationRequest.setApproved(approved);
        return authorizationRequest;
    }

    /**
     * Builds the model shown on the approval page; includes the original request
     * parameters so a redirect can reproduce the request.
     */
    @Override
    public Map<String, Object> getUserApprovalRequest(AuthorizationRequest authorizationRequest,
            Authentication userAuthentication) {
        Map<String, Object> model = new HashMap<String, Object>();
        // In case of a redirect we might want the request parameters to be included
        model.putAll(authorizationRequest.getRequestParameters());
        return model;
    }

}
|
/* Copyright 2018 Mickael Gaillard <mick.gaillard@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ros2.rcljava.node;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.ros2.rcljava.exception.NotImplementedException;
import org.ros2.rcljava.internal.message.Message;
import org.ros2.rcljava.internal.service.MessageService;
import org.ros2.rcljava.node.service.Client;
import org.ros2.rcljava.node.service.Service;
import org.ros2.rcljava.node.service.ServiceCallback;
import org.ros2.rcljava.node.topic.Publisher;
import org.ros2.rcljava.node.topic.Subscription;
import org.ros2.rcljava.node.topic.SubscriptionCallback;
import org.ros2.rcljava.qos.QoSProfile;
import org.ros2.rcljava.time.WallTimer;
import org.ros2.rcljava.time.WallTimerCallback;
/**
 * This class is the JVM Node of RCLJava.
 * <b>Not implemented yet!</b> The constructor always throws
 * {@link NotImplementedException}, so no instance can currently be created;
 * every overridden method below is a stub returning {@code null}.
 */
public class JavaNode extends BaseNode {

    /**
     * @param namespace node namespace
     * @param defaultName default node name
     * @param args extra arguments forwarded to {@link BaseNode}
     * @throws NotImplementedException always — this node type is not implemented
     */
    public JavaNode(final String namespace, final String defaultName, final String... args) {
        super(namespace, defaultName, args);
        throw new NotImplementedException();
    }

    /* (non-Javadoc)
     * @see org.ros2.rcljava.node.Node#getTopicNamesAndTypes(boolean)
     */
    @Override
    public Map<String, List<String>> getTopicNamesAndTypes(final boolean noDemangle) {
        // TODO Auto-generated method stub — unreachable until the constructor is implemented
        return null;
    }

    /* (non-Javadoc)
     * @see org.ros2.rcljava.node.internal.NodeServices#createClient(java.lang.Class, java.lang.String, org.ros2.rcljava.qos.QoSProfile)
     */
    @Override
    public <T extends MessageService> Client<T> createClient(
            final Class<T> serviceType,
            final String serviceName,
            final QoSProfile qos) {
        // TODO Auto-generated method stub
        return null;
    }

    /* (non-Javadoc)
     * @see org.ros2.rcljava.node.internal.NodeServices#createService(java.lang.Class, java.lang.String, org.ros2.rcljava.node.service.ServiceCallback, org.ros2.rcljava.qos.QoSProfile)
     */
    @Override
    public <T extends MessageService> Service<T> createService(
            final Class<T> serviceType,
            final String serviceName,
            final ServiceCallback<?, ?> callback,
            final QoSProfile qos) {
        // TODO Auto-generated method stub
        return null;
    }

    /* (non-Javadoc)
     * @see org.ros2.rcljava.node.internal.NodeTopics#createPublisher(java.lang.Class, java.lang.String, org.ros2.rcljava.qos.QoSProfile)
     */
    @Override
    public <T extends Message> Publisher<T> createPublisher(
            final Class<T> messageType,
            final String topicName,
            final QoSProfile qos) {
        // TODO Auto-generated method stub
        return null;
    }

    /* (non-Javadoc)
     * @see org.ros2.rcljava.node.internal.NodeTopics#createSubscription(java.lang.Class, java.lang.String, org.ros2.rcljava.node.topic.SubscriptionCallback, org.ros2.rcljava.qos.QoSProfile, boolean)
     */
    @Override
    public <T extends Message> Subscription<T> createSubscription(
            final Class<T> messageType,
            final String topicName,
            final SubscriptionCallback<T> callback,
            final QoSProfile qos,
            final boolean ignoreLocalPublications) {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    public List<String> getNodeNames() {
        // TODO Auto-generated method stub — NOTE(review): a stub returning an empty list
        // would be safer for callers than null, but that would change observable behavior
        return null;
    }

    @Override
    public WallTimer createWallTimer(final long period, final TimeUnit unit, final WallTimerCallback callback) {
        // TODO Auto-generated method stub
        return null;
    }
}
|
package com.riiablo.attributes;
import java.util.Arrays;
import java.util.Iterator;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import com.riiablo.codec.excel.ItemStatCost;
import com.riiablo.logger.LogManager;
import com.riiablo.logger.Logger;
import com.riiablo.math.Fixed;
public final class StatList {
private static final Logger log = LogManager.getLogger(StatList.class);
public static StatListRef obtain() {
return new StatList().reset(1).buildList();
}
static final int MAX_LISTS = Byte.SIZE;
static final int DEFAULT_SIZE = 32;
static final int MAX_SIZE = 1 << Byte.SIZE;
/** @see #encodeFlags */
private static final int ENCODING_MASK = (1 << 3) - 1;
private static final int FLAG_PARAMS = 1 << 3;
private static final int FLAG_FIXED = 1 << 4;
private static final int FLAG_LONG = 1 << 5;
private static final int FLAG_MODIFIED = 1 << 6;
private static final long UINT_MAX_VALUE = (1L << Integer.SIZE) - 1;
private final byte[] offsets = new byte[index(MAX_LISTS)];
private final short[] ids;
private final int[] params;
private final int[] values;
private final byte[] flags;
private final int maxSize;
private int maxLists;
private int numLists;
private int tail;
private byte immutable;
private int listsMask;
private IndexIterator INDEX_ITERATOR;
private StatIterator STAT_ITERATOR;
private StatListIterator STAT_LIST_ITERATOR;
StatList() {
this(DEFAULT_SIZE);
}
StatList(int maxSize) {
assert maxSize >= 0 && maxSize <= MAX_SIZE;
this.maxSize = maxSize;
ids = new short[maxSize];
params = new int[maxSize];
values = new int[maxSize];
flags = new byte[maxSize];
}
StatList reset(final int maxLists) {
log.traceEntry("reset(maxLists: {})", maxLists);
assertMutable();
assert maxLists > 0 : "maxLists(" + maxLists + ") <= " + 0;
assert maxLists <= MAX_LISTS : "maxLists(" + maxLists + ") > MAX_LISTS(" + MAX_LISTS + ")";
this.maxLists = maxLists;
listsMask = index(maxLists) - 1;
clear();
return this;
}
StatList truncate(final int maxLists) {
log.traceEntry("truncate(maxLists: {})", maxLists);
assert maxLists > 0 : "maxLists(" + maxLists + ") <= " + 0;
assert maxLists <= this.maxLists : "maxLists(" + maxLists + ") > this.maxLists(" + this.maxLists + ")";
for (int i = maxLists, s = this.maxLists; i < s; i++) {
assertMutable(i); // TODO: replace with bitmask check
}
immutable &= (listsMask = index(maxLists) - 1);
if (tail >= index(maxLists)) {
tail = endingOffset(maxLists);
}
return this;
}
StatList freeze() {
immutable = (byte) listsMask;
return this;
}
StatList freeze(final int list) {
immutable |= index(list);
return this;
}
private boolean isMutable(final int list) {
return (immutable & index(list)) == 0;
}
private void assertMutable() {
if (!isMutable(listsMask)) {
throw new UnsupportedOperationException("Stat list has been frozen");
}
}
private void assertMutable(final int list) {
if (!isMutable(list)) {
throw new UnsupportedOperationException("Stat list(" + list + ") has been frozen");
}
}
StatList forceClear() {
immutable = 0;
return clear();
}
StatList clear() {
log.traceEntry("clear()");
assertMutable();
numLists = 0;
tail = 0;
return this;
}
StatList clear(final int list) {
log.traceEntry("clear(list: {})", list);
assertMutable(list);
setEndingOffset(list, startingOffset(list));
return this;
}
int size(final int list) {
return endingOffset(list) - startingOffset(list);
}
boolean isEmpty(final int list) {
return size(list) == 0;
}
public int numLists() {
return numLists;
}
public int maxLists() {
return maxLists;
}
int maxSize() {
return maxSize;
}
int newList() {
return newList(0);
}
int newList(final int capacity) {
log.traceEntry("newList(capacity: {})", capacity);
assertMutable(numLists);
if (numLists >= maxLists) {
throw new IndexOutOfBoundsException("Max number of lists has already been created: maxLists(" + maxLists + ")");
}
if (tail + capacity > maxSize) {
throw new IndexOutOfBoundsException("capacity(" + capacity + ") would exceed maxSize(" + maxSize + ")");
}
final int list = numLists++;
setEndingOffset(list, setStartingOffset(list, tail));
ensureCapacity(list, tail, capacity);
return list;
}
StatListRef buildList() {
return buildList(0);
}
StatListRef buildList(final int capacity) {
assertMutable(numLists + 1);
return new StatListRef(this, newList(capacity));
}
StatListRef first() {
return get(0);
}
public StatListRef get(final int list) {
if (!contains(list)) throw new IndexOutOfBoundsException("StatList does not contain list(" + list + "): numLists(" + numLists + ")");
return new StatListRef(this, list);
}
public boolean contains(final int list) {
return list >= 0 && list < numLists;
}
boolean contains(final int list, final int index) {
return contains(list) && index >= startingOffset(list) && index < endingOffset(list);
}
boolean containsAny(final int list, final short stat) {
return contains(list) && Arrays.binarySearch(ids, startingOffset(list), endingOffset(list), stat) >= 0;
}
boolean containsEncoded(final int list, final short stat, final int encodedParams) {
return contains(list) && indexOfEncoded(list, stat, encodedParams) >= 0;
}
boolean contains(final int list, final StatRef ref) {
return containsEncoded(list, ref.id(), ref.encodedParams());
}
int putEncoded(final int list, final short stat, final int encodedParams, final int encodedValues) {
final ItemStatCost.Entry entry = entry(stat);
if (log.traceEnabled()) log.tracefEntry(
"putEncoded(stat: %d (%s), encodedParams: %d (0x%3$x), encodedValues: %d (0x%4$x))",
stat, entry, encodedParams, encodedValues);
assertMutable(list);
final int encoding = entry.Encode;
if (log.warnEnabled() && !Stat.encodingSupported(encoding)) log.warn(
"stat: {} ({}) has unsupported encoding({})", stat, entry, encoding);
final int index = indexOfEncoded(list, stat, encodedParams);
if (index >= 0 && equalsEncoded(index, stat, encodedParams)) {
return setEncoded(list, index, stat, entry, encodedParams, encodedValues);
} else {
return insertEncodedAt(list, ~index, stat, entry, encodedParams, encodedValues);
}
}
int putEncoded(final int list, final short stat, final int encodedValues) {
assertSimple(stat);
return putEncoded(list, stat, 0, encodedValues);
}
int put(
final int list, final short stat,
final int param0, final int param1,
final int value0, final int value1, final int value2) {
final ItemStatCost.Entry entry = entry(stat);
if (log.traceEnabled()) log.traceEntry(
"put(list: {}, stat: {}, param0: {}, param1: {}, value0: {}, value1: {}, value2: {})",
list, stat, param0, param1, value0, value1, value2);
final int encoding = entry.Encode;
return putEncoded(list, stat,
Stat.encodeParams(encoding, param0, param1),
Stat.encodeValues(encoding, value0, value1, value2));
}
int put(final int list, final short stat, final int value) {
assertSimple(stat);
return putEncoded(list, stat, 0, Stat.encode(stat, value));
}
int put(final int list, final short stat, final long value) {
assertSimple(stat);
assert value <= UINT_MAX_VALUE : "value(" + value + ") > " + UINT_MAX_VALUE;
return putEncoded(list, stat, 0, asInt(value));
}
int put(final int list, final short stat, final float value) {
assertSimple(stat);
return putEncoded(list, stat, 0, asInt(stat, value));
}
int setEncoded(final int list, final int index, final int encodedValues) {
final short stat = ids[index];
assertSimple(stat);
assert equalsEncoded(index, stat, 0);
return setEncoded(list, index, stat, entry(stat), 0, encodedValues);
}
int set(final int list, final int index, final int value) {
final short stat = ids[index];
assertSimple(stat);
assert equalsEncoded(index, stat, 0);
return setEncoded(list, index, stat, entry(stat), 0, Stat.encode(stat, value));
}
int set(final int list, final int index, final long value) {
final short stat = ids[index];
assertSimple(stat);
assert value <= UINT_MAX_VALUE : "value(" + value + ") > " + UINT_MAX_VALUE;
return setEncoded(list, index, stat, entry(stat), 0, asInt(value));
}
int set(final int list, final int index, final float value) {
final short stat = ids[index];
assertSimple(stat);
return setEncoded(list, index, stat, entry(stat), 0, asInt(stat, value));
}
int set(final int list, final int index, final StatRef src) {
assert equalsEncoded(index, src.id(), src.encodedParams());
return setEncoded(list, index, src.id(), src.entry(), src.encodedParams(), src.encodedValues());
}
void setAll(final StatList src) {
if (log.traceEnabled()) log.traceEntry("setAll(src: {})", src);
if (maxSize < src.tail) {
throw new IndexOutOfBoundsException("maxSize(" + maxSize + ") cannot fit src.tail(" + src.tail + ")");
}
assertMutable();
clear();
System.arraycopy(src.offsets, 0, this.offsets, 0, index(src.numLists));
arraycopy(src, 0, this, 0, src.tail);
maxLists = src.maxLists;
numLists = src.numLists;
tail = src.tail;
}
void setAll(final int list, final StatListRef src) {
if (log.traceEnabled()) log.traceEntry("setAll(list: {}, src: {})", list, src);
final int srcSize = src.size();
if (maxSize < srcSize) {
throw new IndexOutOfBoundsException("maxSize(" + maxSize + ") cannot fit src.size(" + srcSize + ")");
}
assertMutable(list);
clear(list);
final int startOffset = startingOffset(list);
ensureCapacity(list, startOffset, src.size());
final StatList srcParent = src.parent();
arraycopy(srcParent, srcParent.startingOffset(src.list), this, startOffset, srcSize);
}
int addEncoded(final int list, final int index, final int encodedValues) {
assert contains(list, index);
assert encoding(index) <= 2 : "#addEncoded() unsupported for encoding(" + encoding(index) + ")";
final short stat = ids[index];
assertSimple(stat);
assert equalsEncoded(index, stat, 0);
if (log.traceEnabled()) log.tracefEntry(
"addEncoded(stat: %d (%s), encodedValues: %d (0x%3$x))",
stat, entry(index), encodedValues);
values[index] += encodedValues;
flags[index] |= FLAG_MODIFIED;
if (log.debugEnabled()) log.debug(indexDebugString(index));
return index;
}
int add(final int list, final int index, final int value) {
return addEncoded(list, index, Stat.encode(ids[index], value));
}
int add(final int list, final int index, final long value) {
return addEncoded(list, index, asInt(value));
}
int add(final int list, final int index, final float value) {
return addEncoded(list, index, asInt(ids[index], value));
}
int subEncoded(final int list, final int index, final int encodedValues) {
assert contains(list, index);
assert encoding(index) <= 2 : "#subEncoded() unsupported for encoding(" + encoding(index) + ")";
final short stat = ids[index];
assertSimple(stat);
assert equalsEncoded(index, stat, 0);
if (log.traceEnabled()) log.tracefEntry(
"subEncoded(stat: %d (%s), encodedValues: %d (0x%3$x))",
stat, entry(index), encodedValues);
values[index] -= encodedValues;
if (log.debugEnabled()) log.debug(indexDebugString(index));
return index;
}
int sub(final int list, final int index, final int value) {
return subEncoded(list, index, Stat.encode(ids[index], value));
}
int sub(final int list, final int index, final long value) {
return subEncoded(list, index, asInt(value));
}
int sub(final int list, final int index, final float value) {
return subEncoded(list, index, asInt(ids[index], value));
}
private static int asInt(final long value) {
return (int) value;
}
private static int asInt(final short stat, final float value) {
assert entry(stat).ValShift == 8 : "entry.ValShift(" + entry(stat).ValShift + ") != " + 8;
return Fixed.floatToIntBits(value, 8);
}
int asInt(final int index) {
assert Stat.numEncodedValues(encoding(index)) == 1;
return (flags[index] & FLAG_FIXED) == FLAG_FIXED
? Stat.decode(ids[index], encodedValues(index))
: encodedValues(index);
}
long asLong(final int index) {
assert Stat.numEncodedValues(encoding(index)) == 1;
return (flags[index] & FLAG_FIXED) == FLAG_FIXED
? Stat.decode(ids[index], encodedValues(index))
: encodedValues(index);
}
float asFixed(final int index) {
assert Stat.numEncodedValues(encoding(index)) == 1;
assert entry(ids[index]).ValShift == 8 : "entry.ValShift(" + entry(ids[index]).ValShift + ") != " + 8;
return Fixed.intBitsToFloat(encodedValues(index), 8);
}
String asString(final int index) {
final byte flags = this.flags[index];
return (flags & FLAG_FIXED) == 0
? (flags & FLAG_LONG) == 0
? String.valueOf(asLong(index))
: String.valueOf(asInt(index))
: String.valueOf(asFixed(index));
}
int value0(final int index) {
switch (encoding(index)) {
default: // fall-through
case 0: return values[index];
case 1: return values[index];
case 2: return values[index];
case 3: return values[index] & 0xFF;
case 4: return values[index] & 0x3;
}
}
int value1(final int index) {
switch (encoding(index)) {
default: // fall-through
case 0: return 0;
case 1: return 0;
case 2: return 0;
case 3: return (values[index] >>> 8) & 0xFF;
case 4: return (values[index] >>> 2) & 0x3FF;
}
}
int value2(final int index) {
switch (encoding(index)) {
default: // fall-through
case 0: return 0;
case 1: return 0;
case 2: return 0;
case 3: return 0;
case 4: return (values[index] >>> 12) & 0x3FF;
}
}
int param0(final int index) {
switch (encoding(index)) {
default: // fall-through
case 0: return params[index];
case 1: return params[index];
case 2: return params[index] & 0x3F;
case 3: return params[index] & 0x3F;
case 4: return params[index];
}
}
int param1(final int index) {
switch (encoding(index)) {
default: // fall-through
case 0: return 0;
case 1: return 0;
case 2: return (params[index] >>> 6) & 0x3FF;
case 3: return (params[index] >>> 6) & 0x3FF;
case 4: return 0;
}
}
String indexDebugString(final int index) {
final byte flags = this.flags[index];
final StringBuilder sb = new StringBuilder(32);
if ((flags & FLAG_MODIFIED) == FLAG_MODIFIED) {
sb.append('*');
}
sb.append(entry(index))
.append('(')
.append(ids[index])
.append(")=");
final int encoding = flags & ENCODING_MASK;
switch (Stat.numEncodedParams(encoding)) {
case 2:
sb.append(param0(index)).append(':').append(param1(index)).append(':');
break;
case 1:
sb.append(param0(index)).append(':');
break;
}
switch (Stat.numEncodedValues(encoding)) {
case 3:
sb.append(value0(index)).append(':').append(value1(index)).append(':').append(value2(index));
break;
case 2:
sb.append(value0(index)).append(':').append(value1(index));
break;
case 1:
sb.append(asString(index));
break;
}
return sb.toString();
}
String listDebugString(final int list) {
final int startIndex = startingOffset(list);
final int endIndex = endingOffset(list);
return new ToStringBuilder(this)
.append("list", list)
.append("immutable", !isMutable(list))
.append("size", endIndex - startIndex)
.append("ids", '{' + StringUtils.join(ids, ',', startIndex, endIndex) + '}')
.append("values", '{' + StringUtils.join(values, ',', startIndex, endIndex) + '}')
.append("params", '{' + StringUtils.join(params, ',', startIndex, endIndex) + '}')
.append("flags", '{' + StringUtils.join(flags, ',', startIndex, endIndex) + '}')
.build();
}
@Override
public String toString() {
return new ToStringBuilder(this)
.append("numLists", numLists)
.append("maxLists", maxLists)
.append("immutable", StringUtils.leftPad(String.valueOf(immutable), 8, '0'))
.append("tail", tail)
.append("maxSize", maxSize)
.append("offsets", '{' + StringUtils.join(offsets, ',', 0, index(numLists)) + '}')
.append("ids", '{' + StringUtils.join(ids, ',', 0, tail) + '}')
.append("values", '{' + StringUtils.join(values, ',', 0, tail) + '}')
.append("params", '{' + StringUtils.join(params, ',', 0, tail) + '}')
.append("flags", '{' + StringUtils.join(flags, ',', 0, tail) + '}')
.build();
}
boolean equalsEncoded(final int index, final short stat, final int encodedParams) {
return ids[index] == stat && params[index] == encodedParams;
}
short id(final int index) {
return ids[index];
}
ItemStatCost.Entry entry(final int index) {
return entry(ids[index]);
}
static ItemStatCost.Entry entry(final short stat) {
return Stat.entry(stat);
}
int encoding(final int index) {
return flags[index] & ENCODING_MASK;
}
boolean modified(final int index) {
return (flags[index] & FLAG_MODIFIED) == FLAG_MODIFIED;
}
void forceUnmodified(final int index) {
flags[index] &= ~FLAG_MODIFIED;
}
int encodedValues(final int index) {
return values[index];
}
int encodedParams(final int index) {
return params[index];
}
private final void assertSimple(final short stat) {
final int encoding = entry(stat).Encode;
assert Stat.numEncodedParams(encoding) == 0 && Stat.numEncodedValues(encoding) == 1
: "stat(" + stat + ") requires " + Stat.numEncodedParams(encoding) + " params and " + Stat.numEncodedValues(encoding) + " values";
}
private void arraycopy(final int srcIndex, final int dstIndex, final int length) {
arraycopy(this, srcIndex, this, dstIndex, length);
}
private static void arraycopy(
final StatList src, final int srcIndex,
final StatList dst, final int dstIndex,
final int length) {
if (length <= 0) return;
if (log.traceEnabled()) log.traceEntry(
"arraycopy(src: {}, srcIndex: {}, dst: {}, dstIndex: {}, length: {})",
src, srcIndex, dst, dstIndex, length);
System.arraycopy(src.ids, srcIndex, dst.ids, dstIndex, length);
System.arraycopy(src.params, srcIndex, dst.params, dstIndex, length);
System.arraycopy(src.values, srcIndex, dst.values, dstIndex, length);
System.arraycopy(src.flags, srcIndex, dst.flags, dstIndex, length);
}
private void ensureCapacity(final int list, final int index, final int capacity) {
log.traceEntry("ensureCapacity(list: {}, index: {}, capacity: {})", list, index, capacity);
assertMutable();
final int endOffset = endingOffset(list);
assert index <= endOffset : "index(" + index + ") > list.endOffset(" + endOffset + ")";
final int shiftLength = endOffset - index;
final int newEndOffset = endOffset + capacity;
assert newEndOffset <= maxSize : "capacity(" + capacity + ") would exceed maxSize(" + maxSize + ")";
final int nextStartOffset = (list + 1) < numLists ? startingOffset(list + 1) : maxSize;
if (shiftLength > 0 && newEndOffset <= nextStartOffset) {
arraycopy(index, index + capacity, shiftLength);
setEndingOffset(list, newEndOffset);
return;
}
final int additionalCapacity = newEndOffset - nextStartOffset;
final int copyLength = tail - index;
if (copyLength > 0) arraycopy(index, index + capacity, copyLength);
if (additionalCapacity > 0) {
tail += additionalCapacity;
final byte[] offsets = this.offsets;
for (int i = index(list + 1), s = index(numLists); i < s; i++) {
offsets[i] += additionalCapacity;
}
assertSorted();
} else {
setEndingOffset(list, newEndOffset);
}
}
private int setEncoded(
final int list,
final int index,
final short stat,
final ItemStatCost.Entry entry,
final int encodedParams,
final int encodedValue) {
if (log.traceEnabled()) log.tracefEntry(
"setEncoded(index: %d, stat: %d (%s), param: %d (0x%4$x), value: %d (0x%5$x))",
index, stat, entry, encodedParams, encodedValue);
assert isMutable(list);
ids[index] = stat;
params[index] = encodedParams;
values[index] = encodedValue;
flags[index] = encodeFlags(entry);
if (log.debugEnabled()) log.debug(indexDebugString(index));
return index;
}
private int insertEncodedAt(
final int list,
final int index,
final short stat,
final ItemStatCost.Entry entry,
final int encodedParams,
final int encodedValue) {
if (log.traceEnabled()) log.tracefEntry(
"insertEncodedAt(index: %d, stat: %d (%s), param: %d (0x%4$x), value: %d (0x%5$x))",
index, stat, entry, encodedParams, encodedValue);
assert isMutable(list);
ensureCapacity(list, index, 1);
setEncoded(list, index, stat, entry, encodedParams, encodedValue);
if (log.traceEnabled()) log.trace(listDebugString(list));
return index;
}
private static byte encodeFlags(final ItemStatCost.Entry entry) {
byte flags = (byte) (entry.Encode & ENCODING_MASK);
if (entry.Save_Param_Bits > 0) flags |= FLAG_PARAMS;
if (Stat.numEncodedParams(entry.Encode) > 0) flags |= FLAG_PARAMS;
if (entry.Send_Bits >= Integer.SIZE) flags |= FLAG_LONG;
if (entry.ValShift > 0) flags |= FLAG_FIXED;
return flags;
}
int indexOfEncoded(final int list, final short stat, final int encodedParams) {
return indexOfEncoded(list, stat, encodedParams, false);
}
int indexOfEncoded(final int list, final short stat, final int encodedParams, boolean first) {
final int listStart = startingOffset(list);
final int listEnd = endingOffset(list);
final int index = Arrays.binarySearch(ids, listStart, listEnd, stat);
if (index >= 0) {
final int startIndex = firstIndexOf(stat, index, listStart);
if (first) return startIndex;
final int endIndex = lastIndexOf(stat, index, listEnd);
return Arrays.binarySearch(params, startIndex, endIndex, encodedParams);
} else {
return index;
}
}
int indexOf(final int list, final short stat) {
assertSimple(stat);
return indexOfEncoded(list, stat, 0);
}
int indexOf(final int list, final StatRef ref) {
return indexOfEncoded(list, ref.id(), ref.encodedParams());
}
private int firstIndexOf(final short stat, final int startIndex, final int listStart) {
int i = startIndex - 1;
final short[] ids = this.ids;
while (i >= listStart && ids[i] == stat) i--;
return i + 1;
}
private int lastIndexOf(final short stat, final int startIndex, final int listEnd) {
int i = startIndex + 1;
final short[] ids = this.ids;
for (final int s = listEnd; i < s && ids[i] == stat; i++);
return i;
}
int firstIndexOf(final int list, final short stat) {
return indexOfEncoded(list, stat, 0, true);
}
private static int index(final int list) {
return list << 1;
}
private int startingOffset(final int list) {
return offsets[index(list)] & 0xFF;
}
private int setStartingOffset(final int list, final int index) {
offsets[index(list)] = (byte) index;
assertSorted(-1);
return index;
}
private int endingOffset(final int list) {
return offsets[index(list) + 1] & 0xFF;
}
private int setEndingOffset(final int list, final int index) {
offsets[index(list) + 1] = (byte) index;
if (list + 1 >= numLists) tail = index;
assertSorted();
return index;
}
private void assertSorted() {
assertSorted(0);
}
private void assertSorted(final int offset) {
assert isSorted(offset) :
"offsets({" + StringUtils.join(offsets, ',', 0, index(numLists) + offset) + "}) "
+ "tail(" + tail + ") contains property lists that are out of order";
}
private boolean isSorted(final int offset) {
final int slicesLength = index(numLists) + offset;
final byte[] offsets = this.offsets;
int previous = offsets[0] & 0xFF;
for (int i = 1; i < slicesLength; i++) {
final int current = offsets[i] & 0xFF;
if (previous > current) return false;
previous = current;
}
return tail == previous;
}
public IndexIterator indexIterator(final int list) {
return INDEX_ITERATOR == null
? INDEX_ITERATOR = new IndexIterator().reset(list)
: INDEX_ITERATOR.reset(list);
}
public final class IndexIterator {
int index;
int startIndex;
int endIndex;
IndexIterator reset(final int list) {
index = startIndex = startingOffset(list);
endIndex = endingOffset(list);
return this;
}
public boolean hasNext() {
return index < endIndex;
}
public int next() {
return index++;
}
int pushback(final int count) {
index -= count;
if (index < startIndex) {
log.warn("index({}) < startIndex({})", index, startIndex);
index = startIndex;
}
return index;
}
}
/**
 * Returns the shared {@link StatIterator}, lazily created on first use and
 * reset to iterate the entries of {@code list}.
 * NOTE(review): single cached instance — nested or concurrent iterations would
 * clobber each other; confirm non-reentrant usage.
 */
public StatIterator statIterator(final int list) {
return STAT_ITERATOR == null
? STAT_ITERATOR = new StatIterator().reset(list)
: STAT_ITERATOR.reset(list);
}
/**
 * Reusable iterator over the stats of a single list. {@link #next()} returns a
 * shared {@link StatRef} view that is re-pointed on each call — callers must
 * not retain it across iterations.
 */
public final class StatIterator implements Iterator<StatRef> {
// Shared, mutable view re-pointed at each entry by next().
final StatRef stat = new StatRef(StatList.this);
int list; /** used for {@link #pushback} */
int head; /** used for {@link #pushback} */
int index;
int endIndex;
/** Points this iterator at {@code list}; head starts at the list's first entry. */
StatIterator reset(final int list) {
this.list = list;
stat.reset(list);
head = index = startingOffset(list);
endIndex = endingOffset(list);
return this;
}
@Override
public boolean hasNext() {
return index < endIndex;
}
/** Re-points the shared {@link StatRef} at the next entry and returns it. */
@Override
public StatRef next() {
return stat.update(index++);
}
/**
 * Recycles the previously read stat by re-adding it onto a new list being
 * formed at the start of this list while iteration is occurring. This
 * method effectively breaks the old list and shrinks it to a subset of
 * entries.
 */
void pushback() {
assert head < index : "head(" + head + ") cannot pass index(" + index + ")";
StatList.this.setEncoded(list, head++, stat.id(), stat.entry(), stat.encodedParams(), stat.encodedValues());
setEndingOffset(list, head);
}
/** Removal is unsupported; use {@link #pushback()} to retain entries instead. */
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
/**
 * Returns the shared {@link StatListIterator}, lazily created on first use and
 * rewound to the first list.
 * NOTE(review): single cached instance — nested iterations would clobber each
 * other; confirm non-reentrant usage.
 */
public StatListIterator listIterator() {
return STAT_LIST_ITERATOR == null
? STAT_LIST_ITERATOR = new StatListIterator().reset()
: STAT_LIST_ITERATOR.reset();
}
/**
 * Reusable iterator over every list held by this StatList; implements
 * {@link Iterable} so it can be used directly in for-each loops.
 */
public final class StatListIterator implements Iterator<StatListRef>, Iterable<StatListRef> {
int list = 0;
/** Rewinds iteration back to the first list. */
StatListIterator reset() {
this.list = 0;
return this;
}
/** Returns itself — the iterator doubles as its own iterable. */
@Override
public Iterator<StatListRef> iterator() {
return this;
}
@Override
public boolean hasNext() {
return this.list < numLists;
}
/** Returns a reference to the current list and advances to the next one. */
@Override
public StatListRef next() {
final StatListRef ref = get(list);
list++;
return ref;
}
/** Removal of whole lists is not supported. */
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
}
|
package algorithms.primitives;
/**
* Given an input integer input, return the integer value of input's bits reversed.
* You will only be reversing the "significant portion" of the binary representation
* (ignoring leading zeros).
*
* Example:
* Input: 1
* Output: 1
* Explanation: Under 8 bits 1 can be represented as 00000001. If we only reverse the
 * significant portion of this we yield 00000001 which is 1 in binary.
*/
public class ReverseBits {
public static void main(String[] args)
{
System.out.println(reverse(4));
}
/**
 * Reverses the significant bits of {@code input} (leading zeros ignored) and
 * returns the resulting integer.
 *
 * <p>Bug fix: the original used an arithmetic right shift ({@code >>}), which
 * sign-extends, so any negative input never reached zero and the loop ran
 * forever. The unsigned shift ({@code >>>}) terminates for all inputs.</p>
 *
 * <p>Package-private (widened from private) so the behavior is directly
 * testable; callers are unaffected.</p>
 *
 * @param input value whose significant bits are reversed
 * @return the bit-reversed value
 */
static int reverse(int input) {
int output = 0;
while (input != 0) {
output = output << 1;          // make room for the next bit
if ((input & 1) == 1) {        // copy the lowest bit of input
output = output | 1;
}
input = input >>> 1;           // unsigned shift: terminates for negatives too
}
return output;
}
}
|
package com.xwintop.xJavaFxTool.controller.games;
import com.xwintop.xJavaFxTool.services.games.X2048Service;
import com.xwintop.xJavaFxTool.view.games.X2048View;
import javafx.application.Platform;
import javafx.event.ActionEvent;
import javafx.event.EventHandler;
import javafx.scene.input.*;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import java.net.URL;
import java.util.ResourceBundle;
/**
 * JavaFX controller for the 2048 game. Routes keyboard, swipe and mouse input
 * events to the {@link X2048Service}, which owns all game logic and state.
 *
 * @author xufeng
 * @date 2019/4/25 0025 23:24
 */
@Getter
@Setter
@Slf4j
public class X2048Controller extends X2048View implements EventHandler<KeyEvent> {
// Game-logic service bound to this controller/view instance.
private X2048Service x2048Service = new X2048Service(this);
@Override
public void initialize(URL location, ResourceBundle resources) {
initView();
initEvent();
initService();
}
// Intentionally empty — presumably all view setup comes from FXML; confirm.
private void initView() {
}
// Intentionally empty — presumably handlers are wired via FXML On* bindings; confirm.
private void initEvent() {
}
// Initializes the service; board setup (size 4) is deferred to the FX thread.
private void initService() {
x2048Service.Init();
Platform.runLater(() -> {
x2048Service.ImplInit(4);
});
}
/** Forwards key press/release events to the game service. */
@Override
public void handle(KeyEvent event) {
if (event.getEventType() == KeyEvent.KEY_PRESSED) {
x2048Service.OnKeyPressed(event);
} else if (event.getEventType() == KeyEvent.KEY_RELEASED) {
x2048Service.OnKeyReleased(event);
}
}
/** Restarts the game at the current board size. */
public void OnReset(ActionEvent actionEvent) {
x2048Service.ImplInit(x2048Service.getSize());
}
/** Swipe gestures map directly to the equivalent arrow-key moves. */
public void OnSwipeDown(SwipeEvent swipeEvent) {
x2048Service.ProcessCode(KeyCode.DOWN);
}
public void OnSwipeLeft(SwipeEvent swipeEvent) {
x2048Service.ProcessCode(KeyCode.LEFT);
}
public void OnSwipeRight(SwipeEvent swipeEvent) {
x2048Service.ProcessCode(KeyCode.RIGHT);
}
public void OnSwipeUp(SwipeEvent swipeEvent) {
x2048Service.ProcessCode(KeyCode.UP);
}
/** Records the drag start point on primary-button press (mouse-as-swipe support). */
public void OnMousePressed(MouseEvent mouseEvent) {
if (mouseEvent.getButton() == MouseButton.PRIMARY) {
if (!x2048Service.isMMousePressed()) {
x2048Service.setMMousePressed(true);
x2048Service.setMPressedPoint(new X2048Service.Point((int) mouseEvent.getX(), (int) mouseEvent.getY()));
}
}
}
/** Records the release point and lets the service interpret the drag as a move. */
public void OnMouseReleased(MouseEvent mouseEvent) {
if (mouseEvent.getButton() == MouseButton.PRIMARY) {
if (x2048Service.isMMousePressed()) {
x2048Service.setMMousePressed(false);
x2048Service.setMReleasedPoint(new X2048Service.Point((int) mouseEvent.getX(), (int) mouseEvent.getY()));
x2048Service.MouseManipulation();
}
}
}
}
|
package org.gradle.test.performance.mediummonolithicjavaproject.p328;
/**
 * Plain data holder with ten bean-style properties (generated performance
 * fixture). Fields are declared together, followed by their accessors.
 */
public class Production6571 {
private Production6568 property0;
private Production6569 property1;
private Production6570 property2;
private String property3;
private String property4;
private String property5;
private String property6;
private String property7;
private String property8;
private String property9;
public Production6568 getProperty0() {
return this.property0;
}
public void setProperty0(Production6568 value) {
this.property0 = value;
}
public Production6569 getProperty1() {
return this.property1;
}
public void setProperty1(Production6569 value) {
this.property1 = value;
}
public Production6570 getProperty2() {
return this.property2;
}
public void setProperty2(Production6570 value) {
this.property2 = value;
}
public String getProperty3() {
return this.property3;
}
public void setProperty3(String value) {
this.property3 = value;
}
public String getProperty4() {
return this.property4;
}
public void setProperty4(String value) {
this.property4 = value;
}
public String getProperty5() {
return this.property5;
}
public void setProperty5(String value) {
this.property5 = value;
}
public String getProperty6() {
return this.property6;
}
public void setProperty6(String value) {
this.property6 = value;
}
public String getProperty7() {
return this.property7;
}
public void setProperty7(String value) {
this.property7 = value;
}
public String getProperty8() {
return this.property8;
}
public void setProperty8(String value) {
this.property8 = value;
}
public String getProperty9() {
return this.property9;
}
public void setProperty9(String value) {
this.property9 = value;
}
}
|
package com.hebin.interactiveservice.service.impl;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.hebin.core.bean.PageVo;
import com.hebin.core.bean.Query;
import com.hebin.core.bean.QueryCondition;
import com.hebin.interactiveservice.dao.InteractiveQaDao;
import com.hebin.interactiveservice.entity.InteractiveQaEntity;
import com.hebin.interactiveservice.service.InteractiveQaService;
import org.springframework.stereotype.Service;
/**
 * MyBatis-Plus service implementation for {@link InteractiveQaEntity} records.
 */
@Service("interactiveQaService")
public class InteractiveQaServiceImpl extends ServiceImpl<InteractiveQaDao, InteractiveQaEntity> implements InteractiveQaService {
/**
 * Returns a page of QA entities.
 * NOTE(review): the QueryWrapper carries no conditions, so any filters in
 * {@code params} beyond paging information are ignored — confirm intended.
 */
@Override
public PageVo queryPage(QueryCondition params) {
IPage<InteractiveQaEntity> page = this.page(
new Query<InteractiveQaEntity>().getPage(params),
new QueryWrapper<InteractiveQaEntity>()
);
return new PageVo(page);
}
}
|
package proteus.dl.syntax;
import proteus.dl.semantics.*;
/**
 * Thrown when a norm term does not have the expected format.
 */
public class NormTermFormatException extends Exception {
/**
 * Creates the exception with a description of the formatting problem.
 *
 * @param message human-readable description of the bad term
 */
public NormTermFormatException( String message ) {
super( message );
}
/**
 * Creates the exception with a description and the underlying cause, so the
 * original stack trace is preserved when wrapping a lower-level error.
 * (Backward-compatible addition; the single-argument constructor is unchanged.)
 *
 * @param message human-readable description of the bad term
 * @param cause the underlying exception being wrapped
 */
public NormTermFormatException( String message, Throwable cause ) {
super( message, cause );
}
}
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.pinpoint.model;
import javax.annotation.Generated;
/**
*
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public enum ButtonAction {
LINK("LINK"),
DEEP_LINK("DEEP_LINK"),
CLOSE("CLOSE");
/** Wire value of this action; immutable (now final). */
private final String value;
private ButtonAction(String value) {
this.value = value;
}
/** Returns the wire value rather than the constant name. */
@Override
public String toString() {
return this.value;
}
/**
 * Use this in place of valueOf.
 *
 * @param value
 *        real value
 * @return ButtonAction corresponding to the value
 *
 * @throws IllegalArgumentException
 *         If the specified value does not map to one of the known values in this enum.
 */
public static ButtonAction fromValue(String value) {
if (value == null || "".equals(value)) {
throw new IllegalArgumentException("Value cannot be null or empty!");
}
// Compare the stored wire value directly instead of going through toString().
for (ButtonAction enumEntry : ButtonAction.values()) {
if (enumEntry.value.equals(value)) {
return enumEntry;
}
}
throw new IllegalArgumentException("Cannot create enum from " + value + " value!");
}
}
|
package org.bian.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonCreator;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import org.bian.dto.CRITSystemAssessmentExchangeInputModelITSystemAssessmentExchangeActionRequest;
import javax.validation.Valid;
/**
* BQProductionRiskExchangeInputModel
*/
/**
 * BQProductionRiskExchangeInputModel — input payload for the Production Risk
 * exchange action. Fields are grouped together, followed by their accessors.
 */
public class BQProductionRiskExchangeInputModel {
// BIAN Identifier: reference to the parent IT System Assessment instance.
private String iTSystemAssessmentInstanceReference = null;
// BIAN Identifier: reference to the Production Risk instance.
private String productionRiskInstanceReference = null;
// BIAN Binary: consolidated processing record for the exchange call.
private Object productionRiskExchangeActionTaskRecord = null;
// Details of the requested exchange action.
private CRITSystemAssessmentExchangeInputModelITSystemAssessmentExchangeActionRequest productionRiskExchangeActionRequest = null;
/** @return reference to the parent IT System Assessment instance */
public String getITSystemAssessmentInstanceReference() {
return this.iTSystemAssessmentInstanceReference;
}
public void setITSystemAssessmentInstanceReference(String iTSystemAssessmentInstanceReference) {
this.iTSystemAssessmentInstanceReference = iTSystemAssessmentInstanceReference;
}
/** @return reference to the Production Risk instance */
public String getProductionRiskInstanceReference() {
return this.productionRiskInstanceReference;
}
public void setProductionRiskInstanceReference(String productionRiskInstanceReference) {
this.productionRiskInstanceReference = productionRiskInstanceReference;
}
/** @return the exchange service call consolidated processing record */
public Object getProductionRiskExchangeActionTaskRecord() {
return this.productionRiskExchangeActionTaskRecord;
}
public void setProductionRiskExchangeActionTaskRecord(Object productionRiskExchangeActionTaskRecord) {
this.productionRiskExchangeActionTaskRecord = productionRiskExchangeActionTaskRecord;
}
/** @return the exchange action request details */
public CRITSystemAssessmentExchangeInputModelITSystemAssessmentExchangeActionRequest getProductionRiskExchangeActionRequest() {
return this.productionRiskExchangeActionRequest;
}
public void setProductionRiskExchangeActionRequest(CRITSystemAssessmentExchangeInputModelITSystemAssessmentExchangeActionRequest productionRiskExchangeActionRequest) {
this.productionRiskExchangeActionRequest = productionRiskExchangeActionRequest;
}
}
|
/*
* Copyright 2011-2014 Rafael Iñigo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.vikingbrain.nmt.operations.playback;
import java.io.FileNotFoundException;
import java.util.Arrays;
import java.util.Collection;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.vikingbrain.nmt.client.modules.ModulePlayback;
import com.vikingbrain.nmt.responses.playback.ResponseGetCurrentPodInfo;
import com.vikingbrain.nmt.test.util.AbstractClientMock;
import com.vikingbrain.nmt.test.util.XmlTestFiles;
/**
 * Parameterized test for the GetCurrentPodInfo operation: each run feeds a
 * different canned XML response into the mocked HTTP client and checks the
 * parsed response.
 *
 * @author vikingBrain
 */
@RunWith(value = Parameterized.class)
public class GetCurrentPodInfoOperationTest extends AbstractClientMock {
/** Logger. */
private static Logger logger = LoggerFactory.getLogger(GetCurrentPodInfoOperationTest.class);
// Name of the canned XML fixture used by the current parameterized run.
private String xmlFileName;
/** Supplies one fixture file per test run (documentation example plus A200 variants). */
@Parameters(name = "{index}: {0}")
public static Collection<Object[]> data() {
Object[][] data = new Object[][] {
{ XmlTestFiles.GET_CURRENT_POD_INFO_DOCUMENTATION_EXAMPLE },
{ XmlTestFiles.GET_CURRENT_POD_INFO_A200 },
{ XmlTestFiles.GET_CURRENT_POD_INFO_A200_RETURN_VALUE_1 }
};
return Arrays.asList(data);
}
public GetCurrentPodInfoOperationTest(String _xmlFileName) throws FileNotFoundException {
xmlFileName = _xmlFileName;
}
/** Executes the operation against the canned response and sanity-checks the result. */
@Test
public void getCurrentPodInfotest() throws Exception {
//Put the expected response in the mock of the http client
setXmlFileResponseInHttpMockClient(xmlFileName);
ModulePlayback modulePlayback = theDavidBoxClient.getModulePlayback();
GetCurrentPodInfoOperation operation = modulePlayback.buildGetCurrentPodInfoOperation();
//All the info for the operation
logger.info(operation.toString());
ResponseGetCurrentPodInfo response = operation.execute();
Assert.assertNotNull(response);
Assert.assertFalse("".equals(response.getTitle()));
}
}
|
// --------------------------------------------------------------------------------
// Copyright 2002-2022 Echo Three, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// --------------------------------------------------------------------------------
package com.echothree.util.common.command;
/**
 * Modes of an edit command.
 * NOTE(review): semantics inferred from the constant names — presumably LOCK
 * acquires the target for editing, UPDATE commits new values, and ABANDON
 * releases the lock without changes; confirm against callers.
 */
public enum EditMode {
LOCK,
UPDATE,
ABANDON
}
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2019 the original author or authors.
*/
package org.assertj.core.api.intarray;
import org.assertj.core.api.IntArrayAssert;
import org.assertj.core.api.IntArrayAssertBaseTest;
import static org.mockito.Mockito.verify;
/**
 * Tests {@code IntArrayAssert#hasSizeGreaterThan(int)}: verifies the assertion
 * delegates to the internal arrays helper with the assertion info, the actual
 * array, and the boundary value (6).
 */
public class IntArrayAssert_hasSizeGreaterThan_Test extends IntArrayAssertBaseTest {
/** Invokes the API under test on the base-test fixture. */
@Override
protected IntArrayAssert invoke_api_method() {
return assertions.hasSizeGreaterThan(6);
}
/** Verifies the call was delegated to the mocked internal Arrays helper. */
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSizeGreaterThan(getInfo(assertions), getActual(assertions), 6);
}
}
|
package io.github.wimdeblauwe.errorhandlingspringbootstarter.exception;
import io.github.wimdeblauwe.errorhandlingspringbootstarter.ResponseErrorProperty;
/**
 * Test fixture: an exception exposing an extra response property via an
 * annotated getter (rather than an annotated field).
 */
public class ExceptionWithResponseErrorPropertyOnMethod extends RuntimeException {
private final String myProperty;
public ExceptionWithResponseErrorPropertyOnMethod(String message, String myProperty) {
super(message);
this.myProperty = myProperty;
}
// NOTE(review): this constructor leaves the exception message null — the sole
// argument is the property value, not a message; confirm that is intended.
public ExceptionWithResponseErrorPropertyOnMethod(String myProperty) {
this.myProperty = myProperty;
}
/** Exposed in the error response because of the annotation. */
@ResponseErrorProperty
public String getMyProperty() {
return myProperty;
}
}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.impala.planner;
import java.util.List;
import org.apache.impala.analysis.Expr;
import org.apache.impala.catalog.FeFsTable;
import org.apache.impala.catalog.FeHBaseTable;
import org.apache.impala.catalog.FeKuduTable;
import org.apache.impala.catalog.FeTable;
import org.apache.impala.common.Pair;
import org.apache.impala.thrift.TSinkAction;
import org.apache.impala.thrift.TSortingOrder;
import com.google.common.base.Preconditions;
/**
* A DataSink that writes into a table.
*
*/
public abstract class TableSink extends DataSink {
/**
 * Enum to specify the sink operation type.
 */
public enum Op {
INSERT {
@Override
public String toExplainString() { return "INSERT INTO"; }
@Override
public TSinkAction toThrift() { return TSinkAction.INSERT; }
},
UPDATE {
@Override
public String toExplainString() { return "UPDATE"; }
@Override
public TSinkAction toThrift() { return TSinkAction.UPDATE; }
},
UPSERT {
@Override
public String toExplainString() { return "UPSERT INTO"; }
@Override
public TSinkAction toThrift() { return TSinkAction.UPSERT; }
},
DELETE {
@Override
public String toExplainString() { return "DELETE FROM"; }
@Override
public TSinkAction toThrift() { return TSinkAction.DELETE; }
};
/** SQL-like prefix used in EXPLAIN output for this operation. */
public abstract String toExplainString();
/** Thrift representation of this operation. */
public abstract TSinkAction toThrift();
}
// Table which is to be populated by this sink.
protected final FeTable targetTable_;
// The type of operation to be performed by this sink.
protected final Op sinkOp_;
// One expression per result column for the query. Always non-null.
protected final List<Expr> outputExprs_;
/**
 * Creates a sink that performs 'sinkAction' on 'targetTable'.
 *
 * @param outputExprs one expression per result column; must be non-null
 */
public TableSink(FeTable targetTable, Op sinkAction, List<Expr> outputExprs) {
Preconditions.checkState(outputExprs != null);
targetTable_ = targetTable;
sinkOp_ = sinkAction;
outputExprs_ = outputExprs;
}
/**
 * Returns an output sink appropriate for writing to the given table.
 * Not all Ops are supported for all tables.
 * All parameters must be non-null, the lists in particular need to be empty if they
 * don't make sense for a certain table type.
 * For HDFS tables 'sortProperties' specifies two things, the indices into the list of
 * non-clustering columns of the target table that are stored in the 'sort.columns'
 * table property, and the sorting order.
 */
public static TableSink create(FeTable table, Op sinkAction,
List<Expr> partitionKeyExprs, List<Expr> outputExprs,
List<Integer> referencedColumns, boolean overwrite,
boolean inputIsClustered, Pair<List<Integer>, TSortingOrder> sortProperties) {
// NOTE(review): -1 and 0 appear to mean "no ACID write id" and "no table-sink
// limit", matching the overload's 'writeId'/'maxTableSinks' parameters — confirm.
return create(table, sinkAction, partitionKeyExprs, outputExprs, referencedColumns,
overwrite, inputIsClustered, sortProperties, -1, 0);
}
/**
 * Same as above, plus it takes an ACID write id in parameter 'writeId'.
 */
public static TableSink create(FeTable table, Op sinkAction,
List<Expr> partitionKeyExprs, List<Expr> outputExprs,
List<Integer> referencedColumns, boolean overwrite, boolean inputIsClustered,
Pair<List<Integer>, TSortingOrder> sortProperties, long writeId,
int maxTableSinks) {
Preconditions.checkNotNull(partitionKeyExprs);
Preconditions.checkNotNull(referencedColumns);
Preconditions.checkNotNull(sortProperties.first);
// Dispatch on the frontend table type; each branch validates the parameter
// combinations that make sense for that storage engine.
if (table instanceof FeFsTable) {
// Hdfs only supports inserts.
Preconditions.checkState(sinkAction == Op.INSERT);
// Referenced columns don't make sense for an Hdfs table.
Preconditions.checkState(referencedColumns.isEmpty());
return new HdfsTableSink(table, partitionKeyExprs,outputExprs, overwrite,
inputIsClustered, sortProperties, writeId, maxTableSinks);
} else if (table instanceof FeHBaseTable) {
// HBase only supports inserts.
Preconditions.checkState(sinkAction == Op.INSERT);
// Partition clause doesn't make sense for an HBase table.
Preconditions.checkState(partitionKeyExprs.isEmpty());
// HBase doesn't have a way to perform INSERT OVERWRITE
Preconditions.checkState(overwrite == false);
// Referenced columns don't make sense for an HBase table.
Preconditions.checkState(referencedColumns.isEmpty());
// Sort columns are not supported for HBase tables.
Preconditions.checkState(sortProperties.first.isEmpty());
// Create the HBaseTableSink and return it.
return new HBaseTableSink(table, outputExprs);
} else if (table instanceof FeKuduTable) {
// Kudu doesn't have a way to perform INSERT OVERWRITE.
Preconditions.checkState(overwrite == false);
// Sort columns are not supported for Kudu tables.
Preconditions.checkState(sortProperties.first.isEmpty());
return new KuduTableSink(table, sinkAction, referencedColumns, outputExprs);
} else {
throw new UnsupportedOperationException(
"Cannot create data sink into table of type: " + table.getClass().getName());
}
}
}
|
/*
* Copyright 2013-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.android.dalvik;
import com.facebook.buck.jvm.java.classes.AbstractFileLike;
import com.facebook.buck.jvm.java.classes.FileLike;
import com.google.common.base.Charsets;
import com.google.common.primitives.Bytes;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.file.Path;
import java.nio.file.Paths;
/** Helper to create a "canary" class for the secondary DEX. See {@link #create}. */
public class CanaryFactory {
// Fully-qualified class path template: <store>/dex<index>/Canary.
static final String CANARY_PATH_FORMAT = "%s/dex%s/Canary";
/**
 * Produced by:
 *
 * <pre>
 * $ echo -e "package secondary.dex01;\npublic interface Canary {}" > Canary.java
 * $ javac -target 6 -source 6 Canary.java
 * $ xxd -c 4 -g 1 Canary.class | cut -d' ' -f2-5 | sed -E 's/(..) ?/(byte) 0x\1, /g'
 * </pre>
 */
private static final byte[] CANARY_START = {
(byte) 0xca, (byte) 0xfe, (byte) 0xba, (byte) 0xbe,
(byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x32,
(byte) 0x00, (byte) 0x07, (byte) 0x07, (byte) 0x00,
(byte) 0x05, (byte) 0x07, (byte) 0x00, (byte) 0x06,
(byte) 0x01, (byte) 0x00, (byte) 0x0a, (byte) 0x53,
(byte) 0x6f, (byte) 0x75, (byte) 0x72, (byte) 0x63,
(byte) 0x65, (byte) 0x46, (byte) 0x69, (byte) 0x6c,
(byte) 0x65, (byte) 0x01, (byte) 0x00, (byte) 0x0b,
(byte) 0x43, (byte) 0x61, (byte) 0x6e, (byte) 0x61,
(byte) 0x72, (byte) 0x79, (byte) 0x2e, (byte) 0x6a,
(byte) 0x61, (byte) 0x76, (byte) 0x61,
};
/*
The part between CANARY_START and CANARY_REMAINDER is regenerated
to rename the canary class.
(byte) 0x01, (byte) 0x00, (byte) 0x16,
(byte) 0x73, (byte) 0x65, (byte) 0x63, (byte) 0x6f,
(byte) 0x6e, (byte) 0x64, (byte) 0x61, (byte) 0x72,
(byte) 0x79, (byte) 0x2f, (byte) 0x64, (byte) 0x65,
(byte) 0x78, (byte) 0x30, (byte) 0x31, (byte) 0x2f,
(byte) 0x43, (byte) 0x61, (byte) 0x6e, (byte) 0x61,
(byte) 0x72, (byte) 0x79,
This corresponds to a constant pool entry of type CONSTANT_Utf8_info
CONSTANT_Utf8_info {
u1 tag; 0x01 (Utf8 string)
u2 length; 0x0016 (22)
u1 bytes[length]; secondary/dex01/Canary
}
u1,u2,etc. types in the java class file spec are big-endian
https://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.4
https://en.wikipedia.org/wiki/Java_class_file
*/
private static final byte[] CANARY_REMAINDER = {
(byte) 0x01, (byte) 0x00, (byte) 0x10, (byte) 0x6a,
(byte) 0x61, (byte) 0x76, (byte) 0x61, (byte) 0x2f,
(byte) 0x6c, (byte) 0x61, (byte) 0x6e, (byte) 0x67,
(byte) 0x2f, (byte) 0x4f, (byte) 0x62, (byte) 0x6a,
(byte) 0x65, (byte) 0x63, (byte) 0x74, (byte) 0x06,
(byte) 0x01, (byte) 0x00, (byte) 0x01, (byte) 0x00,
(byte) 0x02, (byte) 0x00, (byte) 0x00, (byte) 0x00,
(byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00,
(byte) 0x01, (byte) 0x00, (byte) 0x03, (byte) 0x00,
(byte) 0x00, (byte) 0x00, (byte) 0x02, (byte) 0x00,
(byte) 0x04,
};
/** Utility class: do not instantiate */
private CanaryFactory() {}
/**
 * Adds a "canary" class to a secondary dex that can be safely loaded on any system. This avoids
 * an issue where, during secondary dex loading, we attempt to verify a secondary dex by loading
 * an arbitrary class, but the class we try to load isn't valid on that system (e.g., it depends
 * on Google Maps, but we are on AOSP).
 *
 * @param store dex store name of the current zip (to ensure unique names).
 * @param index Index of the current zip (to ensure unique names).
 */
public static FileLike create(String store, String index) {
String className = String.format(CANARY_PATH_FORMAT, store, index);
byte[] classNameBytes = className.getBytes(Charsets.UTF_8);
// See above comment regarding CONSTANT_Utf8_info
// Build the 3-byte entry header: tag 0x01 plus the big-endian u2 length.
byte[] stringInfo = new byte[3];
stringInfo[0] = (byte) 0x01;
stringInfo[1] = (byte) ((classNameBytes.length & 0x0000FF00) >> 8);
stringInfo[2] = (byte) (classNameBytes.length & 0x000000FF);
// Splice the renamed class-name constant between the fixed prefix/suffix bytes.
byte[] canaryClass = Bytes.concat(CANARY_START, stringInfo, classNameBytes, CANARY_REMAINDER);
String classFilePath = className + ".class";
return getCanaryClass(classFilePath, canaryClass);
}
/** Wraps the generated class bytes in an in-memory {@link FileLike} view. */
private static FileLike getCanaryClass(String relativePath, byte[] canaryClass) {
return new AbstractFileLike() {
@Override
public Path getContainer() {
// Sentinel container path: the class exists only in memory.
return Paths.get(":memory:");
}
@Override
public String getRelativePath() {
return relativePath;
}
@Override
public long getSize() {
return canaryClass.length;
}
@Override
public InputStream getInput() {
return new ByteArrayInputStream(canaryClass);
}
};
}
}
|
/*
* Copyright LWJGL. All rights reserved.
* License terms: https://www.lwjgl.org/license
* MACHINE GENERATED FILE, DO NOT EDIT
*/
package org.lwjgl.system.jawt;
import javax.annotation.*;
import java.nio.*;
import org.lwjgl.*;
import org.lwjgl.system.*;
import static org.lwjgl.system.MemoryUtil.*;
import static org.lwjgl.system.MemoryStack.*;
/**
* Structure for containing native AWT functions.
*
* <h3>Layout</h3>
*
* <pre><code>
* struct JAWT {
* jint {@link #version};
* void * GetDrawingSurface;
* void * FreeDrawingSurface;
* void * Lock;
* void * Unlock;
* void * GetComponent;
* void * CreateEmbeddedFrame;
* void * SetBounds;
* void * SynthesizeWindowActivation;
* }</code></pre>
*/
public class JAWT extends Struct implements NativeResource {
/** The struct size in bytes. */
public static final int SIZEOF;
/** The struct alignment in bytes. */
public static final int ALIGNOF;
/** The struct member offsets. */
public static final int
VERSION,
GETDRAWINGSURFACE,
FREEDRAWINGSURFACE,
LOCK,
UNLOCK,
GETCOMPONENT,
CREATEEMBEDDEDFRAME,
SETBOUNDS,
SYNTHESIZEWINDOWACTIVATION;
static {
Layout layout = __struct(
__member(4),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE)
);
SIZEOF = layout.getSize();
ALIGNOF = layout.getAlignment();
VERSION = layout.offsetof(0);
GETDRAWINGSURFACE = layout.offsetof(1);
FREEDRAWINGSURFACE = layout.offsetof(2);
LOCK = layout.offsetof(3);
UNLOCK = layout.offsetof(4);
GETCOMPONENT = layout.offsetof(5);
CREATEEMBEDDEDFRAME = layout.offsetof(6);
SETBOUNDS = layout.offsetof(7);
SYNTHESIZEWINDOWACTIVATION = layout.offsetof(8);
}
/**
* Creates a {@code JAWT} instance at the current position of the specified {@link ByteBuffer} container. Changes to the buffer's content will be
* visible to the struct instance and vice versa.
*
* <p>The created instance holds a strong reference to the container object.</p>
*/
public JAWT(ByteBuffer container) {
super(memAddress(container), __checkContainer(container, SIZEOF));
}
@Override
public int sizeof() { return SIZEOF; }
/** Version of this structure. This must always be set before calling JAWT_GetAWT() */
@NativeType("jint")
public int version() { return nversion(address()); }
/** @return the value of the {@code GetDrawingSurface} field. */
@NativeType("void *")
public long GetDrawingSurface() { return nGetDrawingSurface(address()); }
/** @return the value of the {@code FreeDrawingSurface} field. */
@NativeType("void *")
public long FreeDrawingSurface() { return nFreeDrawingSurface(address()); }
/** @return the value of the {@code Lock} field. */
@NativeType("void *")
public long Lock() { return nLock(address()); }
/** @return the value of the {@code Unlock} field. */
@NativeType("void *")
public long Unlock() { return nUnlock(address()); }
/** @return the value of the {@code GetComponent} field. */
@NativeType("void *")
public long GetComponent() { return nGetComponent(address()); }
/** @return the value of the {@code CreateEmbeddedFrame} field. */
@NativeType("void *")
public long CreateEmbeddedFrame() { return nCreateEmbeddedFrame(address()); }
/** @return the value of the {@code SetBounds} field. */
@NativeType("void *")
public long SetBounds() { return nSetBounds(address()); }
/** @return the value of the {@code SynthesizeWindowActivation} field. */
@NativeType("void *")
public long SynthesizeWindowActivation() { return nSynthesizeWindowActivation(address()); }
/** Sets the specified value to the {@link #version} field. */
public JAWT version(@NativeType("jint") int value) { nversion(address(), value); return this; }
/**
* Copies the specified struct data to this struct.
*
* @param src the source struct
*
* @return this struct
*/
public JAWT set(JAWT src) {
memCopy(src.address(), address(), SIZEOF);
return this;
}
// -----------------------------------
    /** Returns a new {@code JAWT} instance allocated with {@link MemoryUtil#memAlloc memAlloc}. The instance must be explicitly freed. */
    public static JAWT malloc() {
        return wrap(JAWT.class, nmemAllocChecked(SIZEOF));
    }
    /** Returns a new {@code JAWT} instance allocated with {@link MemoryUtil#memCalloc memCalloc}. The instance must be explicitly freed. */
    public static JAWT calloc() {
        return wrap(JAWT.class, nmemCallocChecked(1, SIZEOF));
    }
    /** Returns a new {@code JAWT} instance allocated with {@link BufferUtils}. */
    public static JAWT create() {
        // The backing ByteBuffer is passed to wrap() so the struct retains a
        // reference to it; this allocation is garbage-collected, not freed manually.
        ByteBuffer container = BufferUtils.createByteBuffer(SIZEOF);
        return wrap(JAWT.class, memAddress(container), container);
    }
    /** Returns a new {@code JAWT} instance for the specified memory address. No allocation is performed; the caller owns the memory. */
    public static JAWT create(long address) {
        return wrap(JAWT.class, address);
    }
    /** Like {@link #create(long) create}, but returns {@code null} if {@code address} is {@code NULL}. */
    @Nullable
    public static JAWT createSafe(long address) {
        return address == NULL ? null : wrap(JAWT.class, address);
    }
// -----------------------------------
    // Deprecated thread-local-stack helpers kept for source compatibility; each one
    // simply delegates to the malloc/calloc(MemoryStack) variants below.
    /** Deprecated for removal in 3.4.0. Use {@link #malloc(MemoryStack)} instead. */
    @Deprecated public static JAWT mallocStack() { return malloc(stackGet()); }
    /** Deprecated for removal in 3.4.0. Use {@link #calloc(MemoryStack)} instead. */
    @Deprecated public static JAWT callocStack() { return calloc(stackGet()); }
    /** Deprecated for removal in 3.4.0. Use {@link #malloc(MemoryStack)} instead. */
    @Deprecated public static JAWT mallocStack(MemoryStack stack) { return malloc(stack); }
    /** Deprecated for removal in 3.4.0. Use {@link #calloc(MemoryStack)} instead. */
    @Deprecated public static JAWT callocStack(MemoryStack stack) { return calloc(stack); }
    // NOTE(review): an orphaned Javadoc comment was converted to this regular comment —
    // it documented a deprecated mallocStack(int, MemoryStack) overload that no longer
    // exists here, and would otherwise read as documentation for malloc(MemoryStack) below.
    /**
     * Returns a new {@code JAWT} instance allocated on the specified {@link MemoryStack}.
     * The allocation is released automatically when the stack frame is popped.
     *
     * @param stack the stack from which to allocate
     */
    public static JAWT malloc(MemoryStack stack) {
        return wrap(JAWT.class, stack.nmalloc(ALIGNOF, SIZEOF));
    }
    /**
     * Returns a new {@code JAWT} instance allocated on the specified {@link MemoryStack} and initializes all its bits to zero.
     * The allocation is released automatically when the stack frame is popped.
     *
     * @param stack the stack from which to allocate
     */
    public static JAWT calloc(MemoryStack stack) {
        return wrap(JAWT.class, stack.ncalloc(ALIGNOF, 1, SIZEOF));
    }
// -----------------------------------
    // Unsafe accessors: operate directly on a raw struct address with no validation.
    // Callers are responsible for passing a valid, live JAWT struct address.
    /** Unsafe version of {@link #version}. */
    public static int nversion(long struct) { return UNSAFE.getInt(null, struct + JAWT.VERSION); }
    /** Unsafe version of {@link #GetDrawingSurface}. */
    public static long nGetDrawingSurface(long struct) { return memGetAddress(struct + JAWT.GETDRAWINGSURFACE); }
    /** Unsafe version of {@link #FreeDrawingSurface}. */
    public static long nFreeDrawingSurface(long struct) { return memGetAddress(struct + JAWT.FREEDRAWINGSURFACE); }
    /** Unsafe version of {@link #Lock}. */
    public static long nLock(long struct) { return memGetAddress(struct + JAWT.LOCK); }
    /** Unsafe version of {@link #Unlock}. */
    public static long nUnlock(long struct) { return memGetAddress(struct + JAWT.UNLOCK); }
    /** Unsafe version of {@link #GetComponent}. */
    public static long nGetComponent(long struct) { return memGetAddress(struct + JAWT.GETCOMPONENT); }
    /** Unsafe version of {@link #CreateEmbeddedFrame}. */
    public static long nCreateEmbeddedFrame(long struct) { return memGetAddress(struct + JAWT.CREATEEMBEDDEDFRAME); }
    /** Unsafe version of {@link #SetBounds}. */
    public static long nSetBounds(long struct) { return memGetAddress(struct + JAWT.SETBOUNDS); }
    /** Unsafe version of {@link #SynthesizeWindowActivation}. */
    public static long nSynthesizeWindowActivation(long struct) { return memGetAddress(struct + JAWT.SYNTHESIZEWINDOWACTIVATION); }
    /** Unsafe version of {@link #version(int) version}. */
    public static void nversion(long struct, int value) { UNSAFE.putInt(null, struct + JAWT.VERSION, value); }
}
|
/**
* Copyright 2010-2014 Axel Fontaine
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flywaydb.core.internal.dbsupport;
import org.flywaydb.core.api.FlywayException;
import org.flywaydb.core.internal.util.StringUtils;
import org.flywaydb.core.internal.util.logging.Log;
import org.flywaydb.core.internal.util.logging.LogFactory;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
/**
 * Sql script containing a series of statements terminated by semicolons (;). Single-line (--) and multi-line (/* * /)
 * comments are stripped and ignored.
 */
public class SqlScript {
    private static final Log LOG = LogFactory.getLog(SqlScript.class);
    /**
     * The database-specific support.
     */
    private final DbSupport dbSupport;
    /**
     * The sql statements contained in this script.
     */
    private final List<SqlStatement> sqlStatements;
    /**
     * Creates a new sql script from this source with these placeholders to replace.
     *
     * @param sqlScriptSource The sql script as a text block with all placeholders already replaced.
     * @param dbSupport The database-specific support.
     */
    public SqlScript(String sqlScriptSource, DbSupport dbSupport) {
        this.dbSupport = dbSupport;
        this.sqlStatements = parse(sqlScriptSource);
    }
    /**
     * Dummy constructor to increase testability.
     * Note: leaves {@link #sqlStatements} {@code null}, so {@link #execute} must not
     * be called on an instance created this way.
     *
     * @param dbSupport The database-specific support.
     */
    SqlScript(DbSupport dbSupport) {
        this.dbSupport = dbSupport;
        this.sqlStatements = null;
    }
    /**
     * For increased testability.
     *
     * @return The sql statements contained in this script.
     */
    public List<SqlStatement> getSqlStatements() {
        return sqlStatements;
    }
    /**
     * Executes this script against the database, one statement at a time.
     *
     * @param jdbcTemplate The jdbc template to use to execute this script.
     * @throws FlywaySqlScriptException when a statement fails; carries the line number and sql text.
     */
    public void execute(final JdbcTemplate jdbcTemplate) {
        for (SqlStatement sqlStatement : sqlStatements) {
            String sql = sqlStatement.getSql();
            LOG.debug("Executing SQL: " + sql);
            try {
                jdbcTemplate.executeStatement(sql);
            } catch (SQLException e) {
                // Wrap with the statement's script position so failures point at the offending line.
                throw new FlywaySqlScriptException(sqlStatement.getLineNumber(), sql, e);
            }
        }
    }
    /**
     * Parses this script source into statements.
     *
     * @param sqlScriptSource The script source to parse.
     * @return The parsed statements.
     */
    /* private -> for testing */
    List<SqlStatement> parse(String sqlScriptSource) {
        return linesToStatements(readLines(new StringReader(sqlScriptSource)));
    }
    /**
     * Turns these lines into a series of statements.
     *
     * @param lines The lines to analyse.
     * @return The statements contained in these lines (in order).
     */
    /* private -> for testing */
    List<SqlStatement> linesToStatements(List<String> lines) {
        List<SqlStatement> statements = new ArrayList<SqlStatement>();
        boolean inMultilineComment = false;
        // A delimiter-change directive applies to every subsequent statement in the script.
        Delimiter nonStandardDelimiter = null;
        SqlStatementBuilder sqlStatementBuilder = dbSupport.createSqlStatementBuilder();
        for (int lineNumber = 1; lineNumber <= lines.size(); lineNumber++) {
            String line = lines.get(lineNumber - 1);
            if (sqlStatementBuilder.isEmpty()) {
                // Between statements: blank lines, comments and delimiter directives are consumed here.
                if (!StringUtils.hasText(line)) {
                    // Skip empty line between statements.
                    continue;
                }
                String trimmedLine = line.trim();
                if (!sqlStatementBuilder.isCommentDirective(trimmedLine)) {
                    if (trimmedLine.startsWith("/*")) {
                        inMultilineComment = true;
                    }
                    if (inMultilineComment) {
                        if (trimmedLine.endsWith("*/")) {
                            inMultilineComment = false;
                        }
                        // Skip line part of a multi-line comment
                        continue;
                    }
                    if (sqlStatementBuilder.isSingleLineComment(trimmedLine)) {
                        // Skip single-line comment
                        continue;
                    }
                }
                Delimiter newDelimiter = sqlStatementBuilder.extractNewDelimiterFromLine(line);
                if (newDelimiter != null) {
                    nonStandardDelimiter = newDelimiter;
                    // Skip this line as it was an explicit delimiter change directive outside of any statements.
                    continue;
                }
                sqlStatementBuilder.setLineNumber(lineNumber);
                // Start a new statement, marking it with this line number.
                if (nonStandardDelimiter != null) {
                    sqlStatementBuilder.setDelimiter(nonStandardDelimiter);
                }
            }
            sqlStatementBuilder.addLine(line);
            if (sqlStatementBuilder.isTerminated()) {
                SqlStatement sqlStatement = sqlStatementBuilder.getSqlStatement();
                statements.add(sqlStatement);
                LOG.debug("Found statement at line " + sqlStatement.getLineNumber() + ": " + sqlStatement.getSql());
                // Reset the builder for the next statement.
                sqlStatementBuilder = dbSupport.createSqlStatementBuilder();
            }
        }
        // Catch any statements not followed by delimiter.
        if (!sqlStatementBuilder.isEmpty()) {
            statements.add(sqlStatementBuilder.getSqlStatement());
        }
        return statements;
    }
    /**
     * Parses the textual data provided by this reader into a list of lines.
     *
     * @param reader The reader for the textual data.
     * @return The list of lines (in order).
     * @throws FlywayException Thrown when the textual data parsing failed.
     */
    private List<String> readLines(Reader reader) {
        List<String> lines = new ArrayList<String>();
        BufferedReader bufferedReader = new BufferedReader(reader);
        String line;
        try {
            while ((line = bufferedReader.readLine()) != null) {
                lines.add(line);
            }
        } catch (IOException e) {
            throw new FlywayException("Cannot parse lines", e);
        }
        return lines;
    }
}
|
package com.geccocrawler.gecco.listener;
import com.geccocrawler.gecco.GeccoEngine;
/**
* 简单的引擎时间兼容实现类,可以继承该类覆盖需要的方法
*
* @author LiuJunGuang
*/
public abstract class AbstractSimpleEventListener implements EventListener {

    /**
     * No-op default implementation; subclasses override only the callbacks they need.
     *
     * @see com.geccocrawler.gecco.listener.EventListener#onStart(com.geccocrawler.gecco.GeccoEngine)
     */
    @Override
    public void onStart(GeccoEngine ge) {
        // intentionally empty
    }

    /**
     * No-op default implementation.
     *
     * @see com.geccocrawler.gecco.listener.EventListener#onPause(com.geccocrawler.gecco.GeccoEngine)
     */
    @Override
    public void onPause(GeccoEngine ge) {
        // intentionally empty
    }

    /**
     * No-op default implementation.
     *
     * @see com.geccocrawler.gecco.listener.EventListener#onRestart(com.geccocrawler.gecco.GeccoEngine)
     */
    @Override
    public void onRestart(GeccoEngine ge) {
        // intentionally empty
    }

    /**
     * No-op default implementation.
     *
     * @see com.geccocrawler.gecco.listener.EventListener#onStop(com.geccocrawler.gecco.GeccoEngine)
     */
    @Override
    public void onStop(GeccoEngine ge) {
        // intentionally empty
    }
}
|
package org.wx.sdk.card.respone;
import org.wx.sdk.base.Response;
/**
* 查询导入code数目返回对象
* @author Rocye
* @version 2017.12.26
*/
public class CardCodeGetDepositCountRespone extends Response {

    // Number of card codes that have been successfully deposited.
    // (The class-name typo "Respone" is part of the public API surface and is
    // deliberately left unchanged to avoid breaking callers.)
    private Integer count;

    public Integer getCount() {
        return this.count;
    }

    public void setCount(Integer count) {
        this.count = count;
    }
}
|
package mysteryDungeon.pokemons;
import static mysteryDungeon.MysteryDungeon.makeBackSpritePath;
import com.badlogic.gdx.graphics.Color;
import com.megacrit.cardcrawl.cards.AbstractCard;
import com.megacrit.cardcrawl.cards.AbstractCard.CardColor;
import mysteryDungeon.cards.Bulbasaur.BulbasaurDefend;
import mysteryDungeon.cards.Bulbasaur.BulbasaurLeechSeed;
import mysteryDungeon.cards.Bulbasaur.BulbasaurTackle;
import mysteryDungeon.characters.Pokemon;
public class Cubone extends AbstractPokemon {
  // NOTE(review): the constants below appear to be copied verbatim from the
  // Bulbasaur template — the name, starting deck, colors and evolution target
  // are all Bulbasaur's. Confirm whether this class is a work-in-progress
  // placeholder or should be given Cubone-specific data.
  public static String NAME = "Bulbasaur"; // TODO confirm: presumably should be "Cubone"
  public static int MAX_HP = 35;
  public static int ORB_SLOTS = 1;
  // Starting deck uses Bulbasaur's cards — TODO confirm a Cubone card set exists.
  public static AbstractCard[] STARTING_DECK = new AbstractCard[]{new BulbasaurTackle(), new BulbasaurTackle(), new BulbasaurDefend(), new BulbasaurDefend(), new BulbasaurLeechSeed()};
  public static Color COLOR = Color.GREEN;
  public static CardColor CARD_COLOR = Pokemon.Enums.BULBASAUR_GREEN;
  // The back sprite, at least, is looked up under this class's own name ("Cubone.png").
  public static String PATH_TO_BACK_SPRITE = makeBackSpritePath(Cubone.class.getSimpleName()+".png");
  public Cubone(){
    super(NAME, MAX_HP, ORB_SLOTS, STARTING_DECK, COLOR, CARD_COLOR, PATH_TO_BACK_SPRITE);
  }
  // Returns the evolved form, propagating the shiny flag.
  // NOTE(review): evolves into Ivysaur (Bulbasaur's evolution) — Cubone's
  // evolution would normally be Marowak; confirm intended before changing.
  @Override
  public AbstractPokemon evolve() {
    AbstractPokemon evolution = new Ivysaur();
    if (shiny)
      evolution.shiny = true;
    return evolution;
  }
}
|
// Modular multiplication reduced into int range.
// NOTE(review): assumes 0 <= a, b < MOD with MOD small enough that the long
// product cannot overflow (true for typical MOD ~1e9) — confirm operand range.
int mult(long a, long b) {
    long product = a * b;
    if (product >= MOD)
        product %= MOD;
    return (int) product;
}
|
/*
* (C) Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* ohun@live.cn (夜色)
*/
package com.mpush.api.push;
import com.mpush.api.Constants;
import java.util.List;
import java.util.Set;
/**
* Created by ohun on 16/9/8.
*
* @author ohun@live.cn (夜色)
*/
public class PushContext {

    /** Raw payload bytes to be pushed. */
    private byte[] context;

    /** Structured message to be pushed (alternative to the raw payload). */
    private PushMsg pushMsg;

    /** Target user id for a single-user push. */
    private String userId;

    /** Target user ids for a batch push. */
    private List<String> userIds;

    /** Tag filter; currently include-only (an exclude filter may be added later). */
    private Set<String> tags;

    /** Acknowledgement mode for the message. */
    private AckModel ackModel = AckModel.NO_ACK;

    /** Callback invoked after the push succeeds. */
    private PushCallback callback;

    /** Whether to broadcast to all online users. */
    private boolean broadcast = false;

    /** Push timeout in milliseconds. */
    private int timeout = 3000;

    public PushContext(byte[] context) { this.context = context; }

    public PushContext(PushMsg pushMsg) { this.pushMsg = pushMsg; }

    /** Creates a context from a plain string, encoded as UTF-8 bytes. */
    public static PushContext build(String msg) { return new PushContext(msg.getBytes(Constants.UTF_8)); }

    /** Creates a context from a structured push message. */
    public static PushContext build(PushMsg msg) { return new PushContext(msg); }

    public byte[] getContext() { return context; }

    public PushMsg getPushMsg() { return pushMsg; }

    public String getUserId() { return userId; }

    public PushContext setUserId(String userId) { this.userId = userId; return this; }

    public List<String> getUserIds() { return userIds; }

    public PushContext setUserIds(List<String> userIds) { this.userIds = userIds; return this; }

    public Set<String> getTags() { return tags; }

    public PushContext setTags(Set<String> tags) { this.tags = tags; return this; }

    public AckModel getAckModel() { return ackModel; }

    public PushContext setAckModel(AckModel ackModel) { this.ackModel = ackModel; return this; }

    public PushCallback getCallback() { return callback; }

    public PushContext setCallback(PushCallback callback) { this.callback = callback; return this; }

    public boolean isBroadcast() { return broadcast; }

    public PushContext setBroadcast(boolean broadcast) { this.broadcast = broadcast; return this; }

    public int getTimeout() { return timeout; }

    public PushContext setTimeout(int timeout) { this.timeout = timeout; return this; }
}
|
/*
* Copyright (C) 2021 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.goots;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/**
* Unit test for simple App.
*/
public class AppTest
{
    /**
     * Smoke test confirming the build and test harness are wired up correctly.
     */
    @Test
    public void shouldAnswerWithTrue()
    {
        // Trivially true: exists only so the module has at least one passing test.
        assertTrue( true );
    }
}
|
package com.codepath.apps.restclienttemplate.fragments;
import android.app.DialogFragment;
import android.os.Bundle;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.EditText;
import android.widget.Toast;
import com.codepath.apps.restclienttemplate.R;
import com.codepath.apps.restclienttemplate.RestApplication;
import com.codepath.apps.restclienttemplate.RestClient;
import com.codepath.apps.restclienttemplate.models.TwitterUser;
import com.loopj.android.http.JsonHttpResponseHandler;
import org.apache.http.Header;
//import cz.msebera.android.httpclient.Header;
import org.json.JSONObject;
public class TweetDialogFragment extends DialogFragment {

    RestClient client;
    private View vBlackout;
    private EditText etEditor;
    private Button btnTweet;
    // Id of the user being replied to; 0 (the default) means a plain, non-reply tweet.
    private long replyToId;

    public TweetDialogFragment() {
        // Required empty public constructor so the framework can re-instantiate the fragment.
    }

    OnTweetSentListener onTweetSentListener;

    /** Callback fired once a tweet has been posted successfully. */
    public interface OnTweetSentListener {
        void onTweetSent();
    }

    /**
     * Sets the id of the user this tweet replies to.
     *
     * <p>Fix: {@code replyToId} previously had no setter anywhere in this class, so the
     * reply pre-fill logic in {@link #onCreateView} was unreachable (always 0).
     *
     * @param replyToId the user id to reply to; pass 0 for a normal tweet
     */
    public void setReplyToId(long replyToId) {
        this.replyToId = replyToId;
    }

    /**
     * Registers the listener notified after a successful tweet.
     *
     * @param listener the callback, or {@code null} to clear it
     */
    public void setOnTweetSentListener(OnTweetSentListener listener) {
        this.onTweetSentListener = listener;
    }

    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
        View view = inflater.inflate(R.layout.fragment_write_tweet, container);
        getDialog().setTitle(getResources().getString(R.string.compose_tweet));
        vBlackout = view.findViewById(R.id.vBlackout);
        etEditor = (EditText) view.findViewById(R.id.etTweetEditor);
        btnTweet = (Button) view.findViewById(R.id.btnSubmit);
        btnTweet.setOnClickListener(onTweetListener);
        if (replyToId > 0) {
            // Pre-fill "@username " and place the cursor at the end for reply tweets.
            TwitterUser user = TwitterUser.getById(replyToId);
            if (user != null) {
                etEditor.setText("@" + user.getScreen_name() + " ");
                etEditor.setSelection(etEditor.getText().length());
            } else
                Log.d("tag", "Unable to find user with id: " + replyToId);
        }
        return view;
    }

    public View.OnClickListener onTweetListener = new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            String status = etEditor.getText().toString();
            if (status.isEmpty()) {
                Toast.makeText(getActivity(), "Status is empty", Toast.LENGTH_SHORT).show();
                return;
            }
            // Lock the UI while the request is in flight.
            etEditor.setEnabled(false);
            btnTweet.setEnabled(false);
            vBlackout.setVisibility(View.VISIBLE);
            vBlackout.animate().alpha(0.5F).setDuration(2000);
            client = RestApplication.getRestClient();
            client.postStatus(status, "", onStatusSentHandler);
        }

        private JsonHttpResponseHandler onStatusSentHandler = new JsonHttpResponseHandler() {
            @Override
            public void onSuccess(int statusCode, Header[] headers, JSONObject response) {
                Log.d("tag", "Status Response: " + response.toString());
                if (onTweetSentListener != null)
                    onTweetSentListener.onTweetSent();
                else
                    Log.d("tag", "Callback is null");
                dismiss();
            }

            @Override
            public void onFailure(int statusCode, Header[] headers, Throwable throwable, JSONObject errorResponse) {
                Log.d("tag", "Status Update failed with status: " + statusCode);
                if (errorResponse != null)
                    Log.d("tag", "Status Response: " + errorResponse.toString());
                // Bug fix: restore the UI so the user can edit and retry. Previously the
                // editor and button stayed disabled behind the blackout forever on failure.
                etEditor.setEnabled(true);
                btnTweet.setEnabled(true);
                vBlackout.animate().cancel();
                vBlackout.setAlpha(0F);
                // assumes the blackout view starts hidden in the layout — TODO confirm
                vBlackout.setVisibility(View.GONE);
                Toast.makeText(getActivity(), "Failed to send tweet", Toast.LENGTH_SHORT).show();
            }
        };
    };
}
|
//TODO GKE by Djer |Audit Code| Prends en compte les remarques de CheckStyle !
package fr.houseofcode.dap.server.rma.data;
|
/*
* Copyright 2019 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.server.services.jbpm.ui;
import java.io.ByteArrayInputStream;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.jbpm.process.svg.SVGImageProcessor;
import org.jbpm.process.svg.processor.SVGProcessor;
import org.jbpm.services.api.ProcessInstanceNotFoundException;
import org.jbpm.services.api.RuntimeDataService;
import org.jbpm.services.api.model.NodeInstanceDesc;
import org.jbpm.services.api.model.ProcessDefinition;
import org.jbpm.services.api.model.ProcessInstanceDesc;
import org.kie.api.runtime.query.QueryContext;
import org.kie.server.api.KieServerConstants;
import org.kie.server.services.api.KieServerRegistry;
import org.kie.server.services.impl.locator.ContainerLocatorProvider;
import org.kie.server.services.jbpm.ui.img.ImageReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.jbpm.process.svg.processor.SVGProcessor.ACTIVE_BORDER_COLOR;
import static org.jbpm.process.svg.processor.SVGProcessor.COMPLETED_BORDER_COLOR;
import static org.jbpm.process.svg.processor.SVGProcessor.COMPLETED_COLOR;
import static org.kie.server.api.KieServerConstants.KIE_SERVER_IMAGESERVICE_MAX_NODES;
/**
 * Serves process diagram SVG images, optionally annotated with the state
 * (active/completed) of a specific process instance's nodes.
 */
public class ImageServiceBase {
    private static final Logger logger = LoggerFactory.getLogger(ImageServiceBase.class);
    /**
     * This causes the image service to limit the number of nodes (performance).
     * Due to this limitation it could cause a known issue not blurring all the nodes active or completed depending on
     * the process size. A value of 0 or less disables the limit (see the null QueryContext below).
     */
    private static final int MAX_NODES = Integer.parseInt(System.getProperty(KIE_SERVER_IMAGESERVICE_MAX_NODES, "1000"));
    private RuntimeDataService dataService;
    private Map<String, ImageReference> imageReferenceMap;
    // Base URL of this server; always normalized to end with "/" by the constructor.
    private String kieServerLocation;
    // MessageFormat template: {0} = container id, {1} = process instance id.
    private String processInstanceImageLink = "containers/{0}/images/processes/instances/{1}";
    private KieServerRegistry registry;
    public ImageServiceBase() {
        // for tests only
        this.kieServerLocation = "";
    }
    public ImageServiceBase(RuntimeDataService dataService, Map<String, ImageReference> imageReferenceMap, KieServerRegistry registry) {
        this.dataService = dataService;
        this.imageReferenceMap = imageReferenceMap;
        this.registry = registry;
        this.kieServerLocation = this.registry.getConfig().getConfigItemValue(KieServerConstants.KIE_SERVER_LOCATION, System.getProperty(KieServerConstants.KIE_SERVER_LOCATION, "unknown"));
        if (!this.kieServerLocation.endsWith("/")) {
            this.kieServerLocation = kieServerLocation + "/";
        }
    }
    /**
     * Loads the raw SVG bytes for a process definition.
     *
     * @return the SVG bytes, or {@code null} when the process exists but no SVG file was found
     * @throws IllegalArgumentException when the process definition itself does not exist
     */
    private byte[] getProcessImageAsBytes(String containerId, String processId) {
        ProcessDefinition procDef = dataService.getProcessesByDeploymentIdProcessId(containerId, processId);
        if (procDef == null) {
            throw new IllegalArgumentException("No process found for " + processId + " within container " + containerId);
        }
        String location = "";
        if (procDef.getPackageName() != null && !procDef.getPackageName().trim().isEmpty()) {
            // The SVG is stored under the package path, e.g. "org.acme" -> "org/acme/".
            location = procDef.getPackageName().replaceAll("\\.", "/") + "/";
        }
        // get SVG String
        byte[] imageSVG = imageReferenceMap.get(containerId).getImageContent(location, processId);
        if (imageSVG == null) {
            logger.warn("Could not find SVG image file for process '" + processId + "' within container " + containerId);
            return null;
        }
        return imageSVG;
    }
    /**
     * Returns the plain (unannotated) process diagram SVG, or {@code null} when no SVG exists.
     */
    public String getProcessImage(String containerId, String processId) {
        containerId = registry.getContainerId(containerId, ContainerLocatorProvider.get().getLocator());
        String imageSVGString = null;
        byte[] imageSVG = getProcessImageAsBytes(containerId, processId);
        if (imageSVG != null) {
            ByteArrayInputStream svgStream = new ByteArrayInputStream(imageSVG);
            SVGProcessor processor = new SVGImageProcessor(svgStream).getProcessor();
            imageSVGString = processor.getSVG();
        }
        return imageSVGString;
    }
    /**
     * Returns the annotated diagram for a process instance using the default colors.
     */
    public String getActiveProcessImage(String containerId, long procInstId) {
        return getActiveProcessImage(containerId, procInstId, COMPLETED_COLOR, COMPLETED_BORDER_COLOR, ACTIVE_BORDER_COLOR);
    }
    /**
     * Returns the process diagram SVG annotated with the instance's active and completed
     * nodes, using the given colors, and with links to sub-process instance diagrams.
     *
     * @throws ProcessInstanceNotFoundException when the process instance does not exist
     * @throws IllegalArgumentException when no SVG image is available for the process
     */
    public String getActiveProcessImage(String containerId, long procInstId, String completedNodeColor,
                                        String completedNodeBorderColor, String activeNodeBorderColor) {
        ProcessInstanceDesc instance = dataService.getProcessInstanceById(procInstId);
        if (instance == null) {
            throw new ProcessInstanceNotFoundException("No instance found for process instance id " + procInstId);
        }
        String imageSVGString = null;
        // get SVG String
        byte[] imageSVG = getProcessImageAsBytes(instance.getDeploymentId(), instance.getProcessId());
        if (imageSVG != null) {
            // find active nodes and modify image
            Map<String, String> subProcessLinks = new HashMap<>();
            // A null QueryContext means "no limit"; otherwise cap the history query at MAX_NODES.
            QueryContext qc = MAX_NODES > 0 ? new QueryContext(0, MAX_NODES) : null;
            Collection<NodeInstanceDesc> activeLogs = dataService.getProcessInstanceHistoryActive(procInstId, qc);
            Collection<NodeInstanceDesc> completedLogs = dataService.getProcessInstanceHistoryCompleted(procInstId, qc);
            // Keyed by node *instance* id so completed entries can be removed from the active set below.
            Map<Long, String> active = new HashMap<Long, String>();
            List<String> completed = new ArrayList<String>();
            for (NodeInstanceDesc activeNode : activeLogs) {
                active.put(activeNode.getId(), activeNode.getNodeId());
            }
            for (NodeInstanceDesc completeNode : completedLogs) {
                completed.add(completeNode.getNodeId());
                active.remove(completeNode.getId());
                populateSubProcessLink(containerId, completeNode, subProcessLinks);
            }
            activeLogs.forEach(activeNode -> {
                populateSubProcessLink(containerId, activeNode, subProcessLinks);
            });
            ByteArrayInputStream svgStream = new ByteArrayInputStream(imageSVG);
            imageSVGString = SVGImageProcessor.transform(svgStream, completed, new ArrayList<String>(active.values()),
                                                         subProcessLinks, completedNodeColor, completedNodeBorderColor,
                                                         activeNodeBorderColor);
            return imageSVGString;
        }
        // NOTE(review): only reached when the SVG file is missing (the process itself was
        // found above) — the message text is slightly misleading; confirm before changing
        // it, as clients may match on it.
        throw new IllegalArgumentException("No process found for " + instance.getProcessId() + " within container " + containerId);
    }
    /**
     * Records a link to the sub-process instance's own diagram for any sub-process node.
     */
    protected void populateSubProcessLink(String containerId, NodeInstanceDesc node, Map<String, String> subProcessLinks) {
        if (node.getReferenceId() != null && node.getNodeType().endsWith("SubProcessNode")) {
            String link = kieServerLocation + MessageFormat.format(processInstanceImageLink, containerId, node.getReferenceId().toString());
            subProcessLinks.put(node.getNodeId(), link);
        }
    }
}
|
/*
* Copyright (c) 2021.
*
* This source code is subject to the terms of the GNU General Public
* License, version 3. If a copy of the GPL was not distributed with this
* file, You can obtain one at: https://www.gnu.org/licenses/gpl-3.0.txt
*/
package com.cullinan.cullinanclient.hack;
import com.cullinan.cullinanclient.hacks.NoFallHack;
import com.cullinan.cullinanclient.hacks.SpiderHack;
import net.minecraft.util.crash.CrashException;
import net.minecraft.util.crash.CrashReport;
import java.lang.reflect.Field;
import java.util.Collection;
import java.util.Collections;
import java.util.TreeMap;
public class HackList {
    // NOTE: every hack must be declared as a field whose name ends in "Hack";
    // the reflective registration loop in the constructor relies on that
    // naming convention to discover them.
    public final NoFallHack noFallHack = new NoFallHack();
    public final SpiderHack spiderHack = new SpiderHack();
    // Registry of all hacks, keyed case-insensitively by hack name.
    private final TreeMap<String, Hack> hacks =
        new TreeMap<>(String::compareToIgnoreCase);
    private static HackList instance;
    // NOTE(review): lazy singleton initialization is not thread-safe — confirm
    // this is only ever called from the client thread before relying on it.
    public static HackList getInstance() {
        if(instance == null) instance = new HackList();
        return instance;
    }
    // Discovers all hack fields reflectively and registers them by name.
    private HackList() {
        try
        {
            for(Field field : HackList.class.getDeclaredFields())
            {
                // Skip non-hack fields (e.g. "hacks", "instance") by the name convention.
                if(!field.getName().endsWith("Hack"))
                    continue;
                Hack hack = (Hack)field.get(this);
                hacks.put(hack.getName(), hack);
            }
        }catch(Exception e)
        {
            e.printStackTrace();
            String message = "Initializing Cullinan hacks";
            CrashReport report = CrashReport.create(e, message);
            throw new CrashException(report);
        }
    }
    // Looks a hack up by its (case-insensitive) name, or null if unknown.
    public Hack getHackByName(String name) {
        return hacks.get(name);
    }
    // Read-only view over all registered hacks, ordered by name.
    public Collection<Hack> getAllHacks()
    {
        return Collections.unmodifiableCollection(hacks.values());
    }
}
|
package com.crownquest.crownquest.commands;
import com.crownquest.crownquest.CrownQuest;
import com.crownquest.crownquest.User;
import org.bukkit.ChatColor;
import org.bukkit.command.Command;
import org.bukkit.command.CommandSender;
import org.bukkit.entity.Player;
public class WalletCommand extends CommandAction {
  // Owning plugin instance, used to reach the wallet and scoreboard services.
  private final CrownQuest plugin;

  public WalletCommand(CrownQuest plugin) {
    this.plugin = plugin;
  }

  /**
   * Sends the player their wallet details and refreshes their scoreboard.
   * Always returns true so Bukkit does not print the command usage message.
   */
  public boolean run(CommandSender sender, Command cmd, String label, String[] args, Player player) {
    try {
      plugin.sendWalletInfo(player);
      plugin.updateScoreboard(player);
    } catch (Exception e) {
      e.printStackTrace();
      player.sendMessage(ChatColor.RED + "There was a problem reading your wallet.");
    }
    return true;
  }
}
|
//
// ========================================================================
// Copyright (c) 1995-2020 Mort Bay Consulting Pty Ltd and others.
//
// This program and the accompanying materials are made available under
// the terms of the Eclipse Public License 2.0 which is available at
// https://www.eclipse.org/legal/epl-2.0
//
// This Source Code may also be made available under the following
// Secondary Licenses when the conditions for such availability set
// forth in the Eclipse Public License, v. 2.0 are satisfied:
// the Apache License v2.0 which is available at
// https://www.apache.org/licenses/LICENSE-2.0
//
// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
// ========================================================================
//
package org.eclipse.jetty.util.resource;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.io.StringWriter;
import java.lang.reflect.InvocationTargetException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.DirectoryStream;
import java.nio.file.FileSystem;
import java.nio.file.FileSystemException;
import java.nio.file.Files;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Stream;
import org.eclipse.jetty.toolchain.test.FS;
import org.eclipse.jetty.toolchain.test.IO;
import org.eclipse.jetty.toolchain.test.jupiter.WorkDir;
import org.eclipse.jetty.toolchain.test.jupiter.WorkDirExtension;
import org.eclipse.jetty.util.BufferUtil;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.junit.jupiter.api.condition.DisabledOnOs;
import org.junit.jupiter.api.condition.EnabledOnOs;
import org.junit.jupiter.api.condition.OS;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.junit.jupiter.api.condition.OS.LINUX;
import static org.junit.jupiter.api.condition.OS.MAC;
import static org.junit.jupiter.api.condition.OS.WINDOWS;
@ExtendWith(WorkDirExtension.class)
public class FileSystemResourceTest
{
public WorkDir workDir;
    // Supplies the Resource implementations exercised by the parameterized tests;
    // currently only PathResource.
    public static Stream<Class<PathResource>> fsResourceProvider()
    {
        return Stream.of(PathResource.class);
    }
public Resource newResource(Class<? extends Resource> resourceClass, URL url) throws Exception
{
try
{
return resourceClass.getConstructor(URL.class).newInstance(url);
}
catch (InvocationTargetException e)
{
try
{
throw e.getTargetException();
}
catch (Exception | Error ex)
{
throw ex;
}
catch (Throwable th)
{
throw new Error(th);
}
}
}
public Resource newResource(Class<? extends Resource> resourceClass, URI uri) throws Exception
{
try
{
return resourceClass.getConstructor(URI.class).newInstance(uri);
}
catch (InvocationTargetException e)
{
try
{
throw e.getTargetException();
}
catch (Exception | Error ex)
{
throw ex;
}
catch (Throwable th)
{
throw new Error(th);
}
}
}
public Resource newResource(Class<? extends Resource> resourceClass, File file) throws Exception
{
try
{
return resourceClass.getConstructor(File.class).newInstance(file);
}
catch (InvocationTargetException e)
{
try
{
throw e.getTargetException();
}
catch (Exception | Error ex)
{
throw ex;
}
catch (Throwable th)
{
throw new Error(th);
}
}
}
private Matcher<Resource> hasNoAlias()
{
return new BaseMatcher<>()
{
@Override
public boolean matches(Object item)
{
final Resource res = (Resource)item;
return !res.isAlias();
}
@Override
public void describeTo(Description description)
{
description.appendText("getAlias should return null");
}
@Override
public void describeMismatch(Object item, Description description)
{
description.appendText("was ").appendValue(((Resource)item).getAlias());
}
};
}
/**
 * Matcher asserting that a Resource's alias URI points at the given resource.
 */
private Matcher<Resource> isAliasFor(final Resource resource)
{
    return new BaseMatcher<>()
    {
        @Override
        public boolean matches(Object item)
        {
            URI alias = ((Resource)item).getAlias();
            // A null alias can never match; otherwise compare against the target URI.
            return (alias != null) && alias.equals(resource.getURI());
        }

        @Override
        public void describeTo(Description description)
        {
            description.appendText("getAlias should return ").appendValue(resource.getURI());
        }

        @Override
        public void describeMismatch(Object item, Description description)
        {
            description.appendText("was ").appendValue(((Resource)item).getAlias());
        }
    };
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testNonAbsoluteURI(Class<PathResource> resourceClass)
{
    // A scheme-less (relative) URI cannot identify a file resource.
    assertThrows(IllegalArgumentException.class, () ->
    {
        newResource(resourceClass, new URI("path/to/resource"));
    });
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testNotFileURI(Class<PathResource> resourceClass)
{
    // Only file-based URIs are valid for these resource implementations.
    assertThrows(IllegalArgumentException.class, () ->
    {
        newResource(resourceClass, new URI("http://www.eclipse.org/jetty/"));
    });
}
@ParameterizedTest
@EnabledOnOs(WINDOWS)
@MethodSource("fsResourceProvider")
public void testBogusFilename_Windows(Class<PathResource> resourceClass)
{
    // "CON" is a reserved name under windows
    assertThrows(IllegalArgumentException.class, () ->
    {
        newResource(resourceClass, new URI("file://CON"));
    });
}
@ParameterizedTest
@EnabledOnOs({LINUX, MAC})
@MethodSource("fsResourceProvider")
public void testBogusFilename_Unix(Class<PathResource> resourceClass)
{
    // A windows path is invalid under unix
    assertThrows(IllegalArgumentException.class, () ->
    {
        newResource(resourceClass, new URI("file://Z:/:"));
    });
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testNewResource_WithSpace(Class<PathResource> resourceClass) throws Exception
{
    Path realDir = workDir.getPath().normalize().toRealPath();

    // Create "base with spaces/sub" on disk.
    Path spacedBase = realDir.resolve("base with spaces");
    FS.ensureDirExists(spacedBase.toFile());
    Path sub = spacedBase.resolve("sub");
    FS.ensureDirExists(sub.toFile());

    URL baseUrl = spacedBase.toUri().toURL();
    assertThat("url.protocol", baseUrl.getProtocol(), is("file"));

    try (Resource baseResource = newResource(resourceClass, baseUrl))
    {
        Resource subResource = baseResource.addPath("sub");
        assertThat("sub/.isDirectory", subResource.isDirectory(), is(true));

        // An absolute path added to a resource must not escape to the filesystem root.
        Resource tmp = subResource.addPath("/tmp");
        assertThat("No root", tmp.exists(), is(false));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testAddPathClass(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Path sub = testDir.resolve("sub");
    FS.ensureDirExists(sub.toFile());

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        // Navigating to an existing subdirectory yields a directory resource.
        Resource subResource = baseResource.addPath("sub");
        assertThat("sub/.isDirectory", subResource.isDirectory(), is(true));

        // An absolute path added to a resource must not escape to the filesystem root.
        Resource tmp = subResource.addPath("/tmp");
        assertThat("No root", tmp.exists(), is(false));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testAddRootPath(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir.resolve("sub"));

    String readableRootDir = findRootDir(testDir.getFileSystem());
    assumeTrue(readableRootDir != null, "Readable Root Dir found");

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        Resource subResource = baseResource.addPath("sub");
        assertThat("sub", subResource.isDirectory(), is(true));

        try
        {
            Resource rootRef = subResource.addPath(readableRootDir);
            // valid path for unix and OSX
            assertThat("Readable Root Dir", rootRef.exists(), is(false));
        }
        catch (MalformedURLException | InvalidPathException e)
        {
            // valid path on Windows
        }
    }
}
/**
 * Access files whose names contain non-ASCII (Swedish) characters.
 * <p>
 * Fixed: the parameter previously used the raw type {@code Class}, which is
 * inconsistent with every sibling test and produces an unchecked warning;
 * it is now {@code Class<PathResource>} like the rest of this class.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testAccessUniCodeFile(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    String readableRootDir = findRootDir(dir.getFileSystem());
    assumeTrue(readableRootDir != null, "Readable Root Dir found");

    Path subdir = dir.resolve("sub");
    Files.createDirectories(subdir);

    // Three files differing only in their unicode character.
    touchFile(subdir.resolve("swedish-å.txt"), "hi a-with-circle");
    touchFile(subdir.resolve("swedish-ä.txt"), "hi a-with-two-dots");
    touchFile(subdir.resolve("swedish-ö.txt"), "hi o-with-two-dots");

    try (Resource base = newResource(resourceClass, subdir.toFile()))
    {
        Resource refA1 = base.addPath("swedish-å.txt");
        Resource refA2 = base.addPath("swedish-ä.txt");
        Resource refO1 = base.addPath("swedish-ö.txt");

        // Each file must resolve, and must not be treated as an alias.
        assertThat("Ref A1 exists", refA1.exists(), is(true));
        assertThat("Ref A2 exists", refA2.exists(), is(true));
        assertThat("Ref O1 exists", refO1.exists(), is(true));
        assertThat("Ref A1 alias", refA1.isAlias(), is(false));
        assertThat("Ref A2 alias", refA2.isAlias(), is(false));
        assertThat("Ref O1 alias", refO1.isAlias(), is(false));

        // Content round-trips intact.
        assertThat("Ref A1 contents", toString(refA1), is("hi a-with-circle"));
        assertThat("Ref A2 contents", toString(refA2), is("hi a-with-two-dots"));
        assertThat("Ref O1 contents", toString(refO1), is("hi o-with-two-dots"));
    }
}
/**
 * Finds a readable, visible directory directly under one of the filesystem
 * roots, or {@code null} if none is found.
 */
private String findRootDir(FileSystem fs)
{
    for (Path rootDir : fs.getRootDirectories())
    {
        try (DirectoryStream<Path> entries = Files.newDirectoryStream(rootDir))
        {
            for (Path entry : entries)
            {
                // Skip non-directories, hidden entries, and Windows-style "$" system dirs.
                boolean suitable = Files.isDirectory(entry) && !Files.isHidden(entry) && !entry.getFileName().toString().contains("$");
                if (suitable)
                    return entry.toAbsolutePath().toString();
            }
        }
        catch (Exception ignored)
        {
            // Best-effort scan: roots that cannot be listed (e.g. an empty
            // removable drive on Windows) are simply skipped.
        }
    }

    // No suitable root directory available.
    return null;
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testIsContainedIn(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Files.createFile(testDir.resolve("foo"));

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        Resource resource = baseResource.addPath("foo");
        // Even though "foo" lives under the base directory on disk,
        // isContainedIn() is expected to report false here.
        assertThat("is contained in", resource.isContainedIn(baseResource), is(false));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testIsDirectory(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Files.createFile(testDir.resolve("foo"));
    Files.createDirectories(testDir.resolve("sub"));

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        // A plain file is not a directory.
        Resource fooResource = baseResource.addPath("foo");
        assertThat("foo.isDirectory", fooResource.isDirectory(), is(false));

        // An actual subdirectory is.
        Resource subResource = baseResource.addPath("sub");
        assertThat("sub/.isDirectory", subResource.isDirectory(), is(true));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testLastModified(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    File fooFile = workDir.getPathFile("foo").toFile();
    assertTrue(fooFile.createNewFile());

    long expected = fooFile.lastModified();

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        Resource resource = baseResource.addPath("foo");
        // Truncate to whole seconds to tolerate filesystem timestamp granularity.
        assertThat("foo.lastModified", resource.lastModified() / 1000 * 1000, lessThanOrEqualTo(expected));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testLastModified_NotExists(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        // A missing file reports a zero last-modified timestamp.
        Resource resource = baseResource.addPath("foo");
        assertThat("foo.lastModified", resource.lastModified(), is(0L));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testLength(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);

    Path fooPath = testDir.resolve("foo");
    touchFile(fooPath, "foo");
    long expected = Files.size(fooPath);

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        // The resource length must match the on-disk file size.
        Resource resource = baseResource.addPath("foo");
        assertThat("foo.length", resource.length(), is(expected));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testLength_NotExists(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        // A missing file reports zero length.
        Resource resource = baseResource.addPath("foo");
        assertThat("foo.length", resource.length(), is(0L));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testDelete(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Files.createFile(testDir.resolve("foo"));

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        Resource resource = baseResource.addPath("foo");

        // Present before deletion.
        assertThat("foo.exists", resource.exists(), is(true));

        // Deletion succeeds.
        assertThat("foo.delete", resource.delete(), is(true));

        // Gone after deletion.
        assertThat("foo.exists", resource.exists(), is(false));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testDelete_NotExists(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        Resource resource = baseResource.addPath("foo");

        // Never created, so it must not exist.
        assertThat("foo.exists", resource.exists(), is(false));

        // Deleting a missing file reports failure.
        assertThat("foo.delete", resource.delete(), is(false));

        // Still absent afterwards.
        assertThat("foo.exists", resource.exists(), is(false));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testName(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);

    // getName() reports the absolute filesystem path of the resource.
    String expected = testDir.toAbsolutePath().toString();

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        assertThat("base.name", baseResource.getName(), is(expected));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testInputStream(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);

    String content = "Foo is here";
    touchFile(testDir.resolve("foo"), content);

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        Resource fooResource = baseResource.addPath("foo");
        // Read the entire resource back via its InputStream and verify content.
        try (InputStream stream = fooResource.getInputStream();
             InputStreamReader reader = new InputStreamReader(stream);
             StringWriter writer = new StringWriter())
        {
            IO.copy(reader, writer);
            assertThat("Stream", writer.toString(), is(content));
        }
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testReadableByteChannel(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);

    Path fooPath = testDir.resolve("foo");
    String content = "Foo is here";

    // Write the content to disk.
    try (StringReader reader = new StringReader(content);
         BufferedWriter writer = Files.newBufferedWriter(fooPath))
    {
        IO.copy(reader, writer);
    }

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        Resource fooResource = baseResource.addPath("foo");
        // Read it back through the resource's ReadableByteChannel.
        try (ReadableByteChannel channel = fooResource.getReadableByteChannel())
        {
            ByteBuffer buffer = ByteBuffer.allocate(256);
            channel.read(buffer);
            buffer.flip();
            String actual = BufferUtil.toUTF8String(buffer);
            assertThat("ReadableByteChannel content", actual, is(content));
        }
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testGetURI(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);

    Path fooPath = testDir.resolve("foo");
    Files.createFile(fooPath);

    // The resource URI must match what java.nio reports for the path.
    URI expected = fooPath.toUri();

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        Resource fooResource = baseResource.addPath("foo");
        assertThat("getURI", fooResource.getURI(), is(expected));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testList(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Files.createFile(testDir.resolve("foo"));
    Files.createFile(testDir.resolve("bar"));
    Files.createDirectories(testDir.resolve("tick"));
    Files.createDirectories(testDir.resolve("tock"));

    // Directories are reported with a trailing slash.
    List<String> expected = Arrays.asList("foo", "bar", "tick/", "tock/");

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        List<String> actual = Arrays.asList(baseResource.list());
        // Same entries, order not significant.
        assertEquals(expected.size(), actual.size());
        for (String entry : expected)
        {
            assertTrue(actual.contains(entry));
        }
    }
}
/**
 * A symlink to an existing file must be detected as an alias of that file.
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a
 * no-op — on platforms unable to create symlinks the test then proceeded and
 * failed on the later assertions instead of being skipped. It now uses
 * {@code assumeTrue(false, ...)} to abort as skipped, matching the comment.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
@DisabledOnOs(WINDOWS)
public void testSymlink(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();

    Path foo = dir.resolve("foo");
    Path bar = dir.resolve("bar");

    try
    {
        Files.createFile(foo);
        Files.createSymbolicLink(bar, foo);
    }
    catch (UnsupportedOperationException | FileSystemException e)
    {
        // if unable to create symlink, no point testing the rest
        // this is the path that Microsoft Windows takes.
        assumeTrue(false, "Not supported");
    }

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource resFoo = base.addPath("foo");
        Resource resBar = base.addPath("bar");

        assertThat("resFoo.uri", resFoo.getURI(), is(foo.toUri()));

        // Access to the same resource, but via a symlink means that they are not equivalent
        assertThat("foo.equals(bar)", resFoo.equals(resBar), is(false));

        // The real file is not an alias, however it is reached.
        assertThat("resource.alias", resFoo, hasNoAlias());
        assertThat("resource.uri.alias", newResource(resourceClass, resFoo.getURI()), hasNoAlias());
        assertThat("resource.file.alias", newResource(resourceClass, resFoo.getFile()), hasNoAlias());

        // The symlink is an alias for the real file, however it is reached.
        assertThat("alias", resBar, isAliasFor(resFoo));
        assertThat("uri.alias", newResource(resourceClass, resBar.getURI()), isAliasFor(resFoo));
        assertThat("file.alias", newResource(resourceClass, resBar.getFile()), isAliasFor(resFoo));
    }
}
/**
 * A symlink whose target does not exist must still be detected as an alias.
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a
 * no-op — on platforms unable to create symlinks the test then proceeded and
 * failed on the later assertions instead of being skipped. It now uses
 * {@code assumeTrue(false, ...)} to abort as skipped, matching the comment.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@ValueSource(classes = PathResource.class) // FileResource does not support this
@DisabledOnOs(WINDOWS)
public void testNonExistantSymlink(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);

    Path foo = dir.resolve("foo");
    Path bar = dir.resolve("bar");

    try
    {
        // Note: "foo" is deliberately never created.
        Files.createSymbolicLink(bar, foo);
    }
    catch (UnsupportedOperationException | FileSystemException e)
    {
        // if unable to create symlink, no point testing the rest
        // this is the path that Microsoft Windows takes.
        assumeTrue(false, "Not supported");
    }

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource resFoo = base.addPath("foo");
        Resource resBar = base.addPath("bar");

        assertThat("resFoo.uri", resFoo.getURI(), is(foo.toUri()));

        // Access to the same resource, but via a symlink means that they are not equivalent
        assertThat("foo.equals(bar)", resFoo.equals(resBar), is(false));

        // The (non-existent) target is not an alias, however it is reached.
        assertThat("resource.alias", resFoo, hasNoAlias());
        assertThat("resource.uri.alias", newResource(resourceClass, resFoo.getURI()), hasNoAlias());
        assertThat("resource.file.alias", newResource(resourceClass, resFoo.getFile()), hasNoAlias());

        // The symlink is an alias for its target, however it is reached.
        assertThat("alias", resBar, isAliasFor(resFoo));
        assertThat("uri.alias", newResource(resourceClass, resBar.getURI()), isAliasFor(resFoo));
        assertThat("file.alias", newResource(resourceClass, resBar.getFile()), isAliasFor(resFoo));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testCaseInsensitiveAlias(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Files.createFile(testDir.resolve("file"));

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        // Reference to actual resource that exists
        Resource resource = baseResource.addPath("file");

        assertThat("resource.alias", resource, hasNoAlias());
        assertThat("resource.uri.alias", newResource(resourceClass, resource.getURI()), hasNoAlias());
        assertThat("resource.file.alias", newResource(resourceClass, resource.getFile()), hasNoAlias());

        // On some case insensitive file systems, lets see if an alternate
        // case for the filename results in an alias reference
        Resource alias = baseResource.addPath("FILE");
        if (alias.exists())
        {
            // If it exists, it must be an alias
            assertThat("alias", alias, isAliasFor(resource));
            assertThat("alias.uri", newResource(resourceClass, alias.getURI()), isAliasFor(resource));
            assertThat("alias.file", newResource(resourceClass, alias.getFile()), isAliasFor(resource));
        }
    }
}
/**
 * Test for the Windows feature that exposes 8.3 short filename references
 * for long filenames.
 * <p>
 * See: http://support.microsoft.com/kb/142982
 *
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
@EnabledOnOs(WINDOWS)
public void testCase8dot3Alias(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Files.createFile(testDir.resolve("TextFile.Long.txt"));

    try (Resource baseResource = newResource(resourceClass, testDir.toFile()))
    {
        // Long filename
        Resource resource = baseResource.addPath("TextFile.Long.txt");

        assertThat("resource.alias", resource, hasNoAlias());
        assertThat("resource.uri.alias", newResource(resourceClass, resource.getURI()), hasNoAlias());
        assertThat("resource.file.alias", newResource(resourceClass, resource.getFile()), hasNoAlias());

        // On some versions of Windows, the long filename can be referenced
        // via a short 8.3 equivalent filename.
        Resource alias = baseResource.addPath("TEXTFI~1.TXT");
        if (alias.exists())
        {
            // If it exists, it must be an alias
            assertThat("alias", alias, isAliasFor(resource));
            assertThat("alias.uri", newResource(resourceClass, alias.getURI()), isAliasFor(resource));
            assertThat("alias.file", newResource(resourceClass, alias.getFile()), isAliasFor(resource));
        }
    }
}
/**
 * NTFS Alternative Data / File Streams.
 * <p>
 * See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa364404(v=vs.85).aspx
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a no-op,
 * so on platforms without NTFS stream support the test was reported as passed
 * rather than skipped. It now uses {@code assumeTrue(false, ...)}.
 *
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
@EnabledOnOs(WINDOWS)
public void testNTFSFileStreamAlias(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);
    Path path = dir.resolve("testfile");
    Files.createFile(path);

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource resource = base.addPath("testfile");
        assertThat("resource.alias", resource, hasNoAlias());
        assertThat("resource.uri.alias", newResource(resourceClass, resource.getURI()), hasNoAlias());
        assertThat("resource.file.alias", newResource(resourceClass, resource.getFile()), hasNoAlias());

        try
        {
            // Attempt to reference same file, but via NTFS simple stream
            Resource alias = base.addPath("testfile:stream");
            if (alias.exists())
            {
                // If it exists, it must be an alias
                assertThat("resource.alias", alias, isAliasFor(resource));
                assertThat("resource.uri.alias", newResource(resourceClass, alias.getURI()), isAliasFor(resource));
                assertThat("resource.file.alias", newResource(resourceClass, alias.getFile()), isAliasFor(resource));
            }
        }
        catch (InvalidPathException e)
        {
            // NTFS filesystem streams are unsupported on some platforms.
            assumeTrue(false, "Not supported");
        }
    }
}
/**
 * NTFS Alternative Data / File Streams ({@code ::$DATA} form).
 * <p>
 * See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa364404(v=vs.85).aspx
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a no-op,
 * so on platforms without NTFS stream support the test was reported as passed
 * rather than skipped. It now uses {@code assumeTrue(false, ...)}.
 *
 * @throws Exception failed test
 */
@ParameterizedTest
@ValueSource(classes = PathResource.class) // not supported on FileResource
@EnabledOnOs(WINDOWS)
public void testNTFSFileDataStreamAlias(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);
    Path path = dir.resolve("testfile");
    Files.createFile(path);

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource resource = base.addPath("testfile");
        assertThat("resource.alias", resource, hasNoAlias());
        assertThat("resource.uri.alias", newResource(resourceClass, resource.getURI()), hasNoAlias());
        assertThat("resource.file.alias", newResource(resourceClass, resource.getFile()), hasNoAlias());

        try
        {
            // Attempt to reference same file, but via NTFS DATA stream
            Resource alias = base.addPath("testfile::$DATA");
            if (alias.exists())
            {
                assumeTrue(alias.getURI().getScheme().equals("file"));

                // If it exists, it must be an alias
                assertThat("resource.alias", alias, isAliasFor(resource));
                assertThat("resource.uri.alias", newResource(resourceClass, alias.getURI()), isAliasFor(resource));
                assertThat("resource.file.alias", newResource(resourceClass, alias.getFile()), isAliasFor(resource));
            }
        }
        catch (InvalidPathException e)
        {
            // NTFS filesystem streams are unsupported on some platforms.
            assumeTrue(false, "Not supported");
        }
    }
}
/**
 * NTFS Alternative Data / File Streams (percent-encoded {@code ::%24DATA} form).
 * <p>
 * See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa364404(v=vs.85).aspx
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a no-op,
 * so on platforms without NTFS stream support the test was reported as passed
 * rather than skipped. It now uses {@code assumeTrue(false, ...)}.
 *
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
@EnabledOnOs(WINDOWS)
public void testNTFSFileEncodedDataStreamAlias(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);
    Path path = dir.resolve("testfile");
    Files.createFile(path);

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource resource = base.addPath("testfile");
        assertThat("resource.alias", resource, hasNoAlias());
        assertThat("resource.uri.alias", newResource(resourceClass, resource.getURI()), hasNoAlias());
        assertThat("resource.file.alias", newResource(resourceClass, resource.getFile()), hasNoAlias());

        try
        {
            // Attempt to reference same file, but via NTFS DATA stream (encoded addPath version)
            Resource alias = base.addPath("testfile::%24DATA");
            if (alias.exists())
            {
                // If it exists, it must be an alias
                assertThat("resource.alias", alias, isAliasFor(resource));
                assertThat("resource.uri.alias", newResource(resourceClass, alias.getURI()), isAliasFor(resource));
                assertThat("resource.file.alias", newResource(resourceClass, alias.getFile()), isAliasFor(resource));
            }
        }
        catch (InvalidPathException e)
        {
            // NTFS filesystem streams are unsupported on some platforms.
            assumeTrue(false, "Not supported on this OS");
        }
    }
}
/**
 * A filename containing a semicolon must not be treated as an alias.
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a
 * no-op — if the file could not be created, the test proceeded and failed on
 * the later assertion instead of being skipped. It now uses
 * {@code assumeTrue(false, ...)} to abort as skipped, matching the comment.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
@DisabledOnOs(WINDOWS)
public void testSemicolon(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();

    try
    {
        // attempt to create file
        Path foo = dir.resolve("foo;");
        Files.createFile(foo);
    }
    catch (Exception e)
    {
        // if unable to create file, no point testing the rest.
        // this is the path that Microsoft Windows takes.
        assumeTrue(false, "Not supported on this OS");
    }

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource res = base.addPath("foo;");
        assertThat("Alias: " + res, res, hasNoAlias());
    }
}
/**
 * A filename containing a single quote must not be treated as an alias.
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a
 * no-op — if the file could not be created, the test proceeded and failed on
 * the later assertion instead of being skipped. It now uses
 * {@code assumeTrue(false, ...)} to abort as skipped, matching the comment.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
@DisabledOnOs(WINDOWS)
public void testSingleQuote(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);

    try
    {
        // attempt to create file
        Path foo = dir.resolve("foo' bar");
        Files.createFile(foo);
    }
    catch (Exception e)
    {
        // if unable to create file, no point testing the rest.
        // this is the path that Microsoft Windows takes.
        assumeTrue(false, "Not supported on this OS");
    }

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource res = base.addPath("foo' bar");
        assertThat("Alias: " + res, res.getAlias(), nullValue());
    }
}
/**
 * A filename containing a backtick must not be treated as an alias.
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a
 * no-op — if the file could not be created, the test proceeded and failed on
 * the later assertion instead of being skipped. It now uses
 * {@code assumeTrue(false, ...)} to abort as skipped, matching the comment.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
@DisabledOnOs(WINDOWS)
public void testSingleBackTick(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);

    try
    {
        // attempt to create file
        Path foo = dir.resolve("foo` bar");
        Files.createFile(foo);
    }
    catch (Exception e)
    {
        // if unable to create file, no point testing the rest.
        // this is the path that Microsoft Windows takes.
        assumeTrue(false, "Not supported on this OS");
    }

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource res = base.addPath("foo` bar");
        assertThat("Alias: " + res, res.getAlias(), nullValue());
    }
}
/**
 * A filename containing square brackets must not be treated as an alias.
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a
 * no-op — if the file could not be created, the test proceeded and failed on
 * the later assertion instead of being skipped. It now uses
 * {@code assumeTrue(false, ...)} to abort as skipped, matching the comment.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
@DisabledOnOs(WINDOWS)
public void testBrackets(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);

    try
    {
        // attempt to create file
        Path foo = dir.resolve("foo[1]");
        Files.createFile(foo);
    }
    catch (Exception e)
    {
        // if unable to create file, no point testing the rest.
        // this is the path that Microsoft Windows takes.
        assumeTrue(false, "Not supported on this OS");
    }

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource res = base.addPath("foo[1]");
        assertThat("Alias: " + res, res.getAlias(), nullValue());
    }
}
/**
 * A filename containing curly braces must not be treated as an alias.
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a
 * no-op — if the file could not be created, the test proceeded and failed on
 * the later assertion instead of being skipped. It now uses
 * {@code assumeTrue(false, ...)} to abort as skipped, matching the comment.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@ValueSource(classes = PathResource.class) // FileResource does not support this
@DisabledOnOs(WINDOWS)
public void testBraces(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);

    try
    {
        // attempt to create file
        Path foo = dir.resolve("foo.{bar}.txt");
        Files.createFile(foo);
    }
    catch (Exception e)
    {
        // if unable to create file, no point testing the rest.
        // this is the path that Microsoft Windows takes.
        assumeTrue(false, "Not supported on this OS");
    }

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource res = base.addPath("foo.{bar}.txt");
        assertThat("Alias: " + res, res.getAlias(), nullValue());
    }
}
/**
 * A filename containing a caret must not be treated as an alias.
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a
 * no-op — if the file could not be created, the test proceeded and failed on
 * the later assertion instead of being skipped. It now uses
 * {@code assumeTrue(false, ...)} to abort as skipped, matching the comment.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@ValueSource(classes = PathResource.class) // FileResource does not support this
@DisabledOnOs(WINDOWS)
public void testCaret(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);

    try
    {
        // attempt to create file
        Path foo = dir.resolve("foo^3.txt");
        Files.createFile(foo);
    }
    catch (Exception e)
    {
        // if unable to create file, no point testing the rest.
        // this is the path that Microsoft Windows takes.
        assumeTrue(false, "Not supported on this OS");
    }

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource res = base.addPath("foo^3.txt");
        assertThat("Alias: " + res, res.getAlias(), nullValue());
    }
}
/**
 * A filename containing a pipe character must not be treated as an alias.
 * <p>
 * Fixed: the catch block used {@code assumeTrue(true, ...)}, which is a
 * no-op — if the file could not be created, the test proceeded and failed on
 * the later assertion instead of being skipped. It now uses
 * {@code assumeTrue(false, ...)} to abort as skipped, matching the comment.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@ValueSource(classes = PathResource.class) // FileResource does not support this
@DisabledOnOs(WINDOWS)
public void testPipe(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);

    try
    {
        // attempt to create file
        Path foo = dir.resolve("foo|bar.txt");
        Files.createFile(foo);
    }
    catch (Exception e)
    {
        // if unable to create file, no point testing the rest.
        // this is the path that Microsoft Windows takes.
        assumeTrue(false, "Not supported on this OS");
    }

    try (Resource base = newResource(resourceClass, dir.toFile()))
    {
        Resource res = base.addPath("foo|bar.txt");
        assertThat("Alias: " + res, res.getAlias(), nullValue());
    }
}
/**
 * The most basic access example: a plain existing file resolves via URI.
 *
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testExist_Normal(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Files.createFile(testDir.resolve("a.jsp"));

    URI ref = workDir.getPath().toUri().resolve("a.jsp");
    try (Resource fileres = newResource(resourceClass, ref))
    {
        assertThat("Resource: " + fileres, fileres.exists(), is(true));
    }
}
@ParameterizedTest
@ValueSource(classes = PathResource.class) // FileResource not supported here
public void testSingleQuoteInFileName(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);

    // Two files: one with a quote, one with a quote AND a space.
    Files.createFile(testDir.resolve("foo's.txt"));
    Files.createFile(testDir.resolve("f o's.txt"));

    // Raw (unencoded) quote in the URI.
    URI refQuoted = testDir.resolve("foo's.txt").toUri();
    try (Resource fileres = newResource(resourceClass, refQuoted))
    {
        assertThat("Exists: " + refQuoted, fileres.exists(), is(true));
        assertThat("Alias: " + refQuoted, fileres, hasNoAlias());
    }

    // Percent-encoded quote.
    URI refEncoded = testDir.toUri().resolve("foo%27s.txt");
    try (Resource fileres = newResource(resourceClass, refEncoded))
    {
        assertThat("Exists: " + refEncoded, fileres.exists(), is(true));
        assertThat("Alias: " + refEncoded, fileres, hasNoAlias());
    }

    // Encoded space, raw quote.
    URI refQuoteSpace = testDir.toUri().resolve("f%20o's.txt");
    try (Resource fileres = newResource(resourceClass, refQuoteSpace))
    {
        assertThat("Exists: " + refQuoteSpace, fileres.exists(), is(true));
        assertThat("Alias: " + refQuoteSpace, fileres, hasNoAlias());
    }

    // Encoded space AND encoded quote.
    URI refEncodedSpace = testDir.toUri().resolve("f%20o%27s.txt");
    try (Resource fileres = newResource(resourceClass, refEncodedSpace))
    {
        assertThat("Exists: " + refEncodedSpace, fileres.exists(), is(true));
        assertThat("Alias: " + refEncodedSpace, fileres, hasNoAlias());
    }

    URI refA = testDir.toUri().resolve("foo's.txt");
    URI refB = testDir.toUri().resolve("foo%27s.txt");

    // show that simple URI.equals() doesn't work
    String msg = "URI[a].equals(URI[b])" + System.lineSeparator() +
        "URI[a] = " + refA + System.lineSeparator() +
        "URI[b] = " + refB;
    assertThat(msg, refA.equals(refB), is(false));

    // now show that Resource.equals() does work
    try (Resource a = newResource(resourceClass, refA);
         Resource b = newResource(resourceClass, refB))
    {
        assertThat("A.equals(B)", a.equals(b), is(true));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testExist_BadURINull(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Files.createFile(testDir.resolve("a.jsp"));

    try
    {
        // request with null at end
        URI uri = workDir.getPath().toUri().resolve("a.jsp%00");
        assertThat("Null URI", uri, notNullValue());

        Resource resource = newResource(resourceClass, uri);

        // if we managed to build a resource, it better not exist
        assertFalse(resource.exists());
    }
    catch (InvalidPathException e)
    {
        // Rejecting the embedded NUL outright is also acceptable.
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testExist_BadURINullX(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Files.createFile(testDir.resolve("a.jsp"));

    try
    {
        // request with null and x at end
        URI uri = workDir.getPath().toUri().resolve("a.jsp%00x");
        assertThat("NullX URI", uri, notNullValue());

        Resource resource = newResource(resourceClass, uri);

        // if we managed to build a resource, it better not exist
        assertFalse(resource.exists());
    }
    catch (InvalidPathException e)
    {
        // Rejecting the embedded NUL outright is also acceptable.
    }
}
/**
 * Adding a path containing an escaped Windows backslash separator
 * ({@code aa\/foo.txt}) must only resolve (as an alias) on Windows.
 * <p>
 * Fixed: the OS check used the fully-qualified
 * {@code org.junit.jupiter.api.condition.OS.WINDOWS.isCurrentOs()}, which is
 * inconsistent with the sibling {@code testAddPath_WindowsExtensionLess} that
 * uses the imported {@code OS.WINDOWS.isCurrentOs()} form.
 *
 * @param resourceClass the Resource implementation under test
 * @throws Exception failed test
 */
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testAddPath_WindowsSlash(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Files.createDirectories(dir);
    Path basePath = dir.resolve("base");
    FS.ensureDirExists(basePath);
    Path dirPath = basePath.resolve("aa");
    FS.ensureDirExists(dirPath);
    Path filePath = dirPath.resolve("foo.txt");
    Files.createFile(filePath);

    try (Resource base = newResource(resourceClass, basePath.toFile()))
    {
        assertThat("Exists: " + basePath, base.exists(), is(true));
        assertThat("Alias: " + basePath, base, hasNoAlias());

        Resource r = base.addPath("aa\\/foo.txt");
        assertThat("getURI()", r.getURI().toASCIIString(), containsString("aa%5C/foo.txt"));

        if (OS.WINDOWS.isCurrentOs())
        {
            // On Windows the backslash is a separator, so this is an alias of aa/foo.txt.
            assertThat("isAlias()", r.isAlias(), is(true));
            assertThat("getAlias()", r.getAlias(), notNullValue());
            assertThat("getAlias()", r.getAlias().toASCIIString(), containsString("aa/foo.txt"));
            assertThat("Exists: " + r, r.exists(), is(true));
        }
        else
        {
            // Elsewhere the backslash is a literal character; no such file exists.
            assertThat("isAlias()", r.isAlias(), is(false));
            assertThat("Exists: " + r, r.exists(), is(false));
        }
    }
    catch (InvalidPathException e)
    {
        // Exception is acceptable
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testAddPath_WindowsExtensionLess(Class<PathResource> resourceClass) throws Exception
{
    Path testDir = workDir.getEmptyPathDir();
    Files.createDirectories(testDir);
    Path basePath = testDir.resolve("base");
    FS.ensureDirExists(basePath);
    Path aaPath = basePath.resolve("aa");
    FS.ensureDirExists(aaPath);
    Files.createFile(aaPath.resolve("foo.txt"));

    try (Resource base = newResource(resourceClass, basePath.toFile()))
    {
        assertThat("Exists: " + basePath, base.exists(), is(true));
        assertThat("Alias: " + basePath, base, hasNoAlias());

        // "aa." is the Windows extension-less form of the "aa" directory.
        Resource r = base.addPath("aa./foo.txt");
        assertThat("getURI()", r.getURI().toASCIIString(), containsString("aa./foo.txt"));

        if (OS.WINDOWS.isCurrentOs())
        {
            // Windows strips the trailing dot, so this is an alias of aa/foo.txt.
            assertThat("isAlias()", r.isAlias(), is(true));
            assertThat("getAlias()", r.getAlias(), notNullValue());
            assertThat("getAlias()", r.getAlias().toASCIIString(), containsString("aa/foo.txt"));
            assertThat("Exists: " + r, r.exists(), is(true));
        }
        else
        {
            // Elsewhere "aa." is a literal (non-existent) name.
            assertThat("isAlias()", r.isAlias(), is(false));
            assertThat("Exists: " + r, r.exists(), is(false));
        }
    }
    catch (InvalidPathException e)
    {
        // Exception is acceptable
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testAddInitialSlash(Class<PathResource> resourceClass) throws Exception
{
    // Layout on disk: <work>/base/foo.txt
    Path workPath = workDir.getEmptyPathDir();
    Files.createDirectories(workPath);
    Path baseDir = workPath.resolve("base");
    FS.ensureDirExists(baseDir);
    Files.createFile(baseDir.resolve("foo.txt"));

    try (Resource base = newResource(resourceClass, baseDir.toFile()))
    {
        assertThat("Exists: " + baseDir, base.exists(), is(true));
        assertThat("Alias: " + baseDir, base, hasNoAlias());

        // A single leading slash is normalized away: not an alias, file resolves
        Resource r = base.addPath("/foo.txt");
        assertThat("getURI()", r.getURI().toASCIIString(), containsString("/foo.txt"));
        assertThat("isAlias()", r.isAlias(), is(false));
        assertThat("Exists: " + r, r.exists(), is(true));
    }
    catch (InvalidPathException e)
    {
        // Exception is acceptable
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testAddInitialDoubleSlash(Class<PathResource> resourceClass) throws Exception
{
    // Layout on disk: <work>/base/foo.txt
    Path workPath = workDir.getEmptyPathDir();
    Files.createDirectories(workPath);
    Path baseDir = workPath.resolve("base");
    FS.ensureDirExists(baseDir);
    Files.createFile(baseDir.resolve("foo.txt"));

    try (Resource base = newResource(resourceClass, baseDir.toFile()))
    {
        assertThat("Exists: " + baseDir, base.exists(), is(true));
        assertThat("Alias: " + baseDir, base, hasNoAlias());

        // A leading double slash is preserved in the URI but resolves to the
        // same file — an alias of "/foo.txt" that still exists
        Resource r = base.addPath("//foo.txt");
        assertThat("getURI()", r.getURI().toASCIIString(), containsString("//foo.txt"));
        assertThat("isAlias()", r.isAlias(), is(true));
        assertThat("getAlias()", r.getAlias(), notNullValue());
        assertThat("getAlias()", r.getAlias().toASCIIString(), containsString("/foo.txt"));
        assertThat("Exists: " + r, r.exists(), is(true));
    }
    catch (InvalidPathException e)
    {
        // Exception is acceptable
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testAddDoubleSlash(Class<PathResource> resourceClass) throws Exception
{
    // Layout on disk: <work>/base/aa/foo.txt
    Path workPath = workDir.getEmptyPathDir();
    Files.createDirectories(workPath);
    Path baseDir = workPath.resolve("base");
    FS.ensureDirExists(baseDir);
    Path subDir = baseDir.resolve("aa");
    FS.ensureDirExists(subDir);
    Files.createFile(subDir.resolve("foo.txt"));

    try (Resource base = newResource(resourceClass, baseDir.toFile()))
    {
        assertThat("Exists: " + baseDir, base.exists(), is(true));
        assertThat("Alias: " + baseDir, base, hasNoAlias());

        // An embedded double slash is preserved in the URI but resolves to the
        // same file — an alias of "aa/foo.txt" that still exists
        Resource r = base.addPath("aa//foo.txt");
        assertThat("getURI()", r.getURI().toASCIIString(), containsString("aa//foo.txt"));
        assertThat("isAlias()", r.isAlias(), is(true));
        assertThat("getAlias()", r.getAlias(), notNullValue());
        assertThat("getAlias()", r.getAlias().toASCIIString(), containsString("aa/foo.txt"));
        assertThat("Exists: " + r, r.exists(), is(true));
    }
    catch (InvalidPathException e)
    {
        // Exception is acceptable
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testEncoding(Class<PathResource> resourceClass) throws Exception
{
    Path workPath = workDir.getEmptyPathDir();
    Files.createDirectories(workPath);

    // File name containing characters that require URI percent-encoding
    // (spaces and '#'), but are legal in a raw filesystem path.
    Path specialFile = workPath.resolve("a file with,spe#ials");
    Files.createFile(specialFile);

    try (Resource res = newResource(resourceClass, specialFile.toFile()))
    {
        // URI form must be percent-encoded; File form must stay raw
        assertThat("Specials URL", res.getURI().toASCIIString(), containsString("a%20file%20with,spe%23ials"));
        assertThat("Specials Filename", res.getFile().toString(), containsString("a file with,spe#ials"));

        res.delete();
        assertThat("File should have been deleted.", res.exists(), is(false));
    }
}
@ParameterizedTest
@MethodSource("fsResourceProvider")
public void testUtf8Dir(Class<PathResource> resourceClass) throws Exception
{
    Path dir = workDir.getEmptyPathDir();
    Path utf8Dir;
    try
    {
        utf8Dir = dir.resolve("bãm");
        Files.createDirectories(utf8Dir);
    }
    catch (InvalidPathException e)
    {
        // If unable to create the directory, there is no point testing the rest.
        // This occurs on systems that don't support UTF-8 directory names
        // (or that simply don't have a Locale set properly).
        //
        // BUGFIX: the original called assumeTrue(true, ...), which is a no-op —
        // an assumption on `true` never aborts, so the test silently PASSED
        // here instead of being reported as skipped. assumeTrue(false, ...)
        // throws TestAbortedException and marks the test as skipped.
        assumeTrue(false, "Not supported on this OS");
        return; // unreachable (assumeTrue(false) throws), but keeps utf8Dir definitely-assigned
    }

    Path file = utf8Dir.resolve("file.txt");
    Files.createFile(file);

    try (Resource base = newResource(resourceClass, utf8Dir.toFile()))
    {
        // The UTF-8 named directory must resolve cleanly, with no alias
        assertThat("Exists: " + utf8Dir, base.exists(), is(true));
        assertThat("Alias: " + utf8Dir, base, hasNoAlias());

        Resource r = base.addPath("file.txt");
        assertThat("Exists: " + r, r.exists(), is(true));
        assertThat("Alias: " + r, r, hasNoAlias());
    }
}
@ParameterizedTest
@ValueSource(classes = PathResource.class) // FileResource does not support this
@EnabledOnOs(WINDOWS)
public void testUncPath(Class<PathResource> resourceClass) throws Exception
{
    // UNC-style URI (file:////host/share) — Windows only
    try (Resource base = newResource(resourceClass, URI.create("file:////127.0.0.1/path")))
    {
        Resource webInf = base.addPath("WEB-INF/");
        assertThat("getURI()", webInf.getURI().toASCIIString(), containsString("path/WEB-INF/"));
        // A plain sub-path of a UNC base is not an alias
        assertThat("isAlias()", webInf.isAlias(), is(false));
        assertThat("getAlias()", webInf.getAlias(), nullValue());
    }
}
/**
 * Reads the entire content of the given resource and decodes it as UTF-8.
 *
 * @param resource the resource to read
 * @return the resource content as a UTF-8 string
 * @throws IOException if the resource cannot be read
 */
private String toString(Resource resource) throws IOException
{
    try (InputStream in = resource.getInputStream();
         ByteArrayOutputStream buffer = new ByteArrayOutputStream())
    {
        IO.copy(in, buffer);
        return buffer.toString("utf-8");
    }
}
/**
 * Creates (or overwrites) the given file with the supplied text content.
 *
 * @param outputFile the file to write
 * @param content the text to write into the file
 * @throws IOException if the file cannot be written
 */
private void touchFile(Path outputFile, String content) throws IOException
{
    // Equivalent to copying a StringReader into the writer, without the
    // intermediate reader: write the content directly.
    try (BufferedWriter writer = Files.newBufferedWriter(outputFile))
    {
        writer.write(content);
    }
}
}
|
package org.randomcoder.mvc.controller;
import org.randomcoder.bo.TagBusiness;
import org.randomcoder.mvc.command.TagAddCommand;
import org.randomcoder.mvc.command.TagEditCommand;
import org.randomcoder.mvc.validator.TagAddValidator;
import org.randomcoder.mvc.validator.TagEditValidator;
import org.randomcoder.pagination.PagerInfo;
import org.randomcoder.tag.TagStatistics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Sort;
import org.springframework.data.web.PageableDefault;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.validation.BindingResult;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.WebDataBinder;
import org.springframework.web.bind.annotation.InitBinder;
import org.springframework.web.bind.annotation.ModelAttribute;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import jakarta.inject.Inject;
import jakarta.servlet.http.HttpServletRequest;
/**
 * Controller class which handles tag management: listing, adding, editing and
 * deleting tags. All views are resolved by name; "tag-list-redirect" redirects
 * back to the tag list.
 */
@Controller("tagController")
public class TagController {

  private static final Logger logger = LoggerFactory.getLogger(TagController.class);

  private TagBusiness tagBusiness;
  private TagAddValidator tagAddValidator;
  private TagEditValidator tagEditValidator;

  // Upper bound on page size, overridable via ${tag.pagesize.max}
  private int maximumPageSize = 100;

  /**
   * Sets the TagBusiness implementation to use.
   *
   * @param tagBusiness TagBusiness implementation
   */
  @Inject
  public void setTagBusiness(TagBusiness tagBusiness) {
    this.tagBusiness = tagBusiness;
  }

  /**
   * Sets the validator to use for adding tags.
   *
   * @param tagAddValidator tag add validator
   */
  @Inject
  public void setTagAddValidator(TagAddValidator tagAddValidator) {
    this.tagAddValidator = tagAddValidator;
  }

  /**
   * Sets the validator to use for editing tags.
   *
   * @param tagEditValidator tag edit validator
   */
  @Inject
  public void setTagEditValidator(TagEditValidator tagEditValidator) {
    this.tagEditValidator = tagEditValidator;
  }

  /**
   * Sets the maximum number of items to allow per page (defaults to 100).
   *
   * @param maximumPageSize maximum number of items per page
   */
  @Value("${tag.pagesize.max}")
  public void setMaximumPageSize(int maximumPageSize) {
    this.maximumPageSize = maximumPageSize;
  }

  /**
   * Binds the appropriate validator based on the command type being bound.
   *
   * @param binder data binder
   */
  @InitBinder
  public void initBinder(WebDataBinder binder) {
    Object target = binder.getTarget();
    if (target instanceof TagEditCommand) {
      binder.setValidator(tagEditValidator);
    } else if (target instanceof TagAddCommand) {
      binder.setValidator(tagAddValidator);
    }
  }

  /**
   * Generates the tag list.
   *
   * @param model MVC model
   * @param pageable page to retrieve
   * @param request HTTP servlet request
   * @return tag list view
   */
  @RequestMapping("/tag")
  public String tagList(Model model,
      @PageableDefault(25) Pageable pageable, HttpServletRequest request) {
    int size = pageable.getPageSize();
    int page = pageable.getPageNumber();

    // Clamp oversized requests to the configured maximum and reset to the
    // first page, since the requested page index is meaningless at a
    // different page size.
    if (size > maximumPageSize) {
      size = maximumPageSize;
      page = 0;
    }

    // Always sort by display name. NOTE: the original code re-checked
    // pageable.getPageSize() > maximumPageSize after this point; that branch
    // was unreachable (size is already clamped above) and has been removed.
    pageable = PageRequest.of(page, size, Sort.by("displayName"));

    Page<TagStatistics> tagStats = tagBusiness.findTagStatistics(pageable);

    // populate model
    model.addAttribute("pager", tagStats);
    model.addAttribute("pagerInfo", new PagerInfo<>(tagStats, request));

    return "tag-list";
  }

  /**
   * Begins adding a tag.
   *
   * @param cmd tag add command
   * @param result binding result
   * @param model model
   * @return tag add view
   */
  @RequestMapping(value = "/tag/add", method = RequestMethod.GET)
  public String addTag(@ModelAttribute("command") TagAddCommand cmd,
      BindingResult result, Model model) {
    // A fresh command backs the form; the bound cmd/result parameters are
    // required for Spring's binding lifecycle but intentionally unused here.
    model.addAttribute("command", new TagAddCommand());
    return "tag-add";
  }

  /**
   * Cancels adding a new tag.
   *
   * @return redirect to tag list view
   */
  @RequestMapping(value = "/tag/add", method = RequestMethod.POST, params = "cancel")
  public String addTagCancel() {
    return "tag-list-redirect";
  }

  /**
   * Saves a new tag.
   *
   * @param cmd tag add command
   * @param result validation result
   * @return redirect to tag list view, or back to the add view on validation errors
   */
  @RequestMapping(value = "/tag/add", method = RequestMethod.POST, params = "!cancel")
  public String addTagSubmit(
      @ModelAttribute("command") @Validated TagAddCommand cmd,
      BindingResult result) {
    if (result.hasErrors()) {
      return "tag-add";
    }
    tagBusiness.createTag(cmd);
    return "tag-list-redirect";
  }

  /**
   * Begins editing a tag.
   *
   * @param cmd tag edit command
   * @param result binding result
   * @param model model
   * @return tag edit view
   */
  @RequestMapping(value = "/tag/edit", method = RequestMethod.GET)
  public String editTag(@ModelAttribute("command") TagEditCommand cmd,
      BindingResult result, Model model) {
    // Parameterized logging — SLF4J formats lazily, so no isDebugEnabled()
    // guard or string concatenation is needed.
    logger.debug("Command: {}", cmd);
    tagBusiness.loadTagForEditing(cmd, cmd.getId());
    model.addAttribute("command", cmd);
    return "tag-edit";
  }

  /**
   * Cancels editing a tag.
   *
   * @return redirect to tag list view
   */
  @RequestMapping(value = "/tag/edit", method = RequestMethod.POST, params = "cancel")
  public String editTagCancel() {
    return "tag-list-redirect";
  }

  /**
   * Saves an edited tag.
   *
   * @param cmd tag edit command
   * @param result validation result
   * @return redirect to tag list view, or back to the edit view on validation errors
   */
  @RequestMapping(value = "/tag/edit", method = RequestMethod.POST, params = "!cancel")
  public String editTagSubmit(
      @ModelAttribute("command") @Validated TagEditCommand cmd,
      BindingResult result) {
    if (result.hasErrors()) {
      return "tag-edit";
    }
    logger.debug("Command: {}", cmd);
    tagBusiness.updateTag(cmd, cmd.getId());
    return "tag-list-redirect";
  }

  /**
   * Deletes the selected tag.
   *
   * @param id tag ID
   * @return redirect to tag list view
   */
  @RequestMapping("/tag/delete")
  public String deleteTag(@RequestParam("id") long id) {
    tagBusiness.deleteTag(id);
    return "tag-list-redirect";
  }
}
|
package com.example.notification;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
// Smoke test: boots the full Spring application context so that any bean
// wiring, configuration, or component-scanning problem fails the build.
@RunWith(SpringRunner.class)
@SpringBootTest
public class SpringbootNotificationApplicationTests {

    /**
     * Passes when the application context loads successfully; the empty body
     * is intentional — context startup itself is the assertion.
     */
    @Test
    public void contextLoads() {
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.